/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

/** Before making any changes in the implementation, please emulate algorithmic changes
    with the SPIN tool using <TBB directory>/tools/spin_models/ReaderWriterMutex.pml.
    Some code may look like it "can be restructured", but its structure does matter! */

#include "oneapi/tbb/queuing_rw_mutex.h"
#include "oneapi/tbb/detail/_assert.h"
#include "oneapi/tbb/detail/_utils.h"
#include "itt_notify.h"
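
// A rough usage sketch (illustrative only, not part of this file's logic) of the public API whose
// out-of-line parts are implemented in this translation unit:
//
//     tbb::queuing_rw_mutex mutex;
//     {
//         tbb::queuing_rw_mutex::scoped_lock lock(mutex, /*write=*/false); // acquire as reader
//         // ... shared read access ...
//         if (lock.upgrade_to_writer()) {
//             // exclusive access; the lock was upgraded without being released in between
//         }
//     } // released on scope exit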

namespace tbb {
namespace detail {
namespace r1 {

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// Workaround for overzealous compiler warnings
#pragma warning (push)
#pragma warning (disable: 4311 4312)
#endif

//! A view of a T* with additional functionality for twiddling low-order bits.
template<typename T>
class tricky_atomic_pointer {
public:
    using word = uintptr_t;

    static T* fetch_add( std::atomic<word>& location, word addend, std::memory_order memory_order ) {
        return reinterpret_cast<T*>(location.fetch_add(addend, memory_order));
    }

    static T* exchange( std::atomic<word>& location, T* value, std::memory_order memory_order ) {
        return reinterpret_cast<T*>(location.exchange(reinterpret_cast<word>(value), memory_order));
    }

    static T* compare_exchange_strong( std::atomic<word>& obj, const T* expected, const T* desired, std::memory_order memory_order ) {
        word expd = reinterpret_cast<word>(expected);
        obj.compare_exchange_strong(expd, reinterpret_cast<word>(desired), memory_order);
        return reinterpret_cast<T*>(expd);
    }

    static void store( std::atomic<word>& location, const T* value, std::memory_order memory_order ) {
        location.store(reinterpret_cast<word>(value), memory_order);
    }

    static T* load( std::atomic<word>& location, std::memory_order memory_order ) {
        return reinterpret_cast<T*>(location.load(memory_order));
    }

    static void spin_wait_while_eq( const std::atomic<word>& location, const T* value ) {
        tbb::detail::d0::spin_wait_while_eq(location, reinterpret_cast<word>(value));
    }

    T*& ref;
    tricky_atomic_pointer( T*& original ) : ref(original) {}
    tricky_atomic_pointer(const tricky_atomic_pointer&) = delete;
    tricky_atomic_pointer& operator=(const tricky_atomic_pointer&) = delete;
    T* operator&( const word operand2 ) const {
        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) & operand2 );
    }
    T* operator|( const word operand2 ) const {
        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) | operand2 );
    }
};

using tricky_pointer = tricky_atomic_pointer<queuing_rw_mutex::scoped_lock>;

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// Workaround for overzealous compiler warnings
#pragma warning (pop)
#endif

//! Flag bits in a state_t that specify information about a locking request.
enum state_t_flags : unsigned char {
    STATE_NONE                   = 0,
    STATE_WRITER                 = 1<<0,
    STATE_READER                 = 1<<1,
    STATE_READER_UNBLOCKNEXT     = 1<<2,
    STATE_ACTIVEREADER           = 1<<3,
    STATE_UPGRADE_REQUESTED      = 1<<4,
    STATE_UPGRADE_WAITING        = 1<<5,
    STATE_UPGRADE_LOSER          = 1<<6,
    STATE_COMBINED_WAITINGREADER = STATE_READER | STATE_READER_UNBLOCKNEXT,
    STATE_COMBINED_READER        = STATE_COMBINED_WAITINGREADER | STATE_ACTIVEREADER,
    STATE_COMBINED_UPGRADING     = STATE_UPGRADE_WAITING | STATE_UPGRADE_LOSER
};

static const unsigned char RELEASED = 0;
static const unsigned char ACQUIRED = 1;
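// Descriptive note: RELEASED/ACQUIRED are the two values taken by scoped_lock::my_internal_lock,
// the small per-node spin lock that the *_internal_lock helpers below operate on.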

struct queuing_rw_mutex_impl {
    //! Try to acquire the internal lock
    /** Returns true if lock was successfully acquired. */
    static bool try_acquire_internal_lock(d1::queuing_rw_mutex::scoped_lock& s)
    {
        auto expected = RELEASED;
        return s.my_internal_lock.compare_exchange_strong(expected, ACQUIRED);
    }

    //! Acquire the internal lock
    static void acquire_internal_lock(d1::queuing_rw_mutex::scoped_lock& s)
    {
        // Usually, we would use the test-test-and-set idiom here, with exponential backoff.
        // But so far, experiments indicate there is no value in doing so here.
        while( !try_acquire_internal_lock(s) ) {
            machine_pause(1);
        }
    }

    //! Release the internal lock
    static void release_internal_lock(d1::queuing_rw_mutex::scoped_lock& s)
    {
        s.my_internal_lock.store(RELEASED, std::memory_order_release);
    }

    //! Wait for internal lock to be released
    static void wait_for_release_of_internal_lock(d1::queuing_rw_mutex::scoped_lock& s)
    {
        spin_wait_until_eq(s.my_internal_lock, RELEASED);
    }

    //! A helper function
    static void unblock_or_wait_on_internal_lock(d1::queuing_rw_mutex::scoped_lock& s, uintptr_t flag ) {
        if( flag ) {
            wait_for_release_of_internal_lock(s);
        }
        else {
            release_internal_lock(s);
        }
    }

    //! Mask for low order bit of a pointer.
    static const tricky_pointer::word FLAG = 0x1;

    static uintptr_t get_flag( d1::queuing_rw_mutex::scoped_lock* ptr ) {
        return reinterpret_cast<uintptr_t>(ptr) & FLAG;
    }
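
    // Descriptive note: a scoped_lock* is always at least two-byte aligned, so its low-order bit is
    // free to carry a one-bit message. For example, a predecessor pointer stored as
    // tricky_pointer(p) | FLAG tells an arriving reader that the predecessor is an upgrading reader
    // and that it must wait; get_flag() and tricky_pointer's operator& recover the flag and the
    // clean pointer, respectively.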

    //------------------------------------------------------------------------
    // Methods of queuing_rw_mutex::scoped_lock
    //------------------------------------------------------------------------

    //! A method to acquire queuing_rw_mutex lock
    static void acquire(d1::queuing_rw_mutex& m, d1::queuing_rw_mutex::scoped_lock& s, bool write)
    {
        __TBB_ASSERT( !s.my_mutex, "scoped_lock is already holding a mutex");

        // Must set all fields before the exchange, because once the
        // exchange executes, *this becomes accessible to other threads.
        s.my_mutex = &m;
        s.my_prev.store(0U, std::memory_order_relaxed);
        s.my_next.store(0U, std::memory_order_relaxed);
        s.my_going.store(0U, std::memory_order_relaxed);
        s.my_state.store(d1::queuing_rw_mutex::scoped_lock::state_t(write ? STATE_WRITER : STATE_READER), std::memory_order_relaxed);
        s.my_internal_lock.store(RELEASED, std::memory_order_relaxed);

        // The exchange must have release semantics, because we are
        // "sending" the fields initialized above to other actors.
        // We need acquire semantics, because we are acquiring the predecessor (or the mutex if there is no predecessor).
        queuing_rw_mutex::scoped_lock* predecessor = m.q_tail.exchange(&s, std::memory_order_acq_rel);

        if( write ) { // Acquiring for write

            if( predecessor ) {
                ITT_NOTIFY(sync_prepare, s.my_mutex);
                predecessor = tricky_pointer(predecessor) & ~FLAG;
                __TBB_ASSERT( !predecessor->my_next, "the predecessor has another successor!");
                tricky_pointer::store(predecessor->my_next, &s, std::memory_order_release);
                // We are acquiring the mutex
                spin_wait_until_eq(s.my_going, 1U, std::memory_order_acquire);
            }

        } else { // Acquiring for read
#if __TBB_USE_ITT_NOTIFY
            bool sync_prepare_done = false;
#endif
            if( predecessor ) {
                unsigned char pred_state{};
                __TBB_ASSERT( !s.my_prev.load(std::memory_order_relaxed), "the predecessor is already set" );
                if( tricky_pointer(predecessor) & FLAG ) {
                    /* this is only possible if the predecessor is an upgrading reader and it signals us to wait */
                    pred_state = STATE_UPGRADE_WAITING;
                    predecessor = tricky_pointer(predecessor) & ~FLAG;
                } else {
                    // Load predecessor->my_state now, because once predecessor->my_next becomes
                    // non-null, we must assume that *predecessor might be destroyed.
                    pred_state = predecessor->my_state.load(std::memory_order_relaxed);
                    if (pred_state == STATE_READER) {
                        // Notify the previous reader to unblock us.
                        predecessor->my_state.compare_exchange_strong(pred_state, STATE_READER_UNBLOCKNEXT, std::memory_order_relaxed);
                    }
                    if (pred_state == STATE_ACTIVEREADER) { // either we initially read it or the CAS failed
                        // An active reader means that the predecessor has already acquired the mutex and cannot notify us.
                        // Therefore, we need to acquire the mutex ourselves by re-reading the predecessor state.
                        (void)predecessor->my_state.load(std::memory_order_acquire);
                    }
                }
                tricky_pointer::store(s.my_prev, predecessor, std::memory_order_relaxed);
                __TBB_ASSERT( !( tricky_pointer(predecessor) & FLAG ), "use of corrupted pointer!" );
                __TBB_ASSERT( !predecessor->my_next.load(std::memory_order_relaxed), "the predecessor has another successor!");
                tricky_pointer::store(predecessor->my_next, &s, std::memory_order_release);
                if( pred_state != STATE_ACTIVEREADER ) {
#if __TBB_USE_ITT_NOTIFY
                    sync_prepare_done = true;
                    ITT_NOTIFY(sync_prepare, s.my_mutex);
#endif
                    // We are acquiring the mutex
                    spin_wait_until_eq(s.my_going, 1U, std::memory_order_acquire);
                }
            }

            // The protected state must have been acquired here before it can be further released to any other reader(s):
            unsigned char old_state = STATE_READER;
            // When this reader is signaled by the previous actor, it acquires the mutex.
            // We need to build a happens-before relation with all other incoming readers that will read our ACTIVEREADER
            // without blocking on my_going. Therefore, we need to publish ACTIVEREADER with release semantics.
            // On failure it is relaxed, because we will build happens-before via my_going.
            s.my_state.compare_exchange_strong(old_state, STATE_ACTIVEREADER, std::memory_order_release, std::memory_order_relaxed);
            if( old_state!=STATE_READER ) {
#if __TBB_USE_ITT_NOTIFY
                if( !sync_prepare_done )
                    ITT_NOTIFY(sync_prepare, s.my_mutex);
#endif
                // Failed to become an active reader -> need to unblock the next waiting reader first
                __TBB_ASSERT( s.my_state.load(std::memory_order_relaxed)==STATE_READER_UNBLOCKNEXT, "unexpected state" );
                spin_wait_while_eq(s.my_next, 0U, std::memory_order_acquire);
                /* my_state should be changed before unblocking the next, otherwise the next might finish
                   and another thread could observe our old state and stay blocked */
                s.my_state.store(STATE_ACTIVEREADER, std::memory_order_relaxed);
                tricky_pointer::load(s.my_next, std::memory_order_relaxed)->my_going.store(1U, std::memory_order_release);
            }
            __TBB_ASSERT(s.my_state.load(std::memory_order_relaxed) == STATE_ACTIVEREADER, "unlocked reader is active reader");
        }

        ITT_NOTIFY(sync_acquired, s.my_mutex);
    }

    //! A method to acquire queuing_rw_mutex if it is free
    static bool try_acquire(d1::queuing_rw_mutex& m, d1::queuing_rw_mutex::scoped_lock& s, bool write)
    {
        __TBB_ASSERT( !s.my_mutex, "scoped_lock is already holding a mutex");

        if( m.q_tail.load(std::memory_order_relaxed) )
            return false; // Someone already took the lock

        // Must set all fields before the CAS, because once the
        // CAS executes, *this becomes accessible to other threads.
        s.my_prev.store(0U, std::memory_order_relaxed);
        s.my_next.store(0U, std::memory_order_relaxed);
        s.my_going.store(0U, std::memory_order_relaxed); // TODO: remove dead assignment?
        s.my_state.store(d1::queuing_rw_mutex::scoped_lock::state_t(write ? STATE_WRITER : STATE_ACTIVEREADER), std::memory_order_relaxed);
        s.my_internal_lock.store(RELEASED, std::memory_order_relaxed);

        // The CAS must have release semantics, because we are
        // "sending" the fields initialized above to other actors.
        // We need acquire semantics, because we are acquiring the mutex.
        d1::queuing_rw_mutex::scoped_lock* expected = nullptr;
        if (!m.q_tail.compare_exchange_strong(expected, &s, std::memory_order_acq_rel))
            return false; // Someone already took the lock
        s.my_mutex = &m;
        ITT_NOTIFY(sync_acquired, s.my_mutex);
        return true;
    }

    //! A method to release queuing_rw_mutex lock
    static void release(d1::queuing_rw_mutex::scoped_lock& s) {
        __TBB_ASSERT(s.my_mutex!=nullptr, "no lock acquired");

        ITT_NOTIFY(sync_releasing, s.my_mutex);

        if( s.my_state.load(std::memory_order_relaxed) == STATE_WRITER ) { // Acquired for write

            // The logic below is the same as "writerUnlock", but elides
            // "return" from the middle of the routine.
            // In the statement below, acquire semantics of reading my_next is required
            // so that the following operations with fields of my_next are safe.
            d1::queuing_rw_mutex::scoped_lock* next = tricky_pointer::load(s.my_next, std::memory_order_acquire);
            if( !next ) {
                d1::queuing_rw_mutex::scoped_lock* expected = &s;
                // Release the mutex on success; otherwise, wait for the successor to be published.
                if( s.my_mutex->q_tail.compare_exchange_strong(expected, nullptr,
                                                               std::memory_order_release, std::memory_order_relaxed) )
                {
                    // this was the only item in the queue, and the queue is now empty.
                    goto done;
                }
                spin_wait_while_eq(s.my_next, 0U, std::memory_order_relaxed);
                next = tricky_pointer::load(s.my_next, std::memory_order_acquire);
            }
            next->my_going.store(2U, std::memory_order_relaxed); // protect the next queue node from being destroyed too early
            // If the next is STATE_UPGRADE_WAITING, it is expected to acquire all other released readers via the
            // release sequence in next->my_state. In that case, we need to preserve the release sequence in
            // next->my_state contributed by other readers. There are two approaches that do not break the release sequence:
            // 1. Use a read-modify-write (exchange) operation to store the UPGRADE_LOSER state with release;
            // 2. Acquire the release sequence and store both the sequence and the UPGRADE_LOSER state.
            // The second approach seems better on x86 because it does not involve interlocked operations.
            // Therefore, we read next->my_state with acquire, even though the else branch does not need it to get
            // the release sequence.
            if( next->my_state.load(std::memory_order_acquire)==STATE_UPGRADE_WAITING ) {
                // A successor waiting for upgrade means this writer was itself upgraded before.
                acquire_internal_lock(s);
                // Responsibility transition: the one who reads an uncorrupted my_prev will do the release.
                // Guarantee that the above store of 2 into next->my_going happens-before the resetting of next->my_prev.
                d1::queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::exchange(next->my_prev, nullptr, std::memory_order_release);
                // Pass on the release sequence that we acquired with the above load of next->my_state.
                next->my_state.store(STATE_UPGRADE_LOSER, std::memory_order_release);
                // We are releasing the mutex
                next->my_going.store(1U, std::memory_order_release);
                unblock_or_wait_on_internal_lock(s, get_flag(tmp));
            } else {
                // next->state cannot be STATE_UPGRADE_REQUESTED
                __TBB_ASSERT( next->my_state.load(std::memory_order_relaxed) & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), "unexpected state" );
                __TBB_ASSERT( !( next->my_prev.load(std::memory_order_relaxed) & FLAG ), "use of corrupted pointer!" );
                // Guarantee that the above store of 2 into next->my_going happens-before the resetting of next->my_prev.
                tricky_pointer::store(next->my_prev, nullptr, std::memory_order_release);
                // We are releasing the mutex
                next->my_going.store(1U, std::memory_order_release);
            }

        } else { // Acquired for read
            // The basic idea is to build a happens-before relation with the left and right readers via prev and next.
            // In addition, the first reader should acquire the left (prev) signal and propagate it to the right (next).
            // To simplify, we always build the happens-before relation from left to right (left happens before right).
            queuing_rw_mutex::scoped_lock *tmp = nullptr;
    retry:
            // Addition to the original paper: Mark my_prev as in use
            queuing_rw_mutex::scoped_lock *predecessor = tricky_pointer::fetch_add(s.my_prev, FLAG, std::memory_order_acquire);

            if( predecessor ) {
                if( !(try_acquire_internal_lock(*predecessor)) )
                {
                    // Failed to acquire the lock on the predecessor. The predecessor either unlinks or upgrades.
                    // In the second case, it may or may not have seen my "in use" flag - need to check.
                    // Responsibility transition: the one who reads an uncorrupted my_prev will do the release.
                    tmp = tricky_pointer::compare_exchange_strong(s.my_prev, tricky_pointer(predecessor) | FLAG, predecessor, std::memory_order_acquire);
                    if( !(tricky_pointer(tmp) & FLAG) ) {
                        __TBB_ASSERT(tricky_pointer::load(s.my_prev, std::memory_order_relaxed) != (tricky_pointer(predecessor) | FLAG), nullptr);
                        // Now the owner of the predecessor is waiting for _us_ to release its lock
                        release_internal_lock(*predecessor);
                    }
                    // else the "in use" flag is back -> the predecessor didn't get it and will release itself; nothing to do

                    tmp = nullptr;
                    goto retry;
                }
                __TBB_ASSERT(predecessor && predecessor->my_internal_lock.load(std::memory_order_relaxed)==ACQUIRED, "predecessor's lock is not acquired");
                tricky_pointer::store(s.my_prev, predecessor, std::memory_order_relaxed);
                acquire_internal_lock(s);

                tricky_pointer::store(predecessor->my_next, nullptr, std::memory_order_release);

                d1::queuing_rw_mutex::scoped_lock* expected = &s;
                if( !tricky_pointer::load(s.my_next, std::memory_order_acquire) && !s.my_mutex->q_tail.compare_exchange_strong(expected, predecessor, std::memory_order_release) ) {
                    spin_wait_while_eq( s.my_next, 0U, std::memory_order_acquire );
                }
                __TBB_ASSERT( !(s.my_next.load(std::memory_order_relaxed) & FLAG), "use of corrupted pointer" );

                // my_next is acquired either with the load or with the spin_wait.
                if(d1::queuing_rw_mutex::scoped_lock *const l_next = tricky_pointer::load(s.my_next, std::memory_order_relaxed) ) { // I->next != nil, TODO: rename to next after clearing up and adapting the n in the comment two lines below
                    // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0
                    tmp = tricky_pointer::exchange(l_next->my_prev, predecessor, std::memory_order_release);
                    // I->prev->next = I->next;
                    __TBB_ASSERT(tricky_pointer::load(s.my_prev, std::memory_order_relaxed)==predecessor, nullptr);
                    predecessor->my_next.store(s.my_next.load(std::memory_order_relaxed), std::memory_order_release);
                }
                // Safe to release in the order opposite to acquiring, which makes the code simpler
                release_internal_lock(*predecessor);

            } else { // No predecessor when we looked
                acquire_internal_lock(s); // "exclusiveLock(&I->EL)"
                d1::queuing_rw_mutex::scoped_lock* next = tricky_pointer::load(s.my_next, std::memory_order_acquire);
                if( !next ) {
                    d1::queuing_rw_mutex::scoped_lock* expected = &s;
                    // Release the mutex on success; otherwise, wait for the successor to be published.
                    if( !s.my_mutex->q_tail.compare_exchange_strong(expected, nullptr,
                                                                    std::memory_order_release, std::memory_order_relaxed) )
                    {
                        spin_wait_while_eq( s.my_next, 0U, std::memory_order_relaxed );
                        next = tricky_pointer::load(s.my_next, std::memory_order_acquire);
                    } else {
                        goto unlock_self;
                    }
                }
                next->my_going.store(2U, std::memory_order_relaxed);
                // Responsibility transition: the one who reads an uncorrupted my_prev will do the release.
                tmp = tricky_pointer::exchange(next->my_prev, nullptr, std::memory_order_release);
                next->my_going.store(1U, std::memory_order_release);
            }
    unlock_self:
            unblock_or_wait_on_internal_lock(s, get_flag(tmp));
        }
    done:
        // Lifetime synchronization; no need to build a happens-before relation
        spin_wait_while_eq( s.my_going, 2U, std::memory_order_relaxed );

        s.initialize();
    }

    static bool downgrade_to_reader(d1::queuing_rw_mutex::scoped_lock& s) {
        if ( s.my_state.load(std::memory_order_relaxed) == STATE_ACTIVEREADER ) return true; // Already a reader

        ITT_NOTIFY(sync_releasing, s.my_mutex);
        d1::queuing_rw_mutex::scoped_lock* next = tricky_pointer::load(s.my_next, std::memory_order_acquire);
        if( !next ) {
            s.my_state.store(STATE_READER, std::memory_order_seq_cst);
            // the following load of q_tail must not be reordered with setting STATE_READER above
            if( &s == s.my_mutex->q_tail.load(std::memory_order_seq_cst) ) {
                unsigned char old_state = STATE_READER;
                // When this reader is signaled by the previous actor, it acquires the mutex.
                // We need to build a happens-before relation with all other incoming readers that will read our ACTIVEREADER
                // without blocking on my_going. Therefore, we need to publish ACTIVEREADER with release semantics.
                // On failure it is relaxed, because we will build happens-before via my_going.
                s.my_state.compare_exchange_strong(old_state, STATE_ACTIVEREADER, std::memory_order_release, std::memory_order_relaxed);
                if( old_state==STATE_READER )
                    return true; // Downgrade completed
            }
            /* wait for the next to register */
            spin_wait_while_eq(s.my_next, 0U, std::memory_order_relaxed);
            next = tricky_pointer::load(s.my_next, std::memory_order_acquire);
        }

        __TBB_ASSERT( next, "still no successor at this point!" );
        if( next->my_state.load(std::memory_order_relaxed) & STATE_COMBINED_WAITINGREADER )
            next->my_going.store(1U, std::memory_order_release);
        // If the next is STATE_UPGRADE_WAITING, it is expected to acquire all other released readers via the
        // release sequence in next->my_state. In that case, we need to preserve the release sequence in
        // next->my_state contributed by other readers. There are two approaches that do not break the release sequence:
        // 1. Use a read-modify-write (exchange) operation to store the UPGRADE_LOSER state with release;
        // 2. Acquire the release sequence and store both the sequence and the UPGRADE_LOSER state.
        // The second approach seems better on x86 because it does not involve interlocked operations.
        // Therefore, we read next->my_state with acquire, even though the other branch does not need it to get
        // the release sequence.
        else if( next->my_state.load(std::memory_order_acquire)==STATE_UPGRADE_WAITING )
            // A successor waiting for upgrade means this writer was itself upgraded before.
            // To preserve the release sequence on next->my_state, it is read with acquire above.
            next->my_state.store(STATE_UPGRADE_LOSER, std::memory_order_release);
        s.my_state.store(STATE_ACTIVEREADER, std::memory_order_release);
        return true;
    }

    static bool upgrade_to_writer(d1::queuing_rw_mutex::scoped_lock& s) {
        if (s.my_state.load(std::memory_order_relaxed) == STATE_WRITER) {
            // Already a writer
            return true;
        }

        __TBB_ASSERT(s.my_state.load(std::memory_order_relaxed) == STATE_ACTIVEREADER, "only an active reader can be upgraded");

        queuing_rw_mutex::scoped_lock* tmp{};
        queuing_rw_mutex::scoped_lock* me = &s;

        ITT_NOTIFY(sync_releasing, s.my_mutex);
        // Publish ourselves into my_state so that other UPGRADE_WAITING actors can acquire our state.
        s.my_state.store(STATE_UPGRADE_REQUESTED, std::memory_order_release);
    requested:
        __TBB_ASSERT( !(s.my_next.load(std::memory_order_relaxed) & FLAG), "use of corrupted pointer!" );
        acquire_internal_lock(s);
        d1::queuing_rw_mutex::scoped_lock* expected = &s;
        if( !s.my_mutex->q_tail.compare_exchange_strong(expected, tricky_pointer(me)|FLAG, std::memory_order_acq_rel) ) {
            spin_wait_while_eq( s.my_next, 0U, std::memory_order_relaxed );
            queuing_rw_mutex::scoped_lock * next;
            next = tricky_pointer::fetch_add(s.my_next, FLAG, std::memory_order_acquire);
            // While we were a READER, the next READER might have reached the STATE_UPGRADE_WAITING state.
            // In that case it did not build a happens-before relation with us, so we need to acquire
            // next->my_state to build the happens-before relation ourselves.
            unsigned short n_state = next->my_state.load(std::memory_order_acquire);
            /* the next reader can be blocked by our state; the best thing to do is to unblock it */
            if( n_state & STATE_COMBINED_WAITINGREADER )
                next->my_going.store(1U, std::memory_order_release);
            // Responsibility transition: the one who reads an uncorrupted my_prev will do the release.
            tmp = tricky_pointer::exchange(next->my_prev, &s, std::memory_order_release);
            unblock_or_wait_on_internal_lock(s, get_flag(tmp));
            if( n_state & (STATE_COMBINED_READER | STATE_UPGRADE_REQUESTED) ) {
                // save next|FLAG for simplicity of the following comparisons
                tmp = tricky_pointer(next)|FLAG;
                for( atomic_backoff b; tricky_pointer::load(s.my_next, std::memory_order_relaxed)==tmp; b.pause() ) {
                    if( s.my_state.load(std::memory_order_acquire) & STATE_COMBINED_UPGRADING ) {
                        if( tricky_pointer::load(s.my_next, std::memory_order_acquire)==tmp )
                            tricky_pointer::store(s.my_next, next, std::memory_order_relaxed);
                        goto waiting;
                    }
                }
                __TBB_ASSERT(tricky_pointer::load(s.my_next, std::memory_order_relaxed) != (tricky_pointer(next)|FLAG), nullptr);
                goto requested;
            } else {
                __TBB_ASSERT( n_state & (STATE_WRITER | STATE_UPGRADE_WAITING), "unexpected state");
                __TBB_ASSERT( (tricky_pointer(next)|FLAG) == tricky_pointer::load(s.my_next, std::memory_order_relaxed), nullptr);
                tricky_pointer::store(s.my_next, next, std::memory_order_relaxed);
            }
        } else {
            /* We are in the tail; whoever comes next is blocked by q_tail&FLAG */
            release_internal_lock(s);
        } // if( this != my_mutex->q_tail... )
        {
            unsigned char old_state = STATE_UPGRADE_REQUESTED;
            // If we reach the STATE_UPGRADE_WAITING state, we do not build a happens-before relation with the READER
            // on the left. We delegate this responsibility to the READER on the left when it tries upgrading.
            // Therefore, we are releasing on success.
            // Otherwise, on failure, we have already acquired next->my_state.
            s.my_state.compare_exchange_strong(old_state, STATE_UPGRADE_WAITING, std::memory_order_release, std::memory_order_relaxed);
        }
    waiting:
        __TBB_ASSERT( !( s.my_next.load(std::memory_order_relaxed) & FLAG ), "use of corrupted pointer!" );
        __TBB_ASSERT( s.my_state & STATE_COMBINED_UPGRADING, "wrong state at upgrade waiting_retry" );
        __TBB_ASSERT( me==&s, nullptr );
        ITT_NOTIFY(sync_prepare, s.my_mutex);
        /* if no one was blocked by the "corrupted" q_tail, turn it back */
        expected = tricky_pointer(me)|FLAG;
        s.my_mutex->q_tail.compare_exchange_strong(expected, &s, std::memory_order_release);
        queuing_rw_mutex::scoped_lock * predecessor;
        // Mark my_prev as 'in use' to prevent the predecessor from releasing
        predecessor = tricky_pointer::fetch_add(s.my_prev, FLAG, std::memory_order_acquire);
        if( predecessor ) {
            bool success = try_acquire_internal_lock(*predecessor);
            {
                // While the predecessor pointer (my_prev) is in use (FLAG is set), we can safely update the node's state.
                // A corrupted pointer transfers the responsibility to release the predecessor's node to us.
                unsigned char old_state = STATE_UPGRADE_REQUESTED;
                // Try to build happens-before with the upgrading READER on the left. If it fails, the predecessor's state
                // is not important for us because it will acquire our state.
                predecessor->my_state.compare_exchange_strong(old_state, STATE_UPGRADE_WAITING, std::memory_order_release,
                                                              std::memory_order_relaxed);
            }
            if( !success ) {
                // Responsibility transition: the one who reads an uncorrupted my_prev will do the release.
                tmp = tricky_pointer::compare_exchange_strong(s.my_prev, tricky_pointer(predecessor)|FLAG, predecessor, std::memory_order_acquire);
                if( tricky_pointer(tmp) & FLAG ) {
                    tricky_pointer::spin_wait_while_eq(s.my_prev, predecessor);
                    predecessor = tricky_pointer::load(s.my_prev, std::memory_order_relaxed);
                } else {
                    // TODO: spin_wait condition seems never reachable
                    tricky_pointer::spin_wait_while_eq(s.my_prev, tricky_pointer(predecessor)|FLAG);
                    release_internal_lock(*predecessor);
                }
            } else {
                tricky_pointer::store(s.my_prev, predecessor, std::memory_order_relaxed);
                release_internal_lock(*predecessor);
                tricky_pointer::spin_wait_while_eq(s.my_prev, predecessor);
                predecessor = tricky_pointer::load(s.my_prev, std::memory_order_relaxed);
            }
            if( predecessor )
                goto waiting;
        } else {
            tricky_pointer::store(s.my_prev, nullptr, std::memory_order_relaxed);
        }
        __TBB_ASSERT( !predecessor && !s.my_prev, nullptr );

        // additional lifetime issue prevention checks
        // wait for the successor to finish working with my fields
        wait_for_release_of_internal_lock(s);
        // now wait for the predecessor to finish working with my fields
        spin_wait_while_eq( s.my_going, 2U );

        bool result = ( s.my_state != STATE_UPGRADE_LOSER );
        s.my_state.store(STATE_WRITER, std::memory_order_relaxed);
        s.my_going.store(1U, std::memory_order_relaxed);

        ITT_NOTIFY(sync_acquired, s.my_mutex);
        return result;
    }

    static bool is_writer(const d1::queuing_rw_mutex::scoped_lock& m) {
        return m.my_state.load(std::memory_order_relaxed) == STATE_WRITER;
    }

    static void construct(d1::queuing_rw_mutex& m) {
        suppress_unused_warning(m);
        ITT_SYNC_CREATE(&m, _T("tbb::queuing_rw_mutex"), _T(""));
    }
};

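// Exported entry points: each forwards to queuing_rw_mutex_impl. The inline
// d1::queuing_rw_mutex::scoped_lock methods are presumed to reach this implementation
// through these __TBB_EXPORTED_FUNC functions.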
void __TBB_EXPORTED_FUNC acquire(d1::queuing_rw_mutex& m, d1::queuing_rw_mutex::scoped_lock& s, bool write) {
    queuing_rw_mutex_impl::acquire(m, s, write);
}

bool __TBB_EXPORTED_FUNC try_acquire(d1::queuing_rw_mutex& m, d1::queuing_rw_mutex::scoped_lock& s, bool write) {
    return queuing_rw_mutex_impl::try_acquire(m, s, write);
}

void __TBB_EXPORTED_FUNC release(d1::queuing_rw_mutex::scoped_lock& s) {
    queuing_rw_mutex_impl::release(s);
}

bool __TBB_EXPORTED_FUNC upgrade_to_writer(d1::queuing_rw_mutex::scoped_lock& s) {
    return queuing_rw_mutex_impl::upgrade_to_writer(s);
}

bool __TBB_EXPORTED_FUNC is_writer(const d1::queuing_rw_mutex::scoped_lock& s) {
    return queuing_rw_mutex_impl::is_writer(s);
}

bool __TBB_EXPORTED_FUNC downgrade_to_reader(d1::queuing_rw_mutex::scoped_lock& s) {
    return queuing_rw_mutex_impl::downgrade_to_reader(s);
}

void __TBB_EXPORTED_FUNC construct(d1::queuing_rw_mutex& m) {
    queuing_rw_mutex_impl::construct(m);
}

} // namespace r1
} // namespace detail
} // namespace tbb