/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#define TBB_PREVIEW_MUTEXES 1
#include "test_mutex.h"

#include <tbb/spin_mutex.h>
#include "oneapi/tbb/mutex.h"
#include <tbb/spin_rw_mutex.h>
#include "oneapi/tbb/rw_mutex.h"
#include <tbb/queuing_mutex.h>
#include <tbb/queuing_rw_mutex.h>
#include <tbb/null_mutex.h>
#include <tbb/null_rw_mutex.h>
#include <tbb/parallel_for.h>
#include <oneapi/tbb/detail/_utils.h>
#include <oneapi/tbb/detail/_machine.h>

//! \file test_mutex.cpp
//! \brief Test for [mutex.spin_mutex mutex.spin_rw_mutex mutex.queuing_mutex mutex.queuing_rw_mutex mutex.speculative_spin_mutex mutex.speculative_spin_rw_mutex] specifications

// TODO: Investigate why RTM doesn't work on some macOS.
#if __TBB_TSX_INTRINSICS_PRESENT && !__APPLE__

inline static bool IsInsideTx() {
    return _xtest() != 0;
}

bool have_TSX() {
    bool result = false;
    const int rtm_ebx_mask = 1 << 11; // CPUID leaf 7, EBX bit 11 reports RTM support
#if _MSC_VER
    int info[4] = { 0, 0, 0, 0 };
    const int reg_ebx = 1; // index of EBX in the __cpuidex output array
    __cpuidex(info, 7, 0);
    result = (info[reg_ebx] & rtm_ebx_mask) != 0;
#elif __GNUC__ || __SUNPRO_CC
    int32_t reg_ebx = 0;
    int32_t reg_eax = 7;
    int32_t reg_ecx = 0;
    // Save and restore EBX around CPUID so position-independent code is not broken.
    __asm__ __volatile__("movl %%ebx, %%esi\n"
                         "cpuid\n"
                         "movl %%ebx, %0\n"
                         "movl %%esi, %%ebx\n"
                         : "=a"(reg_ebx) : "0"(reg_eax), "c"(reg_ecx) : "esi",
#if __TBB_x86_64
                         "ebx",
#endif
                         "edx"
    );
    result = (reg_ebx & rtm_ebx_mask) != 0;
#endif
    return result;
}

//! Function object for use with tbb::parallel_for to see if a transaction is actually attempted.
std::atomic<std::size_t> n_transactions_attempted;

template <typename C>
struct AddOne_CheckTransaction {
    AddOne_CheckTransaction& operator=(const AddOne_CheckTransaction&) = delete;
    AddOne_CheckTransaction(const AddOne_CheckTransaction&) = default;
    AddOne_CheckTransaction() = default;

    C& counter;
    /** Increments counter once for each iteration in the iteration space. */
    void operator()(tbb::blocked_range<size_t>& range) const {
        for (std::size_t i = range.begin(); i != range.end(); ++i) {
            bool transaction_attempted = false;
            {
                typename C::mutex_type::scoped_lock lock(counter.mutex);
                if (IsInsideTx()) transaction_attempted = true;
                counter.value = counter.value + 1;
            }
            if (transaction_attempted) ++n_transactions_attempted;
            tbb::detail::machine_pause(static_cast<int32_t>(i));
        }
    }
    AddOne_CheckTransaction(C& counter_) : counter(counter_) {}
};

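// A speculative mutex may abort its transaction (e.g. on a conflict or capacity
// abort) and fall back to taking the lock non-speculatively, in which case
// _xtest() observes no transaction. A single parallel_for run is therefore not
// guaranteed to record an attempt, which is why TestTransaction() below retries
// the measurement several times before reporting a failure.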
/* TestTransaction() checks if a speculative mutex actually uses transactions. */
template <typename M>
void TestTransaction(const char* name) {
    utils::Counter<M> counter;
    constexpr int n = 550;

    n_transactions_attempted = 0;
    for (int i = 0; i < 5 && n_transactions_attempted.load(std::memory_order_relaxed) == 0; ++i) {
        counter.value = 0;
        tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n, 2), AddOne_CheckTransaction<utils::Counter<M>>(counter));
        REQUIRE(counter.value == n);
    }
    REQUIRE_MESSAGE(n_transactions_attempted.load(std::memory_order_relaxed), "ERROR for " << name << ": transactions were never attempted");
}

//! \brief \ref error_guessing
TEST_CASE("Transaction test") {
    if (have_TSX()) {
        TestTransaction<tbb::speculative_spin_mutex>("Speculative Spin Mutex");
        TestTransaction<tbb::speculative_spin_rw_mutex>("Speculative Spin RW Mutex");
    }
}
#endif /* __TBB_TSX_INTRINSICS_PRESENT && !__APPLE__ */

//! \brief \ref error_guessing
TEST_CASE("test upgrade/downgrade with spin_rw_mutex") {
    test_rwm_upgrade_downgrade<tbb::spin_rw_mutex>();
}

//! \brief \ref error_guessing
TEST_CASE("test upgrade/downgrade with queuing_rw_mutex") {
    test_rwm_upgrade_downgrade<tbb::queuing_rw_mutex>();
}

//! \brief \ref error_guessing
TEST_CASE("test upgrade/downgrade with speculative_spin_rw_mutex") {
    test_rwm_upgrade_downgrade<tbb::speculative_spin_rw_mutex>();
}

//! \brief \ref error_guessing
TEST_CASE("test spin_mutex with native threads") {
    test_with_native_threads::test<tbb::spin_mutex>();
}

//! \brief \ref error_guessing
TEST_CASE("test queuing_mutex with native threads") {
    test_with_native_threads::test<tbb::queuing_mutex>();
}

//! \brief \ref error_guessing
TEST_CASE("test spin_rw_mutex with native threads") {
    test_with_native_threads::test<tbb::spin_rw_mutex>();
    test_with_native_threads::test_rw<tbb::spin_rw_mutex>();
}

//! \brief \ref error_guessing
TEST_CASE("test queuing_rw_mutex with native threads") {
    test_with_native_threads::test<tbb::queuing_rw_mutex>();
    test_with_native_threads::test_rw<tbb::queuing_rw_mutex>();
}

//! Test scoped_lock::is_writer getter
//! \brief \ref error_guessing
TEST_CASE("scoped_lock::is_writer") {
    TestIsWriter<oneapi::tbb::spin_rw_mutex>("spin_rw_mutex");
    TestIsWriter<oneapi::tbb::queuing_rw_mutex>("queuing_rw_mutex");
    TestIsWriter<oneapi::tbb::speculative_spin_rw_mutex>("speculative_spin_rw_mutex");
    TestIsWriter<oneapi::tbb::null_rw_mutex>("null_rw_mutex");
    TestIsWriter<oneapi::tbb::rw_mutex>("rw_mutex");
}

#if __TBB_CPP20_CONCEPTS_PRESENT
template <typename... Args>
concept mutexes = (... && tbb::detail::scoped_lockable<Args>);

template <typename... Args>
concept rw_mutexes = (... && tbb::detail::rw_scoped_lockable<Args>);

//! \brief \ref error_guessing
TEST_CASE("internal mutex concepts") {
    static_assert(mutexes<tbb::spin_mutex, tbb::speculative_spin_mutex, tbb::null_mutex, tbb::queuing_mutex,
                          tbb::spin_rw_mutex, tbb::speculative_spin_rw_mutex, tbb::null_rw_mutex, tbb::queuing_rw_mutex>);
    static_assert(rw_mutexes<tbb::spin_rw_mutex, tbb::speculative_spin_rw_mutex,
                             tbb::null_rw_mutex, tbb::queuing_rw_mutex>);
}
#endif // __TBB_CPP20_CONCEPTS_PRESENT