1 /* 2 Copyright (c) 2005-2021 Intel Corporation 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 #define TBB_PREVIEW_MUTEXES 1 18 #include "test_mutex.h" 19 20 #include <tbb/spin_mutex.h> 21 #include "oneapi/tbb/mutex.h" 22 #include <tbb/spin_rw_mutex.h> 23 #include "oneapi/tbb/rw_mutex.h" 24 #include <tbb/queuing_mutex.h> 25 #include <tbb/queuing_rw_mutex.h> 26 #include <tbb/parallel_for.h> 27 #include <oneapi/tbb/detail/_utils.h> 28 #include <oneapi/tbb/detail/_machine.h> 29 30 //! \file test_mutex.cpp 31 //! \brief Test for [mutex.spin_mutex mutex.spin_rw_mutex mutex.queuing_mutex mutex.queuing_rw_mutexmutex.speculative_spin_mutex mutex.speculative_spin_rw_mutex] specifications 32 33 // TODO: Investigate why RTM doesn't work on some macOS. 
#if __TBB_TSX_INTRINSICS_PRESENT && !__APPLE__

// Returns true iff the caller is currently executing inside an RTM
// (Restricted Transactional Memory) transaction, per the _xtest intrinsic.
inline static bool IsInsideTx() {
    return _xtest() != 0;
}

// Runtime check for RTM hardware support: CPUID leaf 7 (sub-leaf 0), EBX bit 11.
// Used to skip the transaction test on machines without TSX.
bool have_TSX() {
    bool result = false;
    const int rtm_ebx_mask = 1 << 11; // EBX bit 11 == RTM feature flag
#if _MSC_VER
    int info[4] = { 0,0,0,0 };
    const int reg_ebx = 1; // index of EBX in the __cpuidex output array
    __cpuidex(info, 7, 0);
    result = (info[reg_ebx] & rtm_ebx_mask) != 0;
#elif __GNUC__ || __SUNPRO_CC
    int32_t reg_ebx = 0;
    int32_t reg_eax = 7;
    int32_t reg_ecx = 0;
    // EBX is saved in ESI and restored around CPUID — NOTE(review): presumably
    // because EBX can be reserved by the ABI (PIC register on 32-bit builds).
    __asm__ __volatile__("movl %%ebx, %%esi\n"
                         "cpuid\n"
                         "movl %%ebx, %0\n"
                         "movl %%esi, %%ebx\n"
                         : "=a"(reg_ebx) : "0" (reg_eax), "c" (reg_ecx) : "esi",
#if __TBB_x86_64
                         "ebx",
#endif
                         "edx"
    );
    result = (reg_ebx & rtm_ebx_mask) != 0;
#endif
    return result;
}

//! Function object for use with parallel_for.h to see if a transaction is actually attempted.
std::atomic<std::size_t> n_transactions_attempted;

template<typename C>
struct AddOne_CheckTransaction {

    AddOne_CheckTransaction& operator=(const AddOne_CheckTransaction&) = delete;
    AddOne_CheckTransaction(const AddOne_CheckTransaction&) = default;
    AddOne_CheckTransaction() = default;

    // Shared counter (value + mutex); held by reference, owned by the caller.
    C& counter;
    /** Increments counter once for each iteration in the iteration space. */
    void operator()(tbb::blocked_range<size_t>& range) const {
        for (std::size_t i = range.begin(); i != range.end(); ++i) {
            bool transaction_attempted = false;
            {
                typename C::mutex_type::scoped_lock lock(counter.mutex);
                // _xtest() tells us whether the speculative mutex really put
                // this critical section inside a hardware transaction.
                if (IsInsideTx()) transaction_attempted = true;
                counter.value = counter.value + 1;
            }
            if (transaction_attempted) ++n_transactions_attempted;
            // Pause for a duration that varies with i — presumably to
            // de-synchronize the contending threads; TODO confirm.
            tbb::detail::machine_pause(static_cast<int32_t>(i));
        }
    }
    AddOne_CheckTransaction(C& counter_) : counter(counter_) {}
};

/* TestTransaction() checks if a speculative mutex actually uses transactions.
*/ 94 template<typename M> 95 void TestTransaction(const char* name) 96 { 97 utils::Counter<M> counter; 98 constexpr int n = 550; 99 100 n_transactions_attempted = 0; 101 for (int i = 0; i < 5 && n_transactions_attempted.load(std::memory_order_relaxed) == 0; ++i) { 102 counter.value = 0; 103 tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n, 2), AddOne_CheckTransaction<utils::Counter<M>>(counter)); 104 REQUIRE(counter.value == n); 105 } 106 REQUIRE_MESSAGE(n_transactions_attempted.load(std::memory_order_relaxed), "ERROR for " << name << ": transactions were never attempted"); 107 } 108 109 110 //! \brief \ref error_guessing 111 TEST_CASE("Transaction test") { 112 if (have_TSX()) { 113 TestTransaction<tbb::speculative_spin_mutex>("Speculative Spin Mutex"); 114 TestTransaction<tbb::speculative_spin_rw_mutex>("Speculative Spin RW Mutex"); 115 } 116 } 117 #endif /* __TBB_TSX_TESTING_ENABLED_FOR_THIS_COMPILER */ 118 119 //! \brief \ref error_guessing 120 TEST_CASE("test upgrade/downgrade with spin_rw_mutex") { 121 test_rwm_upgrade_downgrade<tbb::spin_rw_mutex>(); 122 } 123 124 //! \brief \ref error_guessing 125 TEST_CASE("test upgrade/downgrade with queueing_rw_mutex") { 126 test_rwm_upgrade_downgrade<tbb::queuing_rw_mutex>(); 127 } 128 129 //! \brief \ref error_guessing 130 TEST_CASE("test upgrade/downgrade with speculative_spin_rw_mutex") { 131 test_rwm_upgrade_downgrade<tbb::speculative_spin_rw_mutex>(); 132 } 133 134 //! \brief \ref error_guessing 135 TEST_CASE("test spin_mutex with native threads") { 136 test_with_native_threads::test<tbb::spin_mutex>(); 137 } 138 139 //! \brief \ref error_guessing 140 TEST_CASE("test queuing_mutex with native threads") { 141 test_with_native_threads::test<tbb::queuing_mutex>(); 142 } 143 144 //! \brief \ref error_guessing 145 TEST_CASE("test spin_rw_mutex with native threads") { 146 test_with_native_threads::test<tbb::spin_rw_mutex>(); 147 test_with_native_threads::test_rw<tbb::spin_rw_mutex>(); 148 } 149 150 //! 
\brief \ref error_guessing 151 TEST_CASE("test queuing_rw_mutex with native threads") { 152 test_with_native_threads::test<tbb::queuing_rw_mutex>(); 153 test_with_native_threads::test_rw<tbb::queuing_rw_mutex>(); 154 } 155 156 //! Test scoped_lock::is_writer getter 157 //! \brief \ref error_guessing 158 TEST_CASE("scoped_lock::is_writer") { 159 TestIsWriter<oneapi::tbb::spin_rw_mutex>("spin_rw_mutex"); 160 TestIsWriter<oneapi::tbb::queuing_rw_mutex>("queuing_rw_mutex"); 161 TestIsWriter<oneapi::tbb::speculative_spin_rw_mutex>("speculative_spin_rw_mutex"); 162 TestIsWriter<oneapi::tbb::null_rw_mutex>("null_rw_mutex"); 163 TestIsWriter<oneapi::tbb::rw_mutex>("rw_mutex"); 164 } 165