xref: /oneTBB/test/tbb/test_mutex.cpp (revision 4523a761)
1 /*
2     Copyright (c) 2005-2021 Intel Corporation
3 
4     Licensed under the Apache License, Version 2.0 (the "License");
5     you may not use this file except in compliance with the License.
6     You may obtain a copy of the License at
7 
8         http://www.apache.org/licenses/LICENSE-2.0
9 
10     Unless required by applicable law or agreed to in writing, software
11     distributed under the License is distributed on an "AS IS" BASIS,
12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13     See the License for the specific language governing permissions and
14     limitations under the License.
15 */
16 
17 #include "test_mutex.h"
18 
19 #include <tbb/spin_mutex.h>
20 #include "oneapi/tbb/mutex.h"
21 #include <tbb/spin_rw_mutex.h>
22 #include "oneapi/tbb/rw_mutex.h"
23 #include <tbb/queuing_mutex.h>
24 #include <tbb/queuing_rw_mutex.h>
25 #include <tbb/parallel_for.h>
26 #include <oneapi/tbb/detail/_utils.h>
27 #include <oneapi/tbb/detail/_machine.h>
28 
29 //! \file test_mutex.cpp
//! \brief Test for [mutex.spin_mutex mutex.spin_rw_mutex mutex.queuing_mutex mutex.queuing_rw_mutex mutex.speculative_spin_mutex mutex.speculative_spin_rw_mutex] specifications
31 
32 // TODO: Investigate why RTM doesn't work on some macOS.
33 #if __TBB_TSX_INTRINSICS_PRESENT && !__APPLE__
34 
//! Reports whether the caller is currently executing inside a hardware
//! transaction (RTM/HLE), as indicated by the _xtest() intrinsic.
inline static bool IsInsideTx() {
    const auto tx_status = _xtest();
    return tx_status != 0;
}
38 
//! Detects CPU support for Intel RTM (Restricted Transactional Memory).
//! Queries CPUID leaf 7, subleaf 0 and tests EBX bit 11, the RTM feature flag.
//! Returns false on toolchains where neither branch below applies.
bool have_TSX() {
    bool result = false;
    const int rtm_ebx_mask = 1 << 11;   // CPUID.(EAX=07H,ECX=0):EBX.RTM[bit 11]
#if _MSC_VER
    int info[4] = { 0,0,0,0 };
    const int reg_ebx = 1;              // __cpuidex output order is EAX, EBX, ECX, EDX
    __cpuidex(info, 7, 0);              // leaf 7, subleaf 0: structured extended feature flags
    result = (info[reg_ebx] & rtm_ebx_mask) != 0;
#elif __GNUC__ || __SUNPRO_CC
    int32_t reg_ebx = 0;
    int32_t reg_eax = 7;                // CPUID leaf
    int32_t reg_ecx = 0;                // CPUID subleaf
    // EBX is saved to ESI and restored by hand; presumably because on 32-bit
    // PIC builds EBX is reserved (GOT pointer) and must not be listed as a
    // clobber -- TODO confirm. The EBX feature word is returned through the
    // EAX output operand ("=a").
    __asm__ __volatile__("movl %%ebx, %%esi\n"
        "cpuid\n"
        "movl %%ebx, %0\n"              // copy feature word from EBX into the output register
        "movl %%esi, %%ebx\n"           // restore the caller's EBX
        : "=a"(reg_ebx) : "0" (reg_eax), "c" (reg_ecx) : "esi",
#if __TBB_x86_64
        "ebx",                          // declared clobbered only on x86-64 (see note above)
#endif
        "edx"
    );
    result = (reg_ebx & rtm_ebx_mask) != 0;
#endif
    return result;
}
65 
66 //! Function object for use with parallel_for.h to see if a transaction is actually attempted.
67 std::atomic<std::size_t> n_transactions_attempted;
68 template<typename C>
69 struct AddOne_CheckTransaction {
70 
71     AddOne_CheckTransaction& operator=(const AddOne_CheckTransaction&) = delete;
72     AddOne_CheckTransaction(const AddOne_CheckTransaction&) = default;
73     AddOne_CheckTransaction() = default;
74 
75     C& counter;
76     /** Increments counter once for each iteration in the iteration space. */
77     void operator()(tbb::blocked_range<size_t>& range) const {
78         for (std::size_t i = range.begin(); i != range.end(); ++i) {
79             bool transaction_attempted = false;
80             {
81                 typename C::mutex_type::scoped_lock lock(counter.mutex);
82                 if (IsInsideTx()) transaction_attempted = true;
83                 counter.value = counter.value + 1;
84             }
85             if (transaction_attempted) ++n_transactions_attempted;
86             tbb::detail::machine_pause(static_cast<int32_t>(i));
87         }
88     }
89     AddOne_CheckTransaction(C& counter_) : counter(counter_) {}
90 };
91 
92 /* TestTransaction() checks if a speculative mutex actually uses transactions. */
93 template<typename M>
94 void TestTransaction(const char* name)
95 {
96     utils::Counter<M> counter;
97     constexpr int n = 550;
98 
99     n_transactions_attempted = 0;
100     for (int i = 0; i < 5 && n_transactions_attempted.load(std::memory_order_relaxed) == 0; ++i) {
101         counter.value = 0;
102         tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n, 2), AddOne_CheckTransaction<utils::Counter<M>>(counter));
103         REQUIRE(counter.value == n);
104     }
105     REQUIRE_MESSAGE(n_transactions_attempted.load(std::memory_order_relaxed), "ERROR for " << name << ": transactions were never attempted");
106 }
107 
108 
//! \brief \ref error_guessing
//! Runs the transaction-attempt check for both speculative mutexes, but only
//! when have_TSX() reports RTM support on this CPU; otherwise the test is a no-op.
TEST_CASE("Transaction test") {
    if (have_TSX()) {
        TestTransaction<tbb::speculative_spin_mutex>("Speculative Spin Mutex");
        TestTransaction<tbb::speculative_spin_rw_mutex>("Speculative Spin RW Mutex");
    }
}
#endif /* __TBB_TSX_INTRINSICS_PRESENT && !__APPLE__ */
117 
//! \brief \ref error_guessing
//! Exercises reader->writer upgrade and writer->reader downgrade on tbb::spin_rw_mutex.
TEST_CASE("test upgrade/downgrade with spin_rw_mutex") {
    test_rwm_upgrade_downgrade<tbb::spin_rw_mutex>();
}
122 
123 //! \brief \ref error_guessing
124 TEST_CASE("test upgrade/downgrade with queueing_rw_mutex") {
125     test_rwm_upgrade_downgrade<tbb::queuing_rw_mutex>();
126 }
127 
//! \brief \ref error_guessing
//! Exercises reader->writer upgrade and writer->reader downgrade on tbb::speculative_spin_rw_mutex.
TEST_CASE("test upgrade/downgrade with speculative_spin_rw_mutex") {
    test_rwm_upgrade_downgrade<tbb::speculative_spin_rw_mutex>();
}
132 
//! \brief \ref error_guessing
//! Runs the shared mutual-exclusion protocol check for tbb::spin_mutex using
//! native (non-TBB) threads.
TEST_CASE("test spin_mutex with native threads") {
    test_with_native_threads::test<tbb::spin_mutex>();
}
137 
//! \brief \ref error_guessing
//! Runs the shared mutual-exclusion protocol check for tbb::queuing_mutex using
//! native (non-TBB) threads.
TEST_CASE("test queuing_mutex with native threads") {
    test_with_native_threads::test<tbb::queuing_mutex>();
}
142 
//! \brief \ref error_guessing
//! Runs both the plain-mutex and the reader-writer protocol checks for
//! tbb::spin_rw_mutex using native (non-TBB) threads.
TEST_CASE("test spin_rw_mutex with native threads") {
    test_with_native_threads::test<tbb::spin_rw_mutex>();
    test_with_native_threads::test_rw<tbb::spin_rw_mutex>();
}
148 
//! \brief \ref error_guessing
//! Runs both the plain-mutex and the reader-writer protocol checks for
//! tbb::queuing_rw_mutex using native (non-TBB) threads.
TEST_CASE("test queuing_rw_mutex with native threads") {
    test_with_native_threads::test<tbb::queuing_rw_mutex>();
    test_with_native_threads::test_rw<tbb::queuing_rw_mutex>();
}
154