xref: /oneTBB/test/tbb/test_mutex.cpp (revision d86ed7fb)
1 /*
2     Copyright (c) 2005-2020 Intel Corporation
3 
4     Licensed under the Apache License, Version 2.0 (the "License");
5     you may not use this file except in compliance with the License.
6     You may obtain a copy of the License at
7 
8         http://www.apache.org/licenses/LICENSE-2.0
9 
10     Unless required by applicable law or agreed to in writing, software
11     distributed under the License is distributed on an "AS IS" BASIS,
12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13     See the License for the specific language governing permissions and
14     limitations under the License.
15 */
16 
17 #include "common/test.h"
18 #include "common/utils.h"
19 #include "common/utils_concurrency_limit.h"
20 #include "common/config.h"
21 #include "common/rwm_upgrade_downgrade.h"
22 
23 #include <tbb/spin_mutex.h>
24 #include <tbb/spin_rw_mutex.h>
25 #include <tbb/queuing_mutex.h>
26 #include <tbb/queuing_rw_mutex.h>
27 #include <tbb/parallel_for.h>
28 #include <oneapi/tbb/detail/_utils.h>
29 #include <oneapi/tbb/detail/_machine.h>
30 
31 #include <atomic>
32 
33 //! \file test_mutex.cpp
//! \brief Test for [mutex.spin_mutex mutex.spin_rw_mutex mutex.queuing_mutex mutex.queuing_rw_mutex mutex.speculative_spin_mutex mutex.speculative_spin_rw_mutex] specifications
35 
//! A counter paired with the mutex (of type M) that guards it.
template<typename M>
struct Counter {
    using mutex_type = M;   // exposed so bodies can name M::scoped_lock
    M mutex;                // protects every update of value
    volatile long value;    // written only under the lock; checked after the parallel run
};
42 
43 // TODO: Investigate why RTM doesn't work on some macOS.
44 #if __TBB_TSX_INTRINSICS_PRESENT && !__APPLE__
45 
//! Returns true when the caller is currently executing inside an RTM hardware transaction.
inline static bool IsInsideTx() {
    return static_cast<bool>(_xtest());
}
49 
//! Runtime detection of Intel TSX/RTM support.
/** Queries CPUID leaf 7 (sub-leaf 0) and tests EBX bit 11 (the RTM feature flag).
    Returns false on toolchains for which no CPUID path is implemented. */
bool have_TSX() {
    bool result = false;
    const int rtm_ebx_mask = 1<<11;   // CPUID.07H:0:EBX.RTM[bit 11]
#if _MSC_VER
    int info[4] = {0,0,0,0};
    const int reg_ebx = 1;            // index of EBX in info[] (EAX, EBX, ECX, EDX)
    __cpuidex(info, 7, 0);
    result = (info[reg_ebx] & rtm_ebx_mask)!=0;
#elif __GNUC__ || __SUNPRO_CC
    int32_t reg_ebx = 0;
    int32_t reg_eax = 7;              // leaf 7: structured extended feature flags
    int32_t reg_ecx = 0;              // sub-leaf 0
    // EBX is saved/restored through ESI because in 32-bit position-independent
    // code EBX may be reserved as the GOT pointer; the CPUID result is moved
    // out through operand %0 instead.
    __asm__ __volatile__ ( "movl %%ebx, %%esi\n"
                           "cpuid\n"
                           "movl %%ebx, %0\n"
                           "movl %%esi, %%ebx\n"
                           : "=a"(reg_ebx) : "0" (reg_eax), "c" (reg_ecx) : "esi",
#if __TBB_x86_64
                           "ebx",     // on x86-64 EBX is not the PIC register, so it can be declared clobbered
#endif
                           "edx"
                           );
    result = (reg_ebx & rtm_ebx_mask)!=0 ;
#endif
    return result;
}
76 
77 //! Function object for use with parallel_for.h to see if a transaction is actually attempted.
78 std::atomic<std::size_t> n_transactions_attempted;
79 template<typename C>
80 struct AddOne_CheckTransaction {
81 
82     AddOne_CheckTransaction& operator=(const AddOne_CheckTransaction&) = delete;
83     AddOne_CheckTransaction(const AddOne_CheckTransaction&) = default;
84     AddOne_CheckTransaction() = default;
85 
86     C& counter;
87     /** Increments counter once for each iteration in the iteration space. */
88     void operator()(tbb::blocked_range<size_t>& range) const {
89         for (std::size_t i = range.begin(); i != range.end(); ++i) {
90             bool transaction_attempted = false;
91             {
92               typename C::mutex_type::scoped_lock lock(counter.mutex);
93               if (IsInsideTx()) transaction_attempted = true;
94               counter.value = counter.value + 1;
95             }
96             if(transaction_attempted) ++n_transactions_attempted;
97             tbb::detail::machine_pause(static_cast<int32_t>(i));
98         }
99     }
100     AddOne_CheckTransaction(C& counter_) : counter(counter_) {}
101 };
102 
103 /* TestTransaction() checks if a speculative mutex actually uses transactions. */
104 template<typename M>
105 void TestTransaction(const char* name)
106 {
107     Counter<M> counter;
108     constexpr int n = 550;
109 
110     n_transactions_attempted = 0;
111     for(int i = 0; i < 5 && n_transactions_attempted.load(std::memory_order_relaxed) == 0; ++i) {
112         counter.value = 0;
113         tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n, 2), AddOne_CheckTransaction<Counter<M>>(counter));
114         REQUIRE(counter.value == n);
115     }
116     REQUIRE_MESSAGE(n_transactions_attempted.load(std::memory_order_relaxed), "ERROR for " << name << ": transactions were never attempted");
117 }
118 
119 
120 //! \brief \ref error_guessing
121 TEST_CASE("Transaction test") {
122     if(have_TSX()) {
123         TestTransaction<tbb::speculative_spin_mutex>("Speculative Spin Mutex");
124         TestTransaction<tbb::speculative_spin_rw_mutex>("Speculative Spin RW Mutex");
125     }
126 }
#endif /* __TBB_TSX_INTRINSICS_PRESENT && !__APPLE__ */
128 
129 namespace test_with_native_threads {
130 
//! A counter guarded by a mutex of type M, incremented through two lock-usage styles.
template <typename M>
struct Counter {
    using mutex_type = M;

    M mutex;
    volatile long value;

    //! Increment the counter exactly once; mode's low bit selects the locking style.
    void flog_once( std::size_t mode ) {
        const bool implicit_acquire = (mode & 1) != 0;
        if (implicit_acquire) {
            // Implicit acquire in the constructor, explicit release
            typename mutex_type::scoped_lock lock(mutex);
            value += 1;
            lock.release();
        } else {
            // Explicit acquire, implicit release in the destructor
            typename mutex_type::scoped_lock lock;
            lock.acquire(mutex);
            value += 1;
        }
    }
}; // struct Counter
153 
// Shared state for the reader-writer mutex test: N counters that must always be
// equal to each other whenever the mutex protocol is obeyed.
template <typename M, long N>
struct Invariant {
    using mutex_type = M;

    M mutex;
    volatile long value[N];   // all N slots advance in lock-step under a write lock

    Invariant() {
        for (long k = 0; k < N; ++k) {
            value[k] = 0;
        }
    }

    // Writer-side operation: advance every slot by one.
    void update() {
        for (long k = 0; k < N; ++k) {
            ++value[k];
        }
    }

    // Returns true if every slot equals expected_value.
    bool value_is( long expected_value ) const {
        long tmp;

        for (long k = 0; k < N; ++k) {
            if ((tmp = value[k]) != expected_value) {
                return false;
            }
        }
        return true;
    }

    // Reader-side check: all slots agree with slot 0.
    bool is_okay() {
        return value_is(value[0]);
    }

    // One access to the shared state. mode selects reader vs. writer, the lock
    // acquisition style, and whether to exercise upgrade/downgrade.
    void flog_once( std::size_t mode ) {
        // Every 8th access is a write access
        bool write = (mode % 8) == 7;
        bool okay = true;
        bool lock_kept = true;

        if ((mode / 8) & 1) {
            // Try implicit acquire and explicit release
            typename mutex_type::scoped_lock lock(mutex, write);
            if (write) {
                long my_value = value[0];
                update();
                if (mode % 16 == 7) {
                    // Exercise downgrade_to_reader. A false return is taken to
                    // mean the lock was not held continuously, so another writer
                    // may have advanced the state; re-derive the expected value.
                    lock_kept = lock.downgrade_to_reader();
                    if (!lock_kept) {
                        my_value = value[0] - 1;
                    }
                    okay = value_is(my_value + 1);
                }
            } else {
                okay = is_okay();
                if (mode % 8 == 3) {
                    long my_value = value[0];
                    // Exercise upgrade_to_writer. A false return is taken to
                    // mean the lock was temporarily released, so re-read the
                    // state before updating.
                    lock_kept = lock.upgrade_to_writer();
                    if (!lock_kept) {
                        my_value = value[0];
                    }
                    update();
                    okay = value_is(my_value + 1);
                }
            }
            lock.release();
        } else {
            // Try explicit acquire and implicit release
            typename mutex_type::scoped_lock lock;
            lock.acquire(mutex, write);
            if (write) {
                long my_value = value[0];
                update();
                if (mode % 16 == 7) {
                    lock_kept = lock.downgrade_to_reader();
                    if (!lock_kept) {
                        my_value = value[0] - 1;
                    }
                    okay = value_is(my_value + 1);
                }
            } else {
                okay = is_okay();
                if (mode % 8 == 3) {
                    long my_value = value[0];
                    lock_kept = lock.upgrade_to_writer();
                    if (!lock_kept) {
                        my_value = value[0];
                    }
                    update();
                    okay = value_is(my_value + 1);
                }
            }
        }
        REQUIRE(okay);
    }
}; // struct Invariant
250 
251 static std::atomic<std::size_t> Order;
252 
253 template <typename State, long TestSize>
254 struct Work : utils::NoAssign {
255     static constexpr std::size_t chunk = 100;
256     State& state;
257 
258     Work( State& st ) : state(st){ Order = 0; }
259 
260     void operator()( int ) const {
261         std::size_t step;
262         while( (step = Order.fetch_add(chunk, std::memory_order_acquire)) < TestSize ) {
263             for (std::size_t i = 0; i < chunk && step < TestSize; ++i, ++step) {
264                 state.flog_once(step);
265             }
266         }
267     }
268 }; // struct Work
269 
270 constexpr std::size_t TEST_SIZE = 100000;
271 
272 template <typename M>
273 void test_basic( std::size_t nthread ) {
274     Counter<M> counter;
275     counter.value = 0;
276     Order = 0;
277     utils::NativeParallelFor(nthread, Work<Counter<M>, TEST_SIZE>(counter));
278 
279     REQUIRE(counter.value == TEST_SIZE);
280 }
281 
282 template <typename M>
283 void test_rw_basic( std::size_t nthread ) {
284     Invariant<M, 8> invariant;
285     Order = 0;
286     // use the macro because of a gcc 4.6 issue
287     utils::NativeParallelFor(nthread, Work<Invariant<M, 8>, TEST_SIZE>(invariant));
288     // There is either a writer or a reader upgraded to a writer for each 4th iteration
289     long expected_value = TEST_SIZE / 4;
290     REQUIRE(invariant.value_is(expected_value));
291 }
292 
293 template <typename M>
294 void test() {
295     for (std::size_t p : utils::concurrency_range()) {
296         test_basic<M>(p);
297     }
298 }
299 
300 template <typename M>
301 void test_rw() {
302     for (std::size_t p : utils::concurrency_range()) {
303         test_rw_basic<M>(p);
304     }
305 }
306 
307 } // namespace test_with_native_threads
308 
//! \brief \ref error_guessing
// Runs the shared upgrade/downgrade protocol harness (common/rwm_upgrade_downgrade.h)
// against tbb::spin_rw_mutex.
TEST_CASE("test upgrade/downgrade with spin_rw_mutex") {
    test_rwm_upgrade_downgrade<tbb::spin_rw_mutex>();
}
313 
314 //! \brief \ref error_guessing
315 TEST_CASE("test upgrade/downgrade with queueing_rw_mutex") {
316     test_rwm_upgrade_downgrade<tbb::queuing_rw_mutex>();
317 }
318 
//! \brief \ref error_guessing
// Runs the shared upgrade/downgrade protocol harness against
// tbb::speculative_spin_rw_mutex (falls back to non-speculative locking
// on hardware without TSX).
TEST_CASE("test upgrade/downgrade with speculative_spin_rw_mutex") {
    test_rwm_upgrade_downgrade<tbb::speculative_spin_rw_mutex>();
}
323 
//! \brief \ref error_guessing
// Exercises tbb::spin_mutex from native (non-TBB) threads.
TEST_CASE("test spin_mutex with native threads") {
    test_with_native_threads::test<tbb::spin_mutex>();
}
328 
//! \brief \ref error_guessing
// Exercises tbb::queuing_mutex from native (non-TBB) threads.
TEST_CASE("test queuing_mutex with native threads") {
    test_with_native_threads::test<tbb::queuing_mutex>();
}
333 
//! \brief \ref error_guessing
// Exercises tbb::spin_rw_mutex from native threads, both as a plain mutex
// and as a reader-writer mutex (including upgrade/downgrade paths).
TEST_CASE("test spin_rw_mutex with native threads") {
    test_with_native_threads::test<tbb::spin_rw_mutex>();
    test_with_native_threads::test_rw<tbb::spin_rw_mutex>();
}
339 
//! \brief \ref error_guessing
// Exercises tbb::queuing_rw_mutex from native threads, both as a plain mutex
// and as a reader-writer mutex (including upgrade/downgrade paths).
TEST_CASE("test queuing_rw_mutex with native threads") {
    test_with_native_threads::test<tbb::queuing_rw_mutex>();
    test_with_native_threads::test_rw<tbb::queuing_rw_mutex>();
}
345