xref: /oneTBB/test/tbb/test_mutex.cpp (revision 51c0b2f7)
1 /*
2     Copyright (c) 2005-2020 Intel Corporation
3 
4     Licensed under the Apache License, Version 2.0 (the "License");
5     you may not use this file except in compliance with the License.
6     You may obtain a copy of the License at
7 
8         http://www.apache.org/licenses/LICENSE-2.0
9 
10     Unless required by applicable law or agreed to in writing, software
11     distributed under the License is distributed on an "AS IS" BASIS,
12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13     See the License for the specific language governing permissions and
14     limitations under the License.
15 */
16 
17 #include "common/test.h"
18 #include "common/utils.h"
19 #include "common/utils_concurrency_limit.h"
20 #include "common/config.h"
21 #include "common/rwm_upgrade_downgrade.h"
22 
23 #include <tbb/spin_mutex.h>
24 #include <tbb/spin_rw_mutex.h>
25 #include <tbb/queuing_mutex.h>
26 #include <tbb/queuing_rw_mutex.h>
27 #include <tbb/parallel_for.h>
28 #include <tbb/detail/_utils.h>
29 #include <tbb/detail/_machine.h>
30 
31 #include <atomic>
32 
33 //! \file test_mutex.cpp
//! \brief Test for [mutex.spin_mutex mutex.spin_rw_mutex mutex.queuing_mutex mutex.queuing_rw_mutex mutex.speculative_spin_mutex mutex.speculative_spin_rw_mutex] specifications
35 
//! Pairs a mutex of type M with the shared counter it protects.
template<typename M>
struct Counter {
    // Expose the protecting mutex type for generic test code
    // (same convention as test_with_native_threads::Counter below).
    using mutex_type = M;
    M mutex;
    volatile long value; // incremented only while `mutex` is held
};
42 
43 #if __TBB_TSX_INTRINSICS_PRESENT
44 
// Returns true when called from inside a hardware transaction,
// as reported by the RTM intrinsic _xtest().
inline static bool IsInsideTx() {
    return _xtest() != 0;
}
48 
// Runtime check for Intel TSX/RTM support: CPUID leaf 7, sub-leaf 0,
// EBX bit 11. Returns false on compilers with no CPUID path below.
bool have_TSX() {
    bool result = false;
    const int rtm_ebx_mask = 1<<11; // RTM feature flag in EBX
#if _MSC_VER
    int info[4] = {0,0,0,0};
    const int reg_ebx = 1; // __cpuidex output order is EAX, EBX, ECX, EDX
    __cpuidex(info, 7, 0);
    result = (info[reg_ebx] & rtm_ebx_mask)!=0;
#elif __GNUC__ || __SUNPRO_CC
    int32_t reg_ebx = 0;
    int32_t reg_eax = 7;   // CPUID leaf 7 (structured extended feature flags)
    int32_t reg_ecx = 0;   // sub-leaf 0
    // EBX is saved/restored via ESI (it may be reserved, e.g. as the PIC
    // register); the leaf's EBX result is copied into output %0 (EAX).
    __asm__ __volatile__ ( "movl %%ebx, %%esi\n"
                           "cpuid\n"
                           "movl %%ebx, %0\n"
                           "movl %%esi, %%ebx\n"
                           : "=a"(reg_ebx) : "0" (reg_eax), "c" (reg_ecx) : "esi",
#if __TBB_x86_64
                           "ebx",
#endif
                           "edx"
                           );
    result = (reg_ebx & rtm_ebx_mask)!=0 ;
#endif
    return result;
}
75 
//! Function object for use with parallel_for.h to see if a transaction is actually attempted.
// Global count of critical sections that were observed to run transactionally.
std::atomic<std::size_t> n_transactions_attempted;
template<typename C>
struct AddOne_CheckTransaction {

    // Not assignable (reference member); copyable so parallel_for can pass it around.
    AddOne_CheckTransaction& operator=(const AddOne_CheckTransaction&) = delete;
    AddOne_CheckTransaction(const AddOne_CheckTransaction&) = default;
    AddOne_CheckTransaction() = default;

    C& counter; // shared mutex + value being incremented
    /** Increments counter once for each iteration in the iteration space. */
    void operator()(tbb::blocked_range<size_t>& range) const {
        for (std::size_t i = range.begin(); i != range.end(); ++i) {
            bool transaction_attempted = false;
            {
              typename C::mutex_type::scoped_lock lock(counter.mutex);
              // _xtest() (via IsInsideTx) tells whether this critical
              // section is currently executing as a hardware transaction.
              if (IsInsideTx()) transaction_attempted = true;
              counter.value = counter.value + 1;
            }
            if(transaction_attempted) ++n_transactions_attempted;
            // Vary the delay between acquisitions to perturb contention patterns.
            tbb::detail::machine_pause(static_cast<int32_t>(i));
        }
    }
    AddOne_CheckTransaction(C& counter_) : counter(counter_) {}
};
101 
102 /* TestTransaction() checks if a speculative mutex actually uses transactions. */
103 template<typename M>
104 void TestTransaction(const char* name)
105 {
106     Counter<M> counter;
107     constexpr int n = 550;
108 
109     n_transactions_attempted = 0;
110     for(int i = 0; i < 5 && n_transactions_attempted.load(std::memory_order_relaxed) == 0; ++i) {
111         counter.value = 0;
112         tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n, 2), AddOne_CheckTransaction<Counter<M>>(counter));
113         REQUIRE(counter.value == n);
114     }
115     REQUIRE_MESSAGE(n_transactions_attempted.load(std::memory_order_relaxed), "ERROR for " << name << ": transactions were never attempted");
116 }
117 
118 
119 //! \brief \ref error_guessing
120 TEST_CASE("Transaction test") {
121     if(have_TSX()) {
122         TestTransaction<tbb::speculative_spin_mutex>("Speculative Spin Mutex");
123         TestTransaction<tbb::speculative_spin_rw_mutex>("Speculative Spin RW Mutex");
124     }
125 }
#endif /* __TBB_TSX_INTRINSICS_PRESENT */
127 
128 namespace test_with_native_threads {
129 
// Shared counter guarded by a mutex of type M; each call to flog_once
// performs one protected increment, alternating the lock-acquisition style.
template <typename M>
struct Counter {
    using mutex_type = M;

    M mutex;
    volatile long value;

    // Odd modes exercise implicit acquire + explicit release;
    // even modes exercise explicit acquire + implicit release.
    void flog_once( std::size_t mode ) {
        const bool implicit_acquire = (mode & 1) != 0;
        if (implicit_acquire) {
            typename mutex_type::scoped_lock guard(mutex);
            value += 1;
            guard.release();
        } else {
            typename mutex_type::scoped_lock guard;
            guard.acquire(mutex);
            value += 1;
        }
    }
}; // struct Counter
152 
153 template <typename M, long N>
154 struct Invariant {
155     using mutex_type = M;
156 
157     M mutex;
158     volatile long value[N];
159 
160     Invariant() {
161         for (long k = 0; k < N; ++k) {
162             value[k] = 0;
163         }
164     }
165 
166     void update() {
167         for (long k = 0; k < N; ++k) {
168             ++value[k];
169         }
170     }
171 
172     bool value_is( long expected_value ) const {
173         long tmp;
174 
175         for (long k = 0; k < N; ++k) {
176             if ((tmp = value[k]) != expected_value) {
177                 return false;
178             }
179         }
180         return true;
181     }
182 
183     bool is_okay() {
184         return value_is(value[0]);
185     }
186 
187     void flog_once( std::size_t mode ) {
188         // Every 8th access is a write access
189         bool write = (mode % 8) == 7;
190         bool okay = true;
191         bool lock_kept = true;
192 
193         if ((mode / 8) & 1) {
194             // Try implicit acquire and explicit release
195             typename mutex_type::scoped_lock lock(mutex, write);
196             if (write) {
197                 long my_value = value[0];
198                 update();
199                 if (mode % 16 == 7) {
200                     lock_kept = lock.downgrade_to_reader();
201                     if (!lock_kept) {
202                         my_value = value[0] - 1;
203                     }
204                     okay = value_is(my_value + 1);
205                 }
206             } else {
207                 okay = is_okay();
208                 if (mode % 8 == 3) {
209                     long my_value = value[0];
210                     lock_kept = lock.upgrade_to_writer();
211                     if (!lock_kept) {
212                         my_value = value[0];
213                     }
214                     update();
215                     okay = value_is(my_value + 1);
216                 }
217             }
218             lock.release();
219         } else {
220             // Try explicit acquire and implicit release
221             typename mutex_type::scoped_lock lock;
222             lock.acquire(mutex, write);
223             if (write) {
224                 long my_value = value[0];
225                 update();
226                 if (mode % 16 == 7) {
227                     lock_kept = lock.downgrade_to_reader();
228                     if (!lock_kept) {
229                         my_value = value[0] - 1;
230                     }
231                     okay = value_is(my_value + 1);
232                 }
233             } else {
234                 okay = is_okay();
235                 if (mode % 8 == 3) {
236                     long my_value = value[0];
237                     lock_kept = lock.upgrade_to_writer();
238                     if (!lock_kept) {
239                         my_value = value[0];
240                     }
241                     update();
242                     okay = value_is(my_value + 1);
243                 }
244             }
245         }
246         REQUIRE(okay);
247     }
248 }; // struct Invariant
249 
250 static std::atomic<std::size_t> Order;
251 
252 template <typename State, long TestSize>
253 struct Work : utils::NoAssign {
254     static constexpr std::size_t chunk = 100;
255     State& state;
256 
257     Work( State& st ) : state(st){ Order = 0; }
258 
259     void operator()( int ) const {
260         std::size_t step;
261         while( (step = Order.fetch_add(chunk, std::memory_order_acquire)) < TestSize ) {
262             for (std::size_t i = 0; i < chunk && step < TestSize; ++i, ++step) {
263                 state.flog_once(step);
264             }
265         }
266     }
267 }; // struct Work
268 
269 constexpr std::size_t TEST_SIZE = 100000;
270 
271 template <typename M>
272 void test_basic( std::size_t nthread ) {
273     Counter<M> counter;
274     counter.value = 0;
275     Order = 0;
276     utils::NativeParallelFor(nthread, Work<Counter<M>, TEST_SIZE>(counter));
277 
278     REQUIRE(counter.value == TEST_SIZE);
279 }
280 
281 template <typename M>
282 void test_rw_basic( std::size_t nthread ) {
283     Invariant<M, 8> invariant;
284     Order = 0;
285     // use the macro because of a gcc 4.6 issue
286     utils::NativeParallelFor(nthread, Work<Invariant<M, 8>, TEST_SIZE>(invariant));
287     // There is either a writer or a reader upgraded to a writer for each 4th iteration
288     long expected_value = TEST_SIZE / 4;
289     REQUIRE(invariant.value_is(expected_value));
290 }
291 
292 template <typename M>
293 void test() {
294     for (std::size_t p : utils::concurrency_range()) {
295         test_basic<M>(p);
296     }
297 }
298 
299 template <typename M>
300 void test_rw() {
301     for (std::size_t p : utils::concurrency_range()) {
302         test_rw_basic<M>(p);
303     }
304 }
305 
306 } // namespace test_with_native_threads
307 
// Reader<->writer upgrade/downgrade semantics of spin_rw_mutex
// (shared harness from common/rwm_upgrade_downgrade.h).
//! \brief \ref error_guessing
TEST_CASE("test upgrade/downgrade with spin_rw_mutex") {
    test_rwm_upgrade_downgrade<tbb::spin_rw_mutex>();
}
312 
313 //! \brief \ref error_guessing
314 TEST_CASE("test upgrade/downgrade with queueing_rw_mutex") {
315     test_rwm_upgrade_downgrade<tbb::queuing_rw_mutex>();
316 }
317 
// Reader<->writer upgrade/downgrade semantics of speculative_spin_rw_mutex.
//! \brief \ref error_guessing
TEST_CASE("test upgrade/downgrade with speculative_spin_rw_mutex") {
    test_rwm_upgrade_downgrade<tbb::speculative_spin_rw_mutex>();
}
322 
// The following cases drive each mutex flavor from raw native threads
// (utils::NativeParallelFor) rather than from the TBB scheduler.

//! \brief \ref error_guessing
TEST_CASE("test spin_mutex with native threads") {
    test_with_native_threads::test<tbb::spin_mutex>();
}

//! \brief \ref error_guessing
TEST_CASE("test queuing_mutex with native threads") {
    test_with_native_threads::test<tbb::queuing_mutex>();
}

// RW mutexes get both the plain-counter test and the invariant-based RW test.
//! \brief \ref error_guessing
TEST_CASE("test spin_rw_mutex with native threads") {
    test_with_native_threads::test<tbb::spin_rw_mutex>();
    test_with_native_threads::test_rw<tbb::spin_rw_mutex>();
}

//! \brief \ref error_guessing
TEST_CASE("test queuing_rw_mutex with native threads") {
    test_with_native_threads::test<tbb::queuing_rw_mutex>();
    test_with_native_threads::test_rw<tbb::queuing_rw_mutex>();
}
344