/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#define __TBB_NO_IMPLICIT_LINKAGE 1

#include "common/test.h"
#include "common/utils.h"
#include "common/spin_barrier.h"
#include "tbb/detail/_utils.h"
#include "tbb/scalable_allocator.h"

#include <atomic>
#include <cstdint>
#include <thread>

static constexpr std::size_t MaxTasks = 16;
std::atomic<std::size_t> FinishedTasks;

static constexpr std::size_t MaxThread = 4;

/*--------------------------------------------------------------------*/
// The regression test against a bug triggered when malloc initialization
// and thread shutdown were called simultaneously, in which case the
// Windows dynamic loader lock and the allocator initialization/termination
// lock were taken in different order.

// Half of the threads allocate (forcing lazy allocator initialization) and
// the other half exit immediately (forcing thread shutdown); the barrier
// releases them all at once to maximize the overlap of the two code paths.
class TestFunc1 {
    utils::SpinBarrier* my_barr;
public:
    TestFunc1 (utils::SpinBarrier& barr) : my_barr(&barr) {}
    void operator() (bool do_malloc) const {
        my_barr->wait();
        if (do_malloc) scalable_malloc(10);
        ++FinishedTasks;
    }
};

void Test1 () {
    std::size_t NTasks = utils::min(MaxTasks, utils::max(std::size_t(2), MaxThread));
    utils::SpinBarrier barr(NTasks);
    TestFunc1 tf(barr);
    FinishedTasks = 0;

    // Spawn the threads directly rather than through a helper that joins them:
    // the deadlock check must run in the main thread while the workers may
    // still be hung, and the workers must be free to exit as soon as their
    // task is done, since thread shutdown itself is what triggers the bug.
    std::thread threads[MaxTasks];
    for (std::size_t i = 0; i < NTasks; ++i)
        threads[i] = std::thread(tf, i % 2 == 0);

    utils::Sleep(1000); // wait a second :)
    REQUIRE_MESSAGE(FinishedTasks == NTasks, "Some threads appear to deadlock");

    for (std::size_t i = 0; i < NTasks; ++i)
        threads[i].join();
}
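/* Editorial note on the mechanism above (a sketch with hypothetical lock
   names; the real locks are internal to tbbmalloc and the Windows loader):

     Thread A, first scalable_malloc():   Thread B, exiting:
       takes allocator init/term lock       takes loader lock (held by the OS
       then needs the loader lock             around DLL_THREAD_DETACH)
                                            then needs allocator init/term lock

   Each thread holds one lock while waiting for the other, a classic
   lock-order inversion. Test1 provokes it by releasing allocating threads
   and exiting threads from the barrier at the same moment. */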
/*--------------------------------------------------------------------*/
// The regression test against a bug when cross-thread deallocation
// caused livelock at thread shutdown.

std::atomic<void*> gPtr(nullptr);

// The first thread allocates a block, publishes it, and exits.
class TestFunc2a {
    utils::SpinBarrier* my_barr;
public:
    TestFunc2a (utils::SpinBarrier& barr) : my_barr(&barr) {}
    void operator() (std::size_t) const {
        gPtr = scalable_malloc(8);
        my_barr->wait();
        ++FinishedTasks;
    }
};

// The second thread frees that block (a cross-thread deallocation) and then
// joins the first thread, so the free overlaps with its shutdown.
class TestFunc2b {
    utils::SpinBarrier* my_barr;
    std::thread& my_ward;
public:
    TestFunc2b (utils::SpinBarrier& barr, std::thread& t) : my_barr(&barr), my_ward(t) {}
    void operator() (std::size_t) const {
        utils::SpinWaitWhileEq(gPtr, (void*)nullptr);
        scalable_free(gPtr);
        my_barr->wait();
        my_ward.join();
        ++FinishedTasks;
    }
};

void Test2() {
    utils::SpinBarrier barr(2);
    TestFunc2a func2a(barr);
    std::thread t2a;
    TestFunc2b func2b(barr, t2a);
    FinishedTasks = 0;
    t2a = std::thread(func2a, std::size_t(0));
    std::thread t2b(func2b, std::size_t(1));
    utils::Sleep(1000); // wait a second :)
    REQUIRE_MESSAGE(FinishedTasks == 2, "Threads appear to deadlock");

    t2b.join(); // t2a is monitored by t2b

    if (t2a.joinable()) t2a.join();
}

#if _WIN32||_WIN64

void TestKeyDtor() {}

#else

#include <pthread.h>

void *currSmall, *prevSmall, *currLarge, *prevLarge;

extern "C" void threadDtor(void*) {
    // First, release the memory that the thread allocated earlier;
    // freeing does not re-initialize the thread-local allocator data
    // if that data has already been destroyed.
    prevSmall = currSmall;
    scalable_free(currSmall);
    prevLarge = currLarge;
    scalable_free(currLarge);
    // Then allocate more memory: this does re-initialize the allocator's
    // thread-local data on the exiting thread.
    scalable_free(scalable_malloc(8));
}

inline bool intersectingObjects(const void *p1, const void *p2, size_t n)
{
    return p1 > p2 ? ((uintptr_t)p1 - (uintptr_t)p2) < n : ((uintptr_t)p2 - (uintptr_t)p1) < n;
}

struct TestThread: utils::NoAssign {
    TestThread(int) {}

    void operator()( std::size_t /*id*/ ) const {
        pthread_key_t key;

        // Expect the memory freed by the previous thread's destructor to be
        // reused here; a fresh address would indicate the old one leaked.
        currSmall = scalable_malloc(8);
        REQUIRE_MESSAGE((!prevSmall || currSmall == prevSmall), "Possible memory leak");
        currLarge = scalable_malloc(32*1024);
        // intersectingObjects takes the object shuffle into account
        REQUIRE_MESSAGE((!prevLarge || intersectingObjects(currLarge, prevLarge, 32*1024)), "Possible memory leak");
        pthread_key_create( &key, &threadDtor );
        pthread_setspecific(key, (const void*)42);
    }
};

// Test releasing memory from a pthread key destructor.
void TestKeyDtor() {
    // Allocate a region for large objects to prevent the whole region from
    // being released on the scalable_free(currLarge) call, which would cause
    // a spurious failure of the intersectingObjects check above.
    void* preventLargeRelease = scalable_malloc(32*1024);
    for (int i = 0; i < 4; i++)
        utils::NativeParallelFor( 1, TestThread(1) );
    scalable_free(preventLargeRelease);
}

#endif // _WIN32||_WIN64

//! \brief \ref error_guessing
TEST_CASE("test1") {
    Test1(); // requires malloc initialization so should be first
}

//! \brief \ref error_guessing
TEST_CASE("test2") {
    Test2();
}

//! \brief \ref error_guessing
TEST_CASE("test key dtor") {
    TestKeyDtor();
}
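// Editorial note: the "should be first" requirement above assumes the test
// framework runs TEST_CASEs in their declaration order within this file.
// That matches doctest's default ordering, but would not hold if an
// --order-by=rand (or similar) option were passed at run time.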