1 /* 2 Copyright (c) 2005-2021 Intel Corporation 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 #if __INTEL_COMPILER && _MSC_VER 18 #pragma warning(disable : 2586) // decorated name length exceeded, name was truncated 19 #endif 20 21 #include <common/test.h> 22 #include <common/spin_barrier.h> 23 #include <common/state_trackable.h> 24 #include <common/container_move_support.h> 25 #include <common/range_based_for_support.h> 26 #include <common/utils.h> 27 #include <common/utils_concurrency_limit.h> 28 #include <common/vector_types.h> 29 #include <tbb/concurrent_vector.h> 30 #include <tbb/tick_count.h> 31 #include <tbb/parallel_reduce.h> 32 #include <tbb/parallel_for.h> 33 #include <algorithm> 34 #include <cmath> 35 36 //! \file test_concurrent_vector.cpp 37 //! \brief Test for [containers.concurrent_vector] specification 38 39 void TestSort() { 40 for( int n=0; n<100; n=n*3+1 ) { 41 tbb::concurrent_vector<int> array(n); 42 for( int i=0; i<n; ++i ){ 43 array.at(i) = (i*7)%n; 44 } 45 std::sort( array.begin(), array.end() ); 46 for( int i=0; i<n; ++i ){ 47 REQUIRE( array[i]==i ); 48 } 49 } 50 } 51 52 void TestRangeBasedFor(){ 53 using namespace range_based_for_support_tests; 54 55 using c_vector = tbb::concurrent_vector<int>; 56 c_vector a_c_vector; 57 58 const int sequence_length = 10; 59 for (int i = 1; i<= sequence_length; ++i){ 60 a_c_vector.push_back(i); 61 } 62 63 REQUIRE_MESSAGE( range_based_for_accumulate(a_c_vector, std::plus<int>(), 0) == gauss_summ_of_int_sequence(sequence_length), "incorrect accumulated value generated via range based for ?"); 64 } 65 66 struct default_container_traits { 67 template <typename container_type, typename iterator_type> 68 static container_type& construct_container(typename std::aligned_storage<sizeof(container_type)>::type& storage, iterator_type begin, iterator_type end){ 69 container_type* ptr = reinterpret_cast<container_type*>(&storage); 70 new (ptr) container_type(begin, end); 71 return *ptr; 72 } 73 74 template <typename container_type, typename iterator_type, typename allocator_type> 75 static container_type& construct_container(typename std::aligned_storage<sizeof(container_type)>::type& storage, iterator_type begin, iterator_type end, allocator_type const& a){ 76 container_type* ptr = reinterpret_cast<container_type*>(&storage); 77 new (ptr) container_type(begin, end, a); 78 return *ptr; 79 } 80 }; 81 82 struct c_vector_type : default_container_traits { 83 template <typename T, typename Allocator> 84 using container_type = tbb::concurrent_vector<T, Allocator>; 85 86 template <typename T> 87 using container_value_type = T; 88 89 using init_iterator_type = move_support_tests::FooIterator; 90 template<typename element_type, typename allocator_type> 91 struct apply{ 92 using type = tbb::concurrent_vector<element_type, allocator_type >; 93 }; 94 95 enum{ expected_number_of_items_to_allocate_for_steal_move = 0 }; 96 97 template<typename element_type, typename allocator_type, typename iterator> 98 static bool equal(tbb::concurrent_vector<element_type, allocator_type > const& c, iterator begin, iterator end){ 99 bool equal_sizes = (size_t)std::distance(begin, end) == c.size(); 100 return equal_sizes && std::equal(c.begin(), c.end(), begin); 101 } 102 }; 103 104 void TestSerialGrowByWithMoveIterators(){ 105 using fixture_t = move_support_tests::DefaultStatefulFixtureHelper<c_vector_type>::type; 106 using vector_t = fixture_t::container_type; 107 108 fixture_t fixture; 109 110 vector_t dst(fixture.dst_allocator); 111 dst.grow_by(std::make_move_iterator(fixture.source.begin()), std::make_move_iterator(fixture.source.end())); 112 113 fixture.verify_content_deep_moved(dst); 114 } 115 116 #if HAVE_m128 || HAVE_m256 117 118 template<typename ClassWithVectorType> 119 void TestVectorTypes() { 120 tbb::concurrent_vector<ClassWithVectorType> v; 121 for( int i = 0; i < 100; ++i ) { 122 // VC8 does not properly align a temporary value; to work around, use explicit variable 123 ClassWithVectorType foo(i); 124 v.push_back(foo); 125 for( int j=0; j<i; ++j ) { 126 ClassWithVectorType bar(j); 127 REQUIRE( v[j]==bar ); 128 } 129 } 130 } 131 #endif /* HAVE_m128 | HAVE_m256 */ 132 133 134 static tbb::concurrent_vector<std::size_t> Primes; 135 136 class FindPrimes { 137 bool is_prime( std::size_t val ) const { 138 int limit, factor = 3; 139 if( val<5u ) 140 return val==2; 141 else { 142 limit = long(sqrtf(float(val))+0.5f); 143 while( factor<=limit && val % factor ) 144 ++factor; 145 return factor>limit; 146 } 147 } 148 public: 149 void operator()( const std::size_t idx ) const { 150 if( idx % 2 && is_prime(idx) ) { 151 Primes.push_back( idx ); 152 } 153 } 154 }; 155 156 double TimeFindPrimes( std::size_t nthread ) { 157 Primes.clear(); 158 const std::size_t count = 1048576; 159 Primes.reserve(count);// TODO: or compact()? 160 tbb::tick_count t0 = tbb::tick_count::now(); 161 std::size_t block_size = count / nthread; 162 utils::NativeParallelFor(count, block_size, FindPrimes() ); 163 tbb::tick_count t1 = tbb::tick_count::now(); 164 return (t1-t0).seconds(); 165 } 166 167 void TestFindPrimes() { 168 // Time fully subscribed run. 169 170 // TimeFindPrimes( tbb::task_scheduler_init::automatic ); 171 double t2 = TimeFindPrimes( utils::get_platform_max_threads() ); 172 173 // Time parallel run that is very likely oversubscribed. 174 #if TBB_TEST_LOW_WORKLOAD 175 double tx = TimeFindPrimes(32); 176 #else 177 double tx = TimeFindPrimes(128); 178 #endif 179 INFO("TestFindPrimes: t2 == " << t2 << " tx == " << tx << "k == " << tx / t2); 180 181 // We allow the X-thread run a little extra time to allow for thread overhead. 182 // Theoretically, following test will fail on machine with >X processors. 183 // But that situation is not going to come up in the near future, 184 // and the generalization to fix the issue is not worth the trouble. 185 WARN_MESSAGE( tx <= 1.3*t2, "Warning: grow_by is pathetically slow"); 186 INFO("t2 == " << t2 << " tx == " << tx << "k == " << tx / t2); 187 } 188 189 template <typename Type, typename Allocator> 190 class test_grow_by_and_resize { 191 tbb::concurrent_vector<Type, Allocator> &my_c; 192 public: 193 test_grow_by_and_resize( tbb::concurrent_vector<Type, Allocator> &c ) : my_c(c) {} 194 void operator()() const { 195 const typename tbb::concurrent_vector<Type, Allocator>::size_type sz = my_c.size(); 196 my_c.grow_by( 5 ); 197 REQUIRE( my_c.size() == sz + 5 ); 198 my_c.resize( sz ); 199 REQUIRE( my_c.size() == sz ); 200 } 201 }; 202 203 void test_scoped_allocator() { 204 using allocator_data_type = AllocatorAwareData<std::scoped_allocator_adaptor<std::allocator<int>>>; 205 using allocator_type = std::scoped_allocator_adaptor<std::allocator<allocator_data_type>>; 206 using container_type = tbb::concurrent_vector<allocator_data_type, allocator_type>; 207 208 allocator_type allocator; 209 allocator_data_type data1(1, allocator); 210 allocator_data_type data2(2, allocator); 211 212 auto init_list = {data1, data2}; 213 214 container_type c1(allocator), c2(allocator); 215 216 allocator_data_type::activate(); 217 218 c1.grow_by(100); 219 c1.grow_by(10, data1); 220 c1.grow_by(init_list.begin(), init_list.end()); 221 c1.grow_by(init_list); 222 223 c1.clear(); 224 225 c1.grow_to_at_least(100); 226 c1.grow_to_at_least(110, data1); 227 228 c1.clear(); 229 230 c1.push_back(data1); 231 c1.push_back(data2); 232 c1.push_back(std::move(data1)); 233 c1.emplace_back(1); 234 235 c1.clear(); 236 237 c1.reserve(100); 238 c1.resize(110); 239 c1.resize(100); 240 c1.resize(110, data1); 241 c1.resize(100, data1); 242 243 c1.shrink_to_fit(); 244 245 c1.clear(); 246 247 c1.grow_by(10, data1); 248 c2.grow_by(20, data2); 249 250 c1 = c2; 251 c2 = std::move(c1); 252 253 allocator_data_type::deactivate(); 254 } 255 256 template <bool default_construction_present> struct do_default_construction_test { 257 template<typename FuncType> void operator() ( FuncType func ) const { func(); } 258 }; 259 template <> struct do_default_construction_test<false> { 260 template<typename FuncType> void operator()( FuncType ) const {} 261 }; 262 263 template <typename Type, typename Allocator> 264 void CompareVectors( const tbb::concurrent_vector<Type, Allocator> &c1, const tbb::concurrent_vector<Type, Allocator> &c2 ) { 265 REQUIRE( (!(c1 == c2) && c1 != c2) ); 266 REQUIRE( (c1 <= c2 && c1 < c2 && c2 >= c1 && c2 > c1) ); 267 } 268 269 template <typename Type, typename Allocator> 270 void CompareVectors( const tbb::concurrent_vector<std::weak_ptr<Type>, Allocator> &, const tbb::concurrent_vector<std::weak_ptr<Type>, Allocator> & ) { 271 /* do nothing for std::weak_ptr */ 272 } 273 274 template <bool default_construction_present, typename Type, typename Allocator> 275 void Examine( tbb::concurrent_vector<Type, Allocator> c, const std::vector<Type> &vec ) { 276 using vector_t = tbb::concurrent_vector<Type, Allocator>; 277 using size_type_t = typename vector_t::size_type; 278 279 280 REQUIRE( c.size() == vec.size() ); 281 for ( size_type_t i=0; i<c.size(); ++i ) { 282 REQUIRE( utils::IsEqual()(c[i], vec[i]) ); 283 } 284 do_default_construction_test<default_construction_present>()(test_grow_by_and_resize<Type,Allocator>(c)); 285 c.grow_by( size_type_t(5), c[0] ); 286 c.grow_to_at_least( c.size()+5, c.at(0) ); 287 vector_t c2; 288 c2.reserve( 5 ); 289 std::copy( c.begin(), c.begin() + 5, std::back_inserter( c2 ) ); 290 291 c.grow_by( c2.begin(), c2.end() ); 292 const vector_t& cvcr = c; 293 REQUIRE( utils::IsEqual()(cvcr.front(), *(c2.rend()-1)) ); 294 REQUIRE( utils::IsEqual()(cvcr.back(), *c2.rbegin())); 295 REQUIRE( utils::IsEqual()(*c.cbegin(), *(c.crend()-1)) ); 296 REQUIRE( utils::IsEqual()(*(c.cend()-1), *c.crbegin()) ); 297 c.swap( c2 ); 298 REQUIRE( c.size() == 5 ); 299 CompareVectors( c, c2 ); 300 c.swap( c2 ); 301 c2.clear(); 302 REQUIRE( c2.size() == 0 ); 303 c2.shrink_to_fit(); 304 Allocator a = c.get_allocator(); 305 a.deallocate( a.allocate(1), 1 ); 306 } 307 308 template <typename Type> 309 class test_default_construction { 310 const std::vector<Type> &my_vec; 311 public: 312 test_default_construction( const std::vector<Type> &vec ) : my_vec(vec) {} 313 void operator()() const { 314 // Construction with initial size specified by argument n. 315 tbb::concurrent_vector<Type> c7( my_vec.size() ); 316 std::copy( my_vec.begin(), my_vec.end(), c7.begin() ); 317 Examine</*default_construction_present = */true>( c7, my_vec ); 318 tbb::concurrent_vector< Type, std::allocator<Type> > c8( my_vec.size() ); 319 std::copy( c7.begin(), c7.end(), c8.begin() ); 320 Examine</*default_construction_present = */true>( c8, my_vec ); 321 } 322 }; 323 324 template <bool default_construction_present, typename Type> 325 void TypeTester( const std::vector<Type> &vec ) { 326 __TBB_ASSERT( vec.size() >= 5, "Array should have at least 5 elements" ); 327 // Construct empty vector. 328 tbb::concurrent_vector<Type> c1; 329 std::copy( vec.begin(), vec.end(), std::back_inserter(c1) ); 330 Examine<default_construction_present>( c1, vec ); 331 // Constructor from initializer_list. 332 tbb::concurrent_vector<Type> c2({vec[0],vec[1],vec[2]}); 333 std::copy( vec.begin()+3, vec.end(), std::back_inserter(c2) ); 334 Examine<default_construction_present>( c2, vec ); 335 // Copying constructor. 336 tbb::concurrent_vector<Type> c3(c1); 337 Examine<default_construction_present>( c3, vec ); 338 // Construct with non-default allocator 339 tbb::concurrent_vector< Type, std::allocator<Type> > c4; 340 std::copy( vec.begin(), vec.end(), std::back_inserter(c4) ); 341 Examine<default_construction_present>( c4, vec ); 342 // Construction with initial size specified by argument n. 343 do_default_construction_test<default_construction_present>()(test_default_construction<Type>(vec)); 344 // Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance. 345 std::allocator<Type> allocator; 346 tbb::concurrent_vector< Type, std::allocator<Type> > c9(vec.size(), vec[1], allocator); 347 Examine<default_construction_present>( c9, std::vector<Type>(vec.size(), vec[1]) ); 348 // Construction with copying iteration range and given allocator instance. 349 tbb::concurrent_vector< Type, std::allocator<Type> > c10(c1.begin(), c1.end(), allocator); 350 Examine<default_construction_present>( c10, vec ); 351 tbb::concurrent_vector<Type> c11(vec.begin(), vec.end()); 352 Examine<default_construction_present>( c11, vec ); 353 } 354 355 void TestTypes() { 356 const int NUMBER = 100; 357 358 std::vector<int> intArr; 359 for ( int i=0; i<NUMBER; ++i ) intArr.push_back(i); 360 TypeTester</*default_construction_present = */true>( intArr ); 361 362 std::vector< std::reference_wrapper<int> > refArr; 363 // The constructor of std::reference_wrapper<T> from T& is explicit in some versions of libstdc++. 364 for ( int i=0; i<NUMBER; ++i ) refArr.push_back( std::reference_wrapper<int>(intArr[i]) ); 365 TypeTester</*default_construction_present = */false>( refArr ); 366 367 // std::vector< std::atomic<int> > tbbIntArr( NUMBER ); //TODO compilation error 368 // for ( int i=0; i<NUMBER; ++i ) tbbIntArr[i] = i; 369 // TypeTester</*default_construction_present = */true>( tbbIntArr ); 370 371 std::vector< std::shared_ptr<int> > shrPtrArr; 372 for ( int i=0; i<NUMBER; ++i ) shrPtrArr.push_back( std::make_shared<int>(i) ); 373 TypeTester</*default_construction_present = */true>( shrPtrArr ); 374 375 std::vector< std::weak_ptr<int> > wkPtrArr; 376 std::copy( shrPtrArr.begin(), shrPtrArr.end(), std::back_inserter(wkPtrArr) ); 377 TypeTester</*default_construction_present = */true>( wkPtrArr ); 378 } 379 380 template <typename Vector> 381 void test_grow_by_empty_range( Vector &v, typename Vector::value_type* range_begin_end ) { 382 const Vector v_copy = v; 383 REQUIRE_MESSAGE( (v.grow_by( range_begin_end, range_begin_end ) == v.end()), "grow_by(empty_range) returned a wrong iterator." ); 384 REQUIRE_MESSAGE( v == v_copy, "grow_by(empty_range) has changed the vector." ); 385 } 386 387 void TestSerialGrowByRange( bool fragmented_vector ) { 388 tbb::concurrent_vector<int> v; 389 if ( fragmented_vector ) { 390 v.reserve( 1 ); 391 } 392 int init_range[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }; 393 REQUIRE_MESSAGE( (v.grow_by( init_range, init_range + (utils::array_length( init_range )) ) == v.begin()), "grow_by(I,I) returned a wrong iterator." ); 394 REQUIRE_MESSAGE( std::equal( v.begin(), v.end(), init_range ), "grow_by(I,I) did not properly copied all elements ?" ); 395 test_grow_by_empty_range( v, init_range ); 396 test_grow_by_empty_range( v, (int*)nullptr ); 397 } 398 399 template <typename allocator_type> 400 void TestConcurrentOperationsWithUnSafeOperations(std::size_t threads_number) { 401 using vector_type = tbb::concurrent_vector<move_support_tests::Foo, allocator_type>; 402 403 vector_type vector; 404 405 constexpr std::size_t max_operations = 1000; 406 std::atomic<int> curr_unsafe_thread{-1}; 407 // 0 - is safe operations 408 // 1 - is shrink_to_fit 409 // 2 - is clear + shrink_to_fit 410 // 3 - is resize 411 std::vector<std::size_t> operations(std::size_t(max_operations * 0.95), 0); 412 utils::FastRandom<> op_rand(42); 413 for (std::size_t i = std::size_t(max_operations * 0.95); i < max_operations; ++i) { 414 std::size_t random_operation = op_rand.get() % 3; 415 operations.push_back(random_operation + 1); 416 } 417 418 // Array of active threads 419 std::unique_ptr<std::atomic<int>[]> active_threads{ new std::atomic<int>[threads_number]() }; 420 // If thread still have i < max_operations than in array will be false 421 // When some thread finish it operation, set true in active_thread on thread_id position and start executing only safe operations 422 // Than wait all threads 423 // When all threads is finish their operations, all thread exit from lambda 424 auto all_done = [&active_threads, threads_number] { 425 for (std::size_t i = 0; i < threads_number; ++i) { 426 if (active_threads[i].load(std::memory_order_relaxed) == 0) return false; 427 } 428 return true; 429 }; 430 431 // Need double synchronization to correct work 432 std::unique_ptr<std::atomic<int>[]> ready_threads{ new std::atomic<int>[threads_number]() }; 433 auto all_ready_leave = [&ready_threads, threads_number] { 434 for (std::size_t i = 0; i < threads_number; ++i) { 435 if (ready_threads[i].load(std::memory_order_relaxed) == 0) return false; 436 } 437 return true; 438 }; 439 440 utils::SpinBarrier barrier(threads_number); 441 auto concurrent_func = [&operations, &vector, &curr_unsafe_thread, &barrier, &all_done, &active_threads, 442 &all_ready_leave, &ready_threads] (std::size_t thread_id) 443 { 444 std::vector<std::size_t> local_operations(operations); 445 utils::FastRandom<> rand(thread_id); 446 // std::shuffle doesn't work with msvc2017 and FastRandom 447 for (std::size_t i = local_operations.size(); i > 1; --i) { 448 std::size_t j = rand.get() % i; 449 std::swap(local_operations[i - 1], local_operations[j]); 450 } 451 452 std::size_t i = 0; 453 do { 454 if (all_done()) ready_threads[thread_id] = 1; 455 if (curr_unsafe_thread.load() != -1) { 456 // If lock taken, wait 457 // First wait unblock unsafe thread 458 // Second wait finish unsafe operations 459 barrier.wait(); 460 barrier.wait(); 461 } 462 // Is safe operation 463 if (active_threads[thread_id] == 1 || local_operations[i] == 0) { 464 // If lock is free, perform various operations 465 std::size_t random_operation = rand.get() % 3; 466 switch (random_operation) { 467 case 0: 468 { 469 vector.push_back(1); 470 } 471 break; 472 case 1: 473 { 474 std::size_t grow_size = rand.get() % 100; 475 vector.grow_by(grow_size, 1); 476 } 477 break; 478 case 2: 479 { 480 std::size_t grow_at_least_size = vector.size() + rand.get() % 100; 481 vector.grow_to_at_least(grow_at_least_size, 1); 482 } 483 break; 484 } 485 } else { 486 int default_unsafe_thread = -1; 487 if (curr_unsafe_thread.compare_exchange_strong(default_unsafe_thread, int(thread_id))) { 488 barrier.wait(); 489 // All threads are blocked we can execute our unsafe operation 490 switch (local_operations[i]) { 491 case 1: 492 vector.shrink_to_fit(); 493 break; 494 case 2: 495 { 496 vector.clear(); 497 vector.shrink_to_fit(); 498 } 499 break; 500 case 3: 501 { 502 vector.resize(0); 503 } 504 break; 505 } 506 curr_unsafe_thread = -1; 507 barrier.wait(); 508 } 509 } 510 ++i; 511 if (i >= local_operations.size()) active_threads[thread_id] = 1; 512 } while (!all_ready_leave() || !all_done()); 513 }; 514 515 utils::NativeParallelFor(threads_number, concurrent_func); 516 517 vector.clear(); 518 vector.shrink_to_fit(); 519 } 520 521 template <typename RangeType> 522 int reduce_vector(RangeType test_range) { 523 return tbb::parallel_reduce(test_range, 0, 524 [] ( const RangeType& range, int sum ) { 525 for (auto it = range.begin(); it != range.end(); ++it) { 526 sum += *it; 527 } 528 529 return sum; 530 }, 531 [] ( const int& lhs, const int& rhs) { 532 return lhs + rhs; 533 } 534 ); 535 } 536 537 //! Test the grow_by on range 538 //! \brief \ref interface \ref requirement 539 TEST_CASE("testing serial grow_by range"){ 540 TestSerialGrowByRange(/*fragmented_vector = */false); 541 TestSerialGrowByRange(/*fragmented_vector = */true); 542 } 543 544 //! Test of push_back operation 545 //! \brief \ref interface 546 TEST_CASE("testing range based for support"){ 547 TestRangeBasedFor(); 548 } 549 550 //! Test of work STL algorithms with concurrent_vector iterator. 551 //! \brief \ref interface 552 TEST_CASE("testing sort"){ 553 TestSort(); 554 } 555 556 //! Test concurrent_vector with vector types 557 //! \brief \ref error_guessing 558 TEST_CASE("testing concurrent_vector with vector types"){ 559 #if HAVE_m128 560 TestVectorTypes<ClassWithSSE>(); 561 #endif 562 #if HAVE_m256 563 if (have_AVX()) TestVectorTypes<ClassWithAVX>(); 564 #endif 565 } 566 567 //! Test concurrent push_back operation 568 //! \brief \ref error_guessing 569 TEST_CASE("testing find primes"){ 570 TestFindPrimes(); 571 } 572 573 //! Test concurrent_vector with std::scoped_allocator_adaptor 574 //! \brief \ref error_guessing 575 TEST_CASE("test concurrent_vector with std::scoped_allocator_adaptor") { 576 test_scoped_allocator(); 577 } 578 579 //! Test type of vector 580 //! \brief \ref requirement 581 TEST_CASE("testing types"){ 582 TestTypes(); 583 } 584 585 //! Test concurrent and unsafe operations 586 //! \brief \ref regression \ref error_guessing 587 TEST_CASE("Work without hang") { 588 using allocator_type = StaticSharedCountingAllocator<std::allocator<move_support_tests::Foo>>; 589 std::size_t max_threads = utils::get_platform_max_threads() - 1; 590 591 for (std::size_t threads = 1; threads < max_threads; threads = std::size_t(double(threads) * 2.7)) { 592 allocator_type::init_counters(); 593 TestConcurrentOperationsWithUnSafeOperations<allocator_type>(threads); 594 595 REQUIRE( allocator_type::allocations == allocator_type::frees ); 596 REQUIRE( allocator_type::items_allocated == allocator_type::items_freed ); 597 REQUIRE( allocator_type::items_constructed == allocator_type::items_destroyed ); 598 } 599 } 600 601 #if TBB_USE_EXCEPTIONS 602 //! Whitebox test for segment table extension 603 //! \brief \ref regression \ref error_guessing 604 TEST_CASE("Whitebox test for segment table extension") { 605 using allocator_type = StaticSharedCountingAllocator<std::allocator<move_support_tests::Foo>>; 606 using vector_type = tbb::concurrent_vector<move_support_tests::Foo, allocator_type>; 607 608 std::size_t max_number_of_elements_in_embedded = 12; 609 610 for (std::size_t i = 3; i < max_number_of_elements_in_embedded; i += 3) { 611 vector_type vector; 612 allocator_type::init_counters(); 613 allocator_type::set_limits(std::size_t(1) << (i + 1)); 614 615 try { 616 for (std::size_t j = 0; j < std::size_t(1) << i; ++j) { 617 vector.push_back(1); 618 } 619 vector.grow_by(1000); 620 } catch (std::bad_alloc& ) { 621 allocator_type::set_limits(); 622 vector_type copy_of_vector(vector); 623 vector_type copy_of_copy(copy_of_vector); 624 vector_type assigned_vector; 625 assigned_vector = vector; 626 REQUIRE(copy_of_vector == copy_of_copy); 627 REQUIRE(assigned_vector == copy_of_copy); 628 } 629 } 630 } 631 632 //! Test exception in constructors 633 //! \brief \ref regression \ref error_guessing 634 TEST_CASE("Test exception in constructors") { 635 using allocator_type = StaticSharedCountingAllocator<std::allocator<double>>; 636 using vector_type = tbb::concurrent_vector<double, allocator_type>; 637 638 allocator_type::set_limits(1); 639 640 REQUIRE_THROWS_AS( [] { 641 vector_type vec1(42, 42.); 642 utils::suppress_unused_warning(vec1); 643 }(), const std::bad_alloc); 644 645 auto list = { 42., 42., 42., 42., 42., 42., 42., 42., 42., 42. }; 646 REQUIRE_THROWS_AS( [&] { 647 vector_type vec2(list.begin(), list.end()); 648 utils::suppress_unused_warning(vec2); 649 }(), const std::bad_alloc); 650 651 allocator_type::init_counters(); 652 allocator_type::set_limits(0); 653 vector_type src_vec(42, 42.); 654 allocator_type::set_limits(1); 655 656 REQUIRE_THROWS_AS( [&] { 657 vector_type vec3(src_vec, allocator_type{}); 658 utils::suppress_unused_warning(vec3); 659 }(), const std::bad_alloc); 660 } 661 #endif // TBB_USE_EXCEPTIONS 662 663 //! \brief \ref regression \ref error_guessing 664 TEST_CASE("Reducing concurrent_vector") { 665 constexpr int final_sum = 100000; 666 tbb::concurrent_vector<int> vec(final_sum, 1); 667 const tbb::concurrent_vector<int> cvec(vec); 668 669 CHECK(reduce_vector(vec.range()) == final_sum); 670 CHECK(reduce_vector(cvec.range()) == final_sum); 671 } 672 673 674 //! \brief \ref error_guessing 675 TEST_CASE("swap with not always equal allocators"){ 676 using allocator_type = NotAlwaysEqualAllocator<int>; 677 using vector_type = tbb::concurrent_vector<int, allocator_type>; 678 679 vector_type vec1{}; 680 vector_type vec2(42, 42); 681 682 swap(vec1, vec2); 683 684 CHECK(vec2.empty()); 685 } 686 687 // The problem was that after allocating first_block, 688 // no write was made to the embedded table. 689 // Also, two threads could be in the table extension section at once. 690 // NOTE: If the implementation of the vector has an issue, this test will either hang 691 // or fail with the assertion in debug mode. 692 //! \brief \ref regression 693 TEST_CASE("Testing vector in a highly concurrent environment") { 694 for (std::size_t i = 0; i < 10000; ++i) { 695 tbb::concurrent_vector<int> test_vec; 696 697 tbb::parallel_for(tbb::blocked_range<std::size_t>(0, 10000), [&] (const tbb::blocked_range<std::size_t>&) { 698 test_vec.grow_by(1); 699 }, tbb::static_partitioner{}); 700 701 REQUIRE(test_vec.size() == utils::get_platform_max_threads()); 702 } 703 } 704