1 /* 2 Copyright (c) 2005-2020 Intel Corporation 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 #include <common/test.h> 18 #include <common/spin_barrier.h> 19 #include <common/state_trackable.h> 20 #include <common/container_move_support.h> 21 #include <common/range_based_for_support.h> 22 #include <common/utils.h> 23 #include <common/utils_concurrency_limit.h> 24 #include <common/vector_types.h> 25 #include <tbb/concurrent_vector.h> 26 #include <tbb/tick_count.h> 27 #include <tbb/parallel_reduce.h> 28 #include <tbb/parallel_for.h> 29 #include <algorithm> 30 #include <cmath> 31 32 //! \file test_concurrent_vector.cpp 33 //! 
\brief Test for [containers.concurrent_vector] specification 34 35 void TestSort() { 36 for( int n=0; n<100; n=n*3+1 ) { 37 tbb::concurrent_vector<int> array(n); 38 for( int i=0; i<n; ++i ){ 39 array.at(i) = (i*7)%n; 40 } 41 std::sort( array.begin(), array.end() ); 42 for( int i=0; i<n; ++i ){ 43 REQUIRE( array[i]==i ); 44 } 45 } 46 } 47 48 void TestRangeBasedFor(){ 49 using namespace range_based_for_support_tests; 50 51 using c_vector = tbb::concurrent_vector<int>; 52 c_vector a_c_vector; 53 54 const int sequence_length = 10; 55 for (int i = 1; i<= sequence_length; ++i){ 56 a_c_vector.push_back(i); 57 } 58 59 REQUIRE_MESSAGE( range_based_for_accumulate(a_c_vector, std::plus<int>(), 0) == gauss_summ_of_int_sequence(sequence_length), "incorrect accumulated value generated via range based for ?"); 60 } 61 62 struct default_container_traits { 63 template <typename container_type, typename iterator_type> 64 static container_type& construct_container(typename std::aligned_storage<sizeof(container_type)>::type& storage, iterator_type begin, iterator_type end){ 65 container_type* ptr = reinterpret_cast<container_type*>(&storage); 66 new (ptr) container_type(begin, end); 67 return *ptr; 68 } 69 70 template <typename container_type, typename iterator_type, typename allocator_type> 71 static container_type& construct_container(typename std::aligned_storage<sizeof(container_type)>::type& storage, iterator_type begin, iterator_type end, allocator_type const& a){ 72 container_type* ptr = reinterpret_cast<container_type*>(&storage); 73 new (ptr) container_type(begin, end, a); 74 return *ptr; 75 } 76 }; 77 78 struct c_vector_type : default_container_traits { 79 template <typename T, typename Allocator> 80 using container_type = tbb::concurrent_vector<T, Allocator>; 81 82 template <typename T> 83 using container_value_type = T; 84 85 using init_iterator_type = move_support_tests::FooIterator; 86 template<typename element_type, typename allocator_type> 87 struct apply{ 88 using 
type = tbb::concurrent_vector<element_type, allocator_type >; 89 }; 90 91 enum{ expected_number_of_items_to_allocate_for_steal_move = 0 }; 92 93 template<typename element_type, typename allocator_type, typename iterator> 94 static bool equal(tbb::concurrent_vector<element_type, allocator_type > const& c, iterator begin, iterator end){ 95 bool equal_sizes = (size_t)std::distance(begin, end) == c.size(); 96 return equal_sizes && std::equal(c.begin(), c.end(), begin); 97 } 98 }; 99 100 void TestSerialGrowByWithMoveIterators(){ 101 using fixture_t = move_support_tests::DefaultStatefulFixtureHelper<c_vector_type>::type; 102 using vector_t = fixture_t::container_type; 103 104 fixture_t fixture; 105 106 vector_t dst(fixture.dst_allocator); 107 dst.grow_by(std::make_move_iterator(fixture.source.begin()), std::make_move_iterator(fixture.source.end())); 108 109 fixture.verify_content_deep_moved(dst); 110 } 111 112 #if HAVE_m128 || HAVE_m256 113 114 template<typename ClassWithVectorType> 115 void TestVectorTypes() { 116 tbb::concurrent_vector<ClassWithVectorType> v; 117 for( int i = 0; i < 100; ++i ) { 118 // VC8 does not properly align a temporary value; to work around, use explicit variable 119 ClassWithVectorType foo(i); 120 v.push_back(foo); 121 for( int j=0; j<i; ++j ) { 122 ClassWithVectorType bar(j); 123 REQUIRE( v[j]==bar ); 124 } 125 } 126 } 127 #endif /* HAVE_m128 | HAVE_m256 */ 128 129 130 static tbb::concurrent_vector<std::size_t> Primes; 131 132 class FindPrimes { 133 bool is_prime( std::size_t val ) const { 134 int limit, factor = 3; 135 if( val<5u ) 136 return val==2; 137 else { 138 limit = long(sqrtf(float(val))+0.5f); 139 while( factor<=limit && val % factor ) 140 ++factor; 141 return factor>limit; 142 } 143 } 144 public: 145 void operator()( const std::size_t idx ) const { 146 if( idx % 2 && is_prime(idx) ) { 147 Primes.push_back( idx ); 148 } 149 } 150 }; 151 152 double TimeFindPrimes( std::size_t nthread ) { 153 Primes.clear(); 154 const std::size_t count 
= 1048576; 155 Primes.reserve(count);// TODO: or compact()? 156 tbb::tick_count t0 = tbb::tick_count::now(); 157 std::size_t block_size = count / nthread; 158 utils::NativeParallelFor(count, block_size, FindPrimes() ); 159 tbb::tick_count t1 = tbb::tick_count::now(); 160 return (t1-t0).seconds(); 161 } 162 163 void TestFindPrimes() { 164 // Time fully subscribed run. 165 166 // TimeFindPrimes( tbb::task_scheduler_init::automatic ); 167 double t2 = TimeFindPrimes( utils::get_platform_max_threads() ); 168 169 // Time parallel run that is very likely oversubscribed. 170 #if TBB_TEST_LOW_WORKLOAD 171 double tx = TimeFindPrimes(32); 172 #else 173 double tx = TimeFindPrimes(128); 174 #endif 175 INFO("TestFindPrimes: t2 == " << t2 << " tx == " << tx << "k == " << tx / t2); 176 177 // We allow the X-thread run a little extra time to allow for thread overhead. 178 // Theoretically, following test will fail on machine with >X processors. 179 // But that situation is not going to come up in the near future, 180 // and the generalization to fix the issue is not worth the trouble. 
181 WARN_MESSAGE( tx <= 1.3*t2, "Warning: grow_by is pathetically slow"); 182 INFO("t2 == " << t2 << " tx == " << tx << "k == " << tx / t2); 183 } 184 185 template <typename Type, typename Allocator> 186 class test_grow_by_and_resize { 187 tbb::concurrent_vector<Type, Allocator> &my_c; 188 public: 189 test_grow_by_and_resize( tbb::concurrent_vector<Type, Allocator> &c ) : my_c(c) {} 190 void operator()() const { 191 const typename tbb::concurrent_vector<Type, Allocator>::size_type sz = my_c.size(); 192 my_c.grow_by( 5 ); 193 REQUIRE( my_c.size() == sz + 5 ); 194 my_c.resize( sz ); 195 REQUIRE( my_c.size() == sz ); 196 } 197 }; 198 199 void test_scoped_allocator() { 200 using allocator_data_type = AllocatorAwareData<std::scoped_allocator_adaptor<std::allocator<int>>>; 201 using allocator_type = std::scoped_allocator_adaptor<std::allocator<allocator_data_type>>; 202 using container_type = tbb::concurrent_vector<allocator_data_type, allocator_type>; 203 204 allocator_type allocator; 205 allocator_data_type data1(1, allocator); 206 allocator_data_type data2(2, allocator); 207 208 auto init_list = {data1, data2}; 209 210 container_type c1(allocator), c2(allocator); 211 212 allocator_data_type::activate(); 213 214 c1.grow_by(100); 215 c1.grow_by(10, data1); 216 c1.grow_by(init_list.begin(), init_list.end()); 217 c1.grow_by(init_list); 218 219 c1.clear(); 220 221 c1.grow_to_at_least(100); 222 c1.grow_to_at_least(110, data1); 223 224 c1.clear(); 225 226 c1.push_back(data1); 227 c1.push_back(data2); 228 c1.push_back(std::move(data1)); 229 c1.emplace_back(1); 230 231 c1.clear(); 232 233 c1.reserve(100); 234 c1.resize(110); 235 c1.resize(100); 236 c1.resize(110, data1); 237 c1.resize(100, data1); 238 239 c1.shrink_to_fit(); 240 241 c1.clear(); 242 243 c1.grow_by(10, data1); 244 c2.grow_by(20, data2); 245 246 c1 = c2; 247 c2 = std::move(c1); 248 249 allocator_data_type::deactivate(); 250 } 251 252 template <bool default_construction_present> struct do_default_construction_test 
{ 253 template<typename FuncType> void operator() ( FuncType func ) const { func(); } 254 }; 255 template <> struct do_default_construction_test<false> { 256 template<typename FuncType> void operator()( FuncType ) const {} 257 }; 258 259 template <typename Type, typename Allocator> 260 void CompareVectors( const tbb::concurrent_vector<Type, Allocator> &c1, const tbb::concurrent_vector<Type, Allocator> &c2 ) { 261 REQUIRE( (!(c1 == c2) && c1 != c2) ); 262 REQUIRE( (c1 <= c2 && c1 < c2 && c2 >= c1 && c2 > c1) ); 263 } 264 265 template <typename Type, typename Allocator> 266 void CompareVectors( const tbb::concurrent_vector<std::weak_ptr<Type>, Allocator> &, const tbb::concurrent_vector<std::weak_ptr<Type>, Allocator> & ) { 267 /* do nothing for std::weak_ptr */ 268 } 269 270 template <bool default_construction_present, typename Type, typename Allocator> 271 void Examine( tbb::concurrent_vector<Type, Allocator> c, const std::vector<Type> &vec ) { 272 using vector_t = tbb::concurrent_vector<Type, Allocator>; 273 using size_type_t = typename vector_t::size_type; 274 275 276 REQUIRE( c.size() == vec.size() ); 277 for ( size_type_t i=0; i<c.size(); ++i ) { 278 REQUIRE( utils::IsEqual()(c[i], vec[i]) ); 279 } 280 do_default_construction_test<default_construction_present>()(test_grow_by_and_resize<Type,Allocator>(c)); 281 c.grow_by( size_type_t(5), c[0] ); 282 c.grow_to_at_least( c.size()+5, c.at(0) ); 283 vector_t c2; 284 c2.reserve( 5 ); 285 std::copy( c.begin(), c.begin() + 5, std::back_inserter( c2 ) ); 286 287 c.grow_by( c2.begin(), c2.end() ); 288 const vector_t& cvcr = c; 289 REQUIRE( utils::IsEqual()(cvcr.front(), *(c2.rend()-1)) ); 290 REQUIRE( utils::IsEqual()(cvcr.back(), *c2.rbegin())); 291 REQUIRE( utils::IsEqual()(*c.cbegin(), *(c.crend()-1)) ); 292 REQUIRE( utils::IsEqual()(*(c.cend()-1), *c.crbegin()) ); 293 c.swap( c2 ); 294 REQUIRE( c.size() == 5 ); 295 CompareVectors( c, c2 ); 296 c.swap( c2 ); 297 c2.clear(); 298 REQUIRE( c2.size() == 0 ); 299 
    // shrink_to_fit on an emptied vector, then allocator round-trip.
    c2.shrink_to_fit();
    Allocator a = c.get_allocator();
    a.deallocate( a.allocate(1), 1 );
}

// Functor wrapper around the "construct with size n" cases of TypeTester;
// dispatched through do_default_construction_test so it only runs when the
// element type is default-constructible.
template <typename Type>
class test_default_construction {
    const std::vector<Type> &my_vec;
public:
    test_default_construction( const std::vector<Type> &vec ) : my_vec(vec) {}
    void operator()() const {
        // Construction with initial size specified by argument n.
        tbb::concurrent_vector<Type> c7( my_vec.size() );
        std::copy( my_vec.begin(), my_vec.end(), c7.begin() );
        Examine</*default_construction_present = */true>( c7, my_vec );
        // Same, but with an explicit std::allocator.
        tbb::concurrent_vector< Type, std::allocator<Type> > c8( my_vec.size() );
        std::copy( c7.begin(), c7.end(), c8.begin() );
        Examine</*default_construction_present = */true>( c8, my_vec );
    }
};

// Runs Examine over every constructor form of concurrent_vector for one
// element type. `vec` supplies the reference contents (at least 5 elements).
template <bool default_construction_present, typename Type>
void TypeTester( const std::vector<Type> &vec ) {
    __TBB_ASSERT( vec.size() >= 5, "Array should have at least 5 elements" );
    // Construct empty vector.
    tbb::concurrent_vector<Type> c1;
    std::copy( vec.begin(), vec.end(), std::back_inserter(c1) );
    Examine<default_construction_present>( c1, vec );
    // Constructor from initializer_list.
    tbb::concurrent_vector<Type> c2({vec[0],vec[1],vec[2]});
    std::copy( vec.begin()+3, vec.end(), std::back_inserter(c2) );
    Examine<default_construction_present>( c2, vec );
    // Copying constructor.
    tbb::concurrent_vector<Type> c3(c1);
    Examine<default_construction_present>( c3, vec );
    // Construct with non-default allocator
    tbb::concurrent_vector< Type, std::allocator<Type> > c4;
    std::copy( vec.begin(), vec.end(), std::back_inserter(c4) );
    Examine<default_construction_present>( c4, vec );
    // Construction with initial size specified by argument n.
    do_default_construction_test<default_construction_present>()(test_default_construction<Type>(vec));
    // Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance.
    std::allocator<Type> allocator;
    tbb::concurrent_vector< Type, std::allocator<Type> > c9(vec.size(), vec[1], allocator);
    Examine<default_construction_present>( c9, std::vector<Type>(vec.size(), vec[1]) );
    // Construction with copying iteration range and given allocator instance.
    tbb::concurrent_vector< Type, std::allocator<Type> > c10(c1.begin(), c1.end(), allocator);
    Examine<default_construction_present>( c10, vec );
    // Construction from an iterator range with the default allocator.
    tbb::concurrent_vector<Type> c11(vec.begin(), vec.end());
    Examine<default_construction_present>( c11, vec );
}

// Drives TypeTester over representative element types: int,
// reference_wrapper (no default ctor), shared_ptr and weak_ptr.
void TestTypes() {
    const int NUMBER = 100;

    std::vector<int> intArr;
    for ( int i=0; i<NUMBER; ++i ) intArr.push_back(i);
    TypeTester</*default_construction_present = */true>( intArr );

    std::vector< std::reference_wrapper<int> > refArr;
    // The constructor of std::reference_wrapper<T> from T& is explicit in some versions of libstdc++.
    for ( int i=0; i<NUMBER; ++i ) refArr.push_back( std::reference_wrapper<int>(intArr[i]) );
    TypeTester</*default_construction_present = */false>( refArr );

    // std::vector< std::atomic<int> > tbbIntArr( NUMBER ); //TODO compilation error
    // for ( int i=0; i<NUMBER; ++i ) tbbIntArr[i] = i;
    // TypeTester</*default_construction_present = */true>( tbbIntArr );

    std::vector< std::shared_ptr<int> > shrPtrArr;
    for ( int i=0; i<NUMBER; ++i ) shrPtrArr.push_back( std::make_shared<int>(i) );
    TypeTester</*default_construction_present = */true>( shrPtrArr );

    // weak_ptrs observe the shared_ptrs above; comparisons are skipped for
    // them via the CompareVectors overload.
    std::vector< std::weak_ptr<int> > wkPtrArr;
    std::copy( shrPtrArr.begin(), shrPtrArr.end(), std::back_inserter(wkPtrArr) );
    TypeTester</*default_construction_present = */true>( wkPtrArr );
}

// grow_by over an empty range must return end() and leave the vector intact.
// `range_begin_end` serves as both begin and end of the (empty) range.
template <typename Vector>
void test_grow_by_empty_range( Vector &v, typename Vector::value_type* range_begin_end ) {
    const Vector v_copy = v;
    REQUIRE_MESSAGE( (v.grow_by( range_begin_end, range_begin_end ) == v.end()), "grow_by(empty_range) returned a wrong iterator." );
    REQUIRE_MESSAGE( v == v_copy, "grow_by(empty_range) has changed the vector." );
}

// grow_by(I,I) on a fresh (optionally pre-fragmented via reserve(1)) vector:
// checks the returned iterator and the copied contents.
void TestSerialGrowByRange( bool fragmented_vector ) {
    tbb::concurrent_vector<int> v;
    if ( fragmented_vector ) {
        v.reserve( 1 );
    }
    int init_range[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    REQUIRE_MESSAGE( (v.grow_by( init_range, init_range + (utils::array_length( init_range )) ) == v.begin()), "grow_by(I,I) returned a wrong iterator." );
    REQUIRE_MESSAGE( std::equal( v.begin(), v.end(), init_range ), "grow_by(I,I) did not properly copied all elements ?"
);
    // Empty ranges: a valid pointer pair and a null pointer pair.
    test_grow_by_empty_range( v, init_range );
    test_grow_by_empty_range( v, (int*)nullptr );
}

// Stress test mixing thread-safe growth operations with unsafe whole-vector
// operations (shrink_to_fit / clear / resize). Unsafe operations run under a
// hand-rolled exclusion protocol: the thread that wins the CAS on
// curr_unsafe_thread performs the unsafe step while all other threads are
// parked on the barrier. NOTE(review): active_threads/ready_threads are
// plain ints mutated from several threads; this appears to be a deliberate
// benign-race design of the original test and is left untouched.
template <typename allocator_type>
void TestConcurrentOperationsWithUnSafeOperations(std::size_t threads_number) {
    using vector_type = tbb::concurrent_vector<move_support_tests::Foo, allocator_type>;

    vector_type vector;

    constexpr std::size_t max_operations = 1000;
    // -1 means no thread currently holds the "unsafe" lock.
    std::atomic<int> curr_unsafe_thread{-1};
    // Operation codes stored in `operations`:
    //   0 - safe operations (push_back / grow_by / grow_to_at_least)
    //   1 - shrink_to_fit
    //   2 - clear + shrink_to_fit
    //   3 - resize
    // First 95% of slots are safe (0); the rest are random unsafe codes.
    std::vector<std::size_t> operations(std::size_t(max_operations * 0.95), 0);
    utils::FastRandom<> op_rand(42);
    for (std::size_t i = std::size_t(max_operations * 0.95); i < max_operations; ++i) {
        std::size_t random_operation = op_rand.get() % 3;
        operations.push_back(random_operation + 1);
    }

    // Per-thread completion flags: active_threads[id] becomes 1 once thread
    // `id` has consumed all of its operations; after that the thread keeps
    // executing only safe operations until everyone is done.
    std::vector<int> active_threads(threads_number, 0);
    // True once every thread has finished its own operation list.
    auto all_done = [&active_threads] {
        for (std::size_t i = 0; i < active_threads.size(); ++i) {
            if (active_threads[i] == 0) return false;
        }
        return true;
    };

    // Second flag set (ready_threads) gives a two-phase exit: a thread first
    // observes all_done(), marks itself ready, and only leaves once every
    // thread has done the same — so nobody exits while others may still
    // rendezvous on the barrier.
    std::vector<int> ready_threads(threads_number, 0);
    auto all_ready_leave = [&ready_threads] {
        for (std::size_t i = 0; i < ready_threads.size(); ++i) {
            if (ready_threads[i] == 0) return false;
        }
        return true;
    };

    utils::SpinBarrier barrier(threads_number);
    // `operations` is captured by value so each thread shuffles its own copy;
    // the const_casts below work around the non-mutable lambda.
    auto concurrent_func = [operations, &vector, &curr_unsafe_thread, &barrier, &all_done, &active_threads,
                            &all_ready_leave, &ready_threads] (std::size_t thread_id)
    {
        utils::FastRandom<> rand(thread_id);

        // std::shuffle doesn't work with msvc2017 and FastRandom
        for (std::size_t i = 0; i < operations.size(); ++i) {
            std::size_t j = rand.get() % operations.size();
            std::swap(*const_cast<std::size_t*>(&operations[i]), *const_cast<std::size_t*>(&operations[j]));
        }

        std::size_t i = 0;
        do {
            if (all_done()) ready_threads[thread_id] = 1;
            if (curr_unsafe_thread.load() != -1) {
                // An unsafe operation is pending: park here.
                // First wait releases the unsafe thread into its operation;
                // second wait holds everyone until that operation finishes.
                barrier.wait();
                barrier.wait();
            }
            // Safe path: taken when this thread is already finished (its
            // flag short-circuits the || , so operations[i] is never read
            // with i past the end) or the current slot is a safe code.
            if (active_threads[thread_id] == 1 || operations[i] == 0) {
                // Pick one of the three thread-safe growth operations.
                std::size_t random_operation = rand.get() % 3;
                switch (random_operation) {
                    case 0:
                    {
                        vector.push_back(1);
                    }
                    break;
                    case 1:
                    {
                        std::size_t grow_size = rand.get() % 100;
                        vector.grow_by(grow_size, 1);
                    }
                    break;
                    case 2:
                    {
                        std::size_t grow_at_least_size = vector.size() + rand.get() % 100;
                        vector.grow_to_at_least(grow_at_least_size, 1);
                    }
                    break;
                }
            } else {
                int default_unsafe_thread = -1;
                // Try to become the single unsafe thread; losers fall
                // through and will park on the barrier next iteration.
                if (curr_unsafe_thread.compare_exchange_strong(default_unsafe_thread, int(thread_id))) {
                    barrier.wait();
                    // All other threads are blocked: run the unsafe operation.
                    switch (operations[i]) {
                        case 1:
                            vector.shrink_to_fit();
                            break;
                        case 2:
                        {
                            vector.clear();
                            vector.shrink_to_fit();
                        }
                        break;
                        case 3:
                        {
                            vector.resize(0);
                        }
                        break;
                    }
                    curr_unsafe_thread = -1;
                    barrier.wait();
                }
            }
            ++i;
            if (i >= operations.size()) active_threads[thread_id] = 1;
        } while (!all_ready_leave() || !all_done());
    };

    utils::NativeParallelFor(threads_number, concurrent_func);

    // Release everything so the caller can verify allocator counters balance.
    vector.clear();
    vector.shrink_to_fit();
}

// Sums the elements of a splittable range in parallel (used by the
// "Reducing concurrent_vector" test case below).
template <typename RangeType>
int
reduce_vector(RangeType test_range) { 519 return tbb::parallel_reduce(test_range, 0, 520 [] ( const RangeType& range, int sum ) { 521 for (auto it = range.begin(); it != range.end(); ++it) { 522 sum += *it; 523 } 524 525 return sum; 526 }, 527 [] ( const int& lhs, const int& rhs) { 528 return lhs + rhs; 529 } 530 ); 531 } 532 533 //! Test the grow_by on range 534 //! \brief \ref interface \ref requirement 535 TEST_CASE("testing serial grow_by range"){ 536 TestSerialGrowByRange(/*fragmented_vector = */false); 537 TestSerialGrowByRange(/*fragmented_vector = */true); 538 } 539 540 //! Test of push_back operation 541 //! \brief \ref interface 542 TEST_CASE("testing range based for support"){ 543 TestRangeBasedFor(); 544 } 545 546 //! Test of work STL algorithms with concurrent_vector iterator. 547 //! \brief \ref interface 548 TEST_CASE("testing sort"){ 549 TestSort(); 550 } 551 552 //! Test concurrent_vector with vector types 553 //! \brief \ref error_guessing 554 TEST_CASE("testing concurrent_vector with vector types"){ 555 #if HAVE_m128 556 TestVectorTypes<ClassWithSSE>(); 557 #endif 558 #if HAVE_m256 559 if (have_AVX()) TestVectorTypes<ClassWithAVX>(); 560 #endif 561 } 562 563 //! Test concurrent push_back operation 564 //! \brief \ref error_guessing 565 TEST_CASE("testing find primes"){ 566 TestFindPrimes(); 567 } 568 569 //! Test concurrent_vector with std::scoped_allocator_adaptor 570 //! \brief \ref error_guessing 571 TEST_CASE("test concurrent_vector with std::scoped_allocator_adaptor") { 572 test_scoped_allocator(); 573 } 574 575 //! Test type of vector 576 //! \brief \ref requirement 577 TEST_CASE("testing types"){ 578 TestTypes(); 579 } 580 581 //! Test concurrent and unsafe operations 582 //! 
\brief \ref regression \ref error_guessing 583 TEST_CASE("Work without hang") { 584 using allocator_type = StaticSharedCountingAllocator<std::allocator<move_support_tests::Foo>>; 585 std::size_t max_threads = utils::get_platform_max_threads() - 1; 586 587 for (std::size_t threads = 1; threads < max_threads; threads = std::size_t(double(threads) * 2.7)) { 588 allocator_type::init_counters(); 589 TestConcurrentOperationsWithUnSafeOperations<allocator_type>(threads); 590 591 REQUIRE( allocator_type::allocations == allocator_type::frees ); 592 REQUIRE( allocator_type::items_allocated == allocator_type::items_freed ); 593 REQUIRE( allocator_type::items_constructed == allocator_type::items_destroyed ); 594 } 595 } 596 597 #if TBB_USE_EXCEPTIONS 598 //! Whitebox test for segment table extension 599 //! \brief \ref regression \ref error_guessing 600 TEST_CASE("Whitebox test for segment table extension") { 601 using allocator_type = StaticSharedCountingAllocator<std::allocator<move_support_tests::Foo>>; 602 using vector_type = tbb::concurrent_vector<move_support_tests::Foo, allocator_type>; 603 604 std::size_t max_number_of_elements_in_embedded = 12; 605 606 for (std::size_t i = 3; i < max_number_of_elements_in_embedded; i += 3) { 607 vector_type vector; 608 allocator_type::init_counters(); 609 allocator_type::set_limits(std::size_t(1) << (i + 1)); 610 611 try { 612 for (std::size_t j = 0; j < std::size_t(1) << i; ++j) { 613 vector.push_back(1); 614 } 615 vector.grow_by(1000); 616 } catch (std::bad_alloc& ) { 617 allocator_type::set_limits(); 618 vector_type copy_of_vector(vector); 619 vector_type copy_of_copy(copy_of_vector); 620 vector_type assigned_vector; 621 assigned_vector = vector; 622 REQUIRE(copy_of_vector == copy_of_copy); 623 REQUIRE(assigned_vector == copy_of_copy); 624 } 625 } 626 } 627 628 //! Test exception in constructors 629 //! 
\brief \ref regression \ref error_guessing 630 TEST_CASE("Test exception in constructors") { 631 using allocator_type = StaticSharedCountingAllocator<std::allocator<double>>; 632 using vector_type = tbb::concurrent_vector<double, allocator_type>; 633 634 allocator_type::set_limits(1); 635 636 REQUIRE_THROWS_AS( [] { 637 vector_type vec1(42, 42.); 638 utils::suppress_unused_warning(vec1); 639 }(), const std::bad_alloc); 640 641 auto list = { 42., 42., 42., 42., 42., 42., 42., 42., 42., 42. }; 642 REQUIRE_THROWS_AS( [&] { 643 vector_type vec2(list.begin(), list.end()); 644 utils::suppress_unused_warning(vec2); 645 }(), const std::bad_alloc); 646 647 allocator_type::init_counters(); 648 allocator_type::set_limits(0); 649 vector_type src_vec(42, 42.); 650 allocator_type::set_limits(1); 651 652 REQUIRE_THROWS_AS( [&] { 653 vector_type vec3(src_vec, allocator_type{}); 654 utils::suppress_unused_warning(vec3); 655 }(), const std::bad_alloc); 656 } 657 #endif // TBB_USE_EXCEPTIONS 658 659 //! \brief \ref regression \ref error_guessing 660 TEST_CASE("Reducing concurrent_vector") { 661 constexpr int final_sum = 100000; 662 tbb::concurrent_vector<int> vec(final_sum, 1); 663 const tbb::concurrent_vector<int> cvec(vec); 664 665 CHECK(reduce_vector(vec.range()) == final_sum); 666 CHECK(reduce_vector(cvec.range()) == final_sum); 667 } 668 669 670 //! \brief \ref error_guessing 671 TEST_CASE("swap with not always equal allocators"){ 672 using allocator_type = NotAlwaysEqualAllocator<int>; 673 using vector_type = tbb::concurrent_vector<int, allocator_type>; 674 675 vector_type vec1{}; 676 vector_type vec2(42, 42); 677 678 swap(vec1, vec2); 679 680 CHECK(vec2.empty()); 681 } 682 683 // The problem was that after allocating first_block, 684 // no write was made to the embedded table. 685 // Also, two threads could be in the table extension section at once. 
686 // NOTE: If the implementation of the vector has an issue, this test will either hang 687 // or fail with the assertion in debug mode. 688 //! \brief \ref regression 689 TEST_CASE("Testing vector in a highly concurrent environment") { 690 for (std::size_t i = 0; i < 10000; ++i) { 691 tbb::concurrent_vector<int> test_vec; 692 693 tbb::parallel_for(tbb::blocked_range<std::size_t>(0, 10000), [&] (const tbb::blocked_range<std::size_t>&) { 694 test_vec.grow_by(1); 695 }, tbb::static_partitioner{}); 696 697 REQUIRE(test_vec.size() == utils::get_platform_max_threads()); 698 } 699 } 700