/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#if __INTEL_COMPILER && _MSC_VER
#pragma warning(disable : 2586) // decorated name length exceeded, name was truncated
#endif

#include <common/test.h>
#include <common/spin_barrier.h>
#include <common/state_trackable.h>
#include <common/container_move_support.h>
#include <common/range_based_for_support.h>
#include <common/utils.h>
#include <common/utils_concurrency_limit.h>
#include <common/vector_types.h>
#include <common/concepts_common.h>
#include <tbb/concurrent_vector.h>
#include <tbb/tick_count.h>
#include <tbb/parallel_reduce.h>
#include <tbb/parallel_for.h>
#include <algorithm>
#include <cmath>

//! \file test_concurrent_vector.cpp
//! \brief Test for [containers.concurrent_vector] specification

void TestSort() {
    for( int n=0; n<100; n=n*3+1 ) {
        tbb::concurrent_vector<int> array(n);
        for( int i=0; i<n; ++i ){
            array.at(i) = (i*7)%n;
        }
        std::sort( array.begin(), array.end() );
        for( int i=0; i<n; ++i ){
            REQUIRE( array[i]==i );
        }
    }
}

void TestRangeBasedFor(){
    using namespace range_based_for_support_tests;

    using c_vector = tbb::concurrent_vector<int>;
    c_vector a_c_vector;

    const int sequence_length = 10;
    for (int i = 1; i <= sequence_length; ++i){
        a_c_vector.push_back(i);
    }

    REQUIRE_MESSAGE( range_based_for_accumulate(a_c_vector, std::plus<int>(), 0) == gauss_summ_of_int_sequence(sequence_length),
                     "Incorrect accumulated value generated via range-based for" );
}

struct default_container_traits {
    template <typename container_type, typename iterator_type>
    static container_type& construct_container(typename std::aligned_storage<sizeof(container_type)>::type& storage, iterator_type begin, iterator_type end){
        container_type* ptr = reinterpret_cast<container_type*>(&storage);
        new (ptr) container_type(begin, end);
        return *ptr;
    }

    template <typename container_type, typename iterator_type, typename allocator_type>
    static container_type& construct_container(typename std::aligned_storage<sizeof(container_type)>::type& storage, iterator_type begin, iterator_type end, allocator_type const& a){
        container_type* ptr = reinterpret_cast<container_type*>(&storage);
        new (ptr) container_type(begin, end, a);
        return *ptr;
    }
};

struct c_vector_type : default_container_traits {
    template <typename T, typename Allocator>
    using container_type = tbb::concurrent_vector<T, Allocator>;

    template <typename T>
    using container_value_type = T;

    using init_iterator_type = move_support_tests::FooIterator;

    template<typename element_type, typename allocator_type>
    struct apply{
        using type = tbb::concurrent_vector<element_type, allocator_type>;
    };

    enum{ expected_number_of_items_to_allocate_for_steal_move = 0 };

    template<typename element_type, typename allocator_type, typename iterator>
    static bool equal(tbb::concurrent_vector<element_type, allocator_type> const& c, iterator begin, iterator end){
        bool equal_sizes = (size_t)std::distance(begin, end) == c.size();
        return equal_sizes && std::equal(c.begin(), c.end(), begin);
    }
};

void TestSerialGrowByWithMoveIterators(){
    using fixture_t = move_support_tests::DefaultStatefulFixtureHelper<c_vector_type>::type;
    using vector_t = fixture_t::container_type;

    fixture_t fixture;

    vector_t dst(fixture.dst_allocator);
    dst.grow_by(std::make_move_iterator(fixture.source.begin()), std::make_move_iterator(fixture.source.end()));

    fixture.verify_content_deep_moved(dst);
}

#if HAVE_m128 || HAVE_m256

template<typename ClassWithVectorType>
void TestVectorTypes() {
    tbb::concurrent_vector<ClassWithVectorType> v;
    for( int i = 0; i < 100; ++i ) {
        // VC8 does not properly align a temporary value; to work around, use an explicit variable
        ClassWithVectorType foo(i);
        v.push_back(foo);
        for( int j=0; j<i; ++j ) {
            ClassWithVectorType bar(j);
            REQUIRE( v[j]==bar );
        }
    }
}
#endif /* HAVE_m128 || HAVE_m256 */


static tbb::concurrent_vector<std::size_t> Primes;

class FindPrimes {
    bool is_prime( std::size_t val ) const {
        int limit, factor = 3;
        if( val<5u )
            return val==2;
        else {
            limit = long(sqrtf(float(val))+0.5f);
            while( factor<=limit && val % factor )
                ++factor;
            return factor>limit;
        }
    }
public:
    void operator()( const std::size_t idx ) const {
        if( idx % 2 && is_prime(idx) ) {
            Primes.push_back( idx );
        }
    }
};

double TimeFindPrimes( std::size_t nthread ) {
    Primes.clear();
    const std::size_t count = 1048576;
    Primes.reserve(count); // TODO: or compact()?
    tbb::tick_count t0 = tbb::tick_count::now();
    std::size_t block_size = count / nthread;
    utils::NativeParallelFor(count, block_size, FindPrimes() );
    tbb::tick_count t1 = tbb::tick_count::now();
    return (t1-t0).seconds();
}

void TestFindPrimes() {
    // Time a fully subscribed run.

    // TimeFindPrimes( tbb::task_scheduler_init::automatic );
    double t2 = TimeFindPrimes( utils::get_platform_max_threads() );

    // Time a parallel run that is very likely oversubscribed.
#if TBB_TEST_LOW_WORKLOAD
    double tx = TimeFindPrimes(32);
#else
    double tx = TimeFindPrimes(128);
#endif
    INFO("TestFindPrimes: t2 == " << t2 << " tx == " << tx << " k == " << tx / t2);

    // We allow the oversubscribed run a little extra time to account for thread overhead.
    // Theoretically, the following check will fail on a machine with more than that many processors,
    // but that situation is not going to come up in the near future,
    // and the generalization needed to fix the issue is not worth the trouble.
    WARN_MESSAGE( tx <= 1.3*t2, "Warning: grow_by is pathetically slow");
    INFO("t2 == " << t2 << " tx == " << tx << " k == " << tx / t2);
}

template <typename Type, typename Allocator>
class test_grow_by_and_resize {
    tbb::concurrent_vector<Type, Allocator> &my_c;
public:
    test_grow_by_and_resize( tbb::concurrent_vector<Type, Allocator> &c ) : my_c(c) {}
    void operator()() const {
        const typename tbb::concurrent_vector<Type, Allocator>::size_type sz = my_c.size();
        my_c.grow_by( 5 );
        REQUIRE( my_c.size() == sz + 5 );
        my_c.resize( sz );
        REQUIRE( my_c.size() == sz );
    }
};

void test_scoped_allocator() {
    using allocator_data_type = AllocatorAwareData<std::scoped_allocator_adaptor<std::allocator<int>>>;
    using allocator_type = std::scoped_allocator_adaptor<std::allocator<allocator_data_type>>;
    using container_type = tbb::concurrent_vector<allocator_data_type, allocator_type>;

    allocator_type allocator;
    allocator_data_type data1(1, allocator);
    allocator_data_type data2(2, allocator);

    auto init_list = {data1, data2};

    container_type c1(allocator), c2(allocator);

    allocator_data_type::activate();

    c1.grow_by(100);
    c1.grow_by(10, data1);
    c1.grow_by(init_list.begin(), init_list.end());
    c1.grow_by(init_list);

    c1.clear();

    c1.grow_to_at_least(100);
    c1.grow_to_at_least(110, data1);

    c1.clear();

    c1.push_back(data1);
    c1.push_back(data2);
    c1.push_back(std::move(data1));
    c1.emplace_back(1);

    c1.clear();

    c1.reserve(100);
    c1.resize(110);
    c1.resize(100);
    c1.resize(110, data1);
    c1.resize(100, data1);

    c1.shrink_to_fit();

    c1.clear();

    c1.grow_by(10, data1);
    c2.grow_by(20, data2);

    c1 = c2;
    c2 = std::move(c1);

    allocator_data_type::deactivate();
}

template <bool default_construction_present> struct do_default_construction_test {
    template<typename FuncType> void operator() ( FuncType func ) const { func(); }
};
template <> struct do_default_construction_test<false> {
    template<typename FuncType> void operator()( FuncType ) const {}
};

template <typename Type, typename Allocator>
void CompareVectors( const tbb::concurrent_vector<Type, Allocator> &c1, const tbb::concurrent_vector<Type, Allocator> &c2 ) {
    REQUIRE( (!(c1 == c2) && c1 != c2) );
    REQUIRE( (c1 <= c2 && c1 < c2 && c2 >= c1 && c2 > c1) );
}

template <typename Type, typename Allocator>
void CompareVectors( const tbb::concurrent_vector<std::weak_ptr<Type>, Allocator> &, const tbb::concurrent_vector<std::weak_ptr<Type>, Allocator> & ) {
    /* do nothing for std::weak_ptr */
}

template <bool default_construction_present, typename Type, typename Allocator>
void Examine( tbb::concurrent_vector<Type, Allocator> c, const std::vector<Type> &vec ) {
    using vector_t = tbb::concurrent_vector<Type, Allocator>;
    using size_type_t = typename vector_t::size_type;

    REQUIRE( c.size() == vec.size() );
    for ( size_type_t i=0; i<c.size(); ++i ) {
        REQUIRE( utils::IsEqual()(c[i], vec[i]) );
    }
    do_default_construction_test<default_construction_present>()(test_grow_by_and_resize<Type,Allocator>(c));
    c.grow_by( size_type_t(5), c[0] );
    c.grow_to_at_least( c.size()+5, c.at(0) );
    vector_t c2;
    c2.reserve( 5 );
    std::copy( c.begin(), c.begin() + 5, std::back_inserter( c2 ) );

    c.grow_by( c2.begin(), c2.end() );
    const vector_t& cvcr = c;
    REQUIRE( utils::IsEqual()(cvcr.front(), *(c2.rend()-1)) );
    REQUIRE( utils::IsEqual()(cvcr.back(), *c2.rbegin()) );
    REQUIRE( utils::IsEqual()(*c.cbegin(), *(c.crend()-1)) );
    REQUIRE( utils::IsEqual()(*(c.cend()-1), *c.crbegin()) );
    c.swap( c2 );
    REQUIRE( c.size() == 5 );
    CompareVectors( c, c2 );
    c.swap( c2 );
    c2.clear();
    REQUIRE( c2.size() == 0 );
    c2.shrink_to_fit();
    Allocator a = c.get_allocator();
    a.deallocate( a.allocate(1), 1 );
}

template <typename Type>
class test_default_construction {
    const std::vector<Type> &my_vec;
public:
    test_default_construction( const std::vector<Type> &vec ) : my_vec(vec) {}
    void operator()() const {
        // Construction with initial size specified by argument n.
        tbb::concurrent_vector<Type> c7( my_vec.size() );
        std::copy( my_vec.begin(), my_vec.end(), c7.begin() );
        Examine</*default_construction_present = */true>( c7, my_vec );
        tbb::concurrent_vector< Type, std::allocator<Type> > c8( my_vec.size() );
        std::copy( c7.begin(), c7.end(), c8.begin() );
        Examine</*default_construction_present = */true>( c8, my_vec );
    }
};

template <bool default_construction_present, typename Type>
void TypeTester( const std::vector<Type> &vec ) {
    __TBB_ASSERT( vec.size() >= 5, "Array should have at least 5 elements" );
    // Construct empty vector.
    tbb::concurrent_vector<Type> c1;
    std::copy( vec.begin(), vec.end(), std::back_inserter(c1) );
    Examine<default_construction_present>( c1, vec );
    // Constructor from initializer_list.
    tbb::concurrent_vector<Type> c2({vec[0],vec[1],vec[2]});
    std::copy( vec.begin()+3, vec.end(), std::back_inserter(c2) );
    Examine<default_construction_present>( c2, vec );
    // Copying constructor.
    tbb::concurrent_vector<Type> c3(c1);
    Examine<default_construction_present>( c3, vec );
    // Construct with non-default allocator.
    tbb::concurrent_vector< Type, std::allocator<Type> > c4;
    std::copy( vec.begin(), vec.end(), std::back_inserter(c4) );
    Examine<default_construction_present>( c4, vec );
    // Construction with initial size specified by argument n.
    do_default_construction_test<default_construction_present>()(test_default_construction<Type>(vec));
    // Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance.
    std::allocator<Type> allocator;
    tbb::concurrent_vector< Type, std::allocator<Type> > c9(vec.size(), vec[1], allocator);
    Examine<default_construction_present>( c9, std::vector<Type>(vec.size(), vec[1]) );
    // Construction with copying iteration range and given allocator instance.
    tbb::concurrent_vector< Type, std::allocator<Type> > c10(c1.begin(), c1.end(), allocator);
    Examine<default_construction_present>( c10, vec );
    tbb::concurrent_vector<Type> c11(vec.begin(), vec.end());
    Examine<default_construction_present>( c11, vec );
}

void TestTypes() {
    const int NUMBER = 100;

    std::vector<int> intArr;
    for ( int i=0; i<NUMBER; ++i ) intArr.push_back(i);
    TypeTester</*default_construction_present = */true>( intArr );

    std::vector< std::reference_wrapper<int> > refArr;
    // The constructor of std::reference_wrapper<T> from T& is explicit in some versions of libstdc++.
    for ( int i=0; i<NUMBER; ++i ) refArr.push_back( std::reference_wrapper<int>(intArr[i]) );
    TypeTester</*default_construction_present = */false>( refArr );

    // std::vector< std::atomic<int> > tbbIntArr( NUMBER ); //TODO compilation error
    // for ( int i=0; i<NUMBER; ++i ) tbbIntArr[i] = i;
    // TypeTester</*default_construction_present = */true>( tbbIntArr );

    std::vector< std::shared_ptr<int> > shrPtrArr;
    for ( int i=0; i<NUMBER; ++i ) shrPtrArr.push_back( std::make_shared<int>(i) );
    TypeTester</*default_construction_present = */true>( shrPtrArr );

    std::vector< std::weak_ptr<int> > wkPtrArr;
    std::copy( shrPtrArr.begin(), shrPtrArr.end(), std::back_inserter(wkPtrArr) );
    TypeTester</*default_construction_present = */true>( wkPtrArr );
}

template <typename Vector>
void test_grow_by_empty_range( Vector &v, typename Vector::value_type* range_begin_end ) {
    const Vector v_copy = v;
    REQUIRE_MESSAGE( (v.grow_by( range_begin_end, range_begin_end ) == v.end()), "grow_by(empty_range) returned a wrong iterator." );
    REQUIRE_MESSAGE( v == v_copy, "grow_by(empty_range) has changed the vector." );
}

void TestSerialGrowByRange( bool fragmented_vector ) {
    tbb::concurrent_vector<int> v;
    if ( fragmented_vector ) {
        v.reserve( 1 );
    }
    int init_range[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    REQUIRE_MESSAGE( (v.grow_by( init_range, init_range + (utils::array_length( init_range )) ) == v.begin()), "grow_by(I,I) returned a wrong iterator." );
    REQUIRE_MESSAGE( std::equal( v.begin(), v.end(), init_range ), "grow_by(I,I) did not properly copy all elements." );
    test_grow_by_empty_range( v, init_range );
    test_grow_by_empty_range( v, (int*)nullptr );
}

template <typename allocator_type>
void TestConcurrentOperationsWithUnSafeOperations(std::size_t threads_number) {
    using vector_type = tbb::concurrent_vector<move_support_tests::Foo, allocator_type>;

    vector_type vector;

    constexpr std::size_t max_operations = 1000;
    std::atomic<int> curr_unsafe_thread{-1};
    // Operation codes:
    // 0 - safe operations
    // 1 - shrink_to_fit
    // 2 - clear + shrink_to_fit
    // 3 - resize
    std::vector<std::size_t> operations(std::size_t(max_operations * 0.95), 0);
    utils::FastRandom<> op_rand(42);
    for (std::size_t i = std::size_t(max_operations * 0.95); i < max_operations; ++i) {
        std::size_t random_operation = op_rand.get() % 3;
        operations.push_back(random_operation + 1);
    }

    // Array of active threads.
    std::unique_ptr<std::atomic<int>[]> active_threads{ new std::atomic<int>[threads_number]() };
    // While a thread still has operations left (i < max_operations), its slot in the array stays 0.
    // When a thread finishes its operations, it sets its slot in active_threads to 1 and keeps
    // executing only safe operations, then waits for all other threads.
    // Once every thread has finished its operations, all threads exit the lambda.
    auto all_done = [&active_threads, threads_number] {
        for (std::size_t i = 0; i < threads_number; ++i) {
            if (active_threads[i].load(std::memory_order_relaxed) == 0) return false;
        }
        return true;
    };

    // Double synchronization is needed for the shutdown to work correctly.
    std::unique_ptr<std::atomic<int>[]> ready_threads{ new std::atomic<int>[threads_number]() };
    auto all_ready_leave = [&ready_threads, threads_number] {
        for (std::size_t i = 0; i < threads_number; ++i) {
            if (ready_threads[i].load(std::memory_order_relaxed) == 0) return false;
        }
        return true;
    };

    utils::SpinBarrier barrier(threads_number);
    auto concurrent_func = [&operations, &vector, &curr_unsafe_thread, &barrier, &all_done, &active_threads,
                            &all_ready_leave, &ready_threads] (std::size_t thread_id)
    {
        std::vector<std::size_t> local_operations(operations);
        utils::FastRandom<> rand(thread_id);
        // std::shuffle doesn't work with msvc2017 and FastRandom
        for (std::size_t i = local_operations.size(); i > 1; --i) {
            std::size_t j = rand.get() % i;
            std::swap(local_operations[i - 1], local_operations[j]);
        }

        std::size_t i = 0;
        do {
            if (all_done()) ready_threads[thread_id] = 1;
            if (curr_unsafe_thread.load() != -1) {
                // If the lock is taken, wait:
                // the first wait unblocks the unsafe thread,
                // the second wait lets the unsafe operation finish.
                barrier.wait();
                barrier.wait();
            }
            // Safe operations
            if (active_threads[thread_id] == 1 || local_operations[i] == 0) {
                // If the lock is free, perform various safe operations.
                std::size_t random_operation = rand.get() % 3;
                switch (random_operation) {
                case 0:
                    {
                        vector.push_back(1);
                    }
                    break;
                case 1:
                    {
                        std::size_t grow_size = rand.get() % 100;
                        vector.grow_by(grow_size, 1);
                    }
                    break;
                case 2:
                    {
                        std::size_t grow_at_least_size = vector.size() + rand.get() % 100;
                        vector.grow_to_at_least(grow_at_least_size, 1);
                    }
                    break;
                }
            } else {
                int default_unsafe_thread = -1;
                if (curr_unsafe_thread.compare_exchange_strong(default_unsafe_thread, int(thread_id))) {
                    barrier.wait();
                    // All threads are blocked, so we can execute our unsafe operation.
                    switch (local_operations[i]) {
                    case 1:
                        vector.shrink_to_fit();
                        break;
                    case 2:
                        {
                            vector.clear();
                            vector.shrink_to_fit();
                        }
                        break;
                    case 3:
                        {
                            vector.resize(0);
                        }
                        break;
                    }
                    curr_unsafe_thread = -1;
                    barrier.wait();
                }
            }
            ++i;
            if (i >= local_operations.size()) active_threads[thread_id] = 1;
        } while (!all_ready_leave() || !all_done());
    };

    utils::NativeParallelFor(threads_number, concurrent_func);

    vector.clear();
    vector.shrink_to_fit();
}

template <typename RangeType>
int reduce_vector(RangeType test_range) {
    return tbb::parallel_reduce(test_range, 0,
        [] ( const RangeType& range, int sum ) {
            for (auto it = range.begin(); it != range.end(); ++it) {
                sum += *it;
            }

            return sum;
        },
        [] ( const int& lhs, const int& rhs ) {
            return lhs + rhs;
        }
    );
}

//! Test grow_by with an iterator range
//! \brief \ref interface \ref requirement
TEST_CASE("testing serial grow_by range"){
    TestSerialGrowByRange(/*fragmented_vector = */false);
    TestSerialGrowByRange(/*fragmented_vector = */true);
}

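//! A minimal usage sketch of the serial growth interface exercised above; it relies only on
//! behavior already checked elsewhere in this file (grow_by returning an iterator to the first
//! appended element) and on grow_to_at_least never shrinking the vector.
//! \brief \ref interface
TEST_CASE("serial grow_by and grow_to_at_least usage sketch") {
    tbb::concurrent_vector<int> v;

    // grow_by(n, value) appends n copies of value and returns an iterator to the first of them.
    auto first_appended = v.grow_by(3, 7);
    CHECK(first_appended == v.begin());
    CHECK(v.size() == 3);

    // grow_to_at_least(n) only grows: requesting a size not larger than the current one is a no-op.
    v.grow_to_at_least(2);
    CHECK(v.size() == 3);
    v.grow_to_at_least(5, 7);
    CHECK(v.size() == 5);
    CHECK(std::count(v.begin(), v.end(), 7) == 5);
}
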
//! Test range-based for support
//! \brief \ref interface
TEST_CASE("testing range based for support"){
    TestRangeBasedFor();
}

//! Test that STL algorithms work with concurrent_vector iterators
//! \brief \ref interface
TEST_CASE("testing sort"){
    TestSort();
}

//! Test concurrent_vector with vector types
//! \brief \ref error_guessing
TEST_CASE("testing concurrent_vector with vector types"){
#if HAVE_m128
    TestVectorTypes<ClassWithSSE>();
#endif
#if HAVE_m256
    if (have_AVX()) TestVectorTypes<ClassWithAVX>();
#endif
}

//! Test concurrent push_back operation
//! \brief \ref error_guessing
TEST_CASE("testing find primes"){
    TestFindPrimes();
}

//! Test concurrent_vector with std::scoped_allocator_adaptor
//! \brief \ref error_guessing
TEST_CASE("test concurrent_vector with std::scoped_allocator_adaptor") {
    test_scoped_allocator();
}

//! Test concurrent_vector with various value types
//! \brief \ref requirement
TEST_CASE("testing types"){
    TestTypes();
}

//! Test mixing concurrency-safe and unsafe operations
//! \brief \ref regression \ref error_guessing
TEST_CASE("Work without hang") {
    using allocator_type = StaticSharedCountingAllocator<std::allocator<move_support_tests::Foo>>;
    std::size_t max_threads = utils::get_platform_max_threads() - 1;

    for (std::size_t threads = 1; threads < max_threads; threads = std::size_t(double(threads) * 2.7)) {
        allocator_type::init_counters();
        TestConcurrentOperationsWithUnSafeOperations<allocator_type>(threads);

        REQUIRE( allocator_type::allocations == allocator_type::frees );
        REQUIRE( allocator_type::items_allocated == allocator_type::items_freed );
        REQUIRE( allocator_type::items_constructed == allocator_type::items_destroyed );
    }
}

#if TBB_USE_EXCEPTIONS
//! Whitebox test for segment table extension
//! \brief \ref regression \ref error_guessing
TEST_CASE("Whitebox test for segment table extension") {
    using allocator_type = StaticSharedCountingAllocator<std::allocator<move_support_tests::Foo>>;
    using vector_type = tbb::concurrent_vector<move_support_tests::Foo, allocator_type>;

    std::size_t max_number_of_elements_in_embedded = 12;

    for (std::size_t i = 3; i < max_number_of_elements_in_embedded; i += 3) {
        vector_type vector;
        allocator_type::init_counters();
        allocator_type::set_limits(std::size_t(1) << (i + 1));

        try {
            for (std::size_t j = 0; j < std::size_t(1) << i; ++j) {
                vector.push_back(1);
            }
            vector.grow_by(1000);
        } catch (std::bad_alloc& ) {
            allocator_type::set_limits();
            vector_type copy_of_vector(vector);
            vector_type copy_of_copy(copy_of_vector);
            vector_type assigned_vector;
            assigned_vector = vector;
            REQUIRE(copy_of_vector == copy_of_copy);
            REQUIRE(assigned_vector == copy_of_copy);
        }
    }
}

//! Test exceptions thrown from constructors
//! \brief \ref regression \ref error_guessing
TEST_CASE("Test exception in constructors") {
    using allocator_type = StaticSharedCountingAllocator<std::allocator<double>>;
    using vector_type = tbb::concurrent_vector<double, allocator_type>;

    allocator_type::set_limits(1);

    REQUIRE_THROWS_AS( [] {
        vector_type vec1(42, 42.);
        utils::suppress_unused_warning(vec1);
    }(), const std::bad_alloc);

    auto list = { 42., 42., 42., 42., 42., 42., 42., 42., 42., 42. };
    REQUIRE_THROWS_AS( [&] {
        vector_type vec2(list.begin(), list.end());
        utils::suppress_unused_warning(vec2);
    }(), const std::bad_alloc);

    allocator_type::init_counters();
    allocator_type::set_limits(0);
    vector_type src_vec(42, 42.);
    allocator_type::set_limits(1);

    REQUIRE_THROWS_AS( [&] {
        vector_type vec3(src_vec, allocator_type{});
        utils::suppress_unused_warning(vec3);
    }(), const std::bad_alloc);
}
#endif // TBB_USE_EXCEPTIONS

//! \brief \ref regression \ref error_guessing
TEST_CASE("Reducing concurrent_vector") {
    constexpr int final_sum = 100000;
    tbb::concurrent_vector<int> vec(final_sum, 1);
    const tbb::concurrent_vector<int> cvec(vec);

    CHECK(reduce_vector(vec.range()) == final_sum);
    CHECK(reduce_vector(cvec.range()) == final_sum);
}

//! \brief \ref error_guessing
TEST_CASE("swap with not always equal allocators"){
    using allocator_type = NotAlwaysEqualAllocator<int>;
    using vector_type = tbb::concurrent_vector<int, allocator_type>;

    vector_type vec1{};
    vector_type vec2(42, 42);

    swap(vec1, vec2);

    CHECK(vec2.empty());
}

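//! A small sketch of the property the regression test below relies on: concurrent grow_by calls
//! reserve disjoint index ranges, so each thread can write through the returned iterator without
//! additional synchronization. This only exercises APIs already used elsewhere in this file.
//! \brief \ref interface
TEST_CASE("concurrent grow_by hands out disjoint ranges") {
    tbb::concurrent_vector<int> v;
    const std::size_t threads = 4;
    const int per_thread = 10;

    utils::NativeParallelFor(threads, [&] (std::size_t thread_id) {
        auto it = v.grow_by(per_thread);            // thread-safe growth
        for (int i = 0; i < per_thread; ++i, ++it) {
            *it = int(thread_id);                   // write only into this thread's own range
        }
    });

    REQUIRE(v.size() == threads * per_thread);
    // Each thread id must appear exactly per_thread times.
    for (int t = 0; t < int(threads); ++t) {
        REQUIRE(std::count(v.begin(), v.end(), t) == per_thread);
    }
}
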
// The problem was that after allocating first_block,
// no write was made to the embedded table.
// Also, two threads could be in the table extension section at once.
// NOTE: If the implementation of the vector has an issue, this test will either hang
// or fail an assertion in debug mode.
//! \brief \ref regression
TEST_CASE("Testing vector in a highly concurrent environment") {
    for (std::size_t i = 0; i < 10000; ++i) {
        tbb::concurrent_vector<int> test_vec;

        tbb::parallel_for(tbb::blocked_range<std::size_t>(0, 10000), [&] (const tbb::blocked_range<std::size_t>&) {
            test_vec.grow_by(1);
        }, tbb::static_partitioner{});

        REQUIRE(test_vec.size() == utils::get_platform_max_threads());
    }
}

#if __TBB_CPP20_CONCEPTS_PRESENT
//! \brief \ref error_guessing
TEST_CASE("container_range concept for concurrent_vector ranges") {
    static_assert(test_concepts::container_range<typename tbb::concurrent_vector<int>::range_type>);
    static_assert(test_concepts::container_range<typename tbb::concurrent_vector<int>::const_range_type>);
}
#endif // __TBB_CPP20_CONCEPTS_PRESENT
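
//! A minimal sketch of the usage pattern stressed by the "Work without hang" test above: push_back,
//! grow_by and grow_to_at_least may run concurrently, while clear() and shrink_to_fit() are not
//! thread-safe and are therefore called only after the parallel phase has finished.
//! \brief \ref interface
TEST_CASE("concurrent growth followed by serial compaction sketch") {
    tbb::concurrent_vector<int> v;
    const std::size_t threads = 4;

    utils::NativeParallelFor(threads, [&] (std::size_t) {
        for (int i = 0; i < 100; ++i) {
            v.push_back(i);                  // thread-safe growth
        }
    });
    REQUIRE(v.size() == threads * 100);

    v.shrink_to_fit();                       // safe here: no other thread is touching v anymore
    REQUIRE(v.size() == threads * 100);
}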