xref: /oneTBB/test/tbb/test_function_node.cpp (revision 478de5b1)
1 /*
2     Copyright (c) 2005-2021 Intel Corporation
3 
4     Licensed under the Apache License, Version 2.0 (the "License");
5     you may not use this file except in compliance with the License.
6     You may obtain a copy of the License at
7 
8         http://www.apache.org/licenses/LICENSE-2.0
9 
10     Unless required by applicable law or agreed to in writing, software
11     distributed under the License is distributed on an "AS IS" BASIS,
12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13     See the License for the specific language governing permissions and
14     limitations under the License.
15 */
16 
17 #if __INTEL_COMPILER && _MSC_VER
18 #pragma warning(disable : 2586) // decorated name length exceeded, name was truncated
19 #endif
20 
21 #include "common/config.h"
22 
23 #include "tbb/flow_graph.h"
24 #include "tbb/spin_rw_mutex.h"
25 #include "tbb/global_control.h"
26 
27 #include "common/test.h"
28 #include "common/utils.h"
29 #include "common/graph_utils.h"
30 #include "common/test_follows_and_precedes_api.h"
31 #include "common/concepts_common.h"
32 
33 
34 //! \file test_function_node.cpp
35 //! \brief Test for [flow_graph.function_node] specification
36 
37 
38 #define N 100
39 #define MAX_NODES 4
40 
41 //! Performs test on function nodes with limited concurrency and buffering
42 /** These tests check:
43     1) that the number of executing copies never exceed the concurrency limit
44     2) that the node never rejects
45     3) that no items are lost
46     and 4) all of this happens even if there are multiple predecessors and successors
47 */
48 
//! Identity body: forwards every message unchanged.
template<typename IO>
struct pass_through {
    IO operator()( const IO& item ) { return item; }
};
53 
//! Core test driver for a buffered function_node with bounded concurrency.
/** For every concurrency limit lc in [1, concurrency], builds two
    (pass_thru -> exe_node) pipelines and exercises them with 1..MAX_NODES
    senders and receivers. The harness functor asserts that at most lc copies
    of the body ever run at once; the mapped receivers assert no message is
    lost or duplicated. */
template< typename InputType, typename OutputType, typename Body >
void buffered_levels( size_t concurrency, Body body ) {

   // Do for lc = 1 to concurrency level
   for ( size_t lc = 1; lc <= concurrency; ++lc ) {
   tbb::flow::graph g;

   // Set the execute_counter back to zero in the harness
   harness_graph_executor<InputType, OutputType>::execute_count = 0;
   // Set the number of current executors to zero.
   harness_graph_executor<InputType, OutputType>::current_executors = 0;
   // Set the max allowed executors to lc.  There is a check in the functor to make sure this is never exceeded.
   harness_graph_executor<InputType, OutputType>::max_executors = lc;

   // Create the function_node with the appropriate concurrency level, and use default buffering
   tbb::flow::function_node< InputType, OutputType > exe_node( g, lc, body );
   tbb::flow::function_node<InputType, InputType> pass_thru( g, tbb::flow::unlimited, pass_through<InputType>());

   // Create a vector of identical exe_nodes and pass_thrus
   std::vector< tbb::flow::function_node< InputType, OutputType > > exe_vec(2, exe_node);
   std::vector< tbb::flow::function_node< InputType, InputType > > pass_thru_vec(2, pass_thru);
   // Attach each pass_thru to its corresponding exe_node
   for (size_t node_idx=0; node_idx<exe_vec.size(); ++node_idx) {
       tbb::flow::make_edge(pass_thru_vec[node_idx], exe_vec[node_idx]);
   }

   // TODO: why the test is executed serially for the node pairs, not concurrently?
   for (size_t node_idx=0; node_idx<exe_vec.size(); ++node_idx) {
   // For num_receivers = 1 to MAX_NODES
       for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {
           // Create num_receivers counting receivers and connect the exe_vec[node_idx] to them.
           std::vector< std::shared_ptr<harness_mapped_receiver<OutputType>> > receivers;
           for (size_t i = 0; i < num_receivers; i++) {
               receivers.push_back( std::make_shared<harness_mapped_receiver<OutputType>>(g) );
           }

           for (size_t r = 0; r < num_receivers; ++r ) {
               tbb::flow::make_edge( exe_vec[node_idx], *receivers[r] );
           }

           // Do the test with varying numbers of senders
           std::vector< std::shared_ptr<harness_counting_sender<InputType>> > senders;
           for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
               // Create num_senders senders, set their message limit each to N, and connect them to
               // pass_thru_vec[node_idx]
               senders.clear();
               for (size_t s = 0; s < num_senders; ++s ) {
                   senders.push_back( std::make_shared<harness_counting_sender<InputType>>() );
                   senders.back()->my_limit = N;
                   senders.back()->register_successor(pass_thru_vec[node_idx] );
               }

               // Initialize the receivers so they know how many senders and messages to check for
               for (size_t r = 0; r < num_receivers; ++r ) {
                   receivers[r]->initialize_map( N, num_senders );
               }

               // Do the test: one native thread per sender pushes until its limit is hit
               utils::NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) );
               g.wait_for_all();

               // confirm that each sender was requested from N times
               for (size_t s = 0; s < num_senders; ++s ) {
                   size_t n = senders[s]->my_received;
                   CHECK( n == N );
                   CHECK( senders[s]->my_receiver.load(std::memory_order_relaxed) == &pass_thru_vec[node_idx] );
               }
               // validate the receivers: each must have seen every message exactly once per sender
               for (size_t r = 0; r < num_receivers; ++r ) {
                   receivers[r]->validate();
               }
           }
           for (size_t r = 0; r < num_receivers; ++r ) {
               tbb::flow::remove_edge( exe_vec[node_idx], *receivers[r] );
           }
           // A put into the now-disconnected node must still be accepted (default buffering)
           CHECK( exe_vec[node_idx].try_put( InputType() ) == true );
           g.wait_for_all();
           for (size_t r = 0; r < num_receivers; ++r ) {
               // since it's detached, nothing should have changed
               receivers[r]->validate();
           }

       } // for num_receivers
    } // for node_idx
    } // for concurrency level lc
}
140 
const size_t Offset = 123;
std::atomic<size_t> global_execute_count;

//! Counting body used to verify that tbb::flow::copy_body observes functor state.
/** Each invocation bumps both a per-instance counter and the global counter,
    so a copy obtained via copy_body() can be compared against the global total.
    std::atomic is not copyable, so the copy constructor and assignment operator
    transfer the loaded value explicitly. */
struct inc_functor {

    std::atomic<size_t> local_execute_count;  // number of invocations seen by this instance (or copied from its source)

    inc_functor( ) { local_execute_count = 0; }
    inc_functor( const inc_functor &f ) { local_execute_count = size_t(f.local_execute_count); }
    // Fix: return *this for idiomatic chained assignment (previously returned void).
    inc_functor& operator=( const inc_functor &f ) {
        local_execute_count = size_t(f.local_execute_count);
        return *this;
    }

    // Pass-through body: counts the call and forwards the input unchanged.
    int operator()( int i ) {
       ++global_execute_count;
       ++local_execute_count;
       return i;
    }

};
158 
//! Like buffered_levels, but uses a stateful body and checks body-copy semantics.
/** Verifies that the node's internal body copy accumulates the same count as
    the global counter, and that graph::reset(rf_reset_bodies) restores the
    body to its originally-supplied state (count == Offset). */
template< typename InputType, typename OutputType >
void buffered_levels_with_copy( size_t concurrency ) {

    // Do for lc = 1 to concurrency level
    for ( size_t lc = 1; lc <= concurrency; ++lc ) {
        tbb::flow::graph g;

        // Seed both counters with a non-zero Offset so a forgotten copy shows up
        inc_functor cf;
        cf.local_execute_count = Offset;
        global_execute_count = Offset;

        // The node copies cf; all executions below go through that internal copy
        tbb::flow::function_node< InputType, OutputType > exe_node( g, lc, cf );

        for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {

            std::vector< std::shared_ptr<harness_mapped_receiver<OutputType>> > receivers;
            for (size_t i = 0; i < num_receivers; i++) {
                receivers.push_back( std::make_shared<harness_mapped_receiver<OutputType>>(g) );
            }

            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::make_edge( exe_node, *receivers[r] );
            }

            std::vector< std::shared_ptr<harness_counting_sender<InputType>> > senders;
            for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
                senders.clear();
                for (size_t s = 0; s < num_senders; ++s ) {
                    senders.push_back( std::make_shared<harness_counting_sender<InputType>>() );
                    senders.back()->my_limit = N;
                    tbb::flow::make_edge( *senders.back(), exe_node );
                }

                // Tell the receivers how many copies of each message to expect
                for (size_t r = 0; r < num_receivers; ++r ) {
                    receivers[r]->initialize_map( N, num_senders );
                }

                // One native thread per sender pushes until each hits its N-message limit
                utils::NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) );
                g.wait_for_all();

                // Each sender must have been drained exactly N times by exe_node
                for (size_t s = 0; s < num_senders; ++s ) {
                    size_t n = senders[s]->my_received;
                    CHECK( n == N );
                    CHECK( senders[s]->my_receiver.load(std::memory_order_relaxed) == &exe_node );
                }
                for (size_t r = 0; r < num_receivers; ++r ) {
                    receivers[r]->validate();
                }
            }
            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::remove_edge( exe_node, *receivers[r] );
            }
            // Detached node must still accept a put; receivers must see no change
            CHECK( exe_node.try_put( InputType() ) == true );
            g.wait_for_all();
            for (size_t r = 0; r < num_receivers; ++r ) {
                receivers[r]->validate();
            }
        }

        // validate that the local body matches the global execute_count and both are correct.
        // Expected executions per lc iteration:
        //   MAX_NODES receiver-counts x sum_{s=1..MAX_NODES}(N*s)  = N/2 * MAX_NODES^2 * (MAX_NODES+1)
        // + MAX_NODES extra detached try_put()s + the initial Offset seed.
        inc_functor body_copy = tbb::flow::copy_body<inc_functor>( exe_node );
        const size_t expected_count = N/2 * MAX_NODES * MAX_NODES * ( MAX_NODES + 1 ) + MAX_NODES + Offset;
        size_t global_count = global_execute_count;
        size_t inc_count = body_copy.local_execute_count;
        CHECK(global_count == expected_count);
        CHECK(global_count == inc_count );
        // reset(rf_reset_bodies) must restore the body to its as-constructed state
        g.reset(tbb::flow::rf_reset_bodies);
        body_copy = tbb::flow::copy_body<inc_functor>( exe_node );
        inc_count = body_copy.local_execute_count;
        CHECK_MESSAGE( Offset == inc_count, "reset(rf_reset_bodies) did not reset functor" );
    }
}
231 
//! Drives buffered_levels with every supported body kind, then the copy-body variant.
template< typename InputType, typename OutputType >
void run_buffered_levels( int c ) {
    // Body as a lambda wrapping the harness executor.
    buffered_levels<InputType,OutputType>( c, []( InputType i ) -> OutputType { return harness_graph_executor<InputType, OutputType>::func(i); } );
    // Body as a plain function pointer.
    buffered_levels<InputType,OutputType>( c, &harness_graph_executor<InputType, OutputType>::func );
    // Body as a copyable functor object.
    buffered_levels<InputType,OutputType>( c, typename harness_graph_executor<InputType, OutputType>::functor() );
    // Stateful body whose copies are verified via copy_body().
    buffered_levels_with_copy<InputType,OutputType>( c );
}
239 
240 
241 //! Performs test on executable nodes with limited concurrency
242 /** These tests check:
243     1) that the nodes will accepts puts up to the concurrency limit,
244     2) the nodes do not exceed the concurrency limit even when run with more threads (this is checked in the harness_graph_executor),
245     3) the nodes will receive puts from multiple successors simultaneously,
246     and 4) the nodes will send to multiple predecessors.
247     There is no checking of the contents of the messages for corruption.
248 */
249 
//! Core test driver for a rejecting function_node with bounded concurrency.
/** For each limit lc, blocks the body on a writer-held rw-mutex, proves the
    node accepts exactly lc puts and rejects the (lc+1)-th, then releases the
    lock and checks that registered predecessors are fully drained and every
    successor receives all messages. */
template< typename InputType, typename OutputType, typename Body >
void concurrency_levels( size_t concurrency, Body body ) {

    for ( size_t lc = 1; lc <= concurrency; ++lc ) {
        tbb::flow::graph g;

        // Set the execute_counter back to zero in the harness
        harness_graph_executor<InputType, OutputType>::execute_count = 0;
        // Set the number of current executors to zero.
        harness_graph_executor<InputType, OutputType>::current_executors = 0;
        // Set the max allowed executors to lc. There is a check in the functor to make sure this is never exceeded.
        harness_graph_executor<InputType, OutputType>::max_executors = lc;

        typedef tbb::flow::function_node< InputType, OutputType, tbb::flow::rejecting > fnode_type;
        fnode_type exe_node( g, lc, body );

        for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {

            std::vector< std::shared_ptr<harness_counting_receiver<OutputType>> > receivers;
            for (size_t i = 0; i < num_receivers; ++i) {
                receivers.push_back( std::make_shared<harness_counting_receiver<OutputType>>(g) );
            }

            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::make_edge( exe_node, *receivers[r] );
            }

            std::vector< std::shared_ptr<harness_counting_sender<InputType>> > senders;

            for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
                senders.clear();
                {
                    // Exclusively lock m to prevent exe_node from finishing
                    tbb::spin_rw_mutex::scoped_lock l(
                        harness_graph_executor<InputType, OutputType>::template mutex_holder<tbb::spin_rw_mutex>::mutex
                    );

                    // put to lc level, it will accept and then block at m
                    for ( size_t c = 0 ; c < lc ; ++c ) {
                        CHECK( exe_node.try_put( InputType() ) == true );
                    }
                    // it only accepts to lc level; the rejecting node must refuse the next put
                    CHECK( exe_node.try_put( InputType() ) == false );

                    for (size_t s = 0; s < num_senders; ++s ) {
                        senders.push_back( std::make_shared<harness_counting_sender<InputType>>() );
                        // register a sender; the node will pull from it once unblocked
                        senders.back()->my_limit = N;
                        exe_node.register_predecessor( *senders.back() );
                    }

                } // release lock at end of scope, setting the exe node free to continue
                // wait for graph to settle down
                g.wait_for_all();

                // confirm that each sender was requested from N times
                for (size_t s = 0; s < num_senders; ++s ) {
                    size_t n = senders[s]->my_received;
                    CHECK( n == N );
                    CHECK( senders[s]->my_receiver.load(std::memory_order_relaxed) == &exe_node );
                }
                // confirm that each receiver got N * num_senders + the initial lc puts
                for (size_t r = 0; r < num_receivers; ++r ) {
                    size_t n = receivers[r]->my_count;
                    CHECK( n == num_senders*N+lc );
                    receivers[r]->my_count = 0;
                }
            }
            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::remove_edge( exe_node, *receivers[r] );
            }
            // With all successors detached, an accepted put must reach no receiver
            CHECK( exe_node.try_put( InputType() ) == true );
            g.wait_for_all();
            for (size_t r = 0; r < num_receivers; ++r ) {
                CHECK( int(receivers[r]->my_count) == 0 );
            }
        }
    }
}
329 
330 
//! Drives concurrency_levels with every supported body kind (lambda, function pointer, functor).
/** The tfunc/tfunctor harness bodies block on a spin_rw_mutex so the driver can
    hold the node at its concurrency limit. */
template< typename InputType, typename OutputType >
void run_concurrency_levels( int c ) {
    concurrency_levels<InputType,OutputType>( c, []( InputType i ) -> OutputType { return harness_graph_executor<InputType, OutputType>::template tfunc<tbb::spin_rw_mutex>(i); } );
    concurrency_levels<InputType,OutputType>( c, &harness_graph_executor<InputType, OutputType>::template tfunc<tbb::spin_rw_mutex> );
    concurrency_levels<InputType,OutputType>( c, typename harness_graph_executor<InputType, OutputType>::template tfunctor<tbb::spin_rw_mutex>() );
}
337 
338 
// Message payload that can be created from an int and converted back to one,
// but is never assigned to; exercises nodes with non-assignable message types.
struct empty_no_assign {
    empty_no_assign() {}
    empty_no_assign( int ) {}
    operator int() { return 0; }
};
344 
//! NativeParallelFor body: pushes N default-constructed items into one receiver.
/** Used only against unlimited-concurrency nodes, so every try_put must succeed. */
template< typename InputType >
struct parallel_puts : private utils::NoAssign {

    // Non-owning pointer to the node under test; set once at construction.
    tbb::flow::receiver< InputType > * const my_exe_node;

    parallel_puts( tbb::flow::receiver< InputType > &exe_node ) : my_exe_node(&exe_node) {}

    void operator()( int ) const  {
        for ( int i = 0; i < N; ++i ) {
            // the nodes will accept all puts
            CHECK( my_exe_node->try_put( InputType() ) == true );
        }
    }

};
360 
361 //! Performs test on executable nodes with unlimited concurrency
362 /** These tests check:
363     1) that the nodes will accept all puts
364     2) the nodes will receive puts from multiple predecessors simultaneously,
365     and 3) the nodes will send to multiple successors.
366     There is no checking of the contents of the messages for corruption.
367 */
368 
369 template< typename InputType, typename OutputType, typename Body >
370 void unlimited_concurrency( Body body ) {
371 
372     for (unsigned p = 1; p < 2*utils::MaxThread; ++p) {
373         tbb::flow::graph g;
374         tbb::flow::function_node< InputType, OutputType, tbb::flow::rejecting > exe_node( g, tbb::flow::unlimited, body );
375 
376         for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {
377 
378             std::vector< std::shared_ptr<harness_counting_receiver<OutputType>> > receivers;
379             for (size_t i = 0; i < num_receivers; ++i) {
380                 receivers.push_back( std::make_shared<harness_counting_receiver<OutputType>>(g) );
381             }
382 
383             harness_graph_executor<InputType, OutputType>::execute_count = 0;
384 
385             for (size_t r = 0; r < num_receivers; ++r ) {
386                 tbb::flow::make_edge( exe_node, *receivers[r] );
387             }
388 
389             utils::NativeParallelFor( p, parallel_puts<InputType>(exe_node) );
390             g.wait_for_all();
391 
392             // 2) the nodes will receive puts from multiple predecessors simultaneously,
393             size_t ec = harness_graph_executor<InputType, OutputType>::execute_count;
394             CHECK( ec == p*N );
395             for (size_t r = 0; r < num_receivers; ++r ) {
396                 size_t c = receivers[r]->my_count;
397                 // 3) the nodes will send to multiple successors.
398                 CHECK( c == p*N );
399             }
400             for (size_t r = 0; r < num_receivers; ++r ) {
401                 tbb::flow::remove_edge( exe_node, *receivers[r] );
402             }
403             }
404         }
405     }
406 
//! Drives unlimited_concurrency with every supported body kind.
template< typename InputType, typename OutputType >
void run_unlimited_concurrency() {
    // max_executors == 0 disables the concurrency-limit check inside the harness functor.
    harness_graph_executor<InputType, OutputType>::max_executors = 0;
    unlimited_concurrency<InputType,OutputType>( []( InputType i ) -> OutputType { return harness_graph_executor<InputType, OutputType>::func(i); } );
    unlimited_concurrency<InputType,OutputType>( &harness_graph_executor<InputType, OutputType>::func );
    unlimited_concurrency<InputType,OutputType>( typename harness_graph_executor<InputType, OutputType>::functor() );
}
414 
//! Body functor that maps a continue_msg signal to a fixed integer value.
struct continue_msg_to_int {
    int my_int; // value returned on every invocation
    continue_msg_to_int(int x) : my_int(x) {}
    int operator()(tbb::flow::continue_msg) { return my_int; }
};
420 
421 void test_function_node_with_continue_msg_as_input() {
422     // If this function terminates, then this test is successful
423     tbb::flow::graph g;
424 
425     tbb::flow::broadcast_node<tbb::flow::continue_msg> Start(g);
426 
427     tbb::flow::function_node<tbb::flow::continue_msg, int, tbb::flow::rejecting> FN1( g, tbb::flow::serial, continue_msg_to_int(42));
428     tbb::flow::function_node<tbb::flow::continue_msg, int, tbb::flow::rejecting> FN2( g, tbb::flow::serial, continue_msg_to_int(43));
429 
430     tbb::flow::make_edge( Start, FN1 );
431     tbb::flow::make_edge( Start, FN2 );
432 
433     Start.try_put( tbb::flow::continue_msg() );
434     g.wait_for_all();
435 }
436 
//! Tests limited concurrency cases for nodes that accept data messages
/** Caps the arena at num_threads, then runs the limited-, buffered- and
    unlimited-concurrency drivers over several input/output type combinations. */
void test_concurrency(int num_threads) {
    tbb::global_control thread_limit(tbb::global_control::max_allowed_parallelism, num_threads);
    run_concurrency_levels<int,int>(num_threads);
    run_concurrency_levels<int,tbb::flow::continue_msg>(num_threads);
    run_buffered_levels<int, int>(num_threads);
    // Cover copyable-but-non-assignable payloads in every position.
    run_unlimited_concurrency<int,int>();
    run_unlimited_concurrency<int,empty_no_assign>();
    run_unlimited_concurrency<empty_no_assign,int>();
    run_unlimited_concurrency<empty_no_assign,empty_no_assign>();
    run_unlimited_concurrency<int,tbb::flow::continue_msg>();
    run_unlimited_concurrency<empty_no_assign,tbb::flow::continue_msg>();
    test_function_node_with_continue_msg_as_input();
}
451 
452 #if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET
453 #include <array>
454 #include <vector>
//! Exercises the preview follows()/precedes() construction API for function_node.
/** Builds nodes from predecessor and successor sets via the shared testing
    helpers; the precedes case also passes an explicit node priority. */
void test_follows_and_precedes_api() {
    using msg_t = tbb::flow::continue_msg;

    std::array<msg_t, 3> messages_for_follows = { {msg_t(), msg_t(), msg_t()} };
    std::vector<msg_t> messages_for_precedes = { msg_t() };

    pass_through<msg_t> pass_msg;

    follows_and_precedes_testing::test_follows
        <msg_t, tbb::flow::function_node<msg_t, msg_t>>
        (messages_for_follows, tbb::flow::unlimited, pass_msg);
    follows_and_precedes_testing::test_precedes
        <msg_t, tbb::flow::function_node<msg_t, msg_t>>
        (messages_for_precedes, tbb::flow::unlimited, pass_msg, tbb::flow::node_priority_t(1));
}
470 #endif
471 
472 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
473 
474 int function_body_f(const int&) { return 1; }
475 
//! Verifies class-template argument deduction for every function_node ctor overload.
/** Each static_assert pins the deduced specialization: input/output types come
    from the body's signature; the policy is deduced only when passed explicitly. */
template <typename Body>
void test_deduction_guides_common(Body body) {
    using namespace tbb::flow;
    graph g;

    // Plain (graph, concurrency, body) ctor deduces <int, int> from the body.
    function_node f1(g, unlimited, body);
    static_assert(std::is_same_v<decltype(f1), function_node<int, int>>);

    // Explicit policy argument must be deduced into the third template parameter.
    function_node f2(g, unlimited, body, rejecting());
    static_assert(std::is_same_v<decltype(f2), function_node<int, int, rejecting>>);

    // A priority argument must not affect the deduced policy.
    function_node f3(g, unlimited, body, node_priority_t(5));
    static_assert(std::is_same_v<decltype(f3), function_node<int, int>>);

    function_node f4(g, unlimited, body, rejecting(), node_priority_t(5));
    static_assert(std::is_same_v<decltype(f4), function_node<int, int, rejecting>>);

#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET
    // Same deductions when constructing from a follows() node set.
    function_node f5(follows(f2), unlimited, body);
    static_assert(std::is_same_v<decltype(f5), function_node<int, int>>);

    function_node f6(follows(f5), unlimited, body, rejecting());
    static_assert(std::is_same_v<decltype(f6), function_node<int, int, rejecting>>);

    function_node f7(follows(f6), unlimited, body, node_priority_t(5));
    static_assert(std::is_same_v<decltype(f7), function_node<int, int>>);

    function_node f8(follows(f7), unlimited, body, rejecting(), node_priority_t(5));
    static_assert(std::is_same_v<decltype(f8), function_node<int, int, rejecting>>);
#endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET

    // Copy construction deduces the source node's specialization.
    function_node f9(f1);
    static_assert(std::is_same_v<decltype(f9), function_node<int, int>>);
}
510 
//! Runs the deduction-guide checks with a lambda, a mutable lambda and a function pointer.
void test_deduction_guides() {
    test_deduction_guides_common([](const int&)->int { return 1; });
    test_deduction_guides_common([](const int&) mutable ->int { return 1; });
    test_deduction_guides_common(function_body_f);
}
516 
517 #endif
518 
519 //! Test various node bodies with concurrency
520 //! \brief \ref error_guessing
TEST_CASE("Concurrency test") {
    // Run the full limited/buffered/unlimited suite for each supported thread count.
    for(unsigned int p = utils::MinThread; p <= utils::MaxThread; ++p ) {
        test_concurrency(p);
    }
}
526 
//! Lightweight-policy body testing via the shared lightweight_testing harness
528 //! \brief \ref error_guessing
TEST_CASE("Lightweight testing"){
   // Shared harness check for function_node instantiated with the lightweight policy.
   lightweight_testing::test<tbb::flow::function_node>(10);
}
532 
533 #if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET
534 //! Test follows and precedes API
535 //! \brief \ref error_guessing
TEST_CASE("Flowgraph node set test"){
     // Preview API: construct function_node from follows()/precedes() node sets.
     test_follows_and_precedes_api();
}
539 #endif
540 
541 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
//! Test deduction guides
543 //! \brief \ref requirement
TEST_CASE("Deduction guides test"){
     // All checks are compile-time static_asserts; reaching the end is a pass.
     test_deduction_guides();
}
547 #endif
548 
549 //! try_release and try_consume test
550 //! \brief \ref error_guessing
TEST_CASE("try_release try_consume"){
    tbb::flow::graph g;

    tbb::flow::function_node<int, int> fn(g, tbb::flow::unlimited, [](const int&v){return v;});

    // With nothing reserved on a fresh node, both calls must report failure.
    CHECK_MESSAGE((fn.try_release()==false), "try_release should initially return false on a node");
    CHECK_MESSAGE((fn.try_consume()==false), "try_consume should initially return false on a node");
}
559 
560 #if __TBB_CPP20_CONCEPTS_PRESENT
561 //! \brief \ref error_guessing
TEST_CASE("constraints for function_node input and output") {
    // Minimal type satisfying the message requirements (copyable, default-initializable).
    struct InputObject {
        InputObject() = default;
        InputObject( const InputObject& ) = default;
    };
    struct OutputObject : test_concepts::Copyable {};

    // Well-formed combinations must instantiate; each deliberately deficient
    // type (non-copyable, non-default-initializable) must be rejected.
    static_assert(utils::well_formed_instantiation<tbb::flow::function_node, InputObject, OutputObject>);
    static_assert(utils::well_formed_instantiation<tbb::flow::function_node, int, int>);
    static_assert(!utils::well_formed_instantiation<tbb::flow::function_node, test_concepts::NonCopyable, OutputObject>);
    static_assert(!utils::well_formed_instantiation<tbb::flow::function_node, test_concepts::NonDefaultInitializable, OutputObject>);
    static_assert(!utils::well_formed_instantiation<tbb::flow::function_node, InputObject, test_concepts::NonCopyable>);
}
575 
// Satisfied iff function_node<Input, Output> is constructible with the given
// Body through every supported constructor overload (with and without priority,
// and from a follows() node set when the preview API is enabled).
template <typename Input, typename Output, typename Body>
concept can_call_function_node_ctor = requires( tbb::flow::graph& graph, std::size_t concurrency, Body body,
                                                tbb::flow::node_priority_t priority, tbb::flow::buffer_node<int>& f ) {
    tbb::flow::function_node<Input, Output>(graph, concurrency, body);
    tbb::flow::function_node<Input, Output>(graph, concurrency, body, priority);
#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET
    tbb::flow::function_node<Input, Output>(tbb::flow::follows(f), concurrency, body);
    tbb::flow::function_node<Input, Output>(tbb::flow::follows(f), concurrency, body, priority);
#endif
};
586 
//! \brief \ref error_guessing
TEST_CASE("constraints for function_node body") {
    using input_type = int;
    using output_type = int;
    using namespace test_concepts::function_node_body;

    // A conforming body is accepted; each body violating one requirement
    // (copyability, destructibility, call operator shape) must be rejected.
    static_assert(can_call_function_node_ctor<input_type, output_type, Correct<input_type, output_type>>);
    static_assert(!can_call_function_node_ctor<input_type, output_type, NonCopyable<input_type, output_type>>);
    static_assert(!can_call_function_node_ctor<input_type, output_type, NonDestructible<input_type, output_type>>);
    static_assert(!can_call_function_node_ctor<input_type, output_type, NoOperatorRoundBrackets<input_type, output_type>>);
    static_assert(!can_call_function_node_ctor<input_type, output_type, WrongInputRoundBrackets<input_type, output_type>>);
    static_assert(!can_call_function_node_ctor<input_type, output_type, WrongReturnRoundBrackets<input_type, output_type>>);
}
600 #endif // __TBB_CPP20_CONCEPTS_PRESENT
601