/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#if __INTEL_COMPILER && _MSC_VER
#pragma warning(disable : 2586) // decorated name length exceeded, name was truncated
#endif

#include "common/config.h"

#include "tbb/flow_graph.h"
#include "tbb/spin_rw_mutex.h"
#include "tbb/global_control.h"

#include "common/test.h"
#include "common/utils.h"
#include "common/graph_utils.h"
#include "common/test_follows_and_precedes_api.h"
#include "common/concepts_common.h"


//! \file test_function_node.cpp
//! \brief Test for [flow_graph.function_node] specification


#define N 100
#define MAX_NODES 4

//! Performs tests on function nodes with limited concurrency and buffering
/** These tests check:
    1) that the number of executing copies never exceeds the concurrency limit,
    2) that the node never rejects,
    3) that no items are lost,
    and 4) that all of this holds even if there are multiple predecessors and successors.
*/
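
// A minimal, self-contained sketch (illustrative only; the real tests below
// use the harness) of the behavior being verified: a function_node with a
// finite concurrency limit and the default queueing policy accepts every
// try_put() and buffers items beyond the limit instead of rejecting them.
inline void example_buffered_function_node() {
    tbb::flow::graph g;
    // At most two copies of the body execute at once; extra items are queued.
    tbb::flow::function_node<int, int> limited( g, 2, [](int v) { return v + 1; } );
    for (int i = 0; i < 10; ++i) {
        limited.try_put(i); // never rejected under the default queueing policy
    }
    g.wait_for_all(); // all ten items are processed, at most two at a time
}
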
template<typename IO>
struct pass_through {
    IO operator()(const IO& i) { return i; }
};

template< typename InputType, typename OutputType, typename Body >
void buffered_levels( size_t concurrency, Body body ) {

    // Do for lc = 1 to concurrency level
    for ( size_t lc = 1; lc <= concurrency; ++lc ) {
        tbb::flow::graph g;

        // Set the execute_count back to zero in the harness
        harness_graph_executor<InputType, OutputType>::execute_count = 0;
        // Set the number of current executors to zero.
        harness_graph_executor<InputType, OutputType>::current_executors = 0;
        // Set the max allowed executors to lc. There is a check in the functor to make sure this is never exceeded.
        harness_graph_executor<InputType, OutputType>::max_executors = lc;

        // Create the function_node with the appropriate concurrency level, and use default buffering
        tbb::flow::function_node< InputType, OutputType > exe_node( g, lc, body );
        tbb::flow::function_node< InputType, InputType > pass_thru( g, tbb::flow::unlimited, pass_through<InputType>() );

        // Create a vector of identical exe_nodes and pass_thrus
        std::vector< tbb::flow::function_node< InputType, OutputType > > exe_vec(2, exe_node);
        std::vector< tbb::flow::function_node< InputType, InputType > > pass_thru_vec(2, pass_thru);
        // Attach each pass_thru to its corresponding exe_node
        for (size_t node_idx = 0; node_idx < exe_vec.size(); ++node_idx) {
            tbb::flow::make_edge(pass_thru_vec[node_idx], exe_vec[node_idx]);
        }

        // TODO: why is the test executed serially for the node pairs rather than concurrently?
        for (size_t node_idx = 0; node_idx < exe_vec.size(); ++node_idx) {
            // For num_receivers = 1 to MAX_NODES
            for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {
                // Create num_receivers counting receivers and connect exe_vec[node_idx] to them.
                std::vector< std::shared_ptr<harness_mapped_receiver<OutputType>> > receivers;
                for (size_t i = 0; i < num_receivers; ++i) {
                    receivers.push_back( std::make_shared<harness_mapped_receiver<OutputType>>(g) );
                }

                for (size_t r = 0; r < num_receivers; ++r ) {
                    tbb::flow::make_edge( exe_vec[node_idx], *receivers[r] );
                }

                // Do the test with varying numbers of senders
                std::vector< std::shared_ptr<harness_counting_sender<InputType>> > senders;
                for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
                    // Create num_senders senders, set their message limits each to N, and connect them to
                    // pass_thru_vec[node_idx]
                    senders.clear();
                    for (size_t s = 0; s < num_senders; ++s ) {
                        senders.push_back( std::make_shared<harness_counting_sender<InputType>>() );
                        senders.back()->my_limit = N;
                        senders.back()->register_successor( pass_thru_vec[node_idx] );
                    }

                    // Initialize the receivers so they know how many senders and messages to check for
                    for (size_t r = 0; r < num_receivers; ++r ) {
                        receivers[r]->initialize_map( N, num_senders );
                    }

                    // Do the test
                    utils::NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) );
                    g.wait_for_all();

                    // confirm that each sender was pulled from N times
                    for (size_t s = 0; s < num_senders; ++s ) {
                        size_t n = senders[s]->my_received;
                        CHECK( n == N );
                        CHECK( senders[s]->my_receiver.load(std::memory_order_relaxed) == &pass_thru_vec[node_idx] );
                    }
                    // validate the receivers
                    for (size_t r = 0; r < num_receivers; ++r ) {
                        receivers[r]->validate();
                    }
                }
                for (size_t r = 0; r < num_receivers; ++r ) {
                    tbb::flow::remove_edge( exe_vec[node_idx], *receivers[r] );
                }
                CHECK( exe_vec[node_idx].try_put( InputType() ) == true );
                g.wait_for_all();
                for (size_t r = 0; r < num_receivers; ++r ) {
                    // since the receivers are detached, nothing should have changed
                    receivers[r]->validate();
                }

            } // for num_receivers
        } // for node_idx
    } // for concurrency level lc
}

const size_t Offset = 123;
std::atomic<size_t> global_execute_count;

struct inc_functor {

    std::atomic<size_t> local_execute_count;
    inc_functor() { local_execute_count = 0; }
    inc_functor( const inc_functor& f ) { local_execute_count = size_t(f.local_execute_count); }
    void operator=( const inc_functor& f ) { local_execute_count = size_t(f.local_execute_count); }

    int operator()( int i ) {
        ++global_execute_count;
        ++local_execute_count;
        return i;
    }

};

template< typename InputType, typename OutputType >
void buffered_levels_with_copy( size_t concurrency ) {

    // Do for lc = 1 to concurrency level
    for ( size_t lc = 1; lc <= concurrency; ++lc ) {
        tbb::flow::graph g;

        inc_functor cf;
        cf.local_execute_count = Offset;
        global_execute_count = Offset;

        tbb::flow::function_node< InputType, OutputType > exe_node( g, lc, cf );

        for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {

            std::vector< std::shared_ptr<harness_mapped_receiver<OutputType>> > receivers;
            for (size_t i = 0; i < num_receivers; ++i) {
                receivers.push_back( std::make_shared<harness_mapped_receiver<OutputType>>(g) );
            }

            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::make_edge( exe_node, *receivers[r] );
            }

            std::vector< std::shared_ptr<harness_counting_sender<InputType>> > senders;
            for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
                senders.clear();
                for (size_t s = 0; s < num_senders; ++s ) {
                    senders.push_back( std::make_shared<harness_counting_sender<InputType>>() );
                    senders.back()->my_limit = N;
                    tbb::flow::make_edge( *senders.back(), exe_node );
                }

                for (size_t r = 0; r < num_receivers; ++r ) {
                    receivers[r]->initialize_map( N, num_senders );
                }

                utils::NativeParallelFor( (int)num_senders, parallel_put_until_limit<InputType>(senders) );
                g.wait_for_all();

                for (size_t s = 0; s < num_senders; ++s ) {
                    size_t n = senders[s]->my_received;
                    CHECK( n == N );
                    CHECK( senders[s]->my_receiver.load(std::memory_order_relaxed) == &exe_node );
                }
                for (size_t r = 0; r < num_receivers; ++r ) {
                    receivers[r]->validate();
                }
            }
            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::remove_edge( exe_node, *receivers[r] );
            }
            CHECK( exe_node.try_put( InputType() ) == true );
            g.wait_for_all();
            for (size_t r = 0; r < num_receivers; ++r ) {
                receivers[r]->validate();
            }
        }

        // validate that the local body matches the global execute_count and both are correct
        inc_functor body_copy = tbb::flow::copy_body<inc_functor>( exe_node );
        const size_t expected_count = N/2 * MAX_NODES * MAX_NODES * ( MAX_NODES + 1 ) + MAX_NODES + Offset;
        size_t global_count = global_execute_count;
        size_t inc_count = body_copy.local_execute_count;
        CHECK( global_count == expected_count );
        CHECK( global_count == inc_count );
        g.reset(tbb::flow::rf_reset_bodies);
        body_copy = tbb::flow::copy_body<inc_functor>( exe_node );
        inc_count = body_copy.local_execute_count;
        CHECK_MESSAGE( Offset == inc_count, "reset(rf_reset_bodies) did not reset functor" );
    }
}

template< typename InputType, typename OutputType >
void run_buffered_levels( int c ) {
    buffered_levels<InputType,OutputType>( c, []( InputType i ) -> OutputType { return harness_graph_executor<InputType, OutputType>::func(i); } );
    buffered_levels<InputType,OutputType>( c, &harness_graph_executor<InputType, OutputType>::func );
    buffered_levels<InputType,OutputType>( c, typename harness_graph_executor<InputType, OutputType>::functor() );
    buffered_levels_with_copy<InputType,OutputType>( c );
}

//! Performs tests on executable nodes with limited concurrency
/** These tests check:
    1) that the nodes will accept puts up to the concurrency limit,
    2) that the nodes do not exceed the concurrency limit even when run with more threads (this is checked in the harness_graph_executor),
    3) that the nodes will receive puts from multiple predecessors simultaneously,
    and 4) that the nodes will send to multiple successors.
    There is no checking of the contents of the messages for corruption.
*/
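
// A minimal sketch (illustrative only) of the rejecting policy exercised
// below: with tbb::flow::rejecting, try_put() returns false once every
// concurrency slot is occupied, rather than buffering the message.
inline void example_rejecting_function_node() {
    tbb::flow::graph g;
    tbb::flow::function_node<int, int, tbb::flow::rejecting>
        serial_node( g, tbb::flow::serial, [](int v) { return v; } );
    serial_node.try_put(1); // accepted while the single slot is free
    // A subsequent try_put() may return false while the first item is still
    // executing; the tests below force this deterministically with a lock
    // held inside the node body.
    g.wait_for_all();
}
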
template< typename InputType, typename OutputType, typename Body >
void concurrency_levels( size_t concurrency, Body body ) {

    for ( size_t lc = 1; lc <= concurrency; ++lc ) {
        tbb::flow::graph g;

        // Set the execute_count back to zero in the harness
        harness_graph_executor<InputType, OutputType>::execute_count = 0;
        // Set the number of current executors to zero.
        harness_graph_executor<InputType, OutputType>::current_executors = 0;
        // Set the max allowed executors to lc. There is a check in the functor to make sure this is never exceeded.
        harness_graph_executor<InputType, OutputType>::max_executors = lc;

        typedef tbb::flow::function_node< InputType, OutputType, tbb::flow::rejecting > fnode_type;
        fnode_type exe_node( g, lc, body );

        for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {

            std::vector< std::shared_ptr<harness_counting_receiver<OutputType>> > receivers;
            for (size_t i = 0; i < num_receivers; ++i) {
                receivers.push_back( std::make_shared<harness_counting_receiver<OutputType>>(g) );
            }

            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::make_edge( exe_node, *receivers[r] );
            }

            std::vector< std::shared_ptr<harness_counting_sender<InputType>> > senders;

            for (size_t num_senders = 1; num_senders <= MAX_NODES; ++num_senders ) {
                senders.clear();
                {
                    // Exclusively lock m to prevent exe_node from finishing
                    tbb::spin_rw_mutex::scoped_lock l(
                        harness_graph_executor<InputType, OutputType>::template mutex_holder<tbb::spin_rw_mutex>::mutex
                    );

                    // put up to the lc level; the node will accept and then block at m
                    for ( size_t c = 0; c < lc; ++c ) {
                        CHECK( exe_node.try_put( InputType() ) == true );
                    }
                    // it only accepts up to the lc level
                    CHECK( exe_node.try_put( InputType() ) == false );

                    for (size_t s = 0; s < num_senders; ++s ) {
                        senders.push_back( std::make_shared<harness_counting_sender<InputType>>() );
                        // register a sender
                        senders.back()->my_limit = N;
                        exe_node.register_predecessor( *senders.back() );
                    }

                } // release the lock at the end of the scope, setting the exe_node free to continue
                // wait for the graph to settle down
                g.wait_for_all();

                // confirm that each sender was pulled from N times
                for (size_t s = 0; s < num_senders; ++s ) {
                    size_t n = senders[s]->my_received;
                    CHECK( n == N );
                    CHECK( senders[s]->my_receiver.load(std::memory_order_relaxed) == &exe_node );
                }
                // confirm that each receiver got N * num_senders messages plus the initial lc puts
                for (size_t r = 0; r < num_receivers; ++r ) {
                    size_t n = receivers[r]->my_count;
                    CHECK( n == num_senders*N+lc );
                    receivers[r]->my_count = 0;
                }
            }
            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::remove_edge( exe_node, *receivers[r] );
            }
            CHECK( exe_node.try_put( InputType() ) == true );
            g.wait_for_all();
            for (size_t r = 0; r < num_receivers; ++r ) {
                CHECK( int(receivers[r]->my_count) == 0 );
            }
        }
    }
}

template< typename InputType, typename OutputType >
void run_concurrency_levels( int c ) {
    concurrency_levels<InputType,OutputType>( c, []( InputType i ) -> OutputType { return harness_graph_executor<InputType, OutputType>::template tfunc<tbb::spin_rw_mutex>(i); } );
    concurrency_levels<InputType,OutputType>( c, &harness_graph_executor<InputType, OutputType>::template tfunc<tbb::spin_rw_mutex> );
    concurrency_levels<InputType,OutputType>( c, typename harness_graph_executor<InputType, OutputType>::template tfunctor<tbb::spin_rw_mutex>() );
}


struct empty_no_assign {
    empty_no_assign() {}
    empty_no_assign( int ) {}
    operator int() { return 0; }
};

template< typename InputType >
struct parallel_puts : private utils::NoAssign {

    tbb::flow::receiver< InputType > * const my_exe_node;

    parallel_puts( tbb::flow::receiver< InputType >& exe_node ) : my_exe_node(&exe_node) {}

    void operator()( int ) const {
        for ( int i = 0; i < N; ++i ) {
            // the nodes will accept all puts
            CHECK( my_exe_node->try_put( InputType() ) == true );
        }
    }

};

//! Performs tests on executable nodes with unlimited concurrency
/** These tests check:
    1) that the nodes will accept all puts,
    2) the nodes will receive puts from multiple predecessors simultaneously,
    and 3) the nodes will send to multiple successors.
    There is no checking of the contents of the messages for corruption.
*/
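
// A minimal sketch (illustrative only, not used by the tests) of unlimited
// concurrency: with tbb::flow::unlimited the node spawns a task for every
// incoming message, so try_put() always succeeds, even under the rejecting
// policy, because there is no concurrency limit to hit.
inline void example_unlimited_function_node() {
    tbb::flow::graph g;
    std::atomic<int> sum{0};
    tbb::flow::function_node<int, int> unlimited_node(
        g, tbb::flow::unlimited,
        [&sum](int v) { sum += v; return v; } ); // bodies may run concurrently
    for (int i = 1; i <= 100; ++i) {
        unlimited_node.try_put(i); // always accepted
    }
    g.wait_for_all();
    // sum is now 5050 regardless of how many bodies overlapped
}
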
template< typename InputType, typename OutputType, typename Body >
void unlimited_concurrency( Body body ) {

    for (unsigned p = 1; p < 2*utils::MaxThread; ++p) {
        tbb::flow::graph g;
        tbb::flow::function_node< InputType, OutputType, tbb::flow::rejecting > exe_node( g, tbb::flow::unlimited, body );

        for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) {

            std::vector< std::shared_ptr<harness_counting_receiver<OutputType>> > receivers;
            for (size_t i = 0; i < num_receivers; ++i) {
                receivers.push_back( std::make_shared<harness_counting_receiver<OutputType>>(g) );
            }

            harness_graph_executor<InputType, OutputType>::execute_count = 0;

            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::make_edge( exe_node, *receivers[r] );
            }

            utils::NativeParallelFor( p, parallel_puts<InputType>(exe_node) );
            g.wait_for_all();

            // 2) the nodes will receive puts from multiple predecessors simultaneously,
            size_t ec = harness_graph_executor<InputType, OutputType>::execute_count;
            CHECK( ec == p*N );
            for (size_t r = 0; r < num_receivers; ++r ) {
                size_t c = receivers[r]->my_count;
                // 3) the nodes will send to multiple successors.
                CHECK( c == p*N );
            }
            for (size_t r = 0; r < num_receivers; ++r ) {
                tbb::flow::remove_edge( exe_node, *receivers[r] );
            }
        }
    }
}

template< typename InputType, typename OutputType >
void run_unlimited_concurrency() {
    harness_graph_executor<InputType, OutputType>::max_executors = 0;
    unlimited_concurrency<InputType,OutputType>( []( InputType i ) -> OutputType { return harness_graph_executor<InputType, OutputType>::func(i); } );
    unlimited_concurrency<InputType,OutputType>( &harness_graph_executor<InputType, OutputType>::func );
    unlimited_concurrency<InputType,OutputType>( typename harness_graph_executor<InputType, OutputType>::functor() );
}

struct continue_msg_to_int {
    int my_int;
    continue_msg_to_int(int x) : my_int(x) {}
    int operator()(tbb::flow::continue_msg) { return my_int; }
};

void test_function_node_with_continue_msg_as_input() {
    // If this function terminates, then this test is successful
    tbb::flow::graph g;

    tbb::flow::broadcast_node<tbb::flow::continue_msg> Start(g);

    tbb::flow::function_node<tbb::flow::continue_msg, int, tbb::flow::rejecting> FN1( g, tbb::flow::serial, continue_msg_to_int(42) );
    tbb::flow::function_node<tbb::flow::continue_msg, int, tbb::flow::rejecting> FN2( g, tbb::flow::serial, continue_msg_to_int(43) );

    tbb::flow::make_edge( Start, FN1 );
    tbb::flow::make_edge( Start, FN2 );

    Start.try_put( tbb::flow::continue_msg() );
    g.wait_for_all();
}

//! Tests limited-concurrency cases for nodes that accept data messages
void test_concurrency(int num_threads) {
    tbb::global_control thread_limit(tbb::global_control::max_allowed_parallelism, num_threads);
    run_concurrency_levels<int,int>(num_threads);
    run_concurrency_levels<int,tbb::flow::continue_msg>(num_threads);
    run_buffered_levels<int, int>(num_threads);
    run_unlimited_concurrency<int,int>();
    run_unlimited_concurrency<int,empty_no_assign>();
    run_unlimited_concurrency<empty_no_assign,int>();
    run_unlimited_concurrency<empty_no_assign,empty_no_assign>();
    run_unlimited_concurrency<int,tbb::flow::continue_msg>();
    run_unlimited_concurrency<empty_no_assign,tbb::flow::continue_msg>();
    test_function_node_with_continue_msg_as_input();
}

#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET
#include <array>
#include <vector>
void test_follows_and_precedes_api() {
    using msg_t = tbb::flow::continue_msg;

    std::array<msg_t, 3> messages_for_follows = { {msg_t(), msg_t(), msg_t()} };
    std::vector<msg_t> messages_for_precedes = { msg_t() };

    pass_through<msg_t> pass_msg;

    follows_and_precedes_testing::test_follows
        <msg_t, tbb::flow::function_node<msg_t, msg_t>>
        (messages_for_follows, tbb::flow::unlimited, pass_msg);
    follows_and_precedes_testing::test_precedes
        <msg_t, tbb::flow::function_node<msg_t, msg_t>>
        (messages_for_precedes, tbb::flow::unlimited, pass_msg, tbb::flow::node_priority_t(1));
}
#endif


//! Test various node bodies with concurrency
//! \brief \ref error_guessing
TEST_CASE("Concurrency test") {
    for ( unsigned int p = utils::MinThread; p <= utils::MaxThread; ++p ) {
        test_concurrency(p);
    }
}

//! NativeParallelFor testing with various concurrency settings
//! \brief \ref error_guessing
TEST_CASE("Lightweight testing") {
    lightweight_testing::test<tbb::flow::function_node>(10);
}

#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET
//! Test follows and precedes API
//! \brief \ref error_guessing
TEST_CASE("Flowgraph node set test") {
    test_follows_and_precedes_api();
}
#endif

//! try_release and try_consume test
//! \brief \ref error_guessing
TEST_CASE("try_release try_consume") {
    tbb::flow::graph g;

    tbb::flow::function_node<int, int> fn(g, tbb::flow::unlimited, [](const int& v) { return v; });

    CHECK_MESSAGE((fn.try_release() == false), "try_release should initially return false on a node");
    CHECK_MESSAGE((fn.try_consume() == false), "try_consume should initially return false on a node");
}

#if __TBB_CPP20_CONCEPTS_PRESENT
//! \brief \ref error_guessing
TEST_CASE("constraints for function_node input and output") {
    struct InputObject {
        InputObject() = default;
        InputObject( const InputObject& ) = default;
    };
    struct OutputObject : test_concepts::Copyable {};

    static_assert(utils::well_formed_instantiation<tbb::flow::function_node, InputObject, OutputObject>);
    static_assert(utils::well_formed_instantiation<tbb::flow::function_node, int, int>);
    static_assert(!utils::well_formed_instantiation<tbb::flow::function_node, test_concepts::NonCopyable, OutputObject>);
    static_assert(!utils::well_formed_instantiation<tbb::flow::function_node, test_concepts::NonDefaultInitializable, OutputObject>);
    static_assert(!utils::well_formed_instantiation<tbb::flow::function_node, InputObject, test_concepts::NonCopyable>);
}

template <typename Input, typename Output, typename Body>
concept can_call_function_node_ctor = requires( tbb::flow::graph& graph, std::size_t concurrency, Body body,
                                                tbb::flow::node_priority_t priority, tbb::flow::buffer_node<int>& f ) {
    tbb::flow::function_node<Input, Output>(graph, concurrency, body);
    tbb::flow::function_node<Input, Output>(graph, concurrency, body, priority);
#if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET
    tbb::flow::function_node<Input, Output>(tbb::flow::follows(f), concurrency, body);
    tbb::flow::function_node<Input, Output>(tbb::flow::follows(f), concurrency, body, priority);
#endif
};

//! \brief \ref error_guessing
TEST_CASE("constraints for function_node body") {
    using input_type = int;
    using output_type = int;
    using namespace test_concepts::function_node_body;

    static_assert(can_call_function_node_ctor<input_type, output_type, Correct<input_type, output_type>>);
    static_assert(!can_call_function_node_ctor<input_type, output_type, NonCopyable<input_type, output_type>>);
    static_assert(!can_call_function_node_ctor<input_type, output_type, NonDestructible<input_type, output_type>>);
    static_assert(!can_call_function_node_ctor<input_type, output_type, NoOperatorRoundBrackets<input_type, output_type>>);
    static_assert(!can_call_function_node_ctor<input_type, output_type, WrongInputRoundBrackets<input_type, output_type>>);
    static_assert(!can_call_function_node_ctor<input_type, output_type, WrongReturnRoundBrackets<input_type, output_type>>);
}
#endif // __TBB_CPP20_CONCEPTS_PRESENT