/f-stack/dpdk/app/test/
test_distributor_perf.c
    170  for (i = 0; i < rte_lcore_count() - 1; i++)    in perf_test()
    184  const unsigned int num_workers = rte_lcore_count() - 1;    in quit_workers()
    210  if (rte_lcore_count() < 2) {    in test_distributor_perf()
    220  rte_lcore_count() - 1,    in test_distributor_perf()
    232  rte_lcore_count() - 1,    in test_distributor_perf()
    242  const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?    in test_distributor_perf()
    243  (BIG_BATCH * 2) - 1 : (511 * rte_lcore_count());    in test_distributor_perf()
|
test_distributor.c
    151  for (i = 0; i < rte_lcore_count() - 1; i++)    in sanity_test()
    158  if (rte_lcore_count() >= 3) {    in sanity_test()
    178  for (i = 0; i < rte_lcore_count() - 1; i++)    in sanity_test()
    207  for (i = 0; i < rte_lcore_count() - 1; i++)    in sanity_test()
    487  for (i = 0; i < rte_lcore_count() - 1; i++)    in sanity_test_with_worker_shutdown()
    726  rte_lcore_count() - 1,    in test_error_distributor_create_name()
    734  rte_lcore_count() - 1,    in test_error_distributor_create_name()
    830  if (rte_lcore_count() < 2) {    in test_distributor()
    837  rte_lcore_count() - 1,    in test_distributor()
    851  rte_lcore_count() - 1,    in test_distributor()
    [all …]
|
test_hash_multiwriter.c
    60   for (pos_core = 0; pos_core < rte_lcore_count(); pos_core++) {    in test_hash_multiwriter_worker()
    143  nb_total_tsx_insertion / rte_lcore_count();    in test_hash_multiwriter()
    180  if (i == rte_lcore_count())    in test_hash_multiwriter()
    189  if (i != rte_lcore_count()) {    in test_hash_multiwriter()
    263  if (rte_lcore_count() < 2) {    in test_hash_multiwriter_main()
|
test_mempool_perf.c
    70   #define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
    347  if (do_one_mempool_test(mp_nocache, rte_lcore_count()) < 0)    in test_mempool_perf()
    360  if (do_one_mempool_test(default_pool, rte_lcore_count()) < 0)    in test_mempool_perf()
    372  if (do_one_mempool_test(mp_cache, rte_lcore_count()) < 0)    in test_mempool_perf()
    385  if (do_one_mempool_test(mp_nocache, rte_lcore_count()) < 0)    in test_mempool_perf()
|
test_atomic.c
    533  rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count()));    in test_atomic()
    534  rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count()));    in test_atomic()
    535  rte_atomic16_set(&a16, (int16_t)(1 - (int16_t)rte_lcore_count()));    in test_atomic()
    555  rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1));    in test_atomic()
    556  rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1));    in test_atomic()
    557  rte_atomic16_set(&a16, (int16_t)(rte_lcore_count() - 1));    in test_atomic()
    594  if (iterations != 4*N*(rte_lcore_count()-1)) {    in test_atomic()
|
test_hash_readwrite.c
    67   for (i = 0; i < rte_lcore_count(); i++) {    in test_hash_readwrite_worker()
    209  int worker_cnt = rte_lcore_count() - 1;    in test_hash_readwrite_functional()
    331  for (i = 0; i < rte_lcore_count(); i++) {    in test_rw_writer()
    435  unsigned int tot_worker_lcore = rte_lcore_count() - 1;    in test_hash_readwrite_perf()
    623  if (rte_lcore_count() < 3) {    in test_hash_rw_perf_main()
    707  if (rte_lcore_count() < 3) {    in test_hash_rw_func_main()
|
test_stack.c
    314  if (rte_lcore_count() < 2) {    in test_stack_multithreaded()
    320  __func__, __LINE__, rte_lcore_count());    in test_stack_multithreaded()
    322  s = rte_stack_create("test", MAX_BULK * rte_lcore_count(), rte_socket_id(), flags);    in test_stack_multithreaded()
|
test_mcslock.c
    123  printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));    in test_mcslock_perf()
    239  if (count != (rte_lcore_count() - 1))    in test_mcslock()
|
test_hash_readwrite_lf_perf.c
    134   uint32_t max_cores = rte_lcore_count();    in get_enabled_cores_list()
    721   unsigned int tot_lcore = rte_lcore_count();    in test_hash_add_no_ks_lookup_hit()
    788   unsigned int tot_lcore = rte_lcore_count();    in test_hash_add_no_ks_lookup_miss()
    858   unsigned int tot_lcore = rte_lcore_count();    in test_hash_add_ks_lookup_hit_non_sp()
    932   unsigned int tot_lcore = rte_lcore_count();    in test_hash_add_ks_lookup_hit_sp()
    1004  unsigned int tot_lcore = rte_lcore_count();    in test_hash_add_ks_lookup_miss()
    1080  unsigned int tot_lcore = rte_lcore_count();    in test_hash_multi_add_lookup()
    1176  unsigned int tot_lcore = rte_lcore_count();    in test_hash_add_ks_lookup_hit_extbkt()
    1322  unsigned int tot_lcore = rte_lcore_count();    in test_hash_rcu_qsbr_writer_perf()
    1402  if (rte_lcore_count() < 2) {    in test_hash_readwrite_lf_perf_main()
|
test_spinlock.c
    150  printf("\nTest with lock on %u cores...\n", rte_lcore_count());    in test_spinlock_perf()
    279  if (count != ( rte_lcore_count() - 1)) {    in test_spinlock()
|
test_ticketlock.c
    155  printf("\nTest with lock on %u cores...\n", rte_lcore_count());    in test_ticketlock_perf()
    289  if (count != (rte_lcore_count() - 1))    in test_ticketlock()
|
test_func_reentrancy.c
    423  unsigned cores_save = rte_lcore_count();    in launch_test()
    476  if (rte_lcore_count() < 2) {    in test_func_reentrancy()
    480  else if (rte_lcore_count() > MAX_LCORES)    in test_func_reentrancy()
|
test_trace_perf.c
    134  data->nb_workers = rte_lcore_count() - 1;    in WORKER_DEFINE()
    154  nb_cores = rte_lcore_count();    in test_trace_perf()
|
test_barrier.c
    143  for (ln = rte_lcore_count(); ln != 0 && lpt->lc != lc; lpt++, ln--)    in plock_test1_lcore()
    200  n = rte_lcore_count();    in plock_test()
|
test_stack_perf.c
    336  printf("\n### Testing on all %u lcores ###\n", rte_lcore_count());    in __test_stack_perf()
    337  run_on_n_cores(s, bulk_push_pop, rte_lcore_count());    in __test_stack_perf()
|
test_timer_secondary.c
    120  if (rte_lcore_count() < NUM_LCORES_NEEDED) {    in test_timer_secondary()
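Almost every test above follows the same pattern: skip when too few lcores are available, then run the test body on every lcore except the main one, treating rte_lcore_count() - 1 as the worker count. A minimal sketch of that pattern, not taken from any single test; SKIP_MAIN is the spelling on recent DPDK releases (older releases use SKIP_MASTER):

    #include <stdio.h>

    #include <rte_launch.h>
    #include <rte_lcore.h>

    /* Per-lcore body, run on every lcore except the main one. */
    static int
    worker_fn(void *arg)
    {
        (void)arg;
        /* ... per-lcore work under test ... */
        return 0;
    }

    static int
    run_on_workers(void)
    {
        /* Nothing to launch workers on: skip, as the tests above do. */
        if (rte_lcore_count() < 2)
            return -1;

        const unsigned int num_workers = rte_lcore_count() - 1;
        printf("launching on %u worker lcore(s)\n", num_workers);

        /* Launch on all lcores except the main one, then join. */
        rte_eal_mp_remote_launch(worker_fn, NULL, SKIP_MAIN);
        rte_eal_mp_wait_lcore();
        return 0;
    }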
|
/f-stack/dpdk/drivers/event/octeontx2/
otx2_evdev_selftest.c
    668   nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);    in test_multi_queue_enq_multi_port_deq()
    672   rte_lcore_count() - 1);    in test_multi_queue_enq_multi_port_deq()
    922   nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);    in test_multiport_flow_sched_type_test()
    926   rte_lcore_count() - 1);    in test_multiport_flow_sched_type_test()
    1081  nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);    in test_multiport_queue_sched_type_test()
    1089  rte_lcore_count() - 1);    in test_multiport_queue_sched_type_test()
    1223  nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);    in launch_multi_port_max_stages_random_sched_type()
    1227  nr_ports, rte_lcore_count() - 1);    in launch_multi_port_max_stages_random_sched_type()
    1383  nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);    in test_producer_consumer_ingress_order_test()
    1385  if (rte_lcore_count() < 3 || nr_ports < 2) {    in test_producer_consumer_ingress_order_test()
|
/f-stack/dpdk/drivers/event/octeontx/
ssovf_evdev_selftest.c
    680   nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);    in test_multi_queue_enq_multi_port_deq()
    684   nr_ports, rte_lcore_count() - 1);    in test_multi_queue_enq_multi_port_deq()
    936   nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);    in test_multiport_flow_sched_type_test()
    940   nr_ports, rte_lcore_count() - 1);    in test_multiport_flow_sched_type_test()
    1093  nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);    in test_multiport_queue_sched_type_test()
    1102  rte_lcore_count() - 1);    in test_multiport_queue_sched_type_test()
    1235  nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);    in launch_multi_port_max_stages_random_sched_type()
    1239  nr_ports, rte_lcore_count() - 1);    in launch_multi_port_max_stages_random_sched_type()
    1391  nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);    in test_producer_consumer_ingress_order_test()
    1393  if (rte_lcore_count() < 3 || nr_ports < 2) {    in test_producer_consumer_ingress_order_test()
|
/f-stack/dpdk/examples/eventdev_pipeline/
main.c
    361  printf("\tCores available: %u\n", rte_lcore_count());    in main()
    365  if (rte_lcore_count() < cores_needed)    in main()
    366  rte_panic("Too few cores (%d < %d)\n", rte_lcore_count(),    in main()
|
/f-stack/dpdk/app/test-bbdev/
main.c
    302  rte_lcore_count());    in parse_args()
    303  tp->num_lcores = rte_lcore_count();    in parse_args()
|
/f-stack/dpdk/lib/librte_eal/include/
rte_lcore.h
    106  unsigned int rte_lcore_count(void);
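This is the prototype all of the hits above resolve to: rte_lcore_count() returns the number of lcores the EAL made available to the application, main lcore included, so it only gives a meaningful answer once rte_eal_init() has parsed the core list. A minimal usage sketch, not taken from the tree:

    #include <stdio.h>

    #include <rte_eal.h>
    #include <rte_lcore.h>

    int
    main(int argc, char **argv)
    {
        /* rte_lcore_count() is only valid after the EAL has set up
         * its lcore configuration. */
        if (rte_eal_init(argc, argv) < 0) {
            fprintf(stderr, "EAL initialization failed\n");
            return 1;
        }

        printf("lcores available: %u (running on lcore %u)\n",
               rte_lcore_count(), rte_lcore_id());

        rte_eal_cleanup();
        return 0;
    }

The count typically reflects the -l/-c core list handed to the EAL, not the number of physical cores on the machine.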
|
/f-stack/dpdk/examples/distributor/
main.c
    112  const uint16_t rxRings = 1, txRings = rte_lcore_count() - 1;    in port_init()
    482  const unsigned int num_workers = rte_lcore_count() - 4;    in print_stats()
    731  if (rte_lcore_count() < 5)    in main()
    779  rte_lcore_count() - 4,    in main()
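The rte_lcore_count() - 4 above is the distributor sample's core budget: one lcore each for rx, tx and the distributor, plus the main lcore that prints statistics, with every remaining lcore acting as a worker, which is why main() insists on at least 5 lcores. A hedged sketch of that arithmetic (the helper name is hypothetical, not part of the sample):

    #include <stdio.h>

    #include <rte_lcore.h>

    /* Hypothetical helper mirroring the sample's budget: rx, tx, the
     * distributor and the main (stats) lcore are reserved, every
     * remaining lcore becomes a worker. Returns -1 when there are not
     * enough lcores to run. */
    static int
    distributor_worker_budget(unsigned int *num_workers)
    {
        const unsigned int reserved = 4; /* rx + tx + distributor + main */
        unsigned int lcores = rte_lcore_count();

        if (lcores < reserved + 1) {
            fprintf(stderr, "need at least %u lcores, have %u\n",
                    reserved + 1, lcores);
            return -1;
        }

        *num_workers = lcores - reserved;
        return 0;
    }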
|
/f-stack/dpdk/examples/l2fwd-cat/
l2fwd-cat.c
    198  if (rte_lcore_count() > 1)    in main()
|
/f-stack/dpdk/examples/skeleton/
basicfwd.c
    202  if (rte_lcore_count() > 1)    in main()
|
/f-stack/dpdk/examples/service_cores/
main.c
    121  if (p->num_cores > rte_lcore_count() + 1) {    in apply_profile()
|