/f-stack/dpdk/app/test/
test_lpm_perf.c
      44: #define BATCH_SIZE (1 << 12)   [macro]
     661: static uint32_t ip_batch[BATCH_SIZE];   in test_lpm_perf()
     663: for (j = 0; j < BATCH_SIZE; j++)   in test_lpm_perf()
     669: for (j = 0; j < BATCH_SIZE; j++) {   in test_lpm_perf()
     679: (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));   in test_lpm_perf()
     685: static uint32_t ip_batch[BATCH_SIZE];   in test_lpm_perf()
     689: for (j = 0; j < BATCH_SIZE; j++)   in test_lpm_perf()
     694: for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {   in test_lpm_perf()
     706: (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));   in test_lpm_perf()
     712: static uint32_t ip_batch[BATCH_SIZE];   in test_lpm_perf()
    [all …]
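All of these perf tests share one shape: a pool of BATCH_SIZE random addresses is generated once, then looked up either singly or in BULK_SIZE chunks, and the hit count is reported against ITERATIONS * BATCH_SIZE. A minimal sketch of the bulk variant (the BULK_SIZE value and helper name are illustrative; it assumes the rte_lpm bulk API where next-hop entries carry the RTE_LPM_LOOKUP_SUCCESS flag). The fib/lpm6 tests below follow the same shape:

    #include <rte_lpm.h>

    #define BATCH_SIZE (1 << 12)
    #define BULK_SIZE  32

    /* Walk a pre-generated address pool in BULK_SIZE chunks so each
     * rte_lpm_lookup_bulk() call amortizes per-lookup overhead. */
    static uint64_t
    count_bulk_hits(const struct rte_lpm *lpm,
                    const uint32_t ip_batch[BATCH_SIZE])
    {
        uint32_t next_hops[BULK_SIZE];
        uint64_t count = 0;
        unsigned int j, k;

        for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
            rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
            for (k = 0; k < BULK_SIZE; k++)
                if (next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)
                    count++;   /* hit-rate numerator, as in the test */
        }
        return count;
    }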
|
test_lpm6_perf.c
      26: #define BATCH_SIZE 100000   [macro]
     114: (double)total_time / ((double)ITERATIONS * BATCH_SIZE),   in test_lpm6_perf()
     115: (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));   in test_lpm6_perf()
     139: (double)total_time / ((double)ITERATIONS * BATCH_SIZE),   in test_lpm6_perf()
     140: (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));   in test_lpm6_perf()
|
test_fib_perf.c
      28: #define BATCH_SIZE (1 << 12)   [macro]
     368: static uint32_t ip_batch[BATCH_SIZE];   in test_fib_perf()
     372: for (j = 0; j < BATCH_SIZE; j++)   in test_fib_perf()
     377: for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {   in test_fib_perf()
     389: (double)total_time / ((double)ITERATIONS * BATCH_SIZE),   in test_fib_perf()
     390: (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));   in test_fib_perf()
|
test_fib6_perf.c
      26: #define BATCH_SIZE 100000   [macro]
     134: (double)total_time / ((double)ITERATIONS * BATCH_SIZE),   in test_fib6_perf()
     135: (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));   in test_fib6_perf()
|
test_event_timer_adapter.c
      30: #define BATCH_SIZE 16   [macro]
    1066: struct rte_event evs[BATCH_SIZE];   in stat_inc_reset_ev_enq()
    1160: struct rte_event evs[BATCH_SIZE];   in event_timer_arm()
    1217: struct rte_event evs[BATCH_SIZE];   in event_timer_arm_double()
    1266: struct rte_event evs[BATCH_SIZE];   in event_timer_arm_expiry()
    1326: struct rte_event evs[BATCH_SIZE];   in event_timer_arm_rearm()
    1397: struct rte_event evs[BATCH_SIZE];   in event_timer_arm_max()
    1560: struct rte_event evs[BATCH_SIZE];   in event_timer_cancel()
    1628: struct rte_event evs[BATCH_SIZE];   in event_timer_cancel_double()
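Here BATCH_SIZE (16) caps the on-stack event array that every arm/cancel test drains its timer expiries into. A hedged sketch of that shared drain loop; the helper name is illustrative, and a real test would also bound how long it polls:

    #include <rte_eventdev.h>

    #define BATCH_SIZE 16

    /* Drain timer-expiry events in at most BATCH_SIZE chunks per
     * rte_event_dequeue_burst() call, as the arm/cancel tests do. */
    static unsigned int
    drain_expiries(uint8_t dev_id, uint8_t port_id, unsigned int expected)
    {
        struct rte_event evs[BATCH_SIZE];
        unsigned int drained = 0;

        while (drained < expected) {
            uint16_t n = rte_event_dequeue_burst(dev_id, port_id, evs,
                                                 BATCH_SIZE, 0);
            drained += n;   /* zero-timeout poll; n may be 0 */
        }
        return drained;
    }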
|
/f-stack/dpdk/drivers/event/opdl/
opdl_test.c
     772: #define BATCH_SIZE 32   [macro]
     785: struct rte_event ev[BATCH_SIZE];   in qid_basic()
     840: BATCH_SIZE,   in qid_basic()
     855: BATCH_SIZE);   in qid_basic()
     899: BATCH_SIZE);   in qid_basic()
     904: BATCH_SIZE);   in qid_basic()
     905: if (num_events != BATCH_SIZE) {   in qid_basic()
     919: BATCH_SIZE,   in qid_basic()
     944: BATCH_SIZE);   in qid_basic()
     949: BATCH_SIZE);   in qid_basic()
    [all …]
|
/f-stack/dpdk/lib/librte_eventdev/
rte_event_crypto_adapter.c
      22: #define BATCH_SIZE 32   [macro]
     394: if (len == BATCH_SIZE) {   in eca_enq_to_cryptodev()
     399: BATCH_SIZE);   in eca_enq_to_cryptodev()
     473: struct rte_event ev[BATCH_SIZE];   in eca_crypto_adapter_enq_run()
     486: event_port_id, ev, BATCH_SIZE, 0);   in eca_crypto_adapter_enq_run()
     510: struct rte_event events[BATCH_SIZE];   in eca_ops_enqueue_burst()
     518: num = RTE_MIN(num, BATCH_SIZE);   in eca_ops_enqueue_burst()
     573: struct rte_crypto_op *ops[BATCH_SIZE];   in eca_crypto_adapter_deq_run()
     603: ops, BATCH_SIZE);   in eca_crypto_adapter_deq_run()
     751: BATCH_SIZE *   in eca_add_queue_pair()
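The len == BATCH_SIZE check in eca_enq_to_cryptodev() is a buffer-and-flush idiom: crypto ops are staged until a full burst is ready, then pushed to the device in one call. A sketch under the assumption of a simplified buffer struct (the adapter's real internals differ):

    #include <rte_cryptodev.h>

    #define BATCH_SIZE 32

    /* Illustrative staging buffer, not the adapter's real layout. */
    struct op_buffer {
        struct rte_crypto_op *ops[BATCH_SIZE];
        uint16_t len;
    };

    /* Accumulate ops; flush to the cryptodev only when a full
     * BATCH_SIZE burst is ready, mirroring the len == BATCH_SIZE
     * check above. */
    static void
    buffer_crypto_op(struct op_buffer *b, uint8_t cdev_id,
                     uint16_t qp_id, struct rte_crypto_op *op)
    {
        uint16_t n, i;

        b->ops[b->len++] = op;
        if (b->len == BATCH_SIZE) {
            n = rte_cryptodev_enqueue_burst(cdev_id, qp_id,
                                            b->ops, b->len);
            /* ops[n..len) were not accepted; a real adapter retries
             * them, this sketch just slides them to the front. */
            for (i = n; i < b->len; i++)
                b->ops[i - n] = b->ops[i];
            b->len -= n;
        }
    }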
|
rte_event_eth_rx_adapter.c
      26: #define BATCH_SIZE 32   [macro]
      28: #define ETH_EVENT_BUFFER_SIZE (4*BATCH_SIZE)
     813: struct rte_mbuf *mbufs[BATCH_SIZE];   in rxa_eth_rx()
     826: while (BATCH_SIZE <= (RTE_DIM(buf->events) - buf->count)) {   in rxa_eth_rx()
     827: if (buf->count >= BATCH_SIZE)   in rxa_eth_rx()
     831: n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);   in rxa_eth_rx()
     984: if (buf->count >= BATCH_SIZE)   in rxa_intr_ring_dequeue()
     987: while (BATCH_SIZE <= (RTE_DIM(buf->events) - buf->count)) {   in rxa_intr_ring_dequeue()
    1106: if (buf->count >= BATCH_SIZE)   in rxa_poll()
    1108: if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count)) {   in rxa_poll()
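The sizing here is deliberate: ETH_EVENT_BUFFER_SIZE is four bursts deep, and rxa_eth_rx() keeps polling only while a full BATCH_SIZE burst still fits, so no received mbuf has to be dropped mid-burst. A sketch of that headroom check with an illustrative buffer struct:

    #include <rte_common.h>
    #include <rte_ethdev.h>
    #include <rte_eventdev.h>

    #define BATCH_SIZE            32
    #define ETH_EVENT_BUFFER_SIZE (4 * BATCH_SIZE)

    /* Illustrative stand-in for the adapter's internal event buffer. */
    struct event_buf {
        struct rte_event events[ETH_EVENT_BUFFER_SIZE];
        uint16_t count;
    };

    /* Poll the NIC only while the buffer can absorb a full BATCH_SIZE
     * burst; stop as soon as a burst might not fit. */
    static void
    fill_event_buf(struct event_buf *buf, uint16_t port_id,
                   uint16_t queue_id)
    {
        struct rte_mbuf *mbufs[BATCH_SIZE];
        uint16_t n;

        while (BATCH_SIZE <= RTE_DIM(buf->events) - buf->count) {
            n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
            if (n == 0)
                break;
            /* ...wrap each mbuf in an rte_event here... */
            buf->count += n;
        }
    }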
|
/f-stack/dpdk/drivers/raw/ioat/
ioat_common.c
     132: uint16_t max_batches = max_desc / BATCH_SIZE;   in idxd_dev_configure()
     148: max_desc, idxd->max_batches, BATCH_SIZE);   in idxd_dev_configure()
     150: max_desc = max_batches * BATCH_SIZE;   in idxd_dev_configure()
|
rte_ioat_rawdev_fns.h
     159: #define BATCH_SIZE 64   [macro]
     176: struct rte_idxd_hw_desc ops[BATCH_SIZE];
     384: if (b->op_count >= BATCH_SIZE)   in __idxd_write_desc()
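These two files work together: idxd_dev_configure() rounds the device's descriptor count down to a whole number of BATCH_SIZE batches, and the fast-path __idxd_write_desc() refuses to overfill the current batch. A sketch of that capacity check, with a simplified batch struct and the ENOSPC-on-full convention assumed from the snippet:

    #include <errno.h>
    #include <rte_errno.h>

    #define BATCH_SIZE 64

    /* Simplified stand-in for the rte_idxd batch bookkeeping. */
    struct idxd_batch {
        uint16_t op_count;
        /* struct rte_idxd_hw_desc ops[BATCH_SIZE]; lives here */
    };

    /* A batch holds at most BATCH_SIZE descriptors; a write into a
     * full batch fails rather than spilling into the next one. */
    static int
    idxd_claim_slot(struct idxd_batch *b)
    {
        if (b->op_count >= BATCH_SIZE) {
            rte_errno = ENOSPC;   /* caller must submit the batch first */
            return 0;
        }
        b->op_count++;            /* slot claimed */
        return 1;
    }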
|
/f-stack/dpdk/examples/eventdev_pipeline/
pipeline_worker_tx.c
     123: struct rte_event ev[BATCH_SIZE + 1];   in worker_do_tx_single_burst()
     133: BATCH_SIZE, 0);   in worker_do_tx_single_burst()
     169: struct rte_event ev[BATCH_SIZE + 1];   in worker_do_tx_single_burst_atq()
     179: BATCH_SIZE, 0);   in worker_do_tx_single_burst_atq()
     310: struct rte_event ev[BATCH_SIZE];   in worker_do_tx_burst()
     321: ev, BATCH_SIZE, 0);   in worker_do_tx_burst()
     365: struct rte_event ev[BATCH_SIZE];   in worker_do_tx_burst_atq()
     377: ev, BATCH_SIZE, 0);   in worker_do_tx_burst_atq()
|
pipeline_common.h
      24: #define BATCH_SIZE 16   [macro]
|
pipeline_worker_generic.c
      65: struct rte_event events[BATCH_SIZE];   in worker_generic_burst()
|
/f-stack/dpdk/doc/guides/prog_guide/
eventdev.rst
     316: const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
     351: struct rte_event events[BATCH_SIZE];
     352: … uint16_t nb_rx = rte_event_dequeue_burst(dev_id, worker_port_id, events, BATCH_SIZE, timeout);
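The two fragments the guide quotes pair up naturally: a producer pulls up to BATCH_SIZE mbufs from an ethdev port and injects each as a new event, and a worker dequeues up to BATCH_SIZE events from its own port. A minimal sketch following the guide's naming; the queue, scheduling type, and event-type fields are illustrative choices:

    #include <rte_ethdev.h>
    #include <rte_eventdev.h>

    #define BATCH_SIZE 32

    /* Producer: receive a burst of mbufs, wrap each as a new event. */
    static void
    producer_poll(uint8_t dev_id, uint8_t prod_port_id, uint16_t eth_port)
    {
        struct rte_mbuf *mbufs[BATCH_SIZE];
        struct rte_event events[BATCH_SIZE];
        uint16_t i;

        const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs,
                                                BATCH_SIZE);
        for (i = 0; i < nb_rx; i++) {
            events[i] = (struct rte_event){
                .queue_id   = 0,                      /* first stage */
                .op         = RTE_EVENT_OP_NEW,
                .sched_type = RTE_SCHED_TYPE_ATOMIC,
                .event_type = RTE_EVENT_TYPE_ETHDEV,
                .mbuf       = mbufs[i],
            };
        }
        /* real code retries any events the device did not accept */
        rte_event_enqueue_burst(dev_id, prod_port_id, events, nb_rx);
    }

    /* Worker: dequeue up to BATCH_SIZE events and process them. */
    static void
    worker_poll(uint8_t dev_id, uint8_t worker_port_id, uint64_t timeout)
    {
        struct rte_event events[BATCH_SIZE];
        uint16_t i;

        uint16_t nb_rx = rte_event_dequeue_burst(dev_id, worker_port_id,
                                                 events, BATCH_SIZE,
                                                 timeout);
        for (i = 0; i < nb_rx; i++) {
            /* ...application work on events[i].mbuf... */
        }
    }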
|
/f-stack/freebsd/kern/
kern_umtx.c
    3532: #define BATCH_SIZE 128   [macro]
    3536: char *uaddrs[BATCH_SIZE], **upp;   in __umtx_op_nwake_private_native()
    3543: tocopy = MIN(count, BATCH_SIZE);   in __umtx_op_nwake_private_native()
    3558: uint32_t uaddrs[BATCH_SIZE], *upp;   in __umtx_op_nwake_private_compat32()
    3565: tocopy = MIN(count, BATCH_SIZE);   in __umtx_op_nwake_private_compat32()
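Both the native and compat32 variants chunk the same way: at most BATCH_SIZE (128) user addresses are copied onto the kernel stack per pass, each is woken, and the loop advances, so an arbitrarily large nwake request never needs an unbounded kernel buffer. A hedged sketch of the native path, trimmed to the essentials; the helper name is illustrative:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/proc.h>
    #include <sys/limits.h>
    #include <sys/umtx.h>

    #define BATCH_SIZE 128

    /* Copy user pointers in chunks of at most BATCH_SIZE, waking the
     * waiters at each copied address before fetching the next chunk. */
    static int
    nwake_batched(struct thread *td, char **upp, int count)
    {
        char *uaddrs[BATCH_SIZE];
        int tocopy, error, i;

        for (; count > 0; upp += tocopy, count -= tocopy) {
            tocopy = MIN(count, BATCH_SIZE);
            error = copyin(upp, uaddrs, tocopy * sizeof(char *));
            if (error != 0)
                return (error);
            for (i = 0; i < tocopy; i++)
                kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
        }
        return (0);
    }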
|