Searched refs: BATCH_SIZE (results 1 – 12 of 12), sorted by relevance

/dpdk/app/test/
test_lpm_perf.c
54 #define BATCH_SIZE (1 << 12) macro
671 static uint32_t ip_batch[BATCH_SIZE]; in test_lpm_perf()
673 for (j = 0; j < BATCH_SIZE; j++) in test_lpm_perf()
679 for (j = 0; j < BATCH_SIZE; j++) { in test_lpm_perf()
689 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE)); in test_lpm_perf()
695 static uint32_t ip_batch[BATCH_SIZE]; in test_lpm_perf()
699 for (j = 0; j < BATCH_SIZE; j++) in test_lpm_perf()
704 for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) { in test_lpm_perf()
716 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE)); in test_lpm_perf()
722 static uint32_t ip_batch[BATCH_SIZE]; in test_lpm_perf()
[all …]
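
The test_lpm_perf.c hits above outline a common measurement pattern: fill a BATCH_SIZE array of random addresses, time BATCH_SIZE single lookups per iteration, and report average cycles per lookup plus the miss percentage. A minimal sketch of that loop follows, assuming an already-populated struct rte_lpm *lpm; BATCH_SIZE, ITERATIONS, and the miss-counting convention are taken from the fragments above, the rest is illustrative.

#include <stdio.h>
#include <rte_lpm.h>
#include <rte_cycles.h>
#include <rte_random.h>

#define BATCH_SIZE (1 << 12)
#define ITERATIONS (1 << 10)

static void
measure_lpm_lookup(struct rte_lpm *lpm)
{
	static uint32_t ip_batch[BATCH_SIZE];
	uint32_t next_hop;
	uint64_t begin, total_time = 0;
	unsigned int i, j, count = 0;

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();	/* random IPv4 address */

		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j++) {
			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop) != 0)
				count++;	/* lookup miss */
		}
		total_time += rte_rdtsc() - begin;
	}
	printf("Average LPM lookup: %.1f cycles (fails = %.1f%%)\n",
		(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
		(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
}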
test_lpm6_perf.c
37 #define BATCH_SIZE 100000 macro
125 (double)total_time / ((double)ITERATIONS * BATCH_SIZE), in test_lpm6_perf()
126 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE)); in test_lpm6_perf()
150 (double)total_time / ((double)ITERATIONS * BATCH_SIZE), in test_lpm6_perf()
151 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE)); in test_lpm6_perf()
test_fib_perf.c
39 #define BATCH_SIZE (1 << 12) macro
380 static uint32_t ip_batch[BATCH_SIZE]; in test_fib_perf()
384 for (j = 0; j < BATCH_SIZE; j++) in test_fib_perf()
389 for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) { in test_fib_perf()
401 (double)total_time / ((double)ITERATIONS * BATCH_SIZE), in test_fib_perf()
402 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE)); in test_fib_perf()
test_fib6_perf.c
38 #define BATCH_SIZE 100000 macro
147 (double)total_time / ((double)ITERATIONS * BATCH_SIZE), in test_fib6_perf()
148 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE)); in test_fib6_perf()
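
test_fib_perf.c additionally walks the batch in BULK_SIZE strides so that one rte_fib_lookup_bulk() call amortizes per-call overhead across several addresses. A sketch of that stride loop, assuming an initialized struct rte_fib *fib created with default next hop def_nh, and that BULK_SIZE divides BATCH_SIZE evenly; the miss test against def_nh is illustrative.

#include <rte_fib.h>

#define BATCH_SIZE (1 << 12)
#define BULK_SIZE 32

/* Returns how many addresses fell through to the default next hop,
 * i.e. the misses. */
static unsigned int
fib_lookup_batch(struct rte_fib *fib, uint32_t *ip_batch, uint64_t def_nh)
{
	uint64_t next_hops[BULK_SIZE];
	unsigned int j, k, miss = 0;

	for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
		rte_fib_lookup_bulk(fib, &ip_batch[j], next_hops, BULK_SIZE);
		for (k = 0; k < BULK_SIZE; k++)
			if (next_hops[k] == def_nh)
				miss++;
	}
	return miss;
}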
test_event_timer_adapter.c
42 #define BATCH_SIZE 16 macro
1180 struct rte_event evs[BATCH_SIZE]; in stat_inc_reset_ev_enq()
1274 struct rte_event evs[BATCH_SIZE]; in event_timer_arm()
1331 struct rte_event evs[BATCH_SIZE]; in event_timer_arm_double()
1380 struct rte_event evs[BATCH_SIZE]; in event_timer_arm_expiry()
1440 struct rte_event evs[BATCH_SIZE]; in event_timer_arm_rearm()
1511 struct rte_event evs[BATCH_SIZE]; in event_timer_arm_max()
1674 struct rte_event evs[BATCH_SIZE]; in event_timer_cancel()
1742 struct rte_event evs[BATCH_SIZE]; in event_timer_cancel_double()
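
In the event timer adapter tests, BATCH_SIZE is the burst width used to drain timer-expiry events from an event port. A sketch of that drain loop, assuming a configured event device dev_id and event port port; the expected count and the zero dequeue timeout mirror the tests' polling style, and a real test would also bound the loop with a timeout.

#include <rte_eventdev.h>

#define BATCH_SIZE 16

static unsigned int
drain_expiry_events(uint8_t dev_id, uint8_t port, unsigned int expected)
{
	struct rte_event evs[BATCH_SIZE];
	unsigned int drained = 0;
	uint16_t n;

	while (drained < expected) {
		n = rte_event_dequeue_burst(dev_id, port, evs, BATCH_SIZE, 0);
		/* Each expiry event carries its armed rte_event_timer in
		 * evs[i].event_ptr; validation of it would go here. */
		drained += n;
	}
	return drained;
}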
/dpdk/drivers/event/opdl/
opdl_test.c
772 #define BATCH_SIZE 32 macro
785 struct rte_event ev[BATCH_SIZE]; in qid_basic()
840 BATCH_SIZE, in qid_basic()
855 BATCH_SIZE); in qid_basic()
899 BATCH_SIZE); in qid_basic()
904 BATCH_SIZE); in qid_basic()
905 if (num_events != BATCH_SIZE) { in qid_basic()
919 BATCH_SIZE, in qid_basic()
944 BATCH_SIZE); in qid_basic()
949 BATCH_SIZE); in qid_basic()
[all …]
/dpdk/lib/eventdev/
rte_event_crypto_adapter.c
22 #define BATCH_SIZE 32 macro
28 #define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)
167 return bufp->count >= BATCH_SIZE; in eca_circular_buffer_batch_ready()
173 return (bufp->size - bufp->count) >= BATCH_SIZE; in eca_circular_buffer_space_for_batch()
581 struct rte_event ev[BATCH_SIZE]; in eca_crypto_adapter_enq_run()
601 event_port_id, ev, BATCH_SIZE, 0); in eca_crypto_adapter_enq_run()
627 struct rte_event events[BATCH_SIZE]; in eca_ops_enqueue_burst()
635 num = RTE_MIN(num, BATCH_SIZE); in eca_ops_enqueue_burst()
727 struct rte_crypto_op *ops[BATCH_SIZE]; in eca_crypto_adapter_deq_run()
762 ops, BATCH_SIZE); in eca_crypto_adapter_deq_run()
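
The crypto adapter sizes its internal circular buffer at two batches (BATCH_SIZE + BATCH_SIZE) and gates both flushing and refilling on full-batch thresholds, as the hits at lines 167 and 173 show. A minimal sketch of those predicates; the struct below mirrors only the count/size fields visible in the hits, not the adapter's full internal layout.

#include <stdbool.h>
#include <stdint.h>

#define BATCH_SIZE 32
#define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE)

struct ops_circular_buffer {
	uint16_t count;	/* ops currently buffered */
	uint16_t size;	/* capacity, CRYPTO_ADAPTER_OPS_BUFFER_SZ */
};

/* Flush only when a full batch is available... */
static inline bool
batch_ready(const struct ops_circular_buffer *bufp)
{
	return bufp->count >= BATCH_SIZE;
}

/* ...and accept new ops only while a full batch still fits. */
static inline bool
space_for_batch(const struct ops_circular_buffer *bufp)
{
	return (bufp->size - bufp->count) >= BATCH_SIZE;
}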
rte_event_eth_rx_adapter.c
28 #define BATCH_SIZE 32 macro
30 #define ETH_EVENT_BUFFER_SIZE (6*BATCH_SIZE)
984 buf->count >= BATCH_SIZE ? in rxa_buffer_mbufs()
985 buf->count - BATCH_SIZE : 0, in rxa_buffer_mbufs()
1006 uint32_t nb_req = buf->tail + BATCH_SIZE; in rxa_pkt_buf_available()
1012 if (buf->head >= BATCH_SIZE) { in rxa_pkt_buf_available()
1030 struct rte_mbuf *mbufs[BATCH_SIZE]; in rxa_eth_rx()
1041 if (buf->count >= BATCH_SIZE) in rxa_eth_rx()
1206 if (buf->count >= BATCH_SIZE) in rxa_intr_ring_dequeue()
1329 if (buf->count >= BATCH_SIZE) in rxa_poll()
[all …]
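
The Rx adapter follows a fill-and-flush rhythm: poll up to BATCH_SIZE mbufs from an ethdev queue, wrap each in an rte_event, and flush to the event device once at least a full batch is buffered (its event buffer holds 6*BATCH_SIZE, per line 30 above). A sketch under those assumptions; the event-metadata setup is reduced to the minimum, and a real adapter also fills queue_id, sched_type, and flow id. The caller is assumed to size buf at several batches, as the adapter does.

#include <rte_ethdev.h>
#include <rte_eventdev.h>

#define BATCH_SIZE 32

static void
poll_rx_queue(uint16_t eth_dev_id, uint16_t rx_queue_id,
	      uint8_t event_dev_id, uint8_t event_port_id,
	      struct rte_event *buf, uint16_t *buf_count)
{
	struct rte_mbuf *mbufs[BATCH_SIZE];
	uint16_t n, i;

	n = rte_eth_rx_burst(eth_dev_id, rx_queue_id, mbufs, BATCH_SIZE);
	for (i = 0; i < n; i++) {
		buf[*buf_count].mbuf = mbufs[i];	/* wrap mbuf in an event */
		buf[*buf_count].event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
		(*buf_count)++;
	}

	if (*buf_count >= BATCH_SIZE) {	/* flush threshold reached */
		uint16_t sent = 0;

		while (sent < *buf_count)	/* retry partial enqueues */
			sent += rte_event_enqueue_burst(event_dev_id,
				event_port_id, buf + sent, *buf_count - sent);
		*buf_count = 0;
	}
}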
/dpdk/examples/eventdev_pipeline/
pipeline_worker_tx.c
135 struct rte_event ev[BATCH_SIZE + 1]; in worker_do_tx_single_burst()
144 nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0); in worker_do_tx_single_burst()
182 struct rte_event ev[BATCH_SIZE + 1]; in worker_do_tx_single_burst_atq()
191 nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0); in worker_do_tx_single_burst_atq()
334 struct rte_event ev[BATCH_SIZE]; in worker_do_tx_burst()
344 nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0); in worker_do_tx_burst()
390 struct rte_event ev[BATCH_SIZE]; in worker_do_tx_burst_atq()
400 nb_rx = rte_event_dequeue_burst(dev, port, ev, BATCH_SIZE, 0); in worker_do_tx_burst_atq()
pipeline_common.h
24 #define BATCH_SIZE 16 macro
pipeline_worker_generic.c
68 struct rte_event events[BATCH_SIZE]; in worker_generic_burst()
/dpdk/doc/guides/prog_guide/
eventdev.rst
348 const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
383 struct rte_event events[BATCH_SIZE];
384 … uint16_t nb_rx = rte_event_dequeue_burst(dev_id, worker_port_id, events, BATCH_SIZE, timeout);
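
Assembled into a loop, the two documentation fragments give the canonical worker shape that the eventdev_pipeline example hits above also follow: dequeue up to BATCH_SIZE events, touch each mbuf, and forward the events to the next stage. A sketch assuming a configured eventdev; process_mbuf() is a hypothetical stand-in for the application's per-packet work.

#include <rte_eventdev.h>
#include <rte_mbuf.h>

#define BATCH_SIZE 16

void process_mbuf(struct rte_mbuf *m);	/* hypothetical application hook */

static void
worker_loop(uint8_t dev_id, uint8_t worker_port_id, uint8_t next_queue_id)
{
	struct rte_event events[BATCH_SIZE];

	for (;;) {
		uint16_t i, sent = 0;
		uint16_t nb_rx = rte_event_dequeue_burst(dev_id,
				worker_port_id, events, BATCH_SIZE, 0);

		for (i = 0; i < nb_rx; i++) {
			process_mbuf(events[i].mbuf);	/* application work */
			events[i].queue_id = next_queue_id;	/* next stage */
			events[i].op = RTE_EVENT_OP_FORWARD;
		}
		while (sent < nb_rx)	/* retry partial enqueues */
			sent += rte_event_enqueue_burst(dev_id, worker_port_id,
					events + sent, nb_rx - sent);
	}
}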