/dpdk/app/test/

test_reorder.c
   178  bufs[i] = NULL;  in test_reorder_insert()
   193  bufs[4] = NULL;  in test_reorder_insert()
   204  bufs[5] = NULL;  in test_reorder_insert()
   215  bufs[6] = NULL;  in test_reorder_insert()
   221  rte_pktmbuf_free(bufs[i]);  in test_reorder_insert()
   271  bufs[1] = NULL;  in test_reorder_drain()
   288  bufs[2] = NULL;  in test_reorder_drain()
   289  bufs[3] = NULL;  in test_reorder_drain()
   296  bufs[4] = NULL;  in test_reorder_drain()
   303  bufs[7] = NULL;  in test_reorder_drain()
   [all …]
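
The pattern in these hits is ownership transfer: once rte_reorder_insert() succeeds, the reorder buffer owns the mbuf, so the test NULLs its own array slot and later frees only the mbufs it still holds. A minimal sketch of the insert/drain cycle, assuming each mbuf already carries its sequence number (mbuf->seqn in older DPDK releases; current releases use the reorder library's dynamic mbuf field):

```c
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_reorder.h>

/* A sketch, not the test itself: insert out-of-order mbufs, then
 * drain them back in sequence order. */
static int
reorder_roundtrip(struct rte_mbuf **in, unsigned int n)
{
	struct rte_reorder_buffer *b;
	struct rte_mbuf *out[8];
	unsigned int i;
	int drained;

	/* buffer size must be a power of two */
	b = rte_reorder_create("ro_sketch", rte_socket_id(), 64);
	if (b == NULL)
		return -1;

	for (i = 0; i < n; i++) {
		/* on success the reorder buffer owns the mbuf; forget
		 * our reference, as the test does with bufs[i] = NULL */
		if (rte_reorder_insert(b, in[i]) == 0)
			in[i] = NULL;
	}

	drained = rte_reorder_drain(b, out, RTE_DIM(out));
	/* out[0..drained-1] now come back in sequence order */
	rte_reorder_free(b);
	return drained;
}
```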
|
test_distributor.c
   123  struct rte_mbuf *bufs[BURST];  in sanity_test() local
   139  bufs[i]->hash.usr = 0;  in sanity_test()
   201  bufs[i]->hash.usr = i+1;  in sanity_test()
   339  struct rte_mbuf *bufs[BURST];  in sanity_test_with_mbuf_alloc() local
   350  bufs[j]->hash.usr = (i+j) << 1;  in sanity_test_with_mbuf_alloc()
   446  struct rte_mbuf *bufs[BURST];  in sanity_test_with_worker_shutdown() local
   466  bufs[i]->hash.usr = 1;  in sanity_test_with_worker_shutdown()
   530  struct rte_mbuf *bufs[BURST];  in test_flush_with_worker_shutdown() local
   546  bufs[i]->hash.usr = 0;  in test_flush_with_worker_shutdown()
   667  &bufs[i * burst + processed],  in sanity_mark_test()
   [all …]
|
test_distributor_perf.c
   150  struct rte_mbuf *bufs[BURST];  in perf_test() local
   153  if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {  in perf_test()
   159  bufs[i]->hash.usr = i;  in perf_test()
   163  rte_distributor_process(d, bufs, BURST);  in perf_test()
   176  rte_mempool_put_bulk(p, (void *)bufs, BURST);  in perf_test()
   194  struct rte_mbuf *bufs[RTE_MAX_LCORE];  in quit_workers() local
   196  rte_mempool_get_bulk(p, (void *)bufs, num_workers);  in quit_workers()
   200  bufs[i]->hash.usr = i << 1;  in quit_workers()
   201  rte_distributor_process(d, &bufs[i], 1);  in quit_workers()
   204  rte_mempool_put_bulk(p, (void *)bufs, num_workers);  in quit_workers()
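
The perf_test() hits above trace one complete cycle: draw a burst from the mempool, tag each mbuf (the distributor keys on hash.usr, so equal tags land on the same worker), hand the burst to the distributor, and later return the mbufs in bulk. A minimal sketch of that cycle, assuming a distributor d and mempool p created elsewhere; BURST here is an assumed stand-in for the test's own constant:

```c
#include <rte_distributor.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define BURST 32	/* assumed value, not the test's definition */

static int
send_one_burst(struct rte_distributor *d, struct rte_mempool *p)
{
	struct rte_mbuf *bufs[BURST];
	unsigned int i;

	/* rte_mempool_get_bulk() is all-or-nothing: 0 on success */
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0)
		return -1;

	/* the tag decides which worker each packet is bound to */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = i;

	rte_distributor_process(d, bufs, BURST);
	return 0;
}
```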
|
/dpdk/drivers/net/dpaa2/

dpaa2_rxtx.c
   1288  mp = (*bufs)->pool;  in dpaa2_dev_tx()
   1311  bufs++;  in dpaa2_dev_tx()
   1332  bufs++;  in dpaa2_dev_tx()
   1385  bufs++;  in dpaa2_dev_tx()
   1558  (*bufs),  in dpaa2_dev_tx_multi_txq_ordered()
   1576  mp = (*bufs)->pool;  in dpaa2_dev_tx_multi_txq_ordered()
   1593  bufs++;  in dpaa2_dev_tx_multi_txq_ordered()
   1639  bufs++;  in dpaa2_dev_tx_multi_txq_ordered()
   1730  (*bufs),  in dpaa2_dev_tx_ordered()
   1757  bufs++;  in dpaa2_dev_tx_ordered()
   [all …]
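
The repeated bufs++ hits show the shape of these TX paths: instead of indexing, the driver advances the caller's mbuf array pointer as it converts each packet, peeking at (*bufs)->pool to decide how the frame descriptor must be built. A rough sketch of that walk; build_fd() is a hypothetical stand-in for the driver's descriptor setup:

```c
#include <rte_mbuf.h>

/* hypothetical stand-in for the driver's frame-descriptor setup */
static void
build_fd(struct rte_mbuf *m, struct rte_mempool *mp)
{
	(void)m;
	(void)mp;
}

static void
tx_walk(struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		/* the source pool tells the driver whether the buffer
		 * is hardware-backed or needs conversion */
		struct rte_mempool *mp = (*bufs)->pool;

		build_fd(*bufs, mp);
		bufs++;		/* walk the caller's array in place */
	}
}
```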
|
dpaa2_ethdev.h
   238  uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
   240  uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
   243  uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs,
   260  uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
   261  uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
   265  struct rte_mbuf **bufs, uint16_t nb_pkts);
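
All of these prototypes share the ethdev burst shape (queue context, mbuf array, count), which is what lets the driver swap RX/TX variants behind rte_eth_rx_burst()/rte_eth_tx_burst(). A sketch of the wiring, assuming it happens in the driver's configure path (the real selection logic branches on offloads, ordering, and loopback mode):

```c
#include <rte_ethdev.h>
#include "dpaa2_ethdev.h"	/* driver-internal header from the hits */

static void
dpaa2_select_burst(struct rte_eth_dev *eth_dev)
{
	/* one variant per mode; the ethdev fast path calls through
	 * these pointers */
	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
}
```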
|
/dpdk/drivers/mempool/dpaa2/

dpaa2_hw_mempool.c
   195  uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];  in rte_dpaa2_mbuf_release() local
   221  bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i])  in rte_dpaa2_mbuf_release()
   224  bufs[i] = (uint64_t)obj_table[i] + meta_data_size;  in rte_dpaa2_mbuf_release()
   245  bufs[i] = (uint64_t)  in rte_dpaa2_mbuf_release()
   327  size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];  in rte_dpaa2_mbuf_alloc_bulk() local
   357  ret = qbman_swp_acquire(swp, bpid, (void *)bufs,  in rte_dpaa2_mbuf_alloc_bulk()
   360  ret = qbman_swp_acquire(swp, bpid, (void *)bufs,  in rte_dpaa2_mbuf_alloc_bulk()
   376  for (i = 0; (i < ret) && bufs[i]; i++) {  in rte_dpaa2_mbuf_alloc_bulk()
   377  DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);  in rte_dpaa2_mbuf_alloc_bulk()
   379  (bufs[i] - bp_info->meta_data_size);  in rte_dpaa2_mbuf_alloc_bulk()
   [all …]
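
The release path converts each mempool object into the raw 64-bit address the QBMan hardware expects: the buffer area starts meta_data_size bytes past the object, and the address is an IOVA or a plain virtual address depending on the IOVA mode. A sketch of just that arithmetic; meta_data_size normally lives in the driver's bp_info and is assumed here as a parameter:

```c
#include <stdint.h>
#include <rte_mempool.h>

static uint64_t
obj_to_hw_addr(void *obj, uint32_t meta_data_size, int use_iova)
{
	if (use_iova)
		return (uint64_t)rte_mempool_virt2iova(obj) + meta_data_size;
	return (uint64_t)(uintptr_t)obj + meta_data_size;
}
```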
|
/dpdk/lib/distributor/

rte_distributor_single.c
   31  union rte_distributor_buffer_single *buf = &d->bufs[worker_id];  in EAL_REGISTER_TAILQ()
   45  union rte_distributor_buffer_single *buf = &d->bufs[worker_id];  in rte_distributor_poll_pkt_single()
   71  union rte_distributor_buffer_single *buf = &d->bufs[worker_id];  in rte_distributor_return_pkt_single()
   175  __atomic_store_n(&(d->bufs[wkr].bufptr64),  in process_returns()
   180  __atomic_store_n(&(d->bufs[wkr].bufptr64),  in process_returns()
   220  int64_t data = __atomic_load_n(&(d->bufs[wkr].bufptr64),  in rte_distributor_process_single()
   267  __atomic_store_n(&(d->bufs[wkr].bufptr64),  in rte_distributor_process_single()
   273  __atomic_store_n(&(d->bufs[wkr].bufptr64),  in rte_distributor_process_single()
   297  (__atomic_load_n(&(d->bufs[wkr].bufptr64),  in rte_distributor_process_single()
   300  int64_t oldbuf = d->bufs[wkr].bufptr64 >>  in rte_distributor_process_single()
   [all …]
|
rte_distributor.c
   37  struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);  in EAL_REGISTER_TAILQ()
   88  struct rte_distributor_buffer *buf = &d->bufs[worker_id];  in rte_distributor_poll_pkt()
   289  struct rte_distributor_buffer *buf = &(d->bufs[wkr]);  in handle_worker_shutdown()
   342  struct rte_distributor_buffer *buf = &(d->bufs[wkr]);  in handle_returns()
   398  struct rte_distributor_buffer *buf = &(d->bufs[wkr]);  in release()
   417  d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |  in release()
   465  if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),  in rte_distributor_process()
   467  d->bufs[wid].count = 0;  in rte_distributor_process()
   600  if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),  in rte_distributor_process()
   602  d->bufs[wid].count = 0;  in rte_distributor_process()
   [all …]
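
In both distributor implementations, d->bufs[wkr].bufptr64 is the lock-free mailbox between the scheduling core and each worker: mbuf pointers are aligned, so the low bits of each 64-bit slot are free to carry handshake flags (hence the OR in release() and the masked, acquire-ordered loads in rte_distributor_process()). An illustrative sketch of the idea with a stand-in flag, not the library's actual RTE_DISTRIB_* constants:

```c
#include <stdint.h>
#include <rte_mbuf.h>

#define SLOT_VALID 0x1	/* stand-in flag in the pointer's low bits */

/* scheduler side: publish an mbuf to a worker's slot */
static void
post_mbuf(int64_t *slot, struct rte_mbuf *m)
{
	/* release pairs with the worker's acquire load */
	__atomic_store_n(slot, (int64_t)(uintptr_t)m | SLOT_VALID,
			__ATOMIC_RELEASE);
}

/* worker side: take the mbuf if one has been posted */
static struct rte_mbuf *
take_mbuf(int64_t *slot)
{
	int64_t v = __atomic_load_n(slot, __ATOMIC_ACQUIRE);

	if (!(v & SLOT_VALID))
		return NULL;
	__atomic_store_n(slot, 0, __ATOMIC_RELEASE);
	return (struct rte_mbuf *)(uintptr_t)(v & ~(int64_t)SLOT_VALID);
}
```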
|
/dpdk/drivers/raw/cnxk_gpio/

rte_pmd_cnxk_gpio.h
   112  struct rte_rawdev_buf *bufs[1];  in __rte_pmd_gpio_enq_deq() local
   119  bufs[0] = &buf;  in __rte_pmd_gpio_enq_deq()
   121  ret = rte_rawdev_enqueue_buffers(dev_id, bufs, RTE_DIM(bufs), q);  in __rte_pmd_gpio_enq_deq()
   124  if (ret != RTE_DIM(bufs))  in __rte_pmd_gpio_enq_deq()
   130  ret = rte_rawdev_dequeue_buffers(dev_id, bufs, RTE_DIM(bufs), q);  in __rte_pmd_gpio_enq_deq()
   133  if (ret != RTE_DIM(bufs))  in __rte_pmd_gpio_enq_deq()
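
The helper wraps a single message in a one-element rte_rawdev_buf array, enqueues it, and dequeues the response on the same queue, treating anything short of a full enqueue or dequeue as failure. A self-contained sketch of the same round trip; msg and q are placeholders for the driver-specific message and queue context:

```c
#include <errno.h>
#include <rte_common.h>
#include <rte_rawdev.h>

static int
enq_deq_one(uint16_t dev_id, void *q, void *msg)
{
	struct rte_rawdev_buf buf = { .buf_addr = msg };
	struct rte_rawdev_buf *bufs[1] = { &buf };
	int ret;

	ret = rte_rawdev_enqueue_buffers(dev_id, bufs, RTE_DIM(bufs), q);
	if (ret < 0)
		return ret;
	if (ret != RTE_DIM(bufs))
		return -EIO;	/* partial enqueue counts as failure */

	ret = rte_rawdev_dequeue_buffers(dev_id, bufs, RTE_DIM(bufs), q);
	if (ret < 0)
		return ret;
	return ret == RTE_DIM(bufs) ? 0 : -EIO;
}
```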
|
/dpdk/drivers/net/null/

rte_eth_null.c
   89  if ((q == NULL) || (bufs == NULL))  in eth_null_rx()
   97  bufs[i]->data_len = (uint16_t)packet_size;  in eth_null_rx()
   98  bufs[i]->pkt_len = packet_size;  in eth_null_rx()
   99  bufs[i]->port = h->internals->port_id;  in eth_null_rx()
   114  if ((q == NULL) || (bufs == NULL))  in eth_null_copy_rx()
   125  bufs[i]->pkt_len = packet_size;  in eth_null_copy_rx()
   126  bufs[i]->port = h->internals->port_id;  in eth_null_copy_rx()
   147  if ((q == NULL) || (bufs == NULL))  in eth_null_tx()
   151  rte_pktmbuf_free(bufs[i]);  in eth_null_tx()
   165  if ((q == NULL) || (bufs == NULL))  in eth_null_copy_tx()
   [all …]
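
The null PMD's RX path fabricates packets rather than receiving them: each freshly allocated mbuf gets a fixed length and the port id, and TX simply frees whatever it is given. A sketch of the per-mbuf stamping; packet_size and port_id live in the queue's internals in the real driver:

```c
#include <rte_mbuf.h>

static void
fill_null_mbuf(struct rte_mbuf *m, uint32_t packet_size, uint16_t port_id)
{
	m->data_len = (uint16_t)packet_size;
	m->pkt_len = packet_size;	/* single segment: pkt == data */
	m->port = port_id;
}
```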
|
/dpdk/examples/distributor/

main.c
   226  struct rte_mbuf *bufs[BURST_SIZE*2];  in lcore_rx() local
   266  bufs, BURST_SIZE*2);  in lcore_rx()
   277  (void *)bufs, nb_ret, NULL);  in lcore_rx()
   288  (void *)bufs, nb_ret, NULL);  in lcore_rx()
   297  rte_pktmbuf_free(bufs[sent++]);  in lcore_rx()
   350  struct rte_mbuf *bufs[BURST_SIZE * 4];  in lcore_distributor() local
   356  (void *)bufs, BURST_SIZE*1, NULL);  in lcore_distributor()
   365  bufs, BURST_SIZE*2);  in lcore_distributor()
   372  (void *)bufs, nb_ret, NULL);  in lcore_distributor()
   380  rte_pktmbuf_free(bufs[sent++]);  in lcore_distributor()
   [all …]
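
Both lcores here pass bursts between cores through rte_rings, oversizing the local array (BURST_SIZE*2, BURST_SIZE*4) so a single enqueue can absorb more than one RX burst, and freeing whatever the ring refuses. A sketch of the lcore_rx() side of that hand-off; BURST_SIZE is an assumed stand-in for the example's constant:

```c
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_ring.h>

#define BURST_SIZE 64	/* assumed; the example defines its own */

static void
rx_to_ring(uint16_t port, struct rte_ring *out_ring)
{
	struct rte_mbuf *bufs[BURST_SIZE * 2];
	uint16_t nb_rx;
	unsigned int sent;

	nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE * 2);
	if (nb_rx == 0)
		return;

	sent = rte_ring_enqueue_burst(out_ring, (void *)bufs, nb_rx, NULL);
	/* the ring is full beyond "sent": drop the overflow */
	while (sent < nb_rx)
		rte_pktmbuf_free(bufs[sent++]);
}
```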
|
/dpdk/app/test-eventdev/

test_perf_common.h
   104  void *bufs[16] __rte_cache_aligned;\
   105  int const sz = RTE_DIM(bufs);\
   113  void *bufs[], int const buf_sz, uint8_t count)  in perf_process_last_stage() argument
   115  bufs[count++] = ev->event_ptr;  in perf_process_last_stage()
   126  rte_mempool_put_bulk(pool, bufs, buf_sz);  in perf_process_last_stage()
   134  void *bufs[], int const buf_sz, uint8_t count)  in perf_process_last_stage_latency() argument
   139  bufs[count++] = ev->event_ptr;  in perf_process_last_stage_latency()
   151  rte_mempool_put_bulk(pool, bufs, buf_sz);  in perf_process_last_stage_latency()
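
Both last-stage helpers amortize mempool overhead the same way: completed event payloads are parked in a small cache-aligned array and returned with one rte_mempool_put_bulk() call once the array fills. A minimal sketch of that batching idiom:

```c
#include <stdint.h>
#include <rte_mempool.h>

/* returns the new fill count; flushes when the stash is full */
static inline uint8_t
stash_and_flush(struct rte_mempool *pool, void *bufs[], int buf_sz,
		uint8_t count, void *done_obj)
{
	bufs[count++] = done_obj;
	if (count == buf_sz) {
		rte_mempool_put_bulk(pool, bufs, buf_sz);
		count = 0;
	}
	return count;
}
```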
|
/dpdk/drivers/net/dpaa/

dpaa_rxtx.h
   272  uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
   274  uint16_t dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs,
   276  uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
   279  struct rte_mbuf **bufs __rte_unused,
   284  struct qm_dqrr_entry **dqrr, void **bufs, int num_bufs);
   286  void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs);
   289  struct qm_dqrr_entry **dqrr, void **bufs, int num_bufs);
|
dpaa_rxtx.c
   503  void **bufs, int num_bufs)  in dpaa_rx_cb_no_prefetch() argument
   542  mbuf = bufs[i];  in dpaa_rx_cb_no_prefetch()
   559  void **bufs, int num_bufs)  in dpaa_rx_cb() argument
   582  mbuf = bufs[i];  in dpaa_rx_cb()
   614  struct rte_mbuf **bufs,  in dpaa_eth_queue_portal_rx() argument
   636  void **bufs)  in dpaa_rx_cb_parallel() argument
   653  *bufs = mbuf;  in dpaa_rx_cb_parallel()
   663  void **bufs)  in dpaa_rx_cb_atomic() argument
   687  *bufs = mbuf;  in dpaa_rx_cb_atomic()
   737  struct rte_mbuf **bufs,  in dpaa_eth_queue_rx() argument
   [all …]
|
/dpdk/drivers/mempool/dpaa/

dpaa_mempool.c
   46  struct bm_buffer bufs[8];  in dpaa_mbuf_create_pool() local
   78  ret = bman_acquire(bp, bufs, 8, 0);  in dpaa_mbuf_create_pool()
   80  ret = bman_acquire(bp, bufs, 1, 0);  in dpaa_mbuf_create_pool()
   211  struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];  in dpaa_mbuf_alloc_bulk() local
   242  ret = bman_acquire(bp_info->bp, bufs,  in dpaa_mbuf_alloc_bulk()
   245  ret = bman_acquire(bp_info->bp, bufs, count - n, 0);  in dpaa_mbuf_alloc_bulk()
   260  for (i = 0; (i < ret) && bufs[i].addr; i++) {  in dpaa_mbuf_alloc_bulk()
   265  bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);  in dpaa_mbuf_alloc_bulk()
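
The alloc path works around a hardware constraint: a single BMan acquire command returns at most a few buffers (8 in the hits above), so a larger request is satisfied in chunks, with smaller counts tried for the tail. A sketch of that loop, assuming an 8-buffer command limit; the bman_pool/bm_buffer types and bman_acquire() come from the DPAA bus driver's headers:

```c
/* driver-internal header providing bman_pool, bm_buffer, bman_acquire() */
#include <fsl_bman.h>

static int
acquire_up_to(struct bman_pool *bp, struct bm_buffer *out, int count)
{
	int n = 0;

	while (n < count) {
		int req = count - n;
		int ret;

		if (req > 8)
			req = 8;	/* per-command hardware limit */
		ret = bman_acquire(bp, &out[n], req, 0);
		if (ret <= 0)
			break;		/* pool drained or error */
		n += ret;
	}
	return n;	/* number of buffers actually acquired */
}
```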
|
/dpdk/drivers/bus/dpaa/base/qbman/

bman.c
   250  int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,  in bman_release() argument
   277  r->bufs[0].opaque =  in bman_release()
   279  (bufs[0].opaque & BMAN_BUF_MASK));  in bman_release()
   282  r->bufs[i].opaque =  in bman_release()
   283  cpu_to_be64(bufs[i].opaque & BMAN_BUF_MASK);  in bman_release()
   292  int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,  in bman_acquire() argument
   314  if (bufs) {  in bman_acquire()
   316  bufs[i].opaque =  in bman_acquire()
   317  be64_to_cpu(mcr->acquire.bufs[i].opaque);  in bman_acquire()
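
Release and acquire are mirror images at the portal boundary: on release, each buffer token is masked to the bits the hardware owns and byte-swapped to big-endian; on acquire, the swap runs the other way. A sketch of the conversion pair, assuming a 48-bit address field (which is what the masking against BMAN_BUF_MASK suggests) and using DPDK's generic byte-order helpers in place of the driver's own cpu_to_be64()/be64_to_cpu():

```c
#include <stdint.h>
#include <rte_byteorder.h>

#define BUF_ADDR_MASK 0xffffffffffffULL	/* assumed 48-bit field */

static uint64_t
token_to_portal(uint64_t opaque)
{
	return rte_cpu_to_be_64(opaque & BUF_ADDR_MASK);
}

static uint64_t
token_from_portal(uint64_t opaque)
{
	return rte_be_to_cpu_64(opaque);
}
```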
|
/dpdk/drivers/raw/cnxk_bphy/

rte_pmd_bphy.h
   160  struct rte_rawdev_buf *bufs[1];  in __rte_pmd_bphy_enq_deq() local
   167  bufs[0] = &buf;  in __rte_pmd_bphy_enq_deq()
   169  ret = rte_rawdev_enqueue_buffers(dev_id, bufs, RTE_DIM(bufs), q);  in __rte_pmd_bphy_enq_deq()
   172  if (ret != RTE_DIM(bufs))  in __rte_pmd_bphy_enq_deq()
   178  ret = rte_rawdev_dequeue_buffers(dev_id, bufs, RTE_DIM(bufs), q);  in __rte_pmd_bphy_enq_deq()
   181  if (ret != RTE_DIM(bufs))  in __rte_pmd_bphy_enq_deq()
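
This is the same single-message enqueue/dequeue round trip as the cnxk_gpio helper above; the sketch after that entry applies here unchanged.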
|
/dpdk/app/test-bbdev/

test_bbdev_perf.c
   2795  bufs->hard_outputs, bufs->soft_outputs,  in throughput_intr_lcore_ldpc_dec()
   2796  bufs->harq_inputs, bufs->harq_outputs, ref_op);  in throughput_intr_lcore_ldpc_dec()
   2889  bufs->hard_outputs, bufs->soft_outputs,  in throughput_intr_lcore_dec()
   3063  bufs->inputs, bufs->hard_outputs,  in throughput_intr_lcore_ldpc_enc()
   3146  bufs->hard_outputs, bufs->soft_outputs, ref_op);  in throughput_pmd_lcore_dec()
   3257  bufs->hard_outputs, bufs->soft_outputs,  in bler_pmd_lcore_ldpc_dec()
   3258  bufs->harq_inputs, bufs->harq_outputs, ref_op);  in bler_pmd_lcore_ldpc_dec()
   3386  bufs->hard_outputs, bufs->soft_outputs,  in throughput_pmd_lcore_ldpc_dec()
   3387  bufs->harq_inputs, bufs->harq_outputs, ref_op);  in throughput_pmd_lcore_ldpc_dec()
   3995  bufs->inputs,  in latency_test_dec()
   [all …]
|
/dpdk/drivers/net/bonding/

rte_eth_bond_pmd.c
   82  bufs + num_rx_total, nb_pkts);  in bond_ethdev_rx_burst()
   106  bd_rx_q->queue_id, bufs, nb_pkts);  in bond_ethdev_rx_burst_active_backup()
   342  bufs[j])) ||  in rx_burst_8023ad()
   353  internals, slaves[idx], bufs[j]);  in rx_burst_8023ad()
   355  rte_pktmbuf_free(bufs[j]);  in rx_burst_8023ad()
   360  memmove(&bufs[j], &bufs[j + 1], sizeof(bufs[0]) *  in rx_burst_8023ad()
   639  bufs, nb_pkts);  in bond_ethdev_tx_burst_active_backup()
   943  ether_hdr = rte_pktmbuf_mtod(bufs[j],  in bond_ethdev_tx_burst_tlb()
   1028  bufs[i];  in bond_ethdev_tx_burst_alb()
   1113  bufs[nb_pkts - 1 - num_not_send - j] =  in bond_ethdev_tx_burst_alb()
   [all …]
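
The memmove hit is the interesting one: when rx_burst_8023ad() pulls a slow-protocol (LACP) frame out of the burst for the bond itself, it shifts the tail of the array down so the application still sees a dense array. A sketch of that in-place filtering; is_lacp_frame() and consume_lacp() are hypothetical stand-ins for the driver's checks and its 802.3ad handling:

```c
#include <stdint.h>
#include <string.h>
#include <rte_mbuf.h>

/* hypothetical stand-ins for the bonding driver's slow-protocol
 * detection and handling */
static int is_lacp_frame(struct rte_mbuf *m);
static void consume_lacp(struct rte_mbuf *m);

static uint16_t
filter_control_frames(struct rte_mbuf **bufs, uint16_t nb)
{
	uint16_t j = 0;

	while (j < nb) {
		if (is_lacp_frame(bufs[j])) {
			consume_lacp(bufs[j]);	/* bond consumes it */
			nb--;
			/* keep the array dense for the caller */
			memmove(&bufs[j], &bufs[j + 1],
					sizeof(bufs[0]) * (nb - j));
		} else {
			j++;
		}
	}
	return nb;	/* packets left for the application */
}
```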
|
/dpdk/drivers/net/enic/base/

vnic_wq.c
   35  wq->bufs = (struct rte_mbuf **)rte_zmalloc_socket("wq->bufs",  in vnic_wq_alloc_bufs()
   40  if (wq->bufs == NULL)  in vnic_wq_alloc_bufs()
   53  rte_free(wq->bufs);  in vnic_wq_free()
   154  buf = &wq->bufs[to_clean];  in vnic_wq_clean()
   161  buf = &wq->bufs[to_clean];  in vnic_wq_clean()
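
The allocation hit shows the usual shadow-array setup: one mbuf pointer per work-queue descriptor, zeroed, allocated on the queue's NUMA socket so the cleanup path stays local. A sketch of that allocation; desc_count and socket_id are assumed parameters:

```c
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

static struct rte_mbuf **
alloc_buf_ring(unsigned int desc_count, int socket_id)
{
	/* zeroed so every slot starts out "no mbuf pending" */
	return (struct rte_mbuf **)rte_zmalloc_socket("wq->bufs",
			sizeof(struct rte_mbuf *) * desc_count,
			RTE_CACHE_LINE_SIZE, socket_id);
}
```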
|
/dpdk/drivers/net/ark/

ark_ethdev_tx.c
   26  struct rte_mbuf **bufs;  member
   90  queue->bufs[tx_idx] = mbuf;  in eth_ark_tx_desc_fill()
   277  queue->bufs =  in eth_ark_tx_queue_setup()
   283  if (queue->meta_q == 0 || queue->bufs == 0) {  in eth_ark_tx_queue_setup()
   287  rte_free(queue->bufs);  in eth_ark_tx_queue_setup()
   299  rte_free(queue->bufs);  in eth_ark_tx_queue_setup()
   369  rte_free(queue->bufs);  in eth_ark_tx_queue_release()
   424  mbuf = queue->bufs[queue->free_index &  in free_completed_tx()
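
The last hit (truncated) shows the classic power-of-two ring walk: a free-running index is masked with size minus one to wrap around the bufs array without a modulo. A sketch of the completion sweep; "queue_mask" is a hypothetical field name, since the hit cuts off before the real one:

```c
#include <stdint.h>
#include <rte_mbuf.h>

struct tx_ring {
	struct rte_mbuf **bufs;
	uint32_t free_index;	/* free-running, never masked in place */
	uint32_t queue_mask;	/* hypothetical name: size - 1 */
};

static void
free_completed(struct tx_ring *q, uint32_t cons_index)
{
	/* free every mbuf the hardware has finished transmitting */
	while (q->free_index != cons_index) {
		rte_pktmbuf_free(q->bufs[q->free_index & q->queue_mask]);
		q->free_index++;
	}
}
```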
|
/dpdk/examples/l2fwd-cat/

l2fwd-cat.c
   124  struct rte_mbuf *bufs[BURST_SIZE];  in lcore_main() local
   126  bufs, BURST_SIZE);  in lcore_main()
   133  bufs, nb_rx);  in lcore_main()
   139  rte_pktmbuf_free(bufs[buf]);  in lcore_main()
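
This is the same rx-burst, tx-burst, free-the-rest loop as the skeleton example at the end of this listing; a sketch follows that entry.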
|
/dpdk/drivers/net/af_xdp/

rte_eth_af_xdp.c
   211  rte_pktmbuf_free(bufs[i]);  in reserve_fill_queue_zc()
   233  struct rte_mbuf **bufs __rte_unused,  in reserve_fill_queue_cp()
   332  bufs[i] = (struct rte_mbuf *)  in af_xdp_rx_zc()
   339  rte_pktmbuf_pkt_len(bufs[i]) = len;  in af_xdp_rx_zc()
   340  rte_pktmbuf_data_len(bufs[i]) = len;  in af_xdp_rx_zc()
   403  bufs[i] = mbufs[i];  in af_xdp_rx_cp()
   420  return af_xdp_rx_zc(queue, bufs, nb_pkts);  in af_xdp_rx()
   432  return af_xdp_rx(queue, bufs, nb_pkts);  in eth_af_xdp_rx()
   442  ret = af_xdp_rx(queue, &bufs[nb_rx], n);  in eth_af_xdp_rx()
   518  mbuf = bufs[i];  in af_xdp_tx_zc()
   [all …]
|
/dpdk/drivers/net/pcap/

pcap_ethdev.c
   259  bufs[i]->port = pcap_q->port_id;  in eth_pcap_rx_infinite()
   330  bufs[num_rx] = mbuf;  in eth_pcap_rx()
   397  mbuf = bufs[i];  in eth_pcap_tx_dumper()
   446  tx_bytes += bufs[i]->pkt_len;  in eth_tx_drop()
   447  rte_pktmbuf_free(bufs[i]);  in eth_tx_drop()
   480  mbuf = bufs[i];  in eth_pcap_tx()
   813  struct rte_mbuf *bufs;  in infinite_rx_ring_free() local
   816  rte_pktmbuf_free(bufs);  in infinite_rx_ring_free()
   888  struct rte_mbuf *bufs[1];  in eth_rx_queue_setup() local
   912  if (bufs[0]->nb_segs != 1) {  in eth_rx_queue_setup()
   [all …]
|
/dpdk/examples/skeleton/

basicfwd.c
   144  struct rte_mbuf *bufs[BURST_SIZE];  in lcore_main() local
   146  bufs, BURST_SIZE);  in lcore_main()
   153  bufs, nb_rx);  in lcore_main()
   159  rte_pktmbuf_free(bufs[buf]);  in lcore_main()
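
These four hits are the whole forwarding loop of the skeleton (and of l2fwd-cat above): read a burst from one port, transmit it on the paired port, and free any mbufs the TX ring refused, since rte_eth_tx_burst() only takes ownership of the packets it accepts. A sketch, with the port pairing (port ^ 1) following the skeleton example's convention and BURST_SIZE as an assumed constant:

```c
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32	/* assumed; the example defines its own */

static void
forward_burst(uint16_t port)
{
	struct rte_mbuf *bufs[BURST_SIZE];
	uint16_t nb_rx, nb_tx, buf;

	nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE);
	if (nb_rx == 0)
		return;

	nb_tx = rte_eth_tx_burst(port ^ 1, 0, bufs, nb_rx);

	/* anything the TX ring did not accept is still ours to free */
	for (buf = nb_tx; buf < nb_rx; buf++)
		rte_pktmbuf_free(bufs[buf]);
}
```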
|