
Searched refs:tx_pkts (Results 1 – 25 of 127) sorted by relevance


/f-stack/dpdk/drivers/net/virtio/
virtio_rxtx_packed_avx.c
127 dxp->cookie = tx_pkts[i]; in virtqueue_enqueue_batch_packed_vec()
137 tx_pkts[2]->data_len, in virtqueue_enqueue_batch_packed_vec()
139 tx_pkts[1]->data_len, in virtqueue_enqueue_batch_packed_vec()
141 tx_pkts[0]->data_len, in virtqueue_enqueue_batch_packed_vec()
146 tx_pkts[3]->data_off, in virtqueue_enqueue_batch_packed_vec()
148 tx_pkts[2]->data_off, in virtqueue_enqueue_batch_packed_vec()
150 tx_pkts[1]->data_off, in virtqueue_enqueue_batch_packed_vec()
188 tx_pkts[1]->pkt_len, tx_pkts[2]->pkt_len, in virtqueue_enqueue_batch_packed_vec()
189 tx_pkts[3]->pkt_len); in virtqueue_enqueue_batch_packed_vec()
277 &tx_pkts[nb_tx])) { in virtio_xmit_pkts_packed_vec()
[all …]
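
The AVX path above writes four packed-ring descriptors per call, gathering data_len, data_off, and pkt_len from tx_pkts[0..3] into wide vector stores. A scalar sketch of the same data flow, simplified to the descriptor fields and assuming the virtio 1.1 packed descriptor layout; the struct and function names are illustrative, not the driver's:

#include <stdint.h>
#include <rte_mbuf.h>

/* Descriptor layout per the virtio 1.1 packed-ring spec (names invented). */
struct vring_packed_desc_sketch {
        uint64_t addr;
        uint32_t len;
        uint16_t id;
        uint16_t flags;
};

static inline void
enqueue_batch_sketch(struct vring_packed_desc_sketch *desc, uint16_t id_base,
                     uint16_t avail_flags, struct rte_mbuf **tx_pkts)
{
        int i;

        /* The AVX code gathers these four loads into single wide stores;
         * rte_pktmbuf_iova() folds in data_off, which the vector code
         * loads explicitly (lines 146-150). */
        for (i = 0; i < 4; i++) {
                desc[i].addr  = rte_pktmbuf_iova(tx_pkts[i]);
                desc[i].len   = tx_pkts[i]->data_len;
                desc[i].id    = id_base + i;
                desc[i].flags = avail_flags;
        }
}
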
virtio_ethdev.h
94 uint16_t virtio_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,
97 uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
99 uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
102 uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
111 uint16_t virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
120 int virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
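
Every prototype here follows DPDK's eth_tx_burst_t shape: consume up to nb_pkts mbufs from tx_pkts, return the number actually enqueued, and leave ownership of any unsent tail with the caller. A minimal caller sketch against the public rte_eth_tx_burst() API; the retry-then-free policy is this example's choice, not something the driver requires:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
           struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        uint16_t sent;

        /* The PMD may accept fewer than nb_pkts (ring full); retry once. */
        sent = rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts);
        if (sent < nb_pkts)
                sent += rte_eth_tx_burst(port_id, queue_id,
                                         &tx_pkts[sent], nb_pkts - sent);

        /* Unsent mbufs still belong to the caller; drop them here. */
        for (uint16_t i = sent; i < nb_pkts; i++)
                rte_pktmbuf_free(tx_pkts[i]);

        return sent;
}
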
/f-stack/dpdk/drivers/net/axgbe/
axgbe_rxtx_vec_sse.c
59 axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, in axgbe_xmit_pkts_vec() argument
79 for (i = 0; i < loop; ++i, ++idx, ++tx_pkts) { in axgbe_xmit_pkts_vec()
80 axgbe_vec_tx(&txq->desc[idx], *tx_pkts); in axgbe_xmit_pkts_vec()
81 txq->sw_ring[idx] = *tx_pkts; in axgbe_xmit_pkts_vec()
86 for (i = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) { in axgbe_xmit_pkts_vec()
87 axgbe_vec_tx(&txq->desc[idx], *tx_pkts); in axgbe_xmit_pkts_vec()
88 txq->sw_ring[idx] = *tx_pkts; in axgbe_xmit_pkts_vec()
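
The two consecutive loops (lines 79-88) are the standard ring-wrap idiom: commit descriptors up to the end of the ring, reset the index to zero, then commit the remainder. A self-contained sketch, with a placeholder descriptor type and fill helper standing in for axgbe_vec_tx():

#include <stdint.h>
#include <rte_common.h>
#include <rte_mbuf.h>

struct desc_sketch {
        uint64_t addr;     /* DMA address of the packet data */
        uint64_t ctrl;     /* length + control bits; layout invented */
};

static void
fill_desc(struct desc_sketch *d, struct rte_mbuf *m)
{
        d->addr = rte_pktmbuf_iova(m);
        d->ctrl = m->data_len;
}

static void
commit_with_wrap(struct desc_sketch *desc, struct rte_mbuf **sw_ring,
                 uint16_t ring_size, uint16_t idx,
                 struct rte_mbuf **tx_pkts, uint16_t nb_commit)
{
        uint16_t i, loop;

        /* First loop: fill up to the end of the descriptor ring. */
        loop = RTE_MIN(nb_commit, (uint16_t)(ring_size - idx));
        for (i = 0; i < loop; ++i, ++idx, ++tx_pkts) {
                fill_desc(&desc[idx], *tx_pkts);
                sw_ring[idx] = *tx_pkts;  /* keep the mbuf for completion */
        }

        /* Second loop: wrap to slot 0 and fill whatever is left. */
        nb_commit -= loop;
        for (i = 0, idx = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) {
                fill_desc(&desc[idx], *tx_pkts);
                sw_ring[idx] = *tx_pkts;
        }
}
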
/f-stack/dpdk/drivers/net/octeontx2/
otx2_tx.c
38 otx2_nix_xmit_prepare_tso(tx_pkts[i], flags); in nix_xmit_pkts()
48 otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags); in nix_xmit_pkts()
51 tx_pkts[i]->ol_flags, 4, flags); in nix_xmit_pkts()
77 otx2_nix_xmit_prepare_tso(tx_pkts[i], flags); in nix_xmit_pkts_mseg()
90 tx_pkts[i]->ol_flags, segdw, in nix_xmit_pkts_mseg()
160 mbuf0 = (uint64_t *)tx_pkts[0]; in nix_xmit_pkts_vector()
161 mbuf1 = (uint64_t *)tx_pkts[1]; in nix_xmit_pkts_vector()
162 mbuf2 = (uint64_t *)tx_pkts[2]; in nix_xmit_pkts_vector()
163 mbuf3 = (uint64_t *)tx_pkts[3]; in nix_xmit_pkts_vector()
939 tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP; in nix_xmit_pkts_vector()
[all …]
/f-stack/dpdk/drivers/net/bnxt/
bnxt_rxtx_vec_sse.c
350 bnxt_xmit_fixed_burst_vec(struct bnxt_tx_queue *txq, struct rte_mbuf **tx_pkts, in bnxt_xmit_fixed_burst_vec() argument
378 bnxt_xmit_one(tx_pkts[0], txbd++, tx_buf++); in bnxt_xmit_fixed_burst_vec()
379 bnxt_xmit_one(tx_pkts[1], txbd++, tx_buf++); in bnxt_xmit_fixed_burst_vec()
380 bnxt_xmit_one(tx_pkts[2], txbd++, tx_buf++); in bnxt_xmit_fixed_burst_vec()
381 bnxt_xmit_one(tx_pkts[3], txbd++, tx_buf++); in bnxt_xmit_fixed_burst_vec()
384 tx_pkts += RTE_BNXT_DESCS_PER_LOOP; in bnxt_xmit_fixed_burst_vec()
388 bnxt_xmit_one(tx_pkts[0], txbd++, tx_buf++); in bnxt_xmit_fixed_burst_vec()
390 tx_pkts++; in bnxt_xmit_fixed_burst_vec()
407 bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, in bnxt_xmit_pkts_vec() argument
440 ret = bnxt_xmit_fixed_burst_vec(txq, &tx_pkts[nb_sent], num); in bnxt_xmit_pkts_vec()
bnxt_txr.h
50 uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
53 uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
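
bnxt_xmit_fixed_burst_vec() unrolls the descriptor writes by RTE_BNXT_DESCS_PER_LOOP (four, per lines 378-384) and finishes the tail one packet at a time. The shape in isolation, with a stub standing in for bnxt_xmit_one():

#include <stdint.h>
#include <rte_mbuf.h>

/* stub for bnxt_xmit_one(): write one descriptor, stash the mbuf */
static void
xmit_one_sketch(struct rte_mbuf *m)
{
        (void)m;
}

static uint16_t
fixed_burst_sketch(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        uint16_t to_send = nb_pkts;

        /* Main loop, unrolled by four. */
        while (nb_pkts >= 4) {
                xmit_one_sketch(tx_pkts[0]);
                xmit_one_sketch(tx_pkts[1]);
                xmit_one_sketch(tx_pkts[2]);
                xmit_one_sketch(tx_pkts[3]);
                tx_pkts += 4;
                nb_pkts -= 4;
        }

        /* Tail: the remaining 0-3 packets, one at a time. */
        while (nb_pkts-- > 0)
                xmit_one_sketch(*tx_pkts++);

        return to_send;
}
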
/f-stack/dpdk/drivers/net/hns3/
hns3_rxtx_vec_neon.h
29 struct rte_mbuf **__restrict tx_pkts, in hns3_xmit_fixed_burst_vec() argument
61 for (i = 0; i < n; i++, tx_pkts++, tx_desc++) { in hns3_xmit_fixed_burst_vec()
62 hns3_vec_tx(tx_desc, *tx_pkts); in hns3_xmit_fixed_burst_vec()
63 tx_entry[i].mbuf = *tx_pkts; in hns3_xmit_fixed_burst_vec()
72 for (i = 0; i < nb_commit; i++, tx_pkts++, tx_desc++) { in hns3_xmit_fixed_burst_vec()
73 hns3_vec_tx(tx_desc, *tx_pkts); in hns3_xmit_fixed_burst_vec()
74 tx_entry[i].mbuf = *tx_pkts; in hns3_xmit_fixed_burst_vec()
/f-stack/dpdk/drivers/net/nfb/
nfb_stats.c
39 stats->q_opackets[i] = tx_queue[i].tx_pkts; in nfb_eth_stats_get()
42 tx_total += tx_queue[i].tx_pkts; in nfb_eth_stats_get()
73 tx_queue[i].tx_pkts = 0; in nfb_eth_stats_reset()
nfb_tx.h
21 volatile uint64_t tx_pkts; /* packets transmitted */ member
188 ndp->tx_pkts += num_tx; in nfb_eth_ndp_tx()
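
nfb keeps a volatile per-queue tx_pkts counter that the datapath increments (line 188) and the stats callbacks read or reset. A sketch of the aggregation side against the standard rte_eth_stats layout; only the queue struct is invented:

#include <stdint.h>
#include <rte_ethdev.h>

/* queue struct mirroring the tx_pkts member above (illustrative) */
struct txq_sketch {
        volatile uint64_t tx_pkts;   /* packets transmitted */
};

static void
stats_get_sketch(struct txq_sketch *tx_queue, uint16_t nb_queues,
                 struct rte_eth_stats *stats)
{
        uint64_t tx_total = 0;
        uint16_t i;

        for (i = 0; i < nb_queues; i++) {
                /* Per-queue counters only fit the first
                 * RTE_ETHDEV_QUEUE_STAT_CNTRS slots. */
                if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS)
                        stats->q_opackets[i] = tx_queue[i].tx_pkts;
                tx_total += tx_queue[i].tx_pkts;
        }
        stats->opackets = tx_total;
}
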
/f-stack/dpdk/drivers/net/ark/
ark_ethdev_tx.h
14 struct rte_mbuf **tx_pkts,
17 struct rte_mbuf **tx_pkts,
/f-stack/dpdk/drivers/net/pfe/
pfe_ethdev.c
194 if (tx_pkts[i]->nb_segs > 1) { in pfe_xmit_pkts()
199 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]), in pfe_xmit_pkts()
200 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off, in pfe_xmit_pkts()
201 tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER, in pfe_xmit_pkts()
202 tx_pkts[i]); in pfe_xmit_pkts()
204 mbuf = tx_pkts[i]->next; in pfe_xmit_pkts()
223 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off, in pfe_xmit_pkts()
224 tx_pkts[i]->pkt_len, 0 /*ctrl*/, in pfe_xmit_pkts()
227 tx_pkts[i]); in pfe_xmit_pkts()
229 stats->obytes += tx_pkts[i]->pkt_len; in pfe_xmit_pkts()
[all …]
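
pfe's scalar path branches on nb_segs (line 194): the head segment goes out flagged HIF_FIRST_BUFFER, and the rest of the chain is reached through m->next, each segment contributing data_len bytes while pkt_len counts the whole chain for the byte statistics. A hedged sketch of the walk; submit_seg_sketch() is a placeholder for the HIF enqueue, which takes more arguments in the real driver:

#include <stdint.h>
#include <rte_mbuf.h>

enum seg_pos_sketch { SEG_FIRST, SEG_MIDDLE, SEG_LAST };

/* placeholder for the driver's HIF enqueue */
static void
submit_seg_sketch(void *data, uint16_t len, enum seg_pos_sketch pos)
{
        (void)data; (void)len; (void)pos;
}

static void
xmit_mseg_sketch(struct rte_mbuf *head)
{
        struct rte_mbuf *m;

        /* rte_pktmbuf_mtod() is buf_addr + data_off, as on line 200. */
        submit_seg_sketch(rte_pktmbuf_mtod(head, void *), head->data_len,
                          SEG_FIRST);
        for (m = head->next; m != NULL; m = m->next)
                submit_seg_sketch(rte_pktmbuf_mtod(m, void *), m->data_len,
                                  m->next == NULL ? SEG_LAST : SEG_MIDDLE);
}
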
/f-stack/dpdk/drivers/crypto/virtio/
virtio_cryptodev.h
59 struct rte_crypto_op **tx_pkts,
63 struct rte_crypto_op **tx_pkts,
/f-stack/dpdk/drivers/net/sfc/
sfc_ef10_tx.c
327 sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, in sfc_ef10_prepare_pkts() argument
334 struct rte_mbuf *m = tx_pkts[i]; in sfc_ef10_prepare_pkts()
621 for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts]; in sfc_ef10_xmit_pkts()
722 return pktp - &tx_pkts[0]; in sfc_ef10_xmit_pkts()
765 struct rte_mbuf **tx_pkts, in sfc_ef10_simple_prepare_pkts() argument
771 struct rte_mbuf *m = tx_pkts[i]; in sfc_ef10_simple_prepare_pkts()
809 if (unlikely(m->pool != tx_pkts[0]->pool)) { in sfc_ef10_simple_prepare_pkts()
820 sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, in sfc_ef10_simple_xmit_pkts() argument
845 pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)]; in sfc_ef10_simple_xmit_pkts()
846 for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) { in sfc_ef10_simple_xmit_pkts()
[all …]
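
The simple datapath's prepare step rejects mixed-mempool bursts (line 809), because completed mbufs are freed back to a single pool in bulk. A sketch with tx_prepare-style semantics (accepted count returned, rte_errno set on failure); the EINVAL choice is an assumption here:

#include <errno.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

static uint16_t
simple_prepare_sketch(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        uint16_t i;

        for (i = 0; i < nb_pkts; i++) {
                /* Every packet must share tx_pkts[0]'s mempool so the
                 * completion path can bulk-free to one pool. */
                if (tx_pkts[i]->pool != tx_pkts[0]->pool) {
                        rte_errno = EINVAL;  /* assumed errno, see lead-in */
                        break;
                }
        }
        return i;  /* number of packets accepted */
}
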
/f-stack/dpdk/drivers/net/failsafe/
failsafe_rxtx.c
141 struct rte_mbuf **tx_pkts, in failsafe_tx_burst() argument
155 nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts); in failsafe_tx_burst()
162 struct rte_mbuf **tx_pkts, in failsafe_tx_burst_fast() argument
175 nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts); in failsafe_tx_burst_fast()
/f-stack/dpdk/drivers/net/enic/
enic_rxtx.c
386 uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, in enic_prep_pkts() argument
396 m = tx_pkts[i]; in enic_prep_pkts()
434 uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, in enic_xmit_pkts() argument
469 tx_pkt = *tx_pkts++; in enic_xmit_pkts()
639 uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, in enic_simple_xmit_pkts() argument
664 memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n); in enic_simple_xmit_pkts()
669 enqueue_simple_pkts(tx_pkts, desc, n, enic); in enic_simple_xmit_pkts()
673 tx_pkts += n; in enic_simple_xmit_pkts()
674 memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem); in enic_simple_xmit_pkts()
676 enqueue_simple_pkts(tx_pkts, desc, rem, enic); in enic_simple_xmit_pkts()
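
enic_simple_xmit_pkts() stashes the mbuf pointers into the software ring with at most two memcpy calls, split where the ring wraps (lines 664-676). The same idiom in isolation:

#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_mbuf.h>

/* sketch: copy nb_pkts mbuf pointers into the software ring, splitting
 * the copy at the ring end */
static void
stash_mbufs_sketch(struct rte_mbuf **ring, uint16_t ring_size,
                   uint16_t head_idx, struct rte_mbuf **tx_pkts,
                   uint16_t nb_pkts)
{
        uint16_t n = RTE_MIN(nb_pkts, (uint16_t)(ring_size - head_idx));
        uint16_t rem = nb_pkts - n;

        memcpy(ring + head_idx, tx_pkts, n * sizeof(*tx_pkts));
        if (rem != 0)   /* wrapped: the tail lands at index 0 */
                memcpy(ring, tx_pkts + n, rem * sizeof(*tx_pkts));
}
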
/f-stack/dpdk/drivers/net/ionic/
ionic_rxtx.h
20 uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
22 uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/f-stack/dpdk/drivers/net/ice/
ice_rxtx.h
216 uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
219 uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
248 uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
255 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
262 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
ice_rxtx_vec_sse.c
654 ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, in ice_xmit_fixed_burst_vec() argument
684 ice_tx_backlog_entry(txep, tx_pkts, n); in ice_xmit_fixed_burst_vec()
686 for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) in ice_xmit_fixed_burst_vec()
687 ice_vtx1(txdp, *tx_pkts, flags); in ice_xmit_fixed_burst_vec()
689 ice_vtx1(txdp, *tx_pkts++, rs); in ice_xmit_fixed_burst_vec()
701 ice_tx_backlog_entry(txep, tx_pkts, nb_commit); in ice_xmit_fixed_burst_vec()
703 ice_vtx(txdp, tx_pkts, nb_commit, flags); in ice_xmit_fixed_burst_vec()
722 ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, in ice_xmit_pkts_vec() argument
732 ret = ice_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num); in ice_xmit_pkts_vec()
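
The outer ice_xmit_pkts_vec() (lines 722-732) cuts the caller's burst into fixed-size chunks and feeds each to the fixed-burst routine until it stops making progress. A sketch of that loop; the stub stands in for ice_xmit_fixed_burst_vec(), and the chunk bound is assumed to be the tx RS threshold, as is conventional for this family of drivers:

#include <stdint.h>
#include <rte_common.h>
#include <rte_mbuf.h>

/* stub for ice_xmit_fixed_burst_vec(): pretend everything fit */
static uint16_t
fixed_burst_stub(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        (void)txq; (void)tx_pkts;
        return nb_pkts;
}

static uint16_t
xmit_pkts_vec_sketch(void *txq, uint16_t tx_rs_thresh,
                     struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        uint16_t nb_tx = 0;

        while (nb_pkts > 0) {
                /* Bound each chunk so one doorbell never covers more
                 * descriptors than the RS threshold. */
                uint16_t num = RTE_MIN(nb_pkts, tx_rs_thresh);
                uint16_t ret = fixed_burst_stub(txq, &tx_pkts[nb_tx], num);

                nb_tx += ret;
                nb_pkts -= ret;
                if (ret < num)  /* ring full; give up early */
                        break;
        }
        return nb_tx;
}
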
/f-stack/dpdk/drivers/net/qede/
qede_rxtx.h
274 uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
276 uint16_t qede_xmit_pkts_cmt(void *p_txq, struct rte_mbuf **tx_pkts,
278 uint16_t qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts,
281 uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
/f-stack/dpdk/drivers/net/octeontx/
octeontx_rxtx.c
47 struct rte_mbuf **tx_pkts, uint16_t pkts) \
51 return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, \
/f-stack/dpdk/drivers/net/iavf/
iavf_rxtx.h
425 uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
427 uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
449 uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
462 uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
464 uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
481 uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
/f-stack/dpdk/drivers/net/i40e/
i40e_rxtx_vec_altivec.c
552 i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, in i40e_xmit_fixed_burst_vec() argument
582 tx_backlog_entry(txep, tx_pkts, n); in i40e_xmit_fixed_burst_vec()
584 for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) in i40e_xmit_fixed_burst_vec()
585 vtx1(txdp, *tx_pkts, flags); in i40e_xmit_fixed_burst_vec()
587 vtx1(txdp, *tx_pkts++, rs); in i40e_xmit_fixed_burst_vec()
599 tx_backlog_entry(txep, tx_pkts, nb_commit); in i40e_xmit_fixed_burst_vec()
601 vtx(txdp, tx_pkts, nb_commit, flags); in i40e_xmit_fixed_burst_vec()
i40e_rxtx_vec_neon.c
533 struct rte_mbuf **__rte_restrict tx_pkts, uint16_t nb_pkts) in i40e_xmit_fixed_burst_vec() argument
561 tx_backlog_entry(txep, tx_pkts, n); in i40e_xmit_fixed_burst_vec()
563 for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) in i40e_xmit_fixed_burst_vec()
564 vtx1(txdp, *tx_pkts, flags); in i40e_xmit_fixed_burst_vec()
566 vtx1(txdp, *tx_pkts++, rs); in i40e_xmit_fixed_burst_vec()
578 tx_backlog_entry(txep, tx_pkts, nb_commit); in i40e_xmit_fixed_burst_vec()
580 vtx(txdp, tx_pkts, nb_commit, flags); in i40e_xmit_fixed_burst_vec()
/f-stack/dpdk/drivers/event/sw/
sw_evdev_scheduler.c
96 p->stats.tx_pkts++; in sw_schedule_atomic_to_cq()
97 qid->stats.tx_pkts++; in sw_schedule_atomic_to_cq()
162 qid->stats.tx_pkts++; in sw_schedule_parallel_to_cq()
177 p->stats.tx_pkts++; in sw_schedule_parallel_to_cq()
204 qid->stats.tx_pkts += ret; in sw_schedule_dir_to_cq()
205 port->stats.tx_pkts += ret; in sw_schedule_dir_to_cq()
556 sw->stats.tx_pkts += out_pkts_total; in sw_event_schedule()
/f-stack/dpdk/drivers/net/ixgbe/
ixgbe_rxtx_vec_neon.c
483 ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, in ixgbe_xmit_fixed_burst_vec() argument
512 tx_backlog_entry(txep, tx_pkts, n); in ixgbe_xmit_fixed_burst_vec()
514 for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) in ixgbe_xmit_fixed_burst_vec()
515 vtx1(txdp, *tx_pkts, flags); in ixgbe_xmit_fixed_burst_vec()
517 vtx1(txdp, *tx_pkts++, rs); in ixgbe_xmit_fixed_burst_vec()
529 tx_backlog_entry(txep, tx_pkts, nb_commit); in ixgbe_xmit_fixed_burst_vec()
531 vtx(txdp, tx_pkts, nb_commit, flags); in ixgbe_xmit_fixed_burst_vec()
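
The i40e (altivec and NEON), ixgbe, and ice fixed-burst routines above all share one inner shape: stash the mbufs in a backlog for freeing on completion, write descriptors up to the ring end, set the RS (report status) flag on the last descriptor before the wrap, then continue from index 0. A scalar, self-contained sketch of that shape, with placeholder descriptor, entry, and helper types standing in for vtx1()/tx_backlog_entry():

#include <stdint.h>
#include <rte_mbuf.h>

struct txdesc_sketch { uint64_t addr, cmd; };          /* layout invented */
struct txentry_sketch { struct rte_mbuf *mbuf; };

/* placeholder for vtx1(): one descriptor from one mbuf */
static void
write_desc_sketch(struct txdesc_sketch *d, struct rte_mbuf *m, uint64_t flags)
{
        d->addr = rte_pktmbuf_iova(m);
        d->cmd  = ((uint64_t)m->data_len << 32) | flags;
}

/* placeholder for tx_backlog_entry(): remember mbufs to free on completion */
static void
backlog_sketch(struct txentry_sketch *txep, struct rte_mbuf **pkts, uint16_t n)
{
        for (uint16_t i = 0; i < n; i++)
                txep[i].mbuf = pkts[i];
}

static void
fixed_burst_core_sketch(struct txdesc_sketch *ring,
                        struct txentry_sketch *sw_ring, uint16_t ring_size,
                        uint16_t tx_id, struct rte_mbuf **tx_pkts,
                        uint16_t nb_commit, uint64_t flags, uint64_t rs)
{
        struct txdesc_sketch *txdp = &ring[tx_id];
        struct txentry_sketch *txep = &sw_ring[tx_id];
        uint16_t n = ring_size - tx_id;   /* slots before the wrap */
        uint16_t i;

        if (nb_commit >= n) {
                backlog_sketch(txep, tx_pkts, n);
                for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
                        write_desc_sketch(txdp, *tx_pkts, flags);
                /* The last descriptor before the wrap carries the RS bit
                 * so the NIC reports completion for the whole span. */
                write_desc_sketch(txdp, *tx_pkts++, rs);
                nb_commit -= n;
                tx_id = 0;
                txdp = &ring[0];
                txep = &sw_ring[0];
        }
        backlog_sketch(txep, tx_pkts, nb_commit);
        for (i = 0; i < nb_commit; ++i, ++tx_pkts, ++txdp)
                write_desc_sketch(txdp, *tx_pkts, flags);
}
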
