Search results for refs:tx_queue (results 1 – 25 of 139), sorted by relevance

/dpdk/drivers/net/nfb/
nfb_stats.c
25 struct ndp_tx_queue *tx_queue = *((struct ndp_tx_queue **) in nfb_eth_stats_get() local
39 stats->q_opackets[i] = tx_queue[i].tx_pkts; in nfb_eth_stats_get()
40 stats->q_obytes[i] = tx_queue[i].tx_bytes; in nfb_eth_stats_get()
42 tx_total += tx_queue[i].tx_pkts; in nfb_eth_stats_get()
43 tx_total_bytes += tx_queue[i].tx_bytes; in nfb_eth_stats_get()
44 tx_err_total += tx_queue[i].err_pkts; in nfb_eth_stats_get()
64 struct ndp_tx_queue *tx_queue = *((struct ndp_tx_queue **) in nfb_eth_stats_reset() local
73 tx_queue[i].tx_pkts = 0; in nfb_eth_stats_reset()
74 tx_queue[i].tx_bytes = 0; in nfb_eth_stats_reset()
75 tx_queue[i].err_pkts = 0; in nfb_eth_stats_reset()
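
The nfb matches above show the usual per-queue statistics pattern in a PMD: each TX queue carries its own packet, byte and error counters, and the stats_get/stats_reset callbacks copy or clear them per queue while accumulating device totals. A minimal sketch of that shape, assuming a simplified queue struct (not the driver's real ndp_tx_queue layout):

    #include <rte_ethdev.h>

    /* Illustrative per-queue counters; the real ndp_tx_queue differs. */
    struct demo_tx_queue {
        uint64_t tx_pkts;
        uint64_t tx_bytes;
        uint64_t err_pkts;
    };

    static int
    demo_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
    {
        /* Assumed: dev_private holds an array of per-queue counters. */
        struct demo_tx_queue *txq = dev->data->dev_private;

        for (uint16_t i = 0; i < dev->data->nb_tx_queues &&
                i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
            stats->q_opackets[i] = txq[i].tx_pkts;
            stats->q_obytes[i] = txq[i].tx_bytes;
            stats->opackets += txq[i].tx_pkts;
            stats->obytes += txq[i].tx_bytes;
            stats->oerrors += txq[i].err_pkts;
        }
        return 0;
    }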
/dpdk/drivers/crypto/qat/dev/
qat_sym_pmd_gen1.c
476 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_single_cipher_gen1()
545 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_cipher_jobs_gen1()
584 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_single_auth_gen1()
653 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_auth_jobs_gen1()
692 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_single_chain_gen1()
769 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_chain_jobs_gen1()
810 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_single_aead_gen1()
881 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_aead_jobs_gen1()
1035 tx_queue->hw_bundle_number, in qat_sym_dp_enqueue_done_gen1()
1036 tx_queue->hw_queue_number, tx_queue->tail); in qat_sym_dp_enqueue_done_gen1()
[all …]
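
Every QAT enqueue helper above advances the ring tail with the same expression, tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask. That is the standard power-of-two ring idiom: if the ring's byte size is a power of two, masking with size - 1 wraps the byte offset back to zero without a branch or a division. A self-contained sketch of the idiom, with illustrative names:

    #include <stdint.h>
    #include <assert.h>

    /* Illustrative ring bookkeeping mirroring the msg_size/modulo_mask idiom. */
    struct demo_ring {
        uint32_t tail;        /* byte offset of the next free descriptor */
        uint32_t msg_size;    /* descriptor size in bytes, power of two  */
        uint32_t modulo_mask; /* ring byte size minus one                */
    };

    static inline void
    demo_ring_advance(struct demo_ring *r)
    {
        r->tail = (r->tail + r->msg_size) & r->modulo_mask;
    }

    int main(void)
    {
        struct demo_ring r = { .tail = 0, .msg_size = 64, .modulo_mask = 4096 - 1 };

        for (int i = 0; i < 64; i++)
            demo_ring_advance(&r);
        assert(r.tail == 0); /* 64 descriptors of 64 B wrap a 4 KiB ring exactly */
        return 0;
    }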
qat_crypto_pmd_gen3.c
411 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_single_aead_gen3() local
420 (uint8_t *)tx_queue->base_addr + tail); in qat_sym_dp_enqueue_single_aead_gen3()
422 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_single_aead_gen3()
450 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_aead_jobs_gen3() local
471 (uint8_t *)tx_queue->base_addr + tail); in qat_sym_dp_enqueue_aead_jobs_gen3()
493 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_aead_jobs_gen3()
522 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_single_auth_gen3() local
530 (uint8_t *)tx_queue->base_addr + tail); in qat_sym_dp_enqueue_single_auth_gen3()
532 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_single_auth_gen3()
557 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_auth_jobs_gen3() local
[all …]
qat_crypto_pmd_gen4.c
240 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_single_aead_gen4() local
249 (uint8_t *)tx_queue->base_addr + tail); in qat_sym_dp_enqueue_single_aead_gen4()
250 cookie = qp->op_cookies[tail >> tx_queue->trailz]; in qat_sym_dp_enqueue_single_aead_gen4()
251 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_single_aead_gen4()
253 rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); in qat_sym_dp_enqueue_single_aead_gen4()
279 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_aead_jobs_gen4() local
297 qp->op_cookies[tail >> tx_queue->trailz]; in qat_sym_dp_enqueue_aead_jobs_gen4()
300 (uint8_t *)tx_queue->base_addr + tail); in qat_sym_dp_enqueue_aead_jobs_gen4()
322 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; in qat_sym_dp_enqueue_aead_jobs_gen4()
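
Two further details show up in the gen3/gen4 variants above: the request descriptor is written at base_addr + tail, and the per-slot software cookie is looked up as op_cookies[tail >> tx_queue->trailz]. If trailz is the number of trailing zero bits of the (power-of-two) descriptor size, the shift turns a byte offset into a slot index. A tiny sketch of that index arithmetic, under that assumption:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        const uint32_t msg_size = 128;                    /* power-of-two descriptor size */
        const uint32_t trailz = __builtin_ctz(msg_size);  /* 7 for 128-byte descriptors   */
        uint32_t tail = 5 * msg_size;                     /* byte offset of the 6th slot  */

        /* Byte offset -> slot index, as op_cookies[tail >> trailz] does. */
        assert((tail >> trailz) == 5);
        return 0;
    }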
/dpdk/drivers/net/af_packet/
rte_eth_af_packet.c
81 struct pkt_tx_queue *tx_queue; member
342 internals->tx_queue[i].sockfd = -1; in eth_dev_stop()
425 internal->tx_queue[i].tx_pkts = 0; in eth_stats_reset()
426 internal->tx_queue[i].err_pkts = 0; in eth_stats_reset()
456 rte_free(internals->tx_queue); in eth_dev_close()
674 struct pkt_tx_queue *tx_queue; in rte_pmd_init_internals() local
852 tx_queue = &((*internals)->tx_queue[q]); in rte_pmd_init_internals()
861 if (tx_queue->rd == NULL) in rte_pmd_init_internals()
864 tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize); in rte_pmd_init_internals()
867 tx_queue->sockfd = qsockfd; in rte_pmd_init_internals()
[all …]
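
The af_packet matches show how rte_pmd_init_internals() wires each TX queue to its PACKET_MMAP ring: every fixed-size frame in the mmap'd region gets one iovec entry (rd[i].iov_base = tx_queue->map + i * framesize) and the queue keeps its socket fd (tx_queue->sockfd = qsockfd). A minimal sketch of just the iovec setup, with hypothetical names:

    #include <stdlib.h>
    #include <sys/uio.h>

    /* Illustrative: point one iovec at each fixed-size frame of a TX ring. */
    static struct iovec *
    demo_build_ring_iovecs(void *ring_base, unsigned int frame_cnt,
                           unsigned int frame_size)
    {
        struct iovec *rd = calloc(frame_cnt, sizeof(*rd));

        if (rd == NULL)
            return NULL;
        for (unsigned int i = 0; i < frame_cnt; i++) {
            rd[i].iov_base = (char *)ring_base + (size_t)i * frame_size;
            rd[i].iov_len = frame_size;
        }
        return rd;
    }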
/dpdk/app/test-eventdev/
test_pipeline_queue.c
53 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_queue_worker_single_stage_fwd() local
64 ev.queue_id = tx_queue[ev.mbuf->port]; in pipeline_queue_worker_single_stage_fwd()
112 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_queue_worker_single_stage_burst_fwd() local
173 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_queue_worker_single_stage_fwd_vector() local
185 ev.queue_id = tx_queue[ev.vec->port]; in pipeline_queue_worker_single_stage_fwd_vector()
236 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_queue_worker_single_stage_burst_fwd_vector() local
269 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_queue_worker_multi_stage_tx() local
304 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_queue_worker_multi_stage_fwd() local
318 ev.queue_id = tx_queue[ev.mbuf->port]; in pipeline_queue_worker_multi_stage_fwd()
338 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_queue_worker_multi_stage_burst_tx() local
[all …]
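
In the eventdev pipeline tests, tx_queue is not a device queue at all but an array of TX event queue ids indexed by ethernet port: each worker rewrites the event's queue_id to the TX event queue bound to the destination port (ev.mbuf->port or ev.vec->port) before forwarding. A stripped-down sketch of the lookup, with illustrative types:

    #include <stdint.h>

    /* Illustrative stand-in for the rte_event fields the workers touch. */
    struct demo_event {
        uint8_t  queue_id;
        uint16_t port;  /* plays the role of ev.mbuf->port / ev.vec->port */
    };

    /* Route an event to the TX event queue of its output port. */
    static inline void
    demo_fwd_to_tx_queue(struct demo_event *ev, const uint8_t *tx_evqueue_id)
    {
        ev->queue_id = tx_evqueue_id[ev->port];
    }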
test_pipeline_atq.c
46 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_atq_worker_single_stage_fwd() local
57 ev.queue_id = tx_queue[ev.mbuf->port]; in pipeline_atq_worker_single_stage_fwd()
98 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_atq_worker_single_stage_burst_fwd() local
151 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_atq_worker_single_stage_fwd_vector() local
164 ev.queue_id = tx_queue[ev.vec->port]; in pipeline_atq_worker_single_stage_fwd_vector()
207 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_atq_worker_single_stage_burst_fwd_vector() local
271 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_atq_worker_multi_stage_fwd() local
285 ev.queue_id = tx_queue[ev.mbuf->port]; in pipeline_atq_worker_multi_stage_fwd()
340 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_atq_worker_multi_stage_burst_fwd() local
411 const uint8_t *tx_queue = t->tx_evqueue_id; in pipeline_atq_worker_multi_stage_fwd_vector() local
[all …]
/dpdk/drivers/net/virtio/
virtio_ethdev.h
91 uint16_t virtio_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,
94 uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
96 uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
99 uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
108 uint16_t virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
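
Most of the header hits from here on are variations of the standard ethdev TX burst prototype: an eth_tx_burst_t function takes an opaque per-queue pointer, an array of mbufs and a count, and returns how many packets it actually accepted. A sketch of a stub with that calling convention (not any driver's real transmit path):

    #include <rte_mbuf.h>

    /* Illustrative stub matching the eth_tx_burst_t shape. */
    static uint16_t
    demo_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
    {
        uint16_t nb_tx;

        (void)tx_queue; /* a real PMD would cast this to its queue struct */
        /* No hardware here: just consume the mbufs and report them sent. */
        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++)
            rte_pktmbuf_free(tx_pkts[nb_tx]);
        return nb_tx;
    }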
/dpdk/drivers/net/ice/
ice_rxtx.h
226 uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
229 uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
244 int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
258 uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
270 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
272 uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
285 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
287 uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
/dpdk/drivers/net/bnxt/
bnxt_txr.h
48 uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
51 uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
55 uint16_t bnxt_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
/dpdk/drivers/net/pcap/
pcap_ethdev.c
440 struct pcap_tx_queue *tx_queue = queue; in eth_tx_drop() local
450 tx_queue->tx_stat.pkts += nb_pkts; in eth_tx_drop()
451 tx_queue->tx_stat.bytes += tx_bytes; in eth_tx_drop()
466 struct pcap_tx_queue *tx_queue = queue; in eth_pcap_tx() local
474 pcap = pp->tx_pcap[tx_queue->queue_id]; in eth_pcap_tx()
504 tx_queue->tx_stat.pkts += num_tx; in eth_pcap_tx()
505 tx_queue->tx_stat.bytes += tx_bytes; in eth_pcap_tx()
506 tx_queue->tx_stat.err_pkts += i - num_tx; in eth_pcap_tx()
610 tx = &internals->tx_queue[0]; in eth_dev_start()
625 tx = &internals->tx_queue[i]; in eth_dev_start()
[all …]
/dpdk/drivers/net/octeontx_ep/
otx_ep_rxtx.h
39 otx_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts);
41 otx2_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts);
/dpdk/drivers/crypto/bcmfs/hw/
bcmfs5_rm.c
538 struct bcmfs_queue *tx_queue = &qp->tx_q; in bcmfs5_start_qp() local
545 for (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) { in bcmfs5_start_qp()
547 if (next_addr == tx_queue->queue_size) in bcmfs5_start_qp()
549 next_addr += (uint64_t)tx_queue->base_phys_addr; in bcmfs5_start_qp()
554 rm_write_desc((uint8_t *)tx_queue->base_addr + off, d); in bcmfs5_start_qp()
603 bd_low = lower_32_bits(tx_queue->base_phys_addr); in bcmfs5_start_qp()
604 bd_high = upper_32_bits(tx_queue->base_phys_addr); in bcmfs5_start_qp()
610 tx_queue->tx_write_ptr = 0; in bcmfs5_start_qp()
bcmfs4_rm.c
606 struct bcmfs_queue *tx_queue = &qp->tx_q; in bcmfs4_start_qp() local
613 for (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) { in bcmfs4_start_qp()
615 if (next_addr == tx_queue->queue_size) in bcmfs4_start_qp()
617 next_addr += (uint64_t)tx_queue->base_phys_addr; in bcmfs4_start_qp()
623 rm_write_desc((uint8_t *)tx_queue->base_addr + off, d); in bcmfs4_start_qp()
672 val = BD_START_ADDR_VALUE(tx_queue->base_phys_addr); in bcmfs4_start_qp()
676 tx_queue->tx_write_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg + in bcmfs4_start_qp()
678 tx_queue->tx_write_ptr *= FS_RING_DESC_SIZE; in bcmfs4_start_qp()
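
Both bcmfs ring-manager generations initialize the TX descriptor ring the same way: walk the ring in FS_RING_DESC_SIZE steps, make each descriptor point at the bus address of the next slot (the last one wraps back to offset 0 plus base_phys_addr), and write it at base_addr + off. A sketch of that next-pointer chaining, with illustrative types in place of the real descriptor format:

    #include <stdint.h>
    #include <string.h>

    #define DEMO_DESC_SIZE 64  /* stand-in for FS_RING_DESC_SIZE */

    /* Illustrative descriptor that only records the next slot's bus address. */
    struct demo_desc {
        uint64_t next_addr;
        uint8_t  pad[DEMO_DESC_SIZE - sizeof(uint64_t)];
    };

    static void
    demo_chain_ring(void *base_addr, uint64_t base_phys, uint32_t queue_size)
    {
        for (uint32_t off = 0; off < queue_size; off += DEMO_DESC_SIZE) {
            uint64_t next = off + DEMO_DESC_SIZE;

            if (next == queue_size)   /* last slot wraps to the start */
                next = 0;
            next += base_phys;        /* ring offset -> bus address */

            struct demo_desc d = { .next_addr = next };
            memcpy((uint8_t *)base_addr + off, &d, sizeof(d));
        }
    }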
/dpdk/drivers/crypto/virtio/
virtio_cryptodev.h
58 uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue,
62 uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
/dpdk/drivers/net/atlantic/
atl_ethdev.h
72 int atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
101 uint16_t atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
104 uint16_t atl_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/dpdk/drivers/net/iavf/
iavf_rxtx.h
619 uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
621 uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
631 int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
643 uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
656 uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
658 uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
688 uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
690 uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
/dpdk/app/test-pmd/
noisy_vnf.c
103 nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, in do_retry()
164 nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, in pkt_burst_noisy_vnf()
188 fs->tx_queue, tmp_pkts, in pkt_burst_noisy_vnf()
213 sent = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, in pkt_burst_noisy_vnf()
iofwd.c
66 nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, in pkt_burst_io_forward()
75 nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, in pkt_burst_io_forward()
macswap.c
77 nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx); in pkt_burst_mac_swap()
85 nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, in pkt_burst_mac_swap()
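
The test-pmd forwarding engines all transmit through rte_eth_tx_burst(fs->tx_port, fs->tx_queue, ...) and, when the queue accepts fewer packets than offered, retry from the first unsent mbuf, which is what the nb_tx += lines in do_retry() and the forwarding loops are doing. A sketch of that retry idiom around rte_eth_tx_burst():

    #include <rte_ethdev.h>

    /* Keep offering the unsent tail of a burst to the TX queue. */
    static uint16_t
    demo_tx_burst_retry(uint16_t port_id, uint16_t queue_id,
                        struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
        uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

        while (nb_tx < nb_pkts) {
            uint16_t sent = rte_eth_tx_burst(port_id, queue_id,
                                             &pkts[nb_tx], nb_pkts - nb_tx);
            if (sent == 0)
                break; /* a real engine may bound/delay retries; the sketch gives up */
            nb_tx += sent;
        }
        return nb_tx;
    }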
/dpdk/drivers/net/axgbe/
axgbe_rxtx.h
168 uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
170 uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
192 int axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
/dpdk/app/test/
virtual_pmd.c
26 struct rte_ring *tx_queue; member
61 while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT) in virtual_ethdev_stop()
200 while (rte_ring_dequeue(dev_private->tx_queue, &pkt) == -ENOBUFS) in virtual_ethdev_stats_reset()
364 nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs, in virtual_ethdev_tx_burst_success()
497 return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst, in virtual_ethdev_get_mbufs_from_tx_queue()
542 dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id, in virtual_ethdev_create()
544 if (dev_private->tx_queue == NULL) in virtual_ethdev_create()
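
The virtual PMD used by the unit tests backs its TX path with a plain rte_ring: the TX burst callback enqueues mbufs onto dev_private->tx_queue, and the test harness later drains that ring to inspect what "left" the port. A sketch of the same pattern with the rte_ring burst API (the demo_ function names are illustrative):

    #include <rte_ring.h>
    #include <rte_mbuf.h>

    /* An rte_ring standing in for a hardware TX queue. */
    static uint16_t
    demo_ring_tx_burst(struct rte_ring *txq, struct rte_mbuf **bufs, uint16_t nb_pkts)
    {
        return rte_ring_enqueue_burst(txq, (void **)bufs, nb_pkts, NULL);
    }

    /* What the test harness does to retrieve the "transmitted" packets. */
    static uint16_t
    demo_ring_drain(struct rte_ring *txq, struct rte_mbuf **bufs, uint16_t nb_pkts)
    {
        return rte_ring_dequeue_burst(txq, (void **)bufs, nb_pkts, NULL);
    }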
/dpdk/drivers/net/ionic/
ionic_rxtx.h
20 uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
22 uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/dpdk/drivers/net/octeontx/
octeontx_rxtx.c
46 octeontx_xmit_pkts_ ##name(void *tx_queue, \
51 return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, \
/dpdk/drivers/net/tap/
rte_eth_tap.h
58 struct tx_queue { struct
91 struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
