/f-stack/dpdk/drivers/net/nfb/nfb_stats.c
    25  struct ndp_tx_queue *tx_queue = *((struct ndp_tx_queue **)   in nfb_eth_stats_get() [local]
    39  stats->q_opackets[i] = tx_queue[i].tx_pkts;   in nfb_eth_stats_get()
    40  stats->q_obytes[i] = tx_queue[i].tx_bytes;   in nfb_eth_stats_get()
    42  tx_total += tx_queue[i].tx_pkts;   in nfb_eth_stats_get()
    43  tx_total_bytes += tx_queue[i].tx_bytes;   in nfb_eth_stats_get()
    44  tx_err_total += tx_queue[i].err_pkts;   in nfb_eth_stats_get()
    64  struct ndp_tx_queue *tx_queue = *((struct ndp_tx_queue **)   in nfb_eth_stats_reset() [local]
    73  tx_queue[i].tx_pkts = 0;   in nfb_eth_stats_reset()
    74  tx_queue[i].tx_bytes = 0;   in nfb_eth_stats_reset()
    75  tx_queue[i].err_pkts = 0;   in nfb_eth_stats_reset()
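The nfb hits above are the usual ethdev stats callback pattern: per-queue TX counters kept by the PMD are copied into the q_opackets/q_obytes slots of struct rte_eth_stats and summed into the device totals. A minimal sketch of that pattern, with an illustrative counter struct standing in for the driver's ndp_tx_queue:

    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Illustrative per-queue counters; the real driver keeps these inside
     * its ndp_tx_queue objects. */
    struct txq_counters {
        uint64_t tx_pkts;
        uint64_t tx_bytes;
        uint64_t err_pkts;
    };

    /* Fold per-queue TX counters into struct rte_eth_stats, the same shape
     * of loop the nfb_eth_stats_get() hits show. */
    static void
    fill_tx_stats(struct rte_eth_stats *stats,
                  const struct txq_counters *txq, uint16_t nb_txq)
    {
        uint64_t tx_total = 0, tx_total_bytes = 0, tx_err_total = 0;
        uint16_t i;

        for (i = 0; i < nb_txq; i++) {
            if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
                stats->q_opackets[i] = txq[i].tx_pkts;
                stats->q_obytes[i] = txq[i].tx_bytes;
            }
            tx_total += txq[i].tx_pkts;
            tx_total_bytes += txq[i].tx_bytes;
            tx_err_total += txq[i].err_pkts;
        }
        stats->opackets = tx_total;
        stats->obytes = tx_total_bytes;
        stats->oerrors = tx_err_total;
    }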
/f-stack/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c
    50  cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];   in qat_sym_dp_parse_data_vec()
    238 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;   in qat_sym_dp_enqueue_single_aead()
    290 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;   in qat_sym_dp_enqueue_aead_jobs()
    338 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;   in qat_sym_dp_enqueue_single_cipher()
    389 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;   in qat_sym_dp_enqueue_cipher_jobs()
    458 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;   in qat_sym_dp_enqueue_single_auth()
    510 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;   in qat_sym_dp_enqueue_auth_jobs()
    640 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;   in qat_sym_dp_enqueue_single_chain()
    696 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;   in qat_sym_dp_enqueue_chain_jobs()
    837 tx_queue->hw_queue_number, tx_queue->tail);   in qat_sym_dp_kick_tail()
    [all …]
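Every QAT enqueue hit advances the hardware ring tail with the same masked addition: the ring size is a power of two, so "& modulo_mask" is a branch-free wrap-around. A standalone sketch of that arithmetic; the function and the test values are illustrative, while msg_size and modulo_mask mirror the fields the hits reference:

    #include <assert.h>
    #include <stdint.h>

    /* Advance a ring tail by one fixed-size message and wrap at the end of a
     * power-of-two ring, as the qat_sym_dp_enqueue_*() hits do with
     * tx_queue->msg_size and tx_queue->modulo_mask. */
    static inline uint32_t
    ring_advance(uint32_t tail, uint32_t msg_size, uint32_t ring_bytes)
    {
        uint32_t modulo_mask = ring_bytes - 1;    /* ring_bytes must be 2^n */

        return (tail + msg_size) & modulo_mask;
    }

    int main(void)
    {
        uint32_t tail = 0;
        int i;

        /* 4096-byte ring of 128-byte messages: the 32nd advance wraps to 0. */
        for (i = 0; i < 32; i++)
            tail = ring_advance(tail, 128, 4096);
        assert(tail == 0);
        return 0;
    }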
/f-stack/dpdk/drivers/net/af_packet/rte_eth_af_packet.c
    80  struct pkt_tx_queue *tx_queue;   [member]
    295 internals->tx_queue[i].sockfd = -1;   in eth_dev_stop()
    372 internal->tx_queue[i].tx_pkts = 0;   in eth_stats_reset()
    373 internal->tx_queue[i].err_pkts = 0;   in eth_stats_reset()
    403 rte_free(internals->tx_queue);   in eth_dev_close()
    627 struct pkt_tx_queue *tx_queue;   in rte_pmd_init_internals() [local]
    805 tx_queue = &((*internals)->tx_queue[q]);   in rte_pmd_init_internals()
    814 if (tx_queue->rd == NULL)   in rte_pmd_init_internals()
    817 tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize);   in rte_pmd_init_internals()
    820 tx_queue->sockfd = qsockfd;   in rte_pmd_init_internals()
    [all …]
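Each af_packet TX queue is a PACKET_MMAP ring: one contiguous mapping split into fixed-size frames, with a struct iovec pointing at each frame (line 817 above). A hedged sketch of that carving step outside the driver; the mapping would normally come from mmap() on the packet socket:

    #include <stdlib.h>
    #include <sys/uio.h>

    /* Point one iovec at each fixed-size frame of a mapped TX ring, the way
     * rte_pmd_init_internals() fills tx_queue->rd[]. */
    static struct iovec *
    build_frame_iovecs(void *map, unsigned int frame_count, size_t framesize)
    {
        struct iovec *rd = calloc(frame_count, sizeof(*rd));
        unsigned int i;

        if (rd == NULL)
            return NULL;
        for (i = 0; i < frame_count; i++) {
            rd[i].iov_base = (char *)map + (i * framesize);
            rd[i].iov_len = framesize;
        }
        return rd;
    }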
/f-stack/dpdk/drivers/net/virtio/virtio_ethdev.h
    94  uint16_t virtio_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,
    97  uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
    99  uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
    102 uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
    111 uint16_t virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
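These virtio prototypes, like the ionic/ice/atlantic/iavf/bnxt ones further down, all follow the ethdev TX burst shape: an opaque queue pointer the PMD registered, an array of mbufs, and a return value saying how many of nb_pkts were actually queued. A minimal stub with that signature, not any real PMD's implementation:

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Illustrative queue state; a real PMD casts tx_queue back to its own
     * queue structure here. */
    struct demo_txq {
        uint16_t queue_id;
    };

    static uint16_t
    demo_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
    {
        struct demo_txq *txq = tx_queue;

        (void)txq;
        (void)tx_pkts;
        /* A real implementation writes TX descriptors here and may accept
         * fewer packets than offered when the ring is nearly full. */
        return nb_pkts;
    }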
/f-stack/dpdk/app/test-eventdev/test_pipeline_queue.c
    48  const uint8_t *tx_queue = t->tx_evqueue_id;   in pipeline_queue_worker_single_stage_fwd() [local]
    58  ev.queue_id = tx_queue[ev.mbuf->port];   in pipeline_queue_worker_single_stage_fwd()
    105 const uint8_t *tx_queue = t->tx_evqueue_id;   in pipeline_queue_worker_single_stage_burst_fwd() [local]
    118 ev[i].queue_id = tx_queue[ev[i].mbuf->port];   in pipeline_queue_worker_single_stage_burst_fwd()
    135 const uint8_t *tx_queue = t->tx_evqueue_id;   in pipeline_queue_worker_multi_stage_tx() [local]
    147 if (ev.queue_id == tx_queue[ev.mbuf->port]) {   in pipeline_queue_worker_multi_stage_tx()
    167 const uint8_t *tx_queue = t->tx_evqueue_id;   in pipeline_queue_worker_multi_stage_fwd() [local]
    180 ev.queue_id = tx_queue[ev.mbuf->port];   in pipeline_queue_worker_multi_stage_fwd()
    199 const uint8_t *tx_queue = t->tx_evqueue_id;   in pipeline_queue_worker_multi_stage_burst_tx() [local]
    237 const uint8_t *tx_queue = t->tx_evqueue_id;   in pipeline_queue_worker_multi_stage_burst_fwd() [local]
    [all …]
/f-stack/dpdk/app/test-eventdev/test_pipeline_atq.c
    42  const uint8_t *tx_queue = t->tx_evqueue_id;   in pipeline_atq_worker_single_stage_fwd() [local]
    52  ev.queue_id = tx_queue[ev.mbuf->port];   in pipeline_atq_worker_single_stage_fwd()
    91  const uint8_t *tx_queue = t->tx_evqueue_id;   in pipeline_atq_worker_single_stage_burst_fwd() [local]
    105 ev[i].queue_id = tx_queue[ev[i].mbuf->port];   in pipeline_atq_worker_single_stage_burst_fwd()
    149 const uint8_t *tx_queue = t->tx_evqueue_id;   in pipeline_atq_worker_multi_stage_fwd() [local]
    162 ev.queue_id = tx_queue[ev.mbuf->port];   in pipeline_atq_worker_multi_stage_fwd()
    215 const uint8_t *tx_queue = t->tx_evqueue_id;   in pipeline_atq_worker_multi_stage_burst_fwd() [local]
    232 ev[i].queue_id = tx_queue[ev[i].mbuf->port];   in pipeline_atq_worker_multi_stage_burst_fwd()
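In both pipeline tests the "fwd" workers look up, per packet, the TX event queue that serves the mbuf's destination port (t->tx_evqueue_id, indexed by port) and forward the event there. A rough sketch of that worker step, assuming an already configured event device; dev_id, port_id and the lookup table are placeholders:

    #include <stdint.h>
    #include <rte_eventdev.h>
    #include <rte_mbuf.h>

    /* Re-target one dequeued event at the TX event queue for its mbuf's
     * destination port, as the pipeline_*_stage_fwd() workers do. */
    static void
    forward_to_tx_queue(uint8_t dev_id, uint8_t port_id, const uint8_t *tx_queue)
    {
        struct rte_event ev;

        if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 0)
            return;

        ev.queue_id = tx_queue[ev.mbuf->port];
        ev.op = RTE_EVENT_OP_FORWARD;

        while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) == 0)
            ;    /* retry until the event is accepted */
    }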
/f-stack/dpdk/drivers/net/ionic/ionic_rxtx.h
    20  uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
    22  uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
    35  void ionic_dev_tx_queue_release(void *tx_queue);
/f-stack/dpdk/drivers/net/ice/ice_rxtx.h
    216 uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
    219 uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
    234 int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
    248 uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
    255 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
    262 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
/f-stack/dpdk/drivers/net/octeontx2/otx2_tx.c
    24  nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,   in nix_xmit_pkts() [argument]
    27  struct otx2_eth_txq *txq = tx_queue; uint16_t i;   in nix_xmit_pkts()
    62  nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,   in nix_xmit_pkts_mseg() [argument]
    65  struct otx2_eth_txq *txq = tx_queue; uint64_t i;   in nix_xmit_pkts_mseg()
    105 nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,   in nix_xmit_pkts_vector() [argument]
    115 struct otx2_eth_txq *txq = tx_queue;   in nix_xmit_pkts_vector()
    953 RTE_SET_USED(tx_queue);   in nix_xmit_pkts_vector()
    964 otx2_nix_xmit_pkts_ ## name(void *tx_queue, \
    981 otx2_nix_xmit_pkts_mseg_ ## name(void *tx_queue, \
    990 return nix_xmit_pkts_mseg(tx_queue, tx_pkts, pkts, cmd, \
    [all …]
/f-stack/dpdk/drivers/crypto/virtio/virtio_cryptodev.h
    58  uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue,
    62  uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
/f-stack/dpdk/drivers/net/pcap/rte_eth_pcap.c
    383 struct pcap_tx_queue *tx_queue = queue;   in eth_tx_drop() [local]
    393 tx_queue->tx_stat.pkts += nb_pkts;   in eth_tx_drop()
    394 tx_queue->tx_stat.bytes += tx_bytes;   in eth_tx_drop()
    409 struct pcap_tx_queue *tx_queue = queue;   in eth_pcap_tx() [local]
    417 pcap = pp->tx_pcap[tx_queue->queue_id];   in eth_pcap_tx()
    447 tx_queue->tx_stat.pkts += num_tx;   in eth_pcap_tx()
    448 tx_queue->tx_stat.bytes += tx_bytes;   in eth_pcap_tx()
    449 tx_queue->tx_stat.err_pkts += i - num_tx;   in eth_pcap_tx()
    553 tx = &internals->tx_queue[0];   in eth_dev_start()
    568 tx = &internals->tx_queue[i];   in eth_dev_start()
    [all …]
/f-stack/dpdk/drivers/net/atlantic/atl_ethdev.h
    72  int atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
    101 uint16_t atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
    104 uint16_t atl_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/f-stack/dpdk/app/test-pmd/iofwd.c
    67  nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,   in pkt_burst_io_forward()
    76  nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,   in pkt_burst_io_forward()
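iofwd.c is testpmd's simplest forwarding engine: receive a burst, transmit it unchanged on (fs->tx_port, fs->tx_queue), try once more with whatever did not fit, and free anything still unsent. The macswap.c and noisy_vnf.c hits below use the same transmit call. A condensed sketch of that loop with placeholder port and queue ids:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    /* One iteration of an io-forward loop in the style of
     * pkt_burst_io_forward(): rx burst in, tx burst out, drop the rest. */
    static void
    io_forward_once(uint16_t rx_port, uint16_t rx_queue,
                    uint16_t tx_port, uint16_t tx_queue)
    {
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t nb_rx, nb_tx;

        nb_rx = rte_eth_rx_burst(rx_port, rx_queue, pkts, BURST_SIZE);
        if (nb_rx == 0)
            return;

        nb_tx = rte_eth_tx_burst(tx_port, tx_queue, pkts, nb_rx);
        /* testpmd can retry; a single extra attempt is enough for a sketch */
        if (nb_tx < nb_rx)
            nb_tx += rte_eth_tx_burst(tx_port, tx_queue,
                                      &pkts[nb_tx], nb_rx - nb_tx);

        /* Unsent mbufs remain owned by the caller and must be freed. */
        while (nb_tx < nb_rx)
            rte_pktmbuf_free(pkts[nb_tx++]);
    }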
/f-stack/dpdk/app/test-pmd/macswap.c
    78  nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);   in pkt_burst_mac_swap()
    86  nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,   in pkt_burst_mac_swap()
/f-stack/dpdk/app/test-pmd/noisy_vnf.c
    103 nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,   in do_retry()
    164 nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,   in pkt_burst_noisy_vnf()
    188 fs->tx_queue, tmp_pkts,   in pkt_burst_noisy_vnf()
    213 sent = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,   in pkt_burst_noisy_vnf()
/f-stack/dpdk/drivers/crypto/bcmfs/hw/bcmfs5_rm.c
    538 struct bcmfs_queue *tx_queue = &qp->tx_q;   in bcmfs5_start_qp() [local]
    545 for (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) {   in bcmfs5_start_qp()
    547 if (next_addr == tx_queue->queue_size)   in bcmfs5_start_qp()
    549 next_addr += (uint64_t)tx_queue->base_phys_addr;   in bcmfs5_start_qp()
    554 rm_write_desc((uint8_t *)tx_queue->base_addr + off, d);   in bcmfs5_start_qp()
    603 bd_low = lower_32_bits(tx_queue->base_phys_addr);   in bcmfs5_start_qp()
    604 bd_high = upper_32_bits(tx_queue->base_phys_addr);   in bcmfs5_start_qp()
    610 tx_queue->tx_write_ptr = 0;   in bcmfs5_start_qp()
/f-stack/dpdk/drivers/crypto/bcmfs/hw/bcmfs4_rm.c
    606 struct bcmfs_queue *tx_queue = &qp->tx_q;   in bcmfs4_start_qp() [local]
    613 for (off = 0; off < tx_queue->queue_size; off += FS_RING_DESC_SIZE) {   in bcmfs4_start_qp()
    615 if (next_addr == tx_queue->queue_size)   in bcmfs4_start_qp()
    617 next_addr += (uint64_t)tx_queue->base_phys_addr;   in bcmfs4_start_qp()
    623 rm_write_desc((uint8_t *)tx_queue->base_addr + off, d);   in bcmfs4_start_qp()
    672 val = BD_START_ADDR_VALUE(tx_queue->base_phys_addr);   in bcmfs4_start_qp()
    676 tx_queue->tx_write_ptr = FS_MMIO_READ32((uint8_t *)qp->ioreg +   in bcmfs4_start_qp()
    678 tx_queue->tx_write_ptr *= FS_RING_DESC_SIZE;   in bcmfs4_start_qp()
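Both bcmfs ring managers pre-link the TX ring when a queue pair starts: every FS_RING_DESC_SIZE slot appears to receive a pointer to the next descriptor, with the last slot wrapping back to the ring's base physical address before the base and write pointers are programmed. A generic sketch of that wrap-around linking; the descriptor size, the zeroing of the wrapped offset and the store are stand-ins for the hardware-specific helpers, since those lines are not shown above:

    #include <stdint.h>
    #include <string.h>

    #define DESC_SIZE 8u    /* stand-in for FS_RING_DESC_SIZE */

    /* Store a "next descriptor" physical address in every ring slot; the
     * final slot points back to the first so the hardware can walk the ring
     * indefinitely, mirroring the loop shape in bcmfs4/5_start_qp(). */
    static void
    link_ring_descriptors(uint8_t *base_addr, uint64_t base_phys_addr,
                          uint32_t queue_size)
    {
        uint32_t off;

        for (off = 0; off < queue_size; off += DESC_SIZE) {
            uint64_t next_addr = off + DESC_SIZE;

            if (next_addr == queue_size)
                next_addr = 0;              /* wrap the last descriptor */
            next_addr += base_phys_addr;    /* convert to a bus address */

            /* stand-in for rm_write_desc() */
            memcpy(base_addr + off, &next_addr, sizeof(next_addr));
        }
    }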
/f-stack/dpdk/drivers/net/axgbe/axgbe_rxtx.h
    165 uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
    167 uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
    189 int axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
/f-stack/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
    59  axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,   in axgbe_xmit_pkts_vec() [argument]
    68  txq = (struct axgbe_tx_queue *)tx_queue;   in axgbe_xmit_pkts_vec()
/f-stack/dpdk/drivers/net/iavf/iavf_rxtx.h
    425 uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
    427 uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
    437 int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
    449 uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
    462 uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
    464 uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
    481 uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
/f-stack/dpdk/drivers/net/octeontx/octeontx_rxtx.c
    46  octeontx_xmit_pkts_ ##name(void *tx_queue, \
    51  return __octeontx_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, \
/f-stack/dpdk/drivers/net/tap/rte_eth_tap.h
    58  struct tx_queue {   [struct]
    91  struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
/f-stack/dpdk/app/test/virtual_pmd.c
    26  struct rte_ring *tx_queue;   [member]
    61  while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)   in virtual_ethdev_stop()
    210 while (rte_ring_dequeue(dev_private->tx_queue, &pkt) == -ENOBUFS)   in virtual_ethdev_stats_reset()
    376 nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs,   in virtual_ethdev_tx_burst_success()
    505 return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,   in virtual_ethdev_get_mbufs_from_tx_queue()
    550 dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,   in virtual_ethdev_create()
    552 if (dev_private->tx_queue == NULL)   in virtual_ethdev_create()
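The virtual PMD used by the unit tests backs its TX path with an rte_ring: mbufs handed to the TX burst callback are enqueued on dev_private->tx_queue, and the test later drains them with rte_ring_dequeue_burst() to check what "left" the port. A small sketch of the same idea; the ring name, size and flags are arbitrary choices, not the test's own values:

    #include <rte_mbuf.h>
    #include <rte_ring.h>

    #define MOCK_RING_SIZE 64    /* power of two, as default rings require */

    /* Create a ring that stands in for a TX queue. */
    static struct rte_ring *
    mock_txq_create(int socket_id)
    {
        return rte_ring_create("mock_txq", MOCK_RING_SIZE, socket_id,
                               RING_F_SP_ENQ | RING_F_SC_DEQ);
    }

    /* "Transmit" by parking the mbufs in the ring; the return value is the
     * number accepted, just like a real tx_burst callback. */
    static uint16_t
    mock_tx_burst(struct rte_ring *txq, struct rte_mbuf **bufs, uint16_t nb_pkts)
    {
        return rte_ring_enqueue_burst(txq, (void **)bufs, nb_pkts, NULL);
    }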
/f-stack/dpdk/drivers/net/bnxt/bnxt_txr.h
    50  uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
    53  uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
/f-stack/dpdk/drivers/net/hns3/hns3_rxtx_vec.c
    29  hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)   in hns3_xmit_pkts_vec() [argument]
    31  struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;   in hns3_xmit_pkts_vec()
    38  ret = hns3_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],   in hns3_xmit_pkts_vec()
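hns3_xmit_pkts_vec() is a thin wrapper: it feeds the caller's burst to hns3_xmit_fixed_burst_vec() in fixed-size chunks and stops early when the queue accepts less than it was offered. The chunking loop looks roughly like this; the fixed-burst callee and its per-call limit are illustrative stand-ins:

    #include <stdint.h>
    #include <rte_common.h>
    #include <rte_mbuf.h>

    #define FIXED_BURST 32    /* illustrative per-call limit of the vector path */

    /* Stand-in for hns3_xmit_fixed_burst_vec(): pretend every packet fits. */
    static uint16_t
    xmit_fixed_burst(void *txq, struct rte_mbuf **pkts, uint16_t nb)
    {
        (void)txq;
        (void)pkts;
        return nb;
    }

    /* Split an arbitrary burst into fixed-size chunks and stop as soon as
     * the queue pushes back. */
    static uint16_t
    xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
    {
        uint16_t nb_tx = 0;

        while (nb_tx < nb_pkts) {
            uint16_t num = RTE_MIN(FIXED_BURST, nb_pkts - nb_tx);
            uint16_t ret = xmit_fixed_burst(tx_queue, &tx_pkts[nb_tx], num);

            nb_tx += ret;
            if (ret < num)
                break;
        }
        return nb_tx;
    }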