/f-stack/dpdk/drivers/net/bnxt/

bnxt_txr.c
      65  if (txr == NULL)                                              in bnxt_init_tx_ring_struct()
      67  txq->tx_ring = txr;                                           in bnxt_init_tx_ring_struct()
     171  tx_buf = &txr->tx_buf_ring[txr->tx_prod];                     in bnxt_start_xmit()
     175  txbd = &txr->tx_desc_ring[txr->tx_prod];                      in bnxt_start_xmit()
     213  txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);  in bnxt_start_xmit()
     216  &txr->tx_desc_ring[txr->tx_prod];                             in bnxt_start_xmit()
     321  txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);  in bnxt_start_xmit()
     322  tx_buf = &txr->tx_buf_ring[txr->tx_prod];                     in bnxt_start_xmit()
     325  txbd = &txr->tx_desc_ring[txr->tx_prod];                      in bnxt_start_xmit()
     335  txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);  in bnxt_start_xmit()
    [all …]
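The bnxt_start_xmit matches above all follow one pattern: write the software bookkeeping entry and the hardware descriptor at tx_prod, then advance tx_prod with RING_NEXT, which wraps at the power-of-two ring size. A minimal sketch of that pattern, assuming simplified types rather than the driver's actual definitions:

    #include <stdint.h>

    struct ring { uint16_t ring_mask; };      /* ring_size - 1, power of two */

    /* RING_NEXT-style advance: increment and wrap with the ring mask. */
    static inline uint16_t ring_next(const struct ring *r, uint16_t idx)
    {
        return (idx + 1) & r->ring_mask;
    }

    /* One slot per enqueue: record the mbuf, fill the descriptor, advance. */
    static void enqueue_one(const struct ring *r, uint16_t *tx_prod,
                            void **tx_buf_ring, void *mbuf,
                            uint64_t *tx_desc_ring, uint64_t desc)
    {
        tx_buf_ring[*tx_prod] = mbuf;       /* tx_buf = &txr->tx_buf_ring[...] */
        tx_desc_ring[*tx_prod] = desc;      /* txbd = &txr->tx_desc_ring[...]  */
        *tx_prod = ring_next(r, *tx_prod);  /* txr->tx_prod = RING_NEXT(...)   */
    }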
|
bnxt_rxtx_vec_common.h
     105  struct bnxt_tx_ring_info *txr = txq->tx_ring;         in bnxt_tx_cmp_vec_fast() local
     106  uint32_t ring_mask = txr->tx_ring_struct->ring_mask;  in bnxt_tx_cmp_vec_fast()
     108  uint16_t cons = txr->tx_cons;                         in bnxt_tx_cmp_vec_fast()
     114  tx_buf = &txr->tx_buf_ring[cons];                     in bnxt_tx_cmp_vec_fast()
     122  txr->tx_cons = cons;                                  in bnxt_tx_cmp_vec_fast()
     128  struct bnxt_tx_ring_info *txr = txq->tx_ring;         in bnxt_tx_cmp_vec() local
     130  uint16_t cons = txr->tx_cons;                         in bnxt_tx_cmp_vec()
     132  uint32_t ring_mask = txr->tx_ring_struct->ring_mask;  in bnxt_tx_cmp_vec()
     138  tx_buf = &txr->tx_buf_ring[cons];                     in bnxt_tx_cmp_vec()
     154  txr->tx_cons = cons;                                  in bnxt_tx_cmp_vec()
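These are the two completion paths: both walk tx_cons forward under ring_mask, freeing the mbuf recorded in tx_buf_ring for each finished slot, then publish the new consumer index once at the end. A sketch of the fast-path loop, assuming a simplified bnxt_tx_ring_info layout (the real fast path batches frees through the mempool):

    #include <stdint.h>
    #include <rte_mbuf.h>

    struct tx_ring_info {
        uint16_t tx_cons;
        uint32_t ring_mask;
        struct rte_mbuf **tx_buf_ring;
    };

    static void tx_cmp_fast(struct tx_ring_info *txr, unsigned int nr_pkts)
    {
        uint16_t cons = txr->tx_cons;

        while (nr_pkts--) {
            struct rte_mbuf **tx_buf = &txr->tx_buf_ring[cons];

            rte_pktmbuf_free(*tx_buf);           /* this slot's mbuf is done */
            *tx_buf = NULL;
            cons = (cons + 1) & txr->ring_mask;
        }
        txr->tx_cons = cons;                     /* publish once, at the end */
    }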
|
bnxt_rxtx_vec_sse.c
     353  struct bnxt_tx_ring_info *txr = txq->tx_ring;               in bnxt_xmit_fixed_burst_vec() local
     354  uint16_t tx_prod = txr->tx_prod;                            in bnxt_xmit_fixed_burst_vec()
     359  txbd = &txr->tx_desc_ring[tx_prod];                         in bnxt_xmit_fixed_burst_vec()
     360  tx_buf = &txr->tx_buf_ring[tx_prod];                        in bnxt_xmit_fixed_burst_vec()
     398  tx_prod = RING_ADV(txr->tx_ring_struct, tx_prod, nb_pkts);  in bnxt_xmit_fixed_burst_vec()
     399  bnxt_db_write(&txr->tx_db, tx_prod);                        in bnxt_xmit_fixed_burst_vec()
     401  txr->tx_prod = tx_prod;                                     in bnxt_xmit_fixed_burst_vec()
     412  struct bnxt_tx_ring_info *txr = txq->tx_ring;               in bnxt_xmit_pkts_vec() local
     413  uint16_t ring_size = txr->tx_ring_struct->ring_size;        in bnxt_xmit_pkts_vec()
     439  ring_size - (txr->tx_prod & (ring_size - 1)));              in bnxt_xmit_pkts_vec()
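The SSE burst path differs from bnxt_start_xmit in how it publishes work: descriptors for the whole burst are written first, tx_prod is advanced once with RING_ADV, and a single bnxt_db_write tells the NIC about all nb_pkts packets. A sketch of that batch-doorbell idea, with an assumed doorbell layout (key bits OR'd with the producer index); the real helper also takes care of memory barriers and 64-bit doorbells:

    #include <stdint.h>

    struct tx_db { volatile uint32_t *db_reg; uint32_t db_key; };

    static inline uint16_t ring_adv(uint32_t ring_mask, uint16_t idx, uint16_t n)
    {
        return (idx + n) & ring_mask;            /* RING_ADV equivalent */
    }

    static void publish_burst(struct tx_db *db, uint16_t *tx_prod,
                              uint32_t ring_mask, uint16_t nb_pkts)
    {
        uint16_t prod = ring_adv(ring_mask, *tx_prod, nb_pkts);

        *db->db_reg = db->db_key | prod;         /* one MMIO write per burst */
        *tx_prod = prod;
    }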
|
bnxt_rxtx_vec_neon.c
     355  struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_xmit_fixed_burst_vec() local
     356  uint16_t prod = txr->tx_prod;                  in bnxt_xmit_fixed_burst_vec()
     373  tx_buf = &txr->tx_buf_ring[prod];              in bnxt_xmit_fixed_burst_vec()
     377  txbd = &txr->tx_desc_ring[prod];               in bnxt_xmit_fixed_burst_vec()
     382  prod = RING_NEXT(txr->tx_ring_struct, prod);   in bnxt_xmit_fixed_burst_vec()
     393  bnxt_db_write(&txr->tx_db, prod);              in bnxt_xmit_fixed_burst_vec()
     395  txr->tx_prod = prod;                           in bnxt_xmit_fixed_burst_vec()
|
bnxt_ring.c
     726  struct bnxt_tx_ring_info *txr = txq->tx_ring;                  in bnxt_alloc_hwrm_rings() local
     727  struct bnxt_ring *ring = txr->tx_ring_struct;                  in bnxt_alloc_hwrm_rings()
     748  bnxt_set_db(bp, &txr->tx_db, ring_type, i, ring->fw_ring_id);  in bnxt_alloc_hwrm_rings()
|
bnxt_hwrm.c
    2589  struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_free_all_hwrm_rings() local
    2590  struct bnxt_ring *ring = txr->tx_ring_struct;  in bnxt_free_all_hwrm_rings()
    2597  memset(txr->tx_desc_ring, 0,                   in bnxt_free_all_hwrm_rings()
    2598  txr->tx_ring_struct->ring_size *               in bnxt_free_all_hwrm_rings()
    2599  sizeof(*txr->tx_desc_ring));                   in bnxt_free_all_hwrm_rings()
    2600  memset(txr->tx_buf_ring, 0,                    in bnxt_free_all_hwrm_rings()
    2601  txr->tx_ring_struct->ring_size *               in bnxt_free_all_hwrm_rings()
    2602  sizeof(*txr->tx_buf_ring));                    in bnxt_free_all_hwrm_rings()
    2603  txr->tx_prod = 0;                              in bnxt_free_all_hwrm_rings()
    2604  txr->tx_cons = 0;                              in bnxt_free_all_hwrm_rings()
|
bnxt_ethdev.c
    2968  struct bnxt_tx_ring_info *txr;     in bnxt_tx_descriptor_status_op() local
    2983  txr = txq->tx_ring;                in bnxt_tx_descriptor_status_op()
    2999  tx_buf = &txr->tx_buf_ring[cons];  in bnxt_tx_descriptor_status_op()
|
/f-stack/dpdk/drivers/net/enetc/

enetc_ethdev.c
     220  enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)   in enetc_alloc_txbdr() argument
     225  txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);   in enetc_alloc_txbdr()
     226  if (txr->q_swbd == NULL)                                     in enetc_alloc_txbdr()
     230  txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);  in enetc_alloc_txbdr()
     231  if (txr->bd_base == NULL) {                                  in enetc_alloc_txbdr()
     232  rte_free(txr->q_swbd);                                       in enetc_alloc_txbdr()
     233  txr->q_swbd = NULL;                                          in enetc_alloc_txbdr()
     237  txr->bd_count = nb_desc;                                     in enetc_alloc_txbdr()
     238  txr->next_to_clean = 0;                                      in enetc_alloc_txbdr()
     239  txr->next_to_use = 0;                                        in enetc_alloc_txbdr()
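The enetc matches show a common two-stage allocation with unwind: the software bookkeeping array is allocated first, and if the hardware BD array then fails, the first allocation is released before returning. A sketch of the same error-handling shape, assuming hypothetical element sizes and a stand-in alignment constant for ENETC_BD_RING_ALIGN:

    #include <errno.h>
    #include <stdint.h>
    #include <rte_malloc.h>

    #define BD_RING_ALIGN 128            /* assumption, not the enetc value */

    struct bdr {
        void *q_swbd;                    /* software context, one per BD */
        void *bd_base;                   /* hardware descriptor array    */
        uint16_t bd_count, next_to_clean, next_to_use;
    };

    static int alloc_txbdr(struct bdr *txr, uint16_t nb_desc,
                           size_t swbd_sz, size_t bd_sz)
    {
        txr->q_swbd = rte_malloc(NULL, swbd_sz * nb_desc, BD_RING_ALIGN);
        if (txr->q_swbd == NULL)
            return -ENOMEM;

        txr->bd_base = rte_malloc(NULL, bd_sz * nb_desc, BD_RING_ALIGN);
        if (txr->bd_base == NULL) {
            rte_free(txr->q_swbd);       /* unwind the first allocation */
            txr->q_swbd = NULL;
            return -ENOMEM;
        }

        txr->bd_count = nb_desc;
        txr->next_to_clean = 0;
        txr->next_to_use = 0;
        return 0;
    }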
|
/f-stack/dpdk/drivers/net/virtio/

virtqueue.h
     692  struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;  in virtqueue_enqueue_xmit_packed() local
     735  RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);                  in virtqueue_enqueue_xmit_packed()
     741  hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;               in virtqueue_enqueue_xmit_packed()
     744  start_dp = txr[idx].tx_packed_indir;                           in virtqueue_enqueue_xmit_packed()
     751  RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);                           in virtqueue_enqueue_xmit_packed()
     753  hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;               in virtqueue_enqueue_xmit_packed()
|
virtio_ethdev.c
     607  struct virtio_tx_region *txr;                   in virtio_init_queue() local
     610  txr = hdr_mz->addr;                             in virtio_init_queue()
     611  memset(txr, 0, vq_size * sizeof(*txr));         in virtio_init_queue()
     615  struct vring_desc *start_dp = txr[i].tx_indir;  in virtio_init_queue()
     617  RTE_DIM(txr[i].tx_indir));                      in virtio_init_queue()
     619  + i * sizeof(*txr)                              in virtio_init_queue()
     626  txr[i].tx_packed_indir;                         in virtio_init_queue()
     628  RTE_DIM(txr[i].tx_packed_indir));               in virtio_init_queue()
     630  + i * sizeof(*txr)                              in virtio_init_queue()
|
virtio_rxtx.c
     531  struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;  in virtqueue_enqueue_xmit() local
     569  RTE_PTR_DIFF(&txr[idx].tx_indir, txr);                         in virtqueue_enqueue_xmit()
     572  hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;               in virtqueue_enqueue_xmit()
     575  start_dp = txr[idx].tx_indir;                                  in virtqueue_enqueue_xmit()
     582  RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);                           in virtqueue_enqueue_xmit()
     585  hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;               in virtqueue_enqueue_xmit()
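In all three virtio files, txr points at one memzone holding a per-descriptor array of tx regions, and RTE_PTR_DIFF(&txr[idx].field, txr) yields the byte offset that is added to the memzone's IOVA to form the device-visible address of that header or indirect table. A sketch with a simplified region layout (the real struct virtio_tx_region differs):

    #include <stdint.h>
    #include <rte_common.h>              /* RTE_PTR_DIFF */

    struct indir_desc { uint64_t addr; uint32_t len; uint16_t flags, next; };

    struct tx_region {
        uint8_t tx_hdr[12];              /* virtio-net header, size assumed */
        struct indir_desc tx_indir[8];   /* indirect descriptor table       */
    };

    /* Device-visible address of txr[idx].tx_hdr. */
    static uint64_t hdr_iova(uint64_t mz_iova, struct tx_region *txr, int idx)
    {
        return mz_iova + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
    }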
|
/f-stack/dpdk/drivers/net/e1000/

em_rxtx.c
     299  volatile struct e1000_data_desc *txr = txq->tx_ring;                   in em_xmit_cleanup() local
     312  if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD)) in em_xmit_cleanup()
     342  txr[desc_to_clean_to].upper.fields.status = 0;                         in em_xmit_cleanup()
     371  volatile struct e1000_data_desc *txr;                                  in eth_em_xmit_pkts() local
     391  txr = txq->tx_ring;                                                    in eth_em_xmit_pkts()
     522  &txr[tx_id];                                                           in eth_em_xmit_pkts()
     550  txd = &txr[tx_id];                                                     in eth_em_xmit_pkts()
|
igb_rxtx.c
     384  volatile union e1000_adv_tx_desc *txr;                in eth_igb_xmit_pkts() local
     405  txr = txq->tx_ring;                                   in eth_igb_xmit_pkts()
     500  if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {  in eth_igb_xmit_pkts()
     544  &txr[tx_id];                                          in eth_igb_xmit_pkts()
     570  txd = &txr[tx_id];                                    in eth_igb_xmit_pkts()
    1293  volatile union e1000_adv_tx_desc *txr;                in igb_tx_done_cleanup() local
    1304  txr = txq->tx_ring;                                   in igb_tx_done_cleanup()
    1330  if (txr[tx_last].wb.status &                          in igb_tx_done_cleanup()
|
/f-stack/dpdk/drivers/net/igc/

igc_txrx.c
    1626  volatile union igc_adv_tx_desc * const txr = txq->tx_ring;  in igc_xmit_pkts() local
    1734  if (!(txr[tx_end].wb.status & IGC_TXD_STAT_DD)) {           in igc_xmit_pkts()
    1780  igc_adv_tx_context_desc *)&txr[tx_id];                      in igc_xmit_pkts()
    1812  txd = &txr[tx_id];                                          in igc_xmit_pkts()
    2059  volatile union igc_adv_tx_desc *txr;                        in eth_igc_tx_done_cleanup() local
    2071  txr = txq->tx_ring;                                         in eth_igc_tx_done_cleanup()
    2099  if (!(txr[tx_last].wb.status &                              in eth_igc_tx_done_cleanup()
|
/f-stack/dpdk/drivers/net/i40e/

i40e_rxtx.c
    1018  volatile struct i40e_tx_desc *txr;                 in i40e_xmit_pkts() local
    1037  txr = txq->tx_ring;                                in i40e_xmit_pkts()
    1123  &txr[tx_id];                                       in i40e_xmit_pkts()
    1178  txd = &txr[tx_id];                                 in i40e_xmit_pkts()
    1204  txd = &txr[tx_id];                                 in i40e_xmit_pkts()
    1358  volatile struct i40e_tx_desc *txr = txq->tx_ring;  in tx_xmit_pkts() local
    1378  txr[txq->tx_next_rs].cmd_type_offset_bsz |=        in tx_xmit_pkts()
    1391  txr[txq->tx_next_rs].cmd_type_offset_bsz |=        in tx_xmit_pkts()
|
/f-stack/dpdk/drivers/net/iavf/

iavf_rxtx.c
    2075  volatile struct iavf_tx_desc *txr;           in iavf_xmit_pkts() local
    2096  txr = txq->tx_ring;                          in iavf_xmit_pkts()
    2174  &txr[tx_id];                                 in iavf_xmit_pkts()
    2191  IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);  in iavf_xmit_pkts()
    2199  txd = &txr[tx_id];                           in iavf_xmit_pkts()
|
/f-stack/dpdk/drivers/net/txgbe/

txgbe_rxtx.c
     587  volatile struct txgbe_tx_desc *txr = txq->tx_ring;  in txgbe_xmit_cleanup() local
     601  status = txr[desc_to_clean_to].dw3;                 in txgbe_xmit_cleanup()
     635  txr[desc_to_clean_to].dw3 = 0;                      in txgbe_xmit_cleanup()
     686  volatile struct txgbe_tx_desc *txr;                 in txgbe_xmit_pkts() local
     709  txr = txq->tx_ring;                                 in txgbe_xmit_pkts()
     887  &txr[tx_id];                                        in txgbe_xmit_pkts()
     920  txd = &txr[tx_id];                                  in txgbe_xmit_pkts()
|
/f-stack/dpdk/drivers/net/ixgbe/

ixgbe_rxtx.c
     570  volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;  in ixgbe_xmit_cleanup() local
     584  status = txr[desc_to_clean_to].wb.status;              in ixgbe_xmit_cleanup()
     615  txr[desc_to_clean_to].wb.status = 0;                   in ixgbe_xmit_cleanup()
     632  volatile union ixgbe_adv_tx_desc *txr;                 in ixgbe_xmit_pkts() local
     658  txr = txq->tx_ring;                                    in ixgbe_xmit_pkts()
     852  &txr[tx_id];                                           in ixgbe_xmit_pkts()
     889  txd = &txr[tx_id];                                     in ixgbe_xmit_pkts()
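ixgbe_xmit_cleanup is the canonical form of the cleanup logic that em, igb, igc, and txgbe also implement: test the Descriptor Done (DD) write-back bit on the descriptor that closed the oldest in-flight burst, and only if it is set, zero that status and recycle every slot up to it. A generic sketch with assumed field names and DD bit value:

    #include <stdint.h>

    #define TXD_STAT_DD 0x1u             /* DD bit; per-NIC value assumed */

    struct txq {
        volatile uint32_t *status;       /* write-back status per descriptor */
        uint16_t nb_tx_desc, last_desc_cleaned, nb_tx_free;
    };

    static int xmit_cleanup(struct txq *q, uint16_t desc_to_clean_to)
    {
        uint16_t cleaned;

        if (!(q->status[desc_to_clean_to] & TXD_STAT_DD))
            return -1;                   /* hardware not done; retry later */

        /* Clear so the same completion is not observed twice. */
        q->status[desc_to_clean_to] = 0;

        cleaned = (desc_to_clean_to >= q->last_desc_cleaned) ?
            desc_to_clean_to - q->last_desc_cleaned :
            desc_to_clean_to + q->nb_tx_desc - q->last_desc_cleaned;

        q->last_desc_cleaned = desc_to_clean_to;
        q->nb_tx_free += cleaned;
        return 0;
    }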
|
/f-stack/dpdk/drivers/net/ice/

ice_rxtx.c
    2934  volatile struct ice_tx_desc *txr = txq->tx_ring;  in tx_xmit_pkts() local
    2954  txr[txq->tx_next_rs].cmd_type_offset_bsz |=       in tx_xmit_pkts()
    2967  txr[txq->tx_next_rs].cmd_type_offset_bsz |=       in tx_xmit_pkts()
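The i40e and ice tx_xmit_pkts matches both set a Report Status (RS) flag on the descriptor at tx_next_rs, so the NIC writes back one completion per tx_rs_thresh descriptors instead of per packet. A sketch of that bookkeeping, with a simplified RS flag value and field names following the drivers:

    #include <stdint.h>

    #define TX_CMD_RS (1ull << 4)        /* assumed RS bit in the cmd qword */

    struct simple_txq {
        volatile uint64_t *cmd_type_offset_bsz;  /* one qword per descriptor */
        uint16_t tx_next_rs, tx_rs_thresh, nb_tx_desc;
    };

    static void mark_rs_if_due(struct simple_txq *q, uint16_t tx_tail)
    {
        if (tx_tail > q->tx_next_rs) {
            /* One write-back covers the whole batch up to tx_next_rs. */
            q->cmd_type_offset_bsz[q->tx_next_rs] |= TX_CMD_RS;
            q->tx_next_rs += q->tx_rs_thresh;
            if (q->tx_next_rs >= q->nb_tx_desc)
                q->tx_next_rs = q->nb_tx_desc - 1;
        }
    }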
|