| /dpdk/drivers/net/enetc/ |
| H A D | enetc_rxtx.c |
    35  tx_swbd_base = tx_ring->q_swbd;  in enetc_clean_tx_ring()
    36  bd_count = tx_ring->bd_count;  in enetc_clean_tx_ring()
    37  i = tx_ring->next_to_clean;  in enetc_clean_tx_ring()
    74  tx_ring->next_to_clean = i;  in enetc_clean_tx_ring()
    89  i = tx_ring->next_to_use;  in enetc_xmit_pkts()
    91  bds_to_use = enetc_bd_unused(tx_ring);  in enetc_xmit_pkts()
    98  txbd = ENETC_TXBD(*tx_ring, i);  in enetc_xmit_pkts()
    99  tx_swbd = &tx_ring->q_swbd[i];  in enetc_xmit_pkts()
   118  enetc_clean_tx_ring(tx_ring);  in enetc_xmit_pkts()
   120  tx_ring->next_to_use = i;  in enetc_xmit_pkts()
    [all …]
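The enetc hits above show the usual software index pair on a TX descriptor ring: enetc_clean_tx_ring() advances next_to_clean past completed descriptors, while enetc_xmit_pkts() checks free space with enetc_bd_unused(), fills entries at next_to_use, and calls the cleaner when the ring runs low. A minimal, driver-agnostic sketch of that index arithmetic; the struct and names below are illustrative, not the real enetc_bdr definition:

```c
#include <stdint.h>

/* Illustrative software ring state; not the real struct enetc_bdr. */
struct toy_tx_ring {
	uint16_t bd_count;      /* total descriptors */
	uint16_t next_to_use;   /* producer index */
	uint16_t next_to_clean; /* consumer index */
};

static inline uint16_t toy_bd_unused(const struct toy_tx_ring *r)
{
	/* Free slots, leaving one gap so "full" and "empty" stay distinct. */
	return (uint16_t)((r->next_to_clean + r->bd_count - r->next_to_use - 1)
			  % r->bd_count);
}

/* Pretend "hardware" completed n descriptors; the cleaner just advances. */
static void toy_clean_tx_ring(struct toy_tx_ring *r, uint16_t completed)
{
	r->next_to_clean = (uint16_t)((r->next_to_clean + completed) % r->bd_count);
}

static uint16_t toy_xmit(struct toy_tx_ring *r, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	while (sent < nb_pkts) {
		if (toy_bd_unused(r) == 0)
			break;          /* ring full: stop, caller retries later */
		/* ...fill the descriptor at r->next_to_use here... */
		r->next_to_use = (uint16_t)((r->next_to_use + 1) % r->bd_count);
		sent++;
	}
	return sent;
}
```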
|
| H A D | enetc_ethdev.c |
   270  int idx = tx_ring->index;  in enetc_setup_txbdr()
   298  struct enetc_bdr *tx_ring;  in enetc_tx_queue_setup()  local
   308  if (tx_ring == NULL) {  in enetc_tx_queue_setup()
   318  tx_ring->index = queue_idx;  in enetc_tx_queue_setup()
   319  tx_ring->ndev = dev;  in enetc_tx_queue_setup()
   336  rte_free(tx_ring);  in enetc_tx_queue_setup()
   364  i = tx_ring->next_to_clean;  in enetc_tx_queue_release()
   377  enetc_free_bdr(tx_ring);  in enetc_tx_queue_release()
   378  rte_free(tx_ring);  in enetc_tx_queue_release()
   790  struct enetc_bdr *tx_ring;  in enetc_tx_queue_start()  local
    [all …]
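The enetc_tx_queue_setup()/enetc_tx_queue_release() hits show the allocate-initialize-free shape common to PMD queue setup: the ring structure comes from the DPDK heap, is tagged with its queue index and device, and is released with rte_free() both on the error path and at teardown. A hedged sketch of that shape, assuming rte_zmalloc_socket()/rte_free(); toy_tx_queue_setup() and its fields are invented for illustration:

```c
#include <stdint.h>
#include <rte_common.h>
#include <rte_malloc.h>

/* Illustrative ring struct; the real driver uses struct enetc_bdr. */
struct toy_bdr {
	uint16_t index;
	void *ndev;
};

/* Sketch of the setup shape: allocate, initialize, undo on failure. */
static struct toy_bdr *
toy_tx_queue_setup(void *dev, uint16_t queue_idx, int socket_id)
{
	struct toy_bdr *tx_ring;
	void *bd_mem;

	tx_ring = rte_zmalloc_socket("toy_txq", sizeof(*tx_ring),
				     RTE_CACHE_LINE_SIZE, socket_id);
	if (tx_ring == NULL)
		return NULL;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;

	/* Descriptor memory; the size here is an arbitrary placeholder. */
	bd_mem = rte_zmalloc_socket("toy_txbd", 512 * 16,
				    RTE_CACHE_LINE_SIZE, socket_id);
	if (bd_mem == NULL) {
		rte_free(tx_ring);   /* undo the first allocation on error */
		return NULL;
	}

	/* A real PMD would hang bd_mem off tx_ring and register the queue. */
	return tx_ring;
}
```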
|
| /dpdk/drivers/net/ena/ |
| H A D | ena_ethdev.c |
   685  tx_ring->port_id, tx_ring->id, req_id);  in validate_tx_req_id()
   688  req_id, tx_ring->port_id, tx_ring->id);  in validate_tx_req_id()
   948  queues = adapter->tx_ring;  in ena_queue_start_all()
  1362  queues = adapter->tx_ring;  in ena_queue_stop_all()
  1871  struct ena_ring *tx_ring;  in check_for_tx_completions()  local
  1885  tx_ring = &adapter->tx_ring[qid];  in check_for_tx_completions()
  2975  tx_ring->id);  in ena_xmit_mbuf()
  2995  tx_ring->tx_stats.cnt++;  in ena_xmit_mbuf()
  2999  tx_ring->size_mask);  in ena_xmit_mbuf()
  3045  tx_ring->size_mask);  in ena_tx_cleanup()
    [all …]
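validate_tx_req_id() in the ena driver logs the port and queue when a TX completion carries an out-of-range or stale request id, so a corrupted completion cannot free the wrong mbuf. A small sketch of such a defensive check; the fields and error handling below are illustrative, not the actual ena_ring layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative per-queue state; not the real struct ena_ring. */
struct toy_tx_ring {
	uint16_t port_id;
	uint16_t id;          /* queue index */
	uint16_t ring_size;
	void   **tx_info;     /* per-req_id bookkeeping (mbuf pointers here) */
};

/* Returns 0 if req_id refers to an in-flight packet, -1 otherwise. */
static int
toy_validate_tx_req_id(const struct toy_tx_ring *tx_ring, uint16_t req_id)
{
	if (req_id >= tx_ring->ring_size || tx_ring->tx_info[req_id] == NULL) {
		fprintf(stderr, "bad req_id %u on port %u queue %u\n",
			(unsigned)req_id, (unsigned)tx_ring->port_id,
			(unsigned)tx_ring->id);
		return -1;    /* the driver would flag the queue for reset */
	}
	return 0;
}
```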
|
| /dpdk/drivers/net/bnxt/ |
| H A D | bnxt_txq.c |
    60  if (!txq || !txq->tx_ring)  in bnxt_tx_queue_release_mbufs()
    63  sw_ring = txq->tx_ring->tx_buf_ring;  in bnxt_tx_queue_release_mbufs()
    65  for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {  in bnxt_tx_queue_release_mbufs()
    96  if (txq->tx_ring) {  in bnxt_tx_queue_release_op()
    97  bnxt_free_ring(txq->tx_ring->tx_ring_struct);  in bnxt_tx_queue_release_op()
    98  rte_free(txq->tx_ring->tx_ring_struct);  in bnxt_tx_queue_release_op()
    99  rte_free(txq->tx_ring);  in bnxt_tx_queue_release_op()
|
| H A D | bnxt_txr.h |
    32  return ((txq->tx_ring->tx_raw_prod - txq->tx_ring->tx_raw_cons) &  in bnxt_tx_bds_in_hw()
    33  txq->tx_ring->tx_ring_struct->ring_mask);  in bnxt_tx_bds_in_hw()
    41  return ((txq->tx_ring->tx_ring_struct->ring_size -  in bnxt_tx_avail()
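bnxt_tx_bds_in_hw() and bnxt_tx_avail() rely on free-running producer/consumer counters over a power-of-two ring: masking their difference gives the number of descriptors the hardware still owns, and subtracting that from the ring size gives what software may use. A self-contained sketch of that arithmetic; the field names and the one-slot reservation below are illustrative:

```c
#include <stdint.h>

/* Illustrative state: a power-of-two ring tracked by free-running
 * producer/consumer counters, as the fragments above suggest.
 * Not the real bnxt structures. */
struct toy_txr {
	uint32_t raw_prod;   /* free-running producer count */
	uint32_t raw_cons;   /* free-running consumer count */
	uint32_t ring_size;  /* must be a power of two */
	uint32_t ring_mask;  /* ring_size - 1 */
};

/* Descriptors currently owned by hardware. */
static inline uint32_t toy_bds_in_hw(const struct toy_txr *r)
{
	return (r->raw_prod - r->raw_cons) & r->ring_mask;
}

/* Descriptors still available to software (one slot kept as a gap here). */
static inline uint32_t toy_tx_avail(const struct toy_txr *r)
{
	return r->ring_size - toy_bds_in_hw(r) - 1;
}
```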
|
| H A D | bnxt_txr.c |
    33  bnxt_free_ring(txq->tx_ring->tx_ring_struct);  in bnxt_free_tx_rings()
    34  rte_free(txq->tx_ring->tx_ring_struct);  in bnxt_free_tx_rings()
    35  rte_free(txq->tx_ring);  in bnxt_free_tx_rings()
    51  struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_init_one_tx_ring()
    71  txq->tx_ring = txr;  in bnxt_init_tx_ring_struct()
   131  struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_start_xmit()
   361  struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_tx_cmp_fast()
   393  struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_tx_cmp()
   524  bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_raw_prod);  in bnxt_xmit_pkts()
|
| H A D | bnxt_ring.c |
   112  struct bnxt_ring *tx_ring, *rx_ring;  in bnxt_alloc_rings()  local
   223  tx_ring = tx_ring_info->tx_ring_struct;  in bnxt_alloc_rings()
   227  tx_ring->bd_dma = mz_phys_addr + tx_ring_start;  in bnxt_alloc_rings()
   228  tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;  in bnxt_alloc_rings()
   229  tx_ring->mem_zone = (const void *)mz;  in bnxt_alloc_rings()
   231  if (!tx_ring->bd)  in bnxt_alloc_rings()
   233  if (tx_ring->vmem_size) {  in bnxt_alloc_rings()
   234  tx_ring->vmem =  in bnxt_alloc_rings()
   237  (struct rte_mbuf **)tx_ring->vmem;  in bnxt_alloc_rings()
   709  ring = txq->tx_ring->tx_ring_struct;  in bnxt_init_all_rings()
    [all …]
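bnxt_alloc_rings() carves a DMA-capable memzone into per-ring regions and records both the CPU virtual address and the bus address (bd/bd_dma), plus an optional vmem area for the queued mbuf pointers. A reduced sketch of that allocation pattern using rte_memzone_reserve(); the toy_* names and the single-zone layout below are assumptions, not the bnxt code:

```c
#include <stdint.h>
#include <errno.h>
#include <rte_memzone.h>
#include <rte_mbuf.h>

/* Illustrative ring bookkeeping, loosely modelled on the fields visible above. */
struct toy_hw_ring {
	void *bd;                       /* descriptor array, virtual address */
	rte_iova_t bd_dma;              /* descriptor array, bus address for the NIC */
	struct rte_mbuf **vmem;         /* software copies of the queued mbufs */
	const struct rte_memzone *mem_zone;
};

static int
toy_alloc_tx_ring(struct toy_hw_ring *ring, const char *name,
		  uint16_t nb_desc, size_t desc_sz, int socket_id)
{
	/* One zone holds both the descriptor array and the mbuf pointer array. */
	size_t bd_len = (size_t)nb_desc * desc_sz;
	size_t vmem_len = (size_t)nb_desc * sizeof(struct rte_mbuf *);
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve(name, bd_len + vmem_len, socket_id,
				 RTE_MEMZONE_IOVA_CONTIG);
	if (mz == NULL)
		return -ENOMEM;

	ring->mem_zone = mz;
	ring->bd = mz->addr;
	ring->bd_dma = mz->iova;        /* this is what gets programmed into the NIC */
	ring->vmem = (struct rte_mbuf **)((char *)mz->addr + bd_len);
	return 0;
}
```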
|
| H A D | bnxt_rxtx_vec_common.h |
   105  struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_tx_cmp_vec_fast()
   136  struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_tx_cmp_vec()
|
| H A D | bnxt_txq.h | 29 struct bnxt_tx_ring_info *tx_ring; member
|
| /dpdk/app/pdump/ |
| H A D | main.c |
   124  struct rte_ring *tx_ring;  member
   509  rte_ring_free(pt->tx_ring);  in cleanup_rings()
   673  pt->tx_ring = rte_ring_create(ring_name, pt->ring_size,  in create_mp_ring_vdev()
   675  if (pt->tx_ring == NULL) {  in create_mp_ring_vdev()
   782  pt->tx_ring = rte_ring_create(ring_name, pt->ring_size,  in create_mp_ring_vdev()
   784  if (pt->tx_ring == NULL) {  in create_mp_ring_vdev()
   840  pt->tx_ring,  in enable_pdump()
   848  pt->tx_ring, pt->mp, NULL);  in enable_pdump()
   867  pt->tx_ring, pt->mp, NULL);  in enable_pdump()
   871  pt->tx_ring, pt->mp, NULL);  in enable_pdump()
    [all …]
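The pdump app creates one rte_ring per captured direction and hands it to the pdump framework together with a mempool; copies of the port's TX mbufs are then enqueued into that ring for the capture thread to drain. A hedged sketch of that setup, assuming rte_ring_create() and rte_pdump_enable() as in current DPDK; the function and ring names are illustrative:

```c
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_lcore.h>
#include <rte_pdump.h>

/* Create a capture ring and start mirroring one TX queue into it.
 * Returns the ring on success, NULL on failure. */
static struct rte_ring *
toy_setup_tx_capture(uint16_t port_id, uint16_t queue_id,
		     struct rte_mempool *mp, unsigned int ring_size)
{
	struct rte_ring *tx_ring;

	/* flags = 0: default multi-producer / multi-consumer ring. */
	tx_ring = rte_ring_create("toy_pdump_tx", ring_size,
				  rte_socket_id(), 0);
	if (tx_ring == NULL)
		return NULL;

	/* Capture TX traffic of one queue into tx_ring, using mp for the copies. */
	if (rte_pdump_enable(port_id, queue_id, RTE_PDUMP_FLAG_TX,
			     tx_ring, mp, NULL) < 0) {
		rte_ring_free(tx_ring);
		return NULL;
	}
	return tx_ring;
}
```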
|
| /dpdk/examples/qos_sched/ |
| H A D | main.c |
    66  flow->tx_thread.tx_ring = flow->tx_ring;  in app_main_loop()
    75  flow->wt_thread.tx_ring = flow->tx_ring;  in app_main_loop()
|
| H A D | main.h |
    87  struct rte_ring *tx_ring;  member
   106  struct rte_ring *tx_ring;  member
|
| H A D | app_thread.c |
   164  retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,  in app_tx_thread()
   216  while (rte_ring_sp_enqueue_bulk(conf->tx_ring,  in app_worker_thread()
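In qos_sched, the worker pushes bursts into tx_ring with a single-producer bulk enqueue and the TX thread drains it with a single-consumer bulk dequeue; the bulk variants move either the whole burst or nothing, which keeps batches intact across the handoff. A sketch of that pattern, with burst size, retry policy, and names chosen for illustration:

```c
#include <rte_ring.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#define TOY_BURST 32

/* Worker side: retry until the whole burst fits (bulk = all-or-nothing),
 * as the worker hit above also loops on the bulk enqueue. */
static void
toy_worker_push(struct rte_ring *tx_ring, struct rte_mbuf **mbufs, unsigned int n)
{
	while (rte_ring_sp_enqueue_bulk(tx_ring, (void **)mbufs, n, NULL) == 0)
		;   /* ring full: spin until the TX thread drains it */
}

/* TX side: dequeue a full burst and hand it to the port. */
static void
toy_tx_drain(struct rte_ring *tx_ring, uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *mbufs[TOY_BURST];
	unsigned int n;
	uint16_t sent;

	n = rte_ring_sc_dequeue_bulk(tx_ring, (void **)mbufs, TOY_BURST, NULL);
	if (n == 0)
		return;   /* fewer than TOY_BURST packets queued right now */

	sent = rte_eth_tx_burst(port_id, queue_id, mbufs, (uint16_t)n);
	while (sent < n)  /* free anything the port did not accept */
		rte_pktmbuf_free(mbufs[sent++]);
}
```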
|
| /dpdk/drivers/net/ixgbe/ |
| H A D | ixgbe_rxtx_vec_common.h |
    86  status = txq->tx_ring[txq->tx_next_dd].wb.status;  in ixgbe_tx_free_bufs()
   218  txq->tx_ring[i] = zeroed_desc;  in _ixgbe_reset_tx_queue_vec()
   222  volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];  in _ixgbe_reset_tx_queue_vec()
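ixgbe_tx_free_bufs() inspects only the write-back status of the descriptor at tx_next_dd: once its DD (descriptor done) bit is set, an entire tx_rs_thresh-sized batch of mbufs can be recycled in one pass. A simplified sketch of that batched check; the descriptor layout and bit value below are placeholders, not the real ixgbe_adv_tx_desc:

```c
#include <stdint.h>

#define TOY_DD_BIT  (1u << 0)   /* illustrative "descriptor done" flag */

/* Illustrative descriptor/queue state modelled on the fragment above. */
struct toy_desc { volatile uint32_t wb_status; };

struct toy_txq {
	struct toy_desc *tx_ring;
	uint16_t nb_desc;       /* ring size */
	uint16_t tx_next_dd;    /* descriptor that ends the oldest batch */
	uint16_t tx_rs_thresh;  /* batch size between RS/DD checkpoints */
	uint16_t nb_free;       /* software count of free descriptors */
};

/* Returns the number of descriptors released, 0 if hardware isn't done yet. */
static uint16_t toy_tx_free_bufs(struct toy_txq *q)
{
	if (!(q->tx_ring[q->tx_next_dd].wb_status & TOY_DD_BIT))
		return 0;       /* batch still owned by hardware */

	/* ...free/recycle the tx_rs_thresh mbufs covered by this batch... */
	q->nb_free = (uint16_t)(q->nb_free + q->tx_rs_thresh);
	q->tx_next_dd = (uint16_t)((q->tx_next_dd + q->tx_rs_thresh) %
				   q->nb_desc);
	return q->tx_rs_thresh;
}
```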
|
| H A D | ixgbe_rxtx_vec_neon.c |
   593  txdp = &txq->tx_ring[tx_id];  in ixgbe_xmit_fixed_burst_vec()
   613  txdp = &txq->tx_ring[tx_id];  in ixgbe_xmit_fixed_burst_vec()
   623  txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=  in ixgbe_xmit_fixed_burst_vec()
|
| /dpdk/drivers/net/sfc/ |
| H A D | sfc_repr_proxy_api.h | 36 uint16_t queue_id, struct rte_ring *tx_ring,
|
| /dpdk/drivers/net/e1000/ |
| H A D | em_rxtx.c |
   151  volatile struct e1000_data_desc *tx_ring; /**< TX ring address */  member
   300  volatile struct e1000_data_desc *txr = txq->tx_ring;  in em_xmit_cleanup()
   392  txr = txq->tx_ring;  in eth_em_xmit_pkts()
  1147  txq->tx_ring[i] = txd_init;  in em_reset_tx_queue()
  1280  tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;  in eth_em_tx_queue_setup()
  1311  txq->tx_ring = (struct e1000_data_desc *) tz->addr;  in eth_em_tx_queue_setup()
  1314  txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);  in eth_em_tx_queue_setup()
  1558  status = &txq->tx_ring[desc].upper.fields.status;  in eth_em_tx_descriptor_status()
  1927  sizeof(*txq->tx_ring));  in eth_em_tx_init()
  2030  tx_desc = &txq->tx_ring[txq->tx_tail];  in e1000_flush_tx_ring()
|
| /dpdk/drivers/net/bnx2x/ |
| H A D | bnx2x_rxtx.h | 50 union eth_tx_bd_types *tx_ring; /**< TX ring virtual address. */ member
|
| H A D | bnx2x_rxtx.c |
   279  txq->tx_ring = (union eth_tx_bd_types *) tz->addr;  in bnx2x_dev_tx_queue_setup()
   280  memset(txq->tx_ring, 0, tsize);  in bnx2x_dev_tx_queue_setup()
   296  tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;  in bnx2x_dev_tx_queue_setup()
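The bnx2x_dev_tx_queue_setup() hits zero the ring and then point the last BD of every page-sized chunk at the first BD of the following chunk, so the hardware walks a chain of pages that wraps back to the start. A simplified sketch of that chaining arithmetic; TOY_BD_PER_PAGE and the BD layout are stand-ins for the driver's TOTAL_TX_BD_PER_PAGE and eth_tx_bd_types:

```c
#include <stdint.h>
#include <string.h>

#define TOY_BD_PER_PAGE 256   /* BDs in one page-sized chunk (illustrative) */

/* Illustrative BD: either a data BD or a "next page" pointer BD. */
union toy_tx_bd {
	struct { uint32_t addr_lo, addr_hi, nbytes, flags; } data;
	struct { uint32_t addr_lo, addr_hi; } next_bd;
};

/* Link the last BD of each page to the first BD of the next page,
 * wrapping the final page back to page 0. */
static void
toy_chain_tx_pages(union toy_tx_bd *ring, uint64_t ring_iova, int nb_pages)
{
	int i;

	memset(ring, 0, sizeof(*ring) * TOY_BD_PER_PAGE * nb_pages);

	for (i = 1; i <= nb_pages; i++) {
		union toy_tx_bd *last = &ring[TOY_BD_PER_PAGE * i - 1];
		uint64_t next_page_iova = ring_iova +
			sizeof(*ring) * TOY_BD_PER_PAGE * (i % nb_pages);

		last->next_bd.addr_lo = (uint32_t)next_page_iova;
		last->next_bd.addr_hi = (uint32_t)(next_page_iova >> 32);
	}
}
```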
|
| /dpdk/drivers/net/hns3/ |
| H A D | hns3_rxtx.c |
  1124  desc = txq->tx_ring;  in hns3_init_txq()
  1467  desc = txq->tx_ring;  in hns3_alloc_txq_and_dma_zone()
  3438  struct hns3_desc *tx_ring = txq->tx_ring;  in hns3_parse_tunneling_params()  local
  3557  struct hns3_desc *tx_ring = txq->tx_ring;  in hns3_txd_enable_checksum()  local
  3919  struct hns3_desc *tx_ring = txq->tx_ring;  in hns3_parse_cksum()  local
  3993  desc = &txq->tx_ring[tx_next_clean];  in hns3_tx_free_buffer_simple()
  4149  struct hns3_desc *tx_ring;  in hns3_xmit_pkts()  local
  4167  tx_ring = txq->tx_ring;  in hns3_xmit_pkts()
  4217  desc = &tx_ring[tx_next_use];  in hns3_xmit_pkts()
  4227  desc = &tx_ring[tx_next_use];  in hns3_xmit_pkts()
    [all …]
|
| H A D | hns3_rxtx_vec.h | 65 tx_desc = &txq->tx_ring[txq->next_to_clean]; in hns3_tx_free_buffers()
|
| H A D | hns3_rxtx_vec_neon.h |
    51  tx_desc = &txq->tx_ring[next_to_use];  in hns3_xmit_fixed_burst_vec()
    71  tx_desc = &txq->tx_ring[next_to_use];  in hns3_xmit_fixed_burst_vec()
|
| /dpdk/drivers/net/bonding/ |
| H A D | rte_eth_bond_8023ad.c |
   630  int retval = rte_ring_enqueue(port->tx_ring, lacp_pkt);  in tx_machine()
  1086  RTE_ASSERT(port->tx_ring == NULL);  in bond_mode_8023ad_activate_slave()
  1127  port->tx_ring = rte_ring_create(mem_name,  in bond_mode_8023ad_activate_slave()
  1130  if (port->tx_ring == NULL) {  in bond_mode_8023ad_activate_slave()
  1166  while (rte_ring_dequeue(port->tx_ring, &pkt) == 0)  in bond_mode_8023ad_deactivate_slave()
  1366  if (rte_ring_enqueue(port->tx_ring, pkt) != 0) {  in bond_mode_8023ad_handle_slow_pkt()
  1670  return rte_ring_enqueue(port->tx_ring, lacp_pkt);  in rte_eth_bond_8023ad_ext_slowtx()
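In the 802.3ad bonding mode, each member port owns a small rte_ring for slow-protocol frames; the LACP state machine and the slow-packet handler enqueue into it and must handle a full ring without leaking the mbuf. A short sketch of that enqueue path; the drop-on-full policy here is an assumption for illustration:

```c
#include <rte_ring.h>
#include <rte_mbuf.h>

/* Queue one LACPDU for later transmission by the bonding TX path. */
static int
toy_queue_lacpdu(struct rte_ring *tx_ring, struct rte_mbuf *lacp_pkt)
{
	/* rte_ring_enqueue() returns 0 on success, -ENOBUFS when the ring is full. */
	if (rte_ring_enqueue(tx_ring, lacp_pkt) != 0) {
		rte_pktmbuf_free(lacp_pkt);   /* don't leak the mbuf on a full ring */
		return -1;
	}
	return 0;
}
```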
|
| /dpdk/drivers/net/i40e/ |
| H A D | i40e_rxtx_vec_altivec.c |
   572  txdp = &txq->tx_ring[tx_id];  in i40e_xmit_fixed_burst_vec()
   592  txdp = &txq->tx_ring[tx_id];  in i40e_xmit_fixed_burst_vec()
   602  txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=  in i40e_xmit_fixed_burst_vec()
|
| /dpdk/drivers/net/ice/ |
| H A D | ice_rxtx.c |
   937  ((volatile char *)txq->tx_ring)[i] = 0;  in ice_reset_tx_queue()
  1365  txq->tx_ring = tz->addr;  in ice_tx_queue_setup()
  2266  txq->tx_ring = (struct ice_tx_desc *)tz->addr;  in ice_fdir_setup_tx_resources()
  2603  volatile struct ice_tx_desc *txd = txq->tx_ring;  in ice_xmit_cleanup()
  2723  volatile struct ice_tx_desc *tx_ring;  in ice_xmit_pkts()  local
  2745  tx_ring = txq->tx_ring;  in ice_xmit_pkts()
  2825  &tx_ring[tx_id];  in ice_xmit_pkts()
  2865  txd = &tx_ring[tx_id];  in ice_xmit_pkts()
  2893  txd = &tx_ring[tx_id];  in ice_xmit_pkts()
  4312  (&txq->tx_ring[txq->tx_tail]);  in ice_fdir_programming()
    [all …]
|