/f-stack/dpdk/drivers/net/enetc/
enetc_rxtx.c
     35: tx_swbd_base = tx_ring->q_swbd;  in enetc_clean_tx_ring()
     36: bd_count = tx_ring->bd_count;  in enetc_clean_tx_ring()
     37: i = tx_ring->next_to_clean;  in enetc_clean_tx_ring()
     74: tx_ring->next_to_clean = i;  in enetc_clean_tx_ring()
     89: i = tx_ring->next_to_use;  in enetc_xmit_pkts()
     91: bds_to_use = enetc_bd_unused(tx_ring);  in enetc_xmit_pkts()
     98: txbd = ENETC_TXBD(*tx_ring, i);  in enetc_xmit_pkts()
     99: tx_swbd = &tx_ring->q_swbd[i];  in enetc_xmit_pkts()
    118: enetc_clean_tx_ring(tx_ring);  in enetc_xmit_pkts()
    120: tx_ring->next_to_use = i;  in enetc_xmit_pkts()
    [all …]
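
These enetc hits show the two-index scheme nearly every driver in this listing repeats: next_to_use is the producer cursor advanced in enetc_xmit_pkts(), next_to_clean the consumer cursor advanced as completions are reaped, with the hardware working in between. A minimal sketch of the cleanup half, using simplified stand-in types rather than the real struct enetc_bdr:

```c
#include <stdint.h>

/* Stand-ins for the driver structures; the real enetc ring also carries
 * hardware descriptors, DMA addresses and doorbell registers. */
struct swbd { void *buffer; };          /* software-side bookkeeping     */
struct bdr {
    struct swbd *q_swbd;                /* shadow ring of buffer handles */
    uint16_t bd_count;                  /* number of descriptors         */
    uint16_t next_to_use;               /* producer index                */
    uint16_t next_to_clean;             /* consumer index                */
};

/* Walk completed descriptors from next_to_clean toward next_to_use,
 * releasing each buffer, then publish the new consumer index; this
 * mirrors the shape of enetc_clean_tx_ring() in the hits above. */
static void clean_tx_ring(struct bdr *tx_ring)
{
    uint16_t i = tx_ring->next_to_clean;

    while (i != tx_ring->next_to_use) {
        tx_ring->q_swbd[i].buffer = NULL;   /* stand-in for freeing a buffer */
        if (++i == tx_ring->bd_count)
            i = 0;                          /* wrap at the end of the ring */
    }
    tx_ring->next_to_clean = i;
}
```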
|
enetc_ethdev.c
    256: int idx = tx_ring->index;  in enetc_setup_txbdr()
    284: struct enetc_bdr *tx_ring;  in enetc_tx_queue_setup() local
    294: if (tx_ring == NULL) {  in enetc_tx_queue_setup()
    304: tx_ring->index = queue_idx;  in enetc_tx_queue_setup()
    305: tx_ring->ndev = dev;  in enetc_tx_queue_setup()
    322: rte_free(tx_ring);  in enetc_tx_queue_setup()
    348: i = tx_ring->next_to_clean;  in enetc_tx_queue_release()
    361: enetc_free_bdr(tx_ring);  in enetc_tx_queue_release()
    362: rte_free(tx_ring);  in enetc_tx_queue_release()
    793: struct enetc_bdr *tx_ring;  in enetc_tx_queue_start() local
    [all …]
|
/f-stack/dpdk/drivers/net/bnxt/
bnxt_txq.c
     30: if (!txq || !txq->tx_ring)  in bnxt_tx_queue_release_mbufs()
     33: sw_ring = txq->tx_ring->tx_buf_ring;  in bnxt_tx_queue_release_mbufs()
     35: for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {  in bnxt_tx_queue_release_mbufs()
     65: if (txq->tx_ring) {  in bnxt_tx_queue_release_op()
     66: bnxt_free_ring(txq->tx_ring->tx_ring_struct);  in bnxt_tx_queue_release_op()
     67: rte_free(txq->tx_ring->tx_ring_struct);  in bnxt_tx_queue_release_op()
     68: rte_free(txq->tx_ring);  in bnxt_tx_queue_release_op()
|
bnxt_txr.h
     34: return ((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) &  in bnxt_tx_bds_in_hw()
     35: txq->tx_ring->tx_ring_struct->ring_mask);  in bnxt_tx_bds_in_hw()
     43: return ((txq->tx_ring->tx_ring_struct->ring_size -  in bnxt_tx_avail()
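
The two helpers in bnxt_txr.h are the standard power-of-two ring arithmetic: with ring_mask == ring_size - 1, the in-flight count (prod - cons) & ring_mask stays correct across index wrap-around, and one slot is left unused so a full ring is distinguishable from an empty one. A self-contained sketch under those assumptions (stand-in types, not the real bnxt structs):

```c
#include <stdint.h>

struct ring {
    uint16_t tx_prod;    /* producer index */
    uint16_t tx_cons;    /* consumer index */
    uint16_t ring_size;  /* power of two   */
    uint16_t ring_mask;  /* ring_size - 1  */
};

/* Descriptors currently owned by hardware; the mask makes the
 * subtraction wrap-safe, mirroring bnxt_tx_bds_in_hw(). */
static uint16_t tx_bds_in_hw(const struct ring *r)
{
    return (uint16_t)((r->tx_prod - r->tx_cons) & r->ring_mask);
}

/* Free descriptors, keeping one slot empty so that prod == cons
 * can only mean "empty", mirroring bnxt_tx_avail(). */
static uint16_t tx_avail(const struct ring *r)
{
    return (uint16_t)(r->ring_size - tx_bds_in_hw(r) - 1);
}
```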
|
bnxt_txr.c
     32: bnxt_free_ring(txq->tx_ring->tx_ring_struct);  in bnxt_free_tx_rings()
     33: rte_free(txq->tx_ring->tx_ring_struct);  in bnxt_free_tx_rings()
     34: rte_free(txq->tx_ring);  in bnxt_free_tx_rings()
     47: struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_init_one_tx_ring()
     67: txq->tx_ring = txr;  in bnxt_init_tx_ring_struct()
    112: struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_start_xmit()
    346: struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_tx_cmp_fast()
    376: struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_tx_cmp()
    507: bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_prod);  in bnxt_xmit_pkts()
|
bnxt_ring.c
    107: struct bnxt_ring *tx_ring;  in bnxt_alloc_rings() local
    219: tx_ring = tx_ring_info->tx_ring_struct;  in bnxt_alloc_rings()
    223: tx_ring->bd_dma = mz_phys_addr + tx_ring_start;  in bnxt_alloc_rings()
    224: tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;  in bnxt_alloc_rings()
    225: tx_ring->mem_zone = (const void *)mz;  in bnxt_alloc_rings()
    227: if (!tx_ring->bd)  in bnxt_alloc_rings()
    229: if (tx_ring->vmem_size) {  in bnxt_alloc_rings()
    230: tx_ring->vmem =  in bnxt_alloc_rings()
    233: (struct bnxt_sw_tx_bd *)tx_ring->vmem;  in bnxt_alloc_rings()
    656: ring = txq->tx_ring->tx_ring_struct;  in bnxt_init_all_rings()
    [all …]
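
bnxt_alloc_rings() carves one reserved memory zone into per-ring regions: the descriptor area gets both a virtual pointer (bd) and a bus address (bd_dma) at the same offset, and vmem points at the software bookkeeping that shadows the descriptors. A simplified sketch of that carving, with hypothetical offsets and stand-in types:

```c
#include <stdint.h>
#include <stddef.h>

/* Stand-in for the pieces of struct bnxt_ring that the carving fills. */
struct carved_ring {
    void     *bd;       /* virtual address of the descriptor array */
    uint64_t  bd_dma;   /* bus address the NIC will DMA from       */
    void     *vmem;     /* per-descriptor software state           */
};

/* Both views must use the same offset into the zone so that bd_dma
 * addresses exactly the bytes behind bd. */
static void carve_ring(struct carved_ring *r,
                       uint8_t *mz_addr, uint64_t mz_phys_addr,
                       size_t tx_ring_start, size_t vmem_start)
{
    r->bd     = mz_addr + tx_ring_start;
    r->bd_dma = mz_phys_addr + tx_ring_start;
    r->vmem   = mz_addr + vmem_start;
}
```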
|
bnxt_rxtx_vec_common.h
    105: struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_tx_cmp_vec_fast()
    128: struct bnxt_tx_ring_info *txr = txq->tx_ring;  in bnxt_tx_cmp_vec()
|
bnxt_txq.h
     29: struct bnxt_tx_ring_info *tx_ring;  member
|
/f-stack/dpdk/drivers/net/ena/
ena_ethdev.c
    413: ++tx_ring->tx_stats.bad_req_id;  in validate_tx_req_id()
    815: queues = adapter->tx_ring;  in ena_queue_start_all()
   1210: queues = adapter->tx_ring;  in ena_queue_stop_all()
   2394: ++tx_ring->tx_stats.linearize;  in ena_check_and_linearize_mbuf()
   2516: tx_ring->id);  in ena_xmit_mbuf()
   2530: tx_ring->tx_stats.cnt++;  in ena_xmit_mbuf()
   2534: tx_ring->size_mask);  in ena_xmit_mbuf()
   2572: tx_ring->size_mask);  in ena_tx_cleanup()
   2604: tx_ring->size_mask)]);  in eth_ena_xmit_pkts()
   2617: ena_tx_cleanup(tx_ring);  in eth_ena_xmit_pkts()
    [all …]
|
/f-stack/dpdk/app/pdump/
main.c
    124: struct rte_ring *tx_ring;  member
    511: if (pt->tx_ring)  in cleanup_rings()
    512: rte_ring_free(pt->tx_ring);  in cleanup_rings()
    678: pt->tx_ring = rte_ring_create(ring_name, pt->ring_size,  in create_mp_ring_vdev()
    680: if (pt->tx_ring == NULL) {  in create_mp_ring_vdev()
    789: if (pt->tx_ring == NULL) {  in create_mp_ring_vdev()
    845: pt->tx_ring,  in enable_pdump()
    853: pt->tx_ring, pt->mp, NULL);  in enable_pdump()
    872: pt->tx_ring, pt->mp, NULL);  in enable_pdump()
    876: pt->tx_ring, pt->mp, NULL);  in enable_pdump()
    [all …]
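
Here tx_ring is not a hardware ring but a plain rte_ring that buffers captured packets between the pdump framework and the thread that drains them to the capture device. A sketch of the creation step using the real rte_ring_create() API; the function name, ring name, size, and SP/SC flags here are illustrative rather than the tool's actual values:

```c
#include <rte_ring.h>
#include <rte_lcore.h>

/* One ring per capture tuple, as in create_mp_ring_vdev(); returns NULL
 * on failure, in which case the caller unwinds as cleanup_rings() does. */
static struct rte_ring *
create_capture_ring(const char *ring_name, unsigned int ring_size)
{
    return rte_ring_create(ring_name, ring_size, rte_socket_id(),
                           RING_F_SP_ENQ | RING_F_SC_DEQ);
}
```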
|
/f-stack/dpdk/examples/qos_sched/
main.c
     66: flow->tx_thread.tx_ring = flow->tx_ring;  in app_main_loop()
     75: flow->wt_thread.tx_ring = flow->tx_ring;  in app_main_loop()
|
main.h
     87: struct rte_ring *tx_ring;  member
    106: struct rte_ring *tx_ring;  member
|
app_thread.c
    164: retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,  in app_tx_thread()
    216: while (rte_ring_sp_enqueue_bulk(conf->tx_ring,  in app_worker_thread()
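
The qos_sched threads hand bursts across an rte_ring using the single-producer/single-consumer bulk variants, which move either the whole burst or nothing. A sketch of the draining side under that all-or-nothing contract (burst size is illustrative; the real thread also handles stats and the port transmit):

```c
#include <rte_ring.h>
#include <rte_mbuf.h>

#define BURST 32  /* illustrative burst size */

static void tx_drain_once(struct rte_ring *tx_ring)
{
    struct rte_mbuf *mbufs[BURST];

    /* Bulk dequeue returns BURST or 0: with fewer than BURST packets
     * queued, nothing is dequeued and the thread simply retries later. */
    unsigned int n = rte_ring_sc_dequeue_bulk(tx_ring, (void **)mbufs,
                                              BURST, NULL);
    if (n == 0)
        return;
    /* ... transmit the BURST packets ... */
}
```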
|
/f-stack/freebsd/contrib/octeon-sdk/
cvmx-mgmt-port.c
     99: cvmx_mgmt_port_ring_entry_t tx_ring[CVMX_MGMT_PORT_NUM_TX_BUFFERS];  member
    183: …ng1.s.osize != CVMX_MGMT_PORT_NUM_TX_BUFFERS || cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)  in cvmx_mgmt_port_initialize()
    199: if (cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)  in cvmx_mgmt_port_initialize()
    286: state->tx_ring[i].s.len = CVMX_MGMT_PORT_TX_BUFFER_SIZE;  in cvmx_mgmt_port_initialize()
    287: state->tx_ring[i].s.addr = cvmx_ptr_to_phys(state->tx_buffers[i]);  in cvmx_mgmt_port_initialize()
    292: oring1.s.obase = cvmx_ptr_to_phys(state->tx_ring)>>3;  in cvmx_mgmt_port_initialize()
    562: state->tx_ring[state->tx_write_index].s.len = packet_len;  in cvmx_mgmt_port_send()
    564: state->tx_ring[state->tx_write_index].s.tstamp = 0;  in cvmx_mgmt_port_send()
    617: state->tx_ring[state->tx_write_index].s.len = m->m_pkthdr.len;  in cvmx_mgmt_port_sendm()
    619: state->tx_ring[state->tx_write_index].s.tstamp = 0;  in cvmx_mgmt_port_sendm()
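
The Octeon management port keeps a fixed array of ring entries, each holding a length and the physical address of its buffer; cvmx_ptr_to_phys() does the virtual-to-physical translation because the hardware DMAs from physical addresses. A simplified stand-in for that entry layout and the fill step in cvmx_mgmt_port_send() (the real cvmx_mgmt_port_ring_entry_t has a different bit layout and extra fields such as the tstamp flag cleared above):

```c
#include <stdint.h>

/* Simplified stand-in: one 64-bit word split into length and address. */
typedef union {
    uint64_t u64;
    struct {
        uint64_t len  : 16;  /* bytes to transmit       */
        uint64_t addr : 48;  /* physical buffer address */
    } s;
} ring_entry_t;

/* Fill the entry at the current write index before ringing the
 * doorbell; idx wraps at the fixed ring size elsewhere. */
static void fill_tx_entry(ring_entry_t *tx_ring, unsigned int idx,
                          uint64_t phys_addr, uint16_t packet_len)
{
    tx_ring[idx].s.len  = packet_len;
    tx_ring[idx].s.addr = phys_addr;
}
```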
|
/f-stack/dpdk/drivers/net/ixgbe/
ixgbe_rxtx_vec_common.h
     86: status = txq->tx_ring[txq->tx_next_dd].wb.status;  in ixgbe_tx_free_bufs()
    219: txq->tx_ring[i] = zeroed_desc;  in _ixgbe_reset_tx_queue_vec()
    223: volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];  in _ixgbe_reset_tx_queue_vec()
|
ixgbe_rxtx_vec_neon.c
    505: txdp = &txq->tx_ring[tx_id];  in ixgbe_xmit_fixed_burst_vec()
    525: txdp = &txq->tx_ring[tx_id];  in ixgbe_xmit_fixed_burst_vec()
    535: txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=  in ixgbe_xmit_fixed_burst_vec()
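
Together the two ixgbe vector files show the batched completion scheme: the transmit side sets the RS (report status) bit on only one descriptor per batch (tx_next_rs), and the free side checks the DD (descriptor done) bit at tx_next_dd; DD there implies everything before it has completed. A sketch with stand-in types (bit values as defined in ixgbe_type.h, quoted from memory):

```c
#include <stdint.h>

#define TXD_STAT_DD 0x00000001u  /* set by hardware on write-back */
#define TXD_CMD_RS  0x08000000u  /* request a status write-back   */

struct tx_desc { uint32_t cmd_type_len; uint32_t status; };

struct txq {
    struct tx_desc *tx_ring;
    uint16_t tx_next_dd;  /* descriptor expected to complete next */
    uint16_t tx_next_rs;  /* descriptor carrying the next RS bit  */
};

/* One RS per batch means one status write-back per batch instead of
 * one per packet, which is what keeps the vector path cheap. */
static void request_status(struct txq *q)
{
    q->tx_ring[q->tx_next_rs].cmd_type_len |= TXD_CMD_RS;
}

/* Mirrors the check at the top of ixgbe_tx_free_bufs(): only reclaim
 * software buffers once the RS descriptor reports done. */
static int batch_done(const struct txq *q)
{
    return (q->tx_ring[q->tx_next_dd].status & TXD_STAT_DD) != 0;
}
```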
|
/f-stack/dpdk/drivers/net/bnx2x/
bnx2x_rxtx.h
     50: union eth_tx_bd_types *tx_ring; /**< TX ring virtual address. */  member
|
bnx2x_rxtx.c
    281: txq->tx_ring = (union eth_tx_bd_types *) tz->addr;  in bnx2x_dev_tx_queue_setup()
    282: memset(txq->tx_ring, 0, tsize);  in bnx2x_dev_tx_queue_setup()
    298: tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;  in bnx2x_dev_tx_queue_setup()
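
The bnx2x match at line 298 is the interesting one: the BD ring spans multiple pages, and the last slot of each page is not a data descriptor but a next_bd pointer chaining to the first slot of the following page, wrapping back to the first page at the end. A simplified model of that chaining with stand-in types and an illustrative page size:

```c
#include <stdint.h>
#include <string.h>

#define TX_BD_PER_PAGE 256  /* illustrative, not the driver's value */

struct next_bd { uint32_t addr_lo, addr_hi; };
union tx_bd { struct next_bd next_bd; unsigned char raw[16]; };

/* Zero the ring, then make the last BD of each page point at the
 * physical address of the next page's first BD. */
static void chain_tx_pages(union tx_bd *tx_ring, unsigned int pages,
                           uint64_t ring_phys)
{
    memset(tx_ring, 0, (size_t)pages * TX_BD_PER_PAGE * sizeof(*tx_ring));
    for (unsigned int i = 1; i <= pages; i++) {
        struct next_bd *nb = &tx_ring[TX_BD_PER_PAGE * i - 1].next_bd;
        uint64_t next = ring_phys +
            (uint64_t)(i % pages) * TX_BD_PER_PAGE * sizeof(union tx_bd);
        nb->addr_lo = (uint32_t)next;
        nb->addr_hi = (uint32_t)(next >> 32);
    }
}
```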
|
/f-stack/dpdk/drivers/net/hns3/
hns3_rxtx.c
   1004: desc = txq->tx_ring;  in hns3_init_txq()
   1353: desc = txq->tx_ring;  in hns3_alloc_txq_and_dma_zone()
   2665: desc = txq->tx_ring;  in hns3_tx_free_useless_buffer()
   2985: struct hns3_desc *tx_ring = txq->tx_ring;  in hns3_parse_tunneling_params() local
   3097: struct hns3_desc *tx_ring = txq->tx_ring;  in hns3_txd_enable_checksum() local
   3346: struct hns3_desc *tx_ring = txq->tx_ring;  in hns3_parse_cksum() local
   3420: desc = &txq->tx_ring[tx_next_clean];  in hns3_tx_free_buffer_simple()
   3555: struct hns3_desc *tx_ring;  in hns3_xmit_pkts() local
   3573: tx_ring = txq->tx_ring;  in hns3_xmit_pkts()
   3620: desc = &tx_ring[tx_next_use];  in hns3_xmit_pkts()
    [all …]
|
hns3_rxtx_vec.h
     56: tx_desc = &txq->tx_ring[txq->next_to_clean];  in hns3_tx_free_buffers()
|
hns3_rxtx_vec_neon.h
     51: tx_desc = &txq->tx_ring[next_to_use];  in hns3_xmit_fixed_burst_vec()
     68: tx_desc = &txq->tx_ring[next_to_use];  in hns3_xmit_fixed_burst_vec()
|
/f-stack/dpdk/drivers/net/e1000/
em_rxtx.c
    151: volatile struct e1000_data_desc *tx_ring; /**< TX ring address */  member
    299: volatile struct e1000_data_desc *txr = txq->tx_ring;  in em_xmit_cleanup()
    391: txr = txq->tx_ring;  in eth_em_xmit_pkts()
   1145: txq->tx_ring[i] = txd_init;  in em_reset_tx_queue()
   1278: tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;  in eth_em_tx_queue_setup()
   1308: txq->tx_ring = (struct e1000_data_desc *) tz->addr;  in eth_em_tx_queue_setup()
   1311: txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);  in eth_em_tx_queue_setup()
   1575: status = &txq->tx_ring[desc].upper.fields.status;  in eth_em_tx_descriptor_status()
   1946: sizeof(*txq->tx_ring));  in eth_em_tx_init()
   2049: tx_desc = &txq->tx_ring[txq->tx_tail];  in e1000_flush_tx_ring()
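
The match at line 1575 is the core of eth_em_tx_descriptor_status(): hardware sets a DD (descriptor done) bit in the status byte once it has consumed a descriptor, so ownership can be tested without touching any register. A sketch with a stand-in layout modeled on e1000_data_desc (the real constant is E1000_TXD_STAT_DD):

```c
#include <stdint.h>

#define TXD_STAT_DD 0x01u  /* set by hardware on completion */

struct data_desc {
    uint64_t buffer_addr;
    uint32_t lower;
    union {
        uint32_t data;
        struct { uint8_t status, css; uint16_t special; } fields;
    } upper;
};

/* A descriptor with DD set is finished and reusable; otherwise the
 * hardware still owns it. */
static int tx_descriptor_done(const volatile struct data_desc *tx_ring,
                              uint16_t desc)
{
    return (tx_ring[desc].upper.fields.status & TXD_STAT_DD) != 0;
}
```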
|
/f-stack/dpdk/drivers/net/ice/
ice_rxtx.c
    894: ((volatile char *)txq->tx_ring)[i] = 0;  in ice_reset_tx_queue()
   1315: txq->tx_ring = tz->addr;  in ice_tx_queue_setup()
   2101: txq->tx_ring = (struct ice_tx_desc *)tz->addr;  in ice_fdir_setup_tx_resources()
   2390: volatile struct ice_tx_desc *txd = txq->tx_ring;  in ice_xmit_cleanup()
   2509: volatile struct ice_tx_desc *tx_ring;  in ice_xmit_pkts() local
   2531: tx_ring = txq->tx_ring;  in ice_xmit_pkts()
   2611: &tx_ring[tx_id];  in ice_xmit_pkts()
   2647: txd = &tx_ring[tx_id];  in ice_xmit_pkts()
   2675: txd = &tx_ring[tx_id];  in ice_xmit_pkts()
   3973: (&txq->tx_ring[txq->tx_tail]);  in ice_fdir_programming()
    [all …]
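
The reset at line 894 clears the ring through a volatile char pointer rather than memset(): the descriptors live in memory the device reads via DMA, and the volatile qualifier keeps the compiler from eliding or reordering the byte stores. A minimal sketch of the same idiom:

```c
#include <stddef.h>

/* Byte-wise volatile clear: every store is performed and reaches the
 * DMA-visible ring, unlike a memset() the optimizer may transform. */
static void reset_ring_bytes(volatile void *ring, size_t nbytes)
{
    volatile char *p = (volatile char *)ring;
    for (size_t i = 0; i < nbytes; i++)
        p[i] = 0;
}
```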
|
/f-stack/dpdk/drivers/net/i40e/
i40e_rxtx_vec_altivec.c
    575: txdp = &txq->tx_ring[tx_id];  in i40e_xmit_fixed_burst_vec()
    595: txdp = &txq->tx_ring[tx_id];  in i40e_xmit_fixed_burst_vec()
    605: txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=  in i40e_xmit_fixed_burst_vec()
|
i40e_rxtx_vec_neon.c
    554: txdp = &txq->tx_ring[tx_id];  in i40e_xmit_fixed_burst_vec()
    574: txdp = &txq->tx_ring[tx_id];  in i40e_xmit_fixed_burst_vec()
    584: txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=  in i40e_xmit_fixed_burst_vec()
|