| /dpdk/drivers/event/cnxk/ |
| cnxk_eventdev_adptr.c |
    448: size += (eth_port_id + tx_queue_id);  in cnxk_sso_tx_queue_data_sz()
    460: size += (tx_queue_id + 1);  in cnxk_sso_tx_queue_data_sz()
    463: tx_queue_id + 1 :  in cnxk_sso_tx_queue_data_sz()
    468: size += tx_queue_id + 1;  in cnxk_sso_tx_queue_data_sz()
    514: tx_queue_id);  in cnxk_sso_updt_tx_queue_data()
    531: int32_t tx_queue_id)  in cnxk_sso_tx_adapter_queue_add() (argument)
    538: if (tx_queue_id < 0) {  in cnxk_sso_tx_adapter_queue_add()
    544: sq = &cnxk_eth_dev->sqs[tx_queue_id];  in cnxk_sso_tx_adapter_queue_add()
    564: int32_t tx_queue_id)  in cnxk_sso_tx_adapter_queue_del() (argument)
    571: if (tx_queue_id < 0) {  in cnxk_sso_tx_adapter_queue_del()
    [all …]
|
| cn10k_eventdev.c |
    793: cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)  in cn10k_sso_txq_fc_update() (argument)
    800: if (tx_queue_id < 0) {  in cn10k_sso_txq_fc_update()
    806: sq = &cnxk_eth_dev->sqs[tx_queue_id];  in cn10k_sso_txq_fc_update()
    807: txq = eth_dev->data->tx_queues[tx_queue_id];  in cn10k_sso_txq_fc_update()
    824: int32_t tx_queue_id)  in cn10k_sso_tx_adapter_queue_add() (argument)
    832: rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);  in cn10k_sso_tx_adapter_queue_add()
    850: cn10k_sso_txq_fc_update(eth_dev, tx_queue_id);  in cn10k_sso_tx_adapter_queue_add()
    863: int32_t tx_queue_id)  in cn10k_sso_tx_adapter_queue_del() (argument)
    868: rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);  in cn10k_sso_tx_adapter_queue_del()
|
| cn9k_eventdev.c |
    1025: cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)  in cn9k_sso_txq_fc_update() (argument)
    1032: if (tx_queue_id < 0) {  in cn9k_sso_txq_fc_update()
    1038: sq = &cnxk_eth_dev->sqs[tx_queue_id];  in cn9k_sso_txq_fc_update()
    1039: txq = eth_dev->data->tx_queues[tx_queue_id];  in cn9k_sso_txq_fc_update()
    1056: int32_t tx_queue_id)  in cn9k_sso_tx_adapter_queue_add() (argument)
    1064: rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);  in cn9k_sso_tx_adapter_queue_add()
    1082: cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);  in cn9k_sso_tx_adapter_queue_add()
    1095: int32_t tx_queue_id)  in cn9k_sso_tx_adapter_queue_del() (argument)
    1100: rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);  in cn9k_sso_tx_adapter_queue_del()
    1103: cn9k_sso_txq_fc_update(eth_dev, tx_queue_id);  in cn9k_sso_tx_adapter_queue_del()
|
| /dpdk/drivers/net/nfb/ |
| nfb_tx.c |
    51: uint16_t tx_queue_id,  in nfb_eth_tx_queue_setup() (argument)
    66: "%" PRIu16 "!\n", tx_queue_id);  in nfb_eth_tx_queue_setup()
    71: tx_queue_id,  in nfb_eth_tx_queue_setup()
    75: dev->data->tx_queues[tx_queue_id] = txq;  in nfb_eth_tx_queue_setup()
    84: uint16_t tx_queue_id,  in nfb_eth_tx_queue_init() (argument)
    90: txq->queue = ndp_open_tx_queue(nfb, tx_queue_id);  in nfb_eth_tx_queue_init()
    95: txq->tx_queue_id = tx_queue_id;  in nfb_eth_tx_queue_init()
|
| nfb_tx.h |
    20: uint16_t tx_queue_id; /* index */  (member)
    47: uint16_t tx_queue_id,
    67: uint16_t tx_queue_id,
|
| /dpdk/drivers/net/ionic/ |
| ionic_rxtx.c |
    143: txq = eth_dev->data->tx_queues[tx_queue_id];  in ionic_dev_tx_queue_stop()
    145: eth_dev->data->tx_queue_state[tx_queue_id] =  in ionic_dev_tx_queue_stop()
    170: if (tx_queue_id >= lif->ntxqcqs) {  in ionic_dev_tx_queue_setup()
    173: tx_queue_id, lif->ntxqcqs);  in ionic_dev_tx_queue_setup()
    180: socket_id, tx_queue_id, nb_desc, offloads);  in ionic_dev_tx_queue_setup()
    189: eth_dev->data->tx_queues[tx_queue_id] = NULL;  in ionic_dev_tx_queue_setup()
    192: eth_dev->data->tx_queue_state[tx_queue_id] =  in ionic_dev_tx_queue_setup()
    213: eth_dev->data->tx_queues[tx_queue_id] = txq;  in ionic_dev_tx_queue_setup()
    230: tx_queue_id);  in ionic_dev_tx_queue_start()
    234: txq = eth_dev->data->tx_queues[tx_queue_id];  in ionic_dev_tx_queue_start()
    [all …]
|
| ionic_rxtx.h |
    32: int ionic_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
    36: int ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
    37: int ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
|
| /dpdk/drivers/net/atlantic/ |
| atl_ethdev.h |
    58: void atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id);
    65: int atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
    89: int atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
    90: int atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
|
| atl_rxtx.c |
    249: if (dev->data->tx_queues[tx_queue_id] != NULL) {  in atl_tx_queue_setup()
    250: atl_tx_queue_release(dev, tx_queue_id);  in atl_tx_queue_setup()
    251: dev->data->tx_queues[tx_queue_id] = NULL;  in atl_tx_queue_setup()
    265: txq->queue_id = tx_queue_id;  in atl_tx_queue_setup()
    302: dev->data->tx_queues[tx_queue_id] = txq;  in atl_tx_queue_setup()
    538: if (tx_queue_id < dev->data->nb_tx_queues) {  in atl_tx_queue_start()
    539: hw_atl_b0_hw_ring_tx_start(hw, tx_queue_id);  in atl_tx_queue_start()
    542: hw_atl_b0_hw_tx_ring_tail_update(hw, 0, tx_queue_id);  in atl_tx_queue_start()
    543: dev->data->tx_queue_state[tx_queue_id] =  in atl_tx_queue_start()
    560: txq = dev->data->tx_queues[tx_queue_id];  in atl_tx_queue_stop()
    [all …]
|
| /dpdk/drivers/net/cxgbe/ |
| cxgbe_pfvf.h |
    39: uint16_t tx_queue_id);
    41: uint16_t tx_queue_id);
    42: int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
|
| /dpdk/drivers/net/ngbe/ |
| ngbe_ethdev.h |
    200: int ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
    217: void ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
    218: void ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
    224: int ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
    226: int ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
|
| /dpdk/drivers/net/bnxt/ |
| bnxt_txr.c |
    530: int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)  in bnxt_tx_queue_start() (argument)
    533: struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];  in bnxt_tx_queue_start()
    540: bnxt_free_hwrm_tx_ring(bp, tx_queue_id);  in bnxt_tx_queue_start()
    541: rc = bnxt_alloc_hwrm_tx_ring(bp, tx_queue_id);  in bnxt_tx_queue_start()
    545: dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;  in bnxt_tx_queue_start()
    552: int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)  in bnxt_tx_queue_stop() (argument)
    555: struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];  in bnxt_tx_queue_stop()
    565: dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;  in bnxt_tx_queue_stop()
|
| bnxt_txr.h |
    59: int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
    60: int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
|
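The bnxt_tx_queue_start()/bnxt_tx_queue_stop() hits above (like the atlantic, ionic, and ice entries elsewhere in this listing) end by recording the queue state in dev->data->tx_queue_state[]. Below is a minimal, hedged sketch of that PMD-side pattern in isolation, written against DPDK's internal <ethdev_driver.h>; the demo_* names are invented for illustration, and a real driver programs its hardware rings (as the bnxt HWRM calls do) before flipping the state.

    #include <ethdev_driver.h>

    /* Hypothetical PMD callback: enable one Tx ring, then publish its state. */
    static int
    demo_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
    {
            /* Hardware-specific ring/doorbell setup would go here. */
            dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
            return 0;
    }

    /* Hypothetical PMD callback: quiesce one Tx ring, then publish its state. */
    static int
    demo_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
    {
            /* Hardware-specific drain/disable would go here. */
            dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
            return 0;
    }

    /* Wired into the device ops table next to the setup/release callbacks. */
    static const struct eth_dev_ops demo_eth_dev_ops = {
            .tx_queue_start = demo_tx_queue_start,
            .tx_queue_stop  = demo_tx_queue_stop,
    };
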
| /dpdk/lib/eventdev/ |
| rte_event_eth_tx_adapter.c |
    158: int32_t tx_queue_id);
    244: uint16_t tx_queue_id)  in txa_service_queue() (argument)
    253: return likely(tqi != NULL) ? tqi + tx_queue_id : NULL;  in txa_service_queue()
    456: uint16_t tx_queue_id)  in txa_service_is_queue_added() (argument)
    758: int32_t tx_queue_id)  in txa_service_queue_add() (argument)
    769: if (tx_queue_id == -1) {  in txa_service_queue_add()
    809: if (txa_service_is_queue_added(txa, eth_dev, tx_queue_id))  in txa_service_queue_add()
    828: txa_retry->tx_queue = tx_queue_id;  in txa_service_queue_add()
    857: int32_t tx_queue_id)  in txa_service_queue_del() (argument)
    867: if (tx_queue_id == -1) {  in txa_service_queue_del()
    [all …]
|
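The txa_service_queue_add()/txa_service_queue_del() hits above back the public Tx adapter API, including the tx_queue_id == -1 convention meaning "all Tx queues of the port". A minimal, hedged usage sketch of that public API follows; the adapter id, port id, and wrapper names are illustrative, not taken from the library sources.

    #include <stdint.h>
    #include <rte_event_eth_tx_adapter.h>

    /* Register one Tx queue (or all queues, with -1) with a Tx adapter. */
    static int
    demo_txa_add(uint8_t txa_id, uint16_t eth_port_id, int32_t tx_queue_id)
    {
            /* tx_queue_id == -1 asks the adapter to manage every Tx queue of
             * the port, mirroring the "-1" branch in txa_service_queue_add(). */
            return rte_event_eth_tx_adapter_queue_add(txa_id, eth_port_id,
                                                      tx_queue_id);
    }

    /* Remove the same queue (or all queues) from the adapter. */
    static int
    demo_txa_del(uint8_t txa_id, uint16_t eth_port_id, int32_t tx_queue_id)
    {
            return rte_event_eth_tx_adapter_queue_del(txa_id, eth_port_id,
                                                      tx_queue_id);
    }
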
| /dpdk/drivers/net/axgbe/ |
| axgbe_rxtx.h |
    157: int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
    162: int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
    163: int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
|
| /dpdk/drivers/net/failsafe/ |
| failsafe_ops.c |
    320: ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);  in fs_tx_queue_stop()
    346: ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);  in fs_tx_queue_start()
    350: fs_tx_queue_stop(dev, tx_queue_id);  in fs_tx_queue_start()
    568: uint16_t tx_queue_id,  in fs_tx_queue_setup() (argument)
    589: txq = dev->data->tx_queues[tx_queue_id];  in fs_tx_queue_setup()
    591: fs_tx_queue_release(dev, tx_queue_id);  in fs_tx_queue_setup()
    592: dev->data->tx_queues[tx_queue_id] = NULL;  in fs_tx_queue_setup()
    604: txq->qid = tx_queue_id;  in fs_tx_queue_setup()
    609: dev->data->tx_queues[tx_queue_id] = txq;  in fs_tx_queue_setup()
    612: tx_queue_id,  in fs_tx_queue_setup()
    [all …]
|
| /dpdk/lib/ethdev/ |
| rte_ethdev_trace.h |
    59: RTE_TRACE_POINT_ARGS(uint16_t port_id, uint16_t tx_queue_id,
    62: rte_trace_point_emit_u16(tx_queue_id);
|
| rte_ethdev.c |
    695: tx_queue_id, port_id);  in eth_dev_validate_tx_queue()
    703: tx_queue_id, port_id);  in eth_dev_validate_tx_queue()
    806: tx_queue_id, port_id);  in rte_eth_dev_tx_queue_start()
    813: tx_queue_id, port_id);  in rte_eth_dev_tx_queue_start()
    838: tx_queue_id, port_id);  in rte_eth_dev_tx_queue_stop()
    845: tx_queue_id, port_id);  in rte_eth_dev_tx_queue_stop()
    2028: eth_dev_txq_release(dev, tx_queue_id);  in rte_eth_tx_queue_setup()
    2133: eth_dev_txq_release(dev, tx_queue_id);  in rte_eth_tx_hairpin_queue_setup()
    2135: (dev, tx_queue_id, nb_tx_desc, conf);  in rte_eth_tx_hairpin_queue_setup()
    2137: dev->data->tx_queue_state[tx_queue_id] =  in rte_eth_tx_hairpin_queue_setup()
    [all …]
|
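The rte_ethdev.c hits above are the library-side validation and dispatch for the per-queue Tx calls. Below is a hedged application-side sketch of how tx_queue_id is typically passed to that API; the ring size and helper names are illustrative, and the start/stop pair normally toggles an individual queue only after the port itself has been started.

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Configure one Tx queue on a port; a NULL txconf takes device defaults. */
    static int
    demo_setup_txq(uint16_t port_id, uint16_t tx_queue_id)
    {
            const uint16_t nb_tx_desc = 512;   /* illustrative descriptor count */

            return rte_eth_tx_queue_setup(port_id, tx_queue_id, nb_tx_desc,
                                          rte_eth_dev_socket_id(port_id), NULL);
    }

    /* Runtime start/stop of one Tx queue, typically after rte_eth_dev_start(). */
    static int
    demo_toggle_txq(uint16_t port_id, uint16_t tx_queue_id, bool up)
    {
            return up ? rte_eth_dev_tx_queue_start(port_id, tx_queue_id)
                      : rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
    }
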
| /dpdk/drivers/net/ice/ |
| ice_rxtx.h |
    211: int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
    212: int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
    214: int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
    216: int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
|
| ice_dcf_ethdev.c |
    433: ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)  in ice_dcf_tx_queue_start() (argument)
    440: if (tx_queue_id >= dev->data->nb_tx_queues)  in ice_dcf_tx_queue_start()
    443: txq = dev->data->tx_queues[tx_queue_id];  in ice_dcf_tx_queue_start()
    446: txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id);  in ice_dcf_tx_queue_start()
    451: err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true);  in ice_dcf_tx_queue_start()
    455: tx_queue_id);  in ice_dcf_tx_queue_start()
    465: ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)  in ice_dcf_tx_queue_stop() (argument)
    472: if (tx_queue_id >= dev->data->nb_tx_queues)  in ice_dcf_tx_queue_stop()
    475: err = ice_dcf_switch_queue(hw, tx_queue_id, false, false);  in ice_dcf_tx_queue_stop()
    478: tx_queue_id);  in ice_dcf_tx_queue_stop()
    [all …]
|
| /dpdk/drivers/net/virtio/ |
| virtio_ethdev.h |
    70: int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
    75: uint16_t tx_queue_id);
|
| /dpdk/drivers/net/null/ |
| rte_eth_null.c |
    243: eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,  in eth_tx_queue_setup() (argument)
    257: if (tx_queue_id >= dev->data->nb_tx_queues)  in eth_tx_queue_setup()
    262: dev->data->tx_queues[tx_queue_id] =  in eth_tx_queue_setup()
    263: &internals->tx_null_queues[tx_queue_id];  in eth_tx_queue_setup()
    269: internals->tx_null_queues[tx_queue_id].internals = internals;  in eth_tx_queue_setup()
    270: internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;  in eth_tx_queue_setup()
|
| /dpdk/drivers/net/sfc/ |
| sfc_repr.c |
    724: sfc_repr_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,  in sfc_repr_tx_queue_setup() (argument)
    746: "tx", tx_queue_id, nb_tx_desc,  in sfc_repr_tx_queue_setup()
    752: tx_queue_id, txq->ring,  in sfc_repr_tx_queue_setup()
    757: dev->data->tx_queues[tx_queue_id] = txq;  in sfc_repr_tx_queue_setup()
    776: sfc_repr_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)  in sfc_repr_tx_queue_release() (argument)
    779: struct sfc_repr_txq *txq = dev->data->tx_queues[tx_queue_id];  in sfc_repr_tx_queue_release()
    781: sfc_repr_proxy_del_txq(srs->pf_port_id, srs->repr_id, tx_queue_id);  in sfc_repr_tx_queue_release()
|
| /dpdk/examples/bbdev_app/ |
| main.c |
    137: unsigned int tx_queue_id;  (member)
    521: uint16_t tx_queue_id = 0;  in lcore_conf_init() (local)
    539: lconf->tx_queue_id = tx_queue_id++;  in lcore_conf_init()
    821: uint16_t port_id, tx_queue_id;  in run_decoding() (local)
    833: tx_queue_id = lcore_conf->tx_queue_id;  in run_decoding()
    907: nb_tx = rte_eth_tx_burst(port_id, tx_queue_id, recv_pkts_burst, nb_deq);  in run_decoding()
|
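The bbdev_app hits above assign each worker lcore its own tx_queue_id in lcore_conf_init() and pass it to rte_eth_tx_burst() in run_decoding(), so no two lcores ever share a Tx queue. A hedged sketch of that per-lcore pattern follows; the struct and helper names are illustrative, not the example's own code.

    #include <stdint.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Illustrative per-lcore configuration: one Tx queue per worker lcore. */
    struct demo_lcore_conf {
            uint16_t port_id;
            uint16_t tx_queue_id;
    };

    /* Single-producer transmit: only the owning lcore enqueues to this queue,
     * so rte_eth_tx_burst() needs no locking. Returns packets actually sent. */
    static uint16_t
    demo_send_burst(const struct demo_lcore_conf *lconf,
                    struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
            return rte_eth_tx_burst(lconf->port_id, lconf->tx_queue_id,
                                    pkts, nb_pkts);
    }
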
| /dpdk/drivers/net/txgbe/ |
| txgbe_ethdev.h |
    440: int txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
    457: void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
    458: void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
    464: int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
    466: int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
|