/dpdk/drivers/net/bnxt/

bnxt_txq.c
     80  txq = bp->tx_queues[i];  in bnxt_free_tx_mbufs()
     87  struct bnxt_tx_queue *txq = dev->data->tx_queues[queue_idx];  in bnxt_tx_queue_release_op()
    115  dev->data->tx_queues[queue_idx] = NULL;  in bnxt_tx_queue_release_op()
    145  if (eth_dev->data->tx_queues) {  in bnxt_tx_queue_setup_op()
    146  txq = eth_dev->data->tx_queues[queue_idx];  in bnxt_tx_queue_setup_op()
    158  eth_dev->data->tx_queues[queue_idx] = txq;  in bnxt_tx_queue_setup_op()

bnxt_reps.c
    128  ptxq = parent->tx_queues[qid];  in bnxt_rep_tx_burst()
    745  if (!parent_bp->tx_queues) {  in bnxt_rep_tx_queue_setup_op()
    750  parent_txq = parent_bp->tx_queues[queue_idx];  in bnxt_rep_tx_queue_setup_op()
    761  if (eth_dev->data->tx_queues) {  in bnxt_rep_tx_queue_setup_op()
    762  vfr_txq = eth_dev->data->tx_queues[queue_idx];  in bnxt_rep_tx_queue_setup_op()
    788  eth_dev->data->tx_queues[queue_idx] = vfr_txq;  in bnxt_rep_tx_queue_setup_op()
    795  struct bnxt_vf_rep_tx_queue *vfr_txq = dev->data->tx_queues[queue_idx];  in bnxt_rep_tx_queue_release_op()
    802  dev->data->tx_queues[queue_idx] = NULL;  in bnxt_rep_tx_queue_release_op()

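The bnxt matches above show the setup/release idiom that most PMDs in this listing repeat: setup first releases any queue object already stored at dev->data->tx_queues[queue_idx], then publishes the new one, and release NULLs the slot. A minimal sketch of that idiom follows; struct my_txq and my_txq_free() are hypothetical stand-ins, not bnxt code.

    #include <errno.h>
    #include <rte_ethdev.h>
    #include <rte_malloc.h>

    struct my_txq {
            uint16_t nb_desc;       /* descriptor count requested at setup */
    };

    static void
    my_txq_free(struct my_txq *txq)
    {
            rte_free(txq);
    }

    static int
    my_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
                      uint16_t nb_desc)
    {
            struct my_txq *txq;

            /* Release any queue left over from a previous configuration. */
            if (eth_dev->data->tx_queues) {
                    txq = eth_dev->data->tx_queues[queue_idx];
                    if (txq != NULL) {
                            my_txq_free(txq);
                            eth_dev->data->tx_queues[queue_idx] = NULL;
                    }
            }

            txq = rte_zmalloc("my_txq", sizeof(*txq), RTE_CACHE_LINE_SIZE);
            if (txq == NULL)
                    return -ENOMEM;
            txq->nb_desc = nb_desc;

            /* Publish the queue so the generic layer and tx burst can reach it. */
            eth_dev->data->tx_queues[queue_idx] = txq;
            return 0;
    }
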
/dpdk/lib/ethdev/

ethdev_private.c
    283  fpo->txq.data = dev->data->tx_queues;  in eth_dev_fp_ops_setup()
    366  void **txq = dev->data->tx_queues;  in eth_dev_txq_release()
    412  if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */  in eth_dev_tx_queue_config()
    413  dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",  in eth_dev_tx_queue_config()
    414  sizeof(dev->data->tx_queues[0]) *  in eth_dev_tx_queue_config()
    417  if (dev->data->tx_queues == NULL) {  in eth_dev_tx_queue_config()
    421  } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */  in eth_dev_tx_queue_config()
    425  } else if (dev->data->tx_queues != NULL && nb_queues == 0) {  in eth_dev_tx_queue_config()
    429  rte_free(dev->data->tx_queues);  in eth_dev_tx_queue_config()
    430  dev->data->tx_queues = NULL;  in eth_dev_tx_queue_config()

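The eth_dev_tx_queue_config() snippets capture the lifecycle of the dev->data->tx_queues array itself: first-time configuration allocates the pointer array with rte_zmalloc(), re-configuration resizes it, and configuring zero queues frees it. A condensed sketch of the first and last cases, reconstructed from the fragments above (the re-configure branch and the per-queue release it performs are omitted):

    #include <errno.h>
    #include <rte_ethdev.h>
    #include <rte_malloc.h>

    static int
    tx_queue_config_sketch(struct rte_eth_dev *dev, uint16_t nb_queues)
    {
            if (dev->data->tx_queues == NULL && nb_queues != 0) {
                    /* First-time configuration: allocate the pointer array. */
                    dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
                                    RTE_CACHE_LINE_SIZE);
                    if (dev->data->tx_queues == NULL)
                            return -ENOMEM;
            } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                    /* Deconfiguration: drop the array entirely. */
                    rte_free(dev->data->tx_queues);
                    dev->data->tx_queues = NULL;
            }
            dev->data->nb_tx_queues = nb_queues;
            return 0;
    }
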
/dpdk/drivers/net/nfb/

nfb_tx.c
     13  struct ndp_tx_queue *txq = dev->data->tx_queues[txq_id];  in nfb_eth_tx_queue_start()
     34  struct ndp_tx_queue *txq = dev->data->tx_queues[txq_id];  in nfb_eth_tx_queue_stop()
     75  dev->data->tx_queues[tx_queue_id] = txq;  in nfb_eth_tx_queue_setup()
    107  struct ndp_tx_queue *txq = dev->data->tx_queues[qid];  in nfb_eth_tx_queue_release()

nfb_stats.c
     26  dev->data->tx_queues);  in nfb_eth_stats_get()
     65  dev->data->tx_queues);  in nfb_eth_stats_reset()

/dpdk/drivers/net/ring/

rte_eth_ring.c
     30  struct rte_ring * const *tx_queues;  (member)
    152  dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];  in eth_tx_queue_setup()
    313  struct rte_ring *const tx_queues[],  in do_eth_dev_ring_create() (argument)
    367  data->tx_queues = tx_queues_local;  in do_eth_dev_ring_create()
    377  internals->tx_ring_queues[i].rng = tx_queues[i];  in do_eth_dev_ring_create()
    378  data->tx_queues[i] = &internals->tx_ring_queues[i];  in do_eth_dev_ring_create()
    413  struct rte_ring *const tx_queues[],  in rte_eth_from_rings() (argument)
    420  .tx_queues = tx_queues,  in rte_eth_from_rings()
    435  if (tx_queues == NULL && nb_tx_queues > 0) {  in rte_eth_from_rings()
    697  internal_args->tx_queues,  in rte_pmd_ring_probe()

rte_eth_ring.h
     35  struct rte_ring *const tx_queues[],

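rte_eth_from_rings() is the public entry point declared in rte_eth_ring.h above: it wraps arrays of rte_ring objects as the RX and TX queues of a new ethdev port. A minimal usage sketch, assuming the EAL is already initialized (ring size and names are arbitrary):

    #include <rte_ring.h>
    #include <rte_lcore.h>
    #include <rte_eth_ring.h>

    static int
    make_ring_port(void)
    {
            /* One ring backs both the RX and the TX queue of the port, so
             * anything transmitted can be read back on the RX side. */
            struct rte_ring *r = rte_ring_create("exchange_ring", 1024,
                            rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
            if (r == NULL)
                    return -1;

            /* Returns the new port id, or -1 on failure. */
            return rte_eth_from_rings("net_ring_example", &r, 1, &r, 1,
                            rte_socket_id());
    }
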
/dpdk/drivers/net/hns3/

hns3_ethdev_dump.c
    258  void **tx_queues;  in get_tx_queue() (local)
    261  tx_queues = dev->data->tx_queues;  in get_tx_queue()
    262  if (tx_queues == NULL || tx_queues[queue_id] == NULL) {  in get_tx_queue()
    267  txq = (struct hns3_tx_queue *)tx_queues[queue_id];  in get_tx_queue()
    271  return tx_queues[queue_id];  in get_tx_queue()
    285  void **tx_queues;  in get_rxtx_fake_queue_info() (local)
    303  tx_queues = hw->fkq_data.tx_queues;  in get_rxtx_fake_queue_info()
    306  if (tx_queues == NULL || tx_queues[queue_id] == NULL) {  in get_rxtx_fake_queue_info()
    310  txq = (struct hns3_tx_queue *)tx_queues[queue_id];  in get_rxtx_fake_queue_info()

hns3_rxtx.c
    200  hw->fkq_data.tx_queues = NULL;  in hns3_fake_tx_queue_release()
    240  if (dev->data->tx_queues[i]) {  in hns3_free_tx_queues()
    242  dev->data->tx_queues[i] = NULL;  in hns3_free_tx_queues()
    249  if (fkq_data->tx_queues[i])  in hns3_free_tx_queues()
    361  txq = hw->data->tx_queues[i];  in hns3_update_all_queues_pvid_proc_en()
    474  txq = hw->data->tx_queues[i];  in hns3_start_all_txqs()
    495  txq = hw->data->tx_queues[j];  in hns3_start_all_txqs()
    550  txq = hw->data->tx_queues[i];  in hns3_restore_tqp_enable_state()
    564  txq = hw->data->tx_queues[i];  in hns3_stop_all_txqs()
   1280  txq = hw->data->tx_queues[i];  in hns3_start_tqps()
    [all …]

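get_tx_queue() in hns3_ethdev_dump.c shows the defensive lookup that dump and info paths need: both the array pointer and the per-queue slot may legitimately be NULL (device not yet configured, or queue already released), so both are checked before the entry is handed out. A generic sketch of that check:

    #include <rte_ethdev.h>

    static void *
    get_txq_checked(struct rte_eth_dev *dev, uint16_t queue_id)
    {
            void **tx_queues = dev->data->tx_queues;

            /* Either the whole array or the single slot can be unset. */
            if (tx_queues == NULL || tx_queues[queue_id] == NULL)
                    return NULL;

            return tx_queues[queue_id];
    }
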
/dpdk/drivers/net/mlx4/

mlx4_txq.c
    141  txq = dev->data->tx_queues[i];  in mlx4_tx_uar_init_secondary()
    153  txq = dev->data->tx_queues[i];  in mlx4_tx_uar_init_secondary()
    352  txq = dev->data->tx_queues[idx];  in mlx4_tx_queue_setup()
    407  dev->data->tx_queues[idx] = txq;  in mlx4_tx_queue_setup()
    533  struct txq *txq = dev->data->tx_queues[idx];  in mlx4_tx_queue_release()
    538  dev->data->tx_queues[idx] = NULL;  in mlx4_tx_queue_release()

/dpdk/drivers/net/bnx2x/

bnx2x_rxtx.c
    185  bnx2x_tx_queue_release(dev->data->tx_queues[queue_idx]);  in bnx2x_dev_tx_queue_release()
    312  dev->data->tx_queues[queue_idx] = txq;  in bnx2x_dev_tx_queue_setup()
    313  if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues;  in bnx2x_dev_tx_queue_setup()
    487  struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];  in bnx2x_dev_clear_queues()
    490  dev->data->tx_queues[i] = NULL;  in bnx2x_dev_clear_queues()

/dpdk/drivers/net/kni/

rte_eth_kni.c
     60  struct pmd_queue tx_queues[KNI_MAX_QUEUE_PER_PORT];  (member)
    279  q = &internals->tx_queues[tx_queue_id];  in eth_kni_tx_queue_setup()
    282  dev->data->tx_queues[tx_queue_id] = q;  in eth_kni_tx_queue_setup()
    316  q = data->tx_queues[i];  in eth_kni_stats_get()
    344  q = data->tx_queues[i];  in eth_kni_stats_reset()

/dpdk/drivers/net/cnxk/

cn9k_ethdev.c
    191  txq = eth_dev->data->tx_queues[qid];  in cn9k_nix_tx_queue_setup()
    281  struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];  in cn9k_nix_tx_queue_stop()
    409  nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);  in cn9k_nix_timesync_enable()
    433  nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);  in cn9k_nix_timesync_disable()
    540  struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];  in cn9k_nix_tm_mark_vlan_dei()
    577  struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];  in cn9k_nix_tm_mark_ip_ecn()
    614  struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];  in cn9k_nix_tm_mark_ip_dscp()

cn10k_ethdev.c
    192  txq = eth_dev->data->tx_queues[qid];  in cn10k_nix_tx_queue_setup()
    297  struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];  in cn10k_nix_tx_queue_stop()
    424  nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);  in cn10k_nix_timesync_enable()
    448  nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);  in cn10k_nix_timesync_disable()
    606  struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];  in cn10k_nix_tm_mark_vlan_dei()
    643  struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];  in cn10k_nix_tm_mark_ip_ecn()
    680  struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];  in cn10k_nix_tm_mark_ip_dscp()

/dpdk/drivers/net/vhost/

rte_eth_vhost.c
    243  vq = dev->data->tx_queues[i];  in vhost_dev_xstats_reset()
    307  vq = dev->data->tx_queues[i];  in vhost_dev_xstats_get()
    750  vq = dev->data->tx_queues[i];  in update_queuing_status()
    777  vq = eth_dev->data->tx_queues[i];  in queue_setup()
    881  vq = eth_dev->data->tx_queues[i];  in destroy_device()
   1212  if (dev->data->tx_queues)  in eth_dev_close()
   1214  rte_free(dev->data->tx_queues[i]);  in eth_dev_close()
   1320  if (dev->data->tx_queues[i] == NULL)  in eth_stats_get()
   1322  vq = dev->data->tx_queues[i];  in eth_stats_get()
   1354  vq = dev->data->tx_queues[i];  in eth_stats_reset()
    [all …]

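The vhost stats and teardown paths all iterate the same way: walk nb_tx_queues slots and skip any that were never set up. A sketch of that loop; struct vhost_queue's real layout is not shown in the snippets, so the single counter field here is a hypothetical placeholder:

    #include <rte_ethdev.h>

    struct vhost_queue {
            uint64_t tx_pkts;       /* hypothetical per-queue counter */
    };

    static uint64_t
    count_tx_packets(struct rte_eth_dev *dev)
    {
            uint64_t total = 0;
            uint16_t i;

            for (i = 0; i < dev->data->nb_tx_queues; i++) {
                    struct vhost_queue *vq = dev->data->tx_queues[i];

                    if (vq == NULL)         /* slot never configured */
                            continue;
                    total += vq->tx_pkts;
            }
            return total;
    }
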
/dpdk/drivers/net/axgbe/

axgbe_rxtx.c
    519  axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);  in axgbe_dev_tx_queue_release()
    596  dev->data->tx_queues[queue_idx] = txq;  in axgbe_dev_tx_queue_setup()
    597  if (!pdata->tx_queues)  in axgbe_dev_tx_queue_setup()
    598  pdata->tx_queues = dev->data->tx_queues;  in axgbe_dev_tx_queue_setup()
    716  txq = dev->data->tx_queues[i];  in axgbe_dev_disable_tx()
    727  txq = dev->data->tx_queues[i];  in axgbe_dev_disable_tx()
    739  txq = dev->data->tx_queues[i];  in axgbe_dev_enable_tx()
    905  txq = dev->data->tx_queues[i];  in axgbe_dev_clear_queues()
    909  dev->data->tx_queues[i] = NULL;  in axgbe_dev_clear_queues()

/dpdk/drivers/raw/ntb/

ntb.c
    265  q_conf->tx_free_thresh = hw->tx_queues[queue_id]->tx_free_thresh;  in ntb_queue_conf_get()
    451  hw->tx_queues[qp_id] = txq;  in ntb_txq_setup()
    486  ntb_txq_release(hw->tx_queues[queue_id]);  in ntb_queue_release()
    487  hw->tx_queues[queue_id] = NULL;  in ntb_queue_release()
    506  struct ntb_tx_queue *txq = hw->tx_queues[qp_id];  in ntb_queue_init()
    606  struct ntb_tx_queue *txq = hw->tx_queues[(size_t)context];  in ntb_enqueue_bufs()
    876  hw->tx_queues = rte_zmalloc("ntb_tx_queues",  in ntb_dev_configure()
    889  rte_free(hw->tx_queues);  in ntb_dev_configure()
    891  hw->tx_queues = NULL;  in ntb_dev_configure()
    972  ntb_txq_release_mbufs(hw->tx_queues[i]);  in ntb_dev_start()
    [all …]

/dpdk/drivers/net/mvneta/

mvneta_rxtx.c
    774  if (dev->data->tx_queues[idx]) {  in mvneta_tx_queue_setup()
    775  rte_free(dev->data->tx_queues[idx]);  in mvneta_tx_queue_setup()
    776  dev->data->tx_queues[idx] = NULL;  in mvneta_tx_queue_setup()
    787  dev->data->tx_queues[idx] = txq;  in mvneta_tx_queue_setup()
    806  struct mvneta_txq *q = dev->data->tx_queues[qid];  in mvneta_tx_queue_release()
    954  struct mvneta_txq *txq = dev->data->tx_queues[i];  in mvneta_flush_queues()

/dpdk/drivers/net/atlantic/

atl_rxtx.c
    249  if (dev->data->tx_queues[tx_queue_id] != NULL) {  in atl_tx_queue_setup()
    251  dev->data->tx_queues[tx_queue_id] = NULL;  in atl_tx_queue_setup()
    302  dev->data->tx_queues[tx_queue_id] = txq;  in atl_tx_queue_setup()
    318  txq = eth_dev->data->tx_queues[i];  in atl_tx_init()
    560  txq = dev->data->tx_queues[tx_queue_id];  in atl_tx_queue_stop()
    574  struct atl_tx_queue *txq = dev->data->tx_queues[tx_queue_id];  in atl_tx_queue_release()
    600  dev->data->tx_queues[i] = 0;  in atl_free_queues()
    684  txq = dev->data->tx_queues[queue_id];  in atl_txq_info_get()

/dpdk/drivers/net/failsafe/

failsafe_rxtx.c
    153  sub_txq = ETH(sdev)->data->tx_queues[txq->qid];  in failsafe_tx_burst()
    173  sub_txq = ETH(sdev)->data->tx_queues[txq->qid];  in failsafe_tx_burst_fast()

failsafe_ops.c
    115  txq = dev->data->tx_queues[i];  in fs_set_queues_state_start()
    177  if (dev->data->tx_queues[i] != NULL)  in fs_set_queues_state_stop()
    551  struct txq *txq = dev->data->tx_queues[qid];  in fs_tx_queue_release()
    557  if (ETH(sdev)->data->tx_queues != NULL &&  in fs_tx_queue_release()
    558  ETH(sdev)->data->tx_queues[txq->qid] != NULL)  in fs_tx_queue_release()
    561  dev->data->tx_queues[txq->qid] = NULL;  in fs_tx_queue_release()
    589  txq = dev->data->tx_queues[tx_queue_id];  in fs_tx_queue_setup()
    592  dev->data->tx_queues[tx_queue_id] = NULL;  in fs_tx_queue_setup()
    609  dev->data->tx_queues[tx_queue_id] = txq;  in fs_tx_queue_setup()
    640  dev->data->tx_queues[i] = NULL;  in fs_dev_free_queues()

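failsafe_tx_burst() shows the delegation trick behind the fail-safe PMD: its own queue object only records a queue index (qid), and the burst function forwards to the queue at the same index on the currently active sub-device. A hypothetical sketch of that idea; the real driver resolves the sub-device through its ETH()/sub-device iteration macros, which are not reproduced here, and struct fs_txq_sketch is a stand-in:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    struct fs_txq_sketch {
            uint16_t qid;   /* queue index mirrored on every sub-device */
    };

    static uint16_t
    fs_tx_burst_sketch(struct fs_txq_sketch *txq,
                       struct rte_eth_dev *active_sub,
                       struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
            void *sub_txq;

            if (active_sub == NULL)
                    return 0;       /* no usable sub-device right now */

            /* Same index, but the sub-device's own queue object. */
            sub_txq = active_sub->data->tx_queues[txq->qid];
            return active_sub->tx_pkt_burst(sub_txq, pkts, nb_pkts);
    }
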
/dpdk/drivers/net/nfp/

nfp_rxtx.c
    660  struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];  in nfp_net_tx_queue_release()
    719  if (dev->data->tx_queues[queue_idx]) {  in nfp_net_tx_queue_setup()
    723  dev->data->tx_queues[queue_idx] = NULL;  in nfp_net_tx_queue_setup()
    734  dev->data->tx_queues[queue_idx] = txq;  in nfp_net_tx_queue_setup()
    748  dev->data->tx_queues[queue_idx] = NULL;  in nfp_net_tx_queue_setup()
    775  dev->data->tx_queues[queue_idx] = NULL;  in nfp_net_tx_queue_setup()

/dpdk/drivers/net/ark/

ark_ethdev_tx.c
    268  dev->data->tx_queues[queue_idx] = queue;  in eth_ark_tx_queue_setup()
    380  queue = dev->data->tx_queues[queue_id];  in eth_ark_tx_queue_stop()
    402  queue = dev->data->tx_queues[queue_id];  in eth_ark_tx_queue_start()

/dpdk/drivers/net/e1000/

em_rxtx.c
   1128  em_tx_queue_release(dev->data->tx_queues[qid]);  in eth_em_tx_queue_release()
   1270  if (dev->data->tx_queues[queue_idx] != NULL) {  in eth_em_tx_queue_setup()
   1271  em_tx_queue_release(dev->data->tx_queues[queue_idx]);  in eth_em_tx_queue_setup()
   1272  dev->data->tx_queues[queue_idx] = NULL;  in eth_em_tx_queue_setup()
   1318  dev->data->tx_queues[queue_idx] = txq;  in eth_em_tx_queue_setup()
   1573  txq = dev->data->tx_queues[i];  in em_dev_clear_queues()
   1602  dev->data->tx_queues[i] = NULL;  in em_dev_free_queues()
   1923  txq = dev->data->tx_queues[i];  in eth_em_tx_init()
   1997  txq = dev->data->tx_queues[queue_id];  in em_txq_info_get()
   2020  if (dev->data->tx_queues == NULL)  in e1000_flush_tx_ring()
    [all …]

/dpdk/drivers/net/enetfec/

enet_ethdev.c
    174  txq = fep->tx_queues[q];  in enet_free_buffers()
    391  fep->tx_queues[queue_idx] = txq;  in enetfec_tx_queue_setup()
    397  txq = fep->tx_queues[queue_idx];  in enetfec_tx_queue_setup()
    429  dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];  in enetfec_tx_queue_setup()
