/dpdk/drivers/net/bnxt/
bnxt_txq.c
     85  void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)  in bnxt_tx_queue_release_op() argument
     87  struct bnxt_tx_queue *txq = dev->data->tx_queues[queue_idx];  in bnxt_tx_queue_release_op()
    115  dev->data->tx_queues[queue_idx] = NULL;  in bnxt_tx_queue_release_op()
    120  uint16_t queue_idx,  in bnxt_tx_queue_setup_op() argument
    133  if (queue_idx >= bnxt_max_rings(bp)) {  in bnxt_tx_queue_setup_op()
    136  queue_idx, bp->max_tx_rings);  in bnxt_tx_queue_setup_op()
    146  txq = eth_dev->data->tx_queues[queue_idx];  in bnxt_tx_queue_setup_op()
    148  bnxt_tx_queue_release_op(eth_dev, queue_idx);  in bnxt_tx_queue_setup_op()
    158  eth_dev->data->tx_queues[queue_idx] = txq;  in bnxt_tx_queue_setup_op()
    180  txq->queue_id = queue_idx;  in bnxt_tx_queue_setup_op()
    [all …]
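The bnxt hits trace the standard ethdev tx-queue lifecycle: bound the index (bnxt checks it against bnxt_max_rings()), release whatever queue already sits in that slot, then publish the new queue and record its id. Below is a minimal sketch of that pattern; struct demo_txq and the demo_ function are hypothetical stand-ins, only the rte_ethdev/rte_malloc calls are real DPDK API, so read it as an illustration rather than the bnxt implementation.

    #include <errno.h>
    #include <rte_ethdev.h>
    #include <rte_malloc.h>

    /* Hypothetical queue struct standing in for struct bnxt_tx_queue. */
    struct demo_txq {
        uint16_t queue_id;
        uint16_t nb_desc;
    };

    static int
    demo_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                        uint16_t nb_desc, unsigned int socket_id,
                        const struct rte_eth_txconf *tx_conf __rte_unused)
    {
        struct demo_txq *txq;

        /* Bound the index; bnxt compares against its hw ring limit instead. */
        if (queue_idx >= dev->data->nb_tx_queues)
            return -EINVAL;

        /* Release any queue already installed at this slot. */
        if (dev->data->tx_queues[queue_idx] != NULL) {
            rte_free(dev->data->tx_queues[queue_idx]);
            dev->data->tx_queues[queue_idx] = NULL;
        }

        txq = rte_zmalloc_socket("demo_txq", sizeof(*txq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL)
            return -ENOMEM;

        txq->queue_id = queue_idx;
        txq->nb_desc = nb_desc;
        dev->data->tx_queues[queue_idx] = txq;  /* publish the queue */
        return 0;
    }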
bnxt_reps.c
    620  uint16_t queue_idx,  in bnxt_rep_rx_queue_setup_op() argument
    633  if (queue_idx >= rep_bp->rx_nr_rings) {  in bnxt_rep_rx_queue_setup_op()
    636  queue_idx, rep_bp->rx_nr_rings);  in bnxt_rep_rx_queue_setup_op()
    662  rxq = eth_dev->data->rx_queues[queue_idx];  in bnxt_rep_rx_queue_setup_op()
    675  eth_dev->data->rx_queues[queue_idx] = rxq;  in bnxt_rep_rx_queue_setup_op()
    694  rxq->queue_id = queue_idx;  in bnxt_rep_rx_queue_setup_op()
    723  uint16_t queue_idx,  in bnxt_rep_tx_queue_setup_op() argument
    733  if (queue_idx >= rep_bp->rx_nr_rings) {  in bnxt_rep_tx_queue_setup_op()
    736  queue_idx, rep_bp->rx_nr_rings);  in bnxt_rep_tx_queue_setup_op()
    784  txq->queue_id = queue_idx;  in bnxt_rep_tx_queue_setup_op()
    [all …]
bnxt_reps.h
     33  __rte_unused uint16_t queue_idx,
     40  __rte_unused uint16_t queue_idx,
     45  void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
     46  void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
bnxt_rxq.c
    306  struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];  in bnxt_rx_queue_release_op()
    319  uint16_t queue_idx,  in bnxt_rx_queue_setup_op() argument
    334  if (queue_idx >= bnxt_max_rings(bp)) {  in bnxt_rx_queue_setup_op()
    337  queue_idx, bp->max_rx_rings);  in bnxt_rx_queue_setup_op()
    347  rxq = eth_dev->data->rx_queues[queue_idx];  in bnxt_rx_queue_setup_op()
    349  bnxt_rx_queue_release_op(eth_dev, queue_idx);  in bnxt_rx_queue_setup_op()
    370  eth_dev->data->rx_queues[queue_idx] = rxq;  in bnxt_rx_queue_setup_op()
    380  rxq->queue_id = queue_idx;  in bnxt_rx_queue_setup_op()
    398  if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)  in bnxt_rx_queue_setup_op()
    407  if (!queue_idx)  in bnxt_rx_queue_setup_op()
    [all …]
bnxt_txq.h
     40  void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
     42  uint16_t queue_idx,
bnxt_rxq.h
     49  void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
     51  uint16_t queue_idx,
/dpdk/drivers/net/nfp/
nfp_rxtx.c
    516  if (dev->data->rx_queues[queue_idx]) {  in nfp_net_rx_queue_setup()
    518  dev->data->rx_queues[queue_idx] = NULL;  in nfp_net_rx_queue_setup()
    527  dev->data->rx_queues[queue_idx] = rxq;  in nfp_net_rx_queue_setup()
    530  rxq->qidx = queue_idx;  in nfp_net_rx_queue_setup()
    563  dev->data->rx_queues[queue_idx] = NULL;  in nfp_net_rx_queue_setup()
    577  dev->data->rx_queues[queue_idx] = NULL;  in nfp_net_rx_queue_setup()
    711  dev->data->port_id, (int)queue_idx);  in nfp_net_tx_queue_setup()
    719  if (dev->data->tx_queues[queue_idx]) {  in nfp_net_tx_queue_setup()
    721  queue_idx);  in nfp_net_tx_queue_setup()
    734  dev->data->tx_queues[queue_idx] = txq;  in nfp_net_tx_queue_setup()
    [all …]
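One detail worth keeping from the nfp hits: nfp_net_rx_queue_setup() stores rxq into dev->data->rx_queues[queue_idx] early (line 527) and then clears that slot on every failure path (lines 563 and 577), so an aborted setup never leaves a stale pointer for a later release to trip over. A hedged sketch of that error discipline; the demo_ names and the two-stage allocation are invented for illustration.

    #include <errno.h>
    #include <rte_ethdev.h>
    #include <rte_malloc.h>

    /* Hypothetical rx queue with a separately allocated sw ring. */
    struct demo_rxq {
        void **sw_ring;
        uint16_t qidx;
    };

    static int
    demo_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                        uint16_t nb_desc, unsigned int socket_id)
    {
        struct demo_rxq *rxq;

        /* Free and forget any queue already installed at this index. */
        if (dev->data->rx_queues[queue_idx] != NULL) {
            rte_free(dev->data->rx_queues[queue_idx]);
            dev->data->rx_queues[queue_idx] = NULL;
        }

        rxq = rte_zmalloc_socket("demo_rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
            return -ENOMEM;

        rxq->qidx = queue_idx;
        dev->data->rx_queues[queue_idx] = rxq;   /* published early ... */

        rxq->sw_ring = rte_zmalloc_socket("demo_sw_ring",
                                          sizeof(void *) * nb_desc,
                                          RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_ring == NULL) {
            rte_free(rxq);
            /* ... so every error path must clear the slot again. */
            dev->data->rx_queues[queue_idx] = NULL;
            return -ENOMEM;
        }
        return 0;
    }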
nfp_rxtx.h
    281  void nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
    283  int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
    287  void nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
    289  int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/dpdk/drivers/net/enetfec/
enet_ethdev.c
    358  uint16_t queue_idx,  in enetfec_tx_queue_setup() argument
    391  fep->tx_queues[queue_idx] = txq;  in enetfec_tx_queue_setup()
    397  txq = fep->tx_queues[queue_idx];  in enetfec_tx_queue_setup()
    401  txq->bd.queue_id = queue_idx;  in enetfec_tx_queue_setup()
    407  offset_des_active_txq[queue_idx];  in enetfec_tx_queue_setup()
    429  dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];  in enetfec_tx_queue_setup()
    435  uint16_t queue_idx,  in enetfec_rx_queue_setup() argument
    471  fep->rx_queues[queue_idx] = rxq;  in enetfec_rx_queue_setup()
    479  rxq = fep->rx_queues[queue_idx];  in enetfec_rx_queue_setup()
    483  rxq->bd.queue_id = queue_idx;  in enetfec_rx_queue_setup()
    [all …]
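enetfec keeps double bookkeeping: the queue is stored in a driver-private table (fep->tx_queues[queue_idx]) and the same pointer is mirrored into the generic dev->data->tx_queues[queue_idx] slot that ethdev dispatch uses. A compact sketch of the idea; the demo_ types and the fixed queue count are hypothetical.

    #include <rte_ethdev.h>

    #define DEMO_MAX_TXQ 4          /* hypothetical queue-table size */

    struct demo_txq { uint16_t queue_id; };

    /* Hypothetical private adapter state, analogous to enetfec's fep. */
    struct demo_priv {
        struct demo_txq *tx_queues[DEMO_MAX_TXQ];
    };

    static void
    demo_register_txq(struct rte_eth_dev *dev, struct demo_priv *priv,
                      uint16_t queue_idx, struct demo_txq *txq)
    {
        txq->queue_id = queue_idx;
        priv->tx_queues[queue_idx] = txq;       /* driver's own view   */
        dev->data->tx_queues[queue_idx] = txq;  /* ethdev dispatch view */
    }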
/dpdk/drivers/net/bnx2x/
bnx2x_rxtx.c
     41  bnx2x_rx_queue_release(dev->data->rx_queues[queue_idx]);  in bnx2x_dev_rx_queue_release()
     46  uint16_t queue_idx,  in bnx2x_dev_rx_queue_setup() argument
     58  struct bnx2x_fastpath *fp = &sc->fp[queue_idx];  in bnx2x_dev_rx_queue_setup()
     72  rxq->queue_id = queue_idx;  in bnx2x_dev_rx_queue_setup()
    157  dev->data->rx_queues[queue_idx] = rxq;  in bnx2x_dev_rx_queue_setup()
    185  bnx2x_tx_queue_release(dev->data->tx_queues[queue_idx]);  in bnx2x_dev_tx_queue_release()
    232  uint16_t queue_idx,  in bnx2x_dev_tx_queue_setup() argument
    244  struct bnx2x_fastpath *fp = &sc->fp[queue_idx];  in bnx2x_dev_tx_queue_setup()
    267  queue_idx, nb_desc, txq->tx_free_thresh,  in bnx2x_dev_tx_queue_setup()
    305  txq->queue_id = queue_idx;  in bnx2x_dev_tx_queue_setup()
    [all …]
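bnx2x splits queue release in two, as the hits at 41 and 185 show: the ethdev-facing op takes (dev, queue_idx) and immediately delegates to a helper that takes the queue pointer itself. A minimal sketch of that wrapper shape, with hypothetical demo_ names:

    #include <rte_ethdev.h>
    #include <rte_malloc.h>

    struct demo_txq { uint16_t queue_id; };

    /* Pointer-based helper: the actual teardown lives here. */
    static void
    demo_tx_queue_release(struct demo_txq *txq)
    {
        rte_free(txq);          /* rte_free(NULL) is a no-op */
    }

    /* Index-based ethdev op: a thin wrapper that resolves queue_idx first. */
    static void
    demo_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
    {
        demo_tx_queue_release(dev->data->tx_queues[queue_idx]);
        dev->data->tx_queues[queue_idx] = NULL;
    }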
bnx2x_rxtx.h
     75  void bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
     76  void bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
/dpdk/drivers/bus/fslmc/mc/
dpdmai.c
    326  uint8_t queue_idx,  in dpdmai_set_rx_queue() argument
    341  cmd_params->queue_idx = queue_idx;  in dpdmai_set_rx_queue()
    368  uint8_t queue_idx,  in dpdmai_get_rx_queue() argument
    383  cmd_params->queue_idx = queue_idx;  in dpdmai_get_rx_queue()
    418  uint8_t queue_idx,  in dpdmai_get_tx_queue() argument
    433  cmd_params->queue_idx = queue_idx;  in dpdmai_get_tx_queue()
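dpdmai never subscripts an array with queue_idx; it serializes the index into a command descriptor (cmd_params->queue_idx = queue_idx) that is then submitted to the management complex, which presumably performs the lookup in firmware. A rough sketch of that marshalling style; the command layout below is invented, the real packed descriptors live in the fsl_dpdmai command headers and must match the MC ABI.

    #include <stdint.h>
    #include <string.h>

    /* Invented 8-byte command layout, for illustration only. */
    struct demo_cmd_get_queue {
        uint8_t queue_idx;      /* which DPDMAI queue the command targets */
        uint8_t priority;
        uint8_t pad[6];
    };

    static void
    demo_fill_get_queue_cmd(uint64_t *cmd_body, uint8_t queue_idx,
                            uint8_t priority)
    {
        struct demo_cmd_get_queue params;

        memset(&params, 0, sizeof(params));
        params.queue_idx = queue_idx;   /* serialized, not used as a subscript */
        params.priority = priority;
        memcpy(cmd_body, &params, sizeof(params));
    }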
fsl_dpdmai.h
    181  uint8_t queue_idx,
    202  uint8_t queue_idx,
    219  uint8_t queue_idx,
/dpdk/drivers/net/enic/
enic_main.c
     514  wq = &enic->wq[queue_idx];  in enic_prep_wq_for_simple_tx()
     749  vnic_wq_enable(&enic->wq[queue_idx]);  in enic_start_wq()
     758  ret = vnic_wq_disable(&enic->wq[queue_idx]);  in enic_stop_wq()
     828  RTE_ASSERT(queue_idx == 0);  in enic_alloc_rq()
     833  queue_idx = sop_queue_idx;  in enic_alloc_rq()
     894  queue_idx);  in enic_alloc_rq()
    1057  RTE_ASSERT(queue_idx == 0);  in enic_alloc_wq()
    1059  queue_idx = vf->pf_wq_idx;  in enic_alloc_wq()
    1063  cq_index = enic_cq_wq(enic, queue_idx);  in enic_alloc_wq()
    1065  wq = &enic->wq[queue_idx];  in enic_alloc_wq()
    [all …]
enic_ethdev.c
    146  uint16_t queue_idx,  in enicpmd_dev_tx_queue_setup() argument
    160  wq = &enic->wq[queue_idx];  in enicpmd_dev_tx_queue_setup()
    175  uint16_t queue_idx)  in enicpmd_dev_tx_queue_start() argument
    181  enic_start_wq(enic, queue_idx);  in enicpmd_dev_tx_queue_start()
    187  uint16_t queue_idx)  in enicpmd_dev_tx_queue_stop() argument
    194  ret = enic_stop_wq(enic, queue_idx);  in enicpmd_dev_tx_queue_stop()
    202  uint16_t queue_idx)  in enicpmd_dev_rx_queue_start() argument
    208  enic_start_rq(enic, queue_idx);  in enicpmd_dev_rx_queue_start()
    214  uint16_t queue_idx)  in enicpmd_dev_rx_queue_stop() argument
    221  ret = enic_stop_rq(enic, queue_idx);  in enicpmd_dev_rx_queue_stop()
    [all …]
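The enic_ethdev.c hits add per-queue start/stop ops that forward queue_idx to the enic_main.c helpers. PMDs implementing these ops typically also update the per-queue state array in dev->data so the ethdev layer can report it; a sketch under that assumption, using the driver-side ethdev_driver.h header and hypothetical demo_hw_* hooks:

    #include <ethdev_driver.h>  /* RTE_ETH_QUEUE_STATE_*, struct rte_eth_dev */

    /* Hypothetical hardware hooks standing in for enic_start_wq()/
     * enic_stop_wq(); assume 0 means success. */
    int demo_hw_start_txq(struct rte_eth_dev *dev, uint16_t queue_idx);
    int demo_hw_stop_txq(struct rte_eth_dev *dev, uint16_t queue_idx);

    static int
    demo_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_idx)
    {
        int ret = demo_hw_start_txq(dev, queue_idx);

        if (ret == 0)
            dev->data->tx_queue_state[queue_idx] =
                    RTE_ETH_QUEUE_STATE_STARTED;
        return ret;
    }

    static int
    demo_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_idx)
    {
        int ret = demo_hw_stop_txq(dev, queue_idx);

        if (ret == 0)
            dev->data->tx_queue_state[queue_idx] =
                    RTE_ETH_QUEUE_STATE_STOPPED;
        return ret;
    }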
enic.h
    383  int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
    385  void enic_start_wq(struct enic *enic, uint16_t queue_idx);
    386  int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
    387  void enic_start_rq(struct enic *enic, uint16_t queue_idx);
    388  int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
    390  int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
/dpdk/drivers/net/netvsc/
hn_var.h
    207  int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
    211  void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
    220  uint16_t queue_idx, uint16_t nb_desc,
    264  uint16_t queue_idx, uint16_t nb_desc,
    271  uint16_t queue_idx, uint16_t nb_desc,
/dpdk/drivers/net/pfe/
pfe_ethdev.c
    440  pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,  in pfe_rx_queue_setup() argument
    452  if (queue_idx >= EMAC_RXQ_CNT) {  in pfe_rx_queue_setup()
    454  queue_idx, EMAC_RXQ_CNT);  in pfe_rx_queue_setup()
    475  dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];  in pfe_rx_queue_setup()
    476  priv->client.rx_q[queue_idx].queue_id = queue_idx;  in pfe_rx_queue_setup()
    483  uint16_t queue_idx,  in pfe_tx_queue_setup() argument
    490  if (queue_idx >= emac_txq_cnt) {  in pfe_tx_queue_setup()
    492  queue_idx, emac_txq_cnt);  in pfe_tx_queue_setup()
    495  dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];  in pfe_tx_queue_setup()
    496  priv->client.tx_q[queue_idx].queue_id = queue_idx;  in pfe_tx_queue_setup()
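pfe takes yet another shape: the queues are preallocated inside priv->client, so setup only bounds-checks queue_idx against the fixed EMAC queue count and points dev->data at the matching slot; nothing is allocated. A sketch with hypothetical demo_ names and an arbitrary queue count:

    #include <errno.h>
    #include <rte_ethdev.h>

    #define DEMO_RXQ_CNT 4      /* hypothetical fixed hardware queue count */

    struct demo_rx_q { uint16_t queue_id; };

    /* Hypothetical client block with a statically sized queue table. */
    struct demo_client {
        struct demo_rx_q rx_q[DEMO_RXQ_CNT];
    };

    static int
    demo_rx_queue_setup(struct rte_eth_dev *dev, struct demo_client *client,
                        uint16_t queue_idx)
    {
        if (queue_idx >= DEMO_RXQ_CNT)
            return -EINVAL;

        /* No allocation: point ethdev at the preallocated slot. */
        dev->data->rx_queues[queue_idx] = &client->rx_q[queue_idx];
        client->rx_q[queue_idx].queue_id = queue_idx;
        return 0;
    }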
/dpdk/drivers/net/virtio/
virtio_user_ethdev.c
    176  uint16_t queue_idx = vq->vq_queue_index;  in virtio_user_setup_queue_packed() local
    183  vring = &dev->packed_vrings[queue_idx];  in virtio_user_setup_queue_packed()
    194  dev->packed_queues[queue_idx].avail_wrap_counter = true;  in virtio_user_setup_queue_packed()
    195  dev->packed_queues[queue_idx].used_wrap_counter = true;  in virtio_user_setup_queue_packed()
    204  uint16_t queue_idx = vq->vq_queue_index;  in virtio_user_setup_queue_split() local
    213  dev->vrings[queue_idx].num = vq->vq_nentries;  in virtio_user_setup_queue_split()
    214  dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;  in virtio_user_setup_queue_split()
    215  dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;  in virtio_user_setup_queue_split()
    216  dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;  in virtio_user_setup_queue_split()
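virtio_user keys everything off vq->vq_queue_index: the packed path initializes both wrap counters to true, and the split path records the desc/avail/used addresses of the ring under dev->vrings[queue_idx]. A simplified sketch of the split-ring wiring, deriving avail and used from the spec's layout (avail follows the descriptor table, used is aligned up past avail); the demo_ structs and the 4096-byte alignment are stand-ins for the driver's virtio_ring.h definitions.

    #include <stdint.h>
    #include <rte_common.h>     /* RTE_ALIGN_CEIL */

    /* A split descriptor is 16 bytes per the virtio spec. */
    struct demo_vring_desc { uint64_t addr; uint32_t len; uint16_t flags, next; };

    struct demo_vring {
        unsigned int num;
        void *desc, *avail, *used;
    };

    #define DEMO_VRING_ALIGN 4096   /* legacy split-ring alignment */

    static void
    demo_setup_queue_split(struct demo_vring *vrings, uint16_t queue_idx,
                           unsigned int num, uintptr_t desc_addr)
    {
        uintptr_t avail_addr = desc_addr +
                num * sizeof(struct demo_vring_desc);
        /* avail = flags + idx + ring[num] + used_event, per the spec */
        uintptr_t used_addr = RTE_ALIGN_CEIL(avail_addr +
                sizeof(uint16_t) * (3 + num), DEMO_VRING_ALIGN);

        vrings[queue_idx].num = num;
        vrings[queue_idx].desc = (void *)desc_addr;
        vrings[queue_idx].avail = (void *)avail_addr;
        vrings[queue_idx].used = (void *)used_addr;
    }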
/dpdk/drivers/net/ark/
ark_ethdev_rx.c
    117  uint16_t queue_idx,  in eth_ark_dev_rx_queue_setup() argument
    130  int qidx = queue_idx;  in eth_ark_dev_rx_queue_setup()
    133  if (dev->data->rx_queues[queue_idx] != NULL) {  in eth_ark_dev_rx_queue_setup()
    134  eth_ark_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);  in eth_ark_dev_rx_queue_setup()
    135  dev->data->rx_queues[queue_idx] = NULL;  in eth_ark_dev_rx_queue_setup()
    174  queue->queue_index = queue_idx;  in eth_ark_dev_rx_queue_setup()
    201  dev->data->rx_queues[queue_idx] = queue;  in eth_ark_dev_rx_queue_setup()
    224  status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx);  in eth_ark_dev_rx_queue_setup()
/dpdk/drivers/net/e1000/
em_rxtx.c
    1199  uint16_t queue_idx,  in eth_em_tx_queue_setup() argument
    1251  (int)queue_idx);  in eth_em_tx_queue_setup()
    1270  if (dev->data->tx_queues[queue_idx] != NULL) {  in eth_em_tx_queue_setup()
    1272  dev->data->tx_queues[queue_idx] = NULL;  in eth_em_tx_queue_setup()
    1306  txq->queue_id = queue_idx;  in eth_em_tx_queue_setup()
    1318  dev->data->tx_queues[queue_idx] = txq;  in eth_em_tx_queue_setup()
    1399  uint16_t queue_idx,  in eth_em_rx_queue_setup() argument
    1437  if (dev->data->rx_queues[queue_idx] != NULL) {  in eth_em_rx_queue_setup()
    1439  dev->data->rx_queues[queue_idx] = NULL;  in eth_em_rx_queue_setup()
    1469  rxq->queue_id = queue_idx;  in eth_em_rx_queue_setup()
    [all …]
/dpdk/drivers/net/i40e/
i40e_rxtx.c
    1671  return queue_idx;  in i40e_get_queue_offset_by_qindex()
    1950  uint16_t queue_idx,  in i40e_dev_rx_queue_setup() argument
    2071  if (queue_idx >= base && queue_idx < (base + BIT(bsf)))  in i40e_dev_rx_queue_setup()
    2269  uint16_t queue_idx,  in i40e_dev_tx_queue_setup() argument
    2339  (int)queue_idx);  in i40e_dev_tx_queue_setup()
    2348  (int)queue_idx);  in i40e_dev_tx_queue_setup()
    2357  (int)queue_idx);  in i40e_dev_tx_queue_setup()
    2367  (int)queue_idx);  in i40e_dev_tx_queue_setup()
    2376  (int)queue_idx);  in i40e_dev_tx_queue_setup()
    2385  (int)queue_idx);  in i40e_dev_tx_queue_setup()
    [all …]
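The i40e hit at 2071 tests whether queue_idx falls inside a VMDq pool whose queues start at base and span BIT(bsf) entries, and the name i40e_get_queue_offset_by_qindex suggests the device-global index is then rebased into that pool. A sketch of the range test, assuming BIT(x) expands to 1 << x as in the usual kernel idiom:

    #include <stdbool.h>
    #include <stdint.h>

    /* Does queue_idx land in the pool [base, base + 2^bsf)? */
    static bool
    demo_queue_in_pool(uint16_t queue_idx, uint16_t base, uint8_t bsf)
    {
        return queue_idx >= base &&
               queue_idx < (uint16_t)(base + (1u << bsf));
    }

    /* Rebase a device-global queue index to its offset inside the pool. */
    static uint16_t
    demo_queue_offset(uint16_t queue_idx, uint16_t base)
    {
        return queue_idx - base;
    }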
/dpdk/drivers/net/axgbe/
axgbe_rxtx.c
     33  void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)  in axgbe_dev_rx_queue_release() argument
     35  axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);  in axgbe_dev_rx_queue_release()
     38  int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,  in axgbe_dev_rx_queue_setup() argument
     70  rxq->queue_id = queue_idx;  in axgbe_dev_rx_queue_setup()
     91  dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,  in axgbe_dev_rx_queue_setup()
    111  dev->data->rx_queues[queue_idx] = rxq;  in axgbe_dev_rx_queue_setup()
    519  axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);  in axgbe_dev_tx_queue_release()
    522  int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,  in axgbe_dev_tx_queue_setup() argument
    570  tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,  in axgbe_dev_tx_queue_setup()
    579  txq->queue_id = queue_idx;  in axgbe_dev_tx_queue_setup()
    [all …]
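axgbe allocates each descriptor ring through rte_eth_dma_zone_reserve(), which derives a unique memzone name from the port, the ring-name string, and queue_idx, so the "rx_ring" and "tx_ring" zones of different queues never collide; in recent DPDK releases it also hands back the already-reserved zone when setup is re-run for the same queue. A minimal sketch; the 128-byte alignment mirrors the hit at line 91, and error handling is trimmed:

    #include <ethdev_driver.h>  /* driver-side ethdev API */
    #include <rte_memzone.h>

    /* Reserve (or look up) the DMA memzone backing one rx descriptor ring. */
    static const struct rte_memzone *
    demo_reserve_rx_ring(struct rte_eth_dev *dev, uint16_t queue_idx,
                         size_t size, int socket_id)
    {
        return rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
                                        size, 128, socket_id);
    }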
/dpdk/drivers/net/hinic/
hinic_pmd_ethdev.c
    421  (int)queue_idx);  in hinic_rx_queue_setup()
    429  queue_idx, dev->data->name);  in hinic_rx_queue_setup()
    432  nic_dev->rxqs[queue_idx] = rxq;  in hinic_rx_queue_setup()
    458  rxq->q_id = queue_idx;  in hinic_rx_queue_setup()
    474  queue_idx, dev->data->name);  in hinic_rx_queue_setup()
    485  hinic_destroy_rq(hwdev, queue_idx);  in hinic_rx_queue_setup()
    576  (int)queue_idx);  in hinic_tx_queue_setup()
    584  queue_idx, dev->data->name);  in hinic_tx_queue_setup()
    587  nic_dev->txqs[queue_idx] = txq;  in hinic_tx_queue_setup()
    597  txq->q_id = queue_idx;  in hinic_tx_queue_setup()
    [all …]
/dpdk/drivers/net/virtio/virtio_user/
virtio_user_dev.h
     73  void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
     75  uint16_t queue_idx);