/f-stack/dpdk/drivers/net/bnxt/

bnxt_rxq.c:
     54  rxq = bp->eth_dev->data->rx_queues[0];  in bnxt_mq_rx_configure()
    257  rxq = bp->rx_queues[i];  in bnxt_free_rx_mbufs()
    326  if (eth_dev->data->rx_queues) {  in bnxt_rx_queue_setup_op()
    327  rxq = eth_dev->data->rx_queues[queue_idx];  in bnxt_rx_queue_setup_op()
    365  eth_dev->data->rx_queues[queue_idx] = rxq;  in bnxt_rx_queue_setup_op()
    412  if (eth_dev->data->rx_queues) {  in bnxt_rx_queue_intr_enable_op()
    413  rxq = eth_dev->data->rx_queues[queue_id];  in bnxt_rx_queue_intr_enable_op()
    435  if (eth_dev->data->rx_queues) {  in bnxt_rx_queue_intr_disable_op()
    436  rxq = eth_dev->data->rx_queues[queue_id];  in bnxt_rx_queue_intr_disable_op()
    535  rxq = bp->rx_queues[rx_queue_id];  in bnxt_rx_queue_stop()
    [all …]

bnxt_reps.c:
     50  rep_rxq = vfr_bp->rx_queues[que];  in bnxt_vfr_recv()
    394  rxq = rep_bp->rx_queues[i];  in bnxt_rep_free_rx_mbufs()
    556  rep_bp->rx_queues = (void *)eth_dev->data->rx_queues;  in bnxt_rep_dev_configure_op()
    614  if (!parent_bp->rx_queues) {  in bnxt_rep_rx_queue_setup_op()
    619  parent_rxq = parent_bp->rx_queues[queue_idx];  in bnxt_rep_rx_queue_setup_op()
    630  if (eth_dev->data->rx_queues) {  in bnxt_rep_rx_queue_setup_op()
    631  rxq = eth_dev->data->rx_queues[queue_idx];  in bnxt_rep_rx_queue_setup_op()
    663  eth_dev->data->rx_queues[queue_idx] = rxq;  in bnxt_rep_rx_queue_setup_op()

/f-stack/dpdk/drivers/net/ring/

rte_eth_ring.c:
     28  struct rte_ring * const *rx_queues;  member
    140  dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];  in eth_rx_queue_setup()
    254  r = dev->data->rx_queues[i];  in eth_dev_close()
    287  struct rte_ring * const rx_queues[],  in do_eth_dev_ring_create() argument
    342  data->rx_queues = rx_queues_local;  in do_eth_dev_ring_create()
    349  internals->rx_ring_queues[i].rng = rx_queues[i];  in do_eth_dev_ring_create()
    350  data->rx_queues[i] = &internals->rx_ring_queues[i];  in do_eth_dev_ring_create()
    387  rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],  in rte_eth_from_rings() argument
    394  .rx_queues = rx_queues,  in rte_eth_from_rings()
    407  if (rx_queues == NULL && nb_rx_queues > 0) {  in rte_eth_from_rings()
    [all …]

rte_eth_ring.h:
     33  struct rte_ring * const rx_queues[],

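The rte_eth_ring.h hit above belongs to the public rte_eth_from_rings() prototype, which wraps pre-created rte_rings into an ethdev port. A minimal usage sketch, assuming DPDK's documented signature; the ring names, sizes, and single-queue layout here are illustrative, not taken from the f-stack sources:

#include <rte_ring.h>
#include <rte_lcore.h>
#include <rte_eth_ring.h>

/* Wrap one rx ring and one tx ring into a ring-backed ethdev port. */
static int create_ring_port(void)
{
	struct rte_ring *rx = rte_ring_create("r0_rx", 1024, rte_socket_id(),
					      RING_F_SP_ENQ | RING_F_SC_DEQ);
	struct rte_ring *tx = rte_ring_create("r0_tx", 1024, rte_socket_id(),
					      RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (rx == NULL || tx == NULL)
		return -1;

	struct rte_ring *rx_queues[] = { rx };
	struct rte_ring *tx_queues[] = { tx };

	/* Returns the new port id on success, -1 on failure. */
	return rte_eth_from_rings("net_ring0", rx_queues, 1,
				  tx_queues, 1, rte_socket_id());
}

Internally (per the rte_eth_ring.c hits at lines 342-350) the PMD points data->rx_queues[i] at its own per-queue wrappers around the caller's rings.
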
/f-stack/dpdk/drivers/net/hns3/

hns3_rxtx.c:
    152  if (hw->fkq_data.rx_queues[idx]) {  in hns3_fake_rx_queue_release()
    161  hw->fkq_data.rx_queues = NULL;  in hns3_fake_rx_queue_release()
    203  if (dev->data->rx_queues[i]) {  in hns3_free_rx_queues()
    205  dev->data->rx_queues[i] = NULL;  in hns3_free_rx_queues()
    212  if (fkq_data->rx_queues[i])  in hns3_free_rx_queues()
    344  rxq = hw->data->rx_queues[i];  in hns3_update_all_queues_pvid_proc_en()
    497  rxq = hw->data->rx_queues[i];  in hns3_start_all_rxqs()
    518  rxq = hw->data->rx_queues[j];  in hns3_start_all_rxqs()
    532  rxq = hw->data->rx_queues[i];  in hns3_restore_tqp_enable_state()
   1167  rxq = hw->data->rx_queues[i];  in hns3_start_tqps()
    [all …]

/f-stack/dpdk/drivers/net/ark/

ark_ethdev_rx.c:
    134  if (dev->data->rx_queues[queue_idx] != NULL) {  in eth_ark_dev_rx_queue_setup()
    135  eth_ark_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);  in eth_ark_dev_rx_queue_setup()
    136  dev->data->rx_queues[queue_idx] = NULL;  in eth_ark_dev_rx_queue_setup()
    197  dev->data->rx_queues[queue_idx] = queue;  in eth_ark_dev_rx_queue_setup()
    397  queue = dev->data->rx_queues[queue_id];  in eth_ark_dev_rx_queue_count()
    407  queue = dev->data->rx_queues[queue_id];  in eth_ark_rx_start_queue()
    430  queue = dev->data->rx_queues[queue_id];  in eth_ark_rx_stop_queue()
    543  queue = dev->data->rx_queues[queue_id];  in eth_ark_rx_dump_queue()
    623  queue = (struct ark_rx_queue *)dev->data->rx_queues[i];  in eth_ark_udm_force_close()

/f-stack/dpdk/drivers/net/mlx4/

mlx4_rxq.c:
    193  rxq = dev->data->rx_queues[id];  in mlx4_rss_attach()
    272  mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);  in mlx4_rss_attach()
    307  mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);  in mlx4_rss_detach()
    361  struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];  in mlx4_rss_init()
    436  struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];  in mlx4_rss_init()
    462  struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];  in mlx4_rss_deinit()
    768  rxq = dev->data->rx_queues[idx];  in mlx4_rx_queue_setup()
    899  dev->data->rx_queues[idx] = rxq;  in mlx4_rx_queue_setup()
    902  dev->data->rx_queues[idx] = NULL;  in mlx4_rx_queue_setup()
    927  if (ETH_DEV(priv)->data->rx_queues[i] == rxq) {  in mlx4_rx_queue_release()
    [all …]

mlx4_intr.c:
     81  struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];  in mlx4_rx_intr_vec_enable()
    319  struct rxq *rxq = dev->data->rx_queues[idx];  in mlx4_rx_intr_disable()
    364  struct rxq *rxq = dev->data->rx_queues[idx];  in mlx4_rx_intr_enable()

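The interrupt-op hits here and in bnxt_rxq.c above share one guard shape: validate dev->data->rx_queues and the queue index before dereferencing the slot, because these ops can arrive before queues are configured. A generic sketch of that pattern, assuming the standard ethdev op signature; struct my_rxq and the arming step are placeholders, not mlx4 code:

#include <errno.h>
#include <stdint.h>
#include <ethdev_driver.h>	/* named rte_ethdev_driver.h on older DPDK releases */

struct my_rxq { int irq_armed; };	/* placeholder driver queue type */

static int
my_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct my_rxq *rxq;

	/* Guard against ops arriving before queue setup, as the hits above do. */
	if (dev->data->rx_queues == NULL ||
	    queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[queue_id];
	if (rxq == NULL)
		return -EINVAL;

	rxq->irq_armed = 1;	/* a real driver arms its IRQ/eventfd here */
	return 0;
}
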
/f-stack/dpdk/drivers/net/vhost/

rte_eth_vhost.c:
    237  vq = dev->data->rx_queues[i];  in vhost_dev_xstats_reset()
    294  vq = dev->data->rx_queues[i];  in vhost_dev_xstats_get()
    581  vq = dev->data->rx_queues[qid];  in eth_rxq_intr_enable()
    618  vq = dev->data->rx_queues[qid];  in eth_rxq_intr_disable()
    688  vq = dev->data->rx_queues[i];  in eth_vhost_install_intr()
    734  vq = dev->data->rx_queues[i];  in update_queuing_status()
    759  vq = eth_dev->data->rx_queues[i];  in queue_setup()
    865  vq = eth_dev->data->rx_queues[i];  in destroy_device()
   1193  if (dev->data->rx_queues)  in eth_dev_close()
   1295  vq = dev->data->rx_queues[i];  in eth_stats_get()
    [all …]

/f-stack/dpdk/drivers/net/nfb/

nfb_rx.c:
     68  struct ndp_rx_queue *rxq = dev->data->rx_queues[rxq_id];  in nfb_eth_rx_queue_start()
     89  struct ndp_rx_queue *rxq = dev->data->rx_queues[rxq_id];  in nfb_eth_rx_queue_stop()
    137  dev->data->rx_queues[rx_queue_id] = rxq;  in nfb_eth_rx_queue_setup()

nfb_stats.c:
     24  dev->data->rx_queues);  in nfb_eth_stats_get()
     63  dev->data->rx_queues);  in nfb_eth_stats_reset()

/f-stack/dpdk/drivers/net/kni/

rte_eth_kni.c:
     59  struct pmd_queue rx_queues[KNI_MAX_QUEUE_PER_PORT];  member
    130  mb_pool = internals->rx_queues[0].mb_pool;  in eth_kni_start()
    257  q = &internals->rx_queues[rx_queue_id];  in eth_kni_rx_queue_setup()
    261  dev->data->rx_queues[rx_queue_id] = q;  in eth_kni_rx_queue_setup()
    308  q = data->rx_queues[i];  in eth_kni_stats_get()
    341  q = data->rx_queues[i];  in eth_kni_stats_reset()

/f-stack/dpdk/drivers/net/szedata2/

rte_eth_szedata2.c:
   1186  eth_rx_queue_release(dev->data->rx_queues[i]);  in eth_dev_close()
   1187  dev->data->rx_queues[i] = NULL;  in eth_dev_close()
   1247  if (dev->data->rx_queues[rx_queue_id] != NULL) {  in eth_rx_queue_setup()
   1249  dev->data->rx_queues[rx_queue_id] = NULL;  in eth_rx_queue_setup()
   1284  dev->data->rx_queues[rx_queue_id] = rxq;  in eth_rx_queue_setup()
   1687  SZE2_DIR_RX, rx_queues * i);  in get_port_info()
   1691  rx_queues : 0;  in get_port_info()
   1695  rx_queues, rx_queues * i, numa_rx);  in get_port_info()
   1704  rx_queues * i, numa_rx,  in get_port_info()
   1725  pi[current].rx_base_id = rx_queues * i;  in get_port_info()
    [all …]

/f-stack/dpdk/drivers/net/dpaa/

dpaa_ethdev.c:
    504  rte_free(dpaa_intf->rx_queues);  in dpaa_eth_dev_close()
    505  dpaa_intf->rx_queues = NULL;  in dpaa_eth_dev_close()
   1109  dev->data->rx_queues[queue_idx] = rxq;  in dpaa_eth_rx_queue_setup()
   1189  dev->data->rx_queues[eth_rx_queue_id] = rxq;  in dpaa_eth_eventq_attach()
   1515  rxq = dev->data->rx_queues[queue_id];  in dpaa_rxq_info_get()
   1910  dpaa_intf->rx_queues = rte_zmalloc(NULL,  in dpaa_dev_init()
   1912  if (!dpaa_intf->rx_queues) {  in dpaa_dev_init()
   1917  dpaa_intf->rx_queues = NULL;  in dpaa_dev_init()
   1980  dpaa_intf->rx_queues[loop].vsp_id = vsp_id;  in dpaa_dev_init()
   2115  rte_free(dpaa_intf->rx_queues);  in dpaa_dev_init()
    [all …]

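The dpaa hits around lines 1910-1917 and 504-505 show the driver's private rx_queues array being allocated with rte_zmalloc() at init and released with rte_free() on close. A minimal sketch of that allocate/teardown pair, assuming only those two real DPDK calls; MAX_RXQ and struct my_rxq are placeholders:

#include <rte_common.h>
#include <rte_malloc.h>

#define MAX_RXQ 16			/* placeholder queue count */
struct my_rxq { void *ring; };		/* placeholder per-queue state */

/* Zeroed array of per-queue state, as dpaa_dev_init() allocates. */
static struct my_rxq *
alloc_rxq_array(void)
{
	return rte_zmalloc(NULL, sizeof(struct my_rxq) * MAX_RXQ,
			   RTE_CACHE_LINE_SIZE);	/* NULL on failure */
}

/* Mirrors the close-path teardown; rte_free(NULL) is a no-op. */
static void
free_rxq_array(struct my_rxq **qs)
{
	rte_free(*qs);
	*qs = NULL;
}
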
/f-stack/dpdk/drivers/net/af_xdp/

rte_eth_af_xdp.c:
    134  struct pkt_rx_queue *rx_queues;  member
    674  &internals->rx_queues[i];  in get_shared_umem()
    684  &internals->rx_queues[i].umem->refcnt,  in get_shared_umem()
    686  *umem = internals->rx_queues[i].umem;  in get_shared_umem()
    772  rxq = &internals->rx_queues[i];  in eth_stats_get()
    806  memset(&internals->rx_queues[i].stats, 0,  in eth_stats_reset()
    860  rxq = &internals->rx_queues[i];  in eth_dev_close()
   1185  rxq = &internals->rx_queues[rx_queue_id];
   1217  dev->data->rx_queues[rx_queue_id] = rxq;
   1535  if (internals->rx_queues == NULL) {
    [all …]

/f-stack/dpdk/drivers/net/atlantic/

atl_rxtx.c:
    127  if (dev->data->rx_queues[rx_queue_id] != NULL) {  in atl_rx_queue_setup()
    129  dev->data->rx_queues[rx_queue_id] = NULL;  in atl_rx_queue_setup()
    188  dev->data->rx_queues[rx_queue_id] = rxq;  in atl_rx_queue_setup()
    350  rxq = eth_dev->data->rx_queues[i];  in atl_rx_init()
    452  rxq = dev->data->rx_queues[rx_queue_id];  in atl_rx_queue_start()
    484  rxq = dev->data->rx_queues[rx_queue_id];  in atl_rx_queue_stop()
    593  atl_rx_queue_release(dev->data->rx_queues[i]);  in atl_free_queues()
    594  dev->data->rx_queues[i] = 0;  in atl_free_queues()
    669  rxq = dev->data->rx_queues[queue_id];  in atl_rxq_info_get()
    703  rxq = dev->data->rx_queues[rx_queue_id];  in atl_rx_queue_count()
    [all …]

/f-stack/dpdk/drivers/net/axgbe/

axgbe_rxtx.c:
    112  dev->data->rx_queues[queue_idx] = rxq;  in axgbe_dev_rx_queue_setup()
    113  if (!pdata->rx_queues)  in axgbe_dev_rx_queue_setup()
    114  pdata->rx_queues = dev->data->rx_queues;  in axgbe_dev_rx_queue_setup()
    161  rxq = dev->data->rx_queues[i];  in axgbe_dev_disable_rx()
    167  rxq = dev->data->rx_queues[i];  in axgbe_dev_disable_rx()
    181  rxq = dev->data->rx_queues[i];  in axgbe_dev_enable_rx()
    815  rxq = dev->data->rx_queues[i];  in axgbe_dev_clear_queues()
    819  dev->data->rx_queues[i] = NULL;  in axgbe_dev_clear_queues()

/f-stack/dpdk/drivers/net/bnx2x/

bnx2x_rxtx.c:
    158  dev->data->rx_queues[queue_idx] = rxq;  in bnx2x_dev_rx_queue_setup()
    159  if (!sc->rx_queues) sc->rx_queues = dev->data->rx_queues;  in bnx2x_dev_rx_queue_setup()
    504  struct bnx2x_rx_queue *rxq = dev->data->rx_queues[i];  in bnx2x_dev_clear_queues()
    507  dev->data->rx_queues[i] = NULL;  in bnx2x_dev_clear_queues()

/f-stack/dpdk/drivers/net/failsafe/

failsafe_intr.c:
    300  fsrxq = fsdev->data->rx_queues[qid];  in failsafe_eth_rx_intr_ctl_subdevice()
    333  rxq = (struct rxq **)fsdev->data->rx_queues;  in failsafe_rx_intr_install_subdevice()
    373  fsrxq = fsdev->data->rx_queues[qid];  in failsafe_rx_intr_uninstall_subdevice()
    454  struct rxq *rxq = priv->data->rx_queues[i];  in fs_rx_intr_vec_install()

failsafe_ops.c:
    109  rxq = dev->data->rx_queues[i];  in fs_set_queues_state_start()
    173  if (dev->data->rx_queues[i] != NULL)  in fs_set_queues_state_stop()
    376  if (ETH(sdev)->data->rx_queues != NULL &&  in fs_rx_queue_release()
    379  (ETH(sdev)->data->rx_queues[rxq->qid]);  in fs_rx_queue_release()
    382  dev->data->rx_queues[rxq->qid] = NULL;  in fs_rx_queue_release()
    421  rxq = dev->data->rx_queues[rx_queue_id];  in fs_rx_queue_setup()
    424  dev->data->rx_queues[rx_queue_id] = NULL;  in fs_rx_queue_setup()
    449  dev->data->rx_queues[rx_queue_id] = rxq;  in fs_rx_queue_setup()
    482  rxq = dev->data->rx_queues[idx];  in fs_rx_intr_enable()
    522  rxq = dev->data->rx_queues[idx];  in fs_rx_intr_disable()
    [all …]

failsafe_rxtx.c:
     97  sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];  in failsafe_rx_burst()
    125  sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];  in failsafe_rx_burst_fast()

/f-stack/dpdk/drivers/net/pcap/

rte_eth_pcap.c:
    112  struct pmd_devargs rx_queues;  member
    806  dev->data->rx_queues[rx_queue_id] = pcap_q;  in eth_rx_queue_setup()
   1270  struct pmd_devargs *rx_queues = &devargs_all->rx_queues;  in eth_from_pcaps_common() local
   1272  const unsigned int nb_rx_queues = rx_queues->num_of_queue;  in eth_from_pcaps_common()
   1283  struct devargs_queue *queue = &rx_queues->queue[i];  in eth_from_pcaps_common()
   1309  struct pmd_devargs *rx_queues = &devargs_all->rx_queues;  in eth_from_pcaps() local
   1323  internals->if_index = if_nametoindex(rx_queues->queue[0].name);  in eth_from_pcaps()
   1326  if (rx_queues->phy_mac) {  in eth_from_pcaps()
   1327  int ret = eth_pcap_update_mac(rx_queues->queue[0].name,  in eth_from_pcaps()
   1568  devargs_all.rx_queues = pcaps;  in pmd_pcap_probe()

/f-stack/dpdk/drivers/net/mvneta/

mvneta_rxtx.c:
    726  if (dev->data->rx_queues[idx]) {  in mvneta_rx_queue_setup()
    727  rte_free(dev->data->rx_queues[idx]);  in mvneta_rx_queue_setup()
    728  dev->data->rx_queues[idx] = NULL;  in mvneta_rx_queue_setup()
    746  dev->data->rx_queues[idx] = rxq;  in mvneta_rx_queue_setup()
    845  struct mvneta_rxq *rxq = dev->data->rx_queues[i];  in mvneta_alloc_rx_bufs()
    936  struct mvneta_rxq *rxq = dev->data->rx_queues[i];  in mvneta_flush_queues()
    988  struct mvneta_rxq *q = dev->data->rx_queues[rx_queue_id];  in mvneta_rxq_info_get()

/f-stack/dpdk/drivers/raw/ntb/

ntb.c:
    266  q_conf->nb_desc = hw->rx_queues[queue_id]->nb_rx_desc;  in ntb_queue_conf_get()
    267  q_conf->rx_mp = hw->rx_queues[queue_id]->mpool;  in ntb_queue_conf_get()
    352  hw->rx_queues[qp_id] = rxq;  in ntb_rxq_setup()
    488  ntb_rxq_release(hw->rx_queues[queue_id]);  in ntb_queue_release()
    489  hw->rx_queues[queue_id] = NULL;  in ntb_queue_release()
    505  struct ntb_rx_queue *rxq = hw->rx_queues[qp_id];  in ntb_queue_init()
    874  hw->rx_queues = rte_zmalloc("ntb_rx_queues",  in ntb_dev_configure()
    888  rte_free(hw->rx_queues);  in ntb_dev_configure()
    890  hw->rx_queues = NULL;  in ntb_dev_configure()
    966  ntb_rxq_release_mbufs(hw->rx_queues[i]);  in ntb_dev_start()
    [all …]

/f-stack/dpdk/drivers/net/ionic/

ionic_rxtx.c:
    602  struct ionic_qcq *rxq = dev->data->rx_queues[queue_id];  in ionic_rxq_info_get()
    681  if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {  in ionic_dev_rx_queue_setup()
    682  void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];  in ionic_dev_rx_queue_setup()
    684  eth_dev->data->rx_queues[rx_queue_id] = NULL;  in ionic_dev_rx_queue_setup()
    710  eth_dev->data->rx_queues[rx_queue_id] = rxq;  in ionic_dev_rx_queue_setup()
    965  rxq = eth_dev->data->rx_queues[rx_queue_id];  in ionic_dev_rx_queue_start()
   1048  rxq = eth_dev->data->rx_queues[rx_queue_id];  in ionic_dev_rx_queue_stop()

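Across the ark, szedata2, mvneta, atlantic, and ionic hits, the rx queue setup ops follow one recurring shape: release any queue already sitting at the index, allocate the new queue, and publish it in dev->data->rx_queues[] so the rx burst path can find it. A condensed sketch of that pattern, assuming the standard eth_rx_queue_setup op signature; the my_* names are placeholders, not any one driver's code:

#include <errno.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>	/* named rte_ethdev_driver.h on older DPDK releases */

struct my_rxq {			/* placeholder per-queue state */
	uint16_t nb_desc;
	struct rte_mempool *mp;
};

static void
my_rx_queue_release(void *q)
{
	rte_free(q);
}

static int
my_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		  uint16_t nb_desc, unsigned int socket_id,
		  const struct rte_eth_rxconf *rx_conf __rte_unused,
		  struct rte_mempool *mp)
{
	struct my_rxq *rxq;

	/* Re-setup: free whatever was configured at this index before. */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		my_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	rxq = rte_zmalloc_socket("my_rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;

	rxq->nb_desc = nb_desc;
	rxq->mp = mp;

	/* Publish the queue; the rx burst handler reads this slot. */
	dev->data->rx_queues[queue_idx] = rxq;
	return 0;
}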