/f-stack/dpdk/lib/librte_eventdev/

rte_event_eth_rx_adapter.c
    302   dev_info->rx_queue &&  in rxa_polled_queue()
    380   dev_info->nb_rx_intr;  in rxa_calc_nb_post_add_intr()
    439   dev_info->nb_rx_poll;  in rxa_calc_nb_post_add_poll()
    441   - dev_info->wrr_len;  in rxa_calc_nb_post_add_poll()
    582   dev_info->wrr_len = 0;  in rxa_calc_wrr_sequence()
    786   if (dev_info->cb_fn) {  in rxa_buffer_mbufs()
    1052  dev_info->next_q_idx = dev_info->multi_intr_cap ?  in rxa_intr_ring_dequeue()
    1447  dev_info->intr_queue =  in rxa_config_intr()
    1777  dev_info->rx_queue =  in rxa_sw_add()
    2105  dev_info->rx_queue =  in rte_event_eth_rx_adapter_queue_add()
    [all …]

rte_event_crypto_adapter.c
    741   dev_info->qpairs =  in eca_add_queue_pair()
    749   qpairs = dev_info->qpairs;  in eca_add_queue_pair()
    831   dev_info->qpairs =  in rte_event_crypto_adapter_queue_pair_add()
    841   dev_info->dev,  in rte_event_crypto_adapter_queue_pair_add()
    932   dev_info->dev,  in rte_event_crypto_adapter_queue_pair_del()
    941   dev_info->qpairs = NULL;  in rte_event_crypto_adapter_queue_pair_del()
    961   dev_info->qpairs = NULL;  in rte_event_crypto_adapter_queue_pair_del()
    1005  &dev_info->dev[i]) :  in eca_adapter_ctrl()
    1007  &dev_info->dev[i]);  in eca_adapter_ctrl()
    1063  dev_info->dev,  in rte_event_crypto_adapter_stats_get()
    [all …]

/f-stack/dpdk/kernel/linux/kni/

kni_misc.c
    305  if (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)))  in kni_ioctl_create()
    309  if (strnlen(dev_info.name, sizeof(dev_info.name)) == sizeof(dev_info.name)) {  in kni_ioctl_create()
    317  if (dev_info.force_bind && !cpu_online(dev_info.core_id)) {  in kni_ioctl_create()
    351  if (dev_info.iova_mode) {  in kni_ioctl_create()
    408  if (dev_info.mtu)  in kni_ioctl_create()
    409  net_dev->mtu = dev_info.mtu;  in kni_ioctl_create()
    413  if (dev_info.min_mtu)  in kni_ioctl_create()
    416  if (dev_info.max_mtu)  in kni_ioctl_create()
    423  ret, dev_info.name);  in kni_ioctl_create()
    455  if (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)))  in kni_ioctl_release()
    [all …]

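The kni_ioctl_create() hits above are the ioctl's up-front argument validation. A minimal sketch of that pattern, assuming the module's usual kernel headers and the shared rte_kni_device_info ABI struct; kni_copy_and_check() is a hypothetical helper, not a function in kni_misc.c:

```c
#include <linux/uaccess.h>	/* copy_from_user */
#include <linux/string.h>	/* strnlen */
#include <linux/cpumask.h>	/* cpu_online */
#include <linux/errno.h>
#include <rte_kni_common.h>	/* struct rte_kni_device_info (include path may differ) */

/* Hypothetical helper: the checks kni_ioctl_create() runs before
 * touching any device state. */
static int
kni_copy_and_check(unsigned long ioctl_param,
		   struct rte_kni_device_info *dev_info)
{
	/* Pull the request struct out of userspace memory. */
	if (copy_from_user(dev_info, (void *)ioctl_param, sizeof(*dev_info)))
		return -EFAULT;

	/* Reject a name that fills the buffer: it cannot be NUL-terminated. */
	if (strnlen(dev_info->name, sizeof(dev_info->name)) ==
	    sizeof(dev_info->name))
		return -EINVAL;

	/* A kthread can only be force-bound to a core that is online. */
	if (dev_info->force_bind && !cpu_online(dev_info->core_id))
		return -EINVAL;

	return 0;
}
```
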
/f-stack/dpdk/drivers/bus/pci/windows/

pci.c
    164  get_device_pci_address(HDEVINFO dev_info,  in get_device_pci_address() argument
    195  get_device_resource_info(HDEVINFO dev_info,  in get_device_resource_info() argument
    233  res = SetupDiGetDevicePropertyW(dev_info, dev_info_data,  in get_device_resource_info()
    316  ret = get_pci_hardware_id(dev_info, device_info_data,  in pci_scan_one()
    390  HDEVINFO dev_info;  in rte_pci_scan() local
    397  dev_info = SetupDiGetClassDevs(NULL, TEXT("PCI"), NULL,  in rte_pci_scan()
    399  if (dev_info == INVALID_HANDLE_VALUE) {  in rte_pci_scan()
    408  while (SetupDiEnumDeviceInfo(dev_info, device_index,  in rte_pci_scan()
    416  ret = pci_scan_one(dev_info, &device_info_data);  in rte_pci_scan()
    429  if (dev_info != INVALID_HANDLE_VALUE)  in rte_pci_scan()
    [all …]

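Here `dev_info` is a Windows SetupAPI HDEVINFO set, not a DPDK struct. A trimmed sketch of the rte_pci_scan() enumeration loop under the same SetupAPI calls; scan_pci_class() and the stubbed per-device step are placeholders, not the driver's real code:

```c
#include <windows.h>
#include <setupapi.h>

/* Sketch: open the "PCI" device class, walk its members, and hand
 * each one to a per-device scan routine (stubbed out here). */
static int
scan_pci_class(void)
{
	SP_DEVINFO_DATA device_info_data = { .cbSize = sizeof(device_info_data) };
	HDEVINFO dev_info;
	DWORD device_index = 0;

	dev_info = SetupDiGetClassDevs(NULL, TEXT("PCI"), NULL,
			DIGCF_PRESENT | DIGCF_ALLCLASSES);
	if (dev_info == INVALID_HANDLE_VALUE)
		return -1;

	while (SetupDiEnumDeviceInfo(dev_info, device_index,
			&device_info_data)) {
		/* pci_scan_one(dev_info, &device_info_data) in the real code */
		device_index++;
	}

	SetupDiDestroyDeviceInfoList(dev_info);
	return 0;
}
```
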
/f-stack/dpdk/app/test-eventdev/

evt_common.h
    79   struct rte_event_dev_info dev_info;  in evt_has_distributed_sched() local
    81   rte_event_dev_info_get(dev_id, &dev_info);  in evt_has_distributed_sched()
    89   struct rte_event_dev_info dev_info;  in evt_has_burst_mode() local
    91   rte_event_dev_info_get(dev_id, &dev_info);  in evt_has_burst_mode()
    92   return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?  in evt_has_burst_mode()
    100  struct rte_event_dev_info dev_info;  in evt_has_all_types_queue() local
    102  rte_event_dev_info_get(dev_id, &dev_info);  in evt_has_all_types_queue()
    103  return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?  in evt_has_all_types_queue()
    110  struct rte_event_dev_info dev_info;  in evt_has_flow_id() local
    112  rte_event_dev_info_get(dev_id, &dev_info);  in evt_has_flow_id()
    [all …]

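All of the evt_has_*() hits follow one probe shape: fetch rte_event_dev_info once, then test a single event_dev_cap bit. A minimal sketch of the burst-mode variant; the helper name is lifted from the listing, the body reconstructed around the hit lines:

```c
#include <stdbool.h>
#include <rte_eventdev.h>

static inline bool
evt_has_burst_mode(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	/* Query the device's static capabilities and limits. */
	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
			true : false;
}
```
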
test_perf_atq.c
    161  struct rte_event_dev_info dev_info;  in perf_atq_eventdev_setup() local
    171  memset(&dev_info, 0, sizeof(struct rte_event_dev_info));  in perf_atq_eventdev_setup()
    172  ret = rte_event_dev_info_get(opt->dev_id, &dev_info);  in perf_atq_eventdev_setup()
    200  opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;  in perf_atq_eventdev_setup()
    205  .enqueue_depth = dev_info.max_event_port_dequeue_depth,  in perf_atq_eventdev_setup()
    206  .new_event_threshold = dev_info.max_num_events,  in perf_atq_eventdev_setup()
    277  struct rte_event_dev_info dev_info;  in perf_atq_capability_check() local
    279  rte_event_dev_info_get(opt->dev_id, &dev_info);  in perf_atq_capability_check()
    281  dev_info.max_event_ports < perf_nb_event_ports(opt)) {  in perf_atq_capability_check()
    283  atq_nb_event_queues(opt), dev_info.max_event_queues,  in perf_atq_capability_check()
    [all …]

test_perf_queue.c
    162  struct rte_event_dev_info dev_info;  in perf_queue_eventdev_setup() local
    172  memset(&dev_info, 0, sizeof(struct rte_event_dev_info));  in perf_queue_eventdev_setup()
    173  ret = rte_event_dev_info_get(opt->dev_id, &dev_info);  in perf_queue_eventdev_setup()
    215  opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;  in perf_queue_eventdev_setup()
    220  .enqueue_depth = dev_info.max_event_port_dequeue_depth,  in perf_queue_eventdev_setup()
    221  .new_event_threshold = dev_info.max_num_events,  in perf_queue_eventdev_setup()
    293  struct rte_event_dev_info dev_info;  in perf_queue_capability_check() local
    295  rte_event_dev_info_get(opt->dev_id, &dev_info);  in perf_queue_capability_check()
    297  dev_info.max_event_ports < perf_nb_event_ports(opt)) {  in perf_queue_capability_check()
    300  dev_info.max_event_queues,  in perf_queue_capability_check()
    [all …]

/f-stack/dpdk/examples/l3fwd/

l3fwd_event_internal_port.c
    20  struct rte_event_dev_info dev_info;  in l3fwd_event_device_setup_internal_port() local
    35  rte_event_dev_info_get(event_d_id, &dev_info);  in l3fwd_event_device_setup_internal_port()
    49  event_d_conf.nb_events_limit = dev_info.max_num_events;  in l3fwd_event_device_setup_internal_port()
    53  dev_info.max_event_queue_flows;  in l3fwd_event_device_setup_internal_port()
    55  if (dev_info.max_event_port_dequeue_depth <  in l3fwd_event_device_setup_internal_port()
    58  dev_info.max_event_port_dequeue_depth;  in l3fwd_event_device_setup_internal_port()
    60  if (dev_info.max_event_port_enqueue_depth <  in l3fwd_event_device_setup_internal_port()
    63  dev_info.max_event_port_enqueue_depth;  in l3fwd_event_device_setup_internal_port()
    66  if (dev_info.max_event_ports < num_workers)  in l3fwd_event_device_setup_internal_port()
    67  num_workers = dev_info.max_event_ports;  in l3fwd_event_device_setup_internal_port()
    [all …]

l3fwd_event_generic.c
    20  struct rte_event_dev_info dev_info;  in l3fwd_event_device_setup_generic() local
    35  rte_event_dev_info_get(event_d_id, &dev_info);  in l3fwd_event_device_setup_generic()
    49  event_d_conf.nb_events_limit = dev_info.max_num_events;  in l3fwd_event_device_setup_generic()
    53  dev_info.max_event_queue_flows;  in l3fwd_event_device_setup_generic()
    55  if (dev_info.max_event_port_dequeue_depth <  in l3fwd_event_device_setup_generic()
    58  dev_info.max_event_port_dequeue_depth;  in l3fwd_event_device_setup_generic()
    60  if (dev_info.max_event_port_enqueue_depth <  in l3fwd_event_device_setup_generic()
    63  dev_info.max_event_port_enqueue_depth;  in l3fwd_event_device_setup_generic()
    66  if (dev_info.max_event_ports < num_workers)  in l3fwd_event_device_setup_generic()
    67  num_workers = dev_info.max_event_ports;  in l3fwd_event_device_setup_generic()
    [all …]

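Both l3fwd setup variants (and the l2fwd-event entries below) reduce to one idea: start from application defaults, then clamp every rte_event_dev_config field to the ceiling the PMD reports. A hedged sketch of that clamp; clamp_event_dev_conf() is illustrative, not the examples' actual function:

```c
#include <rte_eventdev.h>

static void
clamp_event_dev_conf(uint8_t event_d_id,
		     struct rte_event_dev_config *conf,
		     uint32_t *num_workers)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(event_d_id, &dev_info);

	/* Take the device maxima where the app has no hard requirement. */
	conf->nb_events_limit = dev_info.max_num_events;
	conf->nb_event_queue_flows = dev_info.max_event_queue_flows;

	/* Never ask for deeper ports than the PMD can provide. */
	if (dev_info.max_event_port_dequeue_depth <
			conf->nb_event_port_dequeue_depth)
		conf->nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;
	if (dev_info.max_event_port_enqueue_depth <
			conf->nb_event_port_enqueue_depth)
		conf->nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	/* At most one worker per available event port. */
	if (dev_info.max_event_ports < *num_workers)
		*num_workers = dev_info.max_event_ports;
}
```
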
/f-stack/dpdk/drivers/net/bnxt/

rte_pmd_bnxt.c
    138  struct rte_eth_dev_info dev_info;  in rte_pmd_bnxt_set_vf_mac_addr() local
    264  if (vf >= dev_info.max_vfs)  in rte_pmd_bnxt_set_vf_mac_anti_spoof()
    323  if (vf >= dev_info.max_vfs)  in rte_pmd_bnxt_set_vf_vlan_anti_spoof()
    374  if (vf >= dev_info.max_vfs)  in rte_pmd_bnxt_set_vf_vlan_stripq()
    609  if (vf_id >= dev_info.max_vfs)  in rte_pmd_bnxt_get_vf_stats()
    645  if (vf_id >= dev_info.max_vfs)  in rte_pmd_bnxt_reset_vf_stats()
    679  if (vf_id >= dev_info.max_vfs)  in rte_pmd_bnxt_get_vf_rx_status()
    714  if (vf_id >= dev_info.max_vfs)  in rte_pmd_bnxt_get_vf_tx_drop_count()
    753  if (vf_id >= dev_info.max_vfs)  in rte_pmd_bnxt_mac_addr_add()
    836  if (vf >= dev_info.max_vfs)  in rte_pmd_bnxt_set_vf_vlan_insert()
    [all …]

/f-stack/dpdk/examples/l2fwd-event/

l2fwd_event_internal_port.c
    29  struct rte_event_dev_info dev_info;  in l2fwd_event_device_setup_internal_port() local
    44  rte_event_dev_info_get(event_d_id, &dev_info);  in l2fwd_event_device_setup_internal_port()
    58  event_d_conf.nb_events_limit = dev_info.max_num_events;  in l2fwd_event_device_setup_internal_port()
    62  dev_info.max_event_queue_flows;  in l2fwd_event_device_setup_internal_port()
    64  if (dev_info.max_event_port_dequeue_depth <  in l2fwd_event_device_setup_internal_port()
    67  dev_info.max_event_port_dequeue_depth;  in l2fwd_event_device_setup_internal_port()
    69  if (dev_info.max_event_port_enqueue_depth <  in l2fwd_event_device_setup_internal_port()
    72  dev_info.max_event_port_enqueue_depth;  in l2fwd_event_device_setup_internal_port()
    76  if (dev_info.max_event_ports < num_workers)  in l2fwd_event_device_setup_internal_port()
    77  num_workers = dev_info.max_event_ports;  in l2fwd_event_device_setup_internal_port()
    [all …]

l2fwd_event_generic.c
    29  struct rte_event_dev_info dev_info;  in l2fwd_event_device_setup_generic() local
    44  rte_event_dev_info_get(event_d_id, &dev_info);  in l2fwd_event_device_setup_generic()
    59  event_d_conf.nb_events_limit = dev_info.max_num_events;  in l2fwd_event_device_setup_generic()
    63  dev_info.max_event_queue_flows;  in l2fwd_event_device_setup_generic()
    65  if (dev_info.max_event_port_dequeue_depth <  in l2fwd_event_device_setup_generic()
    68  dev_info.max_event_port_dequeue_depth;  in l2fwd_event_device_setup_generic()
    70  if (dev_info.max_event_port_enqueue_depth <  in l2fwd_event_device_setup_generic()
    73  dev_info.max_event_port_enqueue_depth;  in l2fwd_event_device_setup_generic()
    77  if (dev_info.max_event_ports < num_workers)  in l2fwd_event_device_setup_generic()
    78  num_workers = dev_info.max_event_ports;  in l2fwd_event_device_setup_generic()
    [all …]

l2fwd_common.c
    34  struct rte_eth_dev_info dev_info;  in l2fwd_event_init_ports() local
    49  ret = rte_eth_dev_info_get(port_id, &dev_info);  in l2fwd_event_init_ports()
    54  dev_info.flow_type_rss_offloads;  in l2fwd_event_init_ports()
    64  if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)  in l2fwd_event_init_ports()
    82  rxq_conf = dev_info.default_rxconf;  in l2fwd_event_init_ports()
    94  txq_conf = dev_info.default_txconf;  in l2fwd_event_init_ports()

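The l2fwd_common.c hits show the ethdev side of the same idea: fetch dev_info, mask the requested RSS hash types against what the port supports, and enable fast mbuf free when offered. A hedged sketch under this DPDK version's pre-21.11 offload names; init_port_conf() is a hypothetical wrapper, not the example's exact code:

```c
#include <rte_ethdev.h>

static int
init_port_conf(uint16_t port_id, struct rte_eth_conf *port_conf)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Keep only the RSS hash types this port can actually compute. */
	port_conf->rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;

	/* Fast free is an optimization; turn it on only if capable. */
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf->txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	return 0;
}
```
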
/f-stack/dpdk/drivers/net/ixgbe/

ixgbe_vf_representor.c
    40  struct rte_eth_dev_info *dev_info)  in ixgbe_vf_representor_dev_infos_get() argument
    47  dev_info->device = representor->pf_ethdev->device;  in ixgbe_vf_representor_dev_infos_get()
    49  dev_info->min_rx_bufsize = 1024;  in ixgbe_vf_representor_dev_infos_get()
    51  dev_info->max_rx_pktlen = 9728;  in ixgbe_vf_representor_dev_infos_get()
    53  dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;  in ixgbe_vf_representor_dev_infos_get()
    55  dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;  in ixgbe_vf_representor_dev_infos_get()
    58  dev_info->max_mac_addrs = hw->mac.num_rar_entries;  in ixgbe_vf_representor_dev_infos_get()
    61  dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |  in ixgbe_vf_representor_dev_infos_get()
    72  dev_info->speed_capa =  in ixgbe_vf_representor_dev_infos_get()
    76  dev_info->switch_info.name =  in ixgbe_vf_representor_dev_infos_get()
    [all …]

/f-stack/dpdk/examples/eventdev_pipeline/

pipeline_worker_generic.c
    164  struct rte_event_dev_info dev_info;  in setup_eventdev_generic() local
    176  if (dev_info.max_event_port_dequeue_depth <  in setup_eventdev_generic()
    179  dev_info.max_event_port_dequeue_depth;  in setup_eventdev_generic()
    183  dev_info.max_event_port_enqueue_depth;  in setup_eventdev_generic()
    302  struct rte_eth_dev_info dev_info;  in port_init() local
    322  rx_conf = dev_info.default_rxconf;  in port_init()
    326  dev_info.flow_type_rss_offloads;  in port_init()
    350  txconf = dev_info.default_txconf;  in port_init()
    414  struct rte_event_dev_info dev_info;  in init_adapters() local
    428  dev_info.max_event_port_dequeue_depth;  in init_adapters()
    [all …]

/f-stack/dpdk/drivers/raw/ifpga/base/

ifpga_fme_pr.c  (here dev_info() is the ifpga base code's logging macro, taking a device handle and a format string, not a device-info struct)
    24   dev_info(NULL, "%s\n", pr_err_msg[i]);  in pr_err_handle()
    46   dev_info(fme_dev, "resetting PR before initiated PR\n");  in fme_pr_write_init()
    97   dev_info(fme_dev, "set PR port ID and start request\n");  in fme_pr_write()
    104  dev_info(fme_dev, "pushing data from bitstream to HW\n");  in fme_pr_write()
    164  dev_info(fme_dev, "green bitstream push complete\n");  in fme_pr_write_complete()
    181  dev_info(fme_dev, "PR done successfully\n");  in fme_pr_write_complete()
    300  dev_info(hw, "this is a valid bitsteam..\n");  in do_pr()
    329  dev_info(NULL, "FME PR MGMT Init.\n");  in fme_pr_mgmt_init()
    337  dev_info(NULL, "using 512-bit PR\n");  in fme_pr_mgmt_init()
    340  dev_info(NULL, "using 32-bit PR\n");  in fme_pr_mgmt_init()
    [all …]

/f-stack/dpdk/lib/librte_kni/

rte_kni.c
    218  struct rte_kni_device_info dev_info;  in rte_kni_alloc() local
    259  memset(&dev_info, 0, sizeof(dev_info));  in rte_kni_alloc()
    260  dev_info.core_id = conf->core_id;  in rte_kni_alloc()
    262  dev_info.group_id = conf->group_id;  in rte_kni_alloc()
    263  dev_info.mbuf_size = conf->mbuf_size;  in rte_kni_alloc()
    264  dev_info.mtu = conf->mtu;  in rte_kni_alloc()
    265  dev_info.min_mtu = conf->min_mtu;  in rte_kni_alloc()
    266  dev_info.max_mtu = conf->max_mtu;  in rte_kni_alloc()
    279  dev_info.tx_phys = kni->m_tx_q->iova;  in rte_kni_alloc()
    408  struct rte_kni_device_info dev_info;  in rte_kni_release() local
    [all …]

/f-stack/dpdk/drivers/net/i40e/

i40e_vf_representor.c
    28  struct rte_eth_dev_info *dev_info)  in i40e_vf_representor_dev_infos_get() argument
    33  dev_info->device = ethdev->device;  in i40e_vf_representor_dev_infos_get()
    35  dev_info->max_rx_queues = ethdev->data->nb_rx_queues;  in i40e_vf_representor_dev_infos_get()
    36  dev_info->max_tx_queues = ethdev->data->nb_tx_queues;  in i40e_vf_representor_dev_infos_get()
    38  dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;  in i40e_vf_representor_dev_infos_get()
    39  dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;  in i40e_vf_representor_dev_infos_get()
    42  dev_info->reta_size = ETH_RSS_RETA_SIZE_64;  in i40e_vf_representor_dev_infos_get()
    44  dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;  in i40e_vf_representor_dev_infos_get()
    45  dev_info->rx_offload_capa =  in i40e_vf_representor_dev_infos_get()
    52  dev_info->tx_offload_capa =  in i40e_vf_representor_dev_infos_get()
    [all …]

/f-stack/dpdk/lib/librte_regexdev/

rte_regexdev.c
    191  if (dev_info == NULL)  in regexdev_info_get()
    195  return (*dev->dev_ops->dev_info_get)(dev, dev_info);  in regexdev_info_get()
    202  return regexdev_info_get(dev_id, dev_info);  in rte_regexdev_info_get()
    209  struct rte_regexdev_info dev_info;  in rte_regexdev_configure() local
    223  ret = regexdev_info_get(dev_id, &dev_info);  in rte_regexdev_configure()
    252  if (cfg->nb_groups > dev_info.max_groups) {  in rte_regexdev_configure()
    254  dev_id, cfg->nb_groups, dev_info.max_groups);  in rte_regexdev_configure()
    262  if (cfg->nb_max_matches > dev_info.max_matches) {  in rte_regexdev_configure()
    265  dev_info.max_matches);  in rte_regexdev_configure()
    276  dev_info.max_queue_pairs);  in rte_regexdev_configure()
    [all …]

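The rte_regexdev_configure() hits show the library validating a requested config against the device's advertised limits before applying it. A condensed sketch of those checks, with logging stripped; check_regex_cfg() is illustrative, not a real API:

```c
#include <errno.h>
#include <rte_regexdev.h>

static int
check_regex_cfg(uint8_t dev_id, const struct rte_regexdev_config *cfg)
{
	struct rte_regexdev_info dev_info;
	int ret;

	/* Fetch the device's limits first. */
	ret = rte_regexdev_info_get(dev_id, &dev_info);
	if (ret < 0)
		return ret;

	/* Reject any request that exceeds an advertised maximum. */
	if (cfg->nb_groups > dev_info.max_groups)
		return -EINVAL;
	if (cfg->nb_max_matches > dev_info.max_matches)
		return -EINVAL;
	if (cfg->nb_queue_pairs > dev_info.max_queue_pairs)
		return -EINVAL;

	return 0;
}
```
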
/f-stack/dpdk/app/test/

test_event_eth_tx_adapter.c
    227  struct rte_event_dev_info dev_info;  in tx_adapter_create() local
    245  err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);  in tx_adapter_create()
    248  dev_info.max_event_port_dequeue_depth;  in tx_adapter_create()
    250  dev_info.max_event_port_enqueue_depth;  in tx_adapter_create()
    252  dev_info.max_num_events;  in tx_adapter_create()
    269  err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);  in tx_adapter_create()
    292  struct rte_event_dev_info dev_info;  in tx_adapter_create_free() local
    469  struct rte_event_dev_info dev_info;  in tx_adapter_service() local
    507  dev_info.max_event_port_dequeue_depth;  in tx_adapter_service()
    509  dev_info.max_event_port_enqueue_depth;  in tx_adapter_service()
    [all …]

test_event_eth_rx_adapter.c
    43   struct rte_eth_dev_info dev_info;  in port_init_common() local
    50   retval = rte_eth_dev_info_get(port, &dev_info);  in port_init_common()
    203  struct rte_event_dev_info dev_info;  in testsuite_setup() local
    223  dev_info.max_event_port_dequeue_depth;  in testsuite_setup()
    225  dev_info.max_event_port_enqueue_depth;  in testsuite_setup()
    227  dev_info.max_num_events;  in testsuite_setup()
    262  struct rte_event_dev_info dev_info;  in testsuite_setup_rx_intr() local
    282  dev_info.max_event_port_dequeue_depth;  in testsuite_setup_rx_intr()
    284  dev_info.max_event_port_enqueue_depth;  in testsuite_setup_rx_intr()
    286  dev_info.max_num_events;  in testsuite_setup_rx_intr()
    [all …]

/f-stack/dpdk/examples/ethtool/lib/

rte_ethtool.c
    24   struct rte_eth_dev_info dev_info;  in rte_ethtool_get_drvinfo() local
    44   ret = rte_eth_dev_info_get(port_id, &dev_info);  in rte_ethtool_get_drvinfo()
    52   strlcpy(drvinfo->driver, dev_info.driver_name,  in rte_ethtool_get_drvinfo()
    56   if (dev_info.device)  in rte_ethtool_get_drvinfo()
    57   bus = rte_bus_find_by_device(dev_info.device);  in rte_ethtool_get_drvinfo()
    59   pci_dev = RTE_DEV_TO_PCI(dev_info.device);  in rte_ethtool_get_drvinfo()
    387  struct rte_eth_dev_info dev_info;  in rte_ethtool_net_set_rx_mode() local
    391  ret = rte_eth_dev_info_get(port_id, &dev_info);  in rte_ethtool_net_set_rx_mode()
    395  num_vfs = dev_info.max_vfs;  in rte_ethtool_net_set_rx_mode()
    418  struct rte_eth_dev_info dev_info;  in rte_ethtool_get_ringparam() local
    [all …]

/f-stack/dpdk/drivers/event/skeleton/

skeleton_eventdev.c
    83   struct rte_event_dev_info *dev_info)  in skeleton_eventdev_info_get() argument
    91   dev_info->min_dequeue_timeout_ns = 1;  in skeleton_eventdev_info_get()
    92   dev_info->max_dequeue_timeout_ns = 10000;  in skeleton_eventdev_info_get()
    93   dev_info->dequeue_timeout_ns = 25;  in skeleton_eventdev_info_get()
    94   dev_info->max_event_queues = 64;  in skeleton_eventdev_info_get()
    96   dev_info->max_event_queue_priority_levels = 8;  in skeleton_eventdev_info_get()
    97   dev_info->max_event_priority_levels = 8;  in skeleton_eventdev_info_get()
    98   dev_info->max_event_ports = 32;  in skeleton_eventdev_info_get()
    99   dev_info->max_event_port_dequeue_depth = 16;  in skeleton_eventdev_info_get()
    100  dev_info->max_event_port_enqueue_depth = 16;  in skeleton_eventdev_info_get()
    [all …]

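This is the PMD side of rte_event_dev_info_get(): the skeleton driver's info_get callback is just a table of static limits. A sketch reconstructed from the hit lines above, with the surrounding driver boilerplate omitted and any fields the listing truncates left out:

```c
#include <rte_common.h>
#include <rte_eventdev.h>

static void
skeleton_eventdev_info_get(struct rte_eventdev *dev,
			   struct rte_event_dev_info *dev_info)
{
	RTE_SET_USED(dev);

	/* Values copied verbatim from the listing above. */
	dev_info->min_dequeue_timeout_ns = 1;
	dev_info->max_dequeue_timeout_ns = 10000;
	dev_info->dequeue_timeout_ns = 25;
	dev_info->max_event_queues = 64;
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 8;
	dev_info->max_event_ports = 32;
	dev_info->max_event_port_dequeue_depth = 16;
	dev_info->max_event_port_enqueue_depth = 16;
}
```
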
/f-stack/dpdk/app/test-pmd/

util.c
    276  struct rte_eth_dev_info dev_info;  in add_tx_md_callback() local
    283  ret = eth_dev_info_get_print_err(portid, &dev_info);  in add_tx_md_callback()
    287  for (queue = 0; queue < dev_info.nb_tx_queues; queue++)  in add_tx_md_callback()
    297  struct rte_eth_dev_info dev_info;  in remove_tx_md_callback() local
    304  ret = eth_dev_info_get_print_err(portid, &dev_info);  in remove_tx_md_callback()
    332  struct rte_eth_dev_info dev_info;  in add_tx_dynf_callback() local
    339  ret = eth_dev_info_get_print_err(portid, &dev_info);  in add_tx_dynf_callback()
    353  struct rte_eth_dev_info dev_info;  in remove_tx_dynf_callback() local
    360  ret = eth_dev_info_get_print_err(portid, &dev_info);  in remove_tx_dynf_callback()
    374  struct rte_eth_dev_info *dev_info)  in eth_dev_info_get_print_err() argument
    [all …]

/f-stack/dpdk/examples/vmdq/

main.c
    164  struct rte_eth_dev_info dev_info;  in port_init() local
    181  retval = rte_eth_dev_info_get(port, &dev_info);  in port_init()
    206  num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;  in port_init()
    207  queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;  in port_init()
    210  vmdq_queue_base = dev_info.vmdq_queue_base;  in port_init()
    211  vmdq_pool_base = dev_info.vmdq_pool_base;  in port_init()
    223  dev_info.flow_type_rss_offloads;  in port_init()
    240  rxRings = (uint16_t)dev_info.max_rx_queues;  in port_init()
    241  txRings = (uint16_t)dev_info.max_tx_queues;  in port_init()
    268  rxconf = &dev_info.default_rxconf;  in port_init()
    [all …]
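
The main.c hits carry the one piece of real arithmetic in this listing: splitting the port's RX queues between the PF and the VMDq pools. A worked sketch of that math, assuming a VMDq-capable port (max_vmdq_pools nonzero); variable names mirror the example, but vmdq_queue_layout() itself is illustrative:

```c
#include <stdio.h>
#include <rte_ethdev.h>

static void
vmdq_queue_layout(const struct rte_eth_dev_info *dev_info)
{
	/* Queues not claimed by VMDq stay with the PF. */
	uint16_t num_pf_queues =
		dev_info->max_rx_queues - dev_info->vmdq_queue_num;
	/* Each pool gets an equal slice of the VMDq queue range. */
	uint16_t queues_per_pool =
		dev_info->vmdq_queue_num / dev_info->max_vmdq_pools;

	printf("pf queues %u, pools %u x %u queues, queue base %u, pool base %u\n",
	       num_pf_queues, dev_info->max_vmdq_pools, queues_per_pool,
	       dev_info->vmdq_queue_base, dev_info->vmdq_pool_base);
}
```

The base/num fields come straight from rte_eth_dev_info, so the same computation works on any PMD that reports a VMDq queue range.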