/f-stack/dpdk/lib/librte_ethdev/
rte_tm.c
    16   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_tm_ops_get()
    65   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_tm_get_number_of_leaf_nodes()
    92   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_tm_node_type_get()
    102  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_tm_capabilities_get()
    113  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_tm_level_capabilities_get()
    124  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_tm_node_capabilities_get()
    135  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_tm_wred_profile_add()
    145  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_tm_wred_profile_delete()
    156  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_tm_shared_wred_context_add_update()
    166  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_tm_shared_wred_context_delete()
    [all …]
rte_mtr.c
    17   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_mtr_ops_get()
    66   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_mtr_capabilities_get()
    78   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_mtr_meter_profile_add()
    89   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_mtr_meter_profile_delete()
    102  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_mtr_create()
    113  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_mtr_destroy()
    124  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_mtr_meter_enable()
    135  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_mtr_meter_disable()
    147  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_mtr_meter_profile_update()
    159  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_mtr_meter_dscp_table_update()
    [all …]
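Every entry point in rte_tm.c and rte_mtr.c (and in rte_flow.c further down) opens the same way: the caller's port_id is turned into a device pointer with &rte_eth_devices[port_id], the port is validated, and the call is dispatched through the PMD's ops table. A rough sketch of that shape, assuming the driver-side headers of the DPDK version vendored here (rte_ethdev_driver.h, rte_tm_driver.h); the wrapper name tm_ops_lookup_sketch is made up, and error reporting through rte_tm_error_set() is omitted:

#include <stdint.h>
#include <rte_ethdev_driver.h>   /* rte_eth_devices[], rte_eth_dev_is_valid_port() */
#include <rte_tm_driver.h>       /* struct rte_tm_ops */

/* Paraphrased shape of rte_tm_ops_get() (rte_tm.c:16); not the verbatim code. */
static const struct rte_tm_ops *
tm_ops_lookup_sketch(uint16_t port_id)
{
    struct rte_eth_dev *dev = &rte_eth_devices[port_id];
    const struct rte_tm_ops *ops = NULL;

    if (!rte_eth_dev_is_valid_port(port_id))
        return NULL;

    /* Ask the PMD for its traffic-manager ops table. */
    if (dev->dev_ops->tm_ops_get == NULL ||
        dev->dev_ops->tm_ops_get(dev, &ops) != 0)
        return NULL;

    return ops;
}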
rte_ethdev.c
    421   return &rte_eth_devices[i];  in eth_dev_allocated()
    953   dev = &rte_eth_devices[port_id];  in rte_eth_dev_rx_queue_start()
    994   dev = &rte_eth_devices[port_id];  in rte_eth_dev_rx_queue_stop()
    1028  dev = &rte_eth_devices[port_id];  in rte_eth_dev_tx_queue_start()
    1067  dev = &rte_eth_devices[port_id];  in rte_eth_dev_tx_queue_stop()
    1300  dev = &rte_eth_devices[port_id];  in rte_eth_dev_configure()
    1686  dev = &rte_eth_devices[port_id];  in rte_eth_dev_start()
    1742  dev = &rte_eth_devices[port_id];  in rte_eth_dev_stop()
    1767  dev = &rte_eth_devices[port_id];  in rte_eth_dev_set_link_up()
    1780  dev = &rte_eth_devices[port_id];  in rte_eth_dev_set_link_down()
    [all …]
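The control-path functions in rte_ethdev.c follow a closely related pattern: validate port_id with RTE_ETH_VALID_PORTID_OR_ERR_RET, fetch the slot, sanity-check the arguments against dev->data, then call into dev->dev_ops. A condensed sketch of what rte_eth_dev_rx_queue_start() (line 953) does, with error handling and logging trimmed; not the verbatim implementation:

#include <errno.h>
#include <stdint.h>
#include <rte_ethdev_driver.h>

/* Condensed sketch of a control-path wrapper such as rte_eth_dev_rx_queue_start(). */
static int
rx_queue_start_sketch(uint16_t port_id, uint16_t rx_queue_id)
{
    struct rte_eth_dev *dev;

    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
    dev = &rte_eth_devices[port_id];

    if (rx_queue_id >= dev->data->nb_rx_queues)
        return -EINVAL;
    if (dev->dev_ops->rx_queue_start == NULL)
        return -ENOTSUP;

    return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}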
ethdev_private.c
    14   return dev - rte_eth_devices;  in eth_dev_to_id()
    26   (start < &rte_eth_devices[0] ||  in eth_find_device()
    27   start > &rte_eth_devices[RTE_MAX_ETHPORTS]))  in eth_find_device()
    34   edev = &rte_eth_devices[idx];  in eth_find_device()
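ethdev_private.c leans on the fact that rte_eth_devices[] is one static array: eth_dev_to_id() recovers a port id by plain pointer subtraction, and eth_find_device() bounds-checks its iterator against the ends of the array. A minimal illustration of the subtraction trick (helper name invented):

#include <stdint.h>
#include <rte_ethdev_driver.h>   /* struct rte_eth_dev, rte_eth_devices[] */

/*
 * Because every rte_eth_dev lives in the static rte_eth_devices[] array, a
 * device's port id is just its index, recoverable by pointer arithmetic,
 * the same trick as eth_dev_to_id() (ethdev_private.c:14).
 */
static uint16_t
port_id_of(const struct rte_eth_dev *dev)
{
    return (uint16_t)(dev - rte_eth_devices);
}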
rte_flow.c
    249   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_flow_ops_get()
    278   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_flow_validate()
    302   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_flow_create()
    327   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_flow_destroy()
    349   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_flow_flush()
    374   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_flow_query()
    397   struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_flow_isolate()
    1026  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_flow_dev_dump()
    1047  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_flow_get_aged_flows()
    1154  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_flow_tunnel_decap_set()
    [all …]
rte_ethdev.h
    4837  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_eth_rx_burst()
    4896  dev = &rte_eth_devices[port_id];  in rte_eth_rx_queue_count()
    4924  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_eth_rx_descriptor_done()
    4977  dev = &rte_eth_devices[port_id];  in rte_eth_rx_descriptor_status()
    5034  dev = &rte_eth_devices[port_id];  in rte_eth_tx_descriptor_status()
    5115  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in rte_eth_tx_burst()
    5223  dev = &rte_eth_devices[port_id];  in rte_eth_tx_prepare()
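rte_eth_rx_burst(), rte_eth_tx_burst() and the other hits above are static inline functions in the public header, which is exactly why rte_eth_devices[] has to be exported: each call expands inside the application and indexes the array directly instead of going through a function call. Typical use from an application's polling loop (standard ethdev API; the constants are illustrative):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Simple echo loop: every burst inlines an access to rte_eth_devices[port_id]. */
static void
poll_port(uint16_t port_id, uint16_t queue_id)
{
    struct rte_mbuf *pkts[BURST_SIZE];

    for (;;) {
        uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
        if (nb_rx == 0)
            continue;

        uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_rx);
        /* Drop whatever the TX queue did not accept. */
        while (nb_tx < nb_rx)
            rte_pktmbuf_free(pkts[nb_tx++]);
    }
}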
rte_ethdev_core.h
    196   extern struct rte_eth_dev rte_eth_devices[];
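rte_ethdev_core.h is where the array itself is declared; everything listed here links against this one symbol. Applications are still expected to work in terms of port ids and the official iteration macros rather than scanning the array themselves, for example:

#include <stdio.h>
#include <rte_ethdev.h>

/* Walk all valid ports through the supported macro instead of touching
 * rte_eth_devices[] directly. */
static void
list_ports(void)
{
    uint16_t port_id;

    RTE_ETH_FOREACH_DEV(port_id) {
        struct rte_eth_dev_info info;

        if (rte_eth_dev_info_get(port_id, &info) == 0)
            printf("port %u: driver %s\n", port_id, info.driver_name);
    }
}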
/f-stack/dpdk/drivers/net/ixgbe/
rte_pmd_ixgbe.c
    25   dev = &rte_eth_devices[port];  in rte_pmd_ixgbe_set_vf_mac_addr()
    59   dev = &rte_eth_devices[port];  in rte_pmd_ixgbe_ping_vf()
    90   dev = &rte_eth_devices[port];  in rte_pmd_ixgbe_set_vf_vlan_anti_spoof()
    120  dev = &rte_eth_devices[port];  in rte_pmd_ixgbe_set_vf_mac_anti_spoof()
    149  dev = &rte_eth_devices[port];  in rte_pmd_ixgbe_set_vf_vlan_insert()
    184  dev = &rte_eth_devices[port];  in rte_pmd_ixgbe_set_tx_loopback()
    216  dev = &rte_eth_devices[port];  in rte_pmd_ixgbe_set_all_queues_drop_en()
    245  dev = &rte_eth_devices[port];  in rte_pmd_ixgbe_set_vf_split_drop_en()
    281  dev = &rte_eth_devices[port];  in rte_pmd_ixgbe_set_vf_vlan_stripq()
    329  dev = &rte_eth_devices[port];  in rte_pmd_ixgbe_set_vf_rxmode()
    [all …]
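The rte_pmd_ixgbe_* entry points (and the matching rte_pmd_atl_*, rte_pmd_bnxt_* and rte_pmd_i40e_* families further down) are vendor-specific controls: each one resolves &rte_eth_devices[port] and then refuses to proceed if that port is not bound to its own PMD. From the caller's side it looks like this; the port and VF numbers are placeholders, and the two anti-spoof setters are real ixgbe PMD APIs:

#include <stdint.h>
#include <rte_pmd_ixgbe.h>

/* Enable MAC and VLAN anti-spoofing on one VF behind an ixgbe PF port. */
static int
harden_vf(uint16_t port, uint16_t vf)
{
    int ret;

    ret = rte_pmd_ixgbe_set_vf_mac_anti_spoof(port, vf, 1);
    if (ret != 0)
        return ret;   /* e.g. -ENOTSUP if the port is not an ixgbe device */

    return rte_pmd_ixgbe_set_vf_vlan_anti_spoof(port, vf, 1);
}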
/f-stack/dpdk/drivers/net/atlantic/
rte_pmd_atlantic.c
    19   dev = &rte_eth_devices[port];  in rte_pmd_atl_macsec_enable()
    34   dev = &rte_eth_devices[port];  in rte_pmd_atl_macsec_disable()
    49   dev = &rte_eth_devices[port];  in rte_pmd_atl_macsec_config_txsc()
    64   dev = &rte_eth_devices[port];  in rte_pmd_atl_macsec_config_rxsc()
    80   dev = &rte_eth_devices[port];  in rte_pmd_atl_macsec_select_txsa()
    96   dev = &rte_eth_devices[port];  in rte_pmd_atl_macsec_select_rxsa()
/f-stack/dpdk/drivers/net/bonding/
rte_eth_bond_api.c
    199  bonded_eth_dev = &rte_eth_devices[bonded_port_id];  in slave_vlan_filter_set()
    456  bonded_eth_dev = &rte_eth_devices[bonded_port_id];  in __eth_bond_slave_add_lock_free()
    462  slave_eth_dev = &rte_eth_devices[slave_port_id];  in __eth_bond_slave_add_lock_free()
    612  bonded_eth_dev = &rte_eth_devices[bonded_port_id];  in rte_eth_bond_slave_add()
    635  bonded_eth_dev = &rte_eth_devices[bonded_port_id];  in __eth_bond_slave_remove_lock_free()
    666  &rte_eth_devices[bonded_port_id].data->port_id);  in __eth_bond_slave_remove_lock_free()
    687  slave_eth_dev = &rte_eth_devices[slave_port_id];  in __eth_bond_slave_remove_lock_free()
    734  bonded_eth_dev = &rte_eth_devices[bonded_port_id];  in rte_eth_bond_slave_remove()
    754  bonded_eth_dev = &rte_eth_devices[bonded_port_id];  in rte_eth_bond_mode_set()
    870  bonded_eth_dev = &rte_eth_devices[bonded_port_id];  in rte_eth_bond_mac_address_set()
    [all …]
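The bonding control path works on two slots at once: it resolves both the bonded device and each slave out of rte_eth_devices[], as the __eth_bond_slave_add_lock_free() hits above show. From an application the same operations are driven purely by port ids; the device name, mode and socket id below are example values, and the calls are the library's public API:

#include <rte_eth_bond.h>

/* Create an active-backup bond and attach two already-probed ports to it. */
static int
make_active_backup_bond(uint16_t slave_a, uint16_t slave_b)
{
    int bonded = rte_eth_bond_create("net_bonding0",
                                     BONDING_MODE_ACTIVE_BACKUP,
                                     0 /* socket id */);
    if (bonded < 0)
        return bonded;

    if (rte_eth_bond_slave_add((uint16_t)bonded, slave_a) != 0 ||
        rte_eth_bond_slave_add((uint16_t)bonded, slave_b) != 0)
        return -1;

    return bonded; /* port id of the new bonded device */
}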
rte_eth_bond_args.c
    33   pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]);  in find_port_id_by_pci_addr()
    51   if (rte_eth_devices[i].data == NULL)  in find_port_id_by_dev_name()
    54   if (strcmp(rte_eth_devices[i].device->name, name) == 0)  in find_port_id_by_dev_name()
rte_eth_bond_8023ad.c
    1390  bond_dev = &rte_eth_devices[port_id];  in rte_eth_bond_8023ad_conf_get()
    1406  bond_dev = &rte_eth_devices[port_id];  in rte_eth_bond_8023ad_agg_selection_set()
    1428  bond_dev = &rte_eth_devices[port_id];  in rte_eth_bond_8023ad_agg_selection_get()
    1477  bond_dev = &rte_eth_devices[port_id];  in rte_eth_bond_8023ad_setup()
    1499  bond_dev = &rte_eth_devices[port_id];  in rte_eth_bond_8023ad_slave_info()
    1530  bond_dev = &rte_eth_devices[port_id];  in bond_8023ad_ext_validate()
    1689  dev = &rte_eth_devices[port];  in rte_eth_bond_8023ad_dedicated_queues_enable()
    1715  dev = &rte_eth_devices[port];  in rte_eth_bond_8023ad_dedicated_queues_disable()
/f-stack/dpdk/app/test/
virtual_pmd.c
    259  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in virtual_ethdev_start_fn_set_success()
    273  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in virtual_ethdev_configure_fn_set_success()
    286  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in virtual_ethdev_rx_queue_setup_fn_set_success()
    299  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in virtual_ethdev_tx_queue_setup_fn_set_success()
    312  struct rte_eth_dev *dev = &rte_eth_devices[port_id];  in virtual_ethdev_link_update_fn_set_success()
    335  vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];  in virtual_ethdev_rx_burst_success()
    370  vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];  in virtual_ethdev_tx_burst_success()
    400  vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];  in virtual_ethdev_tx_burst_fail()
    428  struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];  in virtual_ethdev_rx_burst_fn_set_success()
    441  struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];  in virtual_ethdev_tx_burst_fn_set_success()
    [all …]
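virtual_pmd.c is the test-suite helper behind the bonding unit tests: each virtual_ethdev_*_fn_set_* call above looks the fake port up in rte_eth_devices[] and swaps one of its callbacks so a later burst or control call can be forced to succeed or fail. A sketch of the idea, assuming the pre-21.11 layout where the RX burst pointer is a direct member of struct rte_eth_dev; the stub and helper names are invented:

#include <stdint.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>   /* struct rte_eth_dev, rte_eth_devices[] */

/* Hypothetical stub that pretends the RX queue is always empty. */
static uint16_t
failing_rx_burst(void *queue __rte_unused, struct rte_mbuf **pkts __rte_unused,
                 uint16_t nb_pkts __rte_unused)
{
    return 0;
}

/* Swap the port's RX burst callback so rte_eth_rx_burst() hits the stub. */
static void
force_rx_failure(uint16_t port_id)
{
    struct rte_eth_dev *dev = &rte_eth_devices[port_id];

    dev->rx_pkt_burst = failing_rx_burst;
}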
/f-stack/dpdk/drivers/net/bnxt/
rte_pmd_bnxt.c
    53   eth_dev = &rte_eth_devices[port];  in rte_pmd_bnxt_set_tx_loopback()
    144  dev = &rte_eth_devices[port];  in rte_pmd_bnxt_set_vf_mac_addr()
    244  dev = &rte_eth_devices[port];  in rte_pmd_bnxt_set_vf_mac_anti_spoof()
    303  dev = &rte_eth_devices[port];  in rte_pmd_bnxt_set_vf_vlan_anti_spoof()
    360  dev = &rte_eth_devices[port];  in rte_pmd_bnxt_set_vf_vlan_stripq()
    404  dev = &rte_eth_devices[port];  in rte_pmd_bnxt_set_vf_rxmode()
    504  dev = &rte_eth_devices[port];  in rte_pmd_bnxt_set_vf_vlan_filter()
    595  dev = &rte_eth_devices[port];  in rte_pmd_bnxt_get_vf_stats()
    631  dev = &rte_eth_devices[port];  in rte_pmd_bnxt_reset_vf_stats()
    665  dev = &rte_eth_devices[port];  in rte_pmd_bnxt_get_vf_rx_status()
    [all …]
/f-stack/dpdk/drivers/net/failsafe/
failsafe_eal.c
    24   !strncmp(name, rte_eth_devices[pid].device->name, len)) {  in fs_ethdev_portid_get()
    79   rte_eth_devices[pid].device->devargs;  in fs_bus_init()
    89   rte_eth_devices[pid].device->name,  in fs_bus_init()
    104  } else if (strncmp(rte_eth_devices[pid].device->name,  in fs_bus_init()
failsafe_rxtx.c
    150  sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]);  in failsafe_tx_burst()
    171  sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]);  in failsafe_tx_burst_fast()
/f-stack/dpdk/drivers/net/i40e/
rte_pmd_i40e.c
    24   dev = &rte_eth_devices[port];  in rte_pmd_i40e_ping_vfs()
    53   dev = &rte_eth_devices[port];  in rte_pmd_i40e_set_vf_mac_anti_spoof()
    157  dev = &rte_eth_devices[port];  in rte_pmd_i40e_set_vf_vlan_anti_spoof()
    417  dev = &rte_eth_devices[port];  in rte_pmd_i40e_set_tx_loopback()
    459  dev = &rte_eth_devices[port];  in rte_pmd_i40e_set_vf_unicast_promisc()
    500  dev = &rte_eth_devices[port];  in rte_pmd_i40e_set_vf_multicast_promisc()
    546  dev = &rte_eth_devices[port];  in rte_pmd_i40e_set_vf_mac_addr()
    591  dev = &rte_eth_devices[port];  in rte_pmd_i40e_remove_vf_mac_addr()
    630  dev = &rte_eth_devices[port];  in rte_pmd_i40e_set_vf_vlan_stripq()
    673  dev = &rte_eth_devices[port];  in rte_pmd_i40e_set_vf_vlan_insert()
    [all …]
/f-stack/dpdk/drivers/net/mlx5/
mlx5_mac.c
    166  priv = rte_eth_devices[port_id].data->dev_private;  in mlx5_mac_addr_set()
    171  mlx5_ifindex(&rte_eth_devices[port_id]),  in mlx5_mac_addr_set()
/f-stack/dpdk/drivers/net/mlx4/
mlx4.h
    151  ((struct mlx4_proc_priv *)rte_eth_devices[port_id].process_private)
    198  #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
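mlx4.h goes in the other direction: given the PMD's private state it re-derives the owning rte_eth_dev (ETH_DEV(priv)), and in secondary processes it reaches the per-process data hanging off process_private. The same idiom in generic form, with a made-up private struct standing in for struct mlx4_priv (mlx4 itself derives the port id through its dev_data rather than a cached field):

#include <stdint.h>
#include <rte_ethdev_driver.h>

/* Hypothetical PMD-private struct; only the back-reference matters here. */
struct my_priv {
    uint16_t port_id;   /* index of the owning slot in rte_eth_devices[] */
    /* ... driver state ... */
};

/* From private state back to the ethdev slot, mirroring mlx4's ETH_DEV(). */
#define MY_ETH_DEV(priv) (&rte_eth_devices[(priv)->port_id])

/* Per-process data (e.g. mapped doorbells) lives in process_private and must
 * be resolved in each process separately, mirroring the use at mlx4.h:151. */
#define MY_PROC_PRIV(port_id) (rte_eth_devices[(port_id)].process_private)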
mlx4_mp.c
    70   dev = &rte_eth_devices[param->port_id];  in mp_primary_handle()
    122  dev = &rte_eth_devices[param->port_id];  in mp_secondary_handle()
/f-stack/dpdk/drivers/net/virtio/
virtio_rxtx_simple.h
    35   rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=  in virtio_rxq_rearm_vec()
/f-stack/dpdk/drivers/crypto/octeontx2/
otx2_cryptodev_mbox.c
    232  struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];  in otx2_cpt_qp_ethdev_bind()
    239  if (!otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))  in otx2_cpt_qp_ethdev_bind()
/f-stack/dpdk/drivers/net/mlx5/linux/
mlx5_mp_os.c
    40   dev = &rte_eth_devices[param->port_id];  in mlx5_mp_os_primary_handle()
    127  dev = &rte_eth_devices[param->port_id];  in mlx5_mp_os_secondary_handle()
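mlx4_mp.c above and mlx5_mp_os.c here implement the primary/secondary IPC handlers: the message only carries a port id, and each process resolves it against its own view of rte_eth_devices[] before acting. Such a handler is registered once with rte_mp_action_register(). A sketch of that handler shape, using the real EAL IPC types (struct rte_mp_msg, the rte_mp_t callback signature) but an invented parameter struct and handler name:

#include <stdint.h>
#include <rte_eal.h>             /* struct rte_mp_msg, rte_mp_action_register() */
#include <rte_ethdev_driver.h>   /* rte_eth_devices[] */

/* Illustrative IPC payload; the drivers define their own param layouts. */
struct my_mp_param {
    uint16_t port_id;
    uint32_t command;
};

/* Secondary-process handler: resolve the port id carried in the message. */
static int
my_mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
{
    const struct my_mp_param *param =
        (const struct my_mp_param *)mp_msg->param;
    struct rte_eth_dev *dev = &rte_eth_devices[param->port_id];

    (void)peer;
    /* ... act on dev, e.g. (re)install this process's Rx/Tx functions ... */
    (void)dev;
    return 0;
}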
/f-stack/dpdk/drivers/net/memif/
rte_eth_memif.c
    101  dev = &rte_eth_devices[port_id];  in memif_mp_send_region()
    297  struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;  in eth_memif_rx()
    299  rte_eth_devices[mq->in_port].process_private;  in eth_memif_rx()
    439  struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;  in eth_memif_rx_zc()
    441  rte_eth_devices[mq->in_port].process_private;  in eth_memif_rx_zc()
    564  struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;  in eth_memif_tx()
    566  rte_eth_devices[mq->in_port].process_private;  in eth_memif_tx()
    736  struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;  in eth_memif_tx_zc()
    738  rte_eth_devices[mq->in_port].process_private;  in eth_memif_tx_zc()
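The memif data path (like virtio's virtio_rxq_rearm_vec() above) stores the owning port in each queue, so the burst functions can reach both the shared dev_private configuration and the per-process process_private mappings through rte_eth_devices[]. A sketch of that per-queue back-reference, with invented type and field names standing in for memif's:

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>

/* Illustrative queue struct: it remembers which port owns it. */
struct my_queue {
    uint16_t in_port;   /* owning port id, recorded at queue setup time */
    /* ... ring pointers ... */
};

/* Burst stub: pull shared and per-process device state from the queue's port. */
static uint16_t
my_rx_burst(void *rxq, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
    struct my_queue *mq = rxq;
    struct rte_eth_dev *dev = &rte_eth_devices[mq->in_port];
    void *shared_cfg = dev->data->dev_private;   /* shared across processes */
    void *local_maps = dev->process_private;     /* valid only in this process */

    (void)shared_cfg;
    (void)local_maps;
    (void)pkts;
    (void)nb_pkts;
    return 0; /* no packets in this stub */
}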
/f-stack/dpdk/lib/librte_eventdev/
rte_event_eth_rx_adapter.c
    1903  &rte_eth_devices[i]) :  in rxa_ctrl()
    1905  &rte_eth_devices[i]);  in rxa_ctrl()
    1986  rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];  in rte_event_eth_rx_adapter_create_ext()
    2093  rte_eth_devices[eth_dev_id].data->nb_rx_queues) {  in rte_event_eth_rx_adapter_queue_add()
    2115  &rte_eth_devices[eth_dev_id],  in rte_event_eth_rx_adapter_queue_add()
    2177  rte_eth_devices[eth_dev_id].data->nb_rx_queues) {  in rte_event_eth_rx_adapter_queue_del()
    2189  &rte_eth_devices[eth_dev_id],  in rte_event_eth_rx_adapter_queue_del()
    2306  &rte_eth_devices[i],  in rte_event_eth_rx_adapter_stats_get()
    2343  &rte_eth_devices[i]);  in rte_event_eth_rx_adapter_stats_reset()
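The event Rx adapter caches one pointer per port (rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]) and validates queue ids against dev->data->nb_rx_queues when queues are added. From the application side the adapter is driven by eth_dev_id and an Rx queue id, where -1 means every Rx queue of that port; the identifiers below are example values and the calls are the public eventdev API:

#include <string.h>
#include <rte_event_eth_rx_adapter.h>

/* Attach all Rx queues of one port to an already-created Rx adapter. */
static int
attach_port_to_adapter(uint8_t adapter_id, uint16_t eth_dev_id,
                       uint8_t event_queue_id)
{
    struct rte_event_eth_rx_adapter_queue_conf conf;

    memset(&conf, 0, sizeof(conf));
    conf.ev.queue_id = event_queue_id;
    conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

    return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_dev_id,
                                              -1 /* all Rx queues */, &conf);
}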