Lines matching refs:dev (DPDK, lib/ethdev/rte_ethdev.c). Each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark entries where dev is a function parameter or a local variable.

663 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)  in eth_dev_validate_rx_queue()  argument
667 if (rx_queue_id >= dev->data->nb_rx_queues) { in eth_dev_validate_rx_queue()
668 port_id = dev->data->port_id; in eth_dev_validate_rx_queue()
675 if (dev->data->rx_queues[rx_queue_id] == NULL) { in eth_dev_validate_rx_queue()
676 port_id = dev->data->port_id; in eth_dev_validate_rx_queue()
687 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id) in eth_dev_validate_tx_queue() argument
691 if (tx_queue_id >= dev->data->nb_tx_queues) { in eth_dev_validate_tx_queue()
692 port_id = dev->data->port_id; in eth_dev_validate_tx_queue()
699 if (dev->data->tx_queues[tx_queue_id] == NULL) { in eth_dev_validate_tx_queue()
700 port_id = dev->data->port_id; in eth_dev_validate_tx_queue()
713 struct rte_eth_dev *dev; in rte_eth_dev_rx_queue_start() local
717 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_queue_start()
719 if (!dev->data->dev_started) { in rte_eth_dev_rx_queue_start()
726 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); in rte_eth_dev_rx_queue_start()
730 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP); in rte_eth_dev_rx_queue_start()
732 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { in rte_eth_dev_rx_queue_start()
739 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { in rte_eth_dev_rx_queue_start()
746 return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id)); in rte_eth_dev_rx_queue_start()
752 struct rte_eth_dev *dev; in rte_eth_dev_rx_queue_stop() local
756 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_queue_stop()
758 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); in rte_eth_dev_rx_queue_stop()
762 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP); in rte_eth_dev_rx_queue_stop()
764 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { in rte_eth_dev_rx_queue_stop()
771 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { in rte_eth_dev_rx_queue_stop()
778 return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); in rte_eth_dev_rx_queue_stop()
784 struct rte_eth_dev *dev; in rte_eth_dev_tx_queue_start() local
788 dev = &rte_eth_devices[port_id]; in rte_eth_dev_tx_queue_start()
790 if (!dev->data->dev_started) { in rte_eth_dev_tx_queue_start()
797 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); in rte_eth_dev_tx_queue_start()
801 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP); in rte_eth_dev_tx_queue_start()
803 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { in rte_eth_dev_tx_queue_start()
810 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { in rte_eth_dev_tx_queue_start()
817 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); in rte_eth_dev_tx_queue_start()
823 struct rte_eth_dev *dev; in rte_eth_dev_tx_queue_stop() local
827 dev = &rte_eth_devices[port_id]; in rte_eth_dev_tx_queue_stop()
829 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); in rte_eth_dev_tx_queue_stop()
833 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); in rte_eth_dev_tx_queue_stop()
835 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { in rte_eth_dev_tx_queue_stop()
842 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { in rte_eth_dev_tx_queue_stop()
849 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); in rte_eth_dev_tx_queue_stop()
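
The four entry points above (Rx/Tx queue start/stop) operate on one queue of an already-started port: start fails with -EINVAL if the port is not started or the queue is a hairpin queue, and returns 0 (with an informational log) if the queue is already in the requested state. A minimal sketch of restarting a deferred-start Rx queue; the port id and queue id are illustrative:

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_errno.h>

    static int restart_rx_queue(uint16_t port_id, uint16_t qid)
    {
            int ret;

            /* Per-queue stop; -ENOTSUP if the PMD lacks rx_queue_stop. */
            ret = rte_eth_dev_rx_queue_stop(port_id, qid);
            if (ret != 0) {
                    printf("rx queue stop: %s\n", rte_strerror(-ret));
                    return ret;
            }

            /* -EINVAL if the port itself is not started. */
            return rte_eth_dev_rx_queue_start(port_id, qid);
    }
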
1071 struct rte_eth_dev *dev; in rte_eth_dev_configure() local
1079 dev = &rte_eth_devices[port_id]; in rte_eth_dev_configure()
1088 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); in rte_eth_dev_configure()
1090 if (dev->data->dev_started) { in rte_eth_dev_configure()
1102 dev->data->dev_configured = 0; in rte_eth_dev_configure()
1105 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); in rte_eth_dev_configure()
1111 if (dev_conf != &dev->data->dev_conf) in rte_eth_dev_configure()
1112 memcpy(&dev->data->dev_conf, dev_conf, in rte_eth_dev_configure()
1113 sizeof(dev->data->dev_conf)); in rte_eth_dev_configure()
1116 old_mtu = dev->data->mtu; in rte_eth_dev_configure()
1174 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { in rte_eth_dev_configure()
1176 dev->device->driver->name); in rte_eth_dev_configure()
1181 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { in rte_eth_dev_configure()
1183 dev->device->driver->name); in rte_eth_dev_configure()
1189 dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU; in rte_eth_dev_configure()
1192 dev->data->dev_conf.rxmode.mtu); in rte_eth_dev_configure()
1196 dev->data->mtu = dev->data->dev_conf.rxmode.mtu; in rte_eth_dev_configure()
1208 max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len; in rte_eth_dev_configure()
1210 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; in rte_eth_dev_configure()
1212 dev->data->dev_conf.rxmode.max_lro_pkt_size, in rte_eth_dev_configure()
1243 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = in rte_eth_dev_configure()
1272 diag = eth_dev_rx_queue_config(dev, nb_rx_q); in rte_eth_dev_configure()
1281 diag = eth_dev_tx_queue_config(dev, nb_tx_q); in rte_eth_dev_configure()
1286 eth_dev_rx_queue_config(dev, 0); in rte_eth_dev_configure()
1291 diag = (*dev->dev_ops->dev_configure)(dev); in rte_eth_dev_configure()
1300 diag = __rte_eth_dev_profile_init(port_id, dev); in rte_eth_dev_configure()
1311 dev->data->dev_conf.rxmode.offloads, "Rx", in rte_eth_dev_configure()
1321 dev->data->dev_conf.txmode.offloads, "Tx", in rte_eth_dev_configure()
1328 dev->data->dev_configured = 1; in rte_eth_dev_configure()
1332 eth_dev_rx_queue_config(dev, 0); in rte_eth_dev_configure()
1333 eth_dev_tx_queue_config(dev, 0); in rte_eth_dev_configure()
1335 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); in rte_eth_dev_configure()
1336 if (old_mtu != dev->data->mtu) in rte_eth_dev_configure()
1337 dev->data->mtu = old_mtu; in rte_eth_dev_configure()
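
rte_eth_dev_configure() snapshots dev_conf into orig_conf, applies the caller's configuration (MTU, RSS hash, queue counts), and on any failure releases the queue arrays and restores orig_conf and the old MTU, so a failed call leaves the port as it was. A minimal caller sketch, assuming driver defaults are acceptable; the port id and queue counts are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_errno.h>

    static int configure_port(uint16_t port_id)
    {
            struct rte_eth_conf conf;
            int ret;

            memset(&conf, 0, sizeof(conf));    /* driver defaults */

            /* No cleanup needed on error: the function rolls back itself. */
            ret = rte_eth_dev_configure(port_id, 1 /* nb_rx_q */,
                                        1 /* nb_tx_q */, &conf);
            if (ret < 0)
                    printf("configure: %s\n", rte_strerror(-ret));
            return ret;
    }
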
1344 eth_dev_mac_restore(struct rte_eth_dev *dev, in eth_dev_mac_restore() argument
1353 addr = &dev->data->mac_addrs[0]; in eth_dev_mac_restore()
1354 if (*dev->dev_ops->mac_addr_set != NULL) in eth_dev_mac_restore()
1355 (*dev->dev_ops->mac_addr_set)(dev, addr); in eth_dev_mac_restore()
1356 else if (*dev->dev_ops->mac_addr_add != NULL) in eth_dev_mac_restore()
1357 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); in eth_dev_mac_restore()
1359 if (*dev->dev_ops->mac_addr_add != NULL) { in eth_dev_mac_restore()
1361 addr = &dev->data->mac_addrs[i]; in eth_dev_mac_restore()
1368 pool_mask = dev->data->mac_pool_sel[i]; in eth_dev_mac_restore()
1372 (*dev->dev_ops->mac_addr_add)(dev, in eth_dev_mac_restore()
1382 eth_dev_config_restore(struct rte_eth_dev *dev, in eth_dev_config_restore() argument
1388 eth_dev_mac_restore(dev, dev_info); in eth_dev_config_restore()
1396 *dev->dev_ops->promiscuous_enable != NULL) { in eth_dev_config_restore()
1398 (*dev->dev_ops->promiscuous_enable)(dev)); in eth_dev_config_restore()
1406 *dev->dev_ops->promiscuous_disable != NULL) { in eth_dev_config_restore()
1408 (*dev->dev_ops->promiscuous_disable)(dev)); in eth_dev_config_restore()
1423 *dev->dev_ops->allmulticast_enable != NULL) { in eth_dev_config_restore()
1425 (*dev->dev_ops->allmulticast_enable)(dev)); in eth_dev_config_restore()
1433 *dev->dev_ops->allmulticast_disable != NULL) { in eth_dev_config_restore()
1435 (*dev->dev_ops->allmulticast_disable)(dev)); in eth_dev_config_restore()
1450 struct rte_eth_dev *dev; in rte_eth_dev_start() local
1456 dev = &rte_eth_devices[port_id]; in rte_eth_dev_start()
1458 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); in rte_eth_dev_start()
1460 if (dev->data->dev_configured == 0) { in rte_eth_dev_start()
1467 if (dev->data->dev_started != 0) { in rte_eth_dev_start()
1480 eth_dev_mac_restore(dev, &dev_info); in rte_eth_dev_start()
1482 diag = (*dev->dev_ops->dev_start)(dev); in rte_eth_dev_start()
1484 dev->data->dev_started = 1; in rte_eth_dev_start()
1488 ret = eth_dev_config_restore(dev, &dev_info, port_id); in rte_eth_dev_start()
1503 if (dev->data->dev_conf.intr_conf.lsc == 0) { in rte_eth_dev_start()
1504 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); in rte_eth_dev_start()
1505 (*dev->dev_ops->link_update)(dev, 0); in rte_eth_dev_start()
1509 eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev); in rte_eth_dev_start()
1518 struct rte_eth_dev *dev; in rte_eth_dev_stop() local
1522 dev = &rte_eth_devices[port_id]; in rte_eth_dev_stop()
1524 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP); in rte_eth_dev_stop()
1526 if (dev->data->dev_started == 0) { in rte_eth_dev_stop()
1536 dev->data->dev_started = 0; in rte_eth_dev_stop()
1537 ret = (*dev->dev_ops->dev_stop)(dev); in rte_eth_dev_stop()
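
rte_eth_dev_start() rejects a never-configured port (-EINVAL), restores MAC and promiscuous/allmulticast state once the PMD's dev_start succeeds, updates the link when lsc interrupts are off, and installs the fast-path ops; rte_eth_dev_stop() clears dev_started before calling the PMD and is a no-op on an already-stopped port. A lifecycle sketch (configuration and queue setup assumed done earlier):

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_errno.h>

    static void run_port(uint16_t port_id)
    {
            int ret;

            ret = rte_eth_dev_start(port_id);
            if (ret < 0) {
                    printf("start: %s\n", rte_strerror(-ret));
                    return;
            }

            /* ... rx/tx bursts ... */

            ret = rte_eth_dev_stop(port_id);
            if (ret < 0)
                    printf("stop: %s\n", rte_strerror(-ret));

            /* close refuses a started port, so stop must come first */
            rte_eth_dev_close(port_id);
    }
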
1546 struct rte_eth_dev *dev; in rte_eth_dev_set_link_up() local
1549 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_link_up()
1551 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); in rte_eth_dev_set_link_up()
1552 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); in rte_eth_dev_set_link_up()
1558 struct rte_eth_dev *dev; in rte_eth_dev_set_link_down() local
1561 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_link_down()
1563 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); in rte_eth_dev_set_link_down()
1564 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); in rte_eth_dev_set_link_down()
1570 struct rte_eth_dev *dev; in rte_eth_dev_close() local
1575 dev = &rte_eth_devices[port_id]; in rte_eth_dev_close()
1577 if (dev->data->dev_started) { in rte_eth_dev_close()
1583 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); in rte_eth_dev_close()
1584 *lasterr = (*dev->dev_ops->dev_close)(dev); in rte_eth_dev_close()
1589 *lasterr = rte_eth_dev_release_port(dev); in rte_eth_dev_close()
1597 struct rte_eth_dev *dev; in rte_eth_dev_reset() local
1601 dev = &rte_eth_devices[port_id]; in rte_eth_dev_reset()
1603 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); in rte_eth_dev_reset()
1611 ret = dev->dev_ops->dev_reset(dev); in rte_eth_dev_reset()
1619 struct rte_eth_dev *dev; in rte_eth_dev_is_removed() local
1623 dev = &rte_eth_devices[port_id]; in rte_eth_dev_is_removed()
1625 if (dev->state == RTE_ETH_DEV_REMOVED) in rte_eth_dev_is_removed()
1628 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); in rte_eth_dev_is_removed()
1630 ret = dev->dev_ops->is_removed(dev); in rte_eth_dev_is_removed()
1633 dev->state = RTE_ETH_DEV_REMOVED; in rte_eth_dev_is_removed()
1717 struct rte_eth_dev *dev; in rte_eth_rx_queue_setup() local
1722 dev = &rte_eth_devices[port_id]; in rte_eth_rx_queue_setup()
1724 if (rx_queue_id >= dev->data->nb_rx_queues) { in rte_eth_rx_queue_setup()
1729 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); in rte_eth_rx_queue_setup()
1813 if (dev->data->dev_started && in rte_eth_rx_queue_setup()
1818 if (dev->data->dev_started && in rte_eth_rx_queue_setup()
1819 (dev->data->rx_queue_state[rx_queue_id] != in rte_eth_rx_queue_setup()
1823 eth_dev_rxq_release(dev, rx_queue_id); in rte_eth_rx_queue_setup()
1838 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; in rte_eth_rx_queue_setup()
1879 max_rx_pktlen = dev->data->mtu + overhead_len; in rte_eth_rx_queue_setup()
1880 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) in rte_eth_rx_queue_setup()
1881 dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen; in rte_eth_rx_queue_setup()
1883 dev->data->dev_conf.rxmode.max_lro_pkt_size, in rte_eth_rx_queue_setup()
1890 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, in rte_eth_rx_queue_setup()
1893 if (!dev->data->min_rx_buf_size || in rte_eth_rx_queue_setup()
1894 dev->data->min_rx_buf_size > mbp_buf_size) in rte_eth_rx_queue_setup()
1895 dev->data->min_rx_buf_size = mbp_buf_size; in rte_eth_rx_queue_setup()
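
rte_eth_rx_queue_setup() strips port-level offloads from the per-queue request (they are already in effect), validates LRO sizes against the configured MTU, and tracks the smallest mbuf data-room size across queues in min_rx_buf_size. A minimal sketch; mbuf_pool is assumed to come from rte_pktmbuf_pool_create() elsewhere, and the descriptor count is illustrative:

    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    static int setup_rx_queue(uint16_t port_id, struct rte_mempool *mbuf_pool)
    {
            /* NULL rx_conf selects the defaults from rte_eth_dev_info_get(). */
            return rte_eth_rx_queue_setup(port_id, 0 /* queue */,
                                          1024 /* descriptors */,
                                          rte_eth_dev_socket_id(port_id),
                                          NULL, mbuf_pool);
    }
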
1909 struct rte_eth_dev *dev; in rte_eth_rx_hairpin_queue_setup() local
1915 dev = &rte_eth_devices[port_id]; in rte_eth_rx_hairpin_queue_setup()
1917 if (rx_queue_id >= dev->data->nb_rx_queues) { in rte_eth_rx_hairpin_queue_setup()
1932 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, in rte_eth_rx_hairpin_queue_setup()
1955 for (i = 0, count = 0; i < dev->data->nb_rx_queues && in rte_eth_rx_hairpin_queue_setup()
1957 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) in rte_eth_rx_hairpin_queue_setup()
1965 if (dev->data->dev_started) in rte_eth_rx_hairpin_queue_setup()
1967 eth_dev_rxq_release(dev, rx_queue_id); in rte_eth_rx_hairpin_queue_setup()
1968 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, in rte_eth_rx_hairpin_queue_setup()
1971 dev->data->rx_queue_state[rx_queue_id] = in rte_eth_rx_hairpin_queue_setup()
1981 struct rte_eth_dev *dev; in rte_eth_tx_queue_setup() local
1987 dev = &rte_eth_devices[port_id]; in rte_eth_tx_queue_setup()
1989 if (tx_queue_id >= dev->data->nb_tx_queues) { in rte_eth_tx_queue_setup()
1994 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); in rte_eth_tx_queue_setup()
2018 if (dev->data->dev_started && in rte_eth_tx_queue_setup()
2023 if (dev->data->dev_started && in rte_eth_tx_queue_setup()
2024 (dev->data->tx_queue_state[tx_queue_id] != in rte_eth_tx_queue_setup()
2028 eth_dev_txq_release(dev, tx_queue_id); in rte_eth_tx_queue_setup()
2043 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; in rte_eth_tx_queue_setup()
2065 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, in rte_eth_tx_queue_setup()
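
rte_eth_tx_queue_setup() mirrors the Rx path: the queue id is bounds-checked, a previously configured queue is released and re-created, and port-level Tx offloads are masked out of the per-queue request. A companion sketch to the Rx one above:

    #include <rte_ethdev.h>

    static int setup_tx_queue(uint16_t port_id)
    {
            /* NULL tx_conf means "use the PMD defaults". */
            return rte_eth_tx_queue_setup(port_id, 0 /* queue */,
                                          1024 /* descriptors */,
                                          rte_eth_dev_socket_id(port_id),
                                          NULL);
    }
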
2074 struct rte_eth_dev *dev; in rte_eth_tx_hairpin_queue_setup() local
2081 dev = &rte_eth_devices[port_id]; in rte_eth_tx_hairpin_queue_setup()
2083 if (tx_queue_id >= dev->data->nb_tx_queues) { in rte_eth_tx_hairpin_queue_setup()
2098 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, in rte_eth_tx_hairpin_queue_setup()
2121 for (i = 0, count = 0; i < dev->data->nb_tx_queues && in rte_eth_tx_hairpin_queue_setup()
2123 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) in rte_eth_tx_hairpin_queue_setup()
2131 if (dev->data->dev_started) in rte_eth_tx_hairpin_queue_setup()
2133 eth_dev_txq_release(dev, tx_queue_id); in rte_eth_tx_hairpin_queue_setup()
2134 ret = (*dev->dev_ops->tx_hairpin_queue_setup) in rte_eth_tx_hairpin_queue_setup()
2135 (dev, tx_queue_id, nb_tx_desc, conf); in rte_eth_tx_hairpin_queue_setup()
2137 dev->data->tx_queue_state[tx_queue_id] = in rte_eth_tx_hairpin_queue_setup()
2145 struct rte_eth_dev *dev; in rte_eth_hairpin_bind() local
2149 dev = &rte_eth_devices[tx_port]; in rte_eth_hairpin_bind()
2151 if (dev->data->dev_started == 0) { in rte_eth_hairpin_bind()
2156 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); in rte_eth_hairpin_bind()
2157 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); in rte_eth_hairpin_bind()
2169 struct rte_eth_dev *dev; in rte_eth_hairpin_unbind() local
2173 dev = &rte_eth_devices[tx_port]; in rte_eth_hairpin_unbind()
2175 if (dev->data->dev_started == 0) { in rte_eth_hairpin_unbind()
2180 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); in rte_eth_hairpin_unbind()
2181 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); in rte_eth_hairpin_unbind()
2194 struct rte_eth_dev *dev; in rte_eth_hairpin_get_peer_ports() local
2198 dev = &rte_eth_devices[port_id]; in rte_eth_hairpin_get_peer_ports()
2214 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, in rte_eth_hairpin_get_peer_ports()
2217 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, in rte_eth_hairpin_get_peer_ports()
2280 struct rte_eth_dev *dev; in rte_eth_tx_done_cleanup() local
2284 dev = &rte_eth_devices[port_id]; in rte_eth_tx_done_cleanup()
2286 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); in rte_eth_tx_done_cleanup()
2289 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], in rte_eth_tx_done_cleanup()
2297 struct rte_eth_dev *dev; in rte_eth_promiscuous_enable() local
2301 dev = &rte_eth_devices[port_id]; in rte_eth_promiscuous_enable()
2303 if (dev->data->promiscuous == 1) in rte_eth_promiscuous_enable()
2306 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); in rte_eth_promiscuous_enable()
2308 diag = (*dev->dev_ops->promiscuous_enable)(dev); in rte_eth_promiscuous_enable()
2309 dev->data->promiscuous = (diag == 0) ? 1 : 0; in rte_eth_promiscuous_enable()
2317 struct rte_eth_dev *dev; in rte_eth_promiscuous_disable() local
2321 dev = &rte_eth_devices[port_id]; in rte_eth_promiscuous_disable()
2323 if (dev->data->promiscuous == 0) in rte_eth_promiscuous_disable()
2326 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); in rte_eth_promiscuous_disable()
2328 dev->data->promiscuous = 0; in rte_eth_promiscuous_disable()
2329 diag = (*dev->dev_ops->promiscuous_disable)(dev); in rte_eth_promiscuous_disable()
2331 dev->data->promiscuous = 1; in rte_eth_promiscuous_disable()
2339 struct rte_eth_dev *dev; in rte_eth_promiscuous_get() local
2342 dev = &rte_eth_devices[port_id]; in rte_eth_promiscuous_get()
2344 return dev->data->promiscuous; in rte_eth_promiscuous_get()
2350 struct rte_eth_dev *dev; in rte_eth_allmulticast_enable() local
2354 dev = &rte_eth_devices[port_id]; in rte_eth_allmulticast_enable()
2356 if (dev->data->all_multicast == 1) in rte_eth_allmulticast_enable()
2359 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); in rte_eth_allmulticast_enable()
2360 diag = (*dev->dev_ops->allmulticast_enable)(dev); in rte_eth_allmulticast_enable()
2361 dev->data->all_multicast = (diag == 0) ? 1 : 0; in rte_eth_allmulticast_enable()
2369 struct rte_eth_dev *dev; in rte_eth_allmulticast_disable() local
2373 dev = &rte_eth_devices[port_id]; in rte_eth_allmulticast_disable()
2375 if (dev->data->all_multicast == 0) in rte_eth_allmulticast_disable()
2378 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); in rte_eth_allmulticast_disable()
2379 dev->data->all_multicast = 0; in rte_eth_allmulticast_disable()
2380 diag = (*dev->dev_ops->allmulticast_disable)(dev); in rte_eth_allmulticast_disable()
2382 dev->data->all_multicast = 1; in rte_eth_allmulticast_disable()
2390 struct rte_eth_dev *dev; in rte_eth_allmulticast_get() local
2393 dev = &rte_eth_devices[port_id]; in rte_eth_allmulticast_get()
2395 return dev->data->all_multicast; in rte_eth_allmulticast_get()
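
The promiscuous and allmulticast pairs are idempotent: each returns 0 at once when the software flag already matches, and the flag in dev->data is only left changed when the PMD call succeeds (the disable paths set it optimistically and roll it back on error). Illustrative usage:

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_errno.h>

    static void ensure_promisc(uint16_t port_id)
    {
            int ret;

            if (rte_eth_promiscuous_get(port_id) == 1)
                    return;         /* already on */

            ret = rte_eth_promiscuous_enable(port_id);  /* -ENOTSUP possible */
            if (ret != 0)
                    printf("promiscuous enable: %s\n", rte_strerror(-ret));
    }
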
2401 struct rte_eth_dev *dev; in rte_eth_link_get() local
2404 dev = &rte_eth_devices[port_id]; in rte_eth_link_get()
2412 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) in rte_eth_link_get()
2413 rte_eth_linkstatus_get(dev, eth_link); in rte_eth_link_get()
2415 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); in rte_eth_link_get()
2416 (*dev->dev_ops->link_update)(dev, 1); in rte_eth_link_get()
2417 *eth_link = dev->data->dev_link; in rte_eth_link_get()
2426 struct rte_eth_dev *dev; in rte_eth_link_get_nowait() local
2429 dev = &rte_eth_devices[port_id]; in rte_eth_link_get_nowait()
2437 if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) in rte_eth_link_get_nowait()
2438 rte_eth_linkstatus_get(dev, eth_link); in rte_eth_link_get_nowait()
2440 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); in rte_eth_link_get_nowait()
2441 (*dev->dev_ops->link_update)(dev, 0); in rte_eth_link_get_nowait()
2442 *eth_link = dev->data->dev_link; in rte_eth_link_get_nowait()
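
Both link queries share one pattern: with lsc interrupts enabled on a started port they return the cached status via rte_eth_linkstatus_get(); otherwise they invoke the PMD's link_update, waiting for completion in rte_eth_link_get() and not waiting in the _nowait variant. A sketch (constant names follow the RTE_ETH_-prefixed scheme of recent DPDK releases):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void print_link(uint16_t port_id)
    {
            struct rte_eth_link link;

            if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
                link.link_status == RTE_ETH_LINK_UP)
                    printf("port %u: up, %u Mbps\n", port_id, link.link_speed);
    }
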
2504 struct rte_eth_dev *dev; in rte_eth_stats_get() local
2507 dev = &rte_eth_devices[port_id]; in rte_eth_stats_get()
2517 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); in rte_eth_stats_get()
2518 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; in rte_eth_stats_get()
2519 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); in rte_eth_stats_get()
2525 struct rte_eth_dev *dev; in rte_eth_stats_reset() local
2529 dev = &rte_eth_devices[port_id]; in rte_eth_stats_reset()
2531 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); in rte_eth_stats_reset()
2532 ret = (*dev->dev_ops->stats_reset)(dev); in rte_eth_stats_reset()
2536 dev->data->rx_mbuf_alloc_failed = 0; in rte_eth_stats_reset()
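
rte_eth_stats_get() injects the ethdev layer's own rx_mbuf_alloc_failed counter into stats->rx_nombuf before delegating to the PMD, and rte_eth_stats_reset() clears that counter only after the PMD reset succeeds. Sketch:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void print_stats(uint16_t port_id)
    {
            struct rte_eth_stats stats;

            if (rte_eth_stats_get(port_id, &stats) != 0)
                    return;
            printf("rx %" PRIu64 " pkts, %" PRIu64 " missed, %" PRIu64
                   " mbuf alloc failures\n",
                   stats.ipackets, stats.imissed, stats.rx_nombuf);
    }
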
2542 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) in eth_dev_get_xstats_basic_count() argument
2547 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_dev_get_xstats_basic_count()
2548 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_dev_get_xstats_basic_count()
2551 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { in eth_dev_get_xstats_basic_count()
2562 struct rte_eth_dev *dev; in eth_dev_get_xstats_count() local
2566 dev = &rte_eth_devices[port_id]; in eth_dev_get_xstats_count()
2567 if (dev->dev_ops->xstats_get_names != NULL) { in eth_dev_get_xstats_count()
2568 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); in eth_dev_get_xstats_count()
2575 count += eth_dev_get_xstats_basic_count(dev); in eth_dev_get_xstats_count()
2630 eth_basic_stats_get_names(struct rte_eth_dev *dev, in eth_basic_stats_get_names() argument
2644 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) in eth_basic_stats_get_names()
2647 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_basic_stats_get_names()
2658 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_basic_stats_get_names()
2682 struct rte_eth_dev *dev; in rte_eth_xstats_get_names_by_id() local
2687 dev = &rte_eth_devices[port_id]; in rte_eth_xstats_get_names_by_id()
2689 basic_count = eth_dev_get_xstats_basic_count(dev); in rte_eth_xstats_get_names_by_id()
2706 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { in rte_eth_xstats_get_names_by_id()
2723 return (*dev->dev_ops->xstats_get_names_by_id)(dev, in rte_eth_xstats_get_names_by_id()
2756 eth_basic_stats_get_names(dev, xstats_names_copy); in rte_eth_xstats_get_names_by_id()
2785 struct rte_eth_dev *dev; in rte_eth_xstats_get_names() local
2796 dev = &rte_eth_devices[port_id]; in rte_eth_xstats_get_names()
2798 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); in rte_eth_xstats_get_names()
2800 if (dev->dev_ops->xstats_get_names != NULL) { in rte_eth_xstats_get_names()
2804 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( in rte_eth_xstats_get_names()
2805 dev, in rte_eth_xstats_get_names()
2820 struct rte_eth_dev *dev; in eth_basic_stats_get() local
2831 dev = &rte_eth_devices[port_id]; in eth_basic_stats_get()
2833 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_basic_stats_get()
2834 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_basic_stats_get()
2844 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) in eth_basic_stats_get()
2881 struct rte_eth_dev *dev; in rte_eth_xstats_get_by_id() local
2886 dev = &rte_eth_devices[port_id]; in rte_eth_xstats_get_by_id()
2893 basic_count = eth_dev_get_xstats_basic_count(dev); in rte_eth_xstats_get_by_id()
2906 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { in rte_eth_xstats_get_by_id()
2907 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); in rte_eth_xstats_get_by_id()
2924 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, in rte_eth_xstats_get_by_id()
2969 struct rte_eth_dev *dev; in rte_eth_xstats_get() local
2976 dev = &rte_eth_devices[port_id]; in rte_eth_xstats_get()
2978 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in rte_eth_xstats_get()
2979 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in rte_eth_xstats_get()
2983 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) in rte_eth_xstats_get()
2987 if (dev->dev_ops->xstats_get != NULL) { in rte_eth_xstats_get()
2991 xcount = (*dev->dev_ops->xstats_get)(dev, in rte_eth_xstats_get()
3021 struct rte_eth_dev *dev; in rte_eth_xstats_reset() local
3024 dev = &rte_eth_devices[port_id]; in rte_eth_xstats_reset()
3027 if (dev->dev_ops->xstats_reset != NULL) in rte_eth_xstats_reset()
3028 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); in rte_eth_xstats_reset()
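
The xstats calls merge two sources: generic per-port and per-queue counters (eth_dev_get_xstats_basic_count() of them, with queue counters included only under the RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS flag) followed by whatever the PMD reports through its xstats_get/xstats_get_names ops. The usual count-then-fetch pattern, as a sketch:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    static void dump_xstats(uint16_t port_id)
    {
            int i, n = rte_eth_xstats_get(port_id, NULL, 0);  /* count only */
            struct rte_eth_xstat *vals;
            struct rte_eth_xstat_name *names;

            if (n <= 0)
                    return;
            vals = calloc(n, sizeof(*vals));
            names = calloc(n, sizeof(*names));
            if (vals != NULL && names != NULL &&
                rte_eth_xstats_get(port_id, vals, n) == n &&
                rte_eth_xstats_get_names(port_id, names, n) == n)
                    for (i = 0; i < n; i++)
                            printf("%s: %" PRIu64 "\n",
                                   names[i].name, vals[i].value);
            free(vals);
            free(names);
    }
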
3038 struct rte_eth_dev *dev; in eth_dev_set_queue_stats_mapping() local
3041 dev = &rte_eth_devices[port_id]; in eth_dev_set_queue_stats_mapping()
3043 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) in eth_dev_set_queue_stats_mapping()
3046 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) in eth_dev_set_queue_stats_mapping()
3052 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); in eth_dev_set_queue_stats_mapping()
3053 return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx); in eth_dev_set_queue_stats_mapping()
3077 struct rte_eth_dev *dev; in rte_eth_dev_fw_version_get() local
3080 dev = &rte_eth_devices[port_id]; in rte_eth_dev_fw_version_get()
3089 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); in rte_eth_dev_fw_version_get()
3090 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, in rte_eth_dev_fw_version_get()
3097 struct rte_eth_dev *dev; in rte_eth_dev_info_get() local
3108 dev = &rte_eth_devices[port_id]; in rte_eth_dev_info_get()
3125 dev_info->device = dev->device; in rte_eth_dev_info_get()
3130 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); in rte_eth_dev_info_get()
3131 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); in rte_eth_dev_info_get()
3144 dev_info->driver_name = dev->device->driver->name; in rte_eth_dev_info_get()
3145 dev_info->nb_rx_queues = dev->data->nb_rx_queues; in rte_eth_dev_info_get()
3146 dev_info->nb_tx_queues = dev->data->nb_tx_queues; in rte_eth_dev_info_get()
3148 dev_info->dev_flags = &dev->data->dev_flags; in rte_eth_dev_info_get()
3156 struct rte_eth_dev *dev; in rte_eth_dev_conf_get() local
3159 dev = &rte_eth_devices[port_id]; in rte_eth_dev_conf_get()
3168 memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf)); in rte_eth_dev_conf_get()
3178 struct rte_eth_dev *dev; in rte_eth_dev_get_supported_ptypes() local
3182 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_supported_ptypes()
3191 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); in rte_eth_dev_get_supported_ptypes()
3192 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); in rte_eth_dev_get_supported_ptypes()
3221 struct rte_eth_dev *dev; in rte_eth_dev_set_ptypes() local
3227 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_ptypes()
3236 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || in rte_eth_dev_set_ptypes()
3237 *dev->dev_ops->dev_ptypes_set == NULL) { in rte_eth_dev_set_ptypes()
3243 ret = (*dev->dev_ops->dev_ptypes_set)(dev, in rte_eth_dev_set_ptypes()
3263 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); in rte_eth_dev_set_ptypes()
3288 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); in rte_eth_dev_set_ptypes()
3302 struct rte_eth_dev *dev; in rte_eth_macaddrs_get() local
3315 dev = &rte_eth_devices[port_id]; in rte_eth_macaddrs_get()
3317 memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0])); in rte_eth_macaddrs_get()
3325 struct rte_eth_dev *dev; in rte_eth_macaddr_get() local
3328 dev = &rte_eth_devices[port_id]; in rte_eth_macaddr_get()
3337 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); in rte_eth_macaddr_get()
3345 struct rte_eth_dev *dev; in rte_eth_dev_get_mtu() local
3348 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_mtu()
3356 *mtu = dev->data->mtu; in rte_eth_dev_get_mtu()
3365 struct rte_eth_dev *dev; in rte_eth_dev_set_mtu() local
3368 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_mtu()
3369 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); in rte_eth_dev_set_mtu()
3377 if (*dev->dev_ops->dev_infos_get != NULL) { in rte_eth_dev_set_mtu()
3387 if (dev->data->dev_configured == 0) { in rte_eth_dev_set_mtu()
3394 ret = (*dev->dev_ops->mtu_set)(dev, mtu); in rte_eth_dev_set_mtu()
3396 dev->data->mtu = mtu; in rte_eth_dev_set_mtu()
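
rte_eth_dev_set_mtu() requires the port to be configured first (otherwise frame-size-related Rx offload state could become inconsistent), range-checks the value against dev_info when the PMD implements dev_infos_get, and commits dev->data->mtu only on driver success. Sketch; 9000 is an illustrative jumbo value, bounded in practice by the device's min_mtu/max_mtu:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void set_jumbo(uint16_t port_id)
    {
            uint16_t mtu;

            if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
                rte_eth_dev_get_mtu(port_id, &mtu) == 0)
                    printf("port %u MTU now %u\n", port_id, mtu);
    }
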
3404 struct rte_eth_dev *dev; in rte_eth_dev_vlan_filter() local
3408 dev = &rte_eth_devices[port_id]; in rte_eth_dev_vlan_filter()
3410 if (!(dev->data->dev_conf.rxmode.offloads & in rte_eth_dev_vlan_filter()
3422 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); in rte_eth_dev_vlan_filter()
3424 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); in rte_eth_dev_vlan_filter()
3430 vfc = &dev->data->vlan_filter_conf; in rte_eth_dev_vlan_filter()
3447 struct rte_eth_dev *dev; in rte_eth_dev_set_vlan_strip_on_queue() local
3450 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_vlan_strip_on_queue()
3452 if (rx_queue_id >= dev->data->nb_rx_queues) { in rte_eth_dev_set_vlan_strip_on_queue()
3457 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); in rte_eth_dev_set_vlan_strip_on_queue()
3458 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); in rte_eth_dev_set_vlan_strip_on_queue()
3468 struct rte_eth_dev *dev; in rte_eth_dev_set_vlan_ether_type() local
3471 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_vlan_ether_type()
3473 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); in rte_eth_dev_set_vlan_ether_type()
3474 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, in rte_eth_dev_set_vlan_ether_type()
3482 struct rte_eth_dev *dev; in rte_eth_dev_set_vlan_offload() local
3491 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_vlan_offload()
3494 orig_offloads = dev->data->dev_conf.rxmode.offloads; in rte_eth_dev_set_vlan_offload()
3558 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); in rte_eth_dev_set_vlan_offload()
3559 dev->data->dev_conf.rxmode.offloads = dev_offloads; in rte_eth_dev_set_vlan_offload()
3560 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); in rte_eth_dev_set_vlan_offload()
3563 dev->data->dev_conf.rxmode.offloads = orig_offloads; in rte_eth_dev_set_vlan_offload()
3572 struct rte_eth_dev *dev; in rte_eth_dev_get_vlan_offload() local
3577 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_vlan_offload()
3578 dev_offloads = &dev->data->dev_conf.rxmode.offloads; in rte_eth_dev_get_vlan_offload()
3598 struct rte_eth_dev *dev; in rte_eth_dev_set_vlan_pvid() local
3601 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_vlan_pvid()
3603 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); in rte_eth_dev_set_vlan_pvid()
3604 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); in rte_eth_dev_set_vlan_pvid()
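
rte_eth_dev_vlan_filter() only works when the VLAN filter Rx offload is enabled on the port, and mirrors each successful driver update into the software shadow table vlan_filter_conf; the strip/tpid/pvid setters are thin wrappers around the corresponding ops. Sketch, assuming RTE_ETH_RX_OFFLOAD_VLAN_FILTER was set at configure time (the VLAN id is illustrative):

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_errno.h>

    static void allow_vlan(uint16_t port_id, uint16_t vlan_id)
    {
            int ret = rte_eth_dev_vlan_filter(port_id, vlan_id, 1 /* on */);

            if (ret != 0)
                    printf("vlan %u filter: %s\n", vlan_id, rte_strerror(-ret));
    }
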
3610 struct rte_eth_dev *dev; in rte_eth_dev_flow_ctrl_get() local
3613 dev = &rte_eth_devices[port_id]; in rte_eth_dev_flow_ctrl_get()
3622 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); in rte_eth_dev_flow_ctrl_get()
3624 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); in rte_eth_dev_flow_ctrl_get()
3630 struct rte_eth_dev *dev; in rte_eth_dev_flow_ctrl_set() local
3633 dev = &rte_eth_devices[port_id]; in rte_eth_dev_flow_ctrl_set()
3647 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); in rte_eth_dev_flow_ctrl_set()
3648 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); in rte_eth_dev_flow_ctrl_set()
3655 struct rte_eth_dev *dev; in rte_eth_dev_priority_flow_ctrl_set() local
3658 dev = &rte_eth_devices[port_id]; in rte_eth_dev_priority_flow_ctrl_set()
3673 if (*dev->dev_ops->priority_flow_ctrl_set) in rte_eth_dev_priority_flow_ctrl_set()
3674 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) in rte_eth_dev_priority_flow_ctrl_set()
3675 (dev, pfc_conf)); in rte_eth_dev_priority_flow_ctrl_set()
3733 struct rte_eth_dev *dev; in rte_eth_dev_priority_flow_ctrl_queue_info_get() local
3736 dev = &rte_eth_devices[port_id]; in rte_eth_dev_priority_flow_ctrl_queue_info_get()
3744 if (*dev->dev_ops->priority_flow_ctrl_queue_info_get) in rte_eth_dev_priority_flow_ctrl_queue_info_get()
3745 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get) in rte_eth_dev_priority_flow_ctrl_queue_info_get()
3746 (dev, pfc_queue_info)); in rte_eth_dev_priority_flow_ctrl_queue_info_get()
3756 struct rte_eth_dev *dev; in rte_eth_dev_priority_flow_ctrl_queue_configure() local
3760 dev = &rte_eth_devices[port_id]; in rte_eth_dev_priority_flow_ctrl_queue_configure()
3815 if (*dev->dev_ops->priority_flow_ctrl_queue_config) in rte_eth_dev_priority_flow_ctrl_queue_configure()
3817 (*dev->dev_ops->priority_flow_ctrl_queue_config)( in rte_eth_dev_priority_flow_ctrl_queue_configure()
3818 dev, pfc_queue_conf)); in rte_eth_dev_priority_flow_ctrl_queue_configure()
3871 struct rte_eth_dev *dev; in rte_eth_dev_rss_reta_update() local
3875 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rss_reta_update()
3898 dev->data->nb_rx_queues); in rte_eth_dev_rss_reta_update()
3902 mq_mode = dev->data->dev_conf.rxmode.mq_mode; in rte_eth_dev_rss_reta_update()
3908 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); in rte_eth_dev_rss_reta_update()
3909 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, in rte_eth_dev_rss_reta_update()
3918 struct rte_eth_dev *dev; in rte_eth_dev_rss_reta_query() local
3922 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rss_reta_query()
3936 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); in rte_eth_dev_rss_reta_query()
3937 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, in rte_eth_dev_rss_reta_query()
3945 struct rte_eth_dev *dev; in rte_eth_dev_rss_hash_update() local
3951 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rss_hash_update()
3974 mq_mode = dev->data->dev_conf.rxmode.mq_mode; in rte_eth_dev_rss_hash_update()
3980 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); in rte_eth_dev_rss_hash_update()
3981 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, in rte_eth_dev_rss_hash_update()
3989 struct rte_eth_dev *dev; in rte_eth_dev_rss_hash_conf_get() local
3992 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rss_hash_conf_get()
4001 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); in rte_eth_dev_rss_hash_conf_get()
4002 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, in rte_eth_dev_rss_hash_conf_get()
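
The RSS paths validate more than they forward: reta_update checks every entry against nb_rx_queues, and both update calls require an RSS-capable mq_mode and reject hash types outside what the device advertises. Querying the active configuration, a sketch (the 64-byte key buffer is illustrative; a NULL rss_key skips key retrieval):

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void print_rss(uint16_t port_id)
    {
            uint8_t key[64];
            struct rte_eth_rss_conf rss_conf = {
                    .rss_key = key,
                    .rss_key_len = sizeof(key),
            };

            if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) == 0)
                    printf("rss_hf = 0x%" PRIx64 "\n", rss_conf.rss_hf);
    }
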
4010 struct rte_eth_dev *dev; in rte_eth_dev_udp_tunnel_port_add() local
4013 dev = &rte_eth_devices[port_id]; in rte_eth_dev_udp_tunnel_port_add()
4027 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); in rte_eth_dev_udp_tunnel_port_add()
4028 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, in rte_eth_dev_udp_tunnel_port_add()
4036 struct rte_eth_dev *dev; in rte_eth_dev_udp_tunnel_port_delete() local
4039 dev = &rte_eth_devices[port_id]; in rte_eth_dev_udp_tunnel_port_delete()
4053 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); in rte_eth_dev_udp_tunnel_port_delete()
4054 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, in rte_eth_dev_udp_tunnel_port_delete()
4061 struct rte_eth_dev *dev; in rte_eth_led_on() local
4064 dev = &rte_eth_devices[port_id]; in rte_eth_led_on()
4066 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); in rte_eth_led_on()
4067 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); in rte_eth_led_on()
4073 struct rte_eth_dev *dev; in rte_eth_led_off() local
4076 dev = &rte_eth_devices[port_id]; in rte_eth_led_off()
4078 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); in rte_eth_led_off()
4079 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); in rte_eth_led_off()
4087 struct rte_eth_dev *dev; in rte_eth_fec_get_capability() local
4091 dev = &rte_eth_devices[port_id]; in rte_eth_fec_get_capability()
4100 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); in rte_eth_fec_get_capability()
4101 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); in rte_eth_fec_get_capability()
4109 struct rte_eth_dev *dev; in rte_eth_fec_get() local
4112 dev = &rte_eth_devices[port_id]; in rte_eth_fec_get()
4121 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); in rte_eth_fec_get()
4122 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); in rte_eth_fec_get()
4128 struct rte_eth_dev *dev; in rte_eth_fec_set() local
4131 dev = &rte_eth_devices[port_id]; in rte_eth_fec_set()
4133 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); in rte_eth_fec_set()
4134 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); in rte_eth_fec_set()
4145 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; in eth_dev_get_mac_addr_index() local
4154 if (memcmp(addr, &dev->data->mac_addrs[i], in eth_dev_get_mac_addr_index()
4167 struct rte_eth_dev *dev; in rte_eth_dev_mac_addr_add() local
4173 dev = &rte_eth_devices[port_id]; in rte_eth_dev_mac_addr_add()
4182 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); in rte_eth_dev_mac_addr_add()
4203 pool_mask = dev->data->mac_pool_sel[index]; in rte_eth_dev_mac_addr_add()
4211 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); in rte_eth_dev_mac_addr_add()
4215 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); in rte_eth_dev_mac_addr_add()
4218 dev->data->mac_pool_sel[index] |= RTE_BIT64(pool); in rte_eth_dev_mac_addr_add()
4227 struct rte_eth_dev *dev; in rte_eth_dev_mac_addr_remove() local
4231 dev = &rte_eth_devices[port_id]; in rte_eth_dev_mac_addr_remove()
4240 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); in rte_eth_dev_mac_addr_remove()
4252 (*dev->dev_ops->mac_addr_remove)(dev, index); in rte_eth_dev_mac_addr_remove()
4255 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); in rte_eth_dev_mac_addr_remove()
4258 dev->data->mac_pool_sel[index] = 0; in rte_eth_dev_mac_addr_remove()
4266 struct rte_eth_dev *dev; in rte_eth_dev_default_mac_addr_set() local
4270 dev = &rte_eth_devices[port_id]; in rte_eth_dev_default_mac_addr_set()
4282 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); in rte_eth_dev_default_mac_addr_set()
4284 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); in rte_eth_dev_default_mac_addr_set()
4289 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); in rte_eth_dev_default_mac_addr_set()
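
The MAC management trio keeps dev->data->mac_addrs and mac_pool_sel coherent with the hardware: _add returns early when the address/pool pair is already programmed and otherwise extends the slot's pool mask, _remove zeroes the slot after the driver drops it, and default_mac_addr_set copies the new address into slot 0 only on success. Reading the default address back, a sketch (the RTE_ETHER_ADDR_PRT_FMT helpers assume a reasonably recent rte_ether.h):

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_ether.h>

    static void print_mac(uint16_t port_id)
    {
            struct rte_ether_addr addr;

            if (rte_eth_macaddr_get(port_id, &addr) == 0)
                    printf("port %u MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
                           port_id, RTE_ETHER_ADDR_BYTES(&addr));
    }
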
4304 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; in eth_dev_get_hash_mac_addr_index() local
4312 if (!dev->data->hash_mac_addrs) in eth_dev_get_hash_mac_addr_index()
4316 if (memcmp(addr, &dev->data->hash_mac_addrs[i], in eth_dev_get_hash_mac_addr_index()
4329 struct rte_eth_dev *dev; in rte_eth_dev_uc_hash_table_set() local
4332 dev = &rte_eth_devices[port_id]; in rte_eth_dev_uc_hash_table_set()
4368 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); in rte_eth_dev_uc_hash_table_set()
4369 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); in rte_eth_dev_uc_hash_table_set()
4374 &dev->data->hash_mac_addrs[index]); in rte_eth_dev_uc_hash_table_set()
4377 &dev->data->hash_mac_addrs[index]); in rte_eth_dev_uc_hash_table_set()
4386 struct rte_eth_dev *dev; in rte_eth_dev_uc_all_hash_table_set() local
4389 dev = &rte_eth_devices[port_id]; in rte_eth_dev_uc_all_hash_table_set()
4391 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); in rte_eth_dev_uc_all_hash_table_set()
4392 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, in rte_eth_dev_uc_all_hash_table_set()
4399 struct rte_eth_dev *dev; in rte_eth_set_queue_rate_limit() local
4405 dev = &rte_eth_devices[port_id]; in rte_eth_set_queue_rate_limit()
4411 link = dev->data->dev_link; in rte_eth_set_queue_rate_limit()
4427 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); in rte_eth_set_queue_rate_limit()
4428 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, in rte_eth_set_queue_rate_limit()
4453 struct rte_eth_dev *dev; in rte_eth_dev_callback_register() local
4480 dev = &rte_eth_devices[next_port]; in rte_eth_dev_callback_register()
4482 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { in rte_eth_dev_callback_register()
4498 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), in rte_eth_dev_callback_register()
4520 struct rte_eth_dev *dev; in rte_eth_dev_callback_unregister() local
4547 dev = &rte_eth_devices[next_port]; in rte_eth_dev_callback_unregister()
4549 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; in rte_eth_dev_callback_unregister()
4563 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); in rte_eth_dev_callback_unregister()
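
rte_eth_dev_callback_register() walks the per-port link_intr_cbs list and registers each (cb_fn, cb_arg, event) triple at most once; passing RTE_ETH_ALL as the port id registers on every valid port. A link-state-change callback sketch:

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>

    static int
    on_link_event(uint16_t port_id, enum rte_eth_event_type event,
                  void *cb_arg, void *ret_param)
    {
            RTE_SET_USED(cb_arg);
            RTE_SET_USED(ret_param);
            printf("port %u: link state change event %d\n", port_id, event);
            return 0;
    }

    static void watch_link(uint16_t port_id)
    {
            int ret = rte_eth_dev_callback_register(port_id,
                            RTE_ETH_EVENT_INTR_LSC, on_link_event, NULL);

            if (ret != 0)
                    printf("callback register failed: %d\n", ret);
    }
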
4579 struct rte_eth_dev *dev; in rte_eth_dev_rx_intr_ctl() local
4585 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_intr_ctl()
4587 if (!dev->intr_handle) { in rte_eth_dev_rx_intr_ctl()
4592 intr_handle = dev->intr_handle; in rte_eth_dev_rx_intr_ctl()
4598 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { in rte_eth_dev_rx_intr_ctl()
4615 struct rte_eth_dev *dev; in rte_eth_dev_rx_intr_ctl_q_get_fd() local
4621 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_intr_ctl_q_get_fd()
4623 if (queue_id >= dev->data->nb_rx_queues) { in rte_eth_dev_rx_intr_ctl_q_get_fd()
4628 if (!dev->intr_handle) { in rte_eth_dev_rx_intr_ctl_q_get_fd()
4633 intr_handle = dev->intr_handle; in rte_eth_dev_rx_intr_ctl_q_get_fd()
4652 struct rte_eth_dev *dev; in rte_eth_dev_rx_intr_ctl_q() local
4657 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_intr_ctl_q()
4659 if (queue_id >= dev->data->nb_rx_queues) { in rte_eth_dev_rx_intr_ctl_q()
4664 if (!dev->intr_handle) { in rte_eth_dev_rx_intr_ctl_q()
4669 intr_handle = dev->intr_handle; in rte_eth_dev_rx_intr_ctl_q()
4691 struct rte_eth_dev *dev; in rte_eth_dev_rx_intr_enable() local
4695 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_intr_enable()
4697 ret = eth_dev_validate_rx_queue(dev, queue_id); in rte_eth_dev_rx_intr_enable()
4701 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); in rte_eth_dev_rx_intr_enable()
4702 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id)); in rte_eth_dev_rx_intr_enable()
4709 struct rte_eth_dev *dev; in rte_eth_dev_rx_intr_disable() local
4713 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_intr_disable()
4715 ret = eth_dev_validate_rx_queue(dev, queue_id); in rte_eth_dev_rx_intr_disable()
4719 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); in rte_eth_dev_rx_intr_disable()
4720 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id)); in rte_eth_dev_rx_intr_disable()
4732 struct rte_eth_dev *dev; in rte_eth_add_rx_callback() local
4740 dev = &rte_eth_devices[port_id]; in rte_eth_add_rx_callback()
4741 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { in rte_eth_add_rx_callback()
4828 struct rte_eth_dev *dev; in rte_eth_add_tx_callback() local
4837 dev = &rte_eth_devices[port_id]; in rte_eth_add_tx_callback()
4838 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { in rte_eth_add_tx_callback()
4892 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; in rte_eth_remove_rx_callback() local
4898 prev_cb = &dev->post_rx_burst_cbs[queue_id]; in rte_eth_remove_rx_callback()
4926 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; in rte_eth_remove_tx_callback() local
4932 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; in rte_eth_remove_tx_callback()
4951 struct rte_eth_dev *dev; in rte_eth_rx_queue_info_get() local
4954 dev = &rte_eth_devices[port_id]; in rte_eth_rx_queue_info_get()
4956 if (queue_id >= dev->data->nb_rx_queues) { in rte_eth_rx_queue_info_get()
4967 if (dev->data->rx_queues == NULL || in rte_eth_rx_queue_info_get()
4968 dev->data->rx_queues[queue_id] == NULL) { in rte_eth_rx_queue_info_get()
4976 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { in rte_eth_rx_queue_info_get()
4983 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); in rte_eth_rx_queue_info_get()
4986 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); in rte_eth_rx_queue_info_get()
4987 qinfo->queue_state = dev->data->rx_queue_state[queue_id]; in rte_eth_rx_queue_info_get()
4996 struct rte_eth_dev *dev; in rte_eth_tx_queue_info_get() local
4999 dev = &rte_eth_devices[port_id]; in rte_eth_tx_queue_info_get()
5001 if (queue_id >= dev->data->nb_tx_queues) { in rte_eth_tx_queue_info_get()
5012 if (dev->data->tx_queues == NULL || in rte_eth_tx_queue_info_get()
5013 dev->data->tx_queues[queue_id] == NULL) { in rte_eth_tx_queue_info_get()
5021 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { in rte_eth_tx_queue_info_get()
5028 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); in rte_eth_tx_queue_info_get()
5031 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); in rte_eth_tx_queue_info_get()
5032 qinfo->queue_state = dev->data->tx_queue_state[queue_id]; in rte_eth_tx_queue_info_get()
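
Both queue-info getters bounds-check the queue id, reject queues that were never set up as well as hairpin queues, then let the PMD fill the struct and append the ethdev layer's own queue state. Sketch:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void print_rxq0(uint16_t port_id)
    {
            struct rte_eth_rxq_info qinfo;

            if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
                    printf("rxq0: %u descriptors, state %u\n",
                           qinfo.nb_desc, qinfo.queue_state);
    }
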
5041 struct rte_eth_dev *dev; in rte_eth_rx_burst_mode_get() local
5044 dev = &rte_eth_devices[port_id]; in rte_eth_rx_burst_mode_get()
5046 if (queue_id >= dev->data->nb_rx_queues) { in rte_eth_rx_burst_mode_get()
5058 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); in rte_eth_rx_burst_mode_get()
5061 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); in rte_eth_rx_burst_mode_get()
5068 struct rte_eth_dev *dev; in rte_eth_tx_burst_mode_get() local
5071 dev = &rte_eth_devices[port_id]; in rte_eth_tx_burst_mode_get()
5073 if (queue_id >= dev->data->nb_tx_queues) { in rte_eth_tx_burst_mode_get()
5085 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); in rte_eth_tx_burst_mode_get()
5088 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); in rte_eth_tx_burst_mode_get()
5095 struct rte_eth_dev *dev; in rte_eth_get_monitor_addr() local
5098 dev = &rte_eth_devices[port_id]; in rte_eth_get_monitor_addr()
5100 if (queue_id >= dev->data->nb_rx_queues) { in rte_eth_get_monitor_addr()
5112 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP); in rte_eth_get_monitor_addr()
5114 dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc)); in rte_eth_get_monitor_addr()
5122 struct rte_eth_dev *dev; in rte_eth_dev_set_mc_addr_list() local
5125 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_mc_addr_list()
5127 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); in rte_eth_dev_set_mc_addr_list()
5128 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, in rte_eth_dev_set_mc_addr_list()
5135 struct rte_eth_dev *dev; in rte_eth_timesync_enable() local
5138 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_enable()
5140 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); in rte_eth_timesync_enable()
5141 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); in rte_eth_timesync_enable()
5147 struct rte_eth_dev *dev; in rte_eth_timesync_disable() local
5150 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_disable()
5152 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); in rte_eth_timesync_disable()
5153 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); in rte_eth_timesync_disable()
5160 struct rte_eth_dev *dev; in rte_eth_timesync_read_rx_timestamp() local
5163 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_read_rx_timestamp()
5172 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); in rte_eth_timesync_read_rx_timestamp()
5173 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) in rte_eth_timesync_read_rx_timestamp()
5174 (dev, timestamp, flags)); in rte_eth_timesync_read_rx_timestamp()
5181 struct rte_eth_dev *dev; in rte_eth_timesync_read_tx_timestamp() local
5184 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_read_tx_timestamp()
5193 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); in rte_eth_timesync_read_tx_timestamp()
5194 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) in rte_eth_timesync_read_tx_timestamp()
5195 (dev, timestamp)); in rte_eth_timesync_read_tx_timestamp()
5201 struct rte_eth_dev *dev; in rte_eth_timesync_adjust_time() local
5204 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_adjust_time()
5206 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); in rte_eth_timesync_adjust_time()
5207 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta)); in rte_eth_timesync_adjust_time()
5213 struct rte_eth_dev *dev; in rte_eth_timesync_read_time() local
5216 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_read_time()
5225 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); in rte_eth_timesync_read_time()
5226 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, in rte_eth_timesync_read_time()
5233 struct rte_eth_dev *dev; in rte_eth_timesync_write_time() local
5236 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_write_time()
5245 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); in rte_eth_timesync_write_time()
5246 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, in rte_eth_timesync_write_time()
5253 struct rte_eth_dev *dev; in rte_eth_read_clock() local
5256 dev = &rte_eth_devices[port_id]; in rte_eth_read_clock()
5264 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); in rte_eth_read_clock()
5265 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); in rte_eth_read_clock()
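
The timesync and clock entry points are thin dispatchers: each validates the arguments and forwards to the PMD op, returning -ENOTSUP when the driver does not implement IEEE 1588 support. Reading the NIC's PTP clock, a sketch:

    #include <stdio.h>
    #include <time.h>
    #include <rte_ethdev.h>

    static void print_nic_time(uint16_t port_id)
    {
            struct timespec ts;

            if (rte_eth_timesync_enable(port_id) == 0 &&
                rte_eth_timesync_read_time(port_id, &ts) == 0)
                    printf("NIC time: %lld.%09ld\n",
                           (long long)ts.tv_sec, ts.tv_nsec);
    }
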
5271 struct rte_eth_dev *dev; in rte_eth_dev_get_reg_info() local
5274 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_reg_info()
5283 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); in rte_eth_dev_get_reg_info()
5284 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); in rte_eth_dev_get_reg_info()
5290 struct rte_eth_dev *dev; in rte_eth_dev_get_eeprom_length() local
5293 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_eeprom_length()
5295 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); in rte_eth_dev_get_eeprom_length()
5296 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); in rte_eth_dev_get_eeprom_length()
5302 struct rte_eth_dev *dev; in rte_eth_dev_get_eeprom() local
5305 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_eeprom()
5314 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); in rte_eth_dev_get_eeprom()
5315 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); in rte_eth_dev_get_eeprom()
5321 struct rte_eth_dev *dev; in rte_eth_dev_set_eeprom() local
5324 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_eeprom()
5333 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); in rte_eth_dev_set_eeprom()
5334 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); in rte_eth_dev_set_eeprom()
5341 struct rte_eth_dev *dev; in rte_eth_dev_get_module_info() local
5344 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_module_info()
5353 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); in rte_eth_dev_get_module_info()
5354 return (*dev->dev_ops->get_module_info)(dev, modinfo); in rte_eth_dev_get_module_info()
5361 struct rte_eth_dev *dev; in rte_eth_dev_get_module_eeprom() local
5364 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_module_eeprom()
5387 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); in rte_eth_dev_get_module_eeprom()
5388 return (*dev->dev_ops->get_module_eeprom)(dev, info); in rte_eth_dev_get_module_eeprom()
5395 struct rte_eth_dev *dev; in rte_eth_dev_get_dcb_info() local
5398 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_dcb_info()
5409 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); in rte_eth_dev_get_dcb_info()
5410 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); in rte_eth_dev_get_dcb_info()
5453 struct rte_eth_dev *dev; in rte_eth_dev_hairpin_capability_get() local
5456 dev = &rte_eth_devices[port_id]; in rte_eth_dev_hairpin_capability_get()
5465 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); in rte_eth_dev_hairpin_capability_get()
5467 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); in rte_eth_dev_hairpin_capability_get()
5473 struct rte_eth_dev *dev; in rte_eth_dev_pool_ops_supported() local
5476 dev = &rte_eth_devices[port_id]; in rte_eth_dev_pool_ops_supported()
5485 if (*dev->dev_ops->pool_ops_supported == NULL) in rte_eth_dev_pool_ops_supported()
5488 return (*dev->dev_ops->pool_ops_supported)(dev, pool); in rte_eth_dev_pool_ops_supported()
5732 struct rte_eth_dev *dev; in rte_eth_representor_info_get() local
5735 dev = &rte_eth_devices[port_id]; in rte_eth_representor_info_get()
5737 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP); in rte_eth_representor_info_get()
5738 return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info)); in rte_eth_representor_info_get()
5744 struct rte_eth_dev *dev; in rte_eth_rx_metadata_negotiate() local
5747 dev = &rte_eth_devices[port_id]; in rte_eth_rx_metadata_negotiate()
5749 if (dev->data->dev_configured != 0) { in rte_eth_rx_metadata_negotiate()
5761 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP); in rte_eth_rx_metadata_negotiate()
5763 (*dev->dev_ops->rx_metadata_negotiate)(dev, features)); in rte_eth_rx_metadata_negotiate()
5770 struct rte_eth_dev *dev; in rte_eth_ip_reassembly_capability_get() local
5773 dev = &rte_eth_devices[port_id]; in rte_eth_ip_reassembly_capability_get()
5775 if (dev->data->dev_configured == 0) { in rte_eth_ip_reassembly_capability_get()
5788 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_capability_get, in rte_eth_ip_reassembly_capability_get()
5792 return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get) in rte_eth_ip_reassembly_capability_get()
5793 (dev, reassembly_capa)); in rte_eth_ip_reassembly_capability_get()
5800 struct rte_eth_dev *dev; in rte_eth_ip_reassembly_conf_get() local
5803 dev = &rte_eth_devices[port_id]; in rte_eth_ip_reassembly_conf_get()
5805 if (dev->data->dev_configured == 0) { in rte_eth_ip_reassembly_conf_get()
5818 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_get, in rte_eth_ip_reassembly_conf_get()
5822 (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf)); in rte_eth_ip_reassembly_conf_get()
5829 struct rte_eth_dev *dev; in rte_eth_ip_reassembly_conf_set() local
5832 dev = &rte_eth_devices[port_id]; in rte_eth_ip_reassembly_conf_set()
5834 if (dev->data->dev_configured == 0) { in rte_eth_ip_reassembly_conf_set()
5842 if (dev->data->dev_started != 0) { in rte_eth_ip_reassembly_conf_set()
5856 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_set, in rte_eth_ip_reassembly_conf_set()
5859 (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf)); in rte_eth_ip_reassembly_conf_set()
5865 struct rte_eth_dev *dev; in rte_eth_dev_priv_dump() local
5868 dev = &rte_eth_devices[port_id]; in rte_eth_dev_priv_dump()
5875 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_dev_priv_dump, -ENOTSUP); in rte_eth_dev_priv_dump()
5876 return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file)); in rte_eth_dev_priv_dump()