Lines matching refs:dev in DPDK's rte_ethdev.c (matches grouped by function; each line keeps its line number in the source file)
848 eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) in eth_dev_rx_queue_config() argument
850 uint16_t old_nb_queues = dev->data->nb_rx_queues; in eth_dev_rx_queue_config()
854 if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */ in eth_dev_rx_queue_config()
855 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", in eth_dev_rx_queue_config()
856 sizeof(dev->data->rx_queues[0]) * nb_queues, in eth_dev_rx_queue_config()
858 if (dev->data->rx_queues == NULL) { in eth_dev_rx_queue_config()
859 dev->data->nb_rx_queues = 0; in eth_dev_rx_queue_config()
862 } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */ in eth_dev_rx_queue_config()
863 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP); in eth_dev_rx_queue_config()
865 rxq = dev->data->rx_queues; in eth_dev_rx_queue_config()
868 (*dev->dev_ops->rx_queue_release)(rxq[i]); in eth_dev_rx_queue_config()
880 dev->data->rx_queues = rxq; in eth_dev_rx_queue_config()
882 } else if (dev->data->rx_queues != NULL && nb_queues == 0) { in eth_dev_rx_queue_config()
883 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP); in eth_dev_rx_queue_config()
885 rxq = dev->data->rx_queues; in eth_dev_rx_queue_config()
888 (*dev->dev_ops->rx_queue_release)(rxq[i]); in eth_dev_rx_queue_config()
890 rte_free(dev->data->rx_queues); in eth_dev_rx_queue_config()
891 dev->data->rx_queues = NULL; in eth_dev_rx_queue_config()
893 dev->data->nb_rx_queues = nb_queues; in eth_dev_rx_queue_config()
898 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id) in eth_dev_validate_rx_queue() argument
902 if (rx_queue_id >= dev->data->nb_rx_queues) { in eth_dev_validate_rx_queue()
903 port_id = dev->data->port_id; in eth_dev_validate_rx_queue()
910 if (dev->data->rx_queues[rx_queue_id] == NULL) { in eth_dev_validate_rx_queue()
911 port_id = dev->data->port_id; in eth_dev_validate_rx_queue()
922 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id) in eth_dev_validate_tx_queue() argument
926 if (tx_queue_id >= dev->data->nb_tx_queues) { in eth_dev_validate_tx_queue()
927 port_id = dev->data->port_id; in eth_dev_validate_tx_queue()
934 if (dev->data->tx_queues[tx_queue_id] == NULL) { in eth_dev_validate_tx_queue()
935 port_id = dev->data->port_id; in eth_dev_validate_tx_queue()
948 struct rte_eth_dev *dev; in rte_eth_dev_rx_queue_start() local
953 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_queue_start()
954 if (!dev->data->dev_started) { in rte_eth_dev_rx_queue_start()
961 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); in rte_eth_dev_rx_queue_start()
965 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP); in rte_eth_dev_rx_queue_start()
967 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { in rte_eth_dev_rx_queue_start()
974 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { in rte_eth_dev_rx_queue_start()
981 return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, in rte_eth_dev_rx_queue_start()
989 struct rte_eth_dev *dev; in rte_eth_dev_rx_queue_stop() local
994 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_queue_stop()
996 ret = eth_dev_validate_rx_queue(dev, rx_queue_id); in rte_eth_dev_rx_queue_stop()
1000 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP); in rte_eth_dev_rx_queue_stop()
1002 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) { in rte_eth_dev_rx_queue_stop()
1009 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { in rte_eth_dev_rx_queue_stop()
1016 return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id)); in rte_eth_dev_rx_queue_stop()
1023 struct rte_eth_dev *dev; in rte_eth_dev_tx_queue_start() local
1028 dev = &rte_eth_devices[port_id]; in rte_eth_dev_tx_queue_start()
1029 if (!dev->data->dev_started) { in rte_eth_dev_tx_queue_start()
1036 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); in rte_eth_dev_tx_queue_start()
1040 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP); in rte_eth_dev_tx_queue_start()
1042 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { in rte_eth_dev_tx_queue_start()
1049 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) { in rte_eth_dev_tx_queue_start()
1056 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id)); in rte_eth_dev_tx_queue_start()
1062 struct rte_eth_dev *dev; in rte_eth_dev_tx_queue_stop() local
1067 dev = &rte_eth_devices[port_id]; in rte_eth_dev_tx_queue_stop()
1069 ret = eth_dev_validate_tx_queue(dev, tx_queue_id); in rte_eth_dev_tx_queue_stop()
1073 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); in rte_eth_dev_tx_queue_stop()
1075 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) { in rte_eth_dev_tx_queue_stop()
1082 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) { in rte_eth_dev_tx_queue_stop()
1089 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id)); in rte_eth_dev_tx_queue_stop()
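
The four start/stop entry points above operate on one queue at a time, require the port itself to be started (note the dev_started checks), and refuse hairpin queues. Their usual client is a queue configured with deferred start. A minimal sketch, assuming the port is already configured and mbuf_pool is an existing mempool:

    #include <rte_ethdev.h>

    static int
    setup_deferred_rxq(uint16_t port_id, struct rte_mempool *mbuf_pool)
    {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rxq_conf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
            return ret;

        rxq_conf = dev_info.default_rxconf;
        rxq_conf.rx_deferred_start = 1;  /* keep queue 0 stopped across dev_start */

        ret = rte_eth_rx_queue_setup(port_id, 0, 512,
                                     rte_eth_dev_socket_id(port_id),
                                     &rxq_conf, mbuf_pool);
        if (ret < 0)
            return ret;
        ret = rte_eth_dev_start(port_id);
        if (ret < 0)
            return ret;

        /* later, once the application is ready to poll queue 0 */
        return rte_eth_dev_rx_queue_start(port_id, 0);
    }
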
1094 eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) in eth_dev_tx_queue_config() argument
1096 uint16_t old_nb_queues = dev->data->nb_tx_queues; in eth_dev_tx_queue_config()
1100 if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */ in eth_dev_tx_queue_config()
1101 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", in eth_dev_tx_queue_config()
1102 sizeof(dev->data->tx_queues[0]) * nb_queues, in eth_dev_tx_queue_config()
1104 if (dev->data->tx_queues == NULL) { in eth_dev_tx_queue_config()
1105 dev->data->nb_tx_queues = 0; in eth_dev_tx_queue_config()
1108 } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */ in eth_dev_tx_queue_config()
1109 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP); in eth_dev_tx_queue_config()
1111 txq = dev->data->tx_queues; in eth_dev_tx_queue_config()
1114 (*dev->dev_ops->tx_queue_release)(txq[i]); in eth_dev_tx_queue_config()
1126 dev->data->tx_queues = txq; in eth_dev_tx_queue_config()
1128 } else if (dev->data->tx_queues != NULL && nb_queues == 0) { in eth_dev_tx_queue_config()
1129 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP); in eth_dev_tx_queue_config()
1131 txq = dev->data->tx_queues; in eth_dev_tx_queue_config()
1134 (*dev->dev_ops->tx_queue_release)(txq[i]); in eth_dev_tx_queue_config()
1136 rte_free(dev->data->tx_queues); in eth_dev_tx_queue_config()
1137 dev->data->tx_queues = NULL; in eth_dev_tx_queue_config()
1139 dev->data->nb_tx_queues = nb_queues; in eth_dev_tx_queue_config()
1292 struct rte_eth_dev *dev; in rte_eth_dev_configure() local
1300 dev = &rte_eth_devices[port_id]; in rte_eth_dev_configure()
1302 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); in rte_eth_dev_configure()
1304 if (dev->data->dev_started) { in rte_eth_dev_configure()
1312 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf)); in rte_eth_dev_configure()
1318 if (dev_conf != &dev->data->dev_conf) in rte_eth_dev_configure()
1319 memcpy(&dev->data->dev_conf, dev_conf, in rte_eth_dev_configure()
1320 sizeof(dev->data->dev_conf)); in rte_eth_dev_configure()
1378 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { in rte_eth_dev_configure()
1380 dev->device->driver->name); in rte_eth_dev_configure()
1385 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { in rte_eth_dev_configure()
1387 dev->device->driver->name); in rte_eth_dev_configure()
1416 dev->data->dev_conf.rxmode.max_rx_pkt_len = in rte_eth_dev_configure()
1426 dev->data->dev_conf.rxmode.max_lro_pkt_size = in rte_eth_dev_configure()
1427 dev->data->dev_conf.rxmode.max_rx_pkt_len; in rte_eth_dev_configure()
1429 dev->data->dev_conf.rxmode.max_lro_pkt_size, in rte_eth_dev_configure()
1430 dev->data->dev_conf.rxmode.max_rx_pkt_len, in rte_eth_dev_configure()
1460 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = in rte_eth_dev_configure()
1489 diag = eth_dev_rx_queue_config(dev, nb_rx_q); in rte_eth_dev_configure()
1498 diag = eth_dev_tx_queue_config(dev, nb_tx_q); in rte_eth_dev_configure()
1503 eth_dev_rx_queue_config(dev, 0); in rte_eth_dev_configure()
1508 diag = (*dev->dev_ops->dev_configure)(dev); in rte_eth_dev_configure()
1517 diag = __rte_eth_dev_profile_init(port_id, dev); in rte_eth_dev_configure()
1528 dev->data->dev_conf.rxmode.offloads, "Rx", in rte_eth_dev_configure()
1538 dev->data->dev_conf.txmode.offloads, "Tx", in rte_eth_dev_configure()
1548 eth_dev_rx_queue_config(dev, 0); in rte_eth_dev_configure()
1549 eth_dev_tx_queue_config(dev, 0); in rte_eth_dev_configure()
1551 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf)); in rte_eth_dev_configure()
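
rte_eth_dev_configure() is the entry point of the public setup sequence: it saves the previous rte_eth_conf in orig_conf, sizes the Rx/Tx queue pointer arrays through the two eth_dev_*_queue_config() helpers, and on any failure of the driver's dev_configure callback rolls both queue arrays and the saved configuration back. From the application side the canonical order is configure, per-queue setup, start. A minimal sketch (queue and descriptor counts are arbitrary here, and mbuf_pool is assumed to exist):

    #include <rte_ethdev.h>

    static const struct rte_eth_conf port_conf = {
        .rxmode = { .mq_mode = ETH_MQ_RX_NONE },
    };

    static int
    port_init(uint16_t port_id, struct rte_mempool *mbuf_pool)
    {
        int ret;

        ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
        if (ret != 0)
            return ret;
        ret = rte_eth_rx_queue_setup(port_id, 0, 512,
                                     rte_eth_dev_socket_id(port_id),
                                     NULL, mbuf_pool);
        if (ret < 0)
            return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 512,
                                     rte_eth_dev_socket_id(port_id), NULL);
        if (ret < 0)
            return ret;
        return rte_eth_dev_start(port_id);
    }
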
1558 rte_eth_dev_internal_reset(struct rte_eth_dev *dev) in rte_eth_dev_internal_reset() argument
1560 if (dev->data->dev_started) { in rte_eth_dev_internal_reset()
1562 dev->data->port_id); in rte_eth_dev_internal_reset()
1566 eth_dev_rx_queue_config(dev, 0); in rte_eth_dev_internal_reset()
1567 eth_dev_tx_queue_config(dev, 0); in rte_eth_dev_internal_reset()
1569 memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf)); in rte_eth_dev_internal_reset()
1573 eth_dev_mac_restore(struct rte_eth_dev *dev, in eth_dev_mac_restore() argument
1582 addr = &dev->data->mac_addrs[0]; in eth_dev_mac_restore()
1583 if (*dev->dev_ops->mac_addr_set != NULL) in eth_dev_mac_restore()
1584 (*dev->dev_ops->mac_addr_set)(dev, addr); in eth_dev_mac_restore()
1585 else if (*dev->dev_ops->mac_addr_add != NULL) in eth_dev_mac_restore()
1586 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool); in eth_dev_mac_restore()
1588 if (*dev->dev_ops->mac_addr_add != NULL) { in eth_dev_mac_restore()
1590 addr = &dev->data->mac_addrs[i]; in eth_dev_mac_restore()
1597 pool_mask = dev->data->mac_pool_sel[i]; in eth_dev_mac_restore()
1601 (*dev->dev_ops->mac_addr_add)(dev, in eth_dev_mac_restore()
1611 eth_dev_config_restore(struct rte_eth_dev *dev, in eth_dev_config_restore() argument
1617 eth_dev_mac_restore(dev, dev_info); in eth_dev_config_restore()
1625 *dev->dev_ops->promiscuous_enable != NULL) { in eth_dev_config_restore()
1627 (*dev->dev_ops->promiscuous_enable)(dev)); in eth_dev_config_restore()
1635 *dev->dev_ops->promiscuous_disable != NULL) { in eth_dev_config_restore()
1637 (*dev->dev_ops->promiscuous_disable)(dev)); in eth_dev_config_restore()
1652 *dev->dev_ops->allmulticast_enable != NULL) { in eth_dev_config_restore()
1654 (*dev->dev_ops->allmulticast_enable)(dev)); in eth_dev_config_restore()
1662 *dev->dev_ops->allmulticast_disable != NULL) { in eth_dev_config_restore()
1664 (*dev->dev_ops->allmulticast_disable)(dev)); in eth_dev_config_restore()
1679 struct rte_eth_dev *dev; in rte_eth_dev_start() local
1686 dev = &rte_eth_devices[port_id]; in rte_eth_dev_start()
1688 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); in rte_eth_dev_start()
1690 if (dev->data->dev_started != 0) { in rte_eth_dev_start()
1703 eth_dev_mac_restore(dev, &dev_info); in rte_eth_dev_start()
1705 diag = (*dev->dev_ops->dev_start)(dev); in rte_eth_dev_start()
1707 dev->data->dev_started = 1; in rte_eth_dev_start()
1711 ret = eth_dev_config_restore(dev, &dev_info, port_id); in rte_eth_dev_start()
1726 if (dev->data->dev_conf.intr_conf.lsc == 0) { in rte_eth_dev_start()
1727 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); in rte_eth_dev_start()
1728 (*dev->dev_ops->link_update)(dev, 0); in rte_eth_dev_start()
1738 struct rte_eth_dev *dev; in rte_eth_dev_stop() local
1742 dev = &rte_eth_devices[port_id]; in rte_eth_dev_stop()
1744 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP); in rte_eth_dev_stop()
1746 if (dev->data->dev_started == 0) { in rte_eth_dev_stop()
1753 dev->data->dev_started = 0; in rte_eth_dev_stop()
1754 ret = (*dev->dev_ops->dev_stop)(dev); in rte_eth_dev_stop()
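
rte_eth_dev_start() does more than call the driver: it re-applies saved MAC addresses via eth_dev_mac_restore() before the driver start, re-applies promiscuous and all-multicast state through eth_dev_config_restore() once dev_started is set, and forces one link_update when link-state interrupts are off. In this version rte_eth_dev_stop() also returns a status, so a restart loop should check both calls. A fragment:

    #include <stdio.h>
    #include <rte_errno.h>
    #include <rte_ethdev.h>

    int ret;

    ret = rte_eth_dev_stop(port_id);
    if (ret != 0)
        printf("stop of port %u failed: %s\n", port_id, rte_strerror(-ret));
    /* settings changed while stopped are restored by the next start */
    ret = rte_eth_dev_start(port_id);
    if (ret != 0)
        printf("start of port %u failed: %s\n", port_id, rte_strerror(-ret));
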
1763 struct rte_eth_dev *dev; in rte_eth_dev_set_link_up() local
1767 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_link_up()
1769 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP); in rte_eth_dev_set_link_up()
1770 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev)); in rte_eth_dev_set_link_up()
1776 struct rte_eth_dev *dev; in rte_eth_dev_set_link_down() local
1780 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_link_down()
1782 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP); in rte_eth_dev_set_link_down()
1783 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev)); in rte_eth_dev_set_link_down()
1789 struct rte_eth_dev *dev; in rte_eth_dev_close() local
1794 dev = &rte_eth_devices[port_id]; in rte_eth_dev_close()
1796 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); in rte_eth_dev_close()
1797 *lasterr = (*dev->dev_ops->dev_close)(dev); in rte_eth_dev_close()
1802 *lasterr = rte_eth_dev_release_port(dev); in rte_eth_dev_close()
1810 struct rte_eth_dev *dev; in rte_eth_dev_reset() local
1814 dev = &rte_eth_devices[port_id]; in rte_eth_dev_reset()
1816 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP); in rte_eth_dev_reset()
1824 ret = dev->dev_ops->dev_reset(dev); in rte_eth_dev_reset()
1832 struct rte_eth_dev *dev; in rte_eth_dev_is_removed() local
1837 dev = &rte_eth_devices[port_id]; in rte_eth_dev_is_removed()
1839 if (dev->state == RTE_ETH_DEV_REMOVED) in rte_eth_dev_is_removed()
1842 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0); in rte_eth_dev_is_removed()
1844 ret = dev->dev_ops->is_removed(dev); in rte_eth_dev_is_removed()
1847 dev->state = RTE_ETH_DEV_REMOVED; in rte_eth_dev_is_removed()
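
rte_eth_dev_reset() only asks the driver to reset the hardware; on success the port comes back unconfigured, so the caller must redo the whole configure/queue-setup/start sequence. rte_eth_dev_is_removed() is the cheap check for a hot-unplugged device and caches the result in dev->state. A recovery fragment, reusing the hypothetical port_init() and mbuf_pool from the earlier sketch:

    #include <errno.h>
    #include <rte_ethdev.h>

    int ret;

    if (rte_eth_dev_is_removed(port_id))
        return -ENODEV;        /* device is gone: detach rather than reset */

    ret = rte_eth_dev_reset(port_id);
    if (ret == 0)
        ret = port_init(port_id, mbuf_pool);   /* full reconfiguration */
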
1931 struct rte_eth_dev *dev; in rte_eth_rx_queue_setup() local
1938 dev = &rte_eth_devices[port_id]; in rte_eth_rx_queue_setup()
1939 if (rx_queue_id >= dev->data->nb_rx_queues) { in rte_eth_rx_queue_setup()
1944 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); in rte_eth_rx_queue_setup()
2028 if (dev->data->dev_started && in rte_eth_rx_queue_setup()
2033 if (dev->data->dev_started && in rte_eth_rx_queue_setup()
2034 (dev->data->rx_queue_state[rx_queue_id] != in rte_eth_rx_queue_setup()
2038 rxq = dev->data->rx_queues; in rte_eth_rx_queue_setup()
2040 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, in rte_eth_rx_queue_setup()
2042 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]); in rte_eth_rx_queue_setup()
2059 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads; in rte_eth_rx_queue_setup()
2085 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0) in rte_eth_rx_queue_setup()
2086 dev->data->dev_conf.rxmode.max_lro_pkt_size = in rte_eth_rx_queue_setup()
2087 dev->data->dev_conf.rxmode.max_rx_pkt_len; in rte_eth_rx_queue_setup()
2089 dev->data->dev_conf.rxmode.max_lro_pkt_size, in rte_eth_rx_queue_setup()
2090 dev->data->dev_conf.rxmode.max_rx_pkt_len, in rte_eth_rx_queue_setup()
2096 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, in rte_eth_rx_queue_setup()
2099 if (!dev->data->min_rx_buf_size || in rte_eth_rx_queue_setup()
2100 dev->data->min_rx_buf_size > mbp_buf_size) in rte_eth_rx_queue_setup()
2101 dev->data->min_rx_buf_size = mbp_buf_size; in rte_eth_rx_queue_setup()
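
Two details of rte_eth_rx_queue_setup() are easy to miss in the listing: local_conf.offloads is masked against the port-wide rxmode.offloads so that only genuinely per-queue offloads reach the driver, and min_rx_buf_size tracks the smallest mbuf data room across all configured queues. Requesting a per-queue offload therefore starts from dev_info; a fragment (mbuf_pool assumed in scope):

    struct rte_eth_dev_info dev_info;
    struct rte_eth_rxconf rxq_conf;
    int ret;

    ret = rte_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0)
        return ret;

    rxq_conf = dev_info.default_rxconf;
    if (dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_SCATTER)
        rxq_conf.offloads |= DEV_RX_OFFLOAD_SCATTER;  /* per-queue offload */

    ret = rte_eth_rx_queue_setup(port_id, 0, 512,
                                 rte_eth_dev_socket_id(port_id),
                                 &rxq_conf, mbuf_pool);
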
2115 struct rte_eth_dev *dev; in rte_eth_rx_hairpin_queue_setup() local
2123 dev = &rte_eth_devices[port_id]; in rte_eth_rx_hairpin_queue_setup()
2124 if (rx_queue_id >= dev->data->nb_rx_queues) { in rte_eth_rx_hairpin_queue_setup()
2131 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup, in rte_eth_rx_hairpin_queue_setup()
2154 for (i = 0, count = 0; i < dev->data->nb_rx_queues && in rte_eth_rx_hairpin_queue_setup()
2156 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i)) in rte_eth_rx_hairpin_queue_setup()
2164 if (dev->data->dev_started) in rte_eth_rx_hairpin_queue_setup()
2166 rxq = dev->data->rx_queues; in rte_eth_rx_hairpin_queue_setup()
2168 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, in rte_eth_rx_hairpin_queue_setup()
2170 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]); in rte_eth_rx_hairpin_queue_setup()
2173 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id, in rte_eth_rx_hairpin_queue_setup()
2176 dev->data->rx_queue_state[rx_queue_id] = in rte_eth_rx_hairpin_queue_setup()
2186 struct rte_eth_dev *dev; in rte_eth_tx_queue_setup() local
2194 dev = &rte_eth_devices[port_id]; in rte_eth_tx_queue_setup()
2195 if (tx_queue_id >= dev->data->nb_tx_queues) { in rte_eth_tx_queue_setup()
2200 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); in rte_eth_tx_queue_setup()
2224 if (dev->data->dev_started && in rte_eth_tx_queue_setup()
2229 if (dev->data->dev_started && in rte_eth_tx_queue_setup()
2230 (dev->data->tx_queue_state[tx_queue_id] != in rte_eth_tx_queue_setup()
2234 txq = dev->data->tx_queues; in rte_eth_tx_queue_setup()
2236 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, in rte_eth_tx_queue_setup()
2238 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]); in rte_eth_tx_queue_setup()
2255 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads; in rte_eth_tx_queue_setup()
2277 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev, in rte_eth_tx_queue_setup()
2286 struct rte_eth_dev *dev; in rte_eth_tx_hairpin_queue_setup() local
2294 dev = &rte_eth_devices[port_id]; in rte_eth_tx_hairpin_queue_setup()
2295 if (tx_queue_id >= dev->data->nb_tx_queues) { in rte_eth_tx_hairpin_queue_setup()
2302 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup, in rte_eth_tx_hairpin_queue_setup()
2325 for (i = 0, count = 0; i < dev->data->nb_tx_queues && in rte_eth_tx_hairpin_queue_setup()
2327 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i)) in rte_eth_tx_hairpin_queue_setup()
2335 if (dev->data->dev_started) in rte_eth_tx_hairpin_queue_setup()
2337 txq = dev->data->tx_queues; in rte_eth_tx_hairpin_queue_setup()
2339 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, in rte_eth_tx_hairpin_queue_setup()
2341 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]); in rte_eth_tx_hairpin_queue_setup()
2344 ret = (*dev->dev_ops->tx_hairpin_queue_setup) in rte_eth_tx_hairpin_queue_setup()
2345 (dev, tx_queue_id, nb_tx_desc, conf); in rte_eth_tx_hairpin_queue_setup()
2347 dev->data->tx_queue_state[tx_queue_id] = in rte_eth_tx_hairpin_queue_setup()
2355 struct rte_eth_dev *dev; in rte_eth_hairpin_bind() local
2359 dev = &rte_eth_devices[tx_port]; in rte_eth_hairpin_bind()
2360 if (dev->data->dev_started == 0) { in rte_eth_hairpin_bind()
2365 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP); in rte_eth_hairpin_bind()
2366 ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port); in rte_eth_hairpin_bind()
2378 struct rte_eth_dev *dev; in rte_eth_hairpin_unbind() local
2382 dev = &rte_eth_devices[tx_port]; in rte_eth_hairpin_unbind()
2383 if (dev->data->dev_started == 0) { in rte_eth_hairpin_unbind()
2388 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP); in rte_eth_hairpin_unbind()
2389 ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port); in rte_eth_hairpin_unbind()
2402 struct rte_eth_dev *dev; in rte_eth_hairpin_get_peer_ports() local
2409 dev = &rte_eth_devices[port_id]; in rte_eth_hairpin_get_peer_ports()
2410 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports, in rte_eth_hairpin_get_peer_ports()
2413 ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports, in rte_eth_hairpin_get_peer_ports()
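
Hairpin queues forward packets between an Rx and a Tx queue inside the NIC; the loops over nb_rx_queues/nb_tx_queues above only bound how many hairpin queues may coexist with normal ones, and rte_eth_hairpin_bind()/unbind() matter only for queues set up with manual_bind. A same-port sketch pairing Rx queue 1 with Tx queue 1 (queue ids and descriptor count are arbitrary):

    struct rte_eth_hairpin_cap cap;
    struct rte_eth_hairpin_conf hpconf = { .peer_count = 1 };
    int ret;

    ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
    if (ret != 0)
        return ret;                   /* -ENOTSUP: no hairpin support */

    hpconf.peers[0].port = port_id;   /* same-port hairpin */
    hpconf.peers[0].queue = 1;        /* peer Tx queue */
    ret = rte_eth_rx_hairpin_queue_setup(port_id, 1, 128, &hpconf);
    if (ret != 0)
        return ret;

    hpconf.peers[0].queue = 1;        /* peer Rx queue */
    ret = rte_eth_tx_hairpin_queue_setup(port_id, 1, 128, &hpconf);
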
2468 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; in rte_eth_tx_done_cleanup() local
2473 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP); in rte_eth_tx_done_cleanup()
2476 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id], in rte_eth_tx_done_cleanup()
2484 struct rte_eth_dev *dev; in rte_eth_promiscuous_enable() local
2488 dev = &rte_eth_devices[port_id]; in rte_eth_promiscuous_enable()
2490 if (dev->data->promiscuous == 1) in rte_eth_promiscuous_enable()
2493 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); in rte_eth_promiscuous_enable()
2495 diag = (*dev->dev_ops->promiscuous_enable)(dev); in rte_eth_promiscuous_enable()
2496 dev->data->promiscuous = (diag == 0) ? 1 : 0; in rte_eth_promiscuous_enable()
2504 struct rte_eth_dev *dev; in rte_eth_promiscuous_disable() local
2508 dev = &rte_eth_devices[port_id]; in rte_eth_promiscuous_disable()
2510 if (dev->data->promiscuous == 0) in rte_eth_promiscuous_disable()
2513 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); in rte_eth_promiscuous_disable()
2515 dev->data->promiscuous = 0; in rte_eth_promiscuous_disable()
2516 diag = (*dev->dev_ops->promiscuous_disable)(dev); in rte_eth_promiscuous_disable()
2518 dev->data->promiscuous = 1; in rte_eth_promiscuous_disable()
2526 struct rte_eth_dev *dev; in rte_eth_promiscuous_get() local
2530 dev = &rte_eth_devices[port_id]; in rte_eth_promiscuous_get()
2531 return dev->data->promiscuous; in rte_eth_promiscuous_get()
2537 struct rte_eth_dev *dev; in rte_eth_allmulticast_enable() local
2541 dev = &rte_eth_devices[port_id]; in rte_eth_allmulticast_enable()
2543 if (dev->data->all_multicast == 1) in rte_eth_allmulticast_enable()
2546 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); in rte_eth_allmulticast_enable()
2547 diag = (*dev->dev_ops->allmulticast_enable)(dev); in rte_eth_allmulticast_enable()
2548 dev->data->all_multicast = (diag == 0) ? 1 : 0; in rte_eth_allmulticast_enable()
2556 struct rte_eth_dev *dev; in rte_eth_allmulticast_disable() local
2560 dev = &rte_eth_devices[port_id]; in rte_eth_allmulticast_disable()
2562 if (dev->data->all_multicast == 0) in rte_eth_allmulticast_disable()
2565 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); in rte_eth_allmulticast_disable()
2566 dev->data->all_multicast = 0; in rte_eth_allmulticast_disable()
2567 diag = (*dev->dev_ops->allmulticast_disable)(dev); in rte_eth_allmulticast_disable()
2569 dev->data->all_multicast = 1; in rte_eth_allmulticast_disable()
2577 struct rte_eth_dev *dev; in rte_eth_allmulticast_get() local
2581 dev = &rte_eth_devices[port_id]; in rte_eth_allmulticast_get()
2582 return dev->data->all_multicast; in rte_eth_allmulticast_get()
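
The enable/disable pairs keep dev->data->promiscuous and all_multicast consistent with what the driver actually accepted: enable records success only when the callback returns 0, while disable clears the flag optimistically and restores it on failure. Usage is a simple guarded call (fragment, <stdio.h> and <rte_errno.h> assumed):

    if (rte_eth_promiscuous_get(port_id) != 1) {
        int ret = rte_eth_promiscuous_enable(port_id);
        if (ret != 0)
            printf("promiscuous enable failed: %s\n", rte_strerror(-ret));
    }
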
2588 struct rte_eth_dev *dev; in rte_eth_link_get() local
2591 dev = &rte_eth_devices[port_id]; in rte_eth_link_get()
2593 if (dev->data->dev_conf.intr_conf.lsc && in rte_eth_link_get()
2594 dev->data->dev_started) in rte_eth_link_get()
2595 rte_eth_linkstatus_get(dev, eth_link); in rte_eth_link_get()
2597 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); in rte_eth_link_get()
2598 (*dev->dev_ops->link_update)(dev, 1); in rte_eth_link_get()
2599 *eth_link = dev->data->dev_link; in rte_eth_link_get()
2608 struct rte_eth_dev *dev; in rte_eth_link_get_nowait() local
2611 dev = &rte_eth_devices[port_id]; in rte_eth_link_get_nowait()
2613 if (dev->data->dev_conf.intr_conf.lsc && in rte_eth_link_get_nowait()
2614 dev->data->dev_started) in rte_eth_link_get_nowait()
2615 rte_eth_linkstatus_get(dev, eth_link); in rte_eth_link_get_nowait()
2617 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); in rte_eth_link_get_nowait()
2618 (*dev->dev_ops->link_update)(dev, 0); in rte_eth_link_get_nowait()
2619 *eth_link = dev->data->dev_link; in rte_eth_link_get_nowait()
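
Both link readers take a fast path when link-state interrupts are enabled and the port is started, returning the cached status via rte_eth_linkstatus_get(); otherwise link_update runs with wait_to_complete = 1 (rte_eth_link_get) or 0 (rte_eth_link_get_nowait). A polling fragment:

    struct rte_eth_link link;
    int ret;

    ret = rte_eth_link_get_nowait(port_id, &link);   /* no autoneg wait */
    if (ret == 0 && link.link_status == ETH_LINK_UP)
        printf("port %u up, %u Mbps, %s-duplex\n", port_id, link.link_speed,
               link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
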
2665 struct rte_eth_dev *dev; in rte_eth_stats_get() local
2669 dev = &rte_eth_devices[port_id]; in rte_eth_stats_get()
2672 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP); in rte_eth_stats_get()
2673 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; in rte_eth_stats_get()
2674 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats)); in rte_eth_stats_get()
2680 struct rte_eth_dev *dev; in rte_eth_stats_reset() local
2684 dev = &rte_eth_devices[port_id]; in rte_eth_stats_reset()
2686 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); in rte_eth_stats_reset()
2687 ret = (*dev->dev_ops->stats_reset)(dev); in rte_eth_stats_reset()
2691 dev->data->rx_mbuf_alloc_failed = 0; in rte_eth_stats_reset()
2697 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev) in eth_dev_get_xstats_basic_count() argument
2702 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_dev_get_xstats_basic_count()
2703 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_dev_get_xstats_basic_count()
2706 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) { in eth_dev_get_xstats_basic_count()
2717 struct rte_eth_dev *dev; in eth_dev_get_xstats_count() local
2721 dev = &rte_eth_devices[port_id]; in eth_dev_get_xstats_count()
2722 if (dev->dev_ops->xstats_get_names_by_id != NULL) { in eth_dev_get_xstats_count()
2723 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL, in eth_dev_get_xstats_count()
2728 if (dev->dev_ops->xstats_get_names != NULL) { in eth_dev_get_xstats_count()
2729 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0); in eth_dev_get_xstats_count()
2736 count += eth_dev_get_xstats_basic_count(dev); in eth_dev_get_xstats_count()
2787 eth_basic_stats_get_names(struct rte_eth_dev *dev, in eth_basic_stats_get_names() argument
2801 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) in eth_basic_stats_get_names()
2804 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_basic_stats_get_names()
2815 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_basic_stats_get_names()
2839 struct rte_eth_dev *dev; in rte_eth_xstats_get_names_by_id() local
2844 dev = &rte_eth_devices[port_id]; in rte_eth_xstats_get_names_by_id()
2846 basic_count = eth_dev_get_xstats_basic_count(dev); in rte_eth_xstats_get_names_by_id()
2863 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) { in rte_eth_xstats_get_names_by_id()
2880 return (*dev->dev_ops->xstats_get_names_by_id)(dev, in rte_eth_xstats_get_names_by_id()
2913 eth_basic_stats_get_names(dev, xstats_names_copy); in rte_eth_xstats_get_names_by_id()
2942 struct rte_eth_dev *dev; in rte_eth_xstats_get_names() local
2953 dev = &rte_eth_devices[port_id]; in rte_eth_xstats_get_names()
2955 cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names); in rte_eth_xstats_get_names()
2957 if (dev->dev_ops->xstats_get_names != NULL) { in rte_eth_xstats_get_names()
2961 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( in rte_eth_xstats_get_names()
2962 dev, in rte_eth_xstats_get_names()
2977 struct rte_eth_dev *dev; in eth_basic_stats_get() local
2988 dev = &rte_eth_devices[port_id]; in eth_basic_stats_get()
2990 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_basic_stats_get()
2991 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in eth_basic_stats_get()
3001 if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0) in eth_basic_stats_get()
3038 struct rte_eth_dev *dev; in rte_eth_xstats_get_by_id() local
3048 dev = &rte_eth_devices[port_id]; in rte_eth_xstats_get_by_id()
3049 basic_count = eth_dev_get_xstats_basic_count(dev); in rte_eth_xstats_get_by_id()
3062 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) { in rte_eth_xstats_get_by_id()
3063 unsigned int basic_count = eth_dev_get_xstats_basic_count(dev); in rte_eth_xstats_get_by_id()
3080 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy, in rte_eth_xstats_get_by_id()
3125 struct rte_eth_dev *dev; in rte_eth_xstats_get() local
3133 dev = &rte_eth_devices[port_id]; in rte_eth_xstats_get()
3135 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in rte_eth_xstats_get()
3136 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); in rte_eth_xstats_get()
3140 if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) in rte_eth_xstats_get()
3144 if (dev->dev_ops->xstats_get != NULL) { in rte_eth_xstats_get()
3148 xcount = (*dev->dev_ops->xstats_get)(dev, in rte_eth_xstats_get()
3178 struct rte_eth_dev *dev; in rte_eth_xstats_reset() local
3181 dev = &rte_eth_devices[port_id]; in rte_eth_xstats_reset()
3184 if (dev->dev_ops->xstats_reset != NULL) in rte_eth_xstats_reset()
3185 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); in rte_eth_xstats_reset()
3195 struct rte_eth_dev *dev; in eth_dev_set_queue_stats_mapping() local
3199 dev = &rte_eth_devices[port_id]; in eth_dev_set_queue_stats_mapping()
3201 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP); in eth_dev_set_queue_stats_mapping()
3203 if (is_rx && (queue_id >= dev->data->nb_rx_queues)) in eth_dev_set_queue_stats_mapping()
3206 if (!is_rx && (queue_id >= dev->data->nb_tx_queues)) in eth_dev_set_queue_stats_mapping()
3212 return (*dev->dev_ops->queue_stats_mapping_set) in eth_dev_set_queue_stats_mapping()
3213 (dev, queue_id, stat_idx, is_rx); in eth_dev_set_queue_stats_mapping()
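
The xstats API is index-based: names and values are retrieved separately, and each rte_eth_xstat carries an id that indexes the name table (basic stats first, then driver-specific entries appended by the driver's xstats_get). A dump sketch:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    static void
    dump_xstats(uint16_t port_id)
    {
        int i, n = rte_eth_xstats_get(port_id, NULL, 0);   /* count only */
        if (n <= 0)
            return;
        struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
        struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
        if (vals != NULL && names != NULL &&
            rte_eth_xstats_get_names(port_id, names, n) == n &&
            rte_eth_xstats_get(port_id, vals, n) == n) {
            for (i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n",
                       names[vals[i].id].name, vals[i].value);
        }
        free(vals);
        free(names);
    }
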
3239 struct rte_eth_dev *dev; in rte_eth_dev_fw_version_get() local
3242 dev = &rte_eth_devices[port_id]; in rte_eth_dev_fw_version_get()
3244 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP); in rte_eth_dev_fw_version_get()
3245 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev, in rte_eth_dev_fw_version_get()
3252 struct rte_eth_dev *dev; in rte_eth_dev_info_get() local
3270 dev = &rte_eth_devices[port_id]; in rte_eth_dev_info_get()
3274 dev_info->device = dev->device; in rte_eth_dev_info_get()
3278 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); in rte_eth_dev_info_get()
3279 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); in rte_eth_dev_info_get()
3292 dev_info->driver_name = dev->device->driver->name; in rte_eth_dev_info_get()
3293 dev_info->nb_rx_queues = dev->data->nb_rx_queues; in rte_eth_dev_info_get()
3294 dev_info->nb_tx_queues = dev->data->nb_tx_queues; in rte_eth_dev_info_get()
3296 dev_info->dev_flags = &dev->data->dev_flags; in rte_eth_dev_info_get()
3306 struct rte_eth_dev *dev; in rte_eth_dev_get_supported_ptypes() local
3310 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_supported_ptypes()
3311 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0); in rte_eth_dev_get_supported_ptypes()
3312 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); in rte_eth_dev_get_supported_ptypes()
3341 struct rte_eth_dev *dev; in rte_eth_dev_set_ptypes() local
3347 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_ptypes()
3352 if (*dev->dev_ops->dev_supported_ptypes_get == NULL || in rte_eth_dev_set_ptypes()
3353 *dev->dev_ops->dev_ptypes_set == NULL) { in rte_eth_dev_set_ptypes()
3359 ret = (*dev->dev_ops->dev_ptypes_set)(dev, in rte_eth_dev_set_ptypes()
3379 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev); in rte_eth_dev_set_ptypes()
3404 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask); in rte_eth_dev_set_ptypes()
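
rte_eth_dev_set_ptypes() narrows which packet types the driver must classify, which can save Rx cycles; the supported list is queried first. A fragment that keeps only L3 classification (array size is arbitrary):

    uint32_t ptypes[16];
    int n;

    n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
                                         ptypes, RTE_DIM(ptypes));
    if (n > 0)
        n = rte_eth_dev_set_ptypes(port_id, RTE_PTYPE_L3_MASK,
                                   ptypes, RTE_DIM(ptypes));
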
3416 struct rte_eth_dev *dev; in rte_eth_macaddr_get() local
3419 dev = &rte_eth_devices[port_id]; in rte_eth_macaddr_get()
3420 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); in rte_eth_macaddr_get()
3428 struct rte_eth_dev *dev; in rte_eth_dev_get_mtu() local
3432 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_mtu()
3433 *mtu = dev->data->mtu; in rte_eth_dev_get_mtu()
3442 struct rte_eth_dev *dev; in rte_eth_dev_set_mtu() local
3445 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_mtu()
3446 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); in rte_eth_dev_set_mtu()
3454 if (*dev->dev_ops->dev_infos_get != NULL) { in rte_eth_dev_set_mtu()
3463 ret = (*dev->dev_ops->mtu_set)(dev, mtu); in rte_eth_dev_set_mtu()
3465 dev->data->mtu = mtu; in rte_eth_dev_set_mtu()
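
rte_eth_dev_set_mtu() updates the cached dev->data->mtu only after the driver accepts the value, and when the driver implements dev_infos_get the requested MTU is validated against dev_info.min_mtu/max_mtu first. A jumbo-frame fragment:

    uint16_t mtu;
    int ret;

    ret = rte_eth_dev_set_mtu(port_id, 9000);   /* checked against dev_info */
    if (ret == 0)
        ret = rte_eth_dev_get_mtu(port_id, &mtu);
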
3473 struct rte_eth_dev *dev; in rte_eth_dev_vlan_filter() local
3477 dev = &rte_eth_devices[port_id]; in rte_eth_dev_vlan_filter()
3478 if (!(dev->data->dev_conf.rxmode.offloads & in rte_eth_dev_vlan_filter()
3490 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); in rte_eth_dev_vlan_filter()
3492 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); in rte_eth_dev_vlan_filter()
3498 vfc = &dev->data->vlan_filter_conf; in rte_eth_dev_vlan_filter()
3515 struct rte_eth_dev *dev; in rte_eth_dev_set_vlan_strip_on_queue() local
3518 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_vlan_strip_on_queue()
3519 if (rx_queue_id >= dev->data->nb_rx_queues) { in rte_eth_dev_set_vlan_strip_on_queue()
3524 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); in rte_eth_dev_set_vlan_strip_on_queue()
3525 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on); in rte_eth_dev_set_vlan_strip_on_queue()
3535 struct rte_eth_dev *dev; in rte_eth_dev_set_vlan_ether_type() local
3538 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_vlan_ether_type()
3539 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP); in rte_eth_dev_set_vlan_ether_type()
3541 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, in rte_eth_dev_set_vlan_ether_type()
3549 struct rte_eth_dev *dev; in rte_eth_dev_set_vlan_offload() local
3558 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_vlan_offload()
3561 orig_offloads = dev->data->dev_conf.rxmode.offloads; in rte_eth_dev_set_vlan_offload()
3625 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); in rte_eth_dev_set_vlan_offload()
3626 dev->data->dev_conf.rxmode.offloads = dev_offloads; in rte_eth_dev_set_vlan_offload()
3627 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); in rte_eth_dev_set_vlan_offload()
3630 dev->data->dev_conf.rxmode.offloads = orig_offloads; in rte_eth_dev_set_vlan_offload()
3639 struct rte_eth_dev *dev; in rte_eth_dev_get_vlan_offload() local
3644 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_vlan_offload()
3645 dev_offloads = &dev->data->dev_conf.rxmode.offloads; in rte_eth_dev_get_vlan_offload()
3665 struct rte_eth_dev *dev; in rte_eth_dev_set_vlan_pvid() local
3668 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_vlan_pvid()
3669 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP); in rte_eth_dev_set_vlan_pvid()
3671 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on)); in rte_eth_dev_set_vlan_pvid()
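
rte_eth_dev_set_vlan_offload() takes the desired state of all VLAN offloads as one bitmap (ETH_VLAN_STRIP_OFFLOAD and friends), mirrors it into rxmode.offloads, and restores orig_offloads if the driver rejects the change; rte_eth_dev_vlan_filter() additionally requires DEV_RX_OFFLOAD_VLAN_FILTER to have been enabled. A read-modify-write fragment that turns on stripping and filtering, then admits VLAN 100:

    int mask, ret;

    mask = rte_eth_dev_get_vlan_offload(port_id);
    mask |= ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
    ret = rte_eth_dev_set_vlan_offload(port_id, mask);
    if (ret == 0)
        ret = rte_eth_dev_vlan_filter(port_id, 100, 1);  /* accept VLAN 100 */
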
3677 struct rte_eth_dev *dev; in rte_eth_dev_flow_ctrl_get() local
3680 dev = &rte_eth_devices[port_id]; in rte_eth_dev_flow_ctrl_get()
3681 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP); in rte_eth_dev_flow_ctrl_get()
3683 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf)); in rte_eth_dev_flow_ctrl_get()
3689 struct rte_eth_dev *dev; in rte_eth_dev_flow_ctrl_set() local
3697 dev = &rte_eth_devices[port_id]; in rte_eth_dev_flow_ctrl_set()
3698 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); in rte_eth_dev_flow_ctrl_set()
3699 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf)); in rte_eth_dev_flow_ctrl_set()
3706 struct rte_eth_dev *dev; in rte_eth_dev_priority_flow_ctrl_set() local
3714 dev = &rte_eth_devices[port_id]; in rte_eth_dev_priority_flow_ctrl_set()
3716 if (*dev->dev_ops->priority_flow_ctrl_set) in rte_eth_dev_priority_flow_ctrl_set()
3717 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set) in rte_eth_dev_priority_flow_ctrl_set()
3718 (dev, pfc_conf)); in rte_eth_dev_priority_flow_ctrl_set()
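
Flow control is read-modify-write as well: rte_eth_dev_flow_ctrl_get() fills the current settings and only the fields of interest need to change. A fragment enabling pause frames in both directions:

    struct rte_eth_fc_conf fc_conf;
    int ret;

    ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
    if (ret == 0) {
        fc_conf.mode = RTE_FC_FULL;   /* honour and emit pause frames */
        ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
    }
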
3776 struct rte_eth_dev *dev; in rte_eth_dev_rss_reta_update() local
3785 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rss_reta_update()
3789 dev->data->nb_rx_queues); in rte_eth_dev_rss_reta_update()
3793 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); in rte_eth_dev_rss_reta_update()
3794 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, in rte_eth_dev_rss_reta_update()
3803 struct rte_eth_dev *dev; in rte_eth_dev_rss_reta_query() local
3813 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rss_reta_query()
3814 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP); in rte_eth_dev_rss_reta_query()
3815 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf, in rte_eth_dev_rss_reta_query()
3823 struct rte_eth_dev *dev; in rte_eth_dev_rss_hash_update() local
3835 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rss_hash_update()
3844 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); in rte_eth_dev_rss_hash_update()
3845 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, in rte_eth_dev_rss_hash_update()
3853 struct rte_eth_dev *dev; in rte_eth_dev_rss_hash_conf_get() local
3856 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rss_hash_conf_get()
3857 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); in rte_eth_dev_rss_hash_conf_get()
3858 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev, in rte_eth_dev_rss_hash_conf_get()
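
The RETA calls work on groups of 64 entries: each rte_eth_rss_reta_entry64 carries a mask selecting which of its 64 slots are valid plus a queue index per slot, and the table size comes from dev_info.reta_size. A sketch spreading the whole table round-robin over nb_rx_queues (it assumes reta_size <= 512, the largest standard table):

    #include <errno.h>
    #include <string.h>
    #include <rte_ethdev.h>

    static int
    spread_reta(uint16_t port_id, uint16_t nb_rx_queues)
    {
        struct rte_eth_rss_reta_entry64
            reta_conf[ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE];
        struct rte_eth_dev_info dev_info;
        uint16_t i;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
            return ret;
        if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
            return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < dev_info.reta_size; i++) {
            reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
            reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
                i % nb_rx_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                           dev_info.reta_size);
    }
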
3866 struct rte_eth_dev *dev; in rte_eth_dev_udp_tunnel_port_add() local
3879 dev = &rte_eth_devices[port_id]; in rte_eth_dev_udp_tunnel_port_add()
3880 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP); in rte_eth_dev_udp_tunnel_port_add()
3881 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev, in rte_eth_dev_udp_tunnel_port_add()
3889 struct rte_eth_dev *dev; in rte_eth_dev_udp_tunnel_port_delete() local
3892 dev = &rte_eth_devices[port_id]; in rte_eth_dev_udp_tunnel_port_delete()
3904 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP); in rte_eth_dev_udp_tunnel_port_delete()
3905 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev, in rte_eth_dev_udp_tunnel_port_delete()
3912 struct rte_eth_dev *dev; in rte_eth_led_on() local
3915 dev = &rte_eth_devices[port_id]; in rte_eth_led_on()
3916 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); in rte_eth_led_on()
3917 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev)); in rte_eth_led_on()
3923 struct rte_eth_dev *dev; in rte_eth_led_off() local
3926 dev = &rte_eth_devices[port_id]; in rte_eth_led_off()
3927 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); in rte_eth_led_off()
3928 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev)); in rte_eth_led_off()
3936 struct rte_eth_dev *dev; in rte_eth_fec_get_capability() local
3943 dev = &rte_eth_devices[port_id]; in rte_eth_fec_get_capability()
3944 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP); in rte_eth_fec_get_capability()
3945 ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num); in rte_eth_fec_get_capability()
3953 struct rte_eth_dev *dev; in rte_eth_fec_get() local
3959 dev = &rte_eth_devices[port_id]; in rte_eth_fec_get()
3960 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP); in rte_eth_fec_get()
3961 return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa)); in rte_eth_fec_get()
3967 struct rte_eth_dev *dev; in rte_eth_fec_set() local
3970 dev = &rte_eth_devices[port_id]; in rte_eth_fec_set()
3971 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); in rte_eth_fec_set()
3972 return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); in rte_eth_fec_set()
3983 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; in eth_dev_get_mac_addr_index() local
3992 if (memcmp(addr, &dev->data->mac_addrs[i], in eth_dev_get_mac_addr_index()
4005 struct rte_eth_dev *dev; in rte_eth_dev_mac_addr_add() local
4011 dev = &rte_eth_devices[port_id]; in rte_eth_dev_mac_addr_add()
4012 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); in rte_eth_dev_mac_addr_add()
4033 pool_mask = dev->data->mac_pool_sel[index]; in rte_eth_dev_mac_addr_add()
4041 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); in rte_eth_dev_mac_addr_add()
4045 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); in rte_eth_dev_mac_addr_add()
4048 dev->data->mac_pool_sel[index] |= (1ULL << pool); in rte_eth_dev_mac_addr_add()
4057 struct rte_eth_dev *dev; in rte_eth_dev_mac_addr_remove() local
4061 dev = &rte_eth_devices[port_id]; in rte_eth_dev_mac_addr_remove()
4062 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); in rte_eth_dev_mac_addr_remove()
4074 (*dev->dev_ops->mac_addr_remove)(dev, index); in rte_eth_dev_mac_addr_remove()
4077 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); in rte_eth_dev_mac_addr_remove()
4080 dev->data->mac_pool_sel[index] = 0; in rte_eth_dev_mac_addr_remove()
4088 struct rte_eth_dev *dev; in rte_eth_dev_default_mac_addr_set() local
4096 dev = &rte_eth_devices[port_id]; in rte_eth_dev_default_mac_addr_set()
4097 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); in rte_eth_dev_default_mac_addr_set()
4099 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); in rte_eth_dev_default_mac_addr_set()
4104 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); in rte_eth_dev_default_mac_addr_set()
4119 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; in eth_dev_get_hash_mac_addr_index() local
4127 if (!dev->data->hash_mac_addrs) in eth_dev_get_hash_mac_addr_index()
4131 if (memcmp(addr, &dev->data->hash_mac_addrs[i], in eth_dev_get_hash_mac_addr_index()
4144 struct rte_eth_dev *dev; in rte_eth_dev_uc_hash_table_set() local
4148 dev = &rte_eth_devices[port_id]; in rte_eth_dev_uc_hash_table_set()
4176 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); in rte_eth_dev_uc_hash_table_set()
4177 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); in rte_eth_dev_uc_hash_table_set()
4182 &dev->data->hash_mac_addrs[index]); in rte_eth_dev_uc_hash_table_set()
4185 &dev->data->hash_mac_addrs[index]); in rte_eth_dev_uc_hash_table_set()
4194 struct rte_eth_dev *dev; in rte_eth_dev_uc_all_hash_table_set() local
4198 dev = &rte_eth_devices[port_id]; in rte_eth_dev_uc_all_hash_table_set()
4200 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); in rte_eth_dev_uc_all_hash_table_set()
4201 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, in rte_eth_dev_uc_all_hash_table_set()
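
eth_dev_get_mac_addr_index() explains the bookkeeping of this group: every filtered address lives in dev->data->mac_addrs, and mac_pool_sel records which VMDq pools reference it, so rte_eth_dev_mac_addr_add() on an already-known address just sets another pool bit. Adding and later removing a secondary address is two calls (fragment):

    #include <rte_ether.h>
    #include <rte_ethdev.h>

    struct rte_ether_addr addr = {
        .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } /* locally admin. */
    };
    int ret;

    ret = rte_eth_dev_mac_addr_add(port_id, &addr, 0 /* pool */);
    /* ... traffic to this address is now accepted ... */
    ret = rte_eth_dev_mac_addr_remove(port_id, &addr);
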
4208 struct rte_eth_dev *dev; in rte_eth_set_queue_rate_limit() local
4219 dev = &rte_eth_devices[port_id]; in rte_eth_set_queue_rate_limit()
4220 link = dev->data->dev_link; in rte_eth_set_queue_rate_limit()
4236 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP); in rte_eth_set_queue_rate_limit()
4237 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev, in rte_eth_set_queue_rate_limit()
4246 struct rte_eth_dev *dev; in rte_eth_mirror_rule_set() local
4275 dev = &rte_eth_devices[port_id]; in rte_eth_mirror_rule_set()
4276 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP); in rte_eth_mirror_rule_set()
4278 return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev, in rte_eth_mirror_rule_set()
4285 struct rte_eth_dev *dev; in rte_eth_mirror_rule_reset() local
4289 dev = &rte_eth_devices[port_id]; in rte_eth_mirror_rule_reset()
4290 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP); in rte_eth_mirror_rule_reset()
4292 return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev, in rte_eth_mirror_rule_reset()
4309 struct rte_eth_dev *dev; in rte_eth_dev_callback_register() local
4332 dev = &rte_eth_devices[next_port]; in rte_eth_dev_callback_register()
4334 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) { in rte_eth_dev_callback_register()
4350 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), in rte_eth_dev_callback_register()
4372 struct rte_eth_dev *dev; in rte_eth_dev_callback_unregister() local
4395 dev = &rte_eth_devices[next_port]; in rte_eth_dev_callback_unregister()
4397 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; in rte_eth_dev_callback_unregister()
4411 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next); in rte_eth_dev_callback_unregister()
4424 rte_eth_dev_callback_process(struct rte_eth_dev *dev, in rte_eth_dev_callback_process() argument
4432 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { in rte_eth_dev_callback_process()
4441 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, in rte_eth_dev_callback_process()
4451 rte_eth_dev_probing_finish(struct rte_eth_dev *dev) in rte_eth_dev_probing_finish() argument
4453 if (dev == NULL) in rte_eth_dev_probing_finish()
4456 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL); in rte_eth_dev_probing_finish()
4458 dev->state = RTE_ETH_DEV_ATTACHED; in rte_eth_dev_probing_finish()
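
Callback registration walks all ports when port_id is RTE_ETH_ALL (the next_port loop above) and reuses an existing entry when the same cb_fn/cb_arg pair is already registered. Handlers run from the interrupt thread via rte_eth_dev_callback_process(), so they should stay short. A link-state sketch (requires intr_conf.lsc = 1 in the port configuration; <stdio.h> assumed):

    static int
    lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
           void *cb_arg, void *ret_param)
    {
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);
        printf("port %u: event %d (link state change)\n", port_id, event);
        return 0;
    }

    /* in initialisation code: */
    int ret = rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                            lsc_cb, NULL);
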
4465 struct rte_eth_dev *dev; in rte_eth_dev_rx_intr_ctl() local
4472 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_intr_ctl()
4474 if (!dev->intr_handle) { in rte_eth_dev_rx_intr_ctl()
4479 intr_handle = dev->intr_handle; in rte_eth_dev_rx_intr_ctl()
4485 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { in rte_eth_dev_rx_intr_ctl()
4502 struct rte_eth_dev *dev; in rte_eth_dev_rx_intr_ctl_q_get_fd() local
4509 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_intr_ctl_q_get_fd()
4511 if (queue_id >= dev->data->nb_rx_queues) { in rte_eth_dev_rx_intr_ctl_q_get_fd()
4516 if (!dev->intr_handle) { in rte_eth_dev_rx_intr_ctl_q_get_fd()
4521 intr_handle = dev->intr_handle; in rte_eth_dev_rx_intr_ctl_q_get_fd()
4544 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, in rte_eth_dma_zone_reserve() argument
4552 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, in rte_eth_dma_zone_reserve()
4579 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name, in rte_eth_dma_zone_free() argument
4586 rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id, in rte_eth_dma_zone_free()
4690 struct rte_eth_dev *dev; in rte_eth_dev_rx_intr_ctl_q() local
4696 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_intr_ctl_q()
4697 if (queue_id >= dev->data->nb_rx_queues) { in rte_eth_dev_rx_intr_ctl_q()
4702 if (!dev->intr_handle) { in rte_eth_dev_rx_intr_ctl_q()
4707 intr_handle = dev->intr_handle; in rte_eth_dev_rx_intr_ctl_q()
4729 struct rte_eth_dev *dev; in rte_eth_dev_rx_intr_enable() local
4734 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_intr_enable()
4736 ret = eth_dev_validate_rx_queue(dev, queue_id); in rte_eth_dev_rx_intr_enable()
4740 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); in rte_eth_dev_rx_intr_enable()
4741 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, in rte_eth_dev_rx_intr_enable()
4749 struct rte_eth_dev *dev; in rte_eth_dev_rx_intr_disable() local
4754 dev = &rte_eth_devices[port_id]; in rte_eth_dev_rx_intr_disable()
4756 ret = eth_dev_validate_rx_queue(dev, queue_id); in rte_eth_dev_rx_intr_disable()
4760 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); in rte_eth_dev_rx_intr_disable()
4761 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, in rte_eth_dev_rx_intr_disable()
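
Rx interrupt control needs the port configured with intr_conf.rxq = 1 so that dev->intr_handle carries per-queue event fds; rte_eth_dev_rx_intr_ctl() then maps every Rx queue into an epoll set, and the enable/disable pair arms the interrupt around the sleep phase of a polling loop. A fragment of the usual idle transition (pkts and BURST_SIZE are application-defined):

    if (rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE) == 0) {
        /* nothing polled: arm the interrupt, then block on the event fd
         * (e.g. rte_epoll_wait() after rte_eth_dev_rx_intr_ctl_q()) */
        rte_eth_dev_rx_intr_enable(port_id, queue_id);
        /* ... wait for the event ... */
        rte_eth_dev_rx_intr_disable(port_id, queue_id);
    }
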
4774 struct rte_eth_dev *dev; in rte_eth_add_rx_callback() local
4782 dev = &rte_eth_devices[port_id]; in rte_eth_add_rx_callback()
4783 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { in rte_eth_add_rx_callback()
4870 struct rte_eth_dev *dev; in rte_eth_add_tx_callback() local
4879 dev = &rte_eth_devices[port_id]; in rte_eth_add_tx_callback()
4880 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { in rte_eth_add_tx_callback()
4934 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; in rte_eth_remove_rx_callback() local
4940 prev_cb = &dev->post_rx_burst_cbs[queue_id]; in rte_eth_remove_rx_callback()
4968 struct rte_eth_dev *dev = &rte_eth_devices[port_id]; in rte_eth_remove_tx_callback() local
4974 prev_cb = &dev->pre_tx_burst_cbs[queue_id]; in rte_eth_remove_tx_callback()
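
rte_eth_add_rx_callback() chains a function into the post-Rx path of one queue; removal only unlinks the entry (hence the prev_cb walk above), and freeing it safely once no rte_eth_rx_burst() can still be executing it is the caller's job. A packet-counting sketch (rx_count is an application variable):

    static uint64_t rx_count;

    static uint16_t
    count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
             uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
    {
        RTE_SET_USED(port);
        RTE_SET_USED(queue);
        RTE_SET_USED(pkts);
        RTE_SET_USED(max_pkts);
        *(uint64_t *)user_param += nb_pkts;
        return nb_pkts;            /* burst passes through unmodified */
    }

    /* in initialisation code: */
    const struct rte_eth_rxtx_callback *cb =
        rte_eth_add_rx_callback(port_id, 0, count_cb, &rx_count);
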
4993 struct rte_eth_dev *dev; in rte_eth_rx_queue_info_get() local
5000 dev = &rte_eth_devices[port_id]; in rte_eth_rx_queue_info_get()
5001 if (queue_id >= dev->data->nb_rx_queues) { in rte_eth_rx_queue_info_get()
5006 if (dev->data->rx_queues == NULL || in rte_eth_rx_queue_info_get()
5007 dev->data->rx_queues[queue_id] == NULL) { in rte_eth_rx_queue_info_get()
5015 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) { in rte_eth_rx_queue_info_get()
5022 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); in rte_eth_rx_queue_info_get()
5025 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); in rte_eth_rx_queue_info_get()
5033 struct rte_eth_dev *dev; in rte_eth_tx_queue_info_get() local
5040 dev = &rte_eth_devices[port_id]; in rte_eth_tx_queue_info_get()
5041 if (queue_id >= dev->data->nb_tx_queues) { in rte_eth_tx_queue_info_get()
5046 if (dev->data->tx_queues == NULL || in rte_eth_tx_queue_info_get()
5047 dev->data->tx_queues[queue_id] == NULL) { in rte_eth_tx_queue_info_get()
5055 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) { in rte_eth_tx_queue_info_get()
5062 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); in rte_eth_tx_queue_info_get()
5065 dev->dev_ops->txq_info_get(dev, queue_id, qinfo); in rte_eth_tx_queue_info_get()
5074 struct rte_eth_dev *dev; in rte_eth_rx_burst_mode_get() local
5081 dev = &rte_eth_devices[port_id]; in rte_eth_rx_burst_mode_get()
5083 if (queue_id >= dev->data->nb_rx_queues) { in rte_eth_rx_burst_mode_get()
5088 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); in rte_eth_rx_burst_mode_get()
5091 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); in rte_eth_rx_burst_mode_get()
5098 struct rte_eth_dev *dev; in rte_eth_tx_burst_mode_get() local
5105 dev = &rte_eth_devices[port_id]; in rte_eth_tx_burst_mode_get()
5107 if (queue_id >= dev->data->nb_tx_queues) { in rte_eth_tx_burst_mode_get()
5112 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); in rte_eth_tx_burst_mode_get()
5115 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); in rte_eth_tx_burst_mode_get()
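
The queue introspection calls refuse hairpin queues and fill a small snapshot structure; rxq_info_get, for instance, reports the mempool, the effective rte_eth_rxconf and the descriptor count. A fragment (<stdio.h> assumed):

    struct rte_eth_rxq_info qinfo;
    int ret;

    ret = rte_eth_rx_queue_info_get(port_id, 0, &qinfo);
    if (ret == 0)
        printf("rxq 0: %u descriptors, mempool %s, deferred start %u\n",
               qinfo.nb_desc, qinfo.mp->name, qinfo.conf.rx_deferred_start);
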
5123 struct rte_eth_dev *dev; in rte_eth_dev_set_mc_addr_list() local
5127 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_mc_addr_list()
5128 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP); in rte_eth_dev_set_mc_addr_list()
5129 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev, in rte_eth_dev_set_mc_addr_list()
5136 struct rte_eth_dev *dev; in rte_eth_timesync_enable() local
5139 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_enable()
5141 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP); in rte_eth_timesync_enable()
5142 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev)); in rte_eth_timesync_enable()
5148 struct rte_eth_dev *dev; in rte_eth_timesync_disable() local
5151 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_disable()
5153 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP); in rte_eth_timesync_disable()
5154 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev)); in rte_eth_timesync_disable()
5161 struct rte_eth_dev *dev; in rte_eth_timesync_read_rx_timestamp() local
5164 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_read_rx_timestamp()
5166 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP); in rte_eth_timesync_read_rx_timestamp()
5167 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp) in rte_eth_timesync_read_rx_timestamp()
5168 (dev, timestamp, flags)); in rte_eth_timesync_read_rx_timestamp()
5175 struct rte_eth_dev *dev; in rte_eth_timesync_read_tx_timestamp() local
5178 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_read_tx_timestamp()
5180 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); in rte_eth_timesync_read_tx_timestamp()
5181 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp) in rte_eth_timesync_read_tx_timestamp()
5182 (dev, timestamp)); in rte_eth_timesync_read_tx_timestamp()
5188 struct rte_eth_dev *dev; in rte_eth_timesync_adjust_time() local
5191 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_adjust_time()
5193 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP); in rte_eth_timesync_adjust_time()
5194 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, in rte_eth_timesync_adjust_time()
5201 struct rte_eth_dev *dev; in rte_eth_timesync_read_time() local
5204 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_read_time()
5206 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP); in rte_eth_timesync_read_time()
5207 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev, in rte_eth_timesync_read_time()
5214 struct rte_eth_dev *dev; in rte_eth_timesync_write_time() local
5217 dev = &rte_eth_devices[port_id]; in rte_eth_timesync_write_time()
5219 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP); in rte_eth_timesync_write_time()
5220 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev, in rte_eth_timesync_write_time()
5227 struct rte_eth_dev *dev; in rte_eth_read_clock() local
5230 dev = &rte_eth_devices[port_id]; in rte_eth_read_clock()
5232 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); in rte_eth_read_clock()
5233 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); in rte_eth_read_clock()
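
The timesync group exposes the NIC's PTP clock: enable starts hardware timestamping, the read_rx/read_tx calls fetch per-packet timestamps, and adjust/read/write operate on the clock itself. A fragment reading the device time (<stdio.h> assumed):

    #include <time.h>

    struct timespec ts;
    int ret;

    ret = rte_eth_timesync_enable(port_id);
    if (ret == 0)
        ret = rte_eth_timesync_read_time(port_id, &ts);
    if (ret == 0)
        printf("NIC time: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
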
5239 struct rte_eth_dev *dev; in rte_eth_dev_get_reg_info() local
5243 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_reg_info()
5244 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); in rte_eth_dev_get_reg_info()
5245 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info)); in rte_eth_dev_get_reg_info()
5251 struct rte_eth_dev *dev; in rte_eth_dev_get_eeprom_length() local
5255 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_eeprom_length()
5256 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); in rte_eth_dev_get_eeprom_length()
5257 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev)); in rte_eth_dev_get_eeprom_length()
5263 struct rte_eth_dev *dev; in rte_eth_dev_get_eeprom() local
5267 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_eeprom()
5268 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); in rte_eth_dev_get_eeprom()
5269 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info)); in rte_eth_dev_get_eeprom()
5275 struct rte_eth_dev *dev; in rte_eth_dev_set_eeprom() local
5279 dev = &rte_eth_devices[port_id]; in rte_eth_dev_set_eeprom()
5280 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); in rte_eth_dev_set_eeprom()
5281 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); in rte_eth_dev_set_eeprom()
5288 struct rte_eth_dev *dev; in rte_eth_dev_get_module_info() local
5292 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_module_info()
5293 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP); in rte_eth_dev_get_module_info()
5294 return (*dev->dev_ops->get_module_info)(dev, modinfo); in rte_eth_dev_get_module_info()
5301 struct rte_eth_dev *dev; in rte_eth_dev_get_module_eeprom() local
5305 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_module_eeprom()
5306 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP); in rte_eth_dev_get_module_eeprom()
5307 return (*dev->dev_ops->get_module_eeprom)(dev, info); in rte_eth_dev_get_module_eeprom()
5314 struct rte_eth_dev *dev; in rte_eth_dev_get_dcb_info() local
5318 dev = &rte_eth_devices[port_id]; in rte_eth_dev_get_dcb_info()
5321 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); in rte_eth_dev_get_dcb_info()
5322 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info)); in rte_eth_dev_get_dcb_info()
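
The module calls are a two-step read: get_module_info reports the transceiver type and EEPROM size, and get_module_eeprom then copies the requested window into a caller buffer described by rte_dev_eeprom_info. A fragment (the static buffer size is an assumption):

    struct rte_eth_dev_module_info modinfo;
    struct rte_dev_eeprom_info einfo;
    static uint8_t buf[1024];
    int ret;

    ret = rte_eth_dev_get_module_info(port_id, &modinfo);
    if (ret == 0 && modinfo.eeprom_len <= sizeof(buf)) {
        einfo.offset = 0;
        einfo.length = modinfo.eeprom_len;
        einfo.data = buf;
        ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
    }
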
5365 struct rte_eth_dev *dev; in rte_eth_dev_hairpin_capability_get() local
5369 dev = &rte_eth_devices[port_id]; in rte_eth_dev_hairpin_capability_get()
5370 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP); in rte_eth_dev_hairpin_capability_get()
5372 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap)); in rte_eth_dev_hairpin_capability_get()
5376 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) in rte_eth_dev_is_rx_hairpin_queue() argument
5378 if (dev->data->rx_queue_state[queue_id] == in rte_eth_dev_is_rx_hairpin_queue()
5385 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id) in rte_eth_dev_is_tx_hairpin_queue() argument
5387 if (dev->data->tx_queue_state[queue_id] == in rte_eth_dev_is_tx_hairpin_queue()
5396 struct rte_eth_dev *dev; in rte_eth_dev_pool_ops_supported() local
5403 dev = &rte_eth_devices[port_id]; in rte_eth_dev_pool_ops_supported()
5405 if (*dev->dev_ops->pool_ops_supported == NULL) in rte_eth_dev_pool_ops_supported()
5408 return (*dev->dev_ops->pool_ops_supported)(dev, pool); in rte_eth_dev_pool_ops_supported()
5718 struct rte_eth_dev *dev; in rte_eth_hairpin_queue_peer_update() local
5725 dev = &rte_eth_devices[peer_port]; in rte_eth_hairpin_queue_peer_update()
5726 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update, in rte_eth_hairpin_queue_peer_update()
5729 return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue, in rte_eth_hairpin_queue_peer_update()
5738 struct rte_eth_dev *dev; in rte_eth_hairpin_queue_peer_bind() local
5744 dev = &rte_eth_devices[cur_port]; in rte_eth_hairpin_queue_peer_bind()
5745 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind, in rte_eth_hairpin_queue_peer_bind()
5748 return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue, in rte_eth_hairpin_queue_peer_bind()
5756 struct rte_eth_dev *dev; in rte_eth_hairpin_queue_peer_unbind() local
5759 dev = &rte_eth_devices[cur_port]; in rte_eth_hairpin_queue_peer_unbind()
5760 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind, in rte_eth_hairpin_queue_peer_unbind()
5763 return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue, in rte_eth_hairpin_queue_peer_unbind()