Lines Matching refs:sdev

24 	struct sub_device *sdev;  in fs_dev_configure()  local
29 FOREACH_SUBDEV(sdev, i, dev) { in fs_dev_configure()
34 if (sdev->state != DEV_PROBED && in fs_dev_configure()
35 !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE)) in fs_dev_configure()
38 rmv_interrupt = ETH(sdev)->data->dev_flags & in fs_dev_configure()
48 (ETH(sdev)->data->dev_flags & in fs_dev_configure()
58 ret = rte_eth_dev_configure(PORT_ID(sdev), in fs_dev_configure()
63 if (!fs_err(sdev, ret)) in fs_dev_configure()
69 if (rmv_interrupt && sdev->rmv_callback == 0) { in fs_dev_configure()
70 ret = rte_eth_dev_callback_register(PORT_ID(sdev), in fs_dev_configure()
73 sdev); in fs_dev_configure()
76 SUB_ID(sdev)); in fs_dev_configure()
78 sdev->rmv_callback = 1; in fs_dev_configure()
81 if (lsc_interrupt && sdev->lsc_callback == 0) { in fs_dev_configure()
82 ret = rte_eth_dev_callback_register(PORT_ID(sdev), in fs_dev_configure()
88 SUB_ID(sdev)); in fs_dev_configure()
90 sdev->lsc_callback = 1; in fs_dev_configure()
93 sdev->state = DEV_ACTIVE; in fs_dev_configure()
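
The fs_dev_configure() matches above (which appear to come from DPDK's fail-safe PMD, failsafe_ops.c) trace the per-sub-device configure loop: each eligible sub-device is configured through rte_eth_dev_configure(), the RMV/LSC event callbacks are registered at most once per sub-device (the rmv_callback/lsc_callback flags guard re-registration), and the state is then advanced to DEV_ACTIVE. Below is a minimal sketch of that shape, not the driver's own code: the types, the dev_state enum layout, and the fs_rmv_event() handler are stand-ins, since the listing only shows that some handler is registered.

/* Illustrative sketch only: stand-in types and names, not the driver's code. */
#include <stdint.h>
#include <rte_ethdev.h>

enum dev_state {		/* stand-in for the driver's sub-device states */
	DEV_UNDEFINED,
	DEV_PARSED,
	DEV_PROBED,
	DEV_ACTIVE,
	DEV_STARTED,
};

struct sub_device {
	uint16_t port_id;	/* what PORT_ID(sdev) resolves to */
	enum dev_state state;
	unsigned int rmv_callback:1;
	unsigned int lsc_callback:1;
};

/* Hypothetical removal handler; the listing only shows that one is registered. */
static int
fs_rmv_event(uint16_t port_id, enum rte_eth_event_type event,
	     void *cb_arg, void *ret_param)
{
	(void)port_id; (void)event; (void)cb_arg; (void)ret_param;
	/* The real driver re-synchronizes sub-device state here. */
	return 0;
}

static int
fs_configure_one(struct sub_device *sdev, const struct rte_eth_conf *conf,
		 uint16_t nb_rxq, uint16_t nb_txq)
{
	int ret;

	ret = rte_eth_dev_configure(sdev->port_id, nb_rxq, nb_txq, conf);
	if (ret < 0)
		return ret;
	/* Register the hot-removal callback at most once per sub-device. */
	if (sdev->rmv_callback == 0) {
		ret = rte_eth_dev_callback_register(sdev->port_id,
						    RTE_ETH_EVENT_INTR_RMV,
						    fs_rmv_event, sdev);
		if (ret == 0)
			sdev->rmv_callback = 1;
	}
	sdev->state = DEV_ACTIVE;
	return 0;
}

In the listing the same once-only registration is repeated for the LSC callback, and each return value is filtered through fs_err() before deciding whether the configure step failed.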
125 struct sub_device *sdev; in fs_dev_start() local
135 FOREACH_SUBDEV(sdev, i, dev) { in fs_dev_start()
136 if (sdev->state != DEV_ACTIVE) in fs_dev_start()
139 ret = rte_eth_dev_start(PORT_ID(sdev)); in fs_dev_start()
141 if (!fs_err(sdev, ret)) in fs_dev_start()
146 ret = failsafe_rx_intr_install_subdevice(sdev); in fs_dev_start()
148 if (!fs_err(sdev, ret)) in fs_dev_start()
150 if (fs_err(sdev, rte_eth_dev_stop(PORT_ID(sdev))) < 0) in fs_dev_start()
152 SUB_ID(sdev)); in fs_dev_start()
156 sdev->state = DEV_STARTED; in fs_dev_start()
185 struct sub_device *sdev; in fs_dev_stop() local
191 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) { in fs_dev_stop()
192 ret = rte_eth_dev_stop(PORT_ID(sdev)); in fs_dev_stop()
193 if (fs_err(sdev, ret) < 0) { in fs_dev_stop()
195 PORT_ID(sdev)); in fs_dev_stop()
200 failsafe_rx_intr_uninstall_subdevice(sdev); in fs_dev_stop()
201 sdev->state = DEV_STARTED - 1; in fs_dev_stop()
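
The fs_dev_start()/fs_dev_stop() matches show the lifecycle counterpart: starting a sub-device also installs its Rx interrupts and rolls the port back to stopped if that installation fails, while stopping undoes both and steps the state back below DEV_STARTED. A sketch of that rollback shape follows, reusing the stand-in struct sub_device above; fs_rx_intr_install()/fs_rx_intr_uninstall() are placeholders for the failsafe_rx_intr_*_subdevice() helpers named in the listing.

/* Placeholders for the interrupt helpers referenced by the listing. */
static int fs_rx_intr_install(struct sub_device *sdev) { (void)sdev; return 0; }
static void fs_rx_intr_uninstall(struct sub_device *sdev) { (void)sdev; }

static int
fs_start_one(struct sub_device *sdev)
{
	int ret;

	ret = rte_eth_dev_start(sdev->port_id);
	if (ret < 0)
		return ret;
	ret = fs_rx_intr_install(sdev);
	if (ret < 0) {
		/* Roll back: a started sub-device without Rx interrupts would
		 * be inconsistent. The real driver only logs a stop failure. */
		(void)rte_eth_dev_stop(sdev->port_id);
		return ret;
	}
	sdev->state = DEV_STARTED;
	return 0;
}

static int
fs_stop_one(struct sub_device *sdev)
{
	int ret = rte_eth_dev_stop(sdev->port_id);

	if (ret < 0)
		return ret;
	fs_rx_intr_uninstall(sdev);
	sdev->state = DEV_ACTIVE;	/* the listing writes this as DEV_STARTED - 1 */
	return 0;
}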
213 struct sub_device *sdev; in fs_dev_set_link_up() local
218 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_dev_set_link_up()
220 ret = rte_eth_dev_set_link_up(PORT_ID(sdev)); in fs_dev_set_link_up()
221 if ((ret = fs_err(sdev, ret))) { in fs_dev_set_link_up()
235 struct sub_device *sdev; in fs_dev_set_link_down() local
240 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_dev_set_link_down()
242 ret = rte_eth_dev_set_link_down(PORT_ID(sdev)); in fs_dev_set_link_down()
243 if ((ret = fs_err(sdev, ret))) { in fs_dev_set_link_down()
257 struct sub_device *sdev; in fs_rx_queue_stop() local
264 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_stop()
265 uint16_t port_id = ETH(sdev)->data->port_id; in fs_rx_queue_stop()
268 ret = fs_err(sdev, ret); in fs_rx_queue_stop()
285 struct sub_device *sdev; in fs_rx_queue_start() local
290 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_start()
291 uint16_t port_id = ETH(sdev)->data->port_id; in fs_rx_queue_start()
294 ret = fs_err(sdev, ret); in fs_rx_queue_start()
310 struct sub_device *sdev; in fs_tx_queue_stop() local
317 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_stop()
318 uint16_t port_id = ETH(sdev)->data->port_id; in fs_tx_queue_stop()
321 ret = fs_err(sdev, ret); in fs_tx_queue_stop()
338 struct sub_device *sdev; in fs_tx_queue_start() local
343 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_start()
344 uint16_t port_id = ETH(sdev)->data->port_id; in fs_tx_queue_start()
347 ret = fs_err(sdev, ret); in fs_tx_queue_start()
364 struct sub_device *sdev; in fs_rx_queue_release() local
375 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_release()
376 if (ETH(sdev)->data->rx_queues != NULL && in fs_rx_queue_release()
377 ETH(sdev)->data->rx_queues[rxq->qid] != NULL) { in fs_rx_queue_release()
378 SUBOPS(sdev, rx_queue_release) in fs_rx_queue_release()
379 (ETH(sdev)->data->rx_queues[rxq->qid]); in fs_rx_queue_release()
405 struct sub_device *sdev; in fs_rx_queue_setup() local
412 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { in fs_rx_queue_setup()
413 if (SUBOPS(sdev, rx_queue_start) == NULL) { in fs_rx_queue_setup()
434 FOREACH_SUBDEV(sdev, i, dev) in fs_rx_queue_setup()
442 rxq->sdev = PRIV(dev)->subs; in fs_rx_queue_setup()
450 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_setup()
451 ret = rte_eth_rx_queue_setup(PORT_ID(sdev), in fs_rx_queue_setup()
455 if ((ret = fs_err(sdev, ret))) { in fs_rx_queue_setup()
472 struct sub_device *sdev; in fs_rx_intr_enable() local
494 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_intr_enable()
495 ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx); in fs_rx_intr_enable()
496 ret = fs_err(sdev, ret); in fs_rx_intr_enable()
511 struct sub_device *sdev; in fs_rx_intr_disable() local
528 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_intr_disable()
529 ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx); in fs_rx_intr_disable()
530 ret = fs_err(sdev, ret); in fs_rx_intr_disable()
548 struct sub_device *sdev; in fs_tx_queue_release() local
557 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_release()
558 if (ETH(sdev)->data->tx_queues != NULL && in fs_tx_queue_release()
559 ETH(sdev)->data->tx_queues[txq->qid] != NULL) { in fs_tx_queue_release()
560 SUBOPS(sdev, tx_queue_release) in fs_tx_queue_release()
561 (ETH(sdev)->data->tx_queues[txq->qid]); in fs_tx_queue_release()
576 struct sub_device *sdev; in fs_tx_queue_setup() local
583 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { in fs_tx_queue_setup()
584 if (SUBOPS(sdev, tx_queue_start) == NULL) { in fs_tx_queue_setup()
605 FOREACH_SUBDEV(sdev, i, dev) in fs_tx_queue_setup()
613 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_setup()
614 ret = rte_eth_tx_queue_setup(PORT_ID(sdev), in fs_tx_queue_setup()
618 if ((ret = fs_err(sdev, ret))) { in fs_tx_queue_setup()
651 struct sub_device *sdev; in failsafe_eth_dev_close() local
665 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in failsafe_eth_dev_close()
667 failsafe_eth_dev_unregister_callbacks(sdev); in failsafe_eth_dev_close()
668 err = rte_eth_dev_close(PORT_ID(sdev)); in failsafe_eth_dev_close()
672 PORT_ID(sdev)); in failsafe_eth_dev_close()
674 sdev->state = DEV_ACTIVE - 1; in failsafe_eth_dev_close()
705 struct sub_device *sdev; in fs_promiscuous_enable() local
710 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_enable()
711 ret = rte_eth_promiscuous_enable(PORT_ID(sdev)); in fs_promiscuous_enable()
712 ret = fs_err(sdev, ret); in fs_promiscuous_enable()
715 PORT_ID(sdev)); in fs_promiscuous_enable()
721 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_enable()
722 ret = rte_eth_promiscuous_disable(PORT_ID(sdev)); in fs_promiscuous_enable()
723 ret = fs_err(sdev, ret); in fs_promiscuous_enable()
726 PORT_ID(sdev)); in fs_promiscuous_enable()
737 struct sub_device *sdev; in fs_promiscuous_disable() local
742 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_disable()
743 ret = rte_eth_promiscuous_disable(PORT_ID(sdev)); in fs_promiscuous_disable()
744 ret = fs_err(sdev, ret); in fs_promiscuous_disable()
747 PORT_ID(sdev)); in fs_promiscuous_disable()
753 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_disable()
754 ret = rte_eth_promiscuous_enable(PORT_ID(sdev)); in fs_promiscuous_disable()
755 ret = fs_err(sdev, ret); in fs_promiscuous_disable()
758 PORT_ID(sdev)); in fs_promiscuous_disable()
769 struct sub_device *sdev; in fs_allmulticast_enable() local
774 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_enable()
775 ret = rte_eth_allmulticast_enable(PORT_ID(sdev)); in fs_allmulticast_enable()
776 ret = fs_err(sdev, ret); in fs_allmulticast_enable()
779 PORT_ID(sdev)); in fs_allmulticast_enable()
785 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_enable()
786 ret = rte_eth_allmulticast_disable(PORT_ID(sdev)); in fs_allmulticast_enable()
787 ret = fs_err(sdev, ret); in fs_allmulticast_enable()
790 PORT_ID(sdev)); in fs_allmulticast_enable()
801 struct sub_device *sdev; in fs_allmulticast_disable() local
806 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_disable()
807 ret = rte_eth_allmulticast_disable(PORT_ID(sdev)); in fs_allmulticast_disable()
808 ret = fs_err(sdev, ret); in fs_allmulticast_disable()
811 PORT_ID(sdev)); in fs_allmulticast_disable()
817 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_disable()
818 ret = rte_eth_allmulticast_enable(PORT_ID(sdev)); in fs_allmulticast_disable()
819 ret = fs_err(sdev, ret); in fs_allmulticast_disable()
822 PORT_ID(sdev)); in fs_allmulticast_disable()
834 struct sub_device *sdev; in fs_link_update() local
839 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_link_update()
841 ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete); in fs_link_update()
842 if (ret && ret != -1 && sdev->remove == 0 && in fs_link_update()
843 rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) { in fs_link_update()
871 struct sub_device *sdev; in fs_stats_get() local
877 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_stats_get()
878 struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats; in fs_stats_get()
879 uint64_t *timestamp = &sdev->stats_snapshot.timestamp; in fs_stats_get()
882 ret = rte_eth_stats_get(PORT_ID(sdev), snapshot); in fs_stats_get()
884 if (!fs_err(sdev, ret)) { in fs_stats_get()
905 struct sub_device *sdev; in fs_stats_reset() local
910 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_stats_reset()
911 ret = rte_eth_stats_reset(PORT_ID(sdev)); in fs_stats_reset()
913 if (!fs_err(sdev, ret)) in fs_stats_reset()
921 memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats)); in fs_stats_reset()
932 struct sub_device *sdev; in __fs_xstats_count() local
937 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in __fs_xstats_count()
938 ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0); in __fs_xstats_count()
952 struct sub_device *sdev; in __fs_xstats_get_names() local
960 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in __fs_xstats_get_names()
967 r = rte_eth_xstats_get_names(PORT_ID(sdev), in __fs_xstats_get_names()
1011 struct sub_device *sdev; in __fs_xstats_get() local
1024 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in __fs_xstats_get()
1025 ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n); in __fs_xstats_get()
1062 struct sub_device *sdev; in fs_xstats_reset() local
1067 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_xstats_reset()
1068 r = rte_eth_xstats_reset(PORT_ID(sdev)); in fs_xstats_reset()
1161 struct sub_device *sdev; in fs_dev_infos_get() local
1230 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { in fs_dev_infos_get()
1233 ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info); in fs_dev_infos_get()
1234 ret = fs_err(sdev, ret); in fs_dev_infos_get()
1247 struct sub_device *sdev; in fs_dev_supported_ptypes_get() local
1252 sdev = TX_SUBDEV(dev); in fs_dev_supported_ptypes_get()
1253 if (sdev == NULL) { in fs_dev_supported_ptypes_get()
1257 edev = ETH(sdev); in fs_dev_supported_ptypes_get()
1259 if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) { in fs_dev_supported_ptypes_get()
1270 ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev); in fs_dev_supported_ptypes_get()
1279 struct sub_device *sdev; in fs_mtu_set() local
1284 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_mtu_set()
1286 ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu); in fs_mtu_set()
1287 if ((ret = fs_err(sdev, ret))) { in fs_mtu_set()
1301 struct sub_device *sdev; in fs_vlan_filter_set() local
1306 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_vlan_filter_set()
1308 ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on); in fs_vlan_filter_set()
1309 if ((ret = fs_err(sdev, ret))) { in fs_vlan_filter_set()
1324 struct sub_device *sdev; in fs_flow_ctrl_get() local
1328 sdev = TX_SUBDEV(dev); in fs_flow_ctrl_get()
1329 if (sdev == NULL) { in fs_flow_ctrl_get()
1333 if (SUBOPS(sdev, flow_ctrl_get) == NULL) { in fs_flow_ctrl_get()
1337 ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf); in fs_flow_ctrl_get()
1347 struct sub_device *sdev; in fs_flow_ctrl_set() local
1352 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_flow_ctrl_set()
1354 ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf); in fs_flow_ctrl_set()
1355 if ((ret = fs_err(sdev, ret))) { in fs_flow_ctrl_set()
1369 struct sub_device *sdev; in fs_mac_addr_remove() local
1376 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) in fs_mac_addr_remove()
1377 rte_eth_dev_mac_addr_remove(PORT_ID(sdev), in fs_mac_addr_remove()
1389 struct sub_device *sdev; in fs_mac_addr_add() local
1395 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_mac_addr_add()
1396 ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq); in fs_mac_addr_add()
1397 if ((ret = fs_err(sdev, ret))) { in fs_mac_addr_add()
1416 struct sub_device *sdev; in fs_mac_addr_set() local
1421 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_mac_addr_set()
1422 ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr); in fs_mac_addr_set()
1423 ret = fs_err(sdev, ret); in fs_mac_addr_set()
1440 struct sub_device *sdev; in fs_set_mc_addr_list() local
1447 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_set_mc_addr_list()
1448 ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev), in fs_set_mc_addr_list()
1472 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_set_mc_addr_list()
1473 int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev), in fs_set_mc_addr_list()
1489 struct sub_device *sdev; in fs_rss_hash_update() local
1494 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rss_hash_update()
1495 ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf); in fs_rss_hash_update()
1496 ret = fs_err(sdev, ret); in fs_rss_hash_update()
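
From fs_dev_set_link_up() onward, almost every match repeats one skeleton: iterate the active sub-devices with FOREACH_SUBDEV_STATE(), call the corresponding rte_eth_* function on PORT_ID(sdev), and pass the result through fs_err() so that a sub-device being hot-unplugged does not turn the whole fail-safe operation into an error. The sketch below captures that skeleton with stand-in definitions, reusing struct sub_device from the first sketch; the exact condition inside fs_err_sketch() is an assumption about what the driver's fs_err() filters, not its actual code.

#include <errno.h>

struct fs_priv {
	struct sub_device *subs;	/* sub-device slots, as in PRIV(dev)->subs */
	uint8_t subs_tail;		/* number of slots in use */
};

/* Stand-in error filter: every per-sub-device return value in the listing
 * goes through fs_err(); treating a removed device as success is an
 * assumption about what that filter does. */
static inline int
fs_err_sketch(struct sub_device *sdev, int err)
{
	if (err < 0 && (err == -ENODEV ||
			rte_eth_dev_is_removed(sdev->port_id) != 0))
		return 0;
	return err;
}

/* Skeleton of FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) + fs_err(). */
static int
fs_for_each_active(struct fs_priv *priv, int (*op)(uint16_t port_id))
{
	uint8_t i;
	int ret;

	for (i = 0; i < priv->subs_tail; i++) {
		struct sub_device *sdev = &priv->subs[i];

		if (sdev->state < DEV_ACTIVE)
			continue;
		ret = fs_err_sketch(sdev, op(sdev->port_id));
		if (ret)
			return ret;
	}
	return 0;
}

With such a helper, an operation like fs_promiscuous_enable() reduces to fs_for_each_active(priv, rte_eth_promiscuous_enable). The exceptions visible in the listing (fs_promiscuous_* and fs_allmulticast_*) additionally walk the already-updated sub-devices a second time to undo the change when a later sub-device fails.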