Lines Matching refs:dev — symbol cross-reference over the DPDK fail-safe PMD operations (drivers/net/failsafe/failsafe_ops.c); each entry shows the source line number, the matching code, and the enclosing function.
22 fs_dev_configure(struct rte_eth_dev *dev) in fs_dev_configure() argument
28 fs_lock(dev, 0); in fs_dev_configure()
29 FOREACH_SUBDEV(sdev, i, dev) { in fs_dev_configure()
35 !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE)) in fs_dev_configure()
42 dev->data->dev_conf.intr_conf.rmv = 1; in fs_dev_configure()
46 lsc_enabled = dev->data->dev_conf.intr_conf.lsc; in fs_dev_configure()
52 dev->data->dev_conf.intr_conf.lsc = 1; in fs_dev_configure()
55 dev->data->dev_conf.intr_conf.lsc = 0; in fs_dev_configure()
59 dev->data->nb_rx_queues, in fs_dev_configure()
60 dev->data->nb_tx_queues, in fs_dev_configure()
61 &dev->data->dev_conf); in fs_dev_configure()
66 fs_unlock(dev, 0); in fs_dev_configure()
80 dev->data->dev_conf.intr_conf.rmv = 0; in fs_dev_configure()
85 dev); in fs_dev_configure()
92 dev->data->dev_conf.intr_conf.lsc = lsc_enabled; in fs_dev_configure()
95 if (PRIV(dev)->state < DEV_ACTIVE) in fs_dev_configure()
96 PRIV(dev)->state = DEV_ACTIVE; in fs_dev_configure()
97 fs_unlock(dev, 0); in fs_dev_configure()
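Nearly every op in this listing follows the same shape: take the fail-safe lock, walk the sub-devices (optionally filtered by a minimum state), forward the call, and release the lock on every exit path. Below is a minimal self-contained sketch of that shape; the enum values and the FOREACH_SUBDEV_STATE() name mirror the listing, but struct fs_priv, struct sub_device, and the plain pthread mutex are simplified stand-ins for the driver's real private data and its fs_lock()/fs_unlock() helpers.

#include <stdio.h>
#include <pthread.h>

/* Simplified stand-ins for the driver's private data and sub-device
 * set; they only model what the locking/iteration pattern needs. */
enum dev_state {
	DEV_UNDEFINED,
	DEV_PARSED,
	DEV_PROBED,
	DEV_ACTIVE,
	DEV_STARTED,
};

struct sub_device {
	enum dev_state state;
	unsigned int port_id;
};

struct fs_priv {
	pthread_mutex_t lock;	/* stands in for fs_lock()/fs_unlock() */
	struct sub_device subs[4];
	unsigned int subs_tail;
};

/* Visit sub-devices whose state is at least `min_state`, mirroring
 * the FOREACH_SUBDEV_STATE() iterator used throughout the listing. */
#define FOREACH_SUBDEV_STATE(s, i, priv, min_state) \
	for ((i) = 0; (i) < (priv)->subs_tail; (i)++) \
		if (((s) = &(priv)->subs[(i)])->state < (min_state)) \
			continue; \
		else

/* The recurring op shape: lock, forward to each qualifying
 * sub-device, and unlock on every return path. */
static int
fs_op_template(struct fs_priv *priv)
{
	struct sub_device *sdev;
	unsigned int i;

	pthread_mutex_lock(&priv->lock);		/* fs_lock(dev, 0) */
	FOREACH_SUBDEV_STATE(sdev, i, priv, DEV_ACTIVE) {
		int ret = 0;	/* a real op would call into the sub-PMD */

		if (ret < 0) {
			pthread_mutex_unlock(&priv->lock); /* error path */
			return ret;
		}
	}
	pthread_mutex_unlock(&priv->lock);		/* fs_unlock(dev, 0) */
	return 0;
}

int
main(void)
{
	struct fs_priv priv = { .subs_tail = 2 };

	pthread_mutex_init(&priv.lock, NULL);
	priv.subs[0].state = DEV_ACTIVE;
	priv.subs[1].state = DEV_PROBED;	/* filtered out by state */
	printf("op returned %d\n", fs_op_template(&priv));
	pthread_mutex_destroy(&priv.lock);
	return 0;
}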
102 fs_set_queues_state_start(struct rte_eth_dev *dev) in fs_set_queues_state_start() argument
108 for (i = 0; i < dev->data->nb_rx_queues; i++) { in fs_set_queues_state_start()
109 rxq = dev->data->rx_queues[i]; in fs_set_queues_state_start()
111 dev->data->rx_queue_state[i] = in fs_set_queues_state_start()
114 for (i = 0; i < dev->data->nb_tx_queues; i++) { in fs_set_queues_state_start()
115 txq = dev->data->tx_queues[i]; in fs_set_queues_state_start()
117 dev->data->tx_queue_state[i] = in fs_set_queues_state_start()
123 fs_dev_start(struct rte_eth_dev *dev) in fs_dev_start() argument
129 fs_lock(dev, 0); in fs_dev_start()
130 ret = failsafe_rx_intr_install(dev); in fs_dev_start()
132 fs_unlock(dev, 0); in fs_dev_start()
135 FOREACH_SUBDEV(sdev, i, dev) { in fs_dev_start()
143 fs_unlock(dev, 0); in fs_dev_start()
153 fs_unlock(dev, 0); in fs_dev_start()
158 if (PRIV(dev)->state < DEV_STARTED) { in fs_dev_start()
159 PRIV(dev)->state = DEV_STARTED; in fs_dev_start()
160 fs_set_queues_state_start(dev); in fs_dev_start()
162 fs_switch_dev(dev, NULL); in fs_dev_start()
163 fs_unlock(dev, 0); in fs_dev_start()
168 fs_set_queues_state_stop(struct rte_eth_dev *dev) in fs_set_queues_state_stop() argument
172 for (i = 0; i < dev->data->nb_rx_queues; i++) in fs_set_queues_state_stop()
173 if (dev->data->rx_queues[i] != NULL) in fs_set_queues_state_stop()
174 dev->data->rx_queue_state[i] = in fs_set_queues_state_stop()
176 for (i = 0; i < dev->data->nb_tx_queues; i++) in fs_set_queues_state_stop()
177 if (dev->data->tx_queues[i] != NULL) in fs_set_queues_state_stop()
178 dev->data->tx_queue_state[i] = in fs_set_queues_state_stop()
183 fs_dev_stop(struct rte_eth_dev *dev) in fs_dev_stop() argument
189 fs_lock(dev, 0); in fs_dev_stop()
190 PRIV(dev)->state = DEV_STARTED - 1; in fs_dev_stop()
191 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) { in fs_dev_stop()
196 PRIV(dev)->state = DEV_STARTED + 1; in fs_dev_stop()
197 fs_unlock(dev, 0); in fs_dev_stop()
203 failsafe_rx_intr_uninstall(dev); in fs_dev_stop()
204 fs_set_queues_state_stop(dev); in fs_dev_stop()
205 fs_unlock(dev, 0); in fs_dev_stop()
211 fs_dev_set_link_up(struct rte_eth_dev *dev) in fs_dev_set_link_up() argument
217 fs_lock(dev, 0); in fs_dev_set_link_up()
218 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_dev_set_link_up()
224 fs_unlock(dev, 0); in fs_dev_set_link_up()
228 fs_unlock(dev, 0); in fs_dev_set_link_up()
233 fs_dev_set_link_down(struct rte_eth_dev *dev) in fs_dev_set_link_down() argument
239 fs_lock(dev, 0); in fs_dev_set_link_down()
240 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_dev_set_link_down()
246 fs_unlock(dev, 0); in fs_dev_set_link_down()
250 fs_unlock(dev, 0); in fs_dev_set_link_down()
255 fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) in fs_rx_queue_stop() argument
263 fs_lock(dev, 0); in fs_rx_queue_stop()
264 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_stop()
276 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; in fs_rx_queue_stop()
277 fs_unlock(dev, 0); in fs_rx_queue_stop()
283 fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) in fs_rx_queue_start() argument
289 fs_lock(dev, 0); in fs_rx_queue_start()
290 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_start()
297 fs_rx_queue_stop(dev, rx_queue_id); in fs_rx_queue_start()
298 fs_unlock(dev, 0); in fs_rx_queue_start()
302 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; in fs_rx_queue_start()
303 fs_unlock(dev, 0); in fs_rx_queue_start()
308 fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) in fs_tx_queue_stop() argument
316 fs_lock(dev, 0); in fs_tx_queue_stop()
317 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_stop()
329 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; in fs_tx_queue_stop()
330 fs_unlock(dev, 0); in fs_tx_queue_stop()
336 fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) in fs_tx_queue_start() argument
342 fs_lock(dev, 0); in fs_tx_queue_start()
343 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_start()
350 fs_tx_queue_stop(dev, tx_queue_id); in fs_tx_queue_start()
351 fs_unlock(dev, 0); in fs_tx_queue_start()
355 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; in fs_tx_queue_start()
356 fs_unlock(dev, 0); in fs_tx_queue_start()
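fs_rx_queue_start() and fs_tx_queue_start() add a rollback step to that shape: if any active sub-device fails, the queue is stopped again on all sub-devices before the error is propagated, and the ethdev-visible queue state flips to STARTED only once every sub-device has succeeded. A hedged sketch of the idiom, reusing the stand-in types above; start_queue_on() and stop_queue_on() are hypothetical helpers standing in for the per-sub-port rte_eth_dev_*_queue_start()/stop() calls.

/* Hypothetical per-sub-port helpers (assumptions, not driver API). */
static int
start_queue_on(struct sub_device *sdev, unsigned int qid)
{
	printf("start queue %u on sub-port %u\n", qid, sdev->port_id);
	return 0;
}

static void
stop_queue_on(struct sub_device *sdev, unsigned int qid)
{
	printf("stop queue %u on sub-port %u\n", qid, sdev->port_id);
}

static int
queue_start_all(struct fs_priv *priv, unsigned int qid)
{
	struct sub_device *sdev;
	unsigned int i;
	int ret;

	pthread_mutex_lock(&priv->lock);
	FOREACH_SUBDEV_STATE(sdev, i, priv, DEV_ACTIVE) {
		ret = start_queue_on(sdev, qid);
		if (ret < 0) {
			/* Roll back: stop the queue everywhere before
			 * propagating the error, as fs_tx_queue_start()
			 * does via fs_tx_queue_stop(). */
			FOREACH_SUBDEV_STATE(sdev, i, priv, DEV_ACTIVE)
				stop_queue_on(sdev, qid);
			pthread_mutex_unlock(&priv->lock);
			return ret;
		}
	}
	/* Only now would dev->data->tx_queue_state[qid] flip to
	 * RTE_ETH_QUEUE_STATE_STARTED. */
	pthread_mutex_unlock(&priv->lock);
	return 0;
}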
363 struct rte_eth_dev *dev; in fs_rx_queue_release() local
371 dev = &rte_eth_devices[rxq->priv->data->port_id]; in fs_rx_queue_release()
372 fs_lock(dev, 0); in fs_rx_queue_release()
375 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_release()
382 dev->data->rx_queues[rxq->qid] = NULL; in fs_rx_queue_release()
384 fs_unlock(dev, 0); in fs_rx_queue_release()
388 fs_rx_queue_setup(struct rte_eth_dev *dev, in fs_rx_queue_setup() argument
410 fs_lock(dev, 0); in fs_rx_queue_setup()
412 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { in fs_rx_queue_setup()
416 fs_unlock(dev, 0); in fs_rx_queue_setup()
421 rxq = dev->data->rx_queues[rx_queue_id]; in fs_rx_queue_setup()
424 dev->data->rx_queues[rx_queue_id] = NULL; in fs_rx_queue_setup()
428 sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, in fs_rx_queue_setup()
431 fs_unlock(dev, 0); in fs_rx_queue_setup()
434 FOREACH_SUBDEV(sdev, i, dev) in fs_rx_queue_setup()
441 rxq->priv = PRIV(dev); in fs_rx_queue_setup()
442 rxq->sdev = PRIV(dev)->subs; in fs_rx_queue_setup()
445 fs_unlock(dev, 0); in fs_rx_queue_setup()
449 dev->data->rx_queues[rx_queue_id] = rxq; in fs_rx_queue_setup()
450 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_setup()
460 fs_unlock(dev, 0); in fs_rx_queue_setup()
464 fs_unlock(dev, 0); in fs_rx_queue_setup()
469 fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx) in fs_rx_intr_enable() argument
477 fs_lock(dev, 0); in fs_rx_intr_enable()
478 if (idx >= dev->data->nb_rx_queues) { in fs_rx_intr_enable()
482 rxq = dev->data->rx_queues[idx]; in fs_rx_intr_enable()
488 if (PRIV(dev)->rxp.sstate != SS_RUNNING) { in fs_rx_intr_enable()
494 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_intr_enable()
501 fs_unlock(dev, 0); in fs_rx_intr_enable()
508 fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx) in fs_rx_intr_disable() argument
517 fs_lock(dev, 0); in fs_rx_intr_disable()
518 if (idx >= dev->data->nb_rx_queues) { in fs_rx_intr_disable()
522 rxq = dev->data->rx_queues[idx]; in fs_rx_intr_disable()
528 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_intr_disable()
538 fs_unlock(dev, 0); in fs_rx_intr_disable()
547 struct rte_eth_dev *dev; in fs_tx_queue_release() local
555 dev = &rte_eth_devices[txq->priv->data->port_id]; in fs_tx_queue_release()
556 fs_lock(dev, 0); in fs_tx_queue_release()
557 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_release()
564 dev->data->tx_queues[txq->qid] = NULL; in fs_tx_queue_release()
566 fs_unlock(dev, 0); in fs_tx_queue_release()
570 fs_tx_queue_setup(struct rte_eth_dev *dev, in fs_tx_queue_setup() argument
581 fs_lock(dev, 0); in fs_tx_queue_setup()
583 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { in fs_tx_queue_setup()
587 fs_unlock(dev, 0); in fs_tx_queue_setup()
592 txq = dev->data->tx_queues[tx_queue_id]; in fs_tx_queue_setup()
595 dev->data->tx_queues[tx_queue_id] = NULL; in fs_tx_queue_setup()
599 sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, in fs_tx_queue_setup()
602 fs_unlock(dev, 0); in fs_tx_queue_setup()
605 FOREACH_SUBDEV(sdev, i, dev) in fs_tx_queue_setup()
611 txq->priv = PRIV(dev); in fs_tx_queue_setup()
612 dev->data->tx_queues[tx_queue_id] = txq; in fs_tx_queue_setup()
613 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_setup()
623 fs_unlock(dev, 0); in fs_tx_queue_setup()
627 fs_unlock(dev, 0); in fs_tx_queue_setup()
632 fs_dev_free_queues(struct rte_eth_dev *dev) in fs_dev_free_queues() argument
636 for (i = 0; i < dev->data->nb_rx_queues; i++) { in fs_dev_free_queues()
637 fs_rx_queue_release(dev->data->rx_queues[i]); in fs_dev_free_queues()
638 dev->data->rx_queues[i] = NULL; in fs_dev_free_queues()
640 dev->data->nb_rx_queues = 0; in fs_dev_free_queues()
641 for (i = 0; i < dev->data->nb_tx_queues; i++) { in fs_dev_free_queues()
642 fs_tx_queue_release(dev->data->tx_queues[i]); in fs_dev_free_queues()
643 dev->data->tx_queues[i] = NULL; in fs_dev_free_queues()
645 dev->data->nb_tx_queues = 0; in fs_dev_free_queues()
649 failsafe_eth_dev_close(struct rte_eth_dev *dev) in failsafe_eth_dev_close() argument
655 fs_lock(dev, 0); in failsafe_eth_dev_close()
656 failsafe_hotplug_alarm_cancel(dev); in failsafe_eth_dev_close()
657 if (PRIV(dev)->state == DEV_STARTED) { in failsafe_eth_dev_close()
658 ret = dev->dev_ops->dev_stop(dev); in failsafe_eth_dev_close()
660 fs_unlock(dev, 0); in failsafe_eth_dev_close()
664 PRIV(dev)->state = DEV_ACTIVE - 1; in failsafe_eth_dev_close()
665 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in failsafe_eth_dev_close()
677 failsafe_eth_new_event_callback, dev); in failsafe_eth_dev_close()
679 fs_unlock(dev, 0); in failsafe_eth_dev_close()
682 fs_dev_free_queues(dev); in failsafe_eth_dev_close()
683 err = failsafe_eal_uninit(dev); in failsafe_eth_dev_close()
688 failsafe_args_free(dev); in failsafe_eth_dev_close()
689 rte_free(PRIV(dev)->subs); in failsafe_eth_dev_close()
690 rte_free(PRIV(dev)->mcast_addrs); in failsafe_eth_dev_close()
692 dev->data->mac_addrs = NULL; in failsafe_eth_dev_close()
693 fs_unlock(dev, 0); in failsafe_eth_dev_close()
694 err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex); in failsafe_eth_dev_close()
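The close path shows a strict ordering: the hot-plug alarm is cancelled and the port stopped before any state is demoted, sub-devices are closed before queues and private arrays are freed, and the hot-plug mutex is destroyed only after the final fs_unlock() (source lines 693-694 above). A comment-level sketch of that ordering, again on the stand-in fs_priv; the numbered steps are a reading of the fragments above, not the literal function body.

/* Teardown ordering visible in failsafe_eth_dev_close(). */
static int
dev_close_template(struct fs_priv *priv)
{
	pthread_mutex_lock(&priv->lock);
	/* 1. Cancel the hot-plug alarm so no plug-in races the close. */
	/* 2. If still DEV_STARTED, stop the port first (dev_ops->dev_stop). */
	/* 3. Demote the fail-safe state below DEV_ACTIVE. */
	/* 4. Close every active sub-device; drop the event callback. */
	/* 5. Free queues, uninit EAL state, free subs/mcast_addrs arrays. */
	pthread_mutex_unlock(&priv->lock);
	/* 6. Destroy the lock only after the final unlock. */
	return pthread_mutex_destroy(&priv->lock);
}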
703 fs_promiscuous_enable(struct rte_eth_dev *dev) in fs_promiscuous_enable() argument
709 fs_lock(dev, 0); in fs_promiscuous_enable()
710 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_enable()
721 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_enable()
729 fs_unlock(dev, 0); in fs_promiscuous_enable()
735 fs_promiscuous_disable(struct rte_eth_dev *dev) in fs_promiscuous_disable() argument
741 fs_lock(dev, 0); in fs_promiscuous_disable()
742 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_disable()
753 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_disable()
761 fs_unlock(dev, 0); in fs_promiscuous_disable()
767 fs_allmulticast_enable(struct rte_eth_dev *dev) in fs_allmulticast_enable() argument
773 fs_lock(dev, 0); in fs_allmulticast_enable()
774 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_enable()
785 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_enable()
793 fs_unlock(dev, 0); in fs_allmulticast_enable()
799 fs_allmulticast_disable(struct rte_eth_dev *dev) in fs_allmulticast_disable() argument
805 fs_lock(dev, 0); in fs_allmulticast_disable()
806 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_disable()
817 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_disable()
825 fs_unlock(dev, 0); in fs_allmulticast_disable()
831 fs_link_update(struct rte_eth_dev *dev, in fs_link_update() argument
838 fs_lock(dev, 0); in fs_link_update()
839 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_link_update()
846 fs_unlock(dev, 0); in fs_link_update()
850 if (TX_SUBDEV(dev)) { in fs_link_update()
854 l1 = &dev->data->dev_link; in fs_link_update()
855 l2 = &ETH(TX_SUBDEV(dev))->data->dev_link; in fs_link_update()
858 fs_unlock(dev, 0); in fs_link_update()
862 fs_unlock(dev, 0); in fs_link_update()
867 fs_stats_get(struct rte_eth_dev *dev, in fs_stats_get() argument
875 fs_lock(dev, 0); in fs_stats_get()
876 rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats)); in fs_stats_get()
877 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_stats_get()
891 fs_unlock(dev, 0); in fs_stats_get()
898 fs_unlock(dev, 0); in fs_stats_get()
903 fs_stats_reset(struct rte_eth_dev *dev) in fs_stats_reset() argument
909 fs_lock(dev, 0); in fs_stats_reset()
910 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_stats_reset()
918 fs_unlock(dev, 0); in fs_stats_reset()
923 memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats)); in fs_stats_reset()
924 fs_unlock(dev, 0); in fs_stats_reset()
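fs_stats_get() seeds the result from PRIV(dev)->stats_accumulator before adding each active sub-device's live counters, and fs_stats_reset() clears both the sub-device counters and the accumulator; the accumulator is what keeps counters monotonic across sub-device hot-unplug. A small sketch of that read path; struct eth_stats is a simplified stand-in for rte_eth_stats, and only a few of its counters are shown.

#include <string.h>

/* Simplified stand-in for rte_eth_stats. */
struct eth_stats {
	unsigned long ipackets, opackets, ibytes, obytes;
};

/* Read path of fs_stats_get(): start from the accumulator (counters
 * inherited from removed sub-devices), then add live sub-port stats. */
static void
stats_get(const struct eth_stats *accumulator,
	  const struct eth_stats *live, unsigned int nb_live,
	  struct eth_stats *out)
{
	unsigned int i;

	memcpy(out, accumulator, sizeof(*out));
	for (i = 0; i < nb_live; i++) {
		out->ipackets += live[i].ipackets;
		out->opackets += live[i].opackets;
		out->ibytes += live[i].ibytes;
		out->obytes += live[i].obytes;
	}
}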
930 __fs_xstats_count(struct rte_eth_dev *dev) in __fs_xstats_count() argument
937 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in __fs_xstats_count()
948 __fs_xstats_get_names(struct rte_eth_dev *dev, in __fs_xstats_get_names() argument
958 return __fs_xstats_count(dev); in __fs_xstats_get_names()
960 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in __fs_xstats_get_names()
993 fs_xstats_get_names(struct rte_eth_dev *dev, in fs_xstats_get_names() argument
999 fs_lock(dev, 0); in fs_xstats_get_names()
1000 ret = __fs_xstats_get_names(dev, xstats_names, limit); in fs_xstats_get_names()
1001 fs_unlock(dev, 0); in fs_xstats_get_names()
1006 __fs_xstats_get(struct rte_eth_dev *dev, in __fs_xstats_get() argument
1015 ret = __fs_xstats_count(dev); in __fs_xstats_get()
1024 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in __fs_xstats_get()
1045 fs_xstats_get(struct rte_eth_dev *dev, in fs_xstats_get() argument
1051 fs_lock(dev, 0); in fs_xstats_get()
1052 ret = __fs_xstats_get(dev, xstats, n); in fs_xstats_get()
1053 fs_unlock(dev, 0); in fs_xstats_get()
1060 fs_xstats_reset(struct rte_eth_dev *dev) in fs_xstats_reset() argument
1066 fs_lock(dev, 0); in fs_xstats_reset()
1067 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_xstats_reset()
1072 fs_unlock(dev, 0); in fs_xstats_reset()
1158 fs_dev_infos_get(struct rte_eth_dev *dev, in fs_dev_infos_get() argument
1230 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { in fs_dev_infos_get()
1245 fs_dev_supported_ptypes_get(struct rte_eth_dev *dev) in fs_dev_supported_ptypes_get() argument
1251 fs_lock(dev, 0); in fs_dev_supported_ptypes_get()
1252 sdev = TX_SUBDEV(dev); in fs_dev_supported_ptypes_get()
1272 fs_unlock(dev, 0); in fs_dev_supported_ptypes_get()
1277 fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) in fs_mtu_set() argument
1283 fs_lock(dev, 0); in fs_mtu_set()
1284 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_mtu_set()
1290 fs_unlock(dev, 0); in fs_mtu_set()
1294 fs_unlock(dev, 0); in fs_mtu_set()
1299 fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) in fs_vlan_filter_set() argument
1305 fs_lock(dev, 0); in fs_vlan_filter_set()
1306 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_vlan_filter_set()
1312 fs_unlock(dev, 0); in fs_vlan_filter_set()
1316 fs_unlock(dev, 0); in fs_vlan_filter_set()
1321 fs_flow_ctrl_get(struct rte_eth_dev *dev, in fs_flow_ctrl_get() argument
1327 fs_lock(dev, 0); in fs_flow_ctrl_get()
1328 sdev = TX_SUBDEV(dev); in fs_flow_ctrl_get()
1339 fs_unlock(dev, 0); in fs_flow_ctrl_get()
1344 fs_flow_ctrl_set(struct rte_eth_dev *dev, in fs_flow_ctrl_set() argument
1351 fs_lock(dev, 0); in fs_flow_ctrl_set()
1352 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_flow_ctrl_set()
1358 fs_unlock(dev, 0); in fs_flow_ctrl_set()
1362 fs_unlock(dev, 0); in fs_flow_ctrl_set()
1367 fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) in fs_mac_addr_remove() argument
1372 fs_lock(dev, 0); in fs_mac_addr_remove()
1376 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) in fs_mac_addr_remove()
1378 &dev->data->mac_addrs[index]); in fs_mac_addr_remove()
1379 PRIV(dev)->mac_addr_pool[index] = 0; in fs_mac_addr_remove()
1380 fs_unlock(dev, 0); in fs_mac_addr_remove()
1384 fs_mac_addr_add(struct rte_eth_dev *dev, in fs_mac_addr_add() argument
1394 fs_lock(dev, 0); in fs_mac_addr_add()
1395 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_mac_addr_add()
1400 fs_unlock(dev, 0); in fs_mac_addr_add()
1404 if (index >= PRIV(dev)->nb_mac_addr) { in fs_mac_addr_add()
1406 PRIV(dev)->nb_mac_addr = index; in fs_mac_addr_add()
1408 PRIV(dev)->mac_addr_pool[index] = vmdq; in fs_mac_addr_add()
1409 fs_unlock(dev, 0); in fs_mac_addr_add()
1414 fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) in fs_mac_addr_set() argument
1420 fs_lock(dev, 0); in fs_mac_addr_set()
1421 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_mac_addr_set()
1427 fs_unlock(dev, 0); in fs_mac_addr_set()
1431 fs_unlock(dev, 0); in fs_mac_addr_set()
1437 fs_set_mc_addr_list(struct rte_eth_dev *dev, in fs_set_mc_addr_list() argument
1445 fs_lock(dev, 0); in fs_set_mc_addr_list()
1447 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_set_mc_addr_list()
1457 mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs, in fs_set_mc_addr_list()
1458 nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0); in fs_set_mc_addr_list()
1464 nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0])); in fs_set_mc_addr_list()
1465 PRIV(dev)->nb_mcast_addr = nb_mc_addr; in fs_set_mc_addr_list()
1466 PRIV(dev)->mcast_addrs = mcast_addrs; in fs_set_mc_addr_list()
1468 fs_unlock(dev, 0); in fs_set_mc_addr_list()
1472 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_set_mc_addr_list()
1474 PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr); in fs_set_mc_addr_list()
1481 fs_unlock(dev, 0); in fs_set_mc_addr_list()
1486 fs_rss_hash_update(struct rte_eth_dev *dev, in fs_rss_hash_update() argument
1493 fs_lock(dev, 0); in fs_rss_hash_update()
1494 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rss_hash_update()
1501 fs_unlock(dev, 0); in fs_rss_hash_update()
1505 fs_unlock(dev, 0); in fs_rss_hash_update()
1511 fs_filter_ctrl(struct rte_eth_dev *dev __rte_unused, in fs_filter_ctrl()