Lines matching refs:dev — DPDK fail-safe PMD, drivers/net/failsafe/failsafe_ops.c. Each match reads: <source line> <code> in <enclosing function>().
22 fs_dev_configure(struct rte_eth_dev *dev) in fs_dev_configure() argument
28 fs_lock(dev, 0); in fs_dev_configure()
29 FOREACH_SUBDEV(sdev, i, dev) { in fs_dev_configure()
35 !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE)) in fs_dev_configure()
42 dev->data->dev_conf.intr_conf.rmv = 1; in fs_dev_configure()
46 lsc_enabled = dev->data->dev_conf.intr_conf.lsc; in fs_dev_configure()
52 dev->data->dev_conf.intr_conf.lsc = 1; in fs_dev_configure()
55 dev->data->dev_conf.intr_conf.lsc = 0; in fs_dev_configure()
59 dev->data->nb_rx_queues, in fs_dev_configure()
60 dev->data->nb_tx_queues, in fs_dev_configure()
61 &dev->data->dev_conf); in fs_dev_configure()
66 fs_unlock(dev, 0); in fs_dev_configure()
80 dev->data->dev_conf.intr_conf.rmv = 0; in fs_dev_configure()
85 dev); in fs_dev_configure()
92 dev->data->dev_conf.intr_conf.lsc = lsc_enabled; in fs_dev_configure()
95 if (PRIV(dev)->state < DEV_ACTIVE) in fs_dev_configure()
96 PRIV(dev)->state = DEV_ACTIVE; in fs_dev_configure()
97 fs_unlock(dev, 0); in fs_dev_configure()
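
fs_dev_configure() pushes the fail-safe port's own queue counts and rte_eth_conf down to every usable sub-device, temporarily adjusting the intr_conf.rmv/lsc flags to what each sub-device actually supports and restoring the application's settings afterwards (the rmv = 1 / rmv = 0 and lsc save/restore writes above). A minimal sketch of that per-sub-device step, assuming sub_port is a sub-device port id; this hypothetical helper is not the driver's code:

#include <rte_ethdev.h>

/* Mask the interrupt flags down to what this sub-device supports,
 * mirror the top-level config onto it, then restore the caller's
 * flags so later sub-devices see the original configuration. */
static int
sub_dev_configure(uint16_t sub_port, struct rte_eth_conf *conf,
		  uint16_t nb_rxq, uint16_t nb_txq,
		  int sub_supports_lsc, int sub_supports_rmv)
{
	int lsc_enabled = conf->intr_conf.lsc;
	int ret;

	conf->intr_conf.rmv = !!sub_supports_rmv;
	conf->intr_conf.lsc = lsc_enabled && sub_supports_lsc;
	ret = rte_eth_dev_configure(sub_port, nb_rxq, nb_txq, conf);
	conf->intr_conf.rmv = 0;		/* restore */
	conf->intr_conf.lsc = lsc_enabled;	/* restore */
	return ret;
}
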
102 fs_set_queues_state_start(struct rte_eth_dev *dev) in fs_set_queues_state_start() argument
108 for (i = 0; i < dev->data->nb_rx_queues; i++) { in fs_set_queues_state_start()
109 rxq = dev->data->rx_queues[i]; in fs_set_queues_state_start()
111 dev->data->rx_queue_state[i] = in fs_set_queues_state_start()
114 for (i = 0; i < dev->data->nb_tx_queues; i++) { in fs_set_queues_state_start()
115 txq = dev->data->tx_queues[i]; in fs_set_queues_state_start()
117 dev->data->tx_queue_state[i] = in fs_set_queues_state_start()
123 fs_dev_start(struct rte_eth_dev *dev) in fs_dev_start() argument
129 fs_lock(dev, 0); in fs_dev_start()
130 ret = failsafe_rx_intr_install(dev); in fs_dev_start()
132 fs_unlock(dev, 0); in fs_dev_start()
135 FOREACH_SUBDEV(sdev, i, dev) { in fs_dev_start()
143 fs_unlock(dev, 0); in fs_dev_start()
153 fs_unlock(dev, 0); in fs_dev_start()
158 if (PRIV(dev)->state < DEV_STARTED) { in fs_dev_start()
159 PRIV(dev)->state = DEV_STARTED; in fs_dev_start()
160 fs_set_queues_state_start(dev); in fs_dev_start()
162 fs_switch_dev(dev, NULL); in fs_dev_start()
163 fs_unlock(dev, 0); in fs_dev_start()
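
fs_dev_start() installs the Rx interrupt proxy first (failsafe_rx_intr_install()), then starts each sub-device, and only once that succeeds marks the top-level queues started and switches the active data path (fs_switch_dev()). The generic start-all-or-roll-back shape looks like the sketch below, assuming sub_ports[] lists the sub-device port ids; note the driver's actual error path differs, tolerating removed sub-devices via fs_err() instead of rolling back:

#include <rte_ethdev.h>

static int
fanout_start(const uint16_t *sub_ports, unsigned int nb_subs)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nb_subs; i++) {
		ret = rte_eth_dev_start(sub_ports[i]);
		if (ret < 0)
			goto rollback;
	}
	return 0;
rollback:
	/* Stop the sub-devices that did start before the failure. */
	while (i-- > 0)
		(void)rte_eth_dev_stop(sub_ports[i]);
	return ret;
}
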
168 fs_set_queues_state_stop(struct rte_eth_dev *dev) in fs_set_queues_state_stop() argument
172 for (i = 0; i < dev->data->nb_rx_queues; i++) in fs_set_queues_state_stop()
173 if (dev->data->rx_queues[i] != NULL) in fs_set_queues_state_stop()
174 dev->data->rx_queue_state[i] = in fs_set_queues_state_stop()
176 for (i = 0; i < dev->data->nb_tx_queues; i++) in fs_set_queues_state_stop()
177 if (dev->data->tx_queues[i] != NULL) in fs_set_queues_state_stop()
178 dev->data->tx_queue_state[i] = in fs_set_queues_state_stop()
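
Both fs_set_queues_state_*() helpers are pure bookkeeping: they walk the top-level rx_queue_state[]/tx_queue_state[] arrays and flip every configured queue to the matching RTE_ETH_QUEUE_STATE_* value. With the elided continuation lines filled in from the matches above, the stop variant is essentially the following (the start variant mirrors it with RTE_ETH_QUEUE_STATE_STARTED, skipping queues configured for deferred start); struct rte_eth_dev internals come from the ethdev driver headers (ethdev_driver.h in recent DPDK):

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
}
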
183 fs_dev_stop(struct rte_eth_dev *dev) in fs_dev_stop() argument
189 fs_lock(dev, 0); in fs_dev_stop()
190 PRIV(dev)->state = DEV_STARTED - 1; in fs_dev_stop()
191 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) { in fs_dev_stop()
196 PRIV(dev)->state = DEV_STARTED + 1; in fs_dev_stop()
197 fs_unlock(dev, 0); in fs_dev_stop()
203 failsafe_rx_intr_uninstall(dev); in fs_dev_stop()
204 fs_set_queues_state_stop(dev); in fs_dev_stop()
205 fs_unlock(dev, 0); in fs_dev_stop()
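
fs_dev_stop() brackets the teardown with the private state field (the DEV_STARTED - 1 / DEV_STARTED + 1 writes above) so concurrent hotplug handling never sees the port as cleanly started mid-teardown, then stops each started sub-device, uninstalls the Rx interrupt plumbing, and flips the queue states. The stop fan-out itself, as a plain sketch over assumed sub_ports[]:

#include <rte_ethdev.h>

static int
fanout_stop(const uint16_t *sub_ports, unsigned int nb_subs)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nb_subs; i++) {
		ret = rte_eth_dev_stop(sub_ports[i]);
		if (ret < 0)
			return ret;	/* remaining ports keep running */
	}
	return 0;
}
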
211 fs_dev_set_link_up(struct rte_eth_dev *dev) in fs_dev_set_link_up() argument
217 fs_lock(dev, 0); in fs_dev_set_link_up()
218 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_dev_set_link_up()
224 fs_unlock(dev, 0); in fs_dev_set_link_up()
228 fs_unlock(dev, 0); in fs_dev_set_link_up()
233 fs_dev_set_link_down(struct rte_eth_dev *dev) in fs_dev_set_link_down() argument
239 fs_lock(dev, 0); in fs_dev_set_link_down()
240 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_dev_set_link_down()
246 fs_unlock(dev, 0); in fs_dev_set_link_down()
250 fs_unlock(dev, 0); in fs_dev_set_link_down()
255 fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) in fs_rx_queue_stop() argument
263 fs_lock(dev, 0); in fs_rx_queue_stop()
264 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_stop()
276 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; in fs_rx_queue_stop()
277 fs_unlock(dev, 0); in fs_rx_queue_stop()
283 fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) in fs_rx_queue_start() argument
289 fs_lock(dev, 0); in fs_rx_queue_start()
290 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_start()
297 fs_rx_queue_stop(dev, rx_queue_id); in fs_rx_queue_start()
298 fs_unlock(dev, 0); in fs_rx_queue_start()
302 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; in fs_rx_queue_start()
303 fs_unlock(dev, 0); in fs_rx_queue_start()
308 fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) in fs_tx_queue_stop() argument
316 fs_lock(dev, 0); in fs_tx_queue_stop()
317 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_stop()
329 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; in fs_tx_queue_stop()
330 fs_unlock(dev, 0); in fs_tx_queue_stop()
336 fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) in fs_tx_queue_start() argument
342 fs_lock(dev, 0); in fs_tx_queue_start()
343 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_start()
350 fs_tx_queue_stop(dev, tx_queue_id); in fs_tx_queue_start()
351 fs_unlock(dev, 0); in fs_tx_queue_start()
355 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; in fs_tx_queue_start()
356 fs_unlock(dev, 0); in fs_tx_queue_start()
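
The four per-queue ops share one pattern: fan the request out to every DEV_ACTIVE sub-device, and undo it on failure, which is why fs_rx_queue_start() calls fs_rx_queue_stop() (and the Tx pair likewise) before unlocking; only when every sub-device agrees is the top-level queue state flipped. A sketch of the same idea via the public per-queue API, assuming sub_ports[] holds the sub-device port ids:

#include <rte_ethdev.h>

static int
fanout_rx_queue_start(const uint16_t *sub_ports, unsigned int nb_subs,
		      uint16_t rxq_id)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nb_subs; i++) {
		ret = rte_eth_dev_rx_queue_start(sub_ports[i], rxq_id);
		if (ret < 0) {
			/* Roll back the sub-devices already started. */
			while (i-- > 0)
				(void)rte_eth_dev_rx_queue_stop(sub_ports[i],
								rxq_id);
			return ret;
		}
	}
	return 0;
}
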
361 fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) in fs_rx_queue_release() argument
365 struct rxq *rxq = dev->data->rx_queues[qid]; in fs_rx_queue_release()
369 fs_lock(dev, 0); in fs_rx_queue_release()
372 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_release()
377 dev->data->rx_queues[rxq->qid] = NULL; in fs_rx_queue_release()
379 fs_unlock(dev, 0); in fs_rx_queue_release()
383 fs_rx_queue_setup(struct rte_eth_dev *dev, in fs_rx_queue_setup() argument
412 fs_lock(dev, 0); in fs_rx_queue_setup()
414 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { in fs_rx_queue_setup()
418 fs_unlock(dev, 0); in fs_rx_queue_setup()
423 rxq = dev->data->rx_queues[rx_queue_id]; in fs_rx_queue_setup()
425 fs_rx_queue_release(dev, rx_queue_id); in fs_rx_queue_setup()
426 dev->data->rx_queues[rx_queue_id] = NULL; in fs_rx_queue_setup()
430 sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, in fs_rx_queue_setup()
433 fs_unlock(dev, 0); in fs_rx_queue_setup()
436 FOREACH_SUBDEV(sdev, i, dev) in fs_rx_queue_setup()
443 rxq->priv = PRIV(dev); in fs_rx_queue_setup()
444 rxq->sdev = PRIV(dev)->subs; in fs_rx_queue_setup()
447 fs_unlock(dev, 0); in fs_rx_queue_setup()
451 dev->data->rx_queues[rx_queue_id] = rxq; in fs_rx_queue_setup()
452 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_queue_setup()
462 fs_unlock(dev, 0); in fs_rx_queue_setup()
465 fs_rx_queue_release(dev, rx_queue_id); in fs_rx_queue_setup()
466 fs_unlock(dev, 0); in fs_rx_queue_setup()
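
fs_rx_queue_setup() first releases any queue already at that index, then allocates one struct rxq with a trailing per-sub-device rte_atomic64_t refcount array (the sizeof(rte_atomic64_t) * subs_tail term above), and finally performs the real setup on every active sub-device, releasing everything again if any sub-device fails. The per-sub-device step, sketched with the public API and the caller's mempool and rxconf:

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
fanout_rx_queue_setup(const uint16_t *sub_ports, unsigned int nb_subs,
		      uint16_t rxq_id, uint16_t nb_desc,
		      unsigned int socket_id,
		      const struct rte_eth_rxconf *rx_conf,
		      struct rte_mempool *mp)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nb_subs; i++) {
		ret = rte_eth_rx_queue_setup(sub_ports[i], rxq_id, nb_desc,
					     socket_id, rx_conf, mp);
		if (ret < 0)
			return ret;	/* caller releases the partial queue */
	}
	return 0;
}
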
471 fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx) in fs_rx_intr_enable() argument
479 fs_lock(dev, 0); in fs_rx_intr_enable()
480 if (idx >= dev->data->nb_rx_queues) { in fs_rx_intr_enable()
484 rxq = dev->data->rx_queues[idx]; in fs_rx_intr_enable()
490 if (PRIV(dev)->rxp.sstate != SS_RUNNING) { in fs_rx_intr_enable()
496 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_intr_enable()
503 fs_unlock(dev, 0); in fs_rx_intr_enable()
510 fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx) in fs_rx_intr_disable() argument
519 fs_lock(dev, 0); in fs_rx_intr_disable()
520 if (idx >= dev->data->nb_rx_queues) { in fs_rx_intr_disable()
524 rxq = dev->data->rx_queues[idx]; in fs_rx_intr_disable()
530 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rx_intr_disable()
540 fs_unlock(dev, 0); in fs_rx_intr_disable()
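
Rx interrupt enable/disable also fans out, but only after validating the queue index and, for enable, that the Rx event proxy service (rxp.sstate) is running, since the fail-safe port multiplexes its sub-devices' Rx events through that proxy. A sketch of the fan-out with the public API; the driver itself records a failing sub-device's error and keeps going rather than aborting on the first one:

#include <rte_ethdev.h>

static int
fanout_rx_intr_enable(const uint16_t *sub_ports, unsigned int nb_subs,
		      uint16_t rxq_id)
{
	unsigned int i;
	int rc = 0, ret;

	for (i = 0; i < nb_subs; i++) {
		ret = rte_eth_dev_rx_intr_enable(sub_ports[i], rxq_id);
		if (ret < 0)
			rc = ret;	/* remember the error, continue */
	}
	return rc;
}
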
547 fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) in fs_tx_queue_release() argument
551 struct txq *txq = dev->data->tx_queues[qid]; in fs_tx_queue_release()
555 fs_lock(dev, 0); in fs_tx_queue_release()
556 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_release()
561 dev->data->tx_queues[txq->qid] = NULL; in fs_tx_queue_release()
563 fs_unlock(dev, 0); in fs_tx_queue_release()
567 fs_tx_queue_setup(struct rte_eth_dev *dev, in fs_tx_queue_setup() argument
578 fs_lock(dev, 0); in fs_tx_queue_setup()
580 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { in fs_tx_queue_setup()
584 fs_unlock(dev, 0); in fs_tx_queue_setup()
589 txq = dev->data->tx_queues[tx_queue_id]; in fs_tx_queue_setup()
591 fs_tx_queue_release(dev, tx_queue_id); in fs_tx_queue_setup()
592 dev->data->tx_queues[tx_queue_id] = NULL; in fs_tx_queue_setup()
596 sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, in fs_tx_queue_setup()
599 fs_unlock(dev, 0); in fs_tx_queue_setup()
602 FOREACH_SUBDEV(sdev, i, dev) in fs_tx_queue_setup()
608 txq->priv = PRIV(dev); in fs_tx_queue_setup()
609 dev->data->tx_queues[tx_queue_id] = txq; in fs_tx_queue_setup()
610 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_tx_queue_setup()
620 fs_unlock(dev, 0); in fs_tx_queue_setup()
623 fs_tx_queue_release(dev, tx_queue_id); in fs_tx_queue_setup()
624 fs_unlock(dev, 0); in fs_tx_queue_setup()
629 fs_dev_free_queues(struct rte_eth_dev *dev) in fs_dev_free_queues() argument
633 for (i = 0; i < dev->data->nb_rx_queues; i++) { in fs_dev_free_queues()
634 fs_rx_queue_release(dev, i); in fs_dev_free_queues()
635 dev->data->rx_queues[i] = NULL; in fs_dev_free_queues()
637 dev->data->nb_rx_queues = 0; in fs_dev_free_queues()
638 for (i = 0; i < dev->data->nb_tx_queues; i++) { in fs_dev_free_queues()
639 fs_tx_queue_release(dev, i); in fs_dev_free_queues()
640 dev->data->tx_queues[i] = NULL; in fs_dev_free_queues()
642 dev->data->nb_tx_queues = 0; in fs_dev_free_queues()
646 failsafe_eth_dev_close(struct rte_eth_dev *dev) in failsafe_eth_dev_close() argument
652 fs_lock(dev, 0); in failsafe_eth_dev_close()
653 failsafe_hotplug_alarm_cancel(dev); in failsafe_eth_dev_close()
654 if (PRIV(dev)->state == DEV_STARTED) { in failsafe_eth_dev_close()
655 ret = dev->dev_ops->dev_stop(dev); in failsafe_eth_dev_close()
657 fs_unlock(dev, 0); in failsafe_eth_dev_close()
661 PRIV(dev)->state = DEV_ACTIVE - 1; in failsafe_eth_dev_close()
662 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in failsafe_eth_dev_close()
674 failsafe_eth_new_event_callback, dev); in failsafe_eth_dev_close()
676 fs_unlock(dev, 0); in failsafe_eth_dev_close()
679 fs_dev_free_queues(dev); in failsafe_eth_dev_close()
680 err = failsafe_eal_uninit(dev); in failsafe_eth_dev_close()
685 failsafe_args_free(dev); in failsafe_eth_dev_close()
686 rte_free(PRIV(dev)->subs); in failsafe_eth_dev_close()
687 rte_free(PRIV(dev)->mcast_addrs); in failsafe_eth_dev_close()
689 dev->data->mac_addrs = NULL; in failsafe_eth_dev_close()
690 fs_unlock(dev, 0); in failsafe_eth_dev_close()
691 err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex); in failsafe_eth_dev_close()
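
failsafe_eth_dev_close() tears down in a strict order, visible in the matches above: cancel the hotplug alarm, stop the port if it is DEV_STARTED, close every active sub-device, unregister the new-device event callback, free the top-level queues, uninitialize the per-sub-device EAL state, free the argument strings and the subs/mcast_addrs arrays, and last destroy the hotplug mutex. The sub-device part of that, as a sketch over assumed sub_ports[]:

#include <rte_ethdev.h>

/* rte_eth_dev_close() releases each sub-port's queues and data,
 * mirroring the per-sub-device loop in failsafe_eth_dev_close(). */
static void
fanout_close(const uint16_t *sub_ports, unsigned int nb_subs)
{
	unsigned int i;

	for (i = 0; i < nb_subs; i++)
		(void)rte_eth_dev_close(sub_ports[i]);
}
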
700 fs_promiscuous_enable(struct rte_eth_dev *dev) in fs_promiscuous_enable() argument
706 fs_lock(dev, 0); in fs_promiscuous_enable()
707 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_enable()
718 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_enable()
726 fs_unlock(dev, 0); in fs_promiscuous_enable()
732 fs_promiscuous_disable(struct rte_eth_dev *dev) in fs_promiscuous_disable() argument
738 fs_lock(dev, 0); in fs_promiscuous_disable()
739 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_disable()
750 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_promiscuous_disable()
758 fs_unlock(dev, 0); in fs_promiscuous_disable()
764 fs_allmulticast_enable(struct rte_eth_dev *dev) in fs_allmulticast_enable() argument
770 fs_lock(dev, 0); in fs_allmulticast_enable()
771 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_enable()
782 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_enable()
790 fs_unlock(dev, 0); in fs_allmulticast_enable()
796 fs_allmulticast_disable(struct rte_eth_dev *dev) in fs_allmulticast_disable() argument
802 fs_lock(dev, 0); in fs_allmulticast_disable()
803 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_disable()
814 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_allmulticast_disable()
822 fs_unlock(dev, 0); in fs_allmulticast_disable()
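
All four mode ops share a two-loop shape, visible in the paired FOREACH_SUBDEV_STATE traversals above: the first loop applies the change to every active sub-device, and if one fails, the second loop restores the previous mode on those already changed, so the sub-devices never end up split between modes. In public-API terms, a sketch for promiscuous enable:

#include <rte_ethdev.h>

static int
fanout_promiscuous_enable(const uint16_t *sub_ports, unsigned int nb_subs)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nb_subs; i++) {
		ret = rte_eth_promiscuous_enable(sub_ports[i]);
		if (ret < 0) {
			/* Restore the old mode where it was changed. */
			while (i-- > 0)
				(void)rte_eth_promiscuous_disable(sub_ports[i]);
			return ret;
		}
	}
	return 0;
}
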
828 fs_link_update(struct rte_eth_dev *dev, in fs_link_update() argument
835 fs_lock(dev, 0); in fs_link_update()
836 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_link_update()
843 fs_unlock(dev, 0); in fs_link_update()
847 if (TX_SUBDEV(dev)) { in fs_link_update()
851 l1 = &dev->data->dev_link; in fs_link_update()
852 l2 = &ETH(TX_SUBDEV(dev))->data->dev_link; in fs_link_update()
855 fs_unlock(dev, 0); in fs_link_update()
859 fs_unlock(dev, 0); in fs_link_update()
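
fs_link_update() asks each active sub-device to refresh its link, then reports the preferred Tx sub-device's link as the fail-safe port's own by copying TX_SUBDEV's rte_eth_link into dev->data->dev_link. The same read via the public API, where tx_port is an assumed sub-device port id:

#include <rte_ethdev.h>

static int
mirror_link(uint16_t tx_port, struct rte_eth_link *fs_link)
{
	struct rte_eth_link link;
	int ret;

	/* Non-blocking read of the sub-device's current link state. */
	ret = rte_eth_link_get_nowait(tx_port, &link);
	if (ret < 0)
		return ret;
	*fs_link = link;	/* e.g. &dev->data->dev_link in a PMD */
	return 0;
}
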
864 fs_stats_get(struct rte_eth_dev *dev, in fs_stats_get() argument
872 fs_lock(dev, 0); in fs_stats_get()
873 rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats)); in fs_stats_get()
874 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_stats_get()
888 fs_unlock(dev, 0); in fs_stats_get()
895 fs_unlock(dev, 0); in fs_stats_get()
900 fs_stats_reset(struct rte_eth_dev *dev) in fs_stats_reset() argument
906 fs_lock(dev, 0); in fs_stats_reset()
907 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_stats_reset()
915 fs_unlock(dev, 0); in fs_stats_reset()
920 memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats)); in fs_stats_reset()
921 fs_unlock(dev, 0); in fs_stats_reset()
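
Stats are additive across sub-devices: fs_stats_get() starts from the saved stats_accumulator (counters inherited from sub-devices that have since been removed) and adds each live sub-device's counters on top; fs_stats_reset() clears both the sub-devices and the accumulator. A sketch of the summation for the headline counters, under the same accumulator assumption; the driver's accumulation also covers the per-queue arrays:

#include <rte_ethdev.h>

static int
fanout_stats_get(const uint16_t *sub_ports, unsigned int nb_subs,
		 const struct rte_eth_stats *accumulator,
		 struct rte_eth_stats *out)
{
	struct rte_eth_stats sub;
	unsigned int i;
	int ret;

	*out = *accumulator;	/* counters of removed sub-devices */
	for (i = 0; i < nb_subs; i++) {
		ret = rte_eth_stats_get(sub_ports[i], &sub);
		if (ret < 0)
			return ret;
		out->ipackets += sub.ipackets;
		out->opackets += sub.opackets;
		out->ibytes += sub.ibytes;
		out->obytes += sub.obytes;
		out->imissed += sub.imissed;
		out->ierrors += sub.ierrors;
		out->oerrors += sub.oerrors;
		out->rx_nombuf += sub.rx_nombuf;
	}
	return 0;
}
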
927 __fs_xstats_count(struct rte_eth_dev *dev) in __fs_xstats_count() argument
934 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in __fs_xstats_count()
945 __fs_xstats_get_names(struct rte_eth_dev *dev, in __fs_xstats_get_names() argument
955 return __fs_xstats_count(dev); in __fs_xstats_get_names()
957 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in __fs_xstats_get_names()
990 fs_xstats_get_names(struct rte_eth_dev *dev, in fs_xstats_get_names() argument
996 fs_lock(dev, 0); in fs_xstats_get_names()
997 ret = __fs_xstats_get_names(dev, xstats_names, limit); in fs_xstats_get_names()
998 fs_unlock(dev, 0); in fs_xstats_get_names()
1003 __fs_xstats_get(struct rte_eth_dev *dev, in __fs_xstats_get() argument
1012 ret = __fs_xstats_count(dev); in __fs_xstats_get()
1021 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in __fs_xstats_get()
1042 fs_xstats_get(struct rte_eth_dev *dev, in fs_xstats_get() argument
1048 fs_lock(dev, 0); in fs_xstats_get()
1049 ret = __fs_xstats_get(dev, xstats, n); in fs_xstats_get()
1050 fs_unlock(dev, 0); in fs_xstats_get()
1057 fs_xstats_reset(struct rte_eth_dev *dev) in fs_xstats_reset() argument
1063 fs_lock(dev, 0); in fs_xstats_reset()
1064 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_xstats_reset()
1069 fs_unlock(dev, 0); in fs_xstats_reset()
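
The xstats path is the usual two-pass ethdev protocol: __fs_xstats_count() sums the counts from every active sub-device, __fs_xstats_get_names()/__fs_xstats_get() fill the caller's buffer or return the required count when it is absent or too small, and the public wrappers only add the fail-safe lock. The per-sub-device count query uses the standard convention that a NULL/zero-sized buffer returns the number of entries:

#include <rte_ethdev.h>

static int
fanout_xstats_count(const uint16_t *sub_ports, unsigned int nb_subs)
{
	unsigned int i;
	int count = 0, ret;

	for (i = 0; i < nb_subs; i++) {
		/* NULL buffer with size 0: returns the xstats count. */
		ret = rte_eth_xstats_get_names(sub_ports[i], NULL, 0);
		if (ret < 0)
			return ret;
		count += ret;
	}
	return count;
}
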
1157 fs_dev_infos_get(struct rte_eth_dev *dev, in fs_dev_infos_get() argument
1232 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { in fs_dev_infos_get()
1247 fs_dev_supported_ptypes_get(struct rte_eth_dev *dev) in fs_dev_supported_ptypes_get() argument
1253 fs_lock(dev, 0); in fs_dev_supported_ptypes_get()
1254 sdev = TX_SUBDEV(dev); in fs_dev_supported_ptypes_get()
1274 fs_unlock(dev, 0); in fs_dev_supported_ptypes_get()
1279 fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) in fs_mtu_set() argument
1285 fs_lock(dev, 0); in fs_mtu_set()
1286 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_mtu_set()
1292 fs_unlock(dev, 0); in fs_mtu_set()
1296 fs_unlock(dev, 0); in fs_mtu_set()
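
fs_mtu_set() is the simplest fan-out: apply the new MTU to every active sub-device and abort on the first hard failure, with no rollback (note the single FOREACH loop, unlike the paired loops of the mode ops). The same shape via the public API:

#include <rte_ethdev.h>

static int
fanout_set_mtu(const uint16_t *sub_ports, unsigned int nb_subs, uint16_t mtu)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nb_subs; i++) {
		ret = rte_eth_dev_set_mtu(sub_ports[i], mtu);
		if (ret < 0)
			return ret;	/* first failure aborts the fan-out */
	}
	return 0;
}
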
1301 fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) in fs_vlan_filter_set() argument
1307 fs_lock(dev, 0); in fs_vlan_filter_set()
1308 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_vlan_filter_set()
1314 fs_unlock(dev, 0); in fs_vlan_filter_set()
1318 fs_unlock(dev, 0); in fs_vlan_filter_set()
1323 fs_flow_ctrl_get(struct rte_eth_dev *dev, in fs_flow_ctrl_get() argument
1329 fs_lock(dev, 0); in fs_flow_ctrl_get()
1330 sdev = TX_SUBDEV(dev); in fs_flow_ctrl_get()
1341 fs_unlock(dev, 0); in fs_flow_ctrl_get()
1346 fs_flow_ctrl_set(struct rte_eth_dev *dev, in fs_flow_ctrl_set() argument
1353 fs_lock(dev, 0); in fs_flow_ctrl_set()
1354 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_flow_ctrl_set()
1360 fs_unlock(dev, 0); in fs_flow_ctrl_set()
1364 fs_unlock(dev, 0); in fs_flow_ctrl_set()
1369 fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) in fs_mac_addr_remove() argument
1374 fs_lock(dev, 0); in fs_mac_addr_remove()
1378 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) in fs_mac_addr_remove()
1380 &dev->data->mac_addrs[index]); in fs_mac_addr_remove()
1381 PRIV(dev)->mac_addr_pool[index] = 0; in fs_mac_addr_remove()
1382 fs_unlock(dev, 0); in fs_mac_addr_remove()
1386 fs_mac_addr_add(struct rte_eth_dev *dev, in fs_mac_addr_add() argument
1396 fs_lock(dev, 0); in fs_mac_addr_add()
1397 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_mac_addr_add()
1402 fs_unlock(dev, 0); in fs_mac_addr_add()
1406 if (index >= PRIV(dev)->nb_mac_addr) { in fs_mac_addr_add()
1408 PRIV(dev)->nb_mac_addr = index; in fs_mac_addr_add()
1410 PRIV(dev)->mac_addr_pool[index] = vmdq; in fs_mac_addr_add()
1411 fs_unlock(dev, 0); in fs_mac_addr_add()
1416 fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) in fs_mac_addr_set() argument
1422 fs_lock(dev, 0); in fs_mac_addr_set()
1423 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_mac_addr_set()
1429 fs_unlock(dev, 0); in fs_mac_addr_set()
1433 fs_unlock(dev, 0); in fs_mac_addr_set()
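
The MAC ops keep a shadow copy in the private data: fs_mac_addr_add() programs the address into every active sub-device and records the VMDq pool in mac_addr_pool[] (growing nb_mac_addr when a new index appears), fs_mac_addr_remove() erases it everywhere using the copy kept in dev->data->mac_addrs[], and fs_mac_addr_set() changes the default address the same way. The add fan-out, sketched with the public API:

#include <rte_ethdev.h>
#include <rte_ether.h>

static int
fanout_mac_addr_add(const uint16_t *sub_ports, unsigned int nb_subs,
		    struct rte_ether_addr *addr, uint32_t vmdq_pool)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nb_subs; i++) {
		ret = rte_eth_dev_mac_addr_add(sub_ports[i], addr, vmdq_pool);
		if (ret < 0)
			return ret;
	}
	return 0;
}
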
1439 fs_set_mc_addr_list(struct rte_eth_dev *dev, in fs_set_mc_addr_list() argument
1447 fs_lock(dev, 0); in fs_set_mc_addr_list()
1449 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_set_mc_addr_list()
1459 mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs, in fs_set_mc_addr_list()
1460 nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0); in fs_set_mc_addr_list()
1466 nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0])); in fs_set_mc_addr_list()
1467 PRIV(dev)->nb_mcast_addr = nb_mc_addr; in fs_set_mc_addr_list()
1468 PRIV(dev)->mcast_addrs = mcast_addrs; in fs_set_mc_addr_list()
1470 fs_unlock(dev, 0); in fs_set_mc_addr_list()
1474 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_set_mc_addr_list()
1476 PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr); in fs_set_mc_addr_list()
1483 fs_unlock(dev, 0); in fs_set_mc_addr_list()
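
fs_set_mc_addr_list() applies the new list to every active sub-device first, and only then rte_realloc()s and copies its cached copy (mcast_addrs/nb_mcast_addr); if anything fails, the second FOREACH loop above re-applies the previously cached list to roll the sub-devices back. The apply step, sketched with the public API:

#include <rte_ethdev.h>
#include <rte_ether.h>

static int
fanout_set_mc_addr_list(const uint16_t *sub_ports, unsigned int nb_subs,
			struct rte_ether_addr *mc_addrs, uint32_t nb_mc)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nb_subs; i++) {
		ret = rte_eth_dev_set_mc_addr_list(sub_ports[i],
						   mc_addrs, nb_mc);
		if (ret < 0)
			return ret;	/* caller re-applies the cached list */
	}
	return 0;
}
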
1488 fs_rss_hash_update(struct rte_eth_dev *dev, in fs_rss_hash_update() argument
1495 fs_lock(dev, 0); in fs_rss_hash_update()
1496 FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { in fs_rss_hash_update()
1503 fs_unlock(dev, 0); in fs_rss_hash_update()
1507 fs_unlock(dev, 0); in fs_rss_hash_update()
1513 fs_flow_ops_get(struct rte_eth_dev *dev __rte_unused, in fs_flow_ops_get()