Lines matching refs: eth_dev
60 lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop) in lio_send_rx_ctrl_cmd() argument
62 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_send_rx_ctrl_cmd()
74 ctrl_cmd.eth_dev = eth_dev; in lio_send_rx_ctrl_cmd()
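Nearly every op in this list opens the same way: the generic rte_eth_dev handle is converted to the driver's private lio_device, and control-path ops additionally stash the ethdev pointer in the command envelope so the completion callback can find the port again. A minimal sketch of that pattern, assuming LIO_DEV() is the usual dev_private cast; the command struct below is illustrative, not the driver's real layout:

#include <rte_ethdev.h>

struct lio_device; /* opaque here; the real definition lives in the driver */

/* Assumption: LIO_DEV() is the standard DPDK accessor for the private state
 * kept in eth_dev->data->dev_private. */
#define LIO_DEV(_eth_dev) \
	((struct lio_device *)(_eth_dev)->data->dev_private)

/* Hypothetical, trimmed control-command envelope mirroring the repeated
 * "ctrl_cmd.eth_dev = eth_dev" assignments in this listing. */
struct lio_ctrl_cmd_sketch {
	struct rte_eth_dev *eth_dev; /* lets the completion path reach the port */
	uint64_t cond;               /* completion flag the sender polls on */
};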
141 lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats, in lio_dev_xstats_get() argument
144 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_xstats_get()
216 lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev, in lio_dev_xstats_get_names() argument
220 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_xstats_get_names()
244 lio_dev_xstats_reset(struct rte_eth_dev *eth_dev) in lio_dev_xstats_reset() argument
246 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_xstats_reset()
265 ctrl_cmd.eth_dev = eth_dev; in lio_dev_xstats_reset()
284 RTE_FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_reset, 0); in lio_dev_xstats_reset()
285 return (*eth_dev->dev_ops->stats_reset)(eth_dev); in lio_dev_xstats_reset()
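Lines 284-285 are worth calling out: lio_dev_xstats_reset() clears the extended counters and then reuses the port's own stats_reset op rather than duplicating that logic. A sketch of the delegation, assuming the surrounding code has already reset the xstats:

#include <rte_dev.h>            /* RTE_FUNC_PTR_OR_ERR_RET() */
#include <rte_ethdev_driver.h>  /* driver-internal ethdev header of this era */

static int
xstats_reset_tail(struct rte_eth_dev *eth_dev)
{
	/* Treat a missing stats_reset op as already done (return 0)... */
	RTE_FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_reset, 0);
	/* ...otherwise clear the basic stats through the regular op. */
	return (*eth_dev->dev_ops->stats_reset)(eth_dev);
}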
290 lio_dev_stats_get(struct rte_eth_dev *eth_dev, in lio_dev_stats_get() argument
293 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_stats_get()
303 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { in lio_dev_stats_get()
322 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { in lio_dev_stats_get()
342 lio_dev_stats_reset(struct rte_eth_dev *eth_dev) in lio_dev_stats_reset() argument
344 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_stats_reset()
351 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { in lio_dev_stats_reset()
360 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { in lio_dev_stats_reset()
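lio_dev_stats_get() and lio_dev_stats_reset() both walk eth_dev->data->nb_tx_queues and nb_rx_queues, summing (or clearing) per-queue counters. A minimal sketch of the aggregation side, with hypothetical per-queue fields standing in for the driver's droq/instr_queue statistics:

#include <rte_ethdev.h>

/* Illustrative per-queue software counters, not the driver's real fields. */
struct q_stats_sketch {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t errors;
};

static void
aggregate_rx_stats(struct rte_eth_dev *eth_dev,
		   const struct q_stats_sketch *per_q,
		   struct rte_eth_stats *stats)
{
	uint16_t i;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		stats->ipackets += per_q[i].pkts;
		stats->ibytes += per_q[i].bytes;
		stats->ierrors += per_q[i].errors;
	}
}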
373 lio_dev_info_get(struct rte_eth_dev *eth_dev, in lio_dev_info_get() argument
376 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_info_get()
377 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); in lio_dev_info_get()
434 lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) in lio_dev_mtu_set() argument
436 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_mtu_set()
467 ctrl_cmd.eth_dev = eth_dev; in lio_dev_mtu_set()
485 eth_dev->data->dev_conf.rxmode.offloads |= in lio_dev_mtu_set()
488 eth_dev->data->dev_conf.rxmode.offloads &= in lio_dev_mtu_set()
491 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len; in lio_dev_mtu_set()
492 eth_dev->data->mtu = mtu; in lio_dev_mtu_set()
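Lines 485-492 show the pre-21.11 MTU bookkeeping: the jumbo-frame Rx offload is toggled from the new frame length, and both max_rx_pkt_len and mtu are written back into the port config. A sketch of that tail; the frame-length formula here assumes the usual Ethernet overhead (the driver derives frame_len earlier in the function):

#include <rte_ethdev.h>
#include <rte_ether.h>

static void
record_new_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	/* Assumed overhead: L2 header plus CRC. */
	uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	if (frame_len > RTE_ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
	eth_dev->data->mtu = mtu;
}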
498 lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev, in lio_dev_rss_reta_update() argument
502 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_rss_reta_update()
532 ctrl_cmd.eth_dev = eth_dev; in lio_dev_rss_reta_update()
571 lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev, in lio_dev_rss_reta_query() argument
575 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_rss_reta_query()
599 lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev, in lio_dev_rss_hash_conf_get() argument
602 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_rss_hash_conf_get()
637 lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev, in lio_dev_rss_hash_update() argument
640 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_rss_hash_update()
662 ctrl_cmd.eth_dev = eth_dev; in lio_dev_rss_hash_update()
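The RSS group (reta update/query plus hash-conf get/update) follows the standard ethdev RETA convention: the table arrives as 64-entry chunks of struct rte_eth_rss_reta_entry64, and only slots whose mask bit is set may be touched. A sketch of the masked copy an update op performs, with itable standing in for the driver's firmware-facing indirection table:

#include <rte_ethdev.h>

static void
copy_reta(const struct rte_eth_rss_reta_entry64 *reta_conf,
	  uint16_t reta_size, uint8_t *itable)
{
	uint16_t i;

	for (i = 0; i < reta_size; i++) {
		uint16_t chunk = i / RTE_RETA_GROUP_SIZE;
		uint16_t slot = i % RTE_RETA_GROUP_SIZE;

		/* Only entries selected by the caller's mask are applied. */
		if (reta_conf[chunk].mask & (1ULL << slot))
			itable[i] = (uint8_t)reta_conf[chunk].reta[slot];
	}
}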
771 lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev, in lio_dev_udp_tunnel_add() argument
774 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_udp_tunnel_add()
794 ctrl_cmd.eth_dev = eth_dev; in lio_dev_udp_tunnel_add()
828 lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev, in lio_dev_udp_tunnel_del() argument
831 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_udp_tunnel_del()
851 ctrl_cmd.eth_dev = eth_dev; in lio_dev_udp_tunnel_del()
873 lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) in lio_dev_vlan_filter_set() argument
875 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_vlan_filter_set()
890 ctrl_cmd.eth_dev = eth_dev; in lio_dev_vlan_filter_set()
928 lio_dev_link_update(struct rte_eth_dev *eth_dev, in lio_dev_link_update() argument
931 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_link_update()
944 return rte_eth_linkstatus_set(eth_dev, &link); in lio_dev_link_update()
961 return rte_eth_linkstatus_set(eth_dev, &link); in lio_dev_link_update()
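lio_dev_link_update() returns rte_eth_linkstatus_set() directly on both paths (lines 944 and 961); the helper stores the new link struct atomically, and its return value already encodes the changed/unchanged result a link_update op must report. A sketch, with illustrative speed and duplex values rather than what the firmware actually reports:

#include <string.h>
#include <rte_ethdev_driver.h> /* rte_eth_linkstatus_set(); ethdev_driver.h in later DPDK */

static int
report_link(struct rte_eth_dev *eth_dev, int up)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = up ? ETH_LINK_UP : ETH_LINK_DOWN;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = up ? ETH_SPEED_NUM_10G : ETH_SPEED_NUM_NONE;
	link.link_autoneg = ETH_LINK_AUTONEG;

	return rte_eth_linkstatus_set(eth_dev, &link);
}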
973 lio_change_dev_flag(struct rte_eth_dev *eth_dev) in lio_change_dev_flag() argument
975 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_change_dev_flag()
987 ctrl_cmd.eth_dev = eth_dev; in lio_change_dev_flag()
1009 lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) in lio_dev_promiscuous_enable() argument
1011 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_promiscuous_enable()
1026 return lio_change_dev_flag(eth_dev); in lio_dev_promiscuous_enable()
1030 lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) in lio_dev_promiscuous_disable() argument
1032 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_promiscuous_disable()
1047 return lio_change_dev_flag(eth_dev); in lio_dev_promiscuous_disable()
1051 lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) in lio_dev_allmulticast_enable() argument
1053 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_allmulticast_enable()
1062 return lio_change_dev_flag(eth_dev); in lio_dev_allmulticast_enable()
1066 lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) in lio_dev_allmulticast_disable() argument
1068 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_allmulticast_disable()
1077 return lio_change_dev_flag(eth_dev); in lio_dev_allmulticast_disable()
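All four promiscuous/allmulticast ops reduce to flag bookkeeping plus one shared helper: each flips a bit in the private state and returns lio_change_dev_flag(eth_dev), which pushes the whole flag word to firmware in a single control command. A sketch of that funnel, with a hypothetical flag bit and a stubbed helper:

#include <rte_ethdev.h>

#define IFFLAG_PROMISC_SKETCH 0x1 /* hypothetical bit, not the firmware value */

struct flag_state_sketch {
	uint64_t ifflags;
};

/* Stub: the real helper builds a control command carrying the flag word. */
static int
change_dev_flag(struct rte_eth_dev *eth_dev)
{
	(void)eth_dev;
	return 0;
}

static int
promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct flag_state_sketch *st = eth_dev->data->dev_private;

	st->ifflags |= IFFLAG_PROMISC_SKETCH;
	return change_dev_flag(eth_dev); /* one code path for all four ops */
}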
1081 lio_dev_rss_configure(struct rte_eth_dev *eth_dev) in lio_dev_rss_configure() argument
1083 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_rss_configure()
1092 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; in lio_dev_rss_configure()
1095 lio_dev_rss_hash_update(eth_dev, &rss_conf); in lio_dev_rss_configure()
1102 lio_dev_rss_hash_update(eth_dev, &rss_conf); in lio_dev_rss_configure()
1108 q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ? in lio_dev_rss_configure()
1109 i % eth_dev->data->nb_rx_queues : 0); in lio_dev_rss_configure()
1116 lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ); in lio_dev_rss_configure()
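Lines 1108-1109 are the whole RETA placement policy: each indirection-table slot is assigned round-robin across the configured Rx queues, collapsing to queue 0 when only one queue exists. A sketch of that fill:

#include <stdint.h>

static void
fill_reta_round_robin(uint8_t *itable, uint16_t table_sz, uint16_t nb_rx_queues)
{
	uint16_t i;

	/* Spread table entries evenly over the queues, as q_idx does above. */
	for (i = 0; i < table_sz; i++)
		itable[i] = (nb_rx_queues > 1) ? (uint8_t)(i % nb_rx_queues) : 0;
}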
1120 lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev) in lio_dev_mq_rx_configure() argument
1122 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_mq_rx_configure()
1126 switch (eth_dev->data->dev_conf.rxmode.mq_mode) { in lio_dev_mq_rx_configure()
1128 lio_dev_rss_configure(eth_dev); in lio_dev_mq_rx_configure()
1135 lio_dev_rss_hash_update(eth_dev, &rss_conf); in lio_dev_mq_rx_configure()
1164 lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, in lio_dev_rx_queue_setup() argument
1169 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_rx_queue_setup()
1184 if (eth_dev->data->rx_queues[q_no] != NULL) { in lio_dev_rx_queue_setup()
1185 lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]); in lio_dev_rx_queue_setup()
1186 eth_dev->data->rx_queues[q_no] = NULL; in lio_dev_rx_queue_setup()
1198 eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq]; in lio_dev_rx_queue_setup()
1248 lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, in lio_dev_tx_queue_setup() argument
1252 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_tx_queue_setup()
1264 if (eth_dev->data->tx_queues[q_no] != NULL) { in lio_dev_tx_queue_setup()
1265 lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]); in lio_dev_tx_queue_setup()
1266 eth_dev->data->tx_queues[q_no] = NULL; in lio_dev_tx_queue_setup()
1286 eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq]; in lio_dev_tx_queue_setup()
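Both queue-setup ops use the same release-then-store discipline (lines 1184-1198 and 1264-1286): a queue being reconfigured is freed before its slot is reused, and the new driver queue object is published straight into eth_dev->data. A sketch with a stubbed release helper:

#include <rte_ethdev.h>

static void
queue_release_sketch(void *q)
{
	(void)q; /* stand-in for lio_dev_rx_queue_release() */
}

static int
setup_rx_queue(struct rte_eth_dev *eth_dev, uint16_t q_no, void *new_q)
{
	/* Reconfiguration: drop the old queue before reusing the slot. */
	if (eth_dev->data->rx_queues[q_no] != NULL) {
		queue_release_sketch(eth_dev->data->rx_queues[q_no]);
		eth_dev->data->rx_queues[q_no] = NULL;
	}

	eth_dev->data->rx_queues[q_no] = new_q;
	return 0;
}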
1321 lio_dev_get_link_status(struct rte_eth_dev *eth_dev) in lio_dev_get_link_status() argument
1323 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_get_link_status()
1361 if (ls->s.mtu < eth_dev->data->mtu) { in lio_dev_get_link_status()
1364 eth_dev->data->mtu = ls->s.mtu; in lio_dev_get_link_status()
1367 lio_dev_link_update(eth_dev, 0); in lio_dev_get_link_status()
1382 lio_sync_link_state_check(void *eth_dev) in lio_sync_link_state_check() argument
1385 (((struct rte_eth_dev *)eth_dev)->data->dev_private); in lio_sync_link_state_check()
1388 lio_dev_get_link_status(eth_dev); in lio_sync_link_state_check()
1395 eth_dev); in lio_sync_link_state_check()
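lio_sync_link_state_check() is an EAL alarm callback: it takes the ethdev as a void pointer, recovers the private state, checks the firmware link status, and (line 1395) re-arms itself. A sketch of that self-rearming pattern, with an assumed poll interval:

#include <rte_alarm.h>
#include <rte_ethdev.h>

#define LINK_POLL_US_SKETCH (1000 * 1000) /* assumed 1 s; not the driver's value */

static void
link_poll_cb(void *arg)
{
	struct rte_eth_dev *eth_dev = arg;

	/* ... query firmware and update the link state here ... */

	/* Re-arm until the port is stopped; the stop path cancels the chain
	 * with rte_eal_alarm_cancel(link_poll_cb, eth_dev). */
	if (eth_dev->data->dev_started)
		rte_eal_alarm_set(LINK_POLL_US_SKETCH, link_poll_cb, eth_dev);
}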
1399 lio_dev_start(struct rte_eth_dev *eth_dev) in lio_dev_start() argument
1402 uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; in lio_dev_start()
1403 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_start()
1407 lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id); in lio_dev_start()
1412 if (lio_send_rx_ctrl_cmd(eth_dev, 1)) in lio_dev_start()
1420 lio_dev_mq_rx_configure(eth_dev); in lio_dev_start()
1430 eth_dev); in lio_dev_start()
1449 if (eth_dev->data->mtu != mtu) { in lio_dev_start()
1450 ret = lio_dev_mtu_set(eth_dev, mtu); in lio_dev_start()
1458 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev); in lio_dev_start()
1462 lio_send_rx_ctrl_cmd(eth_dev, 0); in lio_dev_start()
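lio_dev_start() shows the canonical start/unwind ordering: enable Rx in firmware, configure multiqueue/RSS, arm the link-status alarm, optionally fix up the MTU, and on any failure undo only the steps already taken (cancel the alarm, then send the Rx-stop command, as lines 1458-1462 do). A compressed sketch with placeholder helpers, not the driver's real sequence of steps:

#include <rte_alarm.h>
#include <rte_ethdev.h>

static int send_rx_ctrl(struct rte_eth_dev *d, int on) { (void)d; (void)on; return 0; }
static void configure_mq_rx(struct rte_eth_dev *d) { (void)d; }
static void link_check_cb(void *arg) { (void)arg; }

static int
port_start(struct rte_eth_dev *eth_dev)
{
	int ret;

	if (send_rx_ctrl(eth_dev, 1))   /* step 1: firmware Rx on */
		return -1;

	configure_mq_rx(eth_dev);       /* step 2: RSS / queue spread */

	/* step 3: start link polling (interval here is illustrative) */
	ret = rte_eal_alarm_set(1000, link_check_cb, eth_dev);
	if (ret)
		goto rx_off;

	return 0;

rx_off:
	send_rx_ctrl(eth_dev, 0);       /* unwind step 1 */
	return ret;
}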
1469 lio_dev_stop(struct rte_eth_dev *eth_dev) in lio_dev_stop() argument
1471 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_stop()
1473 lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id); in lio_dev_stop()
1474 eth_dev->data->dev_started = 0; in lio_dev_stop()
1479 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev); in lio_dev_stop()
1481 lio_send_rx_ctrl_cmd(eth_dev, 0); in lio_dev_stop()
1492 lio_dev_set_link_up(struct rte_eth_dev *eth_dev) in lio_dev_set_link_up() argument
1494 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_set_link_up()
1506 if (lio_send_rx_ctrl_cmd(eth_dev, 1)) { in lio_dev_set_link_up()
1512 eth_dev->data->dev_link.link_status = ETH_LINK_UP; in lio_dev_set_link_up()
1518 lio_dev_set_link_down(struct rte_eth_dev *eth_dev) in lio_dev_set_link_down() argument
1520 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_set_link_down()
1533 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; in lio_dev_set_link_down()
1535 if (lio_send_rx_ctrl_cmd(eth_dev, 0)) { in lio_dev_set_link_down()
1537 eth_dev->data->dev_link.link_status = ETH_LINK_UP; in lio_dev_set_link_down()
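lio_dev_set_link_down() (lines 1533-1537) updates the software link flag optimistically and rolls it back if the firmware command fails, so eth_dev->data never advertises a state the hardware refused. A sketch with a stubbed Rx-control helper:

#include <rte_ethdev.h>

static int send_rx_ctrl_stub(struct rte_eth_dev *d, int on) { (void)d; (void)on; return 0; }

static int
set_link_down_sketch(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (send_rx_ctrl_stub(eth_dev, 0)) {
		/* Firmware refused: restore the flag so software state stays honest. */
		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
		return -1;
	}

	return 0;
}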
1557 lio_dev_close(struct rte_eth_dev *eth_dev) in lio_dev_close() argument
1559 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_close()
1565 lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id); in lio_dev_close()
1568 ret = lio_dev_stop(eth_dev); in lio_dev_close()
1590 lio_dev_clear_queues(eth_dev); in lio_dev_close()
1599 lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev) in lio_enable_hw_tunnel_rx_checksum() argument
1601 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_enable_hw_tunnel_rx_checksum()
1613 ctrl_cmd.eth_dev = eth_dev; in lio_enable_hw_tunnel_rx_checksum()
1633 lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev) in lio_enable_hw_tunnel_tx_checksum() argument
1635 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_enable_hw_tunnel_tx_checksum()
1647 ctrl_cmd.eth_dev = eth_dev; in lio_enable_hw_tunnel_tx_checksum()
1664 lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq, in lio_send_queue_count_update() argument
1667 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_send_queue_count_update()
1685 ctrl_cmd.eth_dev = eth_dev; in lio_send_queue_count_update()
1707 lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq) in lio_reconf_queues() argument
1709 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_reconf_queues()
1714 if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq)) in lio_reconf_queues()
1721 ret = lio_dev_stop(eth_dev); in lio_reconf_queues()
1736 lio_dev_configure(struct rte_eth_dev *eth_dev) in lio_dev_configure() argument
1738 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_dev_configure()
1749 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) in lio_dev_configure()
1750 eth_dev->data->dev_conf.rxmode.offloads |= in lio_dev_configure()
1757 return lio_reconf_queues(eth_dev, in lio_dev_configure()
1758 eth_dev->data->nb_tx_queues, in lio_dev_configure()
1759 eth_dev->data->nb_rx_queues); in lio_dev_configure()
1761 lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues; in lio_dev_configure()
1762 lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues; in lio_dev_configure()
1765 lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues; in lio_dev_configure()
1766 lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues; in lio_dev_configure()
1834 eth_dev->data->port_id, in lio_dev_configure()
1869 &eth_dev->data->mac_addrs[0]); in lio_dev_configure()
1872 lio_enable_hw_tunnel_rx_checksum(eth_dev); in lio_dev_configure()
1873 lio_enable_hw_tunnel_tx_checksum(eth_dev); in lio_dev_configure()
1889 lio_dev_link_update(eth_dev, 0); in lio_dev_configure()
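One detail of lio_dev_configure() worth noting (lines 1749-1751): when RSS is requested through mq_mode, an Rx offload is switched on implicitly. The offload name is truncated in this listing, so the sketch below assumes it is DEV_RX_OFFLOAD_RSS_HASH, the usual pattern since that flag appeared in DPDK 19.11:

#include <rte_ethdev.h>

static void
enable_rss_hash_offload(struct rte_eth_dev *eth_dev)
{
	/* Assumption: the truncated offload at line 1750 is RSS_HASH. */
	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_RSS_HASH;
}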
2066 lio_eth_dev_uninit(struct rte_eth_dev *eth_dev) in lio_eth_dev_uninit() argument
2068 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_eth_dev_uninit()
2082 lio_eth_dev_init(struct rte_eth_dev *eth_dev) in lio_eth_dev_init() argument
2084 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev); in lio_eth_dev_init()
2085 struct lio_device *lio_dev = LIO_DEV(eth_dev); in lio_eth_dev_init()
2089 eth_dev->rx_pkt_burst = &lio_dev_recv_pkts; in lio_eth_dev_init()
2090 eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts; in lio_eth_dev_init()
2096 rte_eth_copy_pci_info(eth_dev, pdev); in lio_eth_dev_init()
2097 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; in lio_eth_dev_init()
2106 lio_dev->eth_dev = eth_dev; in lio_eth_dev_init()
2112 lio_dev->port_id = eth_dev->data->port_id; in lio_eth_dev_init()
2119 eth_dev->dev_ops = &liovf_eth_dev_ops; in lio_eth_dev_init()
2120 eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0); in lio_eth_dev_init()
2121 if (eth_dev->data->mac_addrs == NULL) { in lio_eth_dev_init()
2124 eth_dev->dev_ops = NULL; in lio_eth_dev_init()
2125 eth_dev->rx_pkt_burst = NULL; in lio_eth_dev_init()
2126 eth_dev->tx_pkt_burst = NULL; in lio_eth_dev_init()
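Finally, lines 2119-2126 show the init-time failure discipline: dev_ops and the burst pointers are installed first, the MAC array is allocated from rte_malloc, and if that allocation fails everything installed so far is cleared so the half-initialized ethdev cannot be used. A sketch of that rollback:

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_malloc.h>

static int
alloc_mac_or_rollback(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		/* Undo everything installed before the allocation. */
		eth_dev->dev_ops = NULL;
		eth_dev->rx_pkt_burst = NULL;
		eth_dev->tx_pkt_burst = NULL;
		return -ENOMEM;
	}

	return 0;
}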