xref: /dpdk/drivers/net/txgbe/txgbe_ethdev.c (revision f6946717)
1a3babbddSJiawen Wu /* SPDX-License-Identifier: BSD-3-Clause
2f8aadb64SJiawen Wu  * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
3f8aadb64SJiawen Wu  * Copyright(c) 2010-2017 Intel Corporation
4a3babbddSJiawen Wu  */
5a3babbddSJiawen Wu 
6e1698e38SJiawen Wu #include <stdio.h>
7e1698e38SJiawen Wu #include <errno.h>
8e1698e38SJiawen Wu #include <stdint.h>
9e1698e38SJiawen Wu #include <string.h>
107dc11706SJiawen Wu #include <rte_common.h>
11df96fd0dSBruce Richardson #include <ethdev_pci.h>
122fc745e6SJiawen Wu 
132fc745e6SJiawen Wu #include <rte_interrupts.h>
14a331fe3bSJiawen Wu #include <rte_log.h>
15a331fe3bSJiawen Wu #include <rte_debug.h>
167dc11706SJiawen Wu #include <rte_pci.h>
17e1698e38SJiawen Wu #include <rte_memory.h>
182fc745e6SJiawen Wu #include <rte_eal.h>
192fc745e6SJiawen Wu #include <rte_alarm.h>
20f611dadaSJiawen Wu #include <rte_kvargs.h>
217dc11706SJiawen Wu 
227dc11706SJiawen Wu #include "txgbe_logs.h"
237dc11706SJiawen Wu #include "base/txgbe.h"
247dc11706SJiawen Wu #include "txgbe_ethdev.h"
2586d8adc7SJiawen Wu #include "txgbe_rxtx.h"
26ab7a6530SJiawen Wu #include "txgbe_regs_group.h"
27ab7a6530SJiawen Wu 
/* General register group exposed through the ethdev "get registers" op.
 * NOTE(review): entry layout looks like {address, count, stride, name}
 * per struct reg_info -- confirm against txgbe_regs_group.h.
 */
static const struct reg_info txgbe_regs_general[] = {
	{TXGBE_RST, 1, 1, "TXGBE_RST"},
	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
	{0, 0, 0, ""}	/* sentinel: all-zero entry terminates the table */
};
37ab7a6530SJiawen Wu 
/* The following register groups are placeholders: each table contains
 * only the all-zero sentinel, so nothing is dumped for them yet.
 */
static const struct reg_info txgbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rx[] = {
	{0, 0, 0, ""}
};
57ab7a6530SJiawen Wu 
58ab7a6530SJiawen Wu static struct reg_info txgbe_regs_tx[] = {
59ab7a6530SJiawen Wu 	{0, 0, 0, ""}
60ab7a6530SJiawen Wu };
61ab7a6530SJiawen Wu 
/* More placeholder register groups (sentinel-only, nothing dumped). */
static const struct reg_info txgbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_dcb[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};
77ab7a6530SJiawen Wu 
/* PF registers */
/* NULL-terminated list of every register group above; the reg-dump
 * code walks this array group by group.
 */
static const struct reg_info *txgbe_regs_others[] = {
				txgbe_regs_general,
				txgbe_regs_nvm,
				txgbe_regs_interrupt,
				txgbe_regs_fctl_others,
				txgbe_regs_rxdma,
				txgbe_regs_rx,
				txgbe_regs_tx,
				txgbe_regs_wakeup,
				txgbe_regs_dcb,
				txgbe_regs_mac,
				txgbe_regs_diagnostic,
				NULL};
927dc11706SJiawen Wu 
/* Forward declarations for static handlers defined later in this file. */
static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

/* Interrupt and link-state plumbing. */
static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

/* Filter/tunnel state restore helpers. */
static int txgbe_filter_restore(struct rte_eth_dev *dev);
static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
12277a72b4dSJiawen Wu 
/* Per-queue VLAN-strip bookkeeping: queue q maps to one bit of
 * h->bitmap[] (NBBY = bits per byte).  The mask literal is unsigned
 * (1u) so that shifting into bit 31 does not left-shift into the sign
 * bit of a signed int, which is undefined behavior (CERT INT34-C).
 */
#define TXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1u << bit;\
	} while (0)

#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1u << bit);\
	} while (0)

/* Read the strip flag for queue q into r (0 or 1). */
#define TXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = ((h)->bitmap[idx] >> bit) & 1;\
	} while (0)
140220b0e49SJiawen Wu 
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_SP1000) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820) },
	{ .vendor_id = 0, /* sentinel */ },
};
1497dc11706SJiawen Wu 
/* Rx/Tx descriptor ring limits advertised to applications via
 * dev_infos_get().
 */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

/* Forward declaration; the ops table is defined later in this file. */
static const struct eth_dev_ops txgbe_eth_dev_ops;
165e1698e38SJiawen Wu 
/* xstats tables: each entry pairs a display name with the field's byte
 * offset inside struct txgbe_hw_stats.  HW_XSTAT uses the member name
 * itself as the display name; HW_XSTAT_NAME supplies an explicit one.
 */
#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* Flow Director */
	HW_XSTAT(flow_director_added_filters),
	HW_XSTAT(flow_director_removed_filters),
	HW_XSTAT(flow_director_filter_add_errors),
	HW_XSTAT(flow_director_filter_remove_errors),
	HW_XSTAT(flow_director_matched_filters),
	HW_XSTAT(flow_director_missed_filters),

	/* FCoE */
	HW_XSTAT(rx_fcoe_crc_errors),
	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
	HW_XSTAT(rx_fcoe_dropped),
	HW_XSTAT(rx_fcoe_packets),
	HW_XSTAT(tx_fcoe_packets),
	HW_XSTAT(rx_fcoe_bytes),
	HW_XSTAT(tx_fcoe_bytes),
	HW_XSTAT(rx_fcoe_no_ddp),
	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	/* Aliases: same counters exported under a second, more
	 * descriptive name.
	 */
	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
			   sizeof(rte_txgbe_stats_strings[0]))

/* Per-priority statistics */
/* Offsets are taken from up[0]; the per-priority index is applied at
 * collection time.
 */
#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
	UP_XSTAT(rx_up_packets),
	UP_XSTAT(tx_up_packets),
	UP_XSTAT(rx_up_bytes),
	UP_XSTAT(tx_up_bytes),
	UP_XSTAT(rx_up_drop_packets),

	UP_XSTAT(tx_up_xon_packets),
	UP_XSTAT(rx_up_xon_packets),
	UP_XSTAT(tx_up_xoff_packets),
	UP_XSTAT(rx_up_xoff_packets),
	UP_XSTAT(rx_up_dropped),
	UP_XSTAT(rx_up_mbuf_alloc_errors),
	UP_XSTAT(tx_up_xon2off_packets),
};

#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
			   sizeof(rte_txgbe_up_strings[0]))

/* Per-queue statistics */
/* Offsets are taken from qp[0]; the queue-pair index is applied at
 * collection time.
 */
#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
			   sizeof(rte_txgbe_qp_strings[0]))
30391fe49c8SJiawen Wu 
3044460ed14SJiawen Wu static inline int
txgbe_is_sfp(struct txgbe_hw * hw)3054460ed14SJiawen Wu txgbe_is_sfp(struct txgbe_hw *hw)
3064460ed14SJiawen Wu {
3074460ed14SJiawen Wu 	switch (hw->phy.type) {
3084460ed14SJiawen Wu 	case txgbe_phy_sfp_avago:
3094460ed14SJiawen Wu 	case txgbe_phy_sfp_ftl:
3104460ed14SJiawen Wu 	case txgbe_phy_sfp_intel:
3114460ed14SJiawen Wu 	case txgbe_phy_sfp_unknown:
3124460ed14SJiawen Wu 	case txgbe_phy_sfp_tyco_passive:
3134460ed14SJiawen Wu 	case txgbe_phy_sfp_unknown_passive:
3144460ed14SJiawen Wu 		return 1;
3154460ed14SJiawen Wu 	default:
3164460ed14SJiawen Wu 		return 0;
3174460ed14SJiawen Wu 	}
3184460ed14SJiawen Wu }
3194460ed14SJiawen Wu 
320b1f59667SJiawen Wu static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw * hw)321b1f59667SJiawen Wu txgbe_pf_reset_hw(struct txgbe_hw *hw)
322b1f59667SJiawen Wu {
323b1f59667SJiawen Wu 	uint32_t ctrl_ext;
324b1f59667SJiawen Wu 	int32_t status;
325b1f59667SJiawen Wu 
326b1f59667SJiawen Wu 	status = hw->mac.reset_hw(hw);
327b1f59667SJiawen Wu 
328b1f59667SJiawen Wu 	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
329b1f59667SJiawen Wu 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
330b1f59667SJiawen Wu 	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
331b1f59667SJiawen Wu 	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
332b1f59667SJiawen Wu 	txgbe_flush(hw);
333b1f59667SJiawen Wu 
334b1f59667SJiawen Wu 	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
335b1f59667SJiawen Wu 		status = 0;
336b1f59667SJiawen Wu 	return status;
337b1f59667SJiawen Wu }
338b1f59667SJiawen Wu 
3392fc745e6SJiawen Wu static inline void
txgbe_enable_intr(struct rte_eth_dev * dev)3402fc745e6SJiawen Wu txgbe_enable_intr(struct rte_eth_dev *dev)
3412fc745e6SJiawen Wu {
3422fc745e6SJiawen Wu 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
3432fc745e6SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3442fc745e6SJiawen Wu 
3452fc745e6SJiawen Wu 	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
3462fc745e6SJiawen Wu 	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
3472fc745e6SJiawen Wu 	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
3482fc745e6SJiawen Wu 	txgbe_flush(hw);
3492fc745e6SJiawen Wu }
3502fc745e6SJiawen Wu 
/* Mask all interrupt sources: write the misc interrupt-enable register
 * and set both per-queue interrupt mask-set registers, then flush so
 * the writes reach the device before returning.
 */
static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}
3612fc745e6SJiawen Wu 
/*
 * Map a queue to a statistics counter index (RQSMR/TQSM shadow copy).
 *
 * @queue_id: queue to map.
 * @stat_idx: counter index; must fit in the per-queue bitfield
 *            (QMAP_FIELD_RESERVED_BITS_MASK), otherwise -EIO.
 * @is_rx: nonzero selects the RX mapping table, zero the TX table.
 *
 * Only updates the software shadow in stat_mappings; the values are
 * presumably written to hardware elsewhere -- confirm against callers.
 * Returns 0 on success, -ENOSYS for non-raptor MACs, -EIO on range
 * errors.
 */
static int
txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_stat_mappings *stat_mappings =
		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if (hw->mac.type != txgbe_mac_raptor)
		return -ENOSYS;

	/* Reject indices that would not fit in one QMAP field. */
	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
		return -EIO;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	/* Each 32-bit mapping register packs several queue fields:
	 * n is the register index, offset the field within it.
	 */
	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= TXGBE_NB_STAT_MAPPING) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsm[n] &= ~clearing_mask;

	/* Merge the new index into the field just cleared. */
	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsm[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
	return 0;
}
415c1d4e9d3SJiawen Wu 
4168bdc7882SJiawen Wu static void
txgbe_dcb_init(struct txgbe_hw * hw,struct txgbe_dcb_config * dcb_config)4178bdc7882SJiawen Wu txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
4188bdc7882SJiawen Wu {
4198bdc7882SJiawen Wu 	int i;
4208bdc7882SJiawen Wu 	u8 bwgp;
4218bdc7882SJiawen Wu 	struct txgbe_dcb_tc_config *tc;
4228bdc7882SJiawen Wu 
4238bdc7882SJiawen Wu 	UNREFERENCED_PARAMETER(hw);
4248bdc7882SJiawen Wu 
4258bdc7882SJiawen Wu 	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
4268bdc7882SJiawen Wu 	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
4278bdc7882SJiawen Wu 	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
4288bdc7882SJiawen Wu 	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
4298bdc7882SJiawen Wu 		tc = &dcb_config->tc_config[i];
4308bdc7882SJiawen Wu 		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
4318bdc7882SJiawen Wu 		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
4328bdc7882SJiawen Wu 		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
4338bdc7882SJiawen Wu 		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
4348bdc7882SJiawen Wu 		tc->pfc = txgbe_dcb_pfc_disabled;
4358bdc7882SJiawen Wu 	}
4368bdc7882SJiawen Wu 
4378bdc7882SJiawen Wu 	/* Initialize default user to priority mapping, UPx->TC0 */
4388bdc7882SJiawen Wu 	tc = &dcb_config->tc_config[0];
4398bdc7882SJiawen Wu 	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
4408bdc7882SJiawen Wu 	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
4418bdc7882SJiawen Wu 	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
4428bdc7882SJiawen Wu 		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
4438bdc7882SJiawen Wu 		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
4448bdc7882SJiawen Wu 	}
4458bdc7882SJiawen Wu 	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
4468bdc7882SJiawen Wu 	dcb_config->pfc_mode_enable = false;
4478bdc7882SJiawen Wu 	dcb_config->vt_mode = true;
4488bdc7882SJiawen Wu 	dcb_config->round_robin_enable = false;
4498bdc7882SJiawen Wu 	/* support all DCB capabilities */
4508bdc7882SJiawen Wu 	dcb_config->support.capabilities = 0xFF;
4518bdc7882SJiawen Wu }
4528bdc7882SJiawen Wu 
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
txgbe_swfw_lock_reset(struct txgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = TXGBE_MNGSEM_SWPHY |
	       TXGBE_MNGSEM_SWMBX |
	       TXGBE_MNGSEM_SWFLASH;
	/* Acquire failure is only logged: the release below still clears
	 * any stale ownership of the shared semaphores.
	 */
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}
4752102db87SJiawen Wu 
476c1d4e9d3SJiawen Wu static int
txgbe_handle_devarg(__rte_unused const char * key,const char * value,void * extra_args)477f611dadaSJiawen Wu txgbe_handle_devarg(__rte_unused const char *key, const char *value,
478f611dadaSJiawen Wu 		  void *extra_args)
479f611dadaSJiawen Wu {
480f611dadaSJiawen Wu 	uint16_t *n = extra_args;
481f611dadaSJiawen Wu 
482f611dadaSJiawen Wu 	if (value == NULL || extra_args == NULL)
483f611dadaSJiawen Wu 		return -EINVAL;
484f611dadaSJiawen Wu 
485f611dadaSJiawen Wu 	*n = (uint16_t)strtoul(value, NULL, 10);
486f611dadaSJiawen Wu 	if (*n == USHRT_MAX && errno == ERANGE)
487f611dadaSJiawen Wu 		return -1;
488f611dadaSJiawen Wu 
489f611dadaSJiawen Wu 	return 0;
490f611dadaSJiawen Wu }
491f611dadaSJiawen Wu 
/*
 * Parse the device arguments and store the results in @hw.
 *
 * The locals below hold the defaults; they are used unchanged when no
 * devargs were supplied or the kvargs string fails to parse (the "null"
 * label path), and rte_kvargs_process() overwrites them per key
 * otherwise.
 */
static void
txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	u16 auto_neg = 1;
	u16 poll = 0;
	u16 present = 0;
	u16 sgmii = 0;
	u16 ffe_set = 0;
	u16 ffe_main = 27;
	u16 ffe_pre = 8;
	u16 ffe_post = 44;

	if (devargs == NULL)
		goto null;

	kvlist = rte_kvargs_parse(devargs->args, txgbe_valid_arguments);
	if (kvlist == NULL)
		goto null;

	/* Each key is parsed by txgbe_handle_devarg into its local. */
	rte_kvargs_process(kvlist, TXGBE_DEVARG_BP_AUTO,
			   &txgbe_handle_devarg, &auto_neg);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_POLL,
			   &txgbe_handle_devarg, &poll);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_PRESENT,
			   &txgbe_handle_devarg, &present);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_KX_SGMII,
			   &txgbe_handle_devarg, &sgmii);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_SET,
			   &txgbe_handle_devarg, &ffe_set);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_MAIN,
			   &txgbe_handle_devarg, &ffe_main);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_PRE,
			   &txgbe_handle_devarg, &ffe_pre);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_POST,
			   &txgbe_handle_devarg, &ffe_post);
	rte_kvargs_free(kvlist);

null:
	hw->devarg.auto_neg = auto_neg;
	hw->devarg.poll = poll;
	hw->devarg.present = present;
	hw->devarg.sgmii = sgmii;
	hw->phy.ffe_set = ffe_set;
	hw->phy.ffe_main = ffe_main;
	hw->phy.ffe_pre = ffe_pre;
	hw->phy.ffe_post = ffe_post;
}
540f611dadaSJiawen Wu 
541f611dadaSJiawen Wu static int
eth_txgbe_dev_init(struct rte_eth_dev * eth_dev,void * init_params __rte_unused)5427dc11706SJiawen Wu eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
5437dc11706SJiawen Wu {
544e1698e38SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
545e1698e38SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
546220b0e49SJiawen Wu 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
547220b0e49SJiawen Wu 	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
5488bdc7882SJiawen Wu 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
549838e9bafSJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
55069ce8c8aSJiawen Wu 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
551d61138d4SHarman Kalra 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
552e1698e38SJiawen Wu 	const struct rte_memzone *mz;
553a6712cd0SJiawen Wu 	uint32_t ctrl_ext;
55435c90eccSJiawen Wu 	uint16_t csum;
5554c6dea0cSJiawen Wu 	int err, i, ret;
556e1698e38SJiawen Wu 
557e1698e38SJiawen Wu 	PMD_INIT_FUNC_TRACE();
558e1698e38SJiawen Wu 
559e1698e38SJiawen Wu 	eth_dev->dev_ops = &txgbe_eth_dev_ops;
560c22e6c7aSJiawen Wu 	eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
561c22e6c7aSJiawen Wu 	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
562c22e6c7aSJiawen Wu 	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
5630e484278SJiawen Wu 	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
564ca46fcd7SJiawen Wu 	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
56591e0e38bSJiawen Wu 	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
566ca46fcd7SJiawen Wu 
567ca46fcd7SJiawen Wu 	/*
568ca46fcd7SJiawen Wu 	 * For secondary processes, we don't initialise any further as primary
569ca46fcd7SJiawen Wu 	 * has already done this work. Only check we don't need a different
570ca46fcd7SJiawen Wu 	 * RX and TX function.
571ca46fcd7SJiawen Wu 	 */
572ca46fcd7SJiawen Wu 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
573ca46fcd7SJiawen Wu 		struct txgbe_tx_queue *txq;
574ca46fcd7SJiawen Wu 		/* TX queue function in primary, set by last queue initialized
575ca46fcd7SJiawen Wu 		 * Tx queue may not initialized by primary process
576ca46fcd7SJiawen Wu 		 */
577ca46fcd7SJiawen Wu 		if (eth_dev->data->tx_queues) {
578ca46fcd7SJiawen Wu 			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
579ca46fcd7SJiawen Wu 			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
580ca46fcd7SJiawen Wu 			txgbe_set_tx_function(eth_dev, txq);
581ca46fcd7SJiawen Wu 		} else {
582ca46fcd7SJiawen Wu 			/* Use default TX function if we get here */
583ca46fcd7SJiawen Wu 			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
584ca46fcd7SJiawen Wu 				     "Using default TX function.");
585ca46fcd7SJiawen Wu 		}
586ca46fcd7SJiawen Wu 
587ca46fcd7SJiawen Wu 		txgbe_set_rx_function(eth_dev);
588ca46fcd7SJiawen Wu 
589ca46fcd7SJiawen Wu 		return 0;
590ca46fcd7SJiawen Wu 	}
591e1698e38SJiawen Wu 
592e1698e38SJiawen Wu 	rte_eth_copy_pci_info(eth_dev, pci_dev);
593e1698e38SJiawen Wu 
594e1698e38SJiawen Wu 	/* Vendor and Device ID need to be set before init of shared code */
595e1698e38SJiawen Wu 	hw->device_id = pci_dev->id.device_id;
596e1698e38SJiawen Wu 	hw->vendor_id = pci_dev->id.vendor_id;
597e1698e38SJiawen Wu 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
598e1698e38SJiawen Wu 	hw->allow_unsupported_sfp = 1;
599e1698e38SJiawen Wu 
600e1698e38SJiawen Wu 	/* Reserve memory for interrupt status block */
601e1698e38SJiawen Wu 	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
602e1698e38SJiawen Wu 		16, TXGBE_ALIGN, SOCKET_ID_ANY);
603e1698e38SJiawen Wu 	if (mz == NULL)
604e1698e38SJiawen Wu 		return -ENOMEM;
605e1698e38SJiawen Wu 
606e1698e38SJiawen Wu 	hw->isb_dma = TMZ_PADDR(mz);
607e1698e38SJiawen Wu 	hw->isb_mem = TMZ_VADDR(mz);
608e1698e38SJiawen Wu 
609f611dadaSJiawen Wu 	txgbe_parse_devargs(hw, pci_dev->device.devargs);
6104460ed14SJiawen Wu 	/* Initialize the shared code (base driver) */
6114460ed14SJiawen Wu 	err = txgbe_init_shared_code(hw);
6124460ed14SJiawen Wu 	if (err != 0) {
6134460ed14SJiawen Wu 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
6144460ed14SJiawen Wu 		return -EIO;
6154460ed14SJiawen Wu 	}
6164460ed14SJiawen Wu 
6172102db87SJiawen Wu 	/* Unlock any pending hardware semaphore */
6182102db87SJiawen Wu 	txgbe_swfw_lock_reset(hw);
6192102db87SJiawen Wu 
620f437d97cSJiawen Wu #ifdef RTE_LIB_SECURITY
621f437d97cSJiawen Wu 	/* Initialize security_ctx only for primary process*/
622f437d97cSJiawen Wu 	if (txgbe_ipsec_ctx_create(eth_dev))
623f437d97cSJiawen Wu 		return -ENOMEM;
624f437d97cSJiawen Wu #endif
625f437d97cSJiawen Wu 
6268bdc7882SJiawen Wu 	/* Initialize DCB configuration*/
6278bdc7882SJiawen Wu 	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
6288bdc7882SJiawen Wu 	txgbe_dcb_init(hw, dcb_config);
6298bdc7882SJiawen Wu 
63069ce8c8aSJiawen Wu 	/* Get Hardware Flow Control setting */
63169ce8c8aSJiawen Wu 	hw->fc.requested_mode = txgbe_fc_full;
63269ce8c8aSJiawen Wu 	hw->fc.current_mode = txgbe_fc_full;
63369ce8c8aSJiawen Wu 	hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
63469ce8c8aSJiawen Wu 	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
63569ce8c8aSJiawen Wu 		hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
63669ce8c8aSJiawen Wu 		hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
63769ce8c8aSJiawen Wu 	}
63869ce8c8aSJiawen Wu 	hw->fc.send_xon = 1;
63969ce8c8aSJiawen Wu 
64035c90eccSJiawen Wu 	err = hw->rom.init_params(hw);
64135c90eccSJiawen Wu 	if (err != 0) {
64235c90eccSJiawen Wu 		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
64335c90eccSJiawen Wu 		return -EIO;
64435c90eccSJiawen Wu 	}
64535c90eccSJiawen Wu 
64635c90eccSJiawen Wu 	/* Make sure we have a good EEPROM before we read from it */
64735c90eccSJiawen Wu 	err = hw->rom.validate_checksum(hw, &csum);
64835c90eccSJiawen Wu 	if (err != 0) {
64935c90eccSJiawen Wu 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
65035c90eccSJiawen Wu 		return -EIO;
65135c90eccSJiawen Wu 	}
65235c90eccSJiawen Wu 
653f58ae2fcSJiawen Wu 	err = hw->mac.init_hw(hw);
654f58ae2fcSJiawen Wu 
655f58ae2fcSJiawen Wu 	/*
656f58ae2fcSJiawen Wu 	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
657f58ae2fcSJiawen Wu 	 * is called too soon after the kernel driver unbinding/binding occurs.
658f58ae2fcSJiawen Wu 	 * The failure occurs in txgbe_identify_phy() for all devices,
659f58ae2fcSJiawen Wu 	 * but for non-copper devies, txgbe_identify_sfp_module() is
660f58ae2fcSJiawen Wu 	 * also called. See txgbe_identify_phy(). The reason for the
661f58ae2fcSJiawen Wu 	 * failure is not known, and only occuts when virtualisation features
662f58ae2fcSJiawen Wu 	 * are disabled in the bios. A delay of 200ms  was found to be enough by
663f58ae2fcSJiawen Wu 	 * trial-and-error, and is doubled to be safe.
664f58ae2fcSJiawen Wu 	 */
665f58ae2fcSJiawen Wu 	if (err && hw->phy.media_type == txgbe_media_type_copper) {
666f58ae2fcSJiawen Wu 		rte_delay_ms(200);
667f58ae2fcSJiawen Wu 		err = hw->mac.init_hw(hw);
668f58ae2fcSJiawen Wu 	}
669f58ae2fcSJiawen Wu 
670f58ae2fcSJiawen Wu 	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
671f58ae2fcSJiawen Wu 		err = 0;
672f58ae2fcSJiawen Wu 
673f58ae2fcSJiawen Wu 	if (err == TXGBE_ERR_EEPROM_VERSION) {
674f58ae2fcSJiawen Wu 		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
675f58ae2fcSJiawen Wu 			     "LOM.  Please be aware there may be issues associated "
676f58ae2fcSJiawen Wu 			     "with your hardware.");
677f58ae2fcSJiawen Wu 		PMD_INIT_LOG(ERR, "If you are experiencing problems "
678f58ae2fcSJiawen Wu 			     "please contact your hardware representative "
679f58ae2fcSJiawen Wu 			     "who provided you with this hardware.");
680f58ae2fcSJiawen Wu 	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
681f58ae2fcSJiawen Wu 		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
682f58ae2fcSJiawen Wu 	}
683f58ae2fcSJiawen Wu 	if (err) {
684f58ae2fcSJiawen Wu 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
685f58ae2fcSJiawen Wu 		return -EIO;
686f58ae2fcSJiawen Wu 	}
687f58ae2fcSJiawen Wu 
688c9bb590dSJiawen Wu 	/* Reset the hw statistics */
689c9bb590dSJiawen Wu 	txgbe_dev_stats_reset(eth_dev);
690c9bb590dSJiawen Wu 
6912fc745e6SJiawen Wu 	/* disable interrupt */
6922fc745e6SJiawen Wu 	txgbe_disable_intr(hw);
6932fc745e6SJiawen Wu 
694e1698e38SJiawen Wu 	/* Allocate memory for storing MAC addresses */
695e1698e38SJiawen Wu 	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
696e1698e38SJiawen Wu 					       hw->mac.num_rar_entries, 0);
697e1698e38SJiawen Wu 	if (eth_dev->data->mac_addrs == NULL) {
698e1698e38SJiawen Wu 		PMD_INIT_LOG(ERR,
699e1698e38SJiawen Wu 			     "Failed to allocate %u bytes needed to store "
700e1698e38SJiawen Wu 			     "MAC addresses",
701e1698e38SJiawen Wu 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
702e1698e38SJiawen Wu 		return -ENOMEM;
703e1698e38SJiawen Wu 	}
704e1698e38SJiawen Wu 
705e1698e38SJiawen Wu 	/* Copy the permanent MAC address */
706e1698e38SJiawen Wu 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
707e1698e38SJiawen Wu 			&eth_dev->data->mac_addrs[0]);
708e1698e38SJiawen Wu 
709e1698e38SJiawen Wu 	/* Allocate memory for storing hash filter MAC addresses */
710e1698e38SJiawen Wu 	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
711e1698e38SJiawen Wu 			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
712e1698e38SJiawen Wu 	if (eth_dev->data->hash_mac_addrs == NULL) {
713e1698e38SJiawen Wu 		PMD_INIT_LOG(ERR,
714e1698e38SJiawen Wu 			     "Failed to allocate %d bytes needed to store MAC addresses",
715e1698e38SJiawen Wu 			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
716e1698e38SJiawen Wu 		return -ENOMEM;
717e1698e38SJiawen Wu 	}
718e1698e38SJiawen Wu 
719220b0e49SJiawen Wu 	/* initialize the vfta */
720220b0e49SJiawen Wu 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
721220b0e49SJiawen Wu 
722220b0e49SJiawen Wu 	/* initialize the hw strip bitmap*/
723220b0e49SJiawen Wu 	memset(hwstrip, 0, sizeof(*hwstrip));
724220b0e49SJiawen Wu 
725a6712cd0SJiawen Wu 	/* initialize PF if max_vfs not zero */
7264c6dea0cSJiawen Wu 	ret = txgbe_pf_host_init(eth_dev);
7274c6dea0cSJiawen Wu 	if (ret) {
7284c6dea0cSJiawen Wu 		rte_free(eth_dev->data->mac_addrs);
7294c6dea0cSJiawen Wu 		eth_dev->data->mac_addrs = NULL;
7304c6dea0cSJiawen Wu 		rte_free(eth_dev->data->hash_mac_addrs);
7314c6dea0cSJiawen Wu 		eth_dev->data->hash_mac_addrs = NULL;
7324c6dea0cSJiawen Wu 		return ret;
7334c6dea0cSJiawen Wu 	}
734a6712cd0SJiawen Wu 
735a6712cd0SJiawen Wu 	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
736a6712cd0SJiawen Wu 	/* let hardware know driver is loaded */
737a6712cd0SJiawen Wu 	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
738a6712cd0SJiawen Wu 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
739a6712cd0SJiawen Wu 	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
740a6712cd0SJiawen Wu 	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
741a6712cd0SJiawen Wu 	txgbe_flush(hw);
742a6712cd0SJiawen Wu 
7434460ed14SJiawen Wu 	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
7444460ed14SJiawen Wu 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
7454460ed14SJiawen Wu 			     (int)hw->mac.type, (int)hw->phy.type,
7464460ed14SJiawen Wu 			     (int)hw->phy.sfp_type);
7474460ed14SJiawen Wu 	else
7484460ed14SJiawen Wu 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
7494460ed14SJiawen Wu 			     (int)hw->mac.type, (int)hw->phy.type);
7504460ed14SJiawen Wu 
751e1698e38SJiawen Wu 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
752e1698e38SJiawen Wu 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
753e1698e38SJiawen Wu 		     pci_dev->id.device_id);
754e1698e38SJiawen Wu 
7552fc745e6SJiawen Wu 	rte_intr_callback_register(intr_handle,
7562fc745e6SJiawen Wu 				   txgbe_dev_interrupt_handler, eth_dev);
7572fc745e6SJiawen Wu 
758e1698e38SJiawen Wu 	/* enable uio/vfio intr/eventfd mapping */
759e1698e38SJiawen Wu 	rte_intr_enable(intr_handle);
7607dc11706SJiawen Wu 
7612fc745e6SJiawen Wu 	/* enable support intr */
7622fc745e6SJiawen Wu 	txgbe_enable_intr(eth_dev);
7632fc745e6SJiawen Wu 
764838e9bafSJiawen Wu 	/* initialize filter info */
765838e9bafSJiawen Wu 	memset(filter_info, 0,
766838e9bafSJiawen Wu 	       sizeof(struct txgbe_filter_info));
767838e9bafSJiawen Wu 
768838e9bafSJiawen Wu 	/* initialize 5tuple filter list */
769838e9bafSJiawen Wu 	TAILQ_INIT(&filter_info->fivetuple_list);
770838e9bafSJiawen Wu 
771635c2135SJiawen Wu 	/* initialize flow director filter list & hash */
772635c2135SJiawen Wu 	txgbe_fdir_filter_init(eth_dev);
773635c2135SJiawen Wu 
774c13f84a7SJiawen Wu 	/* initialize l2 tunnel filter list & hash */
775c13f84a7SJiawen Wu 	txgbe_l2_tn_filter_init(eth_dev);
776c13f84a7SJiawen Wu 
7775c2352b9SJiawen Wu 	/* initialize flow filter lists */
7785c2352b9SJiawen Wu 	txgbe_filterlist_init();
7795c2352b9SJiawen Wu 
78069ce8c8aSJiawen Wu 	/* initialize bandwidth configuration info */
78169ce8c8aSJiawen Wu 	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
78269ce8c8aSJiawen Wu 
783ad02aa03SJiawen Wu 	/* initialize Traffic Manager configuration */
784ad02aa03SJiawen Wu 	txgbe_tm_conf_init(eth_dev);
785ad02aa03SJiawen Wu 
7867dc11706SJiawen Wu 	return 0;
7877dc11706SJiawen Wu }
7887dc11706SJiawen Wu 
7897dc11706SJiawen Wu static int
eth_txgbe_dev_uninit(struct rte_eth_dev * eth_dev)7907dc11706SJiawen Wu eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
7917dc11706SJiawen Wu {
792e1698e38SJiawen Wu 	PMD_INIT_FUNC_TRACE();
793e1698e38SJiawen Wu 
794e1698e38SJiawen Wu 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
795e1698e38SJiawen Wu 		return 0;
796e1698e38SJiawen Wu 
797e1698e38SJiawen Wu 	txgbe_dev_close(eth_dev);
7987dc11706SJiawen Wu 
7997dc11706SJiawen Wu 	return 0;
8007dc11706SJiawen Wu }
8017dc11706SJiawen Wu 
txgbe_ntuple_filter_uninit(struct rte_eth_dev * eth_dev)802838e9bafSJiawen Wu static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
803838e9bafSJiawen Wu {
804838e9bafSJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
805838e9bafSJiawen Wu 	struct txgbe_5tuple_filter *p_5tuple;
806838e9bafSJiawen Wu 
807838e9bafSJiawen Wu 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
808838e9bafSJiawen Wu 		TAILQ_REMOVE(&filter_info->fivetuple_list,
809838e9bafSJiawen Wu 			     p_5tuple,
810838e9bafSJiawen Wu 			     entries);
811838e9bafSJiawen Wu 		rte_free(p_5tuple);
812838e9bafSJiawen Wu 	}
813838e9bafSJiawen Wu 	memset(filter_info->fivetuple_mask, 0,
814838e9bafSJiawen Wu 	       sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
815838e9bafSJiawen Wu 
816838e9bafSJiawen Wu 	return 0;
817838e9bafSJiawen Wu }
818838e9bafSJiawen Wu 
txgbe_fdir_filter_uninit(struct rte_eth_dev * eth_dev)819635c2135SJiawen Wu static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
820635c2135SJiawen Wu {
821635c2135SJiawen Wu 	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
822635c2135SJiawen Wu 	struct txgbe_fdir_filter *fdir_filter;
823635c2135SJiawen Wu 
824635c2135SJiawen Wu 	rte_free(fdir_info->hash_map);
825635c2135SJiawen Wu 	rte_hash_free(fdir_info->hash_handle);
826635c2135SJiawen Wu 
827635c2135SJiawen Wu 	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
828635c2135SJiawen Wu 		TAILQ_REMOVE(&fdir_info->fdir_list,
829635c2135SJiawen Wu 			     fdir_filter,
830635c2135SJiawen Wu 			     entries);
831635c2135SJiawen Wu 		rte_free(fdir_filter);
832635c2135SJiawen Wu 	}
833635c2135SJiawen Wu 
834635c2135SJiawen Wu 	return 0;
835635c2135SJiawen Wu }
836635c2135SJiawen Wu 
txgbe_l2_tn_filter_uninit(struct rte_eth_dev * eth_dev)837c13f84a7SJiawen Wu static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
838c13f84a7SJiawen Wu {
839c13f84a7SJiawen Wu 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
840c13f84a7SJiawen Wu 	struct txgbe_l2_tn_filter *l2_tn_filter;
841c13f84a7SJiawen Wu 
842c13f84a7SJiawen Wu 	rte_free(l2_tn_info->hash_map);
843c13f84a7SJiawen Wu 	rte_hash_free(l2_tn_info->hash_handle);
844c13f84a7SJiawen Wu 
845c13f84a7SJiawen Wu 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
846c13f84a7SJiawen Wu 		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
847c13f84a7SJiawen Wu 			     l2_tn_filter,
848c13f84a7SJiawen Wu 			     entries);
849c13f84a7SJiawen Wu 		rte_free(l2_tn_filter);
850c13f84a7SJiawen Wu 	}
851c13f84a7SJiawen Wu 
852c13f84a7SJiawen Wu 	return 0;
853c13f84a7SJiawen Wu }
854c13f84a7SJiawen Wu 
txgbe_fdir_filter_init(struct rte_eth_dev * eth_dev)855635c2135SJiawen Wu static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
856635c2135SJiawen Wu {
857635c2135SJiawen Wu 	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
858635c2135SJiawen Wu 	char fdir_hash_name[RTE_HASH_NAMESIZE];
859635c2135SJiawen Wu 	struct rte_hash_parameters fdir_hash_params = {
860635c2135SJiawen Wu 		.name = fdir_hash_name,
861635c2135SJiawen Wu 		.entries = TXGBE_MAX_FDIR_FILTER_NUM,
862635c2135SJiawen Wu 		.key_len = sizeof(struct txgbe_atr_input),
863635c2135SJiawen Wu 		.hash_func = rte_hash_crc,
864635c2135SJiawen Wu 		.hash_func_init_val = 0,
865635c2135SJiawen Wu 		.socket_id = rte_socket_id(),
866635c2135SJiawen Wu 	};
867635c2135SJiawen Wu 
868635c2135SJiawen Wu 	TAILQ_INIT(&fdir_info->fdir_list);
869635c2135SJiawen Wu 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
870635c2135SJiawen Wu 		 "fdir_%s", TDEV_NAME(eth_dev));
871635c2135SJiawen Wu 	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
872635c2135SJiawen Wu 	if (!fdir_info->hash_handle) {
873635c2135SJiawen Wu 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
874635c2135SJiawen Wu 		return -EINVAL;
875635c2135SJiawen Wu 	}
876635c2135SJiawen Wu 	fdir_info->hash_map = rte_zmalloc("txgbe",
877635c2135SJiawen Wu 					  sizeof(struct txgbe_fdir_filter *) *
878635c2135SJiawen Wu 					  TXGBE_MAX_FDIR_FILTER_NUM,
879635c2135SJiawen Wu 					  0);
880635c2135SJiawen Wu 	if (!fdir_info->hash_map) {
881635c2135SJiawen Wu 		PMD_INIT_LOG(ERR,
882635c2135SJiawen Wu 			     "Failed to allocate memory for fdir hash map!");
883635c2135SJiawen Wu 		return -ENOMEM;
884635c2135SJiawen Wu 	}
885635c2135SJiawen Wu 	fdir_info->mask_added = FALSE;
886635c2135SJiawen Wu 
887635c2135SJiawen Wu 	return 0;
888635c2135SJiawen Wu }
889635c2135SJiawen Wu 
txgbe_l2_tn_filter_init(struct rte_eth_dev * eth_dev)890c13f84a7SJiawen Wu static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
891c13f84a7SJiawen Wu {
892c13f84a7SJiawen Wu 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
893c13f84a7SJiawen Wu 	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
894c13f84a7SJiawen Wu 	struct rte_hash_parameters l2_tn_hash_params = {
895c13f84a7SJiawen Wu 		.name = l2_tn_hash_name,
896c13f84a7SJiawen Wu 		.entries = TXGBE_MAX_L2_TN_FILTER_NUM,
897c13f84a7SJiawen Wu 		.key_len = sizeof(struct txgbe_l2_tn_key),
898c13f84a7SJiawen Wu 		.hash_func = rte_hash_crc,
899c13f84a7SJiawen Wu 		.hash_func_init_val = 0,
900c13f84a7SJiawen Wu 		.socket_id = rte_socket_id(),
901c13f84a7SJiawen Wu 	};
902c13f84a7SJiawen Wu 
903c13f84a7SJiawen Wu 	TAILQ_INIT(&l2_tn_info->l2_tn_list);
904c13f84a7SJiawen Wu 	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
905c13f84a7SJiawen Wu 		 "l2_tn_%s", TDEV_NAME(eth_dev));
906c13f84a7SJiawen Wu 	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
907c13f84a7SJiawen Wu 	if (!l2_tn_info->hash_handle) {
908c13f84a7SJiawen Wu 		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
909c13f84a7SJiawen Wu 		return -EINVAL;
910c13f84a7SJiawen Wu 	}
911c13f84a7SJiawen Wu 	l2_tn_info->hash_map = rte_zmalloc("txgbe",
912c13f84a7SJiawen Wu 				   sizeof(struct txgbe_l2_tn_filter *) *
913c13f84a7SJiawen Wu 				   TXGBE_MAX_L2_TN_FILTER_NUM,
914c13f84a7SJiawen Wu 				   0);
915c13f84a7SJiawen Wu 	if (!l2_tn_info->hash_map) {
916c13f84a7SJiawen Wu 		PMD_INIT_LOG(ERR,
917c13f84a7SJiawen Wu 			"Failed to allocate memory for L2 TN hash map!");
918c13f84a7SJiawen Wu 		return -ENOMEM;
919c13f84a7SJiawen Wu 	}
920c13f84a7SJiawen Wu 	l2_tn_info->e_tag_en = FALSE;
921c13f84a7SJiawen Wu 	l2_tn_info->e_tag_fwd_en = FALSE;
922c13f84a7SJiawen Wu 	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
923c13f84a7SJiawen Wu 
924c13f84a7SJiawen Wu 	return 0;
925c13f84a7SJiawen Wu }
926c13f84a7SJiawen Wu 
9277dc11706SJiawen Wu static int
eth_txgbe_pci_probe(struct rte_pci_driver * pci_drv __rte_unused,struct rte_pci_device * pci_dev)9287dc11706SJiawen Wu eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
9297dc11706SJiawen Wu 		struct rte_pci_device *pci_dev)
9307dc11706SJiawen Wu {
9317870df8fSJiawen Wu 	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
9327dc11706SJiawen Wu 			sizeof(struct txgbe_adapter),
9337dc11706SJiawen Wu 			eth_dev_pci_specific_init, pci_dev,
9347dc11706SJiawen Wu 			eth_txgbe_dev_init, NULL);
9357dc11706SJiawen Wu }
9367dc11706SJiawen Wu 
eth_txgbe_pci_remove(struct rte_pci_device * pci_dev)9377dc11706SJiawen Wu static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
9387dc11706SJiawen Wu {
9397dc11706SJiawen Wu 	struct rte_eth_dev *ethdev;
9407dc11706SJiawen Wu 
9417dc11706SJiawen Wu 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
9427dc11706SJiawen Wu 	if (!ethdev)
9437870df8fSJiawen Wu 		return 0;
9447dc11706SJiawen Wu 
9457dc11706SJiawen Wu 	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
9467dc11706SJiawen Wu }
9477dc11706SJiawen Wu 
/* PCI driver registration object for the txgbe PF PMD. Matches devices
 * from pci_id_txgbe_map and wires the probe/remove entry points.
 * NEED_MAPPING requests BAR mapping by the PCI bus; INTR_LSC enables
 * link-state-change interrupt support.
 */
static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};
9557dc11706SJiawen Wu 
95675cbb1f0SJiawen Wu static int
txgbe_vlan_filter_set(struct rte_eth_dev * dev,uint16_t vlan_id,int on)957220b0e49SJiawen Wu txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
958220b0e49SJiawen Wu {
959220b0e49SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
960220b0e49SJiawen Wu 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
961220b0e49SJiawen Wu 	uint32_t vfta;
962220b0e49SJiawen Wu 	uint32_t vid_idx;
963220b0e49SJiawen Wu 	uint32_t vid_bit;
964220b0e49SJiawen Wu 
965220b0e49SJiawen Wu 	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
966220b0e49SJiawen Wu 	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
967220b0e49SJiawen Wu 	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
968220b0e49SJiawen Wu 	if (on)
969220b0e49SJiawen Wu 		vfta |= vid_bit;
970220b0e49SJiawen Wu 	else
971220b0e49SJiawen Wu 		vfta &= ~vid_bit;
972220b0e49SJiawen Wu 	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
973220b0e49SJiawen Wu 
974220b0e49SJiawen Wu 	/* update local VFTA copy */
975220b0e49SJiawen Wu 	shadow_vfta->vfta[vid_idx] = vfta;
976220b0e49SJiawen Wu 
977220b0e49SJiawen Wu 	return 0;
978220b0e49SJiawen Wu }
979220b0e49SJiawen Wu 
/* Enable or disable hardware VLAN stripping on a single Rx queue.
 * The RXCFG VLAN bit can only be changed safely while the ring is
 * stopped, so if the ring is enabled and the bit actually changes,
 * the queue is stopped, reprogrammed and restarted.
 */
static void
txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	/* Update the per-queue strip bitmap and rxq->offloads first. */
	if (on)
		txgbe_vlan_hw_strip_enable(dev, queue);
	else
		txgbe_vlan_hw_strip_disable(dev, queue);

	/* Snapshot ring base address and config so they can be restored
	 * after a stop/start cycle.
	 */
	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	/* A restart is needed only when the ring is enabled AND the
	 * VLAN-strip bit is actually changing state.
	 */
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			!(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg |= TXGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg &= ~TXGBE_RXCFG_VLAN;
	}
	/* Written back with ENA clear; queue_start re-enables the ring. */
	rxcfg &= ~TXGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		txgbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
		txgbe_dev_rx_queue_start(dev, queue);
	}
}
1017220b0e49SJiawen Wu 
/* Program the Tag Protocol Identifier (TPID) the hardware matches for
 * inner or outer VLAN headers. Which registers hold the TPID depends on
 * whether VLAN extend (double VLAN) and QinQ stripping are currently
 * enabled in PORTCTL. Returns 0 on success, -ENOTSUP when an inner TPID
 * is requested in single-VLAN mode, -EINVAL for an unknown vlan_type.
 */
static int
txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	/* Read current port mode: VLAN extend and QinQ enablement. */
	portctrl = rd32(hw, TXGBE_PORTCTL);

	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
	/* QinQ is only meaningful when VLAN extend is also on. */
	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			/* In extend mode the inner TPID lives in the VLAN
			 * control and DMA Tx control registers.
			 */
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		} else {
			/* Single-VLAN mode has no distinct inner tag. */
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR, "Inner type is not supported"
				    " by single VLAN");
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_LSB_MASK,
				TXGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16-bits is valid */
			wr32m(hw, TXGBE_EXTAG,
				TXGBE_EXTAG_VLAN_MASK,
				TXGBE_EXTAG_VLAN(tpid));
		} else {
			/* Without extend, the outer tag uses the same
			 * registers the inner tag would in extend mode.
			 */
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_MSB_MASK,
				TXGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}
1080220b0e49SJiawen Wu 
1081220b0e49SJiawen Wu void
txgbe_vlan_hw_filter_disable(struct rte_eth_dev * dev)1082220b0e49SJiawen Wu txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1083220b0e49SJiawen Wu {
1084220b0e49SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1085220b0e49SJiawen Wu 	uint32_t vlnctrl;
1086220b0e49SJiawen Wu 
1087220b0e49SJiawen Wu 	PMD_INIT_FUNC_TRACE();
1088220b0e49SJiawen Wu 
1089220b0e49SJiawen Wu 	/* Filter Table Disable */
1090220b0e49SJiawen Wu 	vlnctrl = rd32(hw, TXGBE_VLANCTL);
1091220b0e49SJiawen Wu 	vlnctrl &= ~TXGBE_VLANCTL_VFE;
1092220b0e49SJiawen Wu 	wr32(hw, TXGBE_VLANCTL, vlnctrl);
1093220b0e49SJiawen Wu }
1094220b0e49SJiawen Wu 
1095220b0e49SJiawen Wu void
txgbe_vlan_hw_filter_enable(struct rte_eth_dev * dev)1096220b0e49SJiawen Wu txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1097220b0e49SJiawen Wu {
1098220b0e49SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1099220b0e49SJiawen Wu 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
1100220b0e49SJiawen Wu 	uint32_t vlnctrl;
1101220b0e49SJiawen Wu 	uint16_t i;
1102220b0e49SJiawen Wu 
1103220b0e49SJiawen Wu 	PMD_INIT_FUNC_TRACE();
1104220b0e49SJiawen Wu 
1105220b0e49SJiawen Wu 	/* Filter Table Enable */
1106220b0e49SJiawen Wu 	vlnctrl = rd32(hw, TXGBE_VLANCTL);
1107220b0e49SJiawen Wu 	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
1108220b0e49SJiawen Wu 	vlnctrl |= TXGBE_VLANCTL_VFE;
1109220b0e49SJiawen Wu 	wr32(hw, TXGBE_VLANCTL, vlnctrl);
1110220b0e49SJiawen Wu 
1111220b0e49SJiawen Wu 	/* write whatever is in local vfta copy */
1112220b0e49SJiawen Wu 	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
1113220b0e49SJiawen Wu 		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
1114220b0e49SJiawen Wu }
1115220b0e49SJiawen Wu 
1116220b0e49SJiawen Wu void
txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev * dev,uint16_t queue,bool on)1117220b0e49SJiawen Wu txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1118220b0e49SJiawen Wu {
1119220b0e49SJiawen Wu 	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
1120220b0e49SJiawen Wu 	struct txgbe_rx_queue *rxq;
1121220b0e49SJiawen Wu 
1122220b0e49SJiawen Wu 	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
1123220b0e49SJiawen Wu 		return;
1124220b0e49SJiawen Wu 
1125220b0e49SJiawen Wu 	if (on)
1126220b0e49SJiawen Wu 		TXGBE_SET_HWSTRIP(hwstrip, queue);
1127220b0e49SJiawen Wu 	else
1128220b0e49SJiawen Wu 		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1129220b0e49SJiawen Wu 
1130220b0e49SJiawen Wu 	if (queue >= dev->data->nb_rx_queues)
1131220b0e49SJiawen Wu 		return;
1132220b0e49SJiawen Wu 
1133220b0e49SJiawen Wu 	rxq = dev->data->rx_queues[queue];
1134220b0e49SJiawen Wu 
1135220b0e49SJiawen Wu 	if (on) {
1136daa02b5cSOlivier Matz 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1137295968d1SFerruh Yigit 		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1138220b0e49SJiawen Wu 	} else {
1139daa02b5cSOlivier Matz 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
1140295968d1SFerruh Yigit 		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1141220b0e49SJiawen Wu 	}
1142220b0e49SJiawen Wu }
1143220b0e49SJiawen Wu 
1144220b0e49SJiawen Wu static void
txgbe_vlan_hw_strip_disable(struct rte_eth_dev * dev,uint16_t queue)1145220b0e49SJiawen Wu txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1146220b0e49SJiawen Wu {
1147220b0e49SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1148220b0e49SJiawen Wu 	uint32_t ctrl;
1149220b0e49SJiawen Wu 
1150220b0e49SJiawen Wu 	PMD_INIT_FUNC_TRACE();
1151220b0e49SJiawen Wu 
1152220b0e49SJiawen Wu 	ctrl = rd32(hw, TXGBE_RXCFG(queue));
1153220b0e49SJiawen Wu 	ctrl &= ~TXGBE_RXCFG_VLAN;
1154220b0e49SJiawen Wu 	wr32(hw, TXGBE_RXCFG(queue), ctrl);
1155220b0e49SJiawen Wu 
1156220b0e49SJiawen Wu 	/* record those setting for HW strip per queue */
1157220b0e49SJiawen Wu 	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1158220b0e49SJiawen Wu }
1159220b0e49SJiawen Wu 
1160220b0e49SJiawen Wu static void
txgbe_vlan_hw_strip_enable(struct rte_eth_dev * dev,uint16_t queue)1161220b0e49SJiawen Wu txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1162220b0e49SJiawen Wu {
1163220b0e49SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1164220b0e49SJiawen Wu 	uint32_t ctrl;
1165220b0e49SJiawen Wu 
1166220b0e49SJiawen Wu 	PMD_INIT_FUNC_TRACE();
1167220b0e49SJiawen Wu 
1168220b0e49SJiawen Wu 	ctrl = rd32(hw, TXGBE_RXCFG(queue));
1169220b0e49SJiawen Wu 	ctrl |= TXGBE_RXCFG_VLAN;
1170220b0e49SJiawen Wu 	wr32(hw, TXGBE_RXCFG(queue), ctrl);
1171220b0e49SJiawen Wu 
1172220b0e49SJiawen Wu 	/* record those setting for HW strip per queue */
1173220b0e49SJiawen Wu 	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1174220b0e49SJiawen Wu }
1175220b0e49SJiawen Wu 
1176220b0e49SJiawen Wu static void
txgbe_vlan_hw_extend_disable(struct rte_eth_dev * dev)1177220b0e49SJiawen Wu txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1178220b0e49SJiawen Wu {
1179220b0e49SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1180220b0e49SJiawen Wu 	uint32_t ctrl;
1181220b0e49SJiawen Wu 
1182220b0e49SJiawen Wu 	PMD_INIT_FUNC_TRACE();
1183220b0e49SJiawen Wu 
1184220b0e49SJiawen Wu 	ctrl = rd32(hw, TXGBE_PORTCTL);
1185220b0e49SJiawen Wu 	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
1186220b0e49SJiawen Wu 	wr32(hw, TXGBE_PORTCTL, ctrl);
1187220b0e49SJiawen Wu }
1188220b0e49SJiawen Wu 
1189220b0e49SJiawen Wu static void
txgbe_vlan_hw_extend_enable(struct rte_eth_dev * dev)1190220b0e49SJiawen Wu txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1191220b0e49SJiawen Wu {
1192220b0e49SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1193220b0e49SJiawen Wu 	uint32_t ctrl;
1194220b0e49SJiawen Wu 
1195220b0e49SJiawen Wu 	PMD_INIT_FUNC_TRACE();
1196220b0e49SJiawen Wu 
1197220b0e49SJiawen Wu 	ctrl  = rd32(hw, TXGBE_PORTCTL);
1198220b0e49SJiawen Wu 	ctrl |= TXGBE_PORTCTL_VLANEXT;
119915f0573eSJiawen Wu 	wr32(hw, TXGBE_PORTCTL, ctrl);
120015f0573eSJiawen Wu }
120115f0573eSJiawen Wu 
120215f0573eSJiawen Wu static void
txgbe_qinq_hw_strip_disable(struct rte_eth_dev * dev)120315f0573eSJiawen Wu txgbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
120415f0573eSJiawen Wu {
120515f0573eSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
120615f0573eSJiawen Wu 	uint32_t ctrl;
120715f0573eSJiawen Wu 
120815f0573eSJiawen Wu 	PMD_INIT_FUNC_TRACE();
120915f0573eSJiawen Wu 
121015f0573eSJiawen Wu 	ctrl = rd32(hw, TXGBE_PORTCTL);
121115f0573eSJiawen Wu 	ctrl &= ~TXGBE_PORTCTL_QINQ;
121215f0573eSJiawen Wu 	wr32(hw, TXGBE_PORTCTL, ctrl);
121315f0573eSJiawen Wu }
121415f0573eSJiawen Wu 
121515f0573eSJiawen Wu static void
txgbe_qinq_hw_strip_enable(struct rte_eth_dev * dev)121615f0573eSJiawen Wu txgbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
121715f0573eSJiawen Wu {
121815f0573eSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
121915f0573eSJiawen Wu 	uint32_t ctrl;
122015f0573eSJiawen Wu 
122115f0573eSJiawen Wu 	PMD_INIT_FUNC_TRACE();
122215f0573eSJiawen Wu 
122315f0573eSJiawen Wu 	ctrl  = rd32(hw, TXGBE_PORTCTL);
122415f0573eSJiawen Wu 	ctrl |= TXGBE_PORTCTL_QINQ | TXGBE_PORTCTL_VLANEXT;
1225220b0e49SJiawen Wu 	wr32(hw, TXGBE_PORTCTL, ctrl);
1226220b0e49SJiawen Wu }
1227220b0e49SJiawen Wu 
1228220b0e49SJiawen Wu void
txgbe_vlan_hw_strip_config(struct rte_eth_dev * dev)1229220b0e49SJiawen Wu txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
1230220b0e49SJiawen Wu {
1231220b0e49SJiawen Wu 	struct txgbe_rx_queue *rxq;
1232220b0e49SJiawen Wu 	uint16_t i;
1233220b0e49SJiawen Wu 
1234220b0e49SJiawen Wu 	PMD_INIT_FUNC_TRACE();
1235220b0e49SJiawen Wu 
1236220b0e49SJiawen Wu 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1237220b0e49SJiawen Wu 		rxq = dev->data->rx_queues[i];
1238220b0e49SJiawen Wu 
1239295968d1SFerruh Yigit 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1240220b0e49SJiawen Wu 			txgbe_vlan_strip_queue_set(dev, i, 1);
1241220b0e49SJiawen Wu 		else
1242220b0e49SJiawen Wu 			txgbe_vlan_strip_queue_set(dev, i, 0);
1243220b0e49SJiawen Wu 	}
1244220b0e49SJiawen Wu }
1245220b0e49SJiawen Wu 
1246220b0e49SJiawen Wu void
txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev * dev,int mask)1247220b0e49SJiawen Wu txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
1248220b0e49SJiawen Wu {
1249220b0e49SJiawen Wu 	uint16_t i;
1250220b0e49SJiawen Wu 	struct rte_eth_rxmode *rxmode;
1251220b0e49SJiawen Wu 	struct txgbe_rx_queue *rxq;
1252220b0e49SJiawen Wu 
1253295968d1SFerruh Yigit 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1254220b0e49SJiawen Wu 		rxmode = &dev->data->dev_conf.rxmode;
1255295968d1SFerruh Yigit 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1256220b0e49SJiawen Wu 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
1257220b0e49SJiawen Wu 				rxq = dev->data->rx_queues[i];
1258295968d1SFerruh Yigit 				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1259220b0e49SJiawen Wu 			}
1260220b0e49SJiawen Wu 		else
1261220b0e49SJiawen Wu 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
1262220b0e49SJiawen Wu 				rxq = dev->data->rx_queues[i];
1263295968d1SFerruh Yigit 				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1264220b0e49SJiawen Wu 			}
1265220b0e49SJiawen Wu 	}
1266220b0e49SJiawen Wu }
1267220b0e49SJiawen Wu 
1268220b0e49SJiawen Wu static int
txgbe_vlan_offload_config(struct rte_eth_dev * dev,int mask)1269220b0e49SJiawen Wu txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
1270220b0e49SJiawen Wu {
1271220b0e49SJiawen Wu 	struct rte_eth_rxmode *rxmode;
1272220b0e49SJiawen Wu 	rxmode = &dev->data->dev_conf.rxmode;
1273220b0e49SJiawen Wu 
1274295968d1SFerruh Yigit 	if (mask & RTE_ETH_VLAN_STRIP_MASK)
1275220b0e49SJiawen Wu 		txgbe_vlan_hw_strip_config(dev);
1276220b0e49SJiawen Wu 
1277295968d1SFerruh Yigit 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1278295968d1SFerruh Yigit 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1279220b0e49SJiawen Wu 			txgbe_vlan_hw_filter_enable(dev);
1280220b0e49SJiawen Wu 		else
1281220b0e49SJiawen Wu 			txgbe_vlan_hw_filter_disable(dev);
1282220b0e49SJiawen Wu 	}
1283220b0e49SJiawen Wu 
1284295968d1SFerruh Yigit 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
1285295968d1SFerruh Yigit 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
1286220b0e49SJiawen Wu 			txgbe_vlan_hw_extend_enable(dev);
1287220b0e49SJiawen Wu 		else
1288220b0e49SJiawen Wu 			txgbe_vlan_hw_extend_disable(dev);
1289220b0e49SJiawen Wu 	}
1290220b0e49SJiawen Wu 
1291295968d1SFerruh Yigit 	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
1292295968d1SFerruh Yigit 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
129315f0573eSJiawen Wu 			txgbe_qinq_hw_strip_enable(dev);
129415f0573eSJiawen Wu 		else
129515f0573eSJiawen Wu 			txgbe_qinq_hw_strip_disable(dev);
129615f0573eSJiawen Wu 	}
129715f0573eSJiawen Wu 
1298220b0e49SJiawen Wu 	return 0;
1299220b0e49SJiawen Wu }
1300220b0e49SJiawen Wu 
/*
 * eth_dev VLAN offload callback: sync the per-queue strip flags with the
 * port-level configuration, then program the hardware accordingly.
 *
 * Returns the status of txgbe_vlan_offload_config() instead of an
 * unconditional 0, so a future failure in the config path is reported
 * to the caller (currently it always succeeds, so behavior is unchanged).
 */
static int
txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	txgbe_config_vlan_strip_on_all_queues(dev, mask);

	return txgbe_vlan_offload_config(dev, mask);
}
1310220b0e49SJiawen Wu 
1311c35b73a1SJiawen Wu static void
txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev * dev)1312c35b73a1SJiawen Wu txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1313c35b73a1SJiawen Wu {
1314c35b73a1SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1315c35b73a1SJiawen Wu 	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
1316c35b73a1SJiawen Wu 	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
1317c35b73a1SJiawen Wu 
1318c35b73a1SJiawen Wu 	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
1319c35b73a1SJiawen Wu 	wr32(hw, TXGBE_VLANCTL, vlanctrl);
1320c35b73a1SJiawen Wu }
1321c35b73a1SJiawen Wu 
1322220b0e49SJiawen Wu static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev * dev,uint16_t nb_rx_q)132375cbb1f0SJiawen Wu txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
132475cbb1f0SJiawen Wu {
132575cbb1f0SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
132675cbb1f0SJiawen Wu 
132775cbb1f0SJiawen Wu 	switch (nb_rx_q) {
132875cbb1f0SJiawen Wu 	case 1:
132975cbb1f0SJiawen Wu 	case 2:
1330295968d1SFerruh Yigit 		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
133175cbb1f0SJiawen Wu 		break;
133275cbb1f0SJiawen Wu 	case 4:
1333295968d1SFerruh Yigit 		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
133475cbb1f0SJiawen Wu 		break;
133575cbb1f0SJiawen Wu 	default:
133675cbb1f0SJiawen Wu 		return -EINVAL;
133775cbb1f0SJiawen Wu 	}
133875cbb1f0SJiawen Wu 
133975cbb1f0SJiawen Wu 	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
134075cbb1f0SJiawen Wu 		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
134175cbb1f0SJiawen Wu 	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
134275cbb1f0SJiawen Wu 		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
134375cbb1f0SJiawen Wu 	return 0;
134475cbb1f0SJiawen Wu }
134575cbb1f0SJiawen Wu 
/*
 * Validate the configured Rx/Tx multi-queue modes and queue counts.
 *
 * With SR-IOV active only VMDq-compatible modes are accepted and mq_mode
 * is normalized in place to a VMDq variant; without SR-IOV, VMDq+DCB and
 * plain DCB pool/TC counts are range-checked.
 *
 * Returns 0 on success, -EINVAL on an unsupported combination.
 */
static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case RTE_ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
					" unsupported mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case RTE_ETH_MQ_RX_RSS:
		case RTE_ETH_MQ_RX_VMDQ_RSS:
			/* Plain RSS is mapped onto VMDq pools while SR-IOV is on */
			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						" invalid queue number"
						" for VMDQ RSS, allowed"
						" value are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case RTE_ETH_MQ_RX_VMDQ_ONLY:
		case RTE_ETH_MQ_RX_NONE:
			/* if nothing mq mode configure, use default scheme */
			dev->data->dev_conf.rxmode.mq_mode =
				RTE_ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		/* Normalize the Tx mode to a VMDq variant as well. */
		switch (dev_conf->txmode.mq_mode) {
		case RTE_ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode =
				RTE_ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" nb_rx_q=%d nb_tx_q=%d queue number"
					" must be less than or equal to %d.",
					nb_rx_q, nb_tx_q,
					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
					  " not supported.");
			return -EINVAL;
		}
		/* check configuration for vmdb+dcb mode */
		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools must be %d or %d.",
						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
				return -EINVAL;
			}
		}
		/* Same pool-count constraint on the Tx side. */
		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
						 TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools != %d and"
						" nb_queue_pools != %d.",
						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
				return -EINVAL;
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
			       conf->nb_tcs == RTE_ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
			       conf->nb_tcs == RTE_ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
				return -EINVAL;
			}
		}
	}
	return 0;
}
148475cbb1f0SJiawen Wu 
148575cbb1f0SJiawen Wu static int
txgbe_dev_configure(struct rte_eth_dev * dev)148675cbb1f0SJiawen Wu txgbe_dev_configure(struct rte_eth_dev *dev)
148775cbb1f0SJiawen Wu {
148875cbb1f0SJiawen Wu 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
148975cbb1f0SJiawen Wu 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
149075cbb1f0SJiawen Wu 	int ret;
149175cbb1f0SJiawen Wu 
149275cbb1f0SJiawen Wu 	PMD_INIT_FUNC_TRACE();
149375cbb1f0SJiawen Wu 
1494295968d1SFerruh Yigit 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
1495295968d1SFerruh Yigit 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
149675cbb1f0SJiawen Wu 
149775cbb1f0SJiawen Wu 	/* multiple queue mode checking */
149875cbb1f0SJiawen Wu 	ret  = txgbe_check_mq_mode(dev);
149975cbb1f0SJiawen Wu 	if (ret != 0) {
150075cbb1f0SJiawen Wu 		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
150175cbb1f0SJiawen Wu 			    ret);
150275cbb1f0SJiawen Wu 		return ret;
150375cbb1f0SJiawen Wu 	}
150475cbb1f0SJiawen Wu 
150575cbb1f0SJiawen Wu 	/* set flag to update link status after init */
150675cbb1f0SJiawen Wu 	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
150775cbb1f0SJiawen Wu 
150875cbb1f0SJiawen Wu 	/*
150975cbb1f0SJiawen Wu 	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
151075cbb1f0SJiawen Wu 	 * allocation Rx preconditions we will reset it.
151175cbb1f0SJiawen Wu 	 */
151275cbb1f0SJiawen Wu 	adapter->rx_bulk_alloc_allowed = true;
151375cbb1f0SJiawen Wu 
151475cbb1f0SJiawen Wu 	return 0;
151575cbb1f0SJiawen Wu }
15162fc745e6SJiawen Wu 
15172fc745e6SJiawen Wu static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev * dev)15182fc745e6SJiawen Wu txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
15192fc745e6SJiawen Wu {
15202fc745e6SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
15212fc745e6SJiawen Wu 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
15222fc745e6SJiawen Wu 	uint32_t gpie;
15232fc745e6SJiawen Wu 
15242fc745e6SJiawen Wu 	gpie = rd32(hw, TXGBE_GPIOINTEN);
15252fc745e6SJiawen Wu 	gpie |= TXGBE_GPIOBIT_6;
15262fc745e6SJiawen Wu 	wr32(hw, TXGBE_GPIOINTEN, gpie);
15272fc745e6SJiawen Wu 	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
152882650948SJiawen Wu 	intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
15292fc745e6SJiawen Wu }
15302fc745e6SJiawen Wu 
1531770a3523SJiawen Wu int
txgbe_set_vf_rate_limit(struct rte_eth_dev * dev,uint16_t vf,uint16_t tx_rate,uint64_t q_msk)1532770a3523SJiawen Wu txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
1533770a3523SJiawen Wu 			uint16_t tx_rate, uint64_t q_msk)
1534770a3523SJiawen Wu {
1535770a3523SJiawen Wu 	struct txgbe_hw *hw;
1536770a3523SJiawen Wu 	struct txgbe_vf_info *vfinfo;
1537770a3523SJiawen Wu 	struct rte_eth_link link;
1538770a3523SJiawen Wu 	uint8_t  nb_q_per_pool;
1539770a3523SJiawen Wu 	uint32_t queue_stride;
1540770a3523SJiawen Wu 	uint32_t queue_idx, idx = 0, vf_idx;
1541770a3523SJiawen Wu 	uint32_t queue_end;
1542770a3523SJiawen Wu 	uint16_t total_rate = 0;
1543770a3523SJiawen Wu 	struct rte_pci_device *pci_dev;
1544770a3523SJiawen Wu 	int ret;
1545770a3523SJiawen Wu 
1546770a3523SJiawen Wu 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1547770a3523SJiawen Wu 	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
1548770a3523SJiawen Wu 	if (ret < 0)
1549770a3523SJiawen Wu 		return ret;
1550770a3523SJiawen Wu 
1551770a3523SJiawen Wu 	if (vf >= pci_dev->max_vfs)
1552770a3523SJiawen Wu 		return -EINVAL;
1553770a3523SJiawen Wu 
1554770a3523SJiawen Wu 	if (tx_rate > link.link_speed)
1555770a3523SJiawen Wu 		return -EINVAL;
1556770a3523SJiawen Wu 
1557770a3523SJiawen Wu 	if (q_msk == 0)
1558770a3523SJiawen Wu 		return 0;
1559770a3523SJiawen Wu 
1560770a3523SJiawen Wu 	hw = TXGBE_DEV_HW(dev);
1561770a3523SJiawen Wu 	vfinfo = *(TXGBE_DEV_VFDATA(dev));
1562770a3523SJiawen Wu 	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1563770a3523SJiawen Wu 	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1564770a3523SJiawen Wu 	queue_idx = vf * queue_stride;
1565770a3523SJiawen Wu 	queue_end = queue_idx + nb_q_per_pool - 1;
1566770a3523SJiawen Wu 	if (queue_end >= hw->mac.max_tx_queues)
1567770a3523SJiawen Wu 		return -EINVAL;
1568770a3523SJiawen Wu 
1569770a3523SJiawen Wu 	if (vfinfo) {
1570770a3523SJiawen Wu 		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
1571770a3523SJiawen Wu 			if (vf_idx == vf)
1572770a3523SJiawen Wu 				continue;
1573770a3523SJiawen Wu 			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
1574770a3523SJiawen Wu 				idx++)
1575770a3523SJiawen Wu 				total_rate += vfinfo[vf_idx].tx_rate[idx];
1576770a3523SJiawen Wu 		}
1577770a3523SJiawen Wu 	} else {
1578770a3523SJiawen Wu 		return -EINVAL;
1579770a3523SJiawen Wu 	}
1580770a3523SJiawen Wu 
1581770a3523SJiawen Wu 	/* Store tx_rate for this vf. */
1582770a3523SJiawen Wu 	for (idx = 0; idx < nb_q_per_pool; idx++) {
1583770a3523SJiawen Wu 		if (((uint64_t)0x1 << idx) & q_msk) {
1584770a3523SJiawen Wu 			if (vfinfo[vf].tx_rate[idx] != tx_rate)
1585770a3523SJiawen Wu 				vfinfo[vf].tx_rate[idx] = tx_rate;
1586770a3523SJiawen Wu 			total_rate += tx_rate;
1587770a3523SJiawen Wu 		}
1588770a3523SJiawen Wu 	}
1589770a3523SJiawen Wu 
1590770a3523SJiawen Wu 	if (total_rate > dev->data->dev_link.link_speed) {
1591770a3523SJiawen Wu 		/* Reset stored TX rate of the VF if it causes exceed
1592770a3523SJiawen Wu 		 * link speed.
1593770a3523SJiawen Wu 		 */
1594770a3523SJiawen Wu 		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
1595770a3523SJiawen Wu 		return -EINVAL;
1596770a3523SJiawen Wu 	}
1597770a3523SJiawen Wu 
1598770a3523SJiawen Wu 	/* Set ARBTXRATE of each queue/pool for vf X  */
1599770a3523SJiawen Wu 	for (; queue_idx <= queue_end; queue_idx++) {
1600770a3523SJiawen Wu 		if (0x1 & q_msk)
1601770a3523SJiawen Wu 			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
1602770a3523SJiawen Wu 		q_msk = q_msk >> 1;
1603770a3523SJiawen Wu 	}
1604770a3523SJiawen Wu 
1605770a3523SJiawen Wu 	return 0;
1606770a3523SJiawen Wu }
1607770a3523SJiawen Wu 
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
txgbe_dev_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = 0;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint16_t vf, idx;
	uint32_t *link_speeds;
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

	PMD_INIT_FUNC_TRACE();

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	txgbe_stop_hw(hw);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = txgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;
	hw->dev_start = true;

	/* configure PF module if SRIOV enabled */
	txgbe_pf_host_configure(dev);

	txgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	/* Allocate the per-queue interrupt vector mapping table. */
	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}
	/* configure msix for sleep until rx interrupt */
	txgbe_configure_msix(dev);

	/* initialize transmission unit */
	txgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = txgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	/* Apply strip/filter/extend VLAN offloads from the device config. */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = txgbe_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		txgbe_vmdq_vlan_hw_filter_enable(dev);
	}

	/* Configure DCB hw */
	txgbe_configure_pb(dev);
	txgbe_configure_port(dev);
	txgbe_configure_dcb(dev);

	/* Program flow director only when a mode was requested. */
	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		err = txgbe_fdir_configure(dev);
		if (err)
			goto error;
	}

	/* Restore vf rate limit */
	if (vfinfo != NULL) {
		for (vf = 0; vf < pci_dev->max_vfs; vf++)
			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					txgbe_set_vf_rate_limit(dev, vf,
						vfinfo[vf].tx_rate[idx],
						1 << idx);
	}

	err = txgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.setup_sfp(hw);
		if (err)
			goto error;
	}

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
	}

	/* KR/KX/KX4 backplane devices skip the immediate link check here */
	if ((hw->subsystem_device_id & 0xFF) != TXGBE_DEV_ID_KR_KX_KX4)
		err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	dev->data->dev_link.link_status = link_up;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
			RTE_ETH_LINK_SPEED_10G;

	/* Reject any requested speed bit outside the supported set
	 * (bit 0 is the fixed-speed flag and is excluded by the shift).
	 */
	link_speeds = &dev->data->dev_conf.link_speeds;
	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	/* Translate RTE_ETH_LINK_SPEED_* flags into TXGBE_LINK_SPEED_* bits. */
	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = (TXGBE_LINK_SPEED_100M_FULL |
			 TXGBE_LINK_SPEED_1GB_FULL |
			 TXGBE_LINK_SPEED_10GB_FULL);
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
			speed |= TXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
			speed |= TXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= TXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= TXGBE_LINK_SPEED_100M_FULL;
	}

	err = hw->mac.setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		txgbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
		txgbe_dev_macsec_interrupt_setup(dev);
		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     txgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		txgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	txgbe_enable_intr(dev);
	txgbe_l2_tunnel_conf(dev);
	txgbe_filter_restore(dev);

	if (tm_conf->root && !tm_conf->committed)
		PMD_DRV_LOG(WARNING,
			    "please call hierarchy_commit() "
			    "before starting the port");

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	txgbe_dev_link_update(dev, 0);

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

	/* Snapshot hw stats so later reads report deltas from this start. */
	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	txgbe_dev_clear_queues(dev);
	return -EIO;
}
1845b1f59667SJiawen Wu 
1846b1f59667SJiawen Wu /*
1847e0d876efSJiawen Wu  * Stop device: disable rx and tx functions to allow for reconfiguring.
1848e0d876efSJiawen Wu  */
/*
 * Stop device: disable rx and tx, reset the adapter and release every
 * software resource tied to the running state (interrupt vectors,
 * recorded link status, TM commit flag).  Idempotent: returns 0
 * immediately when the adapter is already stopped.
 */
static int
txgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

	/* already stopped: nothing to do */
	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	/* a link-setup alarm may still be pending from dev_start */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	txgbe_disable_intr(hw);

	/* reset the NIC */
	txgbe_pf_reset_hw(hw);
	/* cleared here so txgbe_stop_hw() runs the full stop sequence;
	 * set to true again at the end of this function
	 */
	hw->adapter_stopped = 0;

	/* stop adapter */
	txgbe_stop_hw(hw);

	/* tell every active VF it may no longer talk to the PF */
	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
	}

	txgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   txgbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	/* reset hierarchy commit */
	tm_conf->committed = false;

	adapter->rss_reta_updated = 0;
	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;
	hw->dev_start = false;

	return 0;
}
1921e0d876efSJiawen Wu 
1922e0d876efSJiawen Wu /*
19230c061eadSJiawen Wu  * Set device link up: enable tx.
19240c061eadSJiawen Wu  */
19250c061eadSJiawen Wu static int
txgbe_dev_set_link_up(struct rte_eth_dev * dev)19260c061eadSJiawen Wu txgbe_dev_set_link_up(struct rte_eth_dev *dev)
19270c061eadSJiawen Wu {
19280c061eadSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
19290c061eadSJiawen Wu 
19300c061eadSJiawen Wu 	if (hw->phy.media_type == txgbe_media_type_copper) {
19310c061eadSJiawen Wu 		/* Turn on the copper */
19320c061eadSJiawen Wu 		hw->phy.set_phy_power(hw, true);
19330c061eadSJiawen Wu 	} else {
19340c061eadSJiawen Wu 		/* Turn on the laser */
19350c061eadSJiawen Wu 		hw->mac.enable_tx_laser(hw);
19362e1ecb46SJiawen Wu 		hw->dev_start = true;
19370c061eadSJiawen Wu 		txgbe_dev_link_update(dev, 0);
19380c061eadSJiawen Wu 	}
19390c061eadSJiawen Wu 
19400c061eadSJiawen Wu 	return 0;
19410c061eadSJiawen Wu }
19420c061eadSJiawen Wu 
19430c061eadSJiawen Wu /*
19440c061eadSJiawen Wu  * Set device link down: disable tx.
19450c061eadSJiawen Wu  */
19460c061eadSJiawen Wu static int
txgbe_dev_set_link_down(struct rte_eth_dev * dev)19470c061eadSJiawen Wu txgbe_dev_set_link_down(struct rte_eth_dev *dev)
19480c061eadSJiawen Wu {
19490c061eadSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
19500c061eadSJiawen Wu 
19510c061eadSJiawen Wu 	if (hw->phy.media_type == txgbe_media_type_copper) {
19520c061eadSJiawen Wu 		/* Turn off the copper */
19530c061eadSJiawen Wu 		hw->phy.set_phy_power(hw, false);
19540c061eadSJiawen Wu 	} else {
19550c061eadSJiawen Wu 		/* Turn off the laser */
19560c061eadSJiawen Wu 		hw->mac.disable_tx_laser(hw);
19572e1ecb46SJiawen Wu 		hw->dev_start = false;
19580c061eadSJiawen Wu 		txgbe_dev_link_update(dev, 0);
19590c061eadSJiawen Wu 	}
19600c061eadSJiawen Wu 
19610c061eadSJiawen Wu 	return 0;
19620c061eadSJiawen Wu }
19630c061eadSJiawen Wu 
19640c061eadSJiawen Wu /*
1965e1698e38SJiawen Wu  * Reset and stop device.
1966e1698e38SJiawen Wu  */
/*
 * Reset and stop device: full teardown of the port.  Stops traffic,
 * frees queues, MAC address arrays, all filter tables and the interrupt
 * callback so the device can be detached.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	txgbe_pf_reset_hw(hw);

	ret = txgbe_dev_stop(dev);
	/* NOTE(review): 'ret' is overwritten by the unregister loop below,
	 * so the value returned by this function reflects the callback
	 * unregistration, not the stop status — confirm this is intended.
	 */

	txgbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* retry while the interrupt handler is still executing (-EAGAIN) */
	do {
		ret = rte_intr_callback_unregister(intr_handle,
				txgbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));

	/* cancel the delay handler before remove dev */
	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

	/* uninitialize PF if max_vfs not zero */
	txgbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	/* remove all the fdir filters & hash */
	txgbe_fdir_filter_uninit(dev);

	/* remove all the L2 tunnel filters & hash */
	txgbe_l2_tn_filter_uninit(dev);

	/* Remove all ntuple filters of the device */
	txgbe_ntuple_filter_uninit(dev);

	/* clear all the filters list */
	txgbe_filterlist_flush();

	/* Remove all Traffic Manager configuration */
	txgbe_tm_conf_uninit(dev);

#ifdef RTE_LIB_SECURITY
	rte_free(dev->security_ctx);
	dev->security_ctx = NULL;
#endif

	return ret;
}
2040e0d876efSJiawen Wu 
2041e0d876efSJiawen Wu /*
2042e0d876efSJiawen Wu  * Reset PF device.
2043e0d876efSJiawen Wu  */
2044e0d876efSJiawen Wu static int
txgbe_dev_reset(struct rte_eth_dev * dev)2045e0d876efSJiawen Wu txgbe_dev_reset(struct rte_eth_dev *dev)
2046e0d876efSJiawen Wu {
2047e0d876efSJiawen Wu 	int ret;
2048e0d876efSJiawen Wu 
2049e0d876efSJiawen Wu 	/* When a DPDK PMD PF begin to reset PF port, it should notify all
2050e0d876efSJiawen Wu 	 * its VF to make them align with it. The detailed notification
2051e0d876efSJiawen Wu 	 * mechanism is PMD specific. As to txgbe PF, it is rather complex.
2052e0d876efSJiawen Wu 	 * To avoid unexpected behavior in VF, currently reset of PF with
2053e0d876efSJiawen Wu 	 * SR-IOV activation is not supported. It might be supported later.
2054e0d876efSJiawen Wu 	 */
2055e0d876efSJiawen Wu 	if (dev->data->sriov.active)
2056e0d876efSJiawen Wu 		return -ENOTSUP;
2057e0d876efSJiawen Wu 
2058e0d876efSJiawen Wu 	ret = eth_txgbe_dev_uninit(dev);
2059e0d876efSJiawen Wu 	if (ret)
2060e0d876efSJiawen Wu 		return ret;
2061e0d876efSJiawen Wu 
2062e0d876efSJiawen Wu 	ret = eth_txgbe_dev_init(dev, NULL);
2063e0d876efSJiawen Wu 
2064e0d876efSJiawen Wu 	return ret;
2065e1698e38SJiawen Wu }
2066e1698e38SJiawen Wu 
/*
 * Accumulate a 32-bit wrapping queue counter register into a 64-bit
 * software counter.  When the current read is below the previous one
 * the register has wrapped, so one full 2^32 period is added back
 * before the delta is taken.  While hw->offset_loaded is 0 (stats
 * reset in progress) the baseline is re-captured and the delta is zero.
 * Both macros rely on a local 'hw' pointer in the calling scope.
 *
 * Fix: current_counter must be 64-bit wide — in the previous uint32_t
 * variable the wrap compensation (+= 0x100000000LL) was silently
 * truncated to a no-op.  Also wrapped in do { } while (0) so the macros
 * behave as single statements in every context.
 */
#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
	do {                                                    \
		uint64_t current_counter = rd32(hw, reg);       \
		if (current_counter < (last_counter))           \
			current_counter += 0x100000000LL;       \
		if (!hw->offset_loaded)                         \
			(last_counter) = current_counter;       \
		(counter) = current_counter - (last_counter);   \
		(counter) &= 0xFFFFFFFFLL;                      \
	} while (0)

/*
 * Same as above for a 36-bit counter split across an LSB/MSB register
 * pair (wrap period 2^36, result masked to 36 bits).
 */
#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	do {                                                             \
		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
		uint64_t current_counter = (current_counter_msb << 32) | \
			current_counter_lsb;                             \
		if (current_counter < (last_counter))                    \
			current_counter += 0x1000000000LL;               \
		if (!hw->offset_loaded)                                  \
			(last_counter) = current_counter;                \
		(counter) = current_counter - (last_counter);            \
		(counter) &= 0xFFFFFFFFFLL;                              \
	} while (0)
2091c9bb590dSJiawen Wu 
/*
 * Read all hardware statistic counters into the software mirror
 * 'hw_stats'.  Most counters are accumulated (+=) on top of the
 * previous totals; the MNG counters are plain snapshots (=).  The
 * hardware registers are clear-on-read (see txgbe_dev_stats_reset),
 * and the per-queue macros re-capture their baselines when
 * hw->offset_loaded is 0, which implements the stats reset.
 */
void
txgbe_read_stats_registers(struct txgbe_hw *hw,
			   struct txgbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
	}
	/* PB Stats: per user-priority flow-control and drop counters */
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->up[i].rx_up_xon_packets +=
				rd32(hw, TXGBE_PBRXUPXON(i));
		hw_stats->up[i].rx_up_xoff_packets +=
				rd32(hw, TXGBE_PBRXUPXOFF(i));
		hw_stats->up[i].tx_up_xon_packets +=
				rd32(hw, TXGBE_PBTXUPXON(i));
		hw_stats->up[i].tx_up_xoff_packets +=
				rd32(hw, TXGBE_PBTXUPXOFF(i));
		hw_stats->up[i].tx_up_xon2off_packets +=
				rd32(hw, TXGBE_PBTXUPOFF(i));
		hw_stats->up[i].rx_up_dropped +=
				rd32(hw, TXGBE_PBRXMISS(i));
	}
	/* link-level (non per-priority) pause frames */
	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);

	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
	hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP);
	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);

	/* NOTE(review): the broadcast counters below read the *OCTL
	 * registers (and tx uses rd32 where rx uses rd64) — verify
	 * against the register map that these are the intended sources.
	 */
	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);

	/* Rx/Tx size-histogram counters */
	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);

	/* MNG Stats (snapshots, not accumulated) */
	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);

	/* FCoE Stats */
	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);

	/* Flow Director Stats */
	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
	hw_stats->flow_director_added_filters +=
		TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_removed_filters +=
		TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_filter_add_errors +=
		TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
	hw_stats->flow_director_filter_remove_errors +=
		TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, TXGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, TXGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
	/* per-SA counters exist for two association numbers */
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, TXGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, TXGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, TXGBE_LSECRX_BADPKT(i));
	}
	hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, TXGBE_LSECRX_INVSAPKT);
	hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, TXGBE_LSECRX_BADSAPKT);

	/* total missed = sum of the per-priority drop counters above */
	hw_stats->rx_total_missed_packets = 0;
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->rx_total_missed_packets +=
			hw_stats->up[i].rx_up_dropped;
	}
}
2247c9bb590dSJiawen Wu 
2248c9bb590dSJiawen Wu static int
txgbe_dev_stats_get(struct rte_eth_dev * dev,struct rte_eth_stats * stats)2249c9bb590dSJiawen Wu txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2250c9bb590dSJiawen Wu {
2251c9bb590dSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2252c9bb590dSJiawen Wu 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2253c9bb590dSJiawen Wu 	struct txgbe_stat_mappings *stat_mappings =
2254c9bb590dSJiawen Wu 			TXGBE_DEV_STAT_MAPPINGS(dev);
2255c9bb590dSJiawen Wu 	uint32_t i, j;
2256c9bb590dSJiawen Wu 
2257c9bb590dSJiawen Wu 	txgbe_read_stats_registers(hw, hw_stats);
2258c9bb590dSJiawen Wu 
2259c9bb590dSJiawen Wu 	if (stats == NULL)
2260c9bb590dSJiawen Wu 		return -EINVAL;
2261c9bb590dSJiawen Wu 
2262c9bb590dSJiawen Wu 	/* Fill out the rte_eth_stats statistics structure */
2263c9bb590dSJiawen Wu 	stats->ipackets = hw_stats->rx_packets;
2264c9bb590dSJiawen Wu 	stats->ibytes = hw_stats->rx_bytes;
2265c9bb590dSJiawen Wu 	stats->opackets = hw_stats->tx_packets;
2266c9bb590dSJiawen Wu 	stats->obytes = hw_stats->tx_bytes;
2267c9bb590dSJiawen Wu 
2268c9bb590dSJiawen Wu 	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
2269c9bb590dSJiawen Wu 	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
2270c9bb590dSJiawen Wu 	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
2271c9bb590dSJiawen Wu 	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
2272c9bb590dSJiawen Wu 	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
2273c9bb590dSJiawen Wu 	for (i = 0; i < TXGBE_MAX_QP; i++) {
2274c9bb590dSJiawen Wu 		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2275c9bb590dSJiawen Wu 		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
2276c9bb590dSJiawen Wu 		uint32_t q_map;
2277c9bb590dSJiawen Wu 
2278c9bb590dSJiawen Wu 		q_map = (stat_mappings->rqsm[n] >> offset)
2279c9bb590dSJiawen Wu 				& QMAP_FIELD_RESERVED_BITS_MASK;
2280c9bb590dSJiawen Wu 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2281c9bb590dSJiawen Wu 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2282c9bb590dSJiawen Wu 		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2283c9bb590dSJiawen Wu 		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2284c9bb590dSJiawen Wu 
2285c9bb590dSJiawen Wu 		q_map = (stat_mappings->tqsm[n] >> offset)
2286c9bb590dSJiawen Wu 				& QMAP_FIELD_RESERVED_BITS_MASK;
2287c9bb590dSJiawen Wu 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2288c9bb590dSJiawen Wu 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2289c9bb590dSJiawen Wu 		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2290c9bb590dSJiawen Wu 		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2291c9bb590dSJiawen Wu 	}
2292c9bb590dSJiawen Wu 
2293c9bb590dSJiawen Wu 	/* Rx Errors */
2294fa702fdeSJiawen Wu 	stats->imissed  = hw_stats->rx_total_missed_packets +
2295fa702fdeSJiawen Wu 			  hw_stats->rx_dma_drop;
2296c9bb590dSJiawen Wu 	stats->ierrors  = hw_stats->rx_crc_errors +
2297c9bb590dSJiawen Wu 			  hw_stats->rx_mac_short_packet_dropped +
2298c9bb590dSJiawen Wu 			  hw_stats->rx_length_errors +
2299c9bb590dSJiawen Wu 			  hw_stats->rx_undersize_errors +
2300c9bb590dSJiawen Wu 			  hw_stats->rx_oversize_errors +
2301c9bb590dSJiawen Wu 			  hw_stats->rx_drop_packets +
2302c9bb590dSJiawen Wu 			  hw_stats->rx_illegal_byte_errors +
2303c9bb590dSJiawen Wu 			  hw_stats->rx_error_bytes +
2304c9bb590dSJiawen Wu 			  hw_stats->rx_fragment_errors +
2305c9bb590dSJiawen Wu 			  hw_stats->rx_fcoe_crc_errors +
2306c9bb590dSJiawen Wu 			  hw_stats->rx_fcoe_mbuf_allocation_errors;
2307c9bb590dSJiawen Wu 
2308c9bb590dSJiawen Wu 	/* Tx Errors */
2309c9bb590dSJiawen Wu 	stats->oerrors  = 0;
2310c9bb590dSJiawen Wu 	return 0;
2311c9bb590dSJiawen Wu }
2312c9bb590dSJiawen Wu 
2313c9bb590dSJiawen Wu static int
txgbe_dev_stats_reset(struct rte_eth_dev * dev)2314c9bb590dSJiawen Wu txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2315c9bb590dSJiawen Wu {
2316c9bb590dSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2317c9bb590dSJiawen Wu 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2318c9bb590dSJiawen Wu 
2319c9bb590dSJiawen Wu 	/* HW registers are cleared on read */
2320c9bb590dSJiawen Wu 	hw->offset_loaded = 0;
2321c9bb590dSJiawen Wu 	txgbe_dev_stats_get(dev, NULL);
2322c9bb590dSJiawen Wu 	hw->offset_loaded = 1;
2323c9bb590dSJiawen Wu 
2324c9bb590dSJiawen Wu 	/* Reset software totals */
2325c9bb590dSJiawen Wu 	memset(hw_stats, 0, sizeof(*hw_stats));
2326c9bb590dSJiawen Wu 
2327c9bb590dSJiawen Wu 	return 0;
2328c9bb590dSJiawen Wu }
2329c9bb590dSJiawen Wu 
233091fe49c8SJiawen Wu /* This function calculates the number of xstats based on the current config */
233191fe49c8SJiawen Wu static unsigned
txgbe_xstats_calc_num(struct rte_eth_dev * dev)233291fe49c8SJiawen Wu txgbe_xstats_calc_num(struct rte_eth_dev *dev)
233391fe49c8SJiawen Wu {
233491fe49c8SJiawen Wu 	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
233591fe49c8SJiawen Wu 	return TXGBE_NB_HW_STATS +
233691fe49c8SJiawen Wu 	       TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
233791fe49c8SJiawen Wu 	       TXGBE_NB_QP_STATS * nb_queues;
233891fe49c8SJiawen Wu }
233991fe49c8SJiawen Wu 
234091fe49c8SJiawen Wu static inline int
txgbe_get_name_by_id(uint32_t id,char * name,uint32_t size)234191fe49c8SJiawen Wu txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
234291fe49c8SJiawen Wu {
234391fe49c8SJiawen Wu 	int nb, st;
234491fe49c8SJiawen Wu 
234591fe49c8SJiawen Wu 	/* Extended stats from txgbe_hw_stats */
234691fe49c8SJiawen Wu 	if (id < TXGBE_NB_HW_STATS) {
234791fe49c8SJiawen Wu 		snprintf(name, size, "[hw]%s",
234891fe49c8SJiawen Wu 			rte_txgbe_stats_strings[id].name);
234991fe49c8SJiawen Wu 		return 0;
235091fe49c8SJiawen Wu 	}
235191fe49c8SJiawen Wu 	id -= TXGBE_NB_HW_STATS;
235291fe49c8SJiawen Wu 
235391fe49c8SJiawen Wu 	/* Priority Stats */
235491fe49c8SJiawen Wu 	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
235591fe49c8SJiawen Wu 		nb = id / TXGBE_NB_UP_STATS;
235691fe49c8SJiawen Wu 		st = id % TXGBE_NB_UP_STATS;
235791fe49c8SJiawen Wu 		snprintf(name, size, "[p%u]%s", nb,
235891fe49c8SJiawen Wu 			rte_txgbe_up_strings[st].name);
235991fe49c8SJiawen Wu 		return 0;
236091fe49c8SJiawen Wu 	}
236191fe49c8SJiawen Wu 	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
236291fe49c8SJiawen Wu 
236391fe49c8SJiawen Wu 	/* Queue Stats */
236491fe49c8SJiawen Wu 	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
236591fe49c8SJiawen Wu 		nb = id / TXGBE_NB_QP_STATS;
236691fe49c8SJiawen Wu 		st = id % TXGBE_NB_QP_STATS;
236791fe49c8SJiawen Wu 		snprintf(name, size, "[q%u]%s", nb,
236891fe49c8SJiawen Wu 			rte_txgbe_qp_strings[st].name);
236991fe49c8SJiawen Wu 		return 0;
237091fe49c8SJiawen Wu 	}
237191fe49c8SJiawen Wu 	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
237291fe49c8SJiawen Wu 
237391fe49c8SJiawen Wu 	return -(int)(id + 1);
237491fe49c8SJiawen Wu }
237591fe49c8SJiawen Wu 
237691fe49c8SJiawen Wu static inline int
txgbe_get_offset_by_id(uint32_t id,uint32_t * offset)237791fe49c8SJiawen Wu txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
237891fe49c8SJiawen Wu {
237991fe49c8SJiawen Wu 	int nb, st;
238091fe49c8SJiawen Wu 
238191fe49c8SJiawen Wu 	/* Extended stats from txgbe_hw_stats */
238291fe49c8SJiawen Wu 	if (id < TXGBE_NB_HW_STATS) {
238391fe49c8SJiawen Wu 		*offset = rte_txgbe_stats_strings[id].offset;
238491fe49c8SJiawen Wu 		return 0;
238591fe49c8SJiawen Wu 	}
238691fe49c8SJiawen Wu 	id -= TXGBE_NB_HW_STATS;
238791fe49c8SJiawen Wu 
238891fe49c8SJiawen Wu 	/* Priority Stats */
238991fe49c8SJiawen Wu 	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
239091fe49c8SJiawen Wu 		nb = id / TXGBE_NB_UP_STATS;
239191fe49c8SJiawen Wu 		st = id % TXGBE_NB_UP_STATS;
239291fe49c8SJiawen Wu 		*offset = rte_txgbe_up_strings[st].offset +
239391fe49c8SJiawen Wu 			nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
239491fe49c8SJiawen Wu 		return 0;
239591fe49c8SJiawen Wu 	}
239691fe49c8SJiawen Wu 	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
239791fe49c8SJiawen Wu 
239891fe49c8SJiawen Wu 	/* Queue Stats */
239991fe49c8SJiawen Wu 	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
240091fe49c8SJiawen Wu 		nb = id / TXGBE_NB_QP_STATS;
240191fe49c8SJiawen Wu 		st = id % TXGBE_NB_QP_STATS;
240291fe49c8SJiawen Wu 		*offset = rte_txgbe_qp_strings[st].offset +
240391fe49c8SJiawen Wu 			nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
240491fe49c8SJiawen Wu 		return 0;
240591fe49c8SJiawen Wu 	}
240691fe49c8SJiawen Wu 
2407f8b41a8eSConor Walsh 	return -1;
240891fe49c8SJiawen Wu }
240991fe49c8SJiawen Wu 
txgbe_dev_xstats_get_names(struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,unsigned int limit)241091fe49c8SJiawen Wu static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
241191fe49c8SJiawen Wu 	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
241291fe49c8SJiawen Wu {
241391fe49c8SJiawen Wu 	unsigned int i, count;
241491fe49c8SJiawen Wu 
241591fe49c8SJiawen Wu 	count = txgbe_xstats_calc_num(dev);
241691fe49c8SJiawen Wu 	if (xstats_names == NULL)
241791fe49c8SJiawen Wu 		return count;
241891fe49c8SJiawen Wu 
241991fe49c8SJiawen Wu 	/* Note: limit >= cnt_stats checked upstream
242091fe49c8SJiawen Wu 	 * in rte_eth_xstats_names()
242191fe49c8SJiawen Wu 	 */
242291fe49c8SJiawen Wu 	limit = min(limit, count);
242391fe49c8SJiawen Wu 
242491fe49c8SJiawen Wu 	/* Extended stats from txgbe_hw_stats */
242591fe49c8SJiawen Wu 	for (i = 0; i < limit; i++) {
242691fe49c8SJiawen Wu 		if (txgbe_get_name_by_id(i, xstats_names[i].name,
242791fe49c8SJiawen Wu 			sizeof(xstats_names[i].name))) {
242891fe49c8SJiawen Wu 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
242991fe49c8SJiawen Wu 			break;
243091fe49c8SJiawen Wu 		}
243191fe49c8SJiawen Wu 	}
243291fe49c8SJiawen Wu 
243391fe49c8SJiawen Wu 	return i;
243491fe49c8SJiawen Wu }
243591fe49c8SJiawen Wu 
txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev * dev,const uint64_t * ids,struct rte_eth_xstat_name * xstats_names,unsigned int limit)243691fe49c8SJiawen Wu static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
243791fe49c8SJiawen Wu 	const uint64_t *ids,
24388c9f976fSAndrew Rybchenko 	struct rte_eth_xstat_name *xstats_names,
243991fe49c8SJiawen Wu 	unsigned int limit)
244091fe49c8SJiawen Wu {
244191fe49c8SJiawen Wu 	unsigned int i;
244291fe49c8SJiawen Wu 
244391fe49c8SJiawen Wu 	if (ids == NULL)
244491fe49c8SJiawen Wu 		return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
244591fe49c8SJiawen Wu 
244691fe49c8SJiawen Wu 	for (i = 0; i < limit; i++) {
244791fe49c8SJiawen Wu 		if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
244891fe49c8SJiawen Wu 				sizeof(xstats_names[i].name))) {
244991fe49c8SJiawen Wu 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
245091fe49c8SJiawen Wu 			return -1;
245191fe49c8SJiawen Wu 		}
245291fe49c8SJiawen Wu 	}
245391fe49c8SJiawen Wu 
245491fe49c8SJiawen Wu 	return i;
245591fe49c8SJiawen Wu }
245691fe49c8SJiawen Wu 
245791fe49c8SJiawen Wu static int
txgbe_dev_xstats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,unsigned int limit)245891fe49c8SJiawen Wu txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
245991fe49c8SJiawen Wu 					 unsigned int limit)
246091fe49c8SJiawen Wu {
246191fe49c8SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
246291fe49c8SJiawen Wu 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
246391fe49c8SJiawen Wu 	unsigned int i, count;
246491fe49c8SJiawen Wu 
246591fe49c8SJiawen Wu 	txgbe_read_stats_registers(hw, hw_stats);
246691fe49c8SJiawen Wu 
246791fe49c8SJiawen Wu 	/* If this is a reset xstats is NULL, and we have cleared the
246891fe49c8SJiawen Wu 	 * registers by reading them.
246991fe49c8SJiawen Wu 	 */
247091fe49c8SJiawen Wu 	count = txgbe_xstats_calc_num(dev);
247191fe49c8SJiawen Wu 	if (xstats == NULL)
247291fe49c8SJiawen Wu 		return count;
247391fe49c8SJiawen Wu 
247491fe49c8SJiawen Wu 	limit = min(limit, txgbe_xstats_calc_num(dev));
247591fe49c8SJiawen Wu 
247691fe49c8SJiawen Wu 	/* Extended stats from txgbe_hw_stats */
247791fe49c8SJiawen Wu 	for (i = 0; i < limit; i++) {
247891fe49c8SJiawen Wu 		uint32_t offset = 0;
247991fe49c8SJiawen Wu 
248091fe49c8SJiawen Wu 		if (txgbe_get_offset_by_id(i, &offset)) {
248191fe49c8SJiawen Wu 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
248291fe49c8SJiawen Wu 			break;
248391fe49c8SJiawen Wu 		}
248491fe49c8SJiawen Wu 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
248591fe49c8SJiawen Wu 		xstats[i].id = i;
248691fe49c8SJiawen Wu 	}
248791fe49c8SJiawen Wu 
248891fe49c8SJiawen Wu 	return i;
248991fe49c8SJiawen Wu }
249091fe49c8SJiawen Wu 
249191fe49c8SJiawen Wu static int
txgbe_dev_xstats_get_(struct rte_eth_dev * dev,uint64_t * values,unsigned int limit)249291fe49c8SJiawen Wu txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
249391fe49c8SJiawen Wu 					 unsigned int limit)
249491fe49c8SJiawen Wu {
249591fe49c8SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
249691fe49c8SJiawen Wu 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
249791fe49c8SJiawen Wu 	unsigned int i, count;
249891fe49c8SJiawen Wu 
249991fe49c8SJiawen Wu 	txgbe_read_stats_registers(hw, hw_stats);
250091fe49c8SJiawen Wu 
250191fe49c8SJiawen Wu 	/* If this is a reset xstats is NULL, and we have cleared the
250291fe49c8SJiawen Wu 	 * registers by reading them.
250391fe49c8SJiawen Wu 	 */
250491fe49c8SJiawen Wu 	count = txgbe_xstats_calc_num(dev);
250591fe49c8SJiawen Wu 	if (values == NULL)
250691fe49c8SJiawen Wu 		return count;
250791fe49c8SJiawen Wu 
250891fe49c8SJiawen Wu 	limit = min(limit, txgbe_xstats_calc_num(dev));
250991fe49c8SJiawen Wu 
251091fe49c8SJiawen Wu 	/* Extended stats from txgbe_hw_stats */
251191fe49c8SJiawen Wu 	for (i = 0; i < limit; i++) {
251291fe49c8SJiawen Wu 		uint32_t offset;
251391fe49c8SJiawen Wu 
251491fe49c8SJiawen Wu 		if (txgbe_get_offset_by_id(i, &offset)) {
251591fe49c8SJiawen Wu 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
251691fe49c8SJiawen Wu 			break;
251791fe49c8SJiawen Wu 		}
251891fe49c8SJiawen Wu 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
251991fe49c8SJiawen Wu 	}
252091fe49c8SJiawen Wu 
252191fe49c8SJiawen Wu 	return i;
252291fe49c8SJiawen Wu }
252391fe49c8SJiawen Wu 
252491fe49c8SJiawen Wu static int
txgbe_dev_xstats_get_by_id(struct rte_eth_dev * dev,const uint64_t * ids,uint64_t * values,unsigned int limit)252591fe49c8SJiawen Wu txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
252691fe49c8SJiawen Wu 		uint64_t *values, unsigned int limit)
252791fe49c8SJiawen Wu {
252891fe49c8SJiawen Wu 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
252991fe49c8SJiawen Wu 	unsigned int i;
253091fe49c8SJiawen Wu 
253191fe49c8SJiawen Wu 	if (ids == NULL)
253291fe49c8SJiawen Wu 		return txgbe_dev_xstats_get_(dev, values, limit);
253391fe49c8SJiawen Wu 
253491fe49c8SJiawen Wu 	for (i = 0; i < limit; i++) {
253591fe49c8SJiawen Wu 		uint32_t offset;
253691fe49c8SJiawen Wu 
253791fe49c8SJiawen Wu 		if (txgbe_get_offset_by_id(ids[i], &offset)) {
253891fe49c8SJiawen Wu 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
253991fe49c8SJiawen Wu 			break;
254091fe49c8SJiawen Wu 		}
254191fe49c8SJiawen Wu 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
254291fe49c8SJiawen Wu 	}
254391fe49c8SJiawen Wu 
254491fe49c8SJiawen Wu 	return i;
254591fe49c8SJiawen Wu }
254691fe49c8SJiawen Wu 
254791fe49c8SJiawen Wu static int
txgbe_dev_xstats_reset(struct rte_eth_dev * dev)254891fe49c8SJiawen Wu txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
254991fe49c8SJiawen Wu {
255091fe49c8SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
255191fe49c8SJiawen Wu 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
255291fe49c8SJiawen Wu 
255391fe49c8SJiawen Wu 	/* HW registers are cleared on read */
255491fe49c8SJiawen Wu 	hw->offset_loaded = 0;
255591fe49c8SJiawen Wu 	txgbe_read_stats_registers(hw, hw_stats);
255691fe49c8SJiawen Wu 	hw->offset_loaded = 1;
255791fe49c8SJiawen Wu 
255891fe49c8SJiawen Wu 	/* Reset software totals */
255991fe49c8SJiawen Wu 	memset(hw_stats, 0, sizeof(*hw_stats));
256091fe49c8SJiawen Wu 
256191fe49c8SJiawen Wu 	return 0;
256291fe49c8SJiawen Wu }
256391fe49c8SJiawen Wu 
256486d8adc7SJiawen Wu static int
txgbe_fw_version_get(struct rte_eth_dev * dev,char * fw_version,size_t fw_size)2565bc84ac0fSJiawen Wu txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2566bc84ac0fSJiawen Wu {
2567bc84ac0fSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2568bc84ac0fSJiawen Wu 	u32 etrack_id;
2569bc84ac0fSJiawen Wu 	int ret;
2570bc84ac0fSJiawen Wu 
25716f47613cSJiawen Wu 	hw->phy.get_fw_version(hw, &etrack_id);
2572bc84ac0fSJiawen Wu 
2573bc84ac0fSJiawen Wu 	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2574d345d6c9SFerruh Yigit 	if (ret < 0)
2575d345d6c9SFerruh Yigit 		return -EINVAL;
2576bc84ac0fSJiawen Wu 
2577bc84ac0fSJiawen Wu 	ret += 1; /* add the size of '\0' */
2578d345d6c9SFerruh Yigit 	if (fw_size < (size_t)ret)
2579bc84ac0fSJiawen Wu 		return ret;
2580bc84ac0fSJiawen Wu 	else
2581bc84ac0fSJiawen Wu 		return 0;
2582bc84ac0fSJiawen Wu }
2583bc84ac0fSJiawen Wu 
2584bc84ac0fSJiawen Wu static int
txgbe_dev_info_get(struct rte_eth_dev * dev,struct rte_eth_dev_info * dev_info)258586d8adc7SJiawen Wu txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
258686d8adc7SJiawen Wu {
258786d8adc7SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
258886d8adc7SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
258986d8adc7SJiawen Wu 
259086d8adc7SJiawen Wu 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
259186d8adc7SJiawen Wu 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
259286d8adc7SJiawen Wu 	dev_info->min_rx_bufsize = 1024;
259386d8adc7SJiawen Wu 	dev_info->max_rx_pktlen = 15872;
259486d8adc7SJiawen Wu 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
259586d8adc7SJiawen Wu 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
259686d8adc7SJiawen Wu 	dev_info->max_vfs = pci_dev->max_vfs;
2597295968d1SFerruh Yigit 	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
259886d8adc7SJiawen Wu 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
25992fe6f1b7SDmitry Kozlyuk 	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
260086d8adc7SJiawen Wu 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
260186d8adc7SJiawen Wu 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
260286d8adc7SJiawen Wu 				     dev_info->rx_queue_offload_capa);
260386d8adc7SJiawen Wu 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
260486d8adc7SJiawen Wu 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
260586d8adc7SJiawen Wu 
260686d8adc7SJiawen Wu 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
260786d8adc7SJiawen Wu 		.rx_thresh = {
260886d8adc7SJiawen Wu 			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
260986d8adc7SJiawen Wu 			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
261086d8adc7SJiawen Wu 			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
261186d8adc7SJiawen Wu 		},
261286d8adc7SJiawen Wu 		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
261386d8adc7SJiawen Wu 		.rx_drop_en = 0,
261486d8adc7SJiawen Wu 		.offloads = 0,
261586d8adc7SJiawen Wu 	};
261686d8adc7SJiawen Wu 
261786d8adc7SJiawen Wu 	dev_info->default_txconf = (struct rte_eth_txconf) {
261886d8adc7SJiawen Wu 		.tx_thresh = {
261986d8adc7SJiawen Wu 			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
262086d8adc7SJiawen Wu 			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
262186d8adc7SJiawen Wu 			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
262286d8adc7SJiawen Wu 		},
262386d8adc7SJiawen Wu 		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
262486d8adc7SJiawen Wu 		.offloads = 0,
262586d8adc7SJiawen Wu 	};
262686d8adc7SJiawen Wu 
262786d8adc7SJiawen Wu 	dev_info->rx_desc_lim = rx_desc_lim;
262886d8adc7SJiawen Wu 	dev_info->tx_desc_lim = tx_desc_lim;
262986d8adc7SJiawen Wu 
263086d8adc7SJiawen Wu 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2631295968d1SFerruh Yigit 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
263286d8adc7SJiawen Wu 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
263386d8adc7SJiawen Wu 
2634295968d1SFerruh Yigit 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
2635295968d1SFerruh Yigit 	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
263686d8adc7SJiawen Wu 
263786d8adc7SJiawen Wu 	/* Driver-preferred Rx/Tx parameters */
263886d8adc7SJiawen Wu 	dev_info->default_rxportconf.burst_size = 32;
263986d8adc7SJiawen Wu 	dev_info->default_txportconf.burst_size = 32;
264086d8adc7SJiawen Wu 	dev_info->default_rxportconf.nb_queues = 1;
264186d8adc7SJiawen Wu 	dev_info->default_txportconf.nb_queues = 1;
264286d8adc7SJiawen Wu 	dev_info->default_rxportconf.ring_size = 256;
264386d8adc7SJiawen Wu 	dev_info->default_txportconf.ring_size = 256;
264486d8adc7SJiawen Wu 
264586d8adc7SJiawen Wu 	return 0;
264686d8adc7SJiawen Wu }
264786d8adc7SJiawen Wu 
26480e484278SJiawen Wu const uint32_t *
txgbe_dev_supported_ptypes_get(struct rte_eth_dev * dev)26490e484278SJiawen Wu txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
26500e484278SJiawen Wu {
26510e484278SJiawen Wu 	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
26520e484278SJiawen Wu 	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
26530e484278SJiawen Wu 	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
26540e484278SJiawen Wu 	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
26550e484278SJiawen Wu 		return txgbe_get_supported_ptypes();
26560e484278SJiawen Wu 
26570e484278SJiawen Wu 	return NULL;
26580e484278SJiawen Wu }
26590e484278SJiawen Wu 
26600c061eadSJiawen Wu void
txgbe_dev_setup_link_alarm_handler(void * param)26610c061eadSJiawen Wu txgbe_dev_setup_link_alarm_handler(void *param)
26620c061eadSJiawen Wu {
26630c061eadSJiawen Wu 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
26640c061eadSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
26650c061eadSJiawen Wu 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
26660c061eadSJiawen Wu 	u32 speed;
26670c061eadSJiawen Wu 	bool autoneg = false;
26680c061eadSJiawen Wu 
26690c061eadSJiawen Wu 	speed = hw->phy.autoneg_advertised;
26700c061eadSJiawen Wu 	if (!speed)
26710c061eadSJiawen Wu 		hw->mac.get_link_capabilities(hw, &speed, &autoneg);
26720c061eadSJiawen Wu 
26730c061eadSJiawen Wu 	hw->mac.setup_link(hw, speed, true);
26740c061eadSJiawen Wu 
26750c061eadSJiawen Wu 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
26760c061eadSJiawen Wu }
26770c061eadSJiawen Wu 
/* return 0 means link status changed, -1 means not changed */
int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_link link;
	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	bool link_up;
	int err;
	int wait = 1;

	/* Start from a "link down" default; overwritten below if up. */
	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			RTE_ETH_LINK_SPEED_FIXED);

	hw->mac.get_link_status = true;

	/* A link (re)configuration is still pending: report down for now. */
	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);

	/* On a failed link check report a conservative 100M full-duplex.
	 * NOTE(review): link_status stays DOWN here — presumably intended;
	 * confirm against hardware behavior.
	 */
	if (err != 0) {
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0) {
		/* Backplane (KR/KX/KX4) devices get a down-event callback;
		 * fiber devices schedule a deferred link reconfiguration.
		 */
		if ((hw->subsystem_device_id & 0xFF) ==
				TXGBE_DEV_ID_KR_KX_KX4) {
			hw->mac.bp_down_event(hw);
		} else if (hw->phy.media_type == txgbe_media_type_fiber) {
			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
			rte_eal_alarm_set(10,
				txgbe_dev_setup_link_alarm_handler, dev);
		}
		return rte_eth_linkstatus_set(dev, &link);
	} else if (!hw->dev_start) {
		/* Device not started yet: keep reporting link down. */
		return rte_eth_linkstatus_set(dev, &link);
	}

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	/* Translate the hardware speed code into the ethdev value. */
	switch (link_speed) {
	default:
	case TXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_100M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		break;

	case TXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
		break;

	case TXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_5G;
		break;

	case TXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
27630c061eadSJiawen Wu 
static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	/* PF path simply delegates to the PF/VF-shared implementation. */
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}
27692fc745e6SJiawen Wu 
2770d06e6723SJiawen Wu static int
txgbe_dev_promiscuous_enable(struct rte_eth_dev * dev)2771d06e6723SJiawen Wu txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2772d06e6723SJiawen Wu {
2773d06e6723SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2774d06e6723SJiawen Wu 	uint32_t fctrl;
2775d06e6723SJiawen Wu 
2776d06e6723SJiawen Wu 	fctrl = rd32(hw, TXGBE_PSRCTL);
2777d06e6723SJiawen Wu 	fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2778d06e6723SJiawen Wu 	wr32(hw, TXGBE_PSRCTL, fctrl);
2779d06e6723SJiawen Wu 
2780d06e6723SJiawen Wu 	return 0;
2781d06e6723SJiawen Wu }
2782d06e6723SJiawen Wu 
2783d06e6723SJiawen Wu static int
txgbe_dev_promiscuous_disable(struct rte_eth_dev * dev)2784d06e6723SJiawen Wu txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2785d06e6723SJiawen Wu {
2786d06e6723SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2787d06e6723SJiawen Wu 	uint32_t fctrl;
2788d06e6723SJiawen Wu 
2789d06e6723SJiawen Wu 	fctrl = rd32(hw, TXGBE_PSRCTL);
2790d06e6723SJiawen Wu 	fctrl &= (~TXGBE_PSRCTL_UCP);
2791d06e6723SJiawen Wu 	if (dev->data->all_multicast == 1)
2792d06e6723SJiawen Wu 		fctrl |= TXGBE_PSRCTL_MCP;
2793d06e6723SJiawen Wu 	else
2794d06e6723SJiawen Wu 		fctrl &= (~TXGBE_PSRCTL_MCP);
2795d06e6723SJiawen Wu 	wr32(hw, TXGBE_PSRCTL, fctrl);
2796d06e6723SJiawen Wu 
2797d06e6723SJiawen Wu 	return 0;
2798d06e6723SJiawen Wu }
2799d06e6723SJiawen Wu 
2800d06e6723SJiawen Wu static int
txgbe_dev_allmulticast_enable(struct rte_eth_dev * dev)2801d06e6723SJiawen Wu txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2802d06e6723SJiawen Wu {
2803d06e6723SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2804d06e6723SJiawen Wu 	uint32_t fctrl;
2805d06e6723SJiawen Wu 
2806d06e6723SJiawen Wu 	fctrl = rd32(hw, TXGBE_PSRCTL);
2807d06e6723SJiawen Wu 	fctrl |= TXGBE_PSRCTL_MCP;
2808d06e6723SJiawen Wu 	wr32(hw, TXGBE_PSRCTL, fctrl);
2809d06e6723SJiawen Wu 
2810d06e6723SJiawen Wu 	return 0;
2811d06e6723SJiawen Wu }
2812d06e6723SJiawen Wu 
2813d06e6723SJiawen Wu static int
txgbe_dev_allmulticast_disable(struct rte_eth_dev * dev)2814d06e6723SJiawen Wu txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2815d06e6723SJiawen Wu {
2816d06e6723SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2817d06e6723SJiawen Wu 	uint32_t fctrl;
2818d06e6723SJiawen Wu 
2819d06e6723SJiawen Wu 	if (dev->data->promiscuous == 1)
2820d06e6723SJiawen Wu 		return 0; /* must remain in all_multicast mode */
2821d06e6723SJiawen Wu 
2822d06e6723SJiawen Wu 	fctrl = rd32(hw, TXGBE_PSRCTL);
2823d06e6723SJiawen Wu 	fctrl &= (~TXGBE_PSRCTL_MCP);
2824d06e6723SJiawen Wu 	wr32(hw, TXGBE_PSRCTL, fctrl);
2825d06e6723SJiawen Wu 
2826d06e6723SJiawen Wu 	return 0;
2827d06e6723SJiawen Wu }
2828d06e6723SJiawen Wu 
28292fc745e6SJiawen Wu /**
28302fc745e6SJiawen Wu  * It clears the interrupt causes and enables the interrupt.
28312fc745e6SJiawen Wu  * It will be called once only during nic initialized.
28322fc745e6SJiawen Wu  *
28332fc745e6SJiawen Wu  * @param dev
28342fc745e6SJiawen Wu  *  Pointer to struct rte_eth_dev.
28352fc745e6SJiawen Wu  * @param on
28362fc745e6SJiawen Wu  *  Enable or Disable.
28372fc745e6SJiawen Wu  *
28382fc745e6SJiawen Wu  * @return
28392fc745e6SJiawen Wu  *  - On success, zero.
28402fc745e6SJiawen Wu  *  - On failure, a negative value.
28412fc745e6SJiawen Wu  */
28422fc745e6SJiawen Wu static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev * dev,uint8_t on)28432fc745e6SJiawen Wu txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
28442fc745e6SJiawen Wu {
28452fc745e6SJiawen Wu 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
28462fc745e6SJiawen Wu 
28472fc745e6SJiawen Wu 	txgbe_dev_link_status_print(dev);
28482fc745e6SJiawen Wu 	if (on)
28492fc745e6SJiawen Wu 		intr->mask_misc |= TXGBE_ICRMISC_LSC;
28502fc745e6SJiawen Wu 	else
28512fc745e6SJiawen Wu 		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
28522fc745e6SJiawen Wu 
28532fc745e6SJiawen Wu 	return 0;
28542fc745e6SJiawen Wu }
28552fc745e6SJiawen Wu 
285682650948SJiawen Wu static int
txgbe_dev_misc_interrupt_setup(struct rte_eth_dev * dev)285782650948SJiawen Wu txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
285882650948SJiawen Wu {
285982650948SJiawen Wu 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
286082650948SJiawen Wu 	u64 mask;
286182650948SJiawen Wu 
286282650948SJiawen Wu 	mask = TXGBE_ICR_MASK;
286382650948SJiawen Wu 	mask &= (1ULL << TXGBE_MISC_VEC_ID);
286482650948SJiawen Wu 	intr->mask |= mask;
286582650948SJiawen Wu 	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
286682650948SJiawen Wu 	intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
286782650948SJiawen Wu 	return 0;
286882650948SJiawen Wu }
286982650948SJiawen Wu 
28702fc745e6SJiawen Wu /**
28712fc745e6SJiawen Wu  * It clears the interrupt causes and enables the interrupt.
28722fc745e6SJiawen Wu  * It will be called once only during nic initialized.
28732fc745e6SJiawen Wu  *
28742fc745e6SJiawen Wu  * @param dev
28752fc745e6SJiawen Wu  *  Pointer to struct rte_eth_dev.
28762fc745e6SJiawen Wu  *
28772fc745e6SJiawen Wu  * @return
28782fc745e6SJiawen Wu  *  - On success, zero.
28792fc745e6SJiawen Wu  *  - On failure, a negative value.
28802fc745e6SJiawen Wu  */
28812fc745e6SJiawen Wu static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev * dev)28822fc745e6SJiawen Wu txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
28832fc745e6SJiawen Wu {
28842fc745e6SJiawen Wu 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
288582650948SJiawen Wu 	u64 mask;
28862fc745e6SJiawen Wu 
288782650948SJiawen Wu 	mask = TXGBE_ICR_MASK;
288882650948SJiawen Wu 	mask &= ~((1ULL << TXGBE_RX_VEC_START) - 1);
288982650948SJiawen Wu 	intr->mask |= mask;
28902fc745e6SJiawen Wu 
28912fc745e6SJiawen Wu 	return 0;
28922fc745e6SJiawen Wu }
28932fc745e6SJiawen Wu 
28942fc745e6SJiawen Wu /**
28952fc745e6SJiawen Wu  * It clears the interrupt causes and enables the interrupt.
28962fc745e6SJiawen Wu  * It will be called once only during nic initialized.
28972fc745e6SJiawen Wu  *
28982fc745e6SJiawen Wu  * @param dev
28992fc745e6SJiawen Wu  *  Pointer to struct rte_eth_dev.
29002fc745e6SJiawen Wu  *
29012fc745e6SJiawen Wu  * @return
29022fc745e6SJiawen Wu  *  - On success, zero.
29032fc745e6SJiawen Wu  *  - On failure, a negative value.
29042fc745e6SJiawen Wu  */
29052fc745e6SJiawen Wu static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev * dev)29062fc745e6SJiawen Wu txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
29072fc745e6SJiawen Wu {
29082fc745e6SJiawen Wu 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
29092fc745e6SJiawen Wu 
29102fc745e6SJiawen Wu 	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
29112fc745e6SJiawen Wu 
29122fc745e6SJiawen Wu 	return 0;
29132fc745e6SJiawen Wu }
29142fc745e6SJiawen Wu 
/*
 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
				struct rte_intr_handle *intr_handle)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	/* For interrupt modes other than UIO/VFIO-MSIX, ack the
	 * PX_INTA register explicitly.
	 */
	if (rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_UIO &&
		rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX)
		wr32(hw, TXGBE_PX_INTA, 1);

	/* clear all cause mask */
	txgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	/* Rebuild the flag set from scratch for this interrupt. */
	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & TXGBE_ICRMISC_LSC)
		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/* Autoneg done (backplane). */
	if (eicr & TXGBE_ICRMISC_ANDONE)
		intr->flags |= TXGBE_FLAG_NEED_AN_CONFIG;

	/* VF mailbox message pending. */
	if (eicr & TXGBE_ICRMISC_VFMBX)
		intr->flags |= TXGBE_FLAG_MAILBOX;

	/* Link security (MACsec) event. */
	if (eicr & TXGBE_ICRMISC_LNKSEC)
		intr->flags |= TXGBE_FLAG_MACSEC;

	/* GPIO event — presumably SFP/PHY related; handled via lasi. */
	if (eicr & TXGBE_ICRMISC_GPIO)
		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}
29642fc745e6SJiawen Wu 
29652fc745e6SJiawen Wu /**
29662fc745e6SJiawen Wu  * It gets and then prints the link status.
29672fc745e6SJiawen Wu  *
29682fc745e6SJiawen Wu  * @param dev
29692fc745e6SJiawen Wu  *  Pointer to struct rte_eth_dev.
29702fc745e6SJiawen Wu  *
29712fc745e6SJiawen Wu  * @return
29722fc745e6SJiawen Wu  *  - On success, zero.
29732fc745e6SJiawen Wu  *  - On failure, a negative value.
29742fc745e6SJiawen Wu  */
29752fc745e6SJiawen Wu static void
txgbe_dev_link_status_print(struct rte_eth_dev * dev)29762fc745e6SJiawen Wu txgbe_dev_link_status_print(struct rte_eth_dev *dev)
29772fc745e6SJiawen Wu {
29782fc745e6SJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
29792fc745e6SJiawen Wu 	struct rte_eth_link link;
29802fc745e6SJiawen Wu 
29812fc745e6SJiawen Wu 	rte_eth_linkstatus_get(dev, &link);
29822fc745e6SJiawen Wu 
29832fc745e6SJiawen Wu 	if (link.link_status) {
29842fc745e6SJiawen Wu 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
29852fc745e6SJiawen Wu 					(int)(dev->data->port_id),
29862fc745e6SJiawen Wu 					(unsigned int)link.link_speed,
2987295968d1SFerruh Yigit 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
29882fc745e6SJiawen Wu 					"full-duplex" : "half-duplex");
29892fc745e6SJiawen Wu 	} else {
29902fc745e6SJiawen Wu 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
29912fc745e6SJiawen Wu 				(int)(dev->data->port_id));
29922fc745e6SJiawen Wu 	}
29932fc745e6SJiawen Wu 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
29942fc745e6SJiawen Wu 				pci_dev->addr.domain,
29952fc745e6SJiawen Wu 				pci_dev->addr.bus,
29962fc745e6SJiawen Wu 				pci_dev->addr.devid,
29972fc745e6SJiawen Wu 				pci_dev->addr.function);
29982fc745e6SJiawen Wu }
29992fc745e6SJiawen Wu 
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	/* Service a pending VF mailbox message. */
	if (intr->flags & TXGBE_FLAG_MAILBOX) {
		txgbe_pf_mbx_process(dev);
		intr->flags &= ~TXGBE_FLAG_MAILBOX;
	}

	/* Service a PHY (GPIO) event. */
	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	/* Backplane autoneg-done: only handled when auto_neg is on and
	 * polling mode is off; otherwise the flag is left pending.
	 */
	if (intr->flags & TXGBE_FLAG_NEED_AN_CONFIG) {
		if (hw->devarg.auto_neg == 1 && hw->devarg.poll == 0) {
			hw->mac.kr_handle(hw);
			intr->flags &= ~TXGBE_FLAG_NEED_AN_CONFIG;
		}
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/*get the link status before link update, for predicting later*/
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		/* likely to up */
		if (!link.link_status)
			/* handle it 1 sec later, wait it being stable */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else if ((hw->subsystem_device_id & 0xFF) ==
				TXGBE_DEV_ID_KR_KX_KX4 &&
				hw->devarg.auto_neg == 1)
			/* handle it 2 sec later for backplane AN73 */
			timeout = 2000;
		else
			/* handle it 4 sec later, wait it being stable */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		/* Defer the final link handling to the delayed handler;
		 * on success, mask LSC and all misc interrupts until then.
		 */
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* only disable lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

			/* Saved mask is restored by the delayed handler. */
			intr->mask_orig = intr->mask;
			/* only disable all misc interrupts */
			intr->mask &= ~(1ULL << TXGBE_MISC_VEC_ID);
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}
30802fc745e6SJiawen Wu 
/**
 * Interrupt handler which shall be registered for alarm callback for delayed
 * handling specific interrupt to wait for the stable nic state. As the
 * NIC interrupt state is not stable for txgbe after link is just down,
 * it needs to wait 4 seconds to get the stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
txgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t eicr;

	/* Keep the device quiet while the deferred causes are serviced */
	txgbe_disable_intr(hw);

	/* Pick up the latched misc causes from the interrupt status block */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	if (eicr & TXGBE_ICRMISC_VFMBX)
		txgbe_pf_mbx_process(dev);

	/* Let the PHY clear its latched alarm/status state */
	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	/* Link should be stable by now; refresh it and notify listeners */
	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		txgbe_dev_link_update(dev, 0);
		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
		txgbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}

	if (intr->flags & TXGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					      NULL);
		intr->flags &= ~TXGBE_FLAG_MACSEC;
	}

	/* restore original mask (saved by the immediate handler before it
	 * armed this alarm): re-enable LSC and the misc vector bits
	 */
	intr->mask_misc |= TXGBE_ICRMISC_LSC;

	intr->mask = intr->mask_orig;
	intr->mask_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}
31402fc745e6SJiawen Wu 
31412fc745e6SJiawen Wu /**
31422fc745e6SJiawen Wu  * Interrupt handler triggered by NIC  for handling
31432fc745e6SJiawen Wu  * specific interrupt.
31442fc745e6SJiawen Wu  *
31452fc745e6SJiawen Wu  * @param handle
31462fc745e6SJiawen Wu  *  Pointer to interrupt handle.
31472fc745e6SJiawen Wu  * @param param
31482fc745e6SJiawen Wu  *  The address of parameter (struct rte_eth_dev *) registered before.
31492fc745e6SJiawen Wu  *
31502fc745e6SJiawen Wu  * @return
31512fc745e6SJiawen Wu  *  void
31522fc745e6SJiawen Wu  */
31532fc745e6SJiawen Wu static void
txgbe_dev_interrupt_handler(void * param)31542fc745e6SJiawen Wu txgbe_dev_interrupt_handler(void *param)
31552fc745e6SJiawen Wu {
31562fc745e6SJiawen Wu 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
31572fc745e6SJiawen Wu 
3158ef6427a3SJiawen Wu 	txgbe_dev_interrupt_get_status(dev, dev->intr_handle);
31592fc745e6SJiawen Wu 	txgbe_dev_interrupt_action(dev, dev->intr_handle);
31602fc745e6SJiawen Wu }
31612fc745e6SJiawen Wu 
316269ce8c8aSJiawen Wu static int
txgbe_dev_led_on(struct rte_eth_dev * dev)316309afa548SJiawen Wu txgbe_dev_led_on(struct rte_eth_dev *dev)
316409afa548SJiawen Wu {
316509afa548SJiawen Wu 	struct txgbe_hw *hw;
316609afa548SJiawen Wu 
316709afa548SJiawen Wu 	hw = TXGBE_DEV_HW(dev);
3168f45834fdSJiawen Wu 	return txgbe_led_on(hw, TXGBE_LEDCTL_ACTIVE) == 0 ? 0 : -ENOTSUP;
316909afa548SJiawen Wu }
317009afa548SJiawen Wu 
317109afa548SJiawen Wu static int
txgbe_dev_led_off(struct rte_eth_dev * dev)317209afa548SJiawen Wu txgbe_dev_led_off(struct rte_eth_dev *dev)
317309afa548SJiawen Wu {
317409afa548SJiawen Wu 	struct txgbe_hw *hw;
317509afa548SJiawen Wu 
317609afa548SJiawen Wu 	hw = TXGBE_DEV_HW(dev);
3177f45834fdSJiawen Wu 	return txgbe_led_off(hw, TXGBE_LEDCTL_ACTIVE) == 0 ? 0 : -ENOTSUP;
317809afa548SJiawen Wu }
317909afa548SJiawen Wu 
318009afa548SJiawen Wu static int
txgbe_flow_ctrl_get(struct rte_eth_dev * dev,struct rte_eth_fc_conf * fc_conf)318169ce8c8aSJiawen Wu txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
318269ce8c8aSJiawen Wu {
318369ce8c8aSJiawen Wu 	struct txgbe_hw *hw;
318469ce8c8aSJiawen Wu 	uint32_t mflcn_reg;
318569ce8c8aSJiawen Wu 	uint32_t fccfg_reg;
318669ce8c8aSJiawen Wu 	int rx_pause;
318769ce8c8aSJiawen Wu 	int tx_pause;
318869ce8c8aSJiawen Wu 
318969ce8c8aSJiawen Wu 	hw = TXGBE_DEV_HW(dev);
319069ce8c8aSJiawen Wu 
319169ce8c8aSJiawen Wu 	fc_conf->pause_time = hw->fc.pause_time;
319269ce8c8aSJiawen Wu 	fc_conf->high_water = hw->fc.high_water[0];
319369ce8c8aSJiawen Wu 	fc_conf->low_water = hw->fc.low_water[0];
319469ce8c8aSJiawen Wu 	fc_conf->send_xon = hw->fc.send_xon;
319569ce8c8aSJiawen Wu 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
319669ce8c8aSJiawen Wu 
319769ce8c8aSJiawen Wu 	/*
319869ce8c8aSJiawen Wu 	 * Return rx_pause status according to actual setting of
319969ce8c8aSJiawen Wu 	 * RXFCCFG register.
320069ce8c8aSJiawen Wu 	 */
320169ce8c8aSJiawen Wu 	mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
320269ce8c8aSJiawen Wu 	if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
320369ce8c8aSJiawen Wu 		rx_pause = 1;
320469ce8c8aSJiawen Wu 	else
320569ce8c8aSJiawen Wu 		rx_pause = 0;
320669ce8c8aSJiawen Wu 
320769ce8c8aSJiawen Wu 	/*
320869ce8c8aSJiawen Wu 	 * Return tx_pause status according to actual setting of
320969ce8c8aSJiawen Wu 	 * TXFCCFG register.
321069ce8c8aSJiawen Wu 	 */
321169ce8c8aSJiawen Wu 	fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
321269ce8c8aSJiawen Wu 	if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
321369ce8c8aSJiawen Wu 		tx_pause = 1;
321469ce8c8aSJiawen Wu 	else
321569ce8c8aSJiawen Wu 		tx_pause = 0;
321669ce8c8aSJiawen Wu 
321769ce8c8aSJiawen Wu 	if (rx_pause && tx_pause)
3218295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_FULL;
321969ce8c8aSJiawen Wu 	else if (rx_pause)
3220295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
322169ce8c8aSJiawen Wu 	else if (tx_pause)
3222295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
322369ce8c8aSJiawen Wu 	else
3224295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_NONE;
322569ce8c8aSJiawen Wu 
322669ce8c8aSJiawen Wu 	return 0;
322769ce8c8aSJiawen Wu }
322869ce8c8aSJiawen Wu 
322969ce8c8aSJiawen Wu static int
txgbe_flow_ctrl_set(struct rte_eth_dev * dev,struct rte_eth_fc_conf * fc_conf)323069ce8c8aSJiawen Wu txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
323169ce8c8aSJiawen Wu {
323269ce8c8aSJiawen Wu 	struct txgbe_hw *hw;
323369ce8c8aSJiawen Wu 	int err;
323469ce8c8aSJiawen Wu 	uint32_t rx_buf_size;
323569ce8c8aSJiawen Wu 	uint32_t max_high_water;
323669ce8c8aSJiawen Wu 	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
323769ce8c8aSJiawen Wu 		txgbe_fc_none,
323869ce8c8aSJiawen Wu 		txgbe_fc_rx_pause,
323969ce8c8aSJiawen Wu 		txgbe_fc_tx_pause,
324069ce8c8aSJiawen Wu 		txgbe_fc_full
324169ce8c8aSJiawen Wu 	};
324269ce8c8aSJiawen Wu 
324369ce8c8aSJiawen Wu 	PMD_INIT_FUNC_TRACE();
324469ce8c8aSJiawen Wu 
324569ce8c8aSJiawen Wu 	hw = TXGBE_DEV_HW(dev);
324669ce8c8aSJiawen Wu 	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
324769ce8c8aSJiawen Wu 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
324869ce8c8aSJiawen Wu 
324969ce8c8aSJiawen Wu 	/*
325069ce8c8aSJiawen Wu 	 * At least reserve one Ethernet frame for watermark
325169ce8c8aSJiawen Wu 	 * high_water/low_water in kilo bytes for txgbe
325269ce8c8aSJiawen Wu 	 */
325369ce8c8aSJiawen Wu 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
325469ce8c8aSJiawen Wu 	if (fc_conf->high_water > max_high_water ||
325569ce8c8aSJiawen Wu 	    fc_conf->high_water < fc_conf->low_water) {
325669ce8c8aSJiawen Wu 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
325769ce8c8aSJiawen Wu 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
325869ce8c8aSJiawen Wu 		return -EINVAL;
325969ce8c8aSJiawen Wu 	}
326069ce8c8aSJiawen Wu 
326169ce8c8aSJiawen Wu 	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
326269ce8c8aSJiawen Wu 	hw->fc.pause_time     = fc_conf->pause_time;
326369ce8c8aSJiawen Wu 	hw->fc.high_water[0]  = fc_conf->high_water;
326469ce8c8aSJiawen Wu 	hw->fc.low_water[0]   = fc_conf->low_water;
326569ce8c8aSJiawen Wu 	hw->fc.send_xon       = fc_conf->send_xon;
326669ce8c8aSJiawen Wu 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
326769ce8c8aSJiawen Wu 
326869ce8c8aSJiawen Wu 	err = txgbe_fc_enable(hw);
326969ce8c8aSJiawen Wu 
327069ce8c8aSJiawen Wu 	/* Not negotiated is not an error case */
327169ce8c8aSJiawen Wu 	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
327269ce8c8aSJiawen Wu 		wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
327369ce8c8aSJiawen Wu 		      (fc_conf->mac_ctrl_frame_fwd
327469ce8c8aSJiawen Wu 		       ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
327569ce8c8aSJiawen Wu 		txgbe_flush(hw);
327669ce8c8aSJiawen Wu 
327769ce8c8aSJiawen Wu 		return 0;
327869ce8c8aSJiawen Wu 	}
327969ce8c8aSJiawen Wu 
328069ce8c8aSJiawen Wu 	PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
328169ce8c8aSJiawen Wu 	return -EIO;
328269ce8c8aSJiawen Wu }
328369ce8c8aSJiawen Wu 
/*
 * Apply priority (per-traffic-class) flow control for one user priority:
 * resolve the priority to its traffic class via the CEE DCB map, validate
 * the watermarks against that TC's Rx buffer, then enable PFC on it.
 */
static int
txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf)
{
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint8_t tc_num;
	uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);

	/* rte_eth_fc_mode values index directly into this table */
	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
		txgbe_fc_none,
		txgbe_fc_rx_pause,
		txgbe_fc_tx_pause,
		txgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	/* Translate the user priority into its traffic class number */
	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
	tc_num = map[pfc_conf->priority];
	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
	/*
	 * At least reserve one Ethernet frame for watermark
	 * high_water/low_water in kilo bytes for txgbe
	 */
	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
	if (pfc_conf->fc.high_water > max_high_water ||
	    pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
		return -EINVAL;
	}

	/* Record the requested settings against this TC's watermark slots */
	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
	hw->fc.pause_time = pfc_conf->fc.pause_time;
	hw->fc.send_xon = pfc_conf->fc.send_xon;
	hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;

	err = txgbe_dcb_pfc_enable(hw, tc_num);

	/* Not negotiated is not an error case */
	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
		return 0;

	PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
	return -EIO;
}
3336cc389e51SJiawen Wu 
33379e487a37SJiawen Wu int
txgbe_dev_rss_reta_update(struct rte_eth_dev * dev,struct rte_eth_rss_reta_entry64 * reta_conf,uint16_t reta_size)33389e487a37SJiawen Wu txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
33399e487a37SJiawen Wu 			  struct rte_eth_rss_reta_entry64 *reta_conf,
33409e487a37SJiawen Wu 			  uint16_t reta_size)
33419e487a37SJiawen Wu {
33429e487a37SJiawen Wu 	uint8_t i, j, mask;
33439e487a37SJiawen Wu 	uint32_t reta;
33449e487a37SJiawen Wu 	uint16_t idx, shift;
33459e487a37SJiawen Wu 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
33469e487a37SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
33479e487a37SJiawen Wu 
33489e487a37SJiawen Wu 	PMD_INIT_FUNC_TRACE();
33499e487a37SJiawen Wu 
33509e487a37SJiawen Wu 	if (!txgbe_rss_update_sp(hw->mac.type)) {
33519e487a37SJiawen Wu 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
33529e487a37SJiawen Wu 			"NIC.");
33539e487a37SJiawen Wu 		return -ENOTSUP;
33549e487a37SJiawen Wu 	}
33559e487a37SJiawen Wu 
3356295968d1SFerruh Yigit 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
33579e487a37SJiawen Wu 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
33589e487a37SJiawen Wu 			"(%d) doesn't match the number hardware can supported "
3359295968d1SFerruh Yigit 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
33609e487a37SJiawen Wu 		return -EINVAL;
33619e487a37SJiawen Wu 	}
33629e487a37SJiawen Wu 
33639e487a37SJiawen Wu 	for (i = 0; i < reta_size; i += 4) {
3364295968d1SFerruh Yigit 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3365295968d1SFerruh Yigit 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
33669e487a37SJiawen Wu 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
33679e487a37SJiawen Wu 		if (!mask)
33689e487a37SJiawen Wu 			continue;
33699e487a37SJiawen Wu 
337064b5d946SJiawen Wu 		reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
33719e487a37SJiawen Wu 		for (j = 0; j < 4; j++) {
33729e487a37SJiawen Wu 			if (RS8(mask, j, 0x1)) {
33739e487a37SJiawen Wu 				reta  &= ~(MS32(8 * j, 0xFF));
33749e487a37SJiawen Wu 				reta |= LS32(reta_conf[idx].reta[shift + j],
33759e487a37SJiawen Wu 						8 * j, 0xFF);
33769e487a37SJiawen Wu 			}
33779e487a37SJiawen Wu 		}
337864b5d946SJiawen Wu 		wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
33799e487a37SJiawen Wu 	}
33809e487a37SJiawen Wu 	adapter->rss_reta_updated = 1;
33819e487a37SJiawen Wu 
33829e487a37SJiawen Wu 	return 0;
33839e487a37SJiawen Wu }
33849e487a37SJiawen Wu 
33859e487a37SJiawen Wu int
txgbe_dev_rss_reta_query(struct rte_eth_dev * dev,struct rte_eth_rss_reta_entry64 * reta_conf,uint16_t reta_size)33869e487a37SJiawen Wu txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
33879e487a37SJiawen Wu 			 struct rte_eth_rss_reta_entry64 *reta_conf,
33889e487a37SJiawen Wu 			 uint16_t reta_size)
33899e487a37SJiawen Wu {
33909e487a37SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
33919e487a37SJiawen Wu 	uint8_t i, j, mask;
33929e487a37SJiawen Wu 	uint32_t reta;
33939e487a37SJiawen Wu 	uint16_t idx, shift;
33949e487a37SJiawen Wu 
33959e487a37SJiawen Wu 	PMD_INIT_FUNC_TRACE();
33969e487a37SJiawen Wu 
3397295968d1SFerruh Yigit 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
33989e487a37SJiawen Wu 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
33999e487a37SJiawen Wu 			"(%d) doesn't match the number hardware can supported "
3400295968d1SFerruh Yigit 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
34019e487a37SJiawen Wu 		return -EINVAL;
34029e487a37SJiawen Wu 	}
34039e487a37SJiawen Wu 
34049e487a37SJiawen Wu 	for (i = 0; i < reta_size; i += 4) {
3405295968d1SFerruh Yigit 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3406295968d1SFerruh Yigit 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
34079e487a37SJiawen Wu 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
34089e487a37SJiawen Wu 		if (!mask)
34099e487a37SJiawen Wu 			continue;
34109e487a37SJiawen Wu 
341164b5d946SJiawen Wu 		reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
34129e487a37SJiawen Wu 		for (j = 0; j < 4; j++) {
34139e487a37SJiawen Wu 			if (RS8(mask, j, 0x1))
34149e487a37SJiawen Wu 				reta_conf[idx].reta[shift + j] =
34159e487a37SJiawen Wu 					(uint16_t)RS32(reta, 8 * j, 0xFF);
34169e487a37SJiawen Wu 		}
34179e487a37SJiawen Wu 	}
34189e487a37SJiawen Wu 
34199e487a37SJiawen Wu 	return 0;
34209e487a37SJiawen Wu }
34219e487a37SJiawen Wu 
3422a331fe3bSJiawen Wu static int
txgbe_add_rar(struct rte_eth_dev * dev,struct rte_ether_addr * mac_addr,uint32_t index,uint32_t pool)3423a331fe3bSJiawen Wu txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3424a331fe3bSJiawen Wu 				uint32_t index, uint32_t pool)
3425a331fe3bSJiawen Wu {
3426a331fe3bSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3427a331fe3bSJiawen Wu 	uint32_t enable_addr = 1;
3428a331fe3bSJiawen Wu 
3429a331fe3bSJiawen Wu 	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3430a331fe3bSJiawen Wu 			     pool, enable_addr);
3431a331fe3bSJiawen Wu }
3432a331fe3bSJiawen Wu 
/* Invalidate the receive-address-register entry at @index. */
static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	txgbe_clear_rar(TXGBE_DEV_HW(dev), index);
}
3440a331fe3bSJiawen Wu 
3441a331fe3bSJiawen Wu static int
txgbe_set_default_mac_addr(struct rte_eth_dev * dev,struct rte_ether_addr * addr)3442a331fe3bSJiawen Wu txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3443a331fe3bSJiawen Wu {
3444a331fe3bSJiawen Wu 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3445a331fe3bSJiawen Wu 
3446a331fe3bSJiawen Wu 	txgbe_remove_rar(dev, 0);
3447a331fe3bSJiawen Wu 	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3448a331fe3bSJiawen Wu 
3449a331fe3bSJiawen Wu 	return 0;
3450a331fe3bSJiawen Wu }
3451a331fe3bSJiawen Wu 
34523926214fSJiawen Wu static int
txgbe_dev_mtu_set(struct rte_eth_dev * dev,uint16_t mtu)34533926214fSJiawen Wu txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
34543926214fSJiawen Wu {
34553926214fSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
34563926214fSJiawen Wu 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
34573926214fSJiawen Wu 	struct rte_eth_dev_data *dev_data = dev->data;
34583926214fSJiawen Wu 
34593926214fSJiawen Wu 	/* If device is started, refuse mtu that requires the support of
34603926214fSJiawen Wu 	 * scattered packets when this feature has not been enabled before.
34613926214fSJiawen Wu 	 */
34623926214fSJiawen Wu 	if (dev_data->dev_started && !dev_data->scattered_rx &&
346325cf2630SFerruh Yigit 	    (frame_size + 2 * RTE_VLAN_HLEN >
34643926214fSJiawen Wu 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
34653926214fSJiawen Wu 		PMD_INIT_LOG(ERR, "Stop port first.");
34663926214fSJiawen Wu 		return -EINVAL;
34673926214fSJiawen Wu 	}
34683926214fSJiawen Wu 
34693926214fSJiawen Wu 	if (hw->mode)
34703926214fSJiawen Wu 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
34713926214fSJiawen Wu 			TXGBE_FRAME_SIZE_MAX);
34723926214fSJiawen Wu 	else
34733926214fSJiawen Wu 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
34743926214fSJiawen Wu 			TXGBE_FRMSZ_MAX(frame_size));
34753926214fSJiawen Wu 
34763926214fSJiawen Wu 	return 0;
34773926214fSJiawen Wu }
34783926214fSJiawen Wu 
3479ca6cc80dSJiawen Wu static uint32_t
txgbe_uta_vector(struct txgbe_hw * hw,struct rte_ether_addr * uc_addr)3480ca6cc80dSJiawen Wu txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3481ca6cc80dSJiawen Wu {
3482ca6cc80dSJiawen Wu 	uint32_t vector = 0;
3483ca6cc80dSJiawen Wu 
3484ca6cc80dSJiawen Wu 	switch (hw->mac.mc_filter_type) {
3485ca6cc80dSJiawen Wu 	case 0:   /* use bits [47:36] of the address */
3486ca6cc80dSJiawen Wu 		vector = ((uc_addr->addr_bytes[4] >> 4) |
3487ca6cc80dSJiawen Wu 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
3488ca6cc80dSJiawen Wu 		break;
3489ca6cc80dSJiawen Wu 	case 1:   /* use bits [46:35] of the address */
3490ca6cc80dSJiawen Wu 		vector = ((uc_addr->addr_bytes[4] >> 3) |
3491ca6cc80dSJiawen Wu 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
3492ca6cc80dSJiawen Wu 		break;
3493ca6cc80dSJiawen Wu 	case 2:   /* use bits [45:34] of the address */
3494ca6cc80dSJiawen Wu 		vector = ((uc_addr->addr_bytes[4] >> 2) |
3495ca6cc80dSJiawen Wu 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
3496ca6cc80dSJiawen Wu 		break;
3497ca6cc80dSJiawen Wu 	case 3:   /* use bits [43:32] of the address */
3498ca6cc80dSJiawen Wu 		vector = ((uc_addr->addr_bytes[4]) |
3499ca6cc80dSJiawen Wu 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
3500ca6cc80dSJiawen Wu 		break;
3501ca6cc80dSJiawen Wu 	default:  /* Invalid mc_filter_type */
3502ca6cc80dSJiawen Wu 		break;
3503ca6cc80dSJiawen Wu 	}
3504ca6cc80dSJiawen Wu 
3505ca6cc80dSJiawen Wu 	/* vector can only be 12-bits or boundary will be exceeded */
3506ca6cc80dSJiawen Wu 	vector &= 0xFFF;
3507ca6cc80dSJiawen Wu 	return vector;
3508ca6cc80dSJiawen Wu }
3509ca6cc80dSJiawen Wu 
/*
 * Set or clear one unicast address in the unicast hash table (UTA),
 * keeping the software shadow and in-use count in sync, and toggling
 * hash filtering in PSRCTL accordingly.
 */
static int
txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr, uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_mask;
	uint32_t psrctl;

	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);

	/* The UTA table only exists on pf hardware */
	if (hw->mac.type < txgbe_mac_raptor)
		return -ENOTSUP;

	/* 12-bit hash: high 7 bits pick the 32-bit table word,
	 * low 5 bits pick the bit within that word
	 */
	vector = txgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> 5) & 0x7F;
	uta_mask = 0x1UL << (vector & 0x1F);

	/* Nothing to do when the shadow already matches the request */
	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
		return 0;

	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= uta_mask;
		uta_info->uta_shadow[uta_idx] |= uta_mask;
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~uta_mask;
		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
	}

	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);

	/* Enable unicast hash filtering only while entries are in use */
	psrctl = rd32(hw, TXGBE_PSRCTL);
	if (uta_info->uta_in_use > 0)
		psrctl |= TXGBE_PSRCTL_UCHFENA;
	else
		psrctl &= ~TXGBE_PSRCTL_UCHFENA;

	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
	wr32(hw, TXGBE_PSRCTL, psrctl);

	return 0;
}
3559ca6cc80dSJiawen Wu 
3560ca6cc80dSJiawen Wu static int
txgbe_uc_all_hash_table_set(struct rte_eth_dev * dev,uint8_t on)3561ca6cc80dSJiawen Wu txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3562ca6cc80dSJiawen Wu {
3563ca6cc80dSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3564ca6cc80dSJiawen Wu 	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3565ca6cc80dSJiawen Wu 	uint32_t psrctl;
3566ca6cc80dSJiawen Wu 	int i;
3567ca6cc80dSJiawen Wu 
3568ca6cc80dSJiawen Wu 	/* The UTA table only exists on pf hardware */
3569ca6cc80dSJiawen Wu 	if (hw->mac.type < txgbe_mac_raptor)
3570ca6cc80dSJiawen Wu 		return -ENOTSUP;
3571ca6cc80dSJiawen Wu 
3572ca6cc80dSJiawen Wu 	if (on) {
3573295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3574ca6cc80dSJiawen Wu 			uta_info->uta_shadow[i] = ~0;
3575ca6cc80dSJiawen Wu 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3576ca6cc80dSJiawen Wu 		}
3577ca6cc80dSJiawen Wu 	} else {
3578295968d1SFerruh Yigit 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3579ca6cc80dSJiawen Wu 			uta_info->uta_shadow[i] = 0;
3580ca6cc80dSJiawen Wu 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
3581ca6cc80dSJiawen Wu 		}
3582ca6cc80dSJiawen Wu 	}
3583ca6cc80dSJiawen Wu 
3584ca6cc80dSJiawen Wu 	psrctl = rd32(hw, TXGBE_PSRCTL);
3585ca6cc80dSJiawen Wu 	if (on)
3586ca6cc80dSJiawen Wu 		psrctl |= TXGBE_PSRCTL_UCHFENA;
3587ca6cc80dSJiawen Wu 	else
3588ca6cc80dSJiawen Wu 		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3589ca6cc80dSJiawen Wu 
3590ca6cc80dSJiawen Wu 	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3591ca6cc80dSJiawen Wu 	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3592ca6cc80dSJiawen Wu 	wr32(hw, TXGBE_PSRCTL, psrctl);
3593ca6cc80dSJiawen Wu 
3594ca6cc80dSJiawen Wu 	return 0;
3595ca6cc80dSJiawen Wu }
3596ca6cc80dSJiawen Wu 
3597c35b73a1SJiawen Wu uint32_t
txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask,uint32_t orig_val)3598c35b73a1SJiawen Wu txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3599c35b73a1SJiawen Wu {
3600c35b73a1SJiawen Wu 	uint32_t new_val = orig_val;
3601c35b73a1SJiawen Wu 
3602295968d1SFerruh Yigit 	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
3603c35b73a1SJiawen Wu 		new_val |= TXGBE_POOLETHCTL_UTA;
3604295968d1SFerruh Yigit 	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
3605c35b73a1SJiawen Wu 		new_val |= TXGBE_POOLETHCTL_MCHA;
3606295968d1SFerruh Yigit 	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
3607c35b73a1SJiawen Wu 		new_val |= TXGBE_POOLETHCTL_UCHA;
3608295968d1SFerruh Yigit 	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
3609c35b73a1SJiawen Wu 		new_val |= TXGBE_POOLETHCTL_BCA;
3610295968d1SFerruh Yigit 	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
3611c35b73a1SJiawen Wu 		new_val |= TXGBE_POOLETHCTL_MCP;
3612c35b73a1SJiawen Wu 
3613c35b73a1SJiawen Wu 	return new_val;
3614c35b73a1SJiawen Wu }
3615c35b73a1SJiawen Wu 
/*
 * Re-enable the Rx interrupt of @queue_id via the IMS register bank
 * (queues 0-31 in IMS(0), 32-63 in IMS(1)), then re-arm the PCI
 * interrupt line.
 *
 * NOTE(review): the read-modify-write keeps ONLY the queue's bit
 * (mask &= bit) while the disable path clears it (mask &= ~bit). If IMS
 * were a plain mask register this would wipe the other queues' bits on
 * every call -- confirm the IMS write-1-to-set semantics against the
 * datasheet / base code.
 */
static int
txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= (1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= (1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}
	rte_intr_enable(intr_handle);

	return 0;
}
3637a5682d28SJiawen Wu 
/*
 * Disable the Rx interrupt of @queue_id by clearing its bit in the IMS
 * register bank (queues 0-31 in IMS(0), 32-63 in IMS(1)).
 */
static int
txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	uint32_t mask;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (queue_id < 32) {
		mask = rd32(hw, TXGBE_IMS(0));
		mask &= ~(1 << queue_id);
		wr32(hw, TXGBE_IMS(0), mask);
	} else if (queue_id < 64) {
		mask = rd32(hw, TXGBE_IMS(1));
		mask &= ~(1 << (queue_id - 32));
		wr32(hw, TXGBE_IMS(1), mask);
	}

	return 0;
}
3656a5682d28SJiawen Wu 
/**
 * set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @queue
 *  queue to map the corresponding interrupt to
 * @msix_vector
 *  the vector to map to the corresponding queue
 */
void
txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes: single byte lane in IVARMISC, with the
		 * valid (VLD) flag folded into the vector value
		 */
		msix_vector |= TXGBE_IVARMISC_VLD;
		idx = 0;
		tmp = rd32(hw, TXGBE_IVARMISC);
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVARMISC, tmp);
	} else {
		/* rx or tx causes */
		/* Workaround for ICR lost */
		/* each IVAR register covers a queue pair: the byte lane is
		 * selected by queue parity (16 * (queue & 1)) plus direction
		 * (8 * direction), i.e. Rx/Tx of even/odd queue
		 */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
	}
}
36922fc745e6SJiawen Wu 
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @hw
 *  board private structure
 */
static void
txgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 * but if misx has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	/* when other (non-queue) vectors are allowed, queue vectors start
	 * after the reserved misc vector
	 */
	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-x mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			rte_intr_vec_list_index_set(intr_handle,
							   queue_id, vec);
			/* stop advancing at the last event fd vector; any
			 * remaining queues share it
			 */
			if (vec < base + rte_intr_nb_efd_get(intr_handle)
			    - 1)
				vec++;
		}

		/* route the misc causes to the reserved misc vector */
		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	/* program the default throttling interval for the misc vector */
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
			TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| TXGBE_ITR_WRDSA);
}
37492fc745e6SJiawen Wu 
3750770a3523SJiawen Wu int
txgbe_set_queue_rate_limit(struct rte_eth_dev * dev,uint16_t queue_idx,uint16_t tx_rate)3751770a3523SJiawen Wu txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3752770a3523SJiawen Wu 			   uint16_t queue_idx, uint16_t tx_rate)
3753770a3523SJiawen Wu {
3754770a3523SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3755770a3523SJiawen Wu 	uint32_t bcnrc_val;
3756770a3523SJiawen Wu 
3757770a3523SJiawen Wu 	if (queue_idx >= hw->mac.max_tx_queues)
3758770a3523SJiawen Wu 		return -EINVAL;
3759770a3523SJiawen Wu 
3760770a3523SJiawen Wu 	if (tx_rate != 0) {
3761770a3523SJiawen Wu 		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3762770a3523SJiawen Wu 		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3763770a3523SJiawen Wu 	} else {
3764770a3523SJiawen Wu 		bcnrc_val = 0;
3765770a3523SJiawen Wu 	}
3766770a3523SJiawen Wu 
3767770a3523SJiawen Wu 	/*
3768770a3523SJiawen Wu 	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3769770a3523SJiawen Wu 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3770770a3523SJiawen Wu 	 */
3771770a3523SJiawen Wu 	wr32(hw, TXGBE_ARBTXMMW, 0x14);
3772770a3523SJiawen Wu 
3773770a3523SJiawen Wu 	/* Set ARBTXRATE of queue X */
3774770a3523SJiawen Wu 	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3775770a3523SJiawen Wu 	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
3776770a3523SJiawen Wu 	txgbe_flush(hw);
3777770a3523SJiawen Wu 
3778770a3523SJiawen Wu 	return 0;
3779770a3523SJiawen Wu }
3780770a3523SJiawen Wu 
3781983a4ef2SJiawen Wu int
txgbe_syn_filter_set(struct rte_eth_dev * dev,struct rte_eth_syn_filter * filter,bool add)3782983a4ef2SJiawen Wu txgbe_syn_filter_set(struct rte_eth_dev *dev,
3783983a4ef2SJiawen Wu 			struct rte_eth_syn_filter *filter,
3784983a4ef2SJiawen Wu 			bool add)
3785983a4ef2SJiawen Wu {
3786983a4ef2SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3787983a4ef2SJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3788983a4ef2SJiawen Wu 	uint32_t syn_info;
3789983a4ef2SJiawen Wu 	uint32_t synqf;
3790983a4ef2SJiawen Wu 
3791983a4ef2SJiawen Wu 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3792983a4ef2SJiawen Wu 		return -EINVAL;
3793983a4ef2SJiawen Wu 
3794983a4ef2SJiawen Wu 	syn_info = filter_info->syn_info;
3795983a4ef2SJiawen Wu 
3796983a4ef2SJiawen Wu 	if (add) {
3797983a4ef2SJiawen Wu 		if (syn_info & TXGBE_SYNCLS_ENA)
3798983a4ef2SJiawen Wu 			return -EINVAL;
3799983a4ef2SJiawen Wu 		synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
3800983a4ef2SJiawen Wu 		synqf |= TXGBE_SYNCLS_ENA;
3801983a4ef2SJiawen Wu 
3802983a4ef2SJiawen Wu 		if (filter->hig_pri)
3803983a4ef2SJiawen Wu 			synqf |= TXGBE_SYNCLS_HIPRIO;
3804983a4ef2SJiawen Wu 		else
3805983a4ef2SJiawen Wu 			synqf &= ~TXGBE_SYNCLS_HIPRIO;
3806983a4ef2SJiawen Wu 	} else {
3807983a4ef2SJiawen Wu 		synqf = rd32(hw, TXGBE_SYNCLS);
3808983a4ef2SJiawen Wu 		if (!(syn_info & TXGBE_SYNCLS_ENA))
3809983a4ef2SJiawen Wu 			return -ENOENT;
3810983a4ef2SJiawen Wu 		synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
3811983a4ef2SJiawen Wu 	}
3812983a4ef2SJiawen Wu 
3813983a4ef2SJiawen Wu 	filter_info->syn_info = synqf;
3814983a4ef2SJiawen Wu 	wr32(hw, TXGBE_SYNCLS, synqf);
3815983a4ef2SJiawen Wu 	txgbe_flush(hw);
3816983a4ef2SJiawen Wu 	return 0;
3817983a4ef2SJiawen Wu }
3818983a4ef2SJiawen Wu 
381977a72b4dSJiawen Wu static inline enum txgbe_5tuple_protocol
convert_protocol_type(uint8_t protocol_value)382077a72b4dSJiawen Wu convert_protocol_type(uint8_t protocol_value)
382177a72b4dSJiawen Wu {
382277a72b4dSJiawen Wu 	if (protocol_value == IPPROTO_TCP)
382377a72b4dSJiawen Wu 		return TXGBE_5TF_PROT_TCP;
382477a72b4dSJiawen Wu 	else if (protocol_value == IPPROTO_UDP)
382577a72b4dSJiawen Wu 		return TXGBE_5TF_PROT_UDP;
382677a72b4dSJiawen Wu 	else if (protocol_value == IPPROTO_SCTP)
382777a72b4dSJiawen Wu 		return TXGBE_5TF_PROT_SCTP;
382877a72b4dSJiawen Wu 	else
382977a72b4dSJiawen Wu 		return TXGBE_5TF_PROT_NONE;
383077a72b4dSJiawen Wu }
383177a72b4dSJiawen Wu 
383277a72b4dSJiawen Wu /* inject a 5-tuple filter to HW */
383377a72b4dSJiawen Wu static inline void
txgbe_inject_5tuple_filter(struct rte_eth_dev * dev,struct txgbe_5tuple_filter * filter)383477a72b4dSJiawen Wu txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
383577a72b4dSJiawen Wu 			   struct txgbe_5tuple_filter *filter)
383677a72b4dSJiawen Wu {
383777a72b4dSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
383877a72b4dSJiawen Wu 	int i;
383977a72b4dSJiawen Wu 	uint32_t ftqf, sdpqf;
384077a72b4dSJiawen Wu 	uint32_t l34timir = 0;
384177a72b4dSJiawen Wu 	uint32_t mask = TXGBE_5TFCTL0_MASK;
384277a72b4dSJiawen Wu 
384377a72b4dSJiawen Wu 	i = filter->index;
384477a72b4dSJiawen Wu 	sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
384577a72b4dSJiawen Wu 	sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
384677a72b4dSJiawen Wu 
384777a72b4dSJiawen Wu 	ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
384877a72b4dSJiawen Wu 	ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
384977a72b4dSJiawen Wu 	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
385077a72b4dSJiawen Wu 		mask &= ~TXGBE_5TFCTL0_MSADDR;
385177a72b4dSJiawen Wu 	if (filter->filter_info.dst_ip_mask == 0)
385277a72b4dSJiawen Wu 		mask &= ~TXGBE_5TFCTL0_MDADDR;
385377a72b4dSJiawen Wu 	if (filter->filter_info.src_port_mask == 0)
385477a72b4dSJiawen Wu 		mask &= ~TXGBE_5TFCTL0_MSPORT;
385577a72b4dSJiawen Wu 	if (filter->filter_info.dst_port_mask == 0)
385677a72b4dSJiawen Wu 		mask &= ~TXGBE_5TFCTL0_MDPORT;
385777a72b4dSJiawen Wu 	if (filter->filter_info.proto_mask == 0)
385877a72b4dSJiawen Wu 		mask &= ~TXGBE_5TFCTL0_MPROTO;
385977a72b4dSJiawen Wu 	ftqf |= mask;
386077a72b4dSJiawen Wu 	ftqf |= TXGBE_5TFCTL0_MPOOL;
386177a72b4dSJiawen Wu 	ftqf |= TXGBE_5TFCTL0_ENA;
386277a72b4dSJiawen Wu 
386377a72b4dSJiawen Wu 	wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
386477a72b4dSJiawen Wu 	wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
386577a72b4dSJiawen Wu 	wr32(hw, TXGBE_5TFPORT(i), sdpqf);
386677a72b4dSJiawen Wu 	wr32(hw, TXGBE_5TFCTL0(i), ftqf);
386777a72b4dSJiawen Wu 
386877a72b4dSJiawen Wu 	l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
386977a72b4dSJiawen Wu 	wr32(hw, TXGBE_5TFCTL1(i), l34timir);
387077a72b4dSJiawen Wu }
387177a72b4dSJiawen Wu 
387277a72b4dSJiawen Wu /*
387377a72b4dSJiawen Wu  * add a 5tuple filter
387477a72b4dSJiawen Wu  *
387577a72b4dSJiawen Wu  * @param
387677a72b4dSJiawen Wu  * dev: Pointer to struct rte_eth_dev.
387777a72b4dSJiawen Wu  * index: the index the filter allocates.
387877a72b4dSJiawen Wu  * filter: pointer to the filter that will be added.
387977a72b4dSJiawen Wu  * rx_queue: the queue id the filter assigned to.
388077a72b4dSJiawen Wu  *
388177a72b4dSJiawen Wu  * @return
388277a72b4dSJiawen Wu  *    - On success, zero.
388377a72b4dSJiawen Wu  *    - On failure, a negative value.
388477a72b4dSJiawen Wu  */
388577a72b4dSJiawen Wu static int
txgbe_add_5tuple_filter(struct rte_eth_dev * dev,struct txgbe_5tuple_filter * filter)388677a72b4dSJiawen Wu txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
388777a72b4dSJiawen Wu 			struct txgbe_5tuple_filter *filter)
388877a72b4dSJiawen Wu {
388977a72b4dSJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
389077a72b4dSJiawen Wu 	int i, idx, shift;
389177a72b4dSJiawen Wu 
389277a72b4dSJiawen Wu 	/*
389377a72b4dSJiawen Wu 	 * look for an unused 5tuple filter index,
389477a72b4dSJiawen Wu 	 * and insert the filter to list.
389577a72b4dSJiawen Wu 	 */
389677a72b4dSJiawen Wu 	for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
389777a72b4dSJiawen Wu 		idx = i / (sizeof(uint32_t) * NBBY);
389877a72b4dSJiawen Wu 		shift = i % (sizeof(uint32_t) * NBBY);
389977a72b4dSJiawen Wu 		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
390077a72b4dSJiawen Wu 			filter_info->fivetuple_mask[idx] |= 1 << shift;
390177a72b4dSJiawen Wu 			filter->index = i;
390277a72b4dSJiawen Wu 			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
390377a72b4dSJiawen Wu 					  filter,
390477a72b4dSJiawen Wu 					  entries);
390577a72b4dSJiawen Wu 			break;
390677a72b4dSJiawen Wu 		}
390777a72b4dSJiawen Wu 	}
390877a72b4dSJiawen Wu 	if (i >= TXGBE_MAX_FTQF_FILTERS) {
390977a72b4dSJiawen Wu 		PMD_DRV_LOG(ERR, "5tuple filters are full.");
391077a72b4dSJiawen Wu 		return -ENOSYS;
391177a72b4dSJiawen Wu 	}
391277a72b4dSJiawen Wu 
391377a72b4dSJiawen Wu 	txgbe_inject_5tuple_filter(dev, filter);
391477a72b4dSJiawen Wu 
391577a72b4dSJiawen Wu 	return 0;
391677a72b4dSJiawen Wu }
391777a72b4dSJiawen Wu 
391877a72b4dSJiawen Wu /*
391977a72b4dSJiawen Wu  * remove a 5tuple filter
392077a72b4dSJiawen Wu  *
392177a72b4dSJiawen Wu  * @param
392277a72b4dSJiawen Wu  * dev: Pointer to struct rte_eth_dev.
392377a72b4dSJiawen Wu  * filter: the pointer of the filter will be removed.
392477a72b4dSJiawen Wu  */
392577a72b4dSJiawen Wu static void
txgbe_remove_5tuple_filter(struct rte_eth_dev * dev,struct txgbe_5tuple_filter * filter)392677a72b4dSJiawen Wu txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
392777a72b4dSJiawen Wu 			struct txgbe_5tuple_filter *filter)
392877a72b4dSJiawen Wu {
392977a72b4dSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
393077a72b4dSJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
393177a72b4dSJiawen Wu 	uint16_t index = filter->index;
393277a72b4dSJiawen Wu 
393377a72b4dSJiawen Wu 	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
393477a72b4dSJiawen Wu 				~(1 << (index % (sizeof(uint32_t) * NBBY)));
393577a72b4dSJiawen Wu 	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
393677a72b4dSJiawen Wu 	rte_free(filter);
393777a72b4dSJiawen Wu 
393877a72b4dSJiawen Wu 	wr32(hw, TXGBE_5TFDADDR(index), 0);
393977a72b4dSJiawen Wu 	wr32(hw, TXGBE_5TFSADDR(index), 0);
394077a72b4dSJiawen Wu 	wr32(hw, TXGBE_5TFPORT(index), 0);
394177a72b4dSJiawen Wu 	wr32(hw, TXGBE_5TFCTL0(index), 0);
394277a72b4dSJiawen Wu 	wr32(hw, TXGBE_5TFCTL1(index), 0);
394377a72b4dSJiawen Wu }
394477a72b4dSJiawen Wu 
394577a72b4dSJiawen Wu static inline struct txgbe_5tuple_filter *
txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list * filter_list,struct txgbe_5tuple_filter_info * key)394677a72b4dSJiawen Wu txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
394777a72b4dSJiawen Wu 			struct txgbe_5tuple_filter_info *key)
394877a72b4dSJiawen Wu {
394977a72b4dSJiawen Wu 	struct txgbe_5tuple_filter *it;
395077a72b4dSJiawen Wu 
395177a72b4dSJiawen Wu 	TAILQ_FOREACH(it, filter_list, entries) {
395277a72b4dSJiawen Wu 		if (memcmp(key, &it->filter_info,
395377a72b4dSJiawen Wu 			sizeof(struct txgbe_5tuple_filter_info)) == 0) {
395477a72b4dSJiawen Wu 			return it;
395577a72b4dSJiawen Wu 		}
395677a72b4dSJiawen Wu 	}
395777a72b4dSJiawen Wu 	return NULL;
395877a72b4dSJiawen Wu }
395977a72b4dSJiawen Wu 
396077a72b4dSJiawen Wu /* translate elements in struct rte_eth_ntuple_filter
396177a72b4dSJiawen Wu  * to struct txgbe_5tuple_filter_info
396277a72b4dSJiawen Wu  */
396377a72b4dSJiawen Wu static inline int
ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter * filter,struct txgbe_5tuple_filter_info * filter_info)396477a72b4dSJiawen Wu ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
396577a72b4dSJiawen Wu 			struct txgbe_5tuple_filter_info *filter_info)
396677a72b4dSJiawen Wu {
396777a72b4dSJiawen Wu 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
396877a72b4dSJiawen Wu 		filter->priority > TXGBE_5TUPLE_MAX_PRI ||
396977a72b4dSJiawen Wu 		filter->priority < TXGBE_5TUPLE_MIN_PRI)
397077a72b4dSJiawen Wu 		return -EINVAL;
397177a72b4dSJiawen Wu 
397277a72b4dSJiawen Wu 	switch (filter->dst_ip_mask) {
397377a72b4dSJiawen Wu 	case UINT32_MAX:
397477a72b4dSJiawen Wu 		filter_info->dst_ip_mask = 0;
397577a72b4dSJiawen Wu 		filter_info->dst_ip = filter->dst_ip;
397677a72b4dSJiawen Wu 		break;
397777a72b4dSJiawen Wu 	case 0:
397877a72b4dSJiawen Wu 		filter_info->dst_ip_mask = 1;
397977a72b4dSJiawen Wu 		break;
398077a72b4dSJiawen Wu 	default:
398177a72b4dSJiawen Wu 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
398277a72b4dSJiawen Wu 		return -EINVAL;
398377a72b4dSJiawen Wu 	}
398477a72b4dSJiawen Wu 
398577a72b4dSJiawen Wu 	switch (filter->src_ip_mask) {
398677a72b4dSJiawen Wu 	case UINT32_MAX:
398777a72b4dSJiawen Wu 		filter_info->src_ip_mask = 0;
398877a72b4dSJiawen Wu 		filter_info->src_ip = filter->src_ip;
398977a72b4dSJiawen Wu 		break;
399077a72b4dSJiawen Wu 	case 0:
399177a72b4dSJiawen Wu 		filter_info->src_ip_mask = 1;
399277a72b4dSJiawen Wu 		break;
399377a72b4dSJiawen Wu 	default:
399477a72b4dSJiawen Wu 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
399577a72b4dSJiawen Wu 		return -EINVAL;
399677a72b4dSJiawen Wu 	}
399777a72b4dSJiawen Wu 
399877a72b4dSJiawen Wu 	switch (filter->dst_port_mask) {
399977a72b4dSJiawen Wu 	case UINT16_MAX:
400077a72b4dSJiawen Wu 		filter_info->dst_port_mask = 0;
400177a72b4dSJiawen Wu 		filter_info->dst_port = filter->dst_port;
400277a72b4dSJiawen Wu 		break;
400377a72b4dSJiawen Wu 	case 0:
400477a72b4dSJiawen Wu 		filter_info->dst_port_mask = 1;
400577a72b4dSJiawen Wu 		break;
400677a72b4dSJiawen Wu 	default:
400777a72b4dSJiawen Wu 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
400877a72b4dSJiawen Wu 		return -EINVAL;
400977a72b4dSJiawen Wu 	}
401077a72b4dSJiawen Wu 
401177a72b4dSJiawen Wu 	switch (filter->src_port_mask) {
401277a72b4dSJiawen Wu 	case UINT16_MAX:
401377a72b4dSJiawen Wu 		filter_info->src_port_mask = 0;
401477a72b4dSJiawen Wu 		filter_info->src_port = filter->src_port;
401577a72b4dSJiawen Wu 		break;
401677a72b4dSJiawen Wu 	case 0:
401777a72b4dSJiawen Wu 		filter_info->src_port_mask = 1;
401877a72b4dSJiawen Wu 		break;
401977a72b4dSJiawen Wu 	default:
402077a72b4dSJiawen Wu 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
402177a72b4dSJiawen Wu 		return -EINVAL;
402277a72b4dSJiawen Wu 	}
402377a72b4dSJiawen Wu 
402477a72b4dSJiawen Wu 	switch (filter->proto_mask) {
402577a72b4dSJiawen Wu 	case UINT8_MAX:
402677a72b4dSJiawen Wu 		filter_info->proto_mask = 0;
402777a72b4dSJiawen Wu 		filter_info->proto =
402877a72b4dSJiawen Wu 			convert_protocol_type(filter->proto);
402977a72b4dSJiawen Wu 		break;
403077a72b4dSJiawen Wu 	case 0:
403177a72b4dSJiawen Wu 		filter_info->proto_mask = 1;
403277a72b4dSJiawen Wu 		break;
403377a72b4dSJiawen Wu 	default:
403477a72b4dSJiawen Wu 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
403577a72b4dSJiawen Wu 		return -EINVAL;
403677a72b4dSJiawen Wu 	}
403777a72b4dSJiawen Wu 
403877a72b4dSJiawen Wu 	filter_info->priority = (uint8_t)filter->priority;
403977a72b4dSJiawen Wu 	return 0;
404077a72b4dSJiawen Wu }
404177a72b4dSJiawen Wu 
404277a72b4dSJiawen Wu /*
404377a72b4dSJiawen Wu  * add or delete a ntuple filter
404477a72b4dSJiawen Wu  *
404577a72b4dSJiawen Wu  * @param
404677a72b4dSJiawen Wu  * dev: Pointer to struct rte_eth_dev.
404777a72b4dSJiawen Wu  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
404877a72b4dSJiawen Wu  * add: if true, add filter, if false, remove filter
404977a72b4dSJiawen Wu  *
405077a72b4dSJiawen Wu  * @return
405177a72b4dSJiawen Wu  *    - On success, zero.
405277a72b4dSJiawen Wu  *    - On failure, a negative value.
405377a72b4dSJiawen Wu  */
405477a72b4dSJiawen Wu int
txgbe_add_del_ntuple_filter(struct rte_eth_dev * dev,struct rte_eth_ntuple_filter * ntuple_filter,bool add)405577a72b4dSJiawen Wu txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
405677a72b4dSJiawen Wu 			struct rte_eth_ntuple_filter *ntuple_filter,
405777a72b4dSJiawen Wu 			bool add)
405877a72b4dSJiawen Wu {
405977a72b4dSJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
406077a72b4dSJiawen Wu 	struct txgbe_5tuple_filter_info filter_5tuple;
406177a72b4dSJiawen Wu 	struct txgbe_5tuple_filter *filter;
406277a72b4dSJiawen Wu 	int ret;
406377a72b4dSJiawen Wu 
406477a72b4dSJiawen Wu 	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
406577a72b4dSJiawen Wu 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
406677a72b4dSJiawen Wu 		return -EINVAL;
406777a72b4dSJiawen Wu 	}
406877a72b4dSJiawen Wu 
406977a72b4dSJiawen Wu 	memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
407077a72b4dSJiawen Wu 	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
407177a72b4dSJiawen Wu 	if (ret < 0)
407277a72b4dSJiawen Wu 		return ret;
407377a72b4dSJiawen Wu 
407477a72b4dSJiawen Wu 	filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
407577a72b4dSJiawen Wu 					 &filter_5tuple);
407677a72b4dSJiawen Wu 	if (filter != NULL && add) {
407777a72b4dSJiawen Wu 		PMD_DRV_LOG(ERR, "filter exists.");
407877a72b4dSJiawen Wu 		return -EEXIST;
407977a72b4dSJiawen Wu 	}
408077a72b4dSJiawen Wu 	if (filter == NULL && !add) {
408177a72b4dSJiawen Wu 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
408277a72b4dSJiawen Wu 		return -ENOENT;
408377a72b4dSJiawen Wu 	}
408477a72b4dSJiawen Wu 
408577a72b4dSJiawen Wu 	if (add) {
408677a72b4dSJiawen Wu 		filter = rte_zmalloc("txgbe_5tuple_filter",
408777a72b4dSJiawen Wu 				sizeof(struct txgbe_5tuple_filter), 0);
408877a72b4dSJiawen Wu 		if (filter == NULL)
408977a72b4dSJiawen Wu 			return -ENOMEM;
409077a72b4dSJiawen Wu 		rte_memcpy(&filter->filter_info,
409177a72b4dSJiawen Wu 				 &filter_5tuple,
409277a72b4dSJiawen Wu 				 sizeof(struct txgbe_5tuple_filter_info));
409377a72b4dSJiawen Wu 		filter->queue = ntuple_filter->queue;
409477a72b4dSJiawen Wu 		ret = txgbe_add_5tuple_filter(dev, filter);
409577a72b4dSJiawen Wu 		if (ret < 0) {
409677a72b4dSJiawen Wu 			rte_free(filter);
409777a72b4dSJiawen Wu 			return ret;
409877a72b4dSJiawen Wu 		}
409977a72b4dSJiawen Wu 	} else {
410077a72b4dSJiawen Wu 		txgbe_remove_5tuple_filter(dev, filter);
410177a72b4dSJiawen Wu 	}
410277a72b4dSJiawen Wu 
410377a72b4dSJiawen Wu 	return 0;
410477a72b4dSJiawen Wu }
410577a72b4dSJiawen Wu 
4106f8e2cfc7SJiawen Wu int
txgbe_add_del_ethertype_filter(struct rte_eth_dev * dev,struct rte_eth_ethertype_filter * filter,bool add)4107f8e2cfc7SJiawen Wu txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
4108f8e2cfc7SJiawen Wu 			struct rte_eth_ethertype_filter *filter,
4109f8e2cfc7SJiawen Wu 			bool add)
4110f8e2cfc7SJiawen Wu {
4111f8e2cfc7SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4112f8e2cfc7SJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4113f8e2cfc7SJiawen Wu 	uint32_t etqf = 0;
4114f8e2cfc7SJiawen Wu 	uint32_t etqs = 0;
4115f8e2cfc7SJiawen Wu 	int ret;
4116f8e2cfc7SJiawen Wu 	struct txgbe_ethertype_filter ethertype_filter;
4117f8e2cfc7SJiawen Wu 
4118f8e2cfc7SJiawen Wu 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
4119f8e2cfc7SJiawen Wu 		return -EINVAL;
4120f8e2cfc7SJiawen Wu 
4121f8e2cfc7SJiawen Wu 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
4122f8e2cfc7SJiawen Wu 	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
4123f8e2cfc7SJiawen Wu 		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4124f8e2cfc7SJiawen Wu 			" ethertype filter.", filter->ether_type);
4125f8e2cfc7SJiawen Wu 		return -EINVAL;
4126f8e2cfc7SJiawen Wu 	}
4127f8e2cfc7SJiawen Wu 
4128f8e2cfc7SJiawen Wu 	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4129f8e2cfc7SJiawen Wu 		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4130f8e2cfc7SJiawen Wu 		return -EINVAL;
4131f8e2cfc7SJiawen Wu 	}
4132f8e2cfc7SJiawen Wu 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4133f8e2cfc7SJiawen Wu 		PMD_DRV_LOG(ERR, "drop option is unsupported.");
4134f8e2cfc7SJiawen Wu 		return -EINVAL;
4135f8e2cfc7SJiawen Wu 	}
4136f8e2cfc7SJiawen Wu 
4137f8e2cfc7SJiawen Wu 	ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4138f8e2cfc7SJiawen Wu 	if (ret >= 0 && add) {
4139f8e2cfc7SJiawen Wu 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4140f8e2cfc7SJiawen Wu 			    filter->ether_type);
4141f8e2cfc7SJiawen Wu 		return -EEXIST;
4142f8e2cfc7SJiawen Wu 	}
4143f8e2cfc7SJiawen Wu 	if (ret < 0 && !add) {
4144f8e2cfc7SJiawen Wu 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4145f8e2cfc7SJiawen Wu 			    filter->ether_type);
4146f8e2cfc7SJiawen Wu 		return -ENOENT;
4147f8e2cfc7SJiawen Wu 	}
4148f8e2cfc7SJiawen Wu 
4149f8e2cfc7SJiawen Wu 	if (add) {
4150f8e2cfc7SJiawen Wu 		etqf = TXGBE_ETFLT_ENA;
4151f8e2cfc7SJiawen Wu 		etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
4152f8e2cfc7SJiawen Wu 		etqs |= TXGBE_ETCLS_QPID(filter->queue);
4153f8e2cfc7SJiawen Wu 		etqs |= TXGBE_ETCLS_QENA;
4154f8e2cfc7SJiawen Wu 
4155f8e2cfc7SJiawen Wu 		ethertype_filter.ethertype = filter->ether_type;
4156f8e2cfc7SJiawen Wu 		ethertype_filter.etqf = etqf;
4157f8e2cfc7SJiawen Wu 		ethertype_filter.etqs = etqs;
4158f8e2cfc7SJiawen Wu 		ethertype_filter.conf = FALSE;
4159f8e2cfc7SJiawen Wu 		ret = txgbe_ethertype_filter_insert(filter_info,
4160f8e2cfc7SJiawen Wu 						    &ethertype_filter);
4161f8e2cfc7SJiawen Wu 		if (ret < 0) {
4162f8e2cfc7SJiawen Wu 			PMD_DRV_LOG(ERR, "ethertype filters are full.");
4163f8e2cfc7SJiawen Wu 			return -ENOSPC;
4164f8e2cfc7SJiawen Wu 		}
4165f8e2cfc7SJiawen Wu 	} else {
4166f8e2cfc7SJiawen Wu 		ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
4167f8e2cfc7SJiawen Wu 		if (ret < 0)
4168f8e2cfc7SJiawen Wu 			return -ENOSYS;
4169f8e2cfc7SJiawen Wu 	}
4170f8e2cfc7SJiawen Wu 	wr32(hw, TXGBE_ETFLT(ret), etqf);
4171f8e2cfc7SJiawen Wu 	wr32(hw, TXGBE_ETCLS(ret), etqs);
4172f8e2cfc7SJiawen Wu 	txgbe_flush(hw);
4173f8e2cfc7SJiawen Wu 
4174f8e2cfc7SJiawen Wu 	return 0;
4175f8e2cfc7SJiawen Wu }
4176f8e2cfc7SJiawen Wu 
417743bb1f8dSJiawen Wu static int
txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev * dev,const struct rte_flow_ops ** ops)4178fb7ad441SThomas Monjalon txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
4179fb7ad441SThomas Monjalon 		       const struct rte_flow_ops **ops)
418043bb1f8dSJiawen Wu {
4181fb7ad441SThomas Monjalon 	*ops = &txgbe_flow_ops;
4182fb7ad441SThomas Monjalon 	return 0;
418343bb1f8dSJiawen Wu }
418443bb1f8dSJiawen Wu 
4185a331fe3bSJiawen Wu static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw * hw,u8 ** mc_addr_ptr,u32 * vmdq)4186a331fe3bSJiawen Wu txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
4187a331fe3bSJiawen Wu 			u8 **mc_addr_ptr, u32 *vmdq)
4188a331fe3bSJiawen Wu {
4189a331fe3bSJiawen Wu 	u8 *mc_addr;
4190a331fe3bSJiawen Wu 
4191a331fe3bSJiawen Wu 	*vmdq = 0;
4192a331fe3bSJiawen Wu 	mc_addr = *mc_addr_ptr;
4193a331fe3bSJiawen Wu 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
4194a331fe3bSJiawen Wu 	return mc_addr;
4195a331fe3bSJiawen Wu }
4196a331fe3bSJiawen Wu 
4197a331fe3bSJiawen Wu int
txgbe_dev_set_mc_addr_list(struct rte_eth_dev * dev,struct rte_ether_addr * mc_addr_set,uint32_t nb_mc_addr)4198a331fe3bSJiawen Wu txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4199a331fe3bSJiawen Wu 			  struct rte_ether_addr *mc_addr_set,
4200a331fe3bSJiawen Wu 			  uint32_t nb_mc_addr)
4201a331fe3bSJiawen Wu {
4202a331fe3bSJiawen Wu 	struct txgbe_hw *hw;
4203a331fe3bSJiawen Wu 	u8 *mc_addr_list;
4204a331fe3bSJiawen Wu 
4205a331fe3bSJiawen Wu 	hw = TXGBE_DEV_HW(dev);
4206a331fe3bSJiawen Wu 	mc_addr_list = (u8 *)mc_addr_set;
4207c8307adaSJiawen Wu 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4208a331fe3bSJiawen Wu 					 txgbe_dev_addr_list_itr, TRUE);
4209a331fe3bSJiawen Wu }
4210a331fe3bSJiawen Wu 
4211bd8e3adcSJiawen Wu static uint64_t
txgbe_read_systime_cyclecounter(struct rte_eth_dev * dev)4212bd8e3adcSJiawen Wu txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
4213bd8e3adcSJiawen Wu {
4214bd8e3adcSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4215bd8e3adcSJiawen Wu 	uint64_t systime_cycles;
4216bd8e3adcSJiawen Wu 
4217bd8e3adcSJiawen Wu 	systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
4218bd8e3adcSJiawen Wu 	systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
4219bd8e3adcSJiawen Wu 
4220bd8e3adcSJiawen Wu 	return systime_cycles;
4221bd8e3adcSJiawen Wu }
4222bd8e3adcSJiawen Wu 
4223bd8e3adcSJiawen Wu static uint64_t
txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev * dev)4224bd8e3adcSJiawen Wu txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4225bd8e3adcSJiawen Wu {
4226bd8e3adcSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4227bd8e3adcSJiawen Wu 	uint64_t rx_tstamp_cycles;
4228bd8e3adcSJiawen Wu 
4229bd8e3adcSJiawen Wu 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
4230bd8e3adcSJiawen Wu 	rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
4231bd8e3adcSJiawen Wu 	rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
4232bd8e3adcSJiawen Wu 
4233bd8e3adcSJiawen Wu 	return rx_tstamp_cycles;
4234bd8e3adcSJiawen Wu }
4235bd8e3adcSJiawen Wu 
4236bd8e3adcSJiawen Wu static uint64_t
txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev * dev)4237bd8e3adcSJiawen Wu txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4238bd8e3adcSJiawen Wu {
4239bd8e3adcSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4240bd8e3adcSJiawen Wu 	uint64_t tx_tstamp_cycles;
4241bd8e3adcSJiawen Wu 
4242bd8e3adcSJiawen Wu 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
4243bd8e3adcSJiawen Wu 	tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
4244bd8e3adcSJiawen Wu 	tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
4245bd8e3adcSJiawen Wu 
4246bd8e3adcSJiawen Wu 	return tx_tstamp_cycles;
4247bd8e3adcSJiawen Wu }
4248bd8e3adcSJiawen Wu 
4249bd8e3adcSJiawen Wu static void
txgbe_start_timecounters(struct rte_eth_dev * dev)4250bd8e3adcSJiawen Wu txgbe_start_timecounters(struct rte_eth_dev *dev)
4251bd8e3adcSJiawen Wu {
4252bd8e3adcSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4253bd8e3adcSJiawen Wu 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4254bd8e3adcSJiawen Wu 	struct rte_eth_link link;
4255bd8e3adcSJiawen Wu 	uint32_t incval = 0;
4256bd8e3adcSJiawen Wu 	uint32_t shift = 0;
4257bd8e3adcSJiawen Wu 
4258bd8e3adcSJiawen Wu 	/* Get current link speed. */
4259bd8e3adcSJiawen Wu 	txgbe_dev_link_update(dev, 1);
4260bd8e3adcSJiawen Wu 	rte_eth_linkstatus_get(dev, &link);
4261bd8e3adcSJiawen Wu 
4262bd8e3adcSJiawen Wu 	switch (link.link_speed) {
4263295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_100M:
4264bd8e3adcSJiawen Wu 		incval = TXGBE_INCVAL_100;
4265bd8e3adcSJiawen Wu 		shift = TXGBE_INCVAL_SHIFT_100;
4266bd8e3adcSJiawen Wu 		break;
4267295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_1G:
4268bd8e3adcSJiawen Wu 		incval = TXGBE_INCVAL_1GB;
4269bd8e3adcSJiawen Wu 		shift = TXGBE_INCVAL_SHIFT_1GB;
4270bd8e3adcSJiawen Wu 		break;
4271295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_10G:
4272bd8e3adcSJiawen Wu 	default:
4273bd8e3adcSJiawen Wu 		incval = TXGBE_INCVAL_10GB;
4274bd8e3adcSJiawen Wu 		shift = TXGBE_INCVAL_SHIFT_10GB;
4275bd8e3adcSJiawen Wu 		break;
4276bd8e3adcSJiawen Wu 	}
4277bd8e3adcSJiawen Wu 
4278bd8e3adcSJiawen Wu 	wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
4279bd8e3adcSJiawen Wu 
4280bd8e3adcSJiawen Wu 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4281bd8e3adcSJiawen Wu 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4282bd8e3adcSJiawen Wu 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4283bd8e3adcSJiawen Wu 
4284bd8e3adcSJiawen Wu 	adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4285bd8e3adcSJiawen Wu 	adapter->systime_tc.cc_shift = shift;
4286bd8e3adcSJiawen Wu 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4287bd8e3adcSJiawen Wu 
4288bd8e3adcSJiawen Wu 	adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4289bd8e3adcSJiawen Wu 	adapter->rx_tstamp_tc.cc_shift = shift;
4290bd8e3adcSJiawen Wu 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4291bd8e3adcSJiawen Wu 
4292bd8e3adcSJiawen Wu 	adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4293bd8e3adcSJiawen Wu 	adapter->tx_tstamp_tc.cc_shift = shift;
4294bd8e3adcSJiawen Wu 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4295bd8e3adcSJiawen Wu }
4296bd8e3adcSJiawen Wu 
4297bd8e3adcSJiawen Wu static int
txgbe_timesync_adjust_time(struct rte_eth_dev * dev,int64_t delta)4298bd8e3adcSJiawen Wu txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4299bd8e3adcSJiawen Wu {
4300bd8e3adcSJiawen Wu 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4301bd8e3adcSJiawen Wu 
4302bd8e3adcSJiawen Wu 	adapter->systime_tc.nsec += delta;
4303bd8e3adcSJiawen Wu 	adapter->rx_tstamp_tc.nsec += delta;
4304bd8e3adcSJiawen Wu 	adapter->tx_tstamp_tc.nsec += delta;
4305bd8e3adcSJiawen Wu 
4306bd8e3adcSJiawen Wu 	return 0;
4307bd8e3adcSJiawen Wu }
4308bd8e3adcSJiawen Wu 
4309bd8e3adcSJiawen Wu static int
txgbe_timesync_write_time(struct rte_eth_dev * dev,const struct timespec * ts)4310bd8e3adcSJiawen Wu txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4311bd8e3adcSJiawen Wu {
4312bd8e3adcSJiawen Wu 	uint64_t ns;
4313bd8e3adcSJiawen Wu 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4314bd8e3adcSJiawen Wu 
4315bd8e3adcSJiawen Wu 	ns = rte_timespec_to_ns(ts);
4316bd8e3adcSJiawen Wu 	/* Set the timecounters to a new value. */
4317bd8e3adcSJiawen Wu 	adapter->systime_tc.nsec = ns;
4318bd8e3adcSJiawen Wu 	adapter->rx_tstamp_tc.nsec = ns;
4319bd8e3adcSJiawen Wu 	adapter->tx_tstamp_tc.nsec = ns;
4320bd8e3adcSJiawen Wu 
4321bd8e3adcSJiawen Wu 	return 0;
4322bd8e3adcSJiawen Wu }
4323bd8e3adcSJiawen Wu 
4324bd8e3adcSJiawen Wu static int
txgbe_timesync_read_time(struct rte_eth_dev * dev,struct timespec * ts)4325bd8e3adcSJiawen Wu txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4326bd8e3adcSJiawen Wu {
4327bd8e3adcSJiawen Wu 	uint64_t ns, systime_cycles;
4328bd8e3adcSJiawen Wu 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4329bd8e3adcSJiawen Wu 
4330bd8e3adcSJiawen Wu 	systime_cycles = txgbe_read_systime_cyclecounter(dev);
4331bd8e3adcSJiawen Wu 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4332bd8e3adcSJiawen Wu 	*ts = rte_ns_to_timespec(ns);
4333bd8e3adcSJiawen Wu 
4334bd8e3adcSJiawen Wu 	return 0;
4335bd8e3adcSJiawen Wu }
4336bd8e3adcSJiawen Wu 
4337bd8e3adcSJiawen Wu static int
txgbe_timesync_enable(struct rte_eth_dev * dev)4338bd8e3adcSJiawen Wu txgbe_timesync_enable(struct rte_eth_dev *dev)
4339bd8e3adcSJiawen Wu {
4340bd8e3adcSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4341bd8e3adcSJiawen Wu 	uint32_t tsync_ctl;
4342bd8e3adcSJiawen Wu 
4343bd8e3adcSJiawen Wu 	/* Stop the timesync system time. */
4344bd8e3adcSJiawen Wu 	wr32(hw, TXGBE_TSTIMEINC, 0x0);
4345bd8e3adcSJiawen Wu 	/* Reset the timesync system time value. */
4346bd8e3adcSJiawen Wu 	wr32(hw, TXGBE_TSTIMEL, 0x0);
4347bd8e3adcSJiawen Wu 	wr32(hw, TXGBE_TSTIMEH, 0x0);
4348bd8e3adcSJiawen Wu 
4349bd8e3adcSJiawen Wu 	txgbe_start_timecounters(dev);
4350bd8e3adcSJiawen Wu 
4351bd8e3adcSJiawen Wu 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4352bd8e3adcSJiawen Wu 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
4353bd8e3adcSJiawen Wu 		RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
4354bd8e3adcSJiawen Wu 
4355bd8e3adcSJiawen Wu 	/* Enable timestamping of received PTP packets. */
4356bd8e3adcSJiawen Wu 	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4357bd8e3adcSJiawen Wu 	tsync_ctl |= TXGBE_TSRXCTL_ENA;
4358bd8e3adcSJiawen Wu 	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4359bd8e3adcSJiawen Wu 
4360bd8e3adcSJiawen Wu 	/* Enable timestamping of transmitted PTP packets. */
4361bd8e3adcSJiawen Wu 	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4362bd8e3adcSJiawen Wu 	tsync_ctl |= TXGBE_TSTXCTL_ENA;
4363bd8e3adcSJiawen Wu 	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4364bd8e3adcSJiawen Wu 
4365bd8e3adcSJiawen Wu 	txgbe_flush(hw);
4366bd8e3adcSJiawen Wu 
4367bd8e3adcSJiawen Wu 	return 0;
4368bd8e3adcSJiawen Wu }
4369bd8e3adcSJiawen Wu 
/* Disable IEEE 1588/PTP support: turn off Rx/Tx timestamp capture,
 * clear the 1588 L2 ethertype filter, and freeze the system-time
 * counter. Always returns 0.
 */
static int
txgbe_timesync_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
	tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
	tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);

	/* Stop incrementing the System Time registers. */
	wr32(hw, TXGBE_TSTIMEINC, 0);

	return 0;
}
4394bd8e3adcSJiawen Wu 
4395bd8e3adcSJiawen Wu static int
txgbe_timesync_read_rx_timestamp(struct rte_eth_dev * dev,struct timespec * timestamp,uint32_t flags __rte_unused)4396bd8e3adcSJiawen Wu txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4397bd8e3adcSJiawen Wu 				 struct timespec *timestamp,
4398bd8e3adcSJiawen Wu 				 uint32_t flags __rte_unused)
4399bd8e3adcSJiawen Wu {
4400bd8e3adcSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4401bd8e3adcSJiawen Wu 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4402bd8e3adcSJiawen Wu 	uint32_t tsync_rxctl;
4403bd8e3adcSJiawen Wu 	uint64_t rx_tstamp_cycles;
4404bd8e3adcSJiawen Wu 	uint64_t ns;
4405bd8e3adcSJiawen Wu 
4406bd8e3adcSJiawen Wu 	tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
4407bd8e3adcSJiawen Wu 	if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
4408bd8e3adcSJiawen Wu 		return -EINVAL;
4409bd8e3adcSJiawen Wu 
4410bd8e3adcSJiawen Wu 	rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
4411bd8e3adcSJiawen Wu 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4412bd8e3adcSJiawen Wu 	*timestamp = rte_ns_to_timespec(ns);
4413bd8e3adcSJiawen Wu 
4414bd8e3adcSJiawen Wu 	return  0;
4415bd8e3adcSJiawen Wu }
4416bd8e3adcSJiawen Wu 
4417bd8e3adcSJiawen Wu static int
txgbe_timesync_read_tx_timestamp(struct rte_eth_dev * dev,struct timespec * timestamp)4418bd8e3adcSJiawen Wu txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4419bd8e3adcSJiawen Wu 				 struct timespec *timestamp)
4420bd8e3adcSJiawen Wu {
4421bd8e3adcSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4422bd8e3adcSJiawen Wu 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4423bd8e3adcSJiawen Wu 	uint32_t tsync_txctl;
4424bd8e3adcSJiawen Wu 	uint64_t tx_tstamp_cycles;
4425bd8e3adcSJiawen Wu 	uint64_t ns;
4426bd8e3adcSJiawen Wu 
4427bd8e3adcSJiawen Wu 	tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
4428bd8e3adcSJiawen Wu 	if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
4429bd8e3adcSJiawen Wu 		return -EINVAL;
4430bd8e3adcSJiawen Wu 
4431bd8e3adcSJiawen Wu 	tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
4432bd8e3adcSJiawen Wu 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4433bd8e3adcSJiawen Wu 	*timestamp = rte_ns_to_timespec(ns);
4434bd8e3adcSJiawen Wu 
4435bd8e3adcSJiawen Wu 	return 0;
4436bd8e3adcSJiawen Wu }
4437bd8e3adcSJiawen Wu 
44383cc8b50dSJiawen Wu static int
txgbe_get_reg_length(struct rte_eth_dev * dev __rte_unused)4439ab7a6530SJiawen Wu txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4440ab7a6530SJiawen Wu {
4441ab7a6530SJiawen Wu 	int count = 0;
4442ab7a6530SJiawen Wu 	int g_ind = 0;
4443ab7a6530SJiawen Wu 	const struct reg_info *reg_group;
4444ab7a6530SJiawen Wu 	const struct reg_info **reg_set = txgbe_regs_others;
4445ab7a6530SJiawen Wu 
4446ab7a6530SJiawen Wu 	while ((reg_group = reg_set[g_ind++]))
4447ab7a6530SJiawen Wu 		count += txgbe_regs_group_count(reg_group);
4448ab7a6530SJiawen Wu 
4449ab7a6530SJiawen Wu 	return count;
4450ab7a6530SJiawen Wu }
4451ab7a6530SJiawen Wu 
4452ab7a6530SJiawen Wu static int
txgbe_get_regs(struct rte_eth_dev * dev,struct rte_dev_reg_info * regs)4453ab7a6530SJiawen Wu txgbe_get_regs(struct rte_eth_dev *dev,
4454ab7a6530SJiawen Wu 	      struct rte_dev_reg_info *regs)
4455ab7a6530SJiawen Wu {
4456ab7a6530SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4457ab7a6530SJiawen Wu 	uint32_t *data = regs->data;
4458ab7a6530SJiawen Wu 	int g_ind = 0;
4459ab7a6530SJiawen Wu 	int count = 0;
4460ab7a6530SJiawen Wu 	const struct reg_info *reg_group;
4461ab7a6530SJiawen Wu 	const struct reg_info **reg_set = txgbe_regs_others;
4462ab7a6530SJiawen Wu 
4463ab7a6530SJiawen Wu 	if (data == NULL) {
4464ab7a6530SJiawen Wu 		regs->length = txgbe_get_reg_length(dev);
4465ab7a6530SJiawen Wu 		regs->width = sizeof(uint32_t);
4466ab7a6530SJiawen Wu 		return 0;
4467ab7a6530SJiawen Wu 	}
4468ab7a6530SJiawen Wu 
4469ab7a6530SJiawen Wu 	/* Support only full register dump */
4470ab7a6530SJiawen Wu 	if (regs->length == 0 ||
4471ab7a6530SJiawen Wu 	    regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
4472ab7a6530SJiawen Wu 		regs->version = hw->mac.type << 24 |
4473ab7a6530SJiawen Wu 				hw->revision_id << 16 |
4474ab7a6530SJiawen Wu 				hw->device_id;
4475ab7a6530SJiawen Wu 		while ((reg_group = reg_set[g_ind++]))
4476ab7a6530SJiawen Wu 			count += txgbe_read_regs_group(dev, &data[count],
4477ab7a6530SJiawen Wu 						      reg_group);
4478ab7a6530SJiawen Wu 		return 0;
4479ab7a6530SJiawen Wu 	}
4480ab7a6530SJiawen Wu 
4481ab7a6530SJiawen Wu 	return -ENOTSUP;
4482ab7a6530SJiawen Wu }
4483ab7a6530SJiawen Wu 
4484ab7a6530SJiawen Wu static int
txgbe_get_eeprom_length(struct rte_eth_dev * dev)44853cc8b50dSJiawen Wu txgbe_get_eeprom_length(struct rte_eth_dev *dev)
44863cc8b50dSJiawen Wu {
44873cc8b50dSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
44883cc8b50dSJiawen Wu 
44893cc8b50dSJiawen Wu 	/* Return unit is byte count */
44903cc8b50dSJiawen Wu 	return hw->rom.word_size * 2;
44913cc8b50dSJiawen Wu }
44923cc8b50dSJiawen Wu 
44933cc8b50dSJiawen Wu static int
txgbe_get_eeprom(struct rte_eth_dev * dev,struct rte_dev_eeprom_info * in_eeprom)44943cc8b50dSJiawen Wu txgbe_get_eeprom(struct rte_eth_dev *dev,
44953cc8b50dSJiawen Wu 		struct rte_dev_eeprom_info *in_eeprom)
44963cc8b50dSJiawen Wu {
44973cc8b50dSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
44983cc8b50dSJiawen Wu 	struct txgbe_rom_info *eeprom = &hw->rom;
44993cc8b50dSJiawen Wu 	uint16_t *data = in_eeprom->data;
45003cc8b50dSJiawen Wu 	int first, length;
45013cc8b50dSJiawen Wu 
45023cc8b50dSJiawen Wu 	first = in_eeprom->offset >> 1;
45033cc8b50dSJiawen Wu 	length = in_eeprom->length >> 1;
45043cc8b50dSJiawen Wu 	if (first > hw->rom.word_size ||
45053cc8b50dSJiawen Wu 	    ((first + length) > hw->rom.word_size))
45063cc8b50dSJiawen Wu 		return -EINVAL;
45073cc8b50dSJiawen Wu 
45083cc8b50dSJiawen Wu 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
45093cc8b50dSJiawen Wu 
45103cc8b50dSJiawen Wu 	return eeprom->readw_buffer(hw, first, length, data);
45113cc8b50dSJiawen Wu }
45123cc8b50dSJiawen Wu 
45133cc8b50dSJiawen Wu static int
txgbe_set_eeprom(struct rte_eth_dev * dev,struct rte_dev_eeprom_info * in_eeprom)45143cc8b50dSJiawen Wu txgbe_set_eeprom(struct rte_eth_dev *dev,
45153cc8b50dSJiawen Wu 		struct rte_dev_eeprom_info *in_eeprom)
45163cc8b50dSJiawen Wu {
45173cc8b50dSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
45183cc8b50dSJiawen Wu 	struct txgbe_rom_info *eeprom = &hw->rom;
45193cc8b50dSJiawen Wu 	uint16_t *data = in_eeprom->data;
45203cc8b50dSJiawen Wu 	int first, length;
45213cc8b50dSJiawen Wu 
45223cc8b50dSJiawen Wu 	first = in_eeprom->offset >> 1;
45233cc8b50dSJiawen Wu 	length = in_eeprom->length >> 1;
45243cc8b50dSJiawen Wu 	if (first > hw->rom.word_size ||
45253cc8b50dSJiawen Wu 	    ((first + length) > hw->rom.word_size))
45263cc8b50dSJiawen Wu 		return -EINVAL;
45273cc8b50dSJiawen Wu 
45283cc8b50dSJiawen Wu 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
45293cc8b50dSJiawen Wu 
45303cc8b50dSJiawen Wu 	return eeprom->writew_buffer(hw,  first, length, data);
45313cc8b50dSJiawen Wu }
45323cc8b50dSJiawen Wu 
/* Report the plugged SFP module's EEPROM layout (SFF-8079 vs SFF-8472)
 * so applications know how many bytes txgbe_get_module_eeprom() can
 * return. Probes the module over I2C; returns 0 on success or -EIO when
 * the module does not answer.
 */
static int
txgbe_get_module_info(struct rte_eth_dev *dev,
		      struct rte_eth_dev_module_info *modinfo)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t status;
	uint8_t sff8472_rev, addr_mode;
	bool page_swap = false;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.read_i2c_eeprom(hw,
					     TXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status != 0)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.read_i2c_eeprom(hw,
					     TXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status != 0)
		return -EIO;

	/* Modules that require an address change to reach the diagnostic
	 * page are treated as SFF-8079-only below.
	 */
	if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
		PMD_DRV_LOG(ERR,
			    "Address change required to access page 0xA2, "
			    "but not supported. Please report the module "
			    "type to the driver maintainers.");
		page_swap = true;
	}

	if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = RTE_ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
45763cc8b50dSJiawen Wu 
45773cc8b50dSJiawen Wu static int
txgbe_get_module_eeprom(struct rte_eth_dev * dev,struct rte_dev_eeprom_info * info)45783cc8b50dSJiawen Wu txgbe_get_module_eeprom(struct rte_eth_dev *dev,
45793cc8b50dSJiawen Wu 			struct rte_dev_eeprom_info *info)
45803cc8b50dSJiawen Wu {
45813cc8b50dSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
45823cc8b50dSJiawen Wu 	uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
45833cc8b50dSJiawen Wu 	uint8_t databyte = 0xFF;
45843cc8b50dSJiawen Wu 	uint8_t *data = info->data;
45853cc8b50dSJiawen Wu 	uint32_t i = 0;
45863cc8b50dSJiawen Wu 
45873cc8b50dSJiawen Wu 	if (info->length == 0)
45883cc8b50dSJiawen Wu 		return -EINVAL;
45893cc8b50dSJiawen Wu 
45903cc8b50dSJiawen Wu 	for (i = info->offset; i < info->offset + info->length; i++) {
45913cc8b50dSJiawen Wu 		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
45923cc8b50dSJiawen Wu 			status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
45933cc8b50dSJiawen Wu 		else
45943cc8b50dSJiawen Wu 			status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
45953cc8b50dSJiawen Wu 
45963cc8b50dSJiawen Wu 		if (status != 0)
45973cc8b50dSJiawen Wu 			return -EIO;
45983cc8b50dSJiawen Wu 
45993cc8b50dSJiawen Wu 		data[i - info->offset] = databyte;
46003cc8b50dSJiawen Wu 	}
46013cc8b50dSJiawen Wu 
46023cc8b50dSJiawen Wu 	return 0;
46033cc8b50dSJiawen Wu }
46043cc8b50dSJiawen Wu 
46059e487a37SJiawen Wu bool
txgbe_rss_update_sp(enum txgbe_mac_type mac_type)46069e487a37SJiawen Wu txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
46079e487a37SJiawen Wu {
46089e487a37SJiawen Wu 	switch (mac_type) {
46099e487a37SJiawen Wu 	case txgbe_mac_raptor:
461064b5d946SJiawen Wu 	case txgbe_mac_raptor_vf:
46119e487a37SJiawen Wu 		return 1;
46129e487a37SJiawen Wu 	default:
46139e487a37SJiawen Wu 		return 0;
46149e487a37SJiawen Wu 	}
46159e487a37SJiawen Wu }
46169e487a37SJiawen Wu 
/* Fill in the DCB configuration for ethdev's rte_eth_dev_get_dcb_info():
 * number of TCs, user-priority-to-TC map, per-TC queue ranges and
 * bandwidth shares. The queue layouts for the non-virtualized 4-TC and
 * 8-TC cases are fixed tables dictated by the hardware queue assignment.
 * Always returns 0.
 */
static int
txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
			struct rte_eth_dcb_info *dcb_info)
{
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
	struct txgbe_dcb_tc_config *tc;
	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
	uint8_t nb_tcs;
	uint8_t i, j;

	/* Without DCB Rx mode only a single traffic class is reported. */
	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
	else
		dcb_info->nb_tcs = 1;

	tc_queue = &dcb_info->tc_queue;
	nb_tcs = dcb_info->nb_tcs;

	if (dcb_config->vt_mode) { /* vt is enabled */
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
		/* Priority-to-TC mapping comes from the VMDq+DCB config. */
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
			/* With SR-IOV active only pool 0 is described:
			 * one queue per TC.
			 */
			for (j = 0; j < nb_tcs; j++) {
				tc_queue->tc_rxq[0][j].base = j;
				tc_queue->tc_rxq[0][j].nb_queue = 1;
				tc_queue->tc_txq[0][j].base = j;
				tc_queue->tc_txq[0][j].nb_queue = 1;
			}
		} else {
			/* One queue per (pool, TC) pair, laid out
			 * pool-major.
			 */
			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
				for (j = 0; j < nb_tcs; j++) {
					tc_queue->tc_rxq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_rxq[i][j].nb_queue = 1;
					tc_queue->tc_txq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_txq[i][j].nb_queue = 1;
				}
			}
		}
	} else { /* vt is disabled */
		struct rte_eth_dcb_rx_conf *rx_conf =
				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
		/* Fixed hardware queue layout: Rx gets an even split per
		 * TC, Tx queue counts shrink for higher TC indices.
		 */
		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 64;
			dcb_info->tc_queue.tc_txq[0][2].base = 96;
			dcb_info->tc_queue.tc_txq[0][3].base = 112;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 32;
			dcb_info->tc_queue.tc_txq[0][2].base = 64;
			dcb_info->tc_queue.tc_txq[0][3].base = 80;
			dcb_info->tc_queue.tc_txq[0][4].base = 96;
			dcb_info->tc_queue.tc_txq[0][5].base = 104;
			dcb_info->tc_queue.tc_txq[0][6].base = 112;
			dcb_info->tc_queue.tc_txq[0][7].base = 120;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
		}
	}
	/* Report each TC's Tx bandwidth-group percentage. */
	for (i = 0; i < dcb_info->nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
	}
	return 0;
}
47069810975cSJiawen Wu 
47075377fa68SJiawen Wu /* Update e-tag ether type */
47085377fa68SJiawen Wu static int
txgbe_update_e_tag_eth_type(struct txgbe_hw * hw,uint16_t ether_type)47095377fa68SJiawen Wu txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
47105377fa68SJiawen Wu 			    uint16_t ether_type)
47115377fa68SJiawen Wu {
47125377fa68SJiawen Wu 	uint32_t etag_etype;
47135377fa68SJiawen Wu 
47145377fa68SJiawen Wu 	etag_etype = rd32(hw, TXGBE_EXTAG);
47155377fa68SJiawen Wu 	etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
47165377fa68SJiawen Wu 	etag_etype |= ether_type;
47175377fa68SJiawen Wu 	wr32(hw, TXGBE_EXTAG, etag_etype);
47185377fa68SJiawen Wu 	txgbe_flush(hw);
47195377fa68SJiawen Wu 
47205377fa68SJiawen Wu 	return 0;
47215377fa68SJiawen Wu }
47225377fa68SJiawen Wu 
47235377fa68SJiawen Wu /* Enable e-tag tunnel */
47245377fa68SJiawen Wu static int
txgbe_e_tag_enable(struct txgbe_hw * hw)47255377fa68SJiawen Wu txgbe_e_tag_enable(struct txgbe_hw *hw)
47265377fa68SJiawen Wu {
47275377fa68SJiawen Wu 	uint32_t etag_etype;
47285377fa68SJiawen Wu 
47295377fa68SJiawen Wu 	etag_etype = rd32(hw, TXGBE_PORTCTL);
47305377fa68SJiawen Wu 	etag_etype |= TXGBE_PORTCTL_ETAG;
47315377fa68SJiawen Wu 	wr32(hw, TXGBE_PORTCTL, etag_etype);
47325377fa68SJiawen Wu 	txgbe_flush(hw);
47335377fa68SJiawen Wu 
47345377fa68SJiawen Wu 	return 0;
47355377fa68SJiawen Wu }
47365377fa68SJiawen Wu 
/* Remove the E-tag forwarding rule matching l2_tunnel->tunnel_id from
 * the receive address (RAR) table. The table is accessed indirectly:
 * TXGBE_ETHADDRIDX selects the entry, TXGBE_ETHADDRH/L read or write it.
 * Entry 0 is never scanned (presumably reserved for the primary MAC
 * address -- confirm against txgbe_set_rar()). Returns 0 whether or not
 * a matching entry was found.
 */
static int
txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
		       struct txgbe_l2_tunnel_conf  *l2_tunnel)
{
	int ret = 0;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	rar_entries = hw->mac.num_rar_entries;

	for (i = 1; i < rar_entries; i++) {
		wr32(hw, TXGBE_ETHADDRIDX, i);
		rar_high = rd32(hw, TXGBE_ETHADDRH);
		rar_low  = rd32(hw, TXGBE_ETHADDRL);
		/* Match only valid E-tag entries carrying the same id. */
		if ((rar_high & TXGBE_ETHADDRH_VLD) &&
		    (rar_high & TXGBE_ETHADDRH_ETAG) &&
		    (TXGBE_ETHADDRL_ETAG(rar_low) ==
		     l2_tunnel->tunnel_id)) {
			/* Clear the entry and detach it from all pools. */
			wr32(hw, TXGBE_ETHADDRL, 0);
			wr32(hw, TXGBE_ETHADDRH, 0);

			txgbe_clear_vmdq(hw, i, BIT_MASK32);

			return ret;
		}
	}

	return ret;
}
4767ad1a8a27SJiawen Wu 
4768ad1a8a27SJiawen Wu static int
txgbe_e_tag_filter_add(struct rte_eth_dev * dev,struct txgbe_l2_tunnel_conf * l2_tunnel)4769ad1a8a27SJiawen Wu txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
4770ad1a8a27SJiawen Wu 		       struct txgbe_l2_tunnel_conf *l2_tunnel)
4771ad1a8a27SJiawen Wu {
4772ad1a8a27SJiawen Wu 	int ret = 0;
4773ad1a8a27SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4774ad1a8a27SJiawen Wu 	uint32_t i, rar_entries;
4775ad1a8a27SJiawen Wu 	uint32_t rar_low, rar_high;
4776ad1a8a27SJiawen Wu 
4777ad1a8a27SJiawen Wu 	/* One entry for one tunnel. Try to remove potential existing entry. */
4778ad1a8a27SJiawen Wu 	txgbe_e_tag_filter_del(dev, l2_tunnel);
4779ad1a8a27SJiawen Wu 
4780ad1a8a27SJiawen Wu 	rar_entries = hw->mac.num_rar_entries;
4781ad1a8a27SJiawen Wu 
4782ad1a8a27SJiawen Wu 	for (i = 1; i < rar_entries; i++) {
4783ad1a8a27SJiawen Wu 		wr32(hw, TXGBE_ETHADDRIDX, i);
4784ad1a8a27SJiawen Wu 		rar_high = rd32(hw, TXGBE_ETHADDRH);
4785ad1a8a27SJiawen Wu 		if (rar_high & TXGBE_ETHADDRH_VLD) {
4786ad1a8a27SJiawen Wu 			continue;
4787ad1a8a27SJiawen Wu 		} else {
4788ad1a8a27SJiawen Wu 			txgbe_set_vmdq(hw, i, l2_tunnel->pool);
4789ad1a8a27SJiawen Wu 			rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
4790ad1a8a27SJiawen Wu 			rar_low = l2_tunnel->tunnel_id;
4791ad1a8a27SJiawen Wu 
4792ad1a8a27SJiawen Wu 			wr32(hw, TXGBE_ETHADDRL, rar_low);
4793ad1a8a27SJiawen Wu 			wr32(hw, TXGBE_ETHADDRH, rar_high);
4794ad1a8a27SJiawen Wu 
4795ad1a8a27SJiawen Wu 			return ret;
4796ad1a8a27SJiawen Wu 		}
4797ad1a8a27SJiawen Wu 	}
4798ad1a8a27SJiawen Wu 
4799ad1a8a27SJiawen Wu 	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
4800ad1a8a27SJiawen Wu 		     " Please remove a rule before adding a new one.");
4801ad1a8a27SJiawen Wu 	return -EINVAL;
4802ad1a8a27SJiawen Wu }
4803ad1a8a27SJiawen Wu 
4804ad1a8a27SJiawen Wu static inline struct txgbe_l2_tn_filter *
txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info * l2_tn_info,struct txgbe_l2_tn_key * key)4805ad1a8a27SJiawen Wu txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
4806ad1a8a27SJiawen Wu 			  struct txgbe_l2_tn_key *key)
4807ad1a8a27SJiawen Wu {
4808ad1a8a27SJiawen Wu 	int ret;
4809ad1a8a27SJiawen Wu 
4810ad1a8a27SJiawen Wu 	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
4811ad1a8a27SJiawen Wu 	if (ret < 0)
4812ad1a8a27SJiawen Wu 		return NULL;
4813ad1a8a27SJiawen Wu 
4814ad1a8a27SJiawen Wu 	return l2_tn_info->hash_map[ret];
4815ad1a8a27SJiawen Wu }
4816ad1a8a27SJiawen Wu 
4817ad1a8a27SJiawen Wu static inline int
txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info * l2_tn_info,struct txgbe_l2_tn_filter * l2_tn_filter)4818ad1a8a27SJiawen Wu txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4819ad1a8a27SJiawen Wu 			  struct txgbe_l2_tn_filter *l2_tn_filter)
4820ad1a8a27SJiawen Wu {
4821ad1a8a27SJiawen Wu 	int ret;
4822ad1a8a27SJiawen Wu 
4823ad1a8a27SJiawen Wu 	ret = rte_hash_add_key(l2_tn_info->hash_handle,
4824ad1a8a27SJiawen Wu 			       &l2_tn_filter->key);
4825ad1a8a27SJiawen Wu 
4826ad1a8a27SJiawen Wu 	if (ret < 0) {
4827ad1a8a27SJiawen Wu 		PMD_DRV_LOG(ERR,
4828ad1a8a27SJiawen Wu 			    "Failed to insert L2 tunnel filter"
4829ad1a8a27SJiawen Wu 			    " to hash table %d!",
4830ad1a8a27SJiawen Wu 			    ret);
4831ad1a8a27SJiawen Wu 		return ret;
4832ad1a8a27SJiawen Wu 	}
4833ad1a8a27SJiawen Wu 
4834ad1a8a27SJiawen Wu 	l2_tn_info->hash_map[ret] = l2_tn_filter;
4835ad1a8a27SJiawen Wu 
4836ad1a8a27SJiawen Wu 	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4837ad1a8a27SJiawen Wu 
4838ad1a8a27SJiawen Wu 	return 0;
4839ad1a8a27SJiawen Wu }
4840ad1a8a27SJiawen Wu 
4841ad1a8a27SJiawen Wu static inline int
txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info * l2_tn_info,struct txgbe_l2_tn_key * key)4842ad1a8a27SJiawen Wu txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4843ad1a8a27SJiawen Wu 			  struct txgbe_l2_tn_key *key)
4844ad1a8a27SJiawen Wu {
4845ad1a8a27SJiawen Wu 	int ret;
4846ad1a8a27SJiawen Wu 	struct txgbe_l2_tn_filter *l2_tn_filter;
4847ad1a8a27SJiawen Wu 
4848ad1a8a27SJiawen Wu 	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
4849ad1a8a27SJiawen Wu 
4850ad1a8a27SJiawen Wu 	if (ret < 0) {
4851ad1a8a27SJiawen Wu 		PMD_DRV_LOG(ERR,
4852ad1a8a27SJiawen Wu 			    "No such L2 tunnel filter to delete %d!",
4853ad1a8a27SJiawen Wu 			    ret);
4854ad1a8a27SJiawen Wu 		return ret;
4855ad1a8a27SJiawen Wu 	}
4856ad1a8a27SJiawen Wu 
4857ad1a8a27SJiawen Wu 	l2_tn_filter = l2_tn_info->hash_map[ret];
4858ad1a8a27SJiawen Wu 	l2_tn_info->hash_map[ret] = NULL;
4859ad1a8a27SJiawen Wu 
4860ad1a8a27SJiawen Wu 	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4861ad1a8a27SJiawen Wu 	rte_free(l2_tn_filter);
4862ad1a8a27SJiawen Wu 
4863ad1a8a27SJiawen Wu 	return 0;
4864ad1a8a27SJiawen Wu }
4865ad1a8a27SJiawen Wu 
/* Add l2 tunnel filter */
/* Add an L2 tunnel filter in software and hardware.
 *
 * When @restore is false a new software node is allocated and inserted
 * into the hash table first; if the subsequent hardware programming
 * fails, the node is removed again so software state stays consistent
 * with hardware. When @restore is true only the hardware entry is
 * (re)programmed -- the software node is expected to exist already.
 *
 * Returns 0 on success, -EINVAL for a duplicate filter or an invalid
 * tunnel type, -ENOMEM on allocation failure, or a hash-table error.
 */
int
txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel,
			       bool restore)
{
	int ret;
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
	struct txgbe_l2_tn_key key;
	struct txgbe_l2_tn_filter *node;

	if (!restore) {
		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
		key.tn_id = l2_tunnel->tunnel_id;

		/* Reject duplicates before allocating anything. */
		node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);

		if (node) {
			PMD_DRV_LOG(ERR,
				    "The L2 tunnel filter already exists!");
			return -EINVAL;
		}

		node = rte_zmalloc("txgbe_l2_tn",
				   sizeof(struct txgbe_l2_tn_filter),
				   0);
		if (!node)
			return -ENOMEM;

		rte_memcpy(&node->key,
				 &key,
				 sizeof(struct txgbe_l2_tn_key));
		node->pool = l2_tunnel->pool;
		ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
		if (ret < 0) {
			rte_free(node);
			return ret;
		}
	}

	/* Program the hardware entry for the tunnel type. */
	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	/* Roll back the software node if hardware programming failed. */
	if (!restore && ret < 0)
		(void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);

	return ret;
}
4921ad1a8a27SJiawen Wu 
4922ad1a8a27SJiawen Wu /* Delete l2 tunnel filter */
4923ad1a8a27SJiawen Wu int
txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev * dev,struct txgbe_l2_tunnel_conf * l2_tunnel)4924ad1a8a27SJiawen Wu txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
4925ad1a8a27SJiawen Wu 			       struct txgbe_l2_tunnel_conf *l2_tunnel)
4926ad1a8a27SJiawen Wu {
4927ad1a8a27SJiawen Wu 	int ret;
4928ad1a8a27SJiawen Wu 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4929ad1a8a27SJiawen Wu 	struct txgbe_l2_tn_key key;
4930ad1a8a27SJiawen Wu 
4931ad1a8a27SJiawen Wu 	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4932ad1a8a27SJiawen Wu 	key.tn_id = l2_tunnel->tunnel_id;
4933ad1a8a27SJiawen Wu 	ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4934ad1a8a27SJiawen Wu 	if (ret < 0)
4935ad1a8a27SJiawen Wu 		return ret;
4936ad1a8a27SJiawen Wu 
4937ad1a8a27SJiawen Wu 	switch (l2_tunnel->l2_tunnel_type) {
4938295968d1SFerruh Yigit 	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
4939ad1a8a27SJiawen Wu 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
4940ad1a8a27SJiawen Wu 		break;
4941ad1a8a27SJiawen Wu 	default:
4942ad1a8a27SJiawen Wu 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4943ad1a8a27SJiawen Wu 		ret = -EINVAL;
4944ad1a8a27SJiawen Wu 		break;
4945ad1a8a27SJiawen Wu 	}
4946ad1a8a27SJiawen Wu 
4947ad1a8a27SJiawen Wu 	return ret;
4948ad1a8a27SJiawen Wu }
4949ad1a8a27SJiawen Wu 
4950ad1a8a27SJiawen Wu static int
txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev * dev,bool en)49515377fa68SJiawen Wu txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
49525377fa68SJiawen Wu {
49535377fa68SJiawen Wu 	int ret = 0;
49545377fa68SJiawen Wu 	uint32_t ctrl;
49555377fa68SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
49565377fa68SJiawen Wu 
49575377fa68SJiawen Wu 	ctrl = rd32(hw, TXGBE_POOLCTL);
49585377fa68SJiawen Wu 	ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
49595377fa68SJiawen Wu 	if (en)
49605377fa68SJiawen Wu 		ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
49615377fa68SJiawen Wu 	wr32(hw, TXGBE_POOLCTL, ctrl);
49625377fa68SJiawen Wu 
49635377fa68SJiawen Wu 	return ret;
49645377fa68SJiawen Wu }
49655377fa68SJiawen Wu 
/* Add UDP tunneling port */
/* Register the UDP destination port the hardware should recognize as a
 * tunnel of the given type (VXLAN, Geneve, Teredo, VXLAN-GPE). Only one
 * port register exists per tunnel type, so a new port replaces the
 * previous one. Returns 0 on success, -EINVAL for a NULL argument,
 * port 0, or an unsupported tunnel type.
 */
static int
txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			      struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_ETH_TUNNEL_TYPE_VXLAN:
		/* Port 0 is rejected for every tunnel type below. */
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
		break;
	case RTE_ETH_TUNNEL_TYPE_GENEVE:
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
		break;
	case RTE_ETH_TUNNEL_TYPE_TEREDO:
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
		break;
	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
		if (udp_tunnel->udp_port == 0) {
			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
			ret = -EINVAL;
			break;
		}
		wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	/* Flush posted writes even on the error paths (harmless no-op). */
	txgbe_flush(hw);

	return ret;
}
5020d15c7568SJiawen Wu 
5021d15c7568SJiawen Wu /* Remove UDP tunneling port */
5022d15c7568SJiawen Wu static int
txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev * dev,struct rte_eth_udp_tunnel * udp_tunnel)5023d15c7568SJiawen Wu txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
5024d15c7568SJiawen Wu 			      struct rte_eth_udp_tunnel *udp_tunnel)
5025d15c7568SJiawen Wu {
5026d15c7568SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5027d15c7568SJiawen Wu 	int ret = 0;
5028d15c7568SJiawen Wu 	uint16_t cur_port;
5029d15c7568SJiawen Wu 
5030d15c7568SJiawen Wu 	if (udp_tunnel == NULL)
5031d15c7568SJiawen Wu 		return -EINVAL;
5032d15c7568SJiawen Wu 
5033d15c7568SJiawen Wu 	switch (udp_tunnel->prot_type) {
5034295968d1SFerruh Yigit 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
5035d15c7568SJiawen Wu 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
5036d15c7568SJiawen Wu 		if (cur_port != udp_tunnel->udp_port) {
5037d15c7568SJiawen Wu 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5038d15c7568SJiawen Wu 					udp_tunnel->udp_port);
5039d15c7568SJiawen Wu 			ret = -EINVAL;
5040d15c7568SJiawen Wu 			break;
5041d15c7568SJiawen Wu 		}
5042d15c7568SJiawen Wu 		wr32(hw, TXGBE_VXLANPORT, 0);
5043d15c7568SJiawen Wu 		break;
5044295968d1SFerruh Yigit 	case RTE_ETH_TUNNEL_TYPE_GENEVE:
5045d15c7568SJiawen Wu 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
5046d15c7568SJiawen Wu 		if (cur_port != udp_tunnel->udp_port) {
5047d15c7568SJiawen Wu 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5048d15c7568SJiawen Wu 					udp_tunnel->udp_port);
5049d15c7568SJiawen Wu 			ret = -EINVAL;
5050d15c7568SJiawen Wu 			break;
5051d15c7568SJiawen Wu 		}
5052d15c7568SJiawen Wu 		wr32(hw, TXGBE_GENEVEPORT, 0);
5053d15c7568SJiawen Wu 		break;
5054295968d1SFerruh Yigit 	case RTE_ETH_TUNNEL_TYPE_TEREDO:
5055d15c7568SJiawen Wu 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
5056d15c7568SJiawen Wu 		if (cur_port != udp_tunnel->udp_port) {
5057d15c7568SJiawen Wu 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5058d15c7568SJiawen Wu 					udp_tunnel->udp_port);
5059d15c7568SJiawen Wu 			ret = -EINVAL;
5060d15c7568SJiawen Wu 			break;
5061d15c7568SJiawen Wu 		}
5062d15c7568SJiawen Wu 		wr32(hw, TXGBE_TEREDOPORT, 0);
5063d15c7568SJiawen Wu 		break;
5064295968d1SFerruh Yigit 	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
5065b950203bSJiawen Wu 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
5066b950203bSJiawen Wu 		if (cur_port != udp_tunnel->udp_port) {
5067b950203bSJiawen Wu 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5068b950203bSJiawen Wu 					udp_tunnel->udp_port);
5069b950203bSJiawen Wu 			ret = -EINVAL;
5070b950203bSJiawen Wu 			break;
5071b950203bSJiawen Wu 		}
5072b950203bSJiawen Wu 		wr32(hw, TXGBE_VXLANPORTGPE, 0);
5073b950203bSJiawen Wu 		break;
5074d15c7568SJiawen Wu 	default:
5075d15c7568SJiawen Wu 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5076d15c7568SJiawen Wu 		ret = -EINVAL;
5077d15c7568SJiawen Wu 		break;
5078d15c7568SJiawen Wu 	}
5079d15c7568SJiawen Wu 
5080d15c7568SJiawen Wu 	txgbe_flush(hw);
5081d15c7568SJiawen Wu 
5082d15c7568SJiawen Wu 	return ret;
5083d15c7568SJiawen Wu }
5084d15c7568SJiawen Wu 
508577a72b4dSJiawen Wu /* restore n-tuple filter */
508677a72b4dSJiawen Wu static inline void
txgbe_ntuple_filter_restore(struct rte_eth_dev * dev)508777a72b4dSJiawen Wu txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
508877a72b4dSJiawen Wu {
508977a72b4dSJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
509077a72b4dSJiawen Wu 	struct txgbe_5tuple_filter *node;
509177a72b4dSJiawen Wu 
509277a72b4dSJiawen Wu 	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
509377a72b4dSJiawen Wu 		txgbe_inject_5tuple_filter(dev, node);
509477a72b4dSJiawen Wu 	}
509577a72b4dSJiawen Wu }
509677a72b4dSJiawen Wu 
5097f8e2cfc7SJiawen Wu /* restore ethernet type filter */
5098f8e2cfc7SJiawen Wu static inline void
txgbe_ethertype_filter_restore(struct rte_eth_dev * dev)5099f8e2cfc7SJiawen Wu txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
5100f8e2cfc7SJiawen Wu {
5101f8e2cfc7SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5102f8e2cfc7SJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5103f8e2cfc7SJiawen Wu 	int i;
5104f8e2cfc7SJiawen Wu 
5105f8e2cfc7SJiawen Wu 	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
5106f8e2cfc7SJiawen Wu 		if (filter_info->ethertype_mask & (1 << i)) {
5107f8e2cfc7SJiawen Wu 			wr32(hw, TXGBE_ETFLT(i),
5108f8e2cfc7SJiawen Wu 					filter_info->ethertype_filters[i].etqf);
5109f8e2cfc7SJiawen Wu 			wr32(hw, TXGBE_ETCLS(i),
5110f8e2cfc7SJiawen Wu 					filter_info->ethertype_filters[i].etqs);
5111f8e2cfc7SJiawen Wu 			txgbe_flush(hw);
5112f8e2cfc7SJiawen Wu 		}
5113f8e2cfc7SJiawen Wu 	}
5114f8e2cfc7SJiawen Wu }
5115f8e2cfc7SJiawen Wu 
5116983a4ef2SJiawen Wu /* restore SYN filter */
5117983a4ef2SJiawen Wu static inline void
txgbe_syn_filter_restore(struct rte_eth_dev * dev)5118983a4ef2SJiawen Wu txgbe_syn_filter_restore(struct rte_eth_dev *dev)
5119983a4ef2SJiawen Wu {
5120983a4ef2SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5121983a4ef2SJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5122983a4ef2SJiawen Wu 	uint32_t synqf;
5123983a4ef2SJiawen Wu 
5124983a4ef2SJiawen Wu 	synqf = filter_info->syn_info;
5125983a4ef2SJiawen Wu 
5126983a4ef2SJiawen Wu 	if (synqf & TXGBE_SYNCLS_ENA) {
5127983a4ef2SJiawen Wu 		wr32(hw, TXGBE_SYNCLS, synqf);
5128983a4ef2SJiawen Wu 		txgbe_flush(hw);
5129983a4ef2SJiawen Wu 	}
5130983a4ef2SJiawen Wu }
5131983a4ef2SJiawen Wu 
5132ad1a8a27SJiawen Wu /* restore L2 tunnel filter */
5133ad1a8a27SJiawen Wu static inline void
txgbe_l2_tn_filter_restore(struct rte_eth_dev * dev)5134ad1a8a27SJiawen Wu txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
5135ad1a8a27SJiawen Wu {
5136ad1a8a27SJiawen Wu 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5137ad1a8a27SJiawen Wu 	struct txgbe_l2_tn_filter *node;
5138ad1a8a27SJiawen Wu 	struct txgbe_l2_tunnel_conf l2_tn_conf;
5139ad1a8a27SJiawen Wu 
5140ad1a8a27SJiawen Wu 	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
5141ad1a8a27SJiawen Wu 		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
5142ad1a8a27SJiawen Wu 		l2_tn_conf.tunnel_id      = node->key.tn_id;
5143ad1a8a27SJiawen Wu 		l2_tn_conf.pool           = node->pool;
5144ad1a8a27SJiawen Wu 		(void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
5145ad1a8a27SJiawen Wu 	}
5146ad1a8a27SJiawen Wu }
5147ad1a8a27SJiawen Wu 
51489fdfed08SJiawen Wu /* restore rss filter */
51499fdfed08SJiawen Wu static inline void
txgbe_rss_filter_restore(struct rte_eth_dev * dev)51509fdfed08SJiawen Wu txgbe_rss_filter_restore(struct rte_eth_dev *dev)
51519fdfed08SJiawen Wu {
51529fdfed08SJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
51539fdfed08SJiawen Wu 
51549fdfed08SJiawen Wu 	if (filter_info->rss_info.conf.queue_num)
51559fdfed08SJiawen Wu 		txgbe_config_rss_filter(dev,
51569fdfed08SJiawen Wu 			&filter_info->rss_info, TRUE);
51579fdfed08SJiawen Wu }
51589fdfed08SJiawen Wu 
/*
 * Re-program every software-cached filter category (n-tuple, ethertype,
 * SYN, flow director, L2 tunnel, RSS) into hardware.  Always returns 0.
 */
static int
txgbe_filter_restore(struct rte_eth_dev *dev)
{
	txgbe_ntuple_filter_restore(dev);
	txgbe_ethertype_filter_restore(dev);
	txgbe_syn_filter_restore(dev);
	txgbe_fdir_filter_restore(dev);
	txgbe_l2_tn_filter_restore(dev);
	txgbe_rss_filter_restore(dev);

	return 0;
}
517177a72b4dSJiawen Wu 
51725377fa68SJiawen Wu static void
txgbe_l2_tunnel_conf(struct rte_eth_dev * dev)51735377fa68SJiawen Wu txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
51745377fa68SJiawen Wu {
51755377fa68SJiawen Wu 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
51765377fa68SJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
51775377fa68SJiawen Wu 
51785377fa68SJiawen Wu 	if (l2_tn_info->e_tag_en)
51795377fa68SJiawen Wu 		(void)txgbe_e_tag_enable(hw);
51805377fa68SJiawen Wu 
51815377fa68SJiawen Wu 	if (l2_tn_info->e_tag_fwd_en)
51825377fa68SJiawen Wu 		(void)txgbe_e_tag_forwarding_en_dis(dev, 1);
51835377fa68SJiawen Wu 
51845377fa68SJiawen Wu 	(void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
51855377fa68SJiawen Wu }
51865377fa68SJiawen Wu 
51876bde42feSJiawen Wu /* remove all the n-tuple filters */
51886bde42feSJiawen Wu void
txgbe_clear_all_ntuple_filter(struct rte_eth_dev * dev)51896bde42feSJiawen Wu txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
51906bde42feSJiawen Wu {
51916bde42feSJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
51926bde42feSJiawen Wu 	struct txgbe_5tuple_filter *p_5tuple;
51936bde42feSJiawen Wu 
51946bde42feSJiawen Wu 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
51956bde42feSJiawen Wu 		txgbe_remove_5tuple_filter(dev, p_5tuple);
51966bde42feSJiawen Wu }
51976bde42feSJiawen Wu 
51986bde42feSJiawen Wu /* remove all the ether type filters */
51996bde42feSJiawen Wu void
txgbe_clear_all_ethertype_filter(struct rte_eth_dev * dev)52006bde42feSJiawen Wu txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
52016bde42feSJiawen Wu {
52026bde42feSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
52036bde42feSJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
52046bde42feSJiawen Wu 	int i;
52056bde42feSJiawen Wu 
52066bde42feSJiawen Wu 	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
52076bde42feSJiawen Wu 		if (filter_info->ethertype_mask & (1 << i) &&
52086bde42feSJiawen Wu 		    !filter_info->ethertype_filters[i].conf) {
52096bde42feSJiawen Wu 			(void)txgbe_ethertype_filter_remove(filter_info,
52106bde42feSJiawen Wu 							    (uint8_t)i);
52116bde42feSJiawen Wu 			wr32(hw, TXGBE_ETFLT(i), 0);
52126bde42feSJiawen Wu 			wr32(hw, TXGBE_ETCLS(i), 0);
52136bde42feSJiawen Wu 			txgbe_flush(hw);
52146bde42feSJiawen Wu 		}
52156bde42feSJiawen Wu 	}
52166bde42feSJiawen Wu }
52176bde42feSJiawen Wu 
52186bde42feSJiawen Wu /* remove the SYN filter */
52196bde42feSJiawen Wu void
txgbe_clear_syn_filter(struct rte_eth_dev * dev)52206bde42feSJiawen Wu txgbe_clear_syn_filter(struct rte_eth_dev *dev)
52216bde42feSJiawen Wu {
52226bde42feSJiawen Wu 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
52236bde42feSJiawen Wu 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
52246bde42feSJiawen Wu 
52256bde42feSJiawen Wu 	if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
52266bde42feSJiawen Wu 		filter_info->syn_info = 0;
52276bde42feSJiawen Wu 
52286bde42feSJiawen Wu 		wr32(hw, TXGBE_SYNCLS, 0);
52296bde42feSJiawen Wu 		txgbe_flush(hw);
52306bde42feSJiawen Wu 	}
52316bde42feSJiawen Wu }
52326bde42feSJiawen Wu 
52336bde42feSJiawen Wu /* remove all the L2 tunnel filters */
52346bde42feSJiawen Wu int
txgbe_clear_all_l2_tn_filter(struct rte_eth_dev * dev)52356bde42feSJiawen Wu txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
52366bde42feSJiawen Wu {
52376bde42feSJiawen Wu 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
52386bde42feSJiawen Wu 	struct txgbe_l2_tn_filter *l2_tn_filter;
52396bde42feSJiawen Wu 	struct txgbe_l2_tunnel_conf l2_tn_conf;
52406bde42feSJiawen Wu 	int ret = 0;
52416bde42feSJiawen Wu 
52426bde42feSJiawen Wu 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
52436bde42feSJiawen Wu 		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
52446bde42feSJiawen Wu 		l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
52456bde42feSJiawen Wu 		l2_tn_conf.pool           = l2_tn_filter->pool;
52466bde42feSJiawen Wu 		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
52476bde42feSJiawen Wu 		if (ret < 0)
52486bde42feSJiawen Wu 			return ret;
52496bde42feSJiawen Wu 	}
52506bde42feSJiawen Wu 
52516bde42feSJiawen Wu 	return 0;
52526bde42feSJiawen Wu }
52536bde42feSJiawen Wu 
/*
 * ethdev callback dispatch table for the txgbe PF driver: maps each
 * generic rte_eth_dev operation to its txgbe implementation.
 */
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	/* device lifecycle */
	.dev_configure              = txgbe_dev_configure,
	.dev_infos_get              = txgbe_dev_info_get,
	.dev_start                  = txgbe_dev_start,
	.dev_stop                   = txgbe_dev_stop,
	.dev_set_link_up            = txgbe_dev_set_link_up,
	.dev_set_link_down          = txgbe_dev_set_link_down,
	.dev_close                  = txgbe_dev_close,
	.dev_reset                  = txgbe_dev_reset,
	/* rx mode */
	.promiscuous_enable         = txgbe_dev_promiscuous_enable,
	.promiscuous_disable        = txgbe_dev_promiscuous_disable,
	.allmulticast_enable        = txgbe_dev_allmulticast_enable,
	.allmulticast_disable       = txgbe_dev_allmulticast_disable,
	.link_update                = txgbe_dev_link_update,
	/* statistics */
	.stats_get                  = txgbe_dev_stats_get,
	.xstats_get                 = txgbe_dev_xstats_get,
	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
	.stats_reset                = txgbe_dev_stats_reset,
	.xstats_reset               = txgbe_dev_xstats_reset,
	.xstats_get_names           = txgbe_dev_xstats_get_names,
	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
	.fw_version_get             = txgbe_fw_version_get,
	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
	.mtu_set                    = txgbe_dev_mtu_set,
	/* VLAN offload */
	.vlan_filter_set            = txgbe_vlan_filter_set,
	.vlan_tpid_set              = txgbe_vlan_tpid_set,
	.vlan_offload_set           = txgbe_vlan_offload_set,
	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
	/* queue management */
	.rx_queue_start	            = txgbe_dev_rx_queue_start,
	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
	.tx_queue_start	            = txgbe_dev_tx_queue_start,
	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
	.rx_queue_release           = txgbe_dev_rx_queue_release,
	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
	.tx_queue_release           = txgbe_dev_tx_queue_release,
	.dev_led_on                 = txgbe_dev_led_on,
	.dev_led_off                = txgbe_dev_led_off,
	/* flow control */
	.flow_ctrl_get              = txgbe_flow_ctrl_get,
	.flow_ctrl_set              = txgbe_flow_ctrl_set,
	.priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
	/* MAC address management */
	.mac_addr_add               = txgbe_add_rar,
	.mac_addr_remove            = txgbe_remove_rar,
	.mac_addr_set               = txgbe_set_default_mac_addr,
	.uc_hash_table_set          = txgbe_uc_hash_table_set,
	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
	/* RSS */
	.reta_update                = txgbe_dev_rss_reta_update,
	.reta_query                 = txgbe_dev_rss_reta_query,
	.rss_hash_update            = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
	.flow_ops_get               = txgbe_dev_flow_ops_get,
	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
	.rxq_info_get               = txgbe_rxq_info_get,
	.txq_info_get               = txgbe_txq_info_get,
	/* IEEE 1588 timesync */
	.timesync_enable            = txgbe_timesync_enable,
	.timesync_disable           = txgbe_timesync_disable,
	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
	/* registers / EEPROM / module info */
	.get_reg                    = txgbe_get_regs,
	.get_eeprom_length          = txgbe_get_eeprom_length,
	.get_eeprom                 = txgbe_get_eeprom,
	.set_eeprom                 = txgbe_set_eeprom,
	.get_module_info            = txgbe_get_module_info,
	.get_module_eeprom          = txgbe_get_module_eeprom,
	.get_dcb_info               = txgbe_dev_get_dcb_info,
	.timesync_adjust_time       = txgbe_timesync_adjust_time,
	.timesync_read_time         = txgbe_timesync_read_time,
	.timesync_write_time        = txgbe_timesync_write_time,
	/* UDP tunnel ports and traffic management */
	.udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
	.tm_ops_get                 = txgbe_tm_ops_get,
	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
};
5331e1698e38SJiawen Wu 
/* Register the txgbe PMD with the PCI bus, export its supported PCI ID
 * table, declare kernel-module dependencies, and document the accepted
 * devargs parameters.
 */
RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_txgbe,
			      TXGBE_DEVARG_BP_AUTO "=<0|1>"
			      TXGBE_DEVARG_KR_POLL "=<0|1>"
			      TXGBE_DEVARG_KR_PRESENT "=<0|1>"
			      TXGBE_DEVARG_KX_SGMII "=<0|1>"
			      TXGBE_DEVARG_FFE_SET "=<0-4>"
			      TXGBE_DEVARG_FFE_MAIN "=<uint16>"
			      TXGBE_DEVARG_FFE_PRE "=<uint16>"
			      TXGBE_DEVARG_FFE_POST "=<uint16>");

/* Log types: init/driver/backplane default to NOTICE level. */
RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_driver, driver, NOTICE);
RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_bp, bp, NOTICE);

/* Optional datapath debug log types, compiled in only when the
 * corresponding build-time debug options are enabled.
 */
#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx, tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx_free, tx_free, DEBUG);
#endif
5359