/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */

#include <stdint.h>
#include <string.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "igc_logs.h"
#include "igc_txrx.h"
#include "igc_filter.h"
#include "igc_flow.h"

#define IGC_INTEL_VENDOR_ID		0x8086

/*
 * The overhead from MTU to max frame size.
 * A VLAN tag may be present, so it is counted as well.
 */
#define IGC_ETH_OVERHEAD		(RTE_ETHER_HDR_LEN + \
					RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
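/*
 * Editor's note, a worked example using the usual rte_ether.h values:
 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) + VLAN_TAG_SIZE (4) = 22,
 * so a 1500-byte MTU corresponds to a 1522-byte max frame size.
 */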

#define IGC_FC_PAUSE_TIME		0x0680
#define IGC_LINK_UPDATE_CHECK_TIMEOUT	90  /* 9s */
#define IGC_LINK_UPDATE_CHECK_INTERVAL	100 /* ms */

#define IGC_MISC_VEC_ID			RTE_INTR_VEC_ZERO_OFFSET
#define IGC_RX_VEC_START		RTE_INTR_VEC_RXTX_OFFSET
#define IGC_MSIX_OTHER_INTR_VEC		0   /* MSI-X other interrupt vector */
#define IGC_FLAG_NEED_LINK_UPDATE	(1u << 0)	/* need to update link status */

#define IGC_DEFAULT_RX_FREE_THRESH	32

#define IGC_DEFAULT_RX_PTHRESH		8
#define IGC_DEFAULT_RX_HTHRESH		8
#define IGC_DEFAULT_RX_WTHRESH		4

#define IGC_DEFAULT_TX_PTHRESH		8
#define IGC_DEFAULT_TX_HTHRESH		1
#define IGC_DEFAULT_TX_WTHRESH		16

/* External VLAN Enable bit mask */
#define IGC_CTRL_EXT_EXT_VLAN		(1u << 26)

/* Speed select */
#define IGC_CTRL_SPEED_MASK		(7u << 8)
#define IGC_CTRL_SPEED_2500		(6u << 8)

/* External VLAN Ether Type bit mask and shift */
#define IGC_VET_EXT			0xFFFF0000
#define IGC_VET_EXT_SHIFT		16

/* Force EEE Auto-negotiation */
#define IGC_EEER_EEE_FRC_AN		(1u << 28)

/* Per Queue Good Packets Received Count */
#define IGC_PQGPRC(idx)		(0x10010 + 0x100 * (idx))
/* Per Queue Good Octets Received Count */
#define IGC_PQGORC(idx)		(0x10018 + 0x100 * (idx))
/* Per Queue Good Octets Transmitted Count */
#define IGC_PQGOTC(idx)		(0x10034 + 0x100 * (idx))
/* Per Queue Multicast Packets Received Count */
#define IGC_PQMPRC(idx)		(0x10038 + 0x100 * (idx))
/* Transmit Queue Drop Packet Count */
#define IGC_TQDPC(idx)		(0xe030 + 0x40 * (idx))
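/*
 * Editor's note: each per-queue counter block is 0x100 bytes apart, e.g.
 * IGC_PQGPRC(0) = 0x10010 and IGC_PQGPRC(1) = 0x10110; the Tx drop
 * counters are 0x40 apart, e.g. IGC_TQDPC(1) = 0xe070.
 */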

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define U32_0_IN_U64		0	/* lower bytes of u64 */
#define U32_1_IN_U64		1	/* higher bytes of u64 */
#else
#define U32_0_IN_U64		1
#define U32_1_IN_U64		0
#endif
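/*
 * Editor's note, a minimal sketch of how these indices are meant to be
 * used, viewing a uint64_t as two uint32_t halves:
 *   uint64_t cnt;
 *   uint32_t *half = (uint32_t *)&cnt;
 * half[U32_0_IN_U64] is the low 32 bits and half[U32_1_IN_U64] the high
 * 32 bits, on either endianness.
 */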

#define IGC_ALARM_INTERVAL	8000000u
/* us; after about 13.6s some per-queue registers wrap back to 0 */
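/*
 * Editor's note on the 13.6s figure, assuming a 32-bit octet counter at
 * the 2.5Gbps line rate: 2^32 bytes * 8 bits / 2.5e9 bps is roughly
 * 13.7s, so reading every 8s keeps the software totals ahead of the wrap.
 */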

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IGC_MAX_RXD,
	.nb_min = IGC_MIN_RXD,
	.nb_align = IGC_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IGC_MAX_TXD,
	.nb_min = IGC_MIN_TXD,
	.nb_align = IGC_TXD_ALIGN,
	.nb_seg_max = IGC_TX_MAX_SEG,
	.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
};

static const struct rte_pci_id pci_id_igc_map[] = {
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K)  },
	{ .vendor_id = 0, /* sentinel */ },
};

/* stores a statistic's name and its offset in the stats structure */
struct rte_igc_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)},
	{"rx_errors", offsetof(struct igc_hw_stats, rxerrc)},
	{"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct igc_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct igc_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)},
	{"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)},
	{"rx_length_errors", offsetof(struct igc_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)},
	{"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)},
	{"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct igc_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct igc_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct igc_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct igc_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)},
	{"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)},
	{"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)},
	{"rx_descriptor_lower_threshold",
		offsetof(struct igc_hw_stats, icrxdmtc)},
};

#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \
		sizeof(rte_igc_stats_strings[0]))
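/*
 * Editor's note: this is the element count of rte_igc_stats_strings,
 * equivalent to RTE_DIM(rte_igc_stats_strings) from rte_common.h.
 */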

static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static int eth_igc_set_link_up(struct rte_eth_dev *dev);
static int eth_igc_set_link_down(struct rte_eth_dev *dev);
static int eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info);
static int eth_igc_led_on(struct rte_eth_dev *dev);
static int eth_igc_led_off(struct rte_eth_dev *dev);
static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igc_rar_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *addr);
static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr);
static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_igc_stats_get(struct rte_eth_dev *dev,
			struct rte_eth_stats *rte_stats);
static int eth_igc_xstats_get(struct rte_eth_dev *dev,
			struct rte_eth_xstat *xstats, unsigned int n);
static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values, unsigned int n);
static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int size);
static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
		unsigned int limit);
static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size);
static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf);
static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf);
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		      enum rte_vlan_type vlan_type, uint16_t tpid);

static const struct eth_dev_ops eth_igc_ops = {
	.dev_configure		= eth_igc_configure,
	.link_update		= eth_igc_link_update,
	.dev_stop		= eth_igc_stop,
	.dev_start		= eth_igc_start,
	.dev_close		= eth_igc_close,
	.dev_reset		= eth_igc_reset,
	.dev_set_link_up	= eth_igc_set_link_up,
	.dev_set_link_down	= eth_igc_set_link_down,
	.promiscuous_enable	= eth_igc_promiscuous_enable,
	.promiscuous_disable	= eth_igc_promiscuous_disable,
	.allmulticast_enable	= eth_igc_allmulticast_enable,
	.allmulticast_disable	= eth_igc_allmulticast_disable,
	.fw_version_get		= eth_igc_fw_version_get,
	.dev_infos_get		= eth_igc_infos_get,
	.dev_led_on		= eth_igc_led_on,
	.dev_led_off		= eth_igc_led_off,
	.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
	.mtu_set		= eth_igc_mtu_set,
	.mac_addr_add		= eth_igc_rar_set,
	.mac_addr_remove	= eth_igc_rar_clear,
	.mac_addr_set		= eth_igc_default_mac_addr_set,
	.set_mc_addr_list	= eth_igc_set_mc_addr_list,

	.rx_queue_setup		= eth_igc_rx_queue_setup,
	.rx_queue_release	= eth_igc_rx_queue_release,
	.tx_queue_setup		= eth_igc_tx_queue_setup,
	.tx_queue_release	= eth_igc_tx_queue_release,
	.tx_done_cleanup	= eth_igc_tx_done_cleanup,
	.rxq_info_get		= eth_igc_rxq_info_get,
	.txq_info_get		= eth_igc_txq_info_get,
	.stats_get		= eth_igc_stats_get,
	.xstats_get		= eth_igc_xstats_get,
	.xstats_get_by_id	= eth_igc_xstats_get_by_id,
	.xstats_get_names_by_id	= eth_igc_xstats_get_names_by_id,
	.xstats_get_names	= eth_igc_xstats_get_names,
	.stats_reset		= eth_igc_xstats_reset,
	.xstats_reset		= eth_igc_xstats_reset,
	.queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
	.rx_queue_intr_enable	= eth_igc_rx_queue_intr_enable,
	.rx_queue_intr_disable	= eth_igc_rx_queue_intr_disable,
	.flow_ctrl_get		= eth_igc_flow_ctrl_get,
	.flow_ctrl_set		= eth_igc_flow_ctrl_set,
	.reta_update		= eth_igc_rss_reta_update,
	.reta_query		= eth_igc_rss_reta_query,
	.rss_hash_update	= eth_igc_rss_hash_update,
	.rss_hash_conf_get	= eth_igc_rss_hash_conf_get,
	.vlan_filter_set	= eth_igc_vlan_filter_set,
	.vlan_offload_set	= eth_igc_vlan_offload_set,
	.vlan_tpid_set		= eth_igc_vlan_tpid_set,
	.vlan_strip_queue_set	= eth_igc_vlan_strip_queue_set,
	.filter_ctrl		= eth_igc_filter_ctrl,
};

/*
 * multiple queue mode checking
 */
static int
igc_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		PMD_INIT_LOG(ERR, "SRIOV is not supported.");
		return -EINVAL;
	}

	if (rx_mq_mode != ETH_MQ_RX_NONE &&
		rx_mq_mode != ETH_MQ_RX_RSS) {
		/* RSS together with VMDq is not supported */
		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				rx_mq_mode);
		return -EINVAL;
	}

	/* To not break software that sets an invalid mode, only display
	 * a warning if an invalid mode is used.
	 */
	if (tx_mq_mode != ETH_MQ_TX_NONE)
		PMD_INIT_LOG(WARNING,
			"TX mode %d is not supported; it is meaningless for this driver, ignoring it",
			tx_mq_mode);

	return 0;
}
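
/*
 * Editor's note, an example of a configuration that passes the check
 * above (RSS or no multi-queue on Rx, no multi-queue on Tx):
 *   struct rte_eth_conf conf = {
 *	.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *	.txmode = { .mq_mode = ETH_MQ_TX_NONE },
 *   };
 */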

static int
eth_igc_configure(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = igc_check_mq_mode(dev);
	if (ret != 0)
		return ret;

	intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
	return 0;
}

static int
eth_igc_set_link_up(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_up_phy(hw);
	else
		igc_power_up_fiber_serdes_link(hw);
	return 0;
}

static int
eth_igc_set_link_down(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_down_phy(hw);
	else
		igc_shutdown_fiber_serdes_link(hw);
	return 0;
}

/*
 * disable other (non-queue) interrupts
 */
static void
igc_intr_other_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMC, ~0);
	IGC_WRITE_FLUSH(hw);
}

/*
 * enable other (non-queue) interrupts
 */
static inline void
igc_intr_other_enable(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
	IGC_WRITE_FLUSH(hw);
}

/*
 * Read ICR to get the interrupt causes; if a link status change is
 * indicated, set a bit flag so the link status gets updated.
 */
static void
eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	/* read-on-clear nic registers here */
	icr = IGC_READ_REG(hw, IGC_ICR);

	intr->flags = 0;
	if (icr & IGC_ICR_LSC)
		intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
}

/* return 0 if the link status changed, -1 if it did not */
static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* wait-to-complete may poll for up to 9 seconds */
	for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case igc_media_type_copper:
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case igc_media_type_fiber:
			igc_check_for_link(hw);
			link_check = (IGC_READ_REG(hw, IGC_STATUS) &
				      IGC_STATUS_LU);
			break;

		case igc_media_type_internal_serdes:
			igc_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

		if (speed == SPEED_2500) {
			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
			if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
				tipg &= ~IGC_TIPG_IPGT_MASK;
				tipg |= 0x0b;
				IGC_WRITE_REG(hw, IGC_TIPG, tipg);
			}
		}
	} else {
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

/*
 * Execute link_update once an interrupt has indicated that the link
 * status may have changed.
 */
static void
eth_igc_interrupt_action(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		ret = eth_igc_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s",
				dev->data->port_id,
				(unsigned int)link.link_speed,
				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
		else
			PMD_DRV_LOG(INFO, " Port %d: Link Down",
				dev->data->port_id);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
}

/*
 * Interrupt handler; it is registered first, at device initialization.
 *
 * @param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 */
static void
eth_igc_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igc_interrupt_get_status(dev);
	eth_igc_interrupt_action(dev);
}

static void igc_read_queue_stats_register(struct rte_eth_dev *dev);

/*
 * Update the queue statistics every IGC_ALARM_INTERVAL microseconds.
 * @param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 */
static void
igc_update_queue_stats_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	igc_read_queue_stats_register(dev);
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
}

/*
 * rx,tx enable/disable
 */
static void
eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t tctl, rctl;

	tctl = IGC_READ_REG(hw, IGC_TCTL);
	rctl = IGC_READ_REG(hw, IGC_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= IGC_TCTL_EN;
		rctl |= IGC_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~IGC_TCTL_EN;
		rctl &= ~IGC_RCTL_EN;
	}
	IGC_WRITE_REG(hw, IGC_TCTL, tctl);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);
}

/*
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static int
eth_igc_stop(struct rte_eth_dev *dev)
{
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct rte_eth_link link;

	dev->data->dev_started = 0;
	adapter->stopped = 1;

	/* disable receive and transmit */
	eth_igc_rxtx_control(dev, false);

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	igc_intr_other_disable(dev);

	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* disable checking EEE operation in MAC loopback mode */
	igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);

	/* Set bit for Go Link disconnect */
	igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
			IGC_82580_PM_GO_LINKD);

	/* Power down the phy. Needed to make the link go down */
	eth_igc_set_link_down(dev);

	igc_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_igc_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	return 0;
}

/*
 * write interrupt vector allocation register
 * @hw
 *  board private structure
 * @queue_index
 *  queue index, valid values 0,1,2,3
 * @tx
 *  tx:1, rx:0
 * @msix_vector
 *  msix-vector, valid values 0,1,2,3,4
 */
static void
igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,
		bool tx, uint8_t msix_vector)
{
	uint8_t offset = 0;
	uint8_t reg_index = queue_index >> 1;
	uint32_t val;

	/*
	 * IVAR(0)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX1		RX1		TX0		RX0
	 *
	 * IVAR(1)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX3		RX3		TX2		RX2
	 */
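	/*
	 * Editor's note, a worked example: queue_index = 3 with tx = true
	 * yields reg_index = 3 >> 1 = 1 and offset = 8 + 16 = 24, i.e. the
	 * TX3 field in bits 31...24 of IVAR(1), matching the table above.
	 */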

	if (tx)
		offset = 8;

	if (queue_index & 1)
		offset += 16;

	val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset;

	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);
}

/* Sets up the hardware to generate MSI-X interrupts properly
 * @hw
 *  board private structure
 */
static void
igc_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	uint32_t intr_mask;
	uint32_t vec = IGC_MISC_VEC_ID;
	uint32_t base = IGC_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	int i;

	/* don't configure the MSI-X register if no mapping has been done
	 * between interrupt vectors and event fds
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = IGC_RX_VEC_START;
		vec = base;
		misc_shift = 1;
	}

	/* turn on MSI-X capability first */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
				IGC_GPIE_PBA | IGC_GPIE_EIAME |
				IGC_GPIE_NSICR);
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
		misc_shift;
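
	/*
	 * Editor's note: RTE_LEN2MASK(n, uint32_t) builds a mask of n low
	 * bits; e.g. nb_efd = 4 and misc_shift = 1 give 0xf << 1 = 0x1e,
	 * i.e. queue vectors 1-4 with vector 0 left for "other" causes.
	 */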

	if (dev->data->dev_conf.intr_conf.lsc)
		intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);

	/* enable msix auto-clear */
	igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);

	/* set other cause interrupt vector */
	igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
		(uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);

	/* enable auto-mask */
	igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		igc_write_ivar(hw, i, 0, vec);
		intr_handle->intr_vec[i] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	IGC_WRITE_FLUSH(hw);
}

/**
 * Enable or disable the link status change interrupt by updating the
 * interrupt mask.
 *
 * @dev
 *  Pointer to struct rte_eth_dev.
 * @on
 *  Enable or Disable
 */
static void
igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	if (on)
		intr->mask |= IGC_ICR_LSC;
	else
		intr->mask &= ~IGC_ICR_LSC;
}

/*
 * Enable the Rx queue interrupts.
 * It is called only once, during NIC initialization.
 */
static void
igc_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t mask;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;

	/* don't configure the MSI-X register if no mapping has been done
	 * between interrupt vectors and event fds
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift;
	IGC_WRITE_REG(hw, IGC_EIMS, mask);
}

/*
 *  Get hardware rx-buffer size.
 */
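/*
 * Editor's note: the mask and shift below imply that RXPBS bits [5:0]
 * hold the Rx packet buffer size in KB, with <<10 converting to bytes.
 */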
static inline int
igc_get_rx_buffer_size(struct igc_hw *hw)
{
	return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
}

/*
 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igc_hw_control_acquire(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igc_hw_control_release(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT,
			ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

static int
igc_hardware_init(struct igc_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igc_hw_control_acquire(hw);

	/* Issue a global reset */
	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/*
	 * Hardware flow control
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 */
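	/*
	 * Editor's note, a hypothetical example: with a 32KB Rx packet
	 * buffer, high_water = 32768 - 2 * 1518 = 29732 bytes and
	 * low_water = 29732 - 1500 = 28232 bytes.
	 */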
890*2d9fd380Sjfb8856606 	rx_buf_size = igc_get_rx_buffer_size(hw);
891*2d9fd380Sjfb8856606 	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
892*2d9fd380Sjfb8856606 	hw->fc.low_water = hw->fc.high_water - 1500;
893*2d9fd380Sjfb8856606 	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
894*2d9fd380Sjfb8856606 	hw->fc.send_xon = 1;
895*2d9fd380Sjfb8856606 	hw->fc.requested_mode = igc_fc_full;
896*2d9fd380Sjfb8856606 
897*2d9fd380Sjfb8856606 	diag = igc_init_hw(hw);
898*2d9fd380Sjfb8856606 	if (diag < 0)
899*2d9fd380Sjfb8856606 		return diag;
900*2d9fd380Sjfb8856606 
901*2d9fd380Sjfb8856606 	igc_get_phy_info(hw);
902*2d9fd380Sjfb8856606 	igc_check_for_link(hw);
903*2d9fd380Sjfb8856606 
904*2d9fd380Sjfb8856606 	return 0;
905*2d9fd380Sjfb8856606 }
906*2d9fd380Sjfb8856606 
907*2d9fd380Sjfb8856606 static int
eth_igc_start(struct rte_eth_dev * dev)908*2d9fd380Sjfb8856606 eth_igc_start(struct rte_eth_dev *dev)
909*2d9fd380Sjfb8856606 {
910*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
911*2d9fd380Sjfb8856606 	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
912*2d9fd380Sjfb8856606 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
913*2d9fd380Sjfb8856606 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
914*2d9fd380Sjfb8856606 	uint32_t *speeds;
915*2d9fd380Sjfb8856606 	int ret;
916*2d9fd380Sjfb8856606 
917*2d9fd380Sjfb8856606 	PMD_INIT_FUNC_TRACE();
918*2d9fd380Sjfb8856606 
919*2d9fd380Sjfb8856606 	/* disable all MSI-X interrupts */
920*2d9fd380Sjfb8856606 	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
921*2d9fd380Sjfb8856606 	IGC_WRITE_FLUSH(hw);
922*2d9fd380Sjfb8856606 
923*2d9fd380Sjfb8856606 	/* clear all MSI-X interrupts */
924*2d9fd380Sjfb8856606 	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);
925*2d9fd380Sjfb8856606 
926*2d9fd380Sjfb8856606 	/* disable uio/vfio intr/eventfd mapping */
927*2d9fd380Sjfb8856606 	if (!adapter->stopped)
928*2d9fd380Sjfb8856606 		rte_intr_disable(intr_handle);
929*2d9fd380Sjfb8856606 
930*2d9fd380Sjfb8856606 	/* Power up the phy. Needed to make the link go Up */
931*2d9fd380Sjfb8856606 	eth_igc_set_link_up(dev);
932*2d9fd380Sjfb8856606 
933*2d9fd380Sjfb8856606 	/* Put the address into the Receive Address Array */
934*2d9fd380Sjfb8856606 	igc_rar_set(hw, hw->mac.addr, 0);
935*2d9fd380Sjfb8856606 
936*2d9fd380Sjfb8856606 	/* Initialize the hardware */
937*2d9fd380Sjfb8856606 	if (igc_hardware_init(hw)) {
938*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
939*2d9fd380Sjfb8856606 		return -EIO;
940*2d9fd380Sjfb8856606 	}
941*2d9fd380Sjfb8856606 	adapter->stopped = 0;
942*2d9fd380Sjfb8856606 
943*2d9fd380Sjfb8856606 	/* check and configure queue intr-vector mapping */
944*2d9fd380Sjfb8856606 	if (rte_intr_cap_multiple(intr_handle) &&
945*2d9fd380Sjfb8856606 		dev->data->dev_conf.intr_conf.rxq) {
946*2d9fd380Sjfb8856606 		uint32_t intr_vector = dev->data->nb_rx_queues;
947*2d9fd380Sjfb8856606 		if (rte_intr_efd_enable(intr_handle, intr_vector))
948*2d9fd380Sjfb8856606 			return -1;
949*2d9fd380Sjfb8856606 	}
950*2d9fd380Sjfb8856606 
951*2d9fd380Sjfb8856606 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
952*2d9fd380Sjfb8856606 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
953*2d9fd380Sjfb8856606 			dev->data->nb_rx_queues * sizeof(int), 0);
954*2d9fd380Sjfb8856606 		if (intr_handle->intr_vec == NULL) {
955*2d9fd380Sjfb8856606 			PMD_DRV_LOG(ERR,
956*2d9fd380Sjfb8856606 				"Failed to allocate %d rx_queues intr_vec",
957*2d9fd380Sjfb8856606 				dev->data->nb_rx_queues);
958*2d9fd380Sjfb8856606 			return -ENOMEM;
959*2d9fd380Sjfb8856606 		}
960*2d9fd380Sjfb8856606 	}
961*2d9fd380Sjfb8856606 
962*2d9fd380Sjfb8856606 	/* configure msix for rx interrupt */
963*2d9fd380Sjfb8856606 	igc_configure_msix_intr(dev);
964*2d9fd380Sjfb8856606 
965*2d9fd380Sjfb8856606 	igc_tx_init(dev);
966*2d9fd380Sjfb8856606 
967*2d9fd380Sjfb8856606 	/* This can fail when allocating mbufs for descriptor rings */
968*2d9fd380Sjfb8856606 	ret = igc_rx_init(dev);
969*2d9fd380Sjfb8856606 	if (ret) {
970*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
971*2d9fd380Sjfb8856606 		igc_dev_clear_queues(dev);
972*2d9fd380Sjfb8856606 		return ret;
973*2d9fd380Sjfb8856606 	}
974*2d9fd380Sjfb8856606 
975*2d9fd380Sjfb8856606 	igc_clear_hw_cntrs_base_generic(hw);
976*2d9fd380Sjfb8856606 
977*2d9fd380Sjfb8856606 	/* VLAN Offload Settings */
978*2d9fd380Sjfb8856606 	eth_igc_vlan_offload_set(dev,
979*2d9fd380Sjfb8856606 		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
980*2d9fd380Sjfb8856606 		ETH_VLAN_EXTEND_MASK);
981*2d9fd380Sjfb8856606 
982*2d9fd380Sjfb8856606 	/* Setup link speed and duplex */
983*2d9fd380Sjfb8856606 	speeds = &dev->data->dev_conf.link_speeds;
984*2d9fd380Sjfb8856606 	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
985*2d9fd380Sjfb8856606 		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
986*2d9fd380Sjfb8856606 		hw->mac.autoneg = 1;
987*2d9fd380Sjfb8856606 	} else {
988*2d9fd380Sjfb8856606 		int num_speeds = 0;
989*2d9fd380Sjfb8856606 		bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
990*2d9fd380Sjfb8856606 
991*2d9fd380Sjfb8856606 		/* Reset */
992*2d9fd380Sjfb8856606 		hw->phy.autoneg_advertised = 0;
993*2d9fd380Sjfb8856606 
994*2d9fd380Sjfb8856606 		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
995*2d9fd380Sjfb8856606 				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
996*2d9fd380Sjfb8856606 				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
997*2d9fd380Sjfb8856606 				ETH_LINK_SPEED_FIXED)) {
998*2d9fd380Sjfb8856606 			num_speeds = -1;
999*2d9fd380Sjfb8856606 			goto error_invalid_config;
1000*2d9fd380Sjfb8856606 		}
1001*2d9fd380Sjfb8856606 		if (*speeds & ETH_LINK_SPEED_10M_HD) {
1002*2d9fd380Sjfb8856606 			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
1003*2d9fd380Sjfb8856606 			num_speeds++;
1004*2d9fd380Sjfb8856606 		}
1005*2d9fd380Sjfb8856606 		if (*speeds & ETH_LINK_SPEED_10M) {
1006*2d9fd380Sjfb8856606 			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
1007*2d9fd380Sjfb8856606 			num_speeds++;
1008*2d9fd380Sjfb8856606 		}
1009*2d9fd380Sjfb8856606 		if (*speeds & ETH_LINK_SPEED_100M_HD) {
1010*2d9fd380Sjfb8856606 			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
1011*2d9fd380Sjfb8856606 			num_speeds++;
1012*2d9fd380Sjfb8856606 		}
1013*2d9fd380Sjfb8856606 		if (*speeds & ETH_LINK_SPEED_100M) {
1014*2d9fd380Sjfb8856606 			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
1015*2d9fd380Sjfb8856606 			num_speeds++;
1016*2d9fd380Sjfb8856606 		}
1017*2d9fd380Sjfb8856606 		if (*speeds & ETH_LINK_SPEED_1G) {
1018*2d9fd380Sjfb8856606 			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
1019*2d9fd380Sjfb8856606 			num_speeds++;
1020*2d9fd380Sjfb8856606 		}
1021*2d9fd380Sjfb8856606 		if (*speeds & ETH_LINK_SPEED_2_5G) {
1022*2d9fd380Sjfb8856606 			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
1023*2d9fd380Sjfb8856606 			num_speeds++;
1024*2d9fd380Sjfb8856606 		}
1025*2d9fd380Sjfb8856606 		if (num_speeds == 0 || (!autoneg && num_speeds > 1))
1026*2d9fd380Sjfb8856606 			goto error_invalid_config;
1027*2d9fd380Sjfb8856606 
1028*2d9fd380Sjfb8856606 		/* Set/reset the mac.autoneg based on the link speed,
1029*2d9fd380Sjfb8856606 		 * fixed or not
1030*2d9fd380Sjfb8856606 		 */
1031*2d9fd380Sjfb8856606 		if (!autoneg) {
1032*2d9fd380Sjfb8856606 			hw->mac.autoneg = 0;
1033*2d9fd380Sjfb8856606 			hw->mac.forced_speed_duplex =
1034*2d9fd380Sjfb8856606 					hw->phy.autoneg_advertised;
1035*2d9fd380Sjfb8856606 		} else {
1036*2d9fd380Sjfb8856606 			hw->mac.autoneg = 1;
1037*2d9fd380Sjfb8856606 		}
1038*2d9fd380Sjfb8856606 	}
1039*2d9fd380Sjfb8856606 
1040*2d9fd380Sjfb8856606 	igc_setup_link(hw);
1041*2d9fd380Sjfb8856606 
1042*2d9fd380Sjfb8856606 	if (rte_intr_allow_others(intr_handle)) {
1043*2d9fd380Sjfb8856606 		/* check if lsc interrupt is enabled */
1044*2d9fd380Sjfb8856606 		if (dev->data->dev_conf.intr_conf.lsc)
1045*2d9fd380Sjfb8856606 			igc_lsc_interrupt_setup(dev, 1);
1046*2d9fd380Sjfb8856606 		else
1047*2d9fd380Sjfb8856606 			igc_lsc_interrupt_setup(dev, 0);
1048*2d9fd380Sjfb8856606 	} else {
1049*2d9fd380Sjfb8856606 		rte_intr_callback_unregister(intr_handle,
1050*2d9fd380Sjfb8856606 					     eth_igc_interrupt_handler,
1051*2d9fd380Sjfb8856606 					     (void *)dev);
1052*2d9fd380Sjfb8856606 		if (dev->data->dev_conf.intr_conf.lsc)
1053*2d9fd380Sjfb8856606 			PMD_DRV_LOG(INFO,
1054*2d9fd380Sjfb8856606 				"LSC interrupt cannot be enabled: no interrupt multiplexing support");
1055*2d9fd380Sjfb8856606 	}
1056*2d9fd380Sjfb8856606 
1057*2d9fd380Sjfb8856606 	/* enable uio/vfio intr/eventfd mapping */
1058*2d9fd380Sjfb8856606 	rte_intr_enable(intr_handle);
1059*2d9fd380Sjfb8856606 
1060*2d9fd380Sjfb8856606 	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1061*2d9fd380Sjfb8856606 			igc_update_queue_stats_handler, dev);
1062*2d9fd380Sjfb8856606 
1063*2d9fd380Sjfb8856606 	/* check if rxq interrupt is enabled */
1064*2d9fd380Sjfb8856606 	if (dev->data->dev_conf.intr_conf.rxq &&
1065*2d9fd380Sjfb8856606 			rte_intr_dp_is_en(intr_handle))
1066*2d9fd380Sjfb8856606 		igc_rxq_interrupt_setup(dev);
1067*2d9fd380Sjfb8856606 
1068*2d9fd380Sjfb8856606 	/* resume interrupts that were enabled before the HW reset */
1069*2d9fd380Sjfb8856606 	igc_intr_other_enable(dev);
1070*2d9fd380Sjfb8856606 
1071*2d9fd380Sjfb8856606 	eth_igc_rxtx_control(dev, true);
1072*2d9fd380Sjfb8856606 	eth_igc_link_update(dev, 0);
1073*2d9fd380Sjfb8856606 
1074*2d9fd380Sjfb8856606 	/* configure MAC-loopback mode */
1075*2d9fd380Sjfb8856606 	if (dev->data->dev_conf.lpbk_mode == 1) {
1076*2d9fd380Sjfb8856606 		uint32_t reg_val;
1077*2d9fd380Sjfb8856606 
1078*2d9fd380Sjfb8856606 		reg_val = IGC_READ_REG(hw, IGC_CTRL);
1079*2d9fd380Sjfb8856606 		reg_val &= ~IGC_CTRL_SPEED_MASK;
1080*2d9fd380Sjfb8856606 		reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
1081*2d9fd380Sjfb8856606 			IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
1082*2d9fd380Sjfb8856606 		IGC_WRITE_REG(hw, IGC_CTRL, reg_val);
1083*2d9fd380Sjfb8856606 
1084*2d9fd380Sjfb8856606 		igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
1085*2d9fd380Sjfb8856606 	}
1086*2d9fd380Sjfb8856606 
1087*2d9fd380Sjfb8856606 	return 0;
1088*2d9fd380Sjfb8856606 
1089*2d9fd380Sjfb8856606 error_invalid_config:
1090*2d9fd380Sjfb8856606 	PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
1091*2d9fd380Sjfb8856606 		     dev->data->dev_conf.link_speeds, dev->data->port_id);
1092*2d9fd380Sjfb8856606 	igc_dev_clear_queues(dev);
1093*2d9fd380Sjfb8856606 	return -EINVAL;
1094*2d9fd380Sjfb8856606 }
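
/*
 * Usage sketch (illustrative, not part of the driver): the MAC-loopback
 * branch above runs only when the application sets rte_eth_conf.lpbk_mode
 * to 1 before configuring the port, which forces the link to 2.5G
 * full-duplex. port_id and the queue counts are placeholders.
 *
 *	struct rte_eth_conf conf = { 0 };
 *	conf.lpbk_mode = 1;
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 */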
1095*2d9fd380Sjfb8856606 
1096*2d9fd380Sjfb8856606 static int
1097*2d9fd380Sjfb8856606 igc_reset_swfw_lock(struct igc_hw *hw)
1098*2d9fd380Sjfb8856606 {
1099*2d9fd380Sjfb8856606 	int ret_val;
1100*2d9fd380Sjfb8856606 
1101*2d9fd380Sjfb8856606 	/*
1102*2d9fd380Sjfb8856606 	 * Do mac ops initialization manually here, since we will need
1103*2d9fd380Sjfb8856606 	 * some function pointers set by this call.
1104*2d9fd380Sjfb8856606 	 */
1105*2d9fd380Sjfb8856606 	ret_val = igc_init_mac_params(hw);
1106*2d9fd380Sjfb8856606 	if (ret_val)
1107*2d9fd380Sjfb8856606 		return ret_val;
1108*2d9fd380Sjfb8856606 
1109*2d9fd380Sjfb8856606 	/*
1110*2d9fd380Sjfb8856606 	 * The SMBI lock should not fail at this early stage. If it does,
1111*2d9fd380Sjfb8856606 	 * it is due to an improper exit of the application,
1112*2d9fd380Sjfb8856606 	 * so force the release of the faulty lock.
1113*2d9fd380Sjfb8856606 	 */
1114*2d9fd380Sjfb8856606 	if (igc_get_hw_semaphore_generic(hw) < 0)
1115*2d9fd380Sjfb8856606 		PMD_DRV_LOG(DEBUG, "SMBI lock released");
1116*2d9fd380Sjfb8856606 
1117*2d9fd380Sjfb8856606 	igc_put_hw_semaphore_generic(hw);
1118*2d9fd380Sjfb8856606 
1119*2d9fd380Sjfb8856606 	if (hw->mac.ops.acquire_swfw_sync != NULL) {
1120*2d9fd380Sjfb8856606 		uint16_t mask;
1121*2d9fd380Sjfb8856606 
1122*2d9fd380Sjfb8856606 		/*
1123*2d9fd380Sjfb8856606 		 * The PHY lock should not fail at this early stage.
1124*2d9fd380Sjfb8856606 		 * If it does, it is due to an improper exit of the
1125*2d9fd380Sjfb8856606 		 * application, so force the release of the faulty lock.
1126*2d9fd380Sjfb8856606 		 */
1127*2d9fd380Sjfb8856606 		mask = IGC_SWFW_PHY0_SM;
1128*2d9fd380Sjfb8856606 		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
1129*2d9fd380Sjfb8856606 			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
1130*2d9fd380Sjfb8856606 				    hw->bus.func);
1131*2d9fd380Sjfb8856606 		}
1132*2d9fd380Sjfb8856606 		hw->mac.ops.release_swfw_sync(hw, mask);
1133*2d9fd380Sjfb8856606 
1134*2d9fd380Sjfb8856606 		/*
1135*2d9fd380Sjfb8856606 		 * This one is trickier since it is common to all ports; but the
1136*2d9fd380Sjfb8856606 		 * swfw_sync retries last long enough (1s) to make it almost
1137*2d9fd380Sjfb8856606 		 * certain that if the lock cannot be taken, the semaphore was
1138*2d9fd380Sjfb8856606 		 * left improperly locked.
1139*2d9fd380Sjfb8856606 		 */
1140*2d9fd380Sjfb8856606 		mask = IGC_SWFW_EEP_SM;
1141*2d9fd380Sjfb8856606 		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
1142*2d9fd380Sjfb8856606 			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
1143*2d9fd380Sjfb8856606 
1144*2d9fd380Sjfb8856606 		hw->mac.ops.release_swfw_sync(hw, mask);
1145*2d9fd380Sjfb8856606 	}
1146*2d9fd380Sjfb8856606 
1147*2d9fd380Sjfb8856606 	return IGC_SUCCESS;
1148*2d9fd380Sjfb8856606 }
1149*2d9fd380Sjfb8856606 
1150*2d9fd380Sjfb8856606 /*
1151*2d9fd380Sjfb8856606  * free all rx/tx queues.
1152*2d9fd380Sjfb8856606  */
1153*2d9fd380Sjfb8856606 static void
1154*2d9fd380Sjfb8856606 igc_dev_free_queues(struct rte_eth_dev *dev)
1155*2d9fd380Sjfb8856606 {
1156*2d9fd380Sjfb8856606 	uint16_t i;
1157*2d9fd380Sjfb8856606 
1158*2d9fd380Sjfb8856606 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1159*2d9fd380Sjfb8856606 		eth_igc_rx_queue_release(dev->data->rx_queues[i]);
1160*2d9fd380Sjfb8856606 		dev->data->rx_queues[i] = NULL;
1161*2d9fd380Sjfb8856606 	}
1162*2d9fd380Sjfb8856606 	dev->data->nb_rx_queues = 0;
1163*2d9fd380Sjfb8856606 
1164*2d9fd380Sjfb8856606 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1165*2d9fd380Sjfb8856606 		eth_igc_tx_queue_release(dev->data->tx_queues[i]);
1166*2d9fd380Sjfb8856606 		dev->data->tx_queues[i] = NULL;
1167*2d9fd380Sjfb8856606 	}
1168*2d9fd380Sjfb8856606 	dev->data->nb_tx_queues = 0;
1169*2d9fd380Sjfb8856606 }
1170*2d9fd380Sjfb8856606 
1171*2d9fd380Sjfb8856606 static int
1172*2d9fd380Sjfb8856606 eth_igc_close(struct rte_eth_dev *dev)
1173*2d9fd380Sjfb8856606 {
1174*2d9fd380Sjfb8856606 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1175*2d9fd380Sjfb8856606 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1176*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1177*2d9fd380Sjfb8856606 	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
1178*2d9fd380Sjfb8856606 	int retry = 0;
1179*2d9fd380Sjfb8856606 	int ret = 0;
1180*2d9fd380Sjfb8856606 
1181*2d9fd380Sjfb8856606 	PMD_INIT_FUNC_TRACE();
1182*2d9fd380Sjfb8856606 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1183*2d9fd380Sjfb8856606 		return 0;
1184*2d9fd380Sjfb8856606 
1185*2d9fd380Sjfb8856606 	if (!adapter->stopped)
1186*2d9fd380Sjfb8856606 		ret = eth_igc_stop(dev);
1187*2d9fd380Sjfb8856606 
1188*2d9fd380Sjfb8856606 	igc_flow_flush(dev, NULL);
1189*2d9fd380Sjfb8856606 	igc_clear_all_filter(dev);
1190*2d9fd380Sjfb8856606 
1191*2d9fd380Sjfb8856606 	igc_intr_other_disable(dev);
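	/*
	 * Retry the unregister: it can fail transiently while the
	 * interrupt callback is still executing.
	 */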
1192*2d9fd380Sjfb8856606 	do {
1193*2d9fd380Sjfb8856606 		int ret = rte_intr_callback_unregister(intr_handle,
1194*2d9fd380Sjfb8856606 				eth_igc_interrupt_handler, dev);
1195*2d9fd380Sjfb8856606 		if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
1196*2d9fd380Sjfb8856606 			break;
1197*2d9fd380Sjfb8856606 
1198*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
1199*2d9fd380Sjfb8856606 		DELAY(200 * 1000); /* delay 200ms */
1200*2d9fd380Sjfb8856606 	} while (retry++ < 5);
1201*2d9fd380Sjfb8856606 
1202*2d9fd380Sjfb8856606 	igc_phy_hw_reset(hw);
1203*2d9fd380Sjfb8856606 	igc_hw_control_release(hw);
1204*2d9fd380Sjfb8856606 	igc_dev_free_queues(dev);
1205*2d9fd380Sjfb8856606 
1206*2d9fd380Sjfb8856606 	/* Reset any pending lock */
1207*2d9fd380Sjfb8856606 	igc_reset_swfw_lock(hw);
1208*2d9fd380Sjfb8856606 
1209*2d9fd380Sjfb8856606 	return ret;
1210*2d9fd380Sjfb8856606 }
1211*2d9fd380Sjfb8856606 
1212*2d9fd380Sjfb8856606 static void
1213*2d9fd380Sjfb8856606 igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
1214*2d9fd380Sjfb8856606 {
1215*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1216*2d9fd380Sjfb8856606 
1217*2d9fd380Sjfb8856606 	hw->vendor_id = pci_dev->id.vendor_id;
1218*2d9fd380Sjfb8856606 	hw->device_id = pci_dev->id.device_id;
1219*2d9fd380Sjfb8856606 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1220*2d9fd380Sjfb8856606 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1221*2d9fd380Sjfb8856606 }
1222*2d9fd380Sjfb8856606 
1223*2d9fd380Sjfb8856606 static int
1224*2d9fd380Sjfb8856606 eth_igc_dev_init(struct rte_eth_dev *dev)
1225*2d9fd380Sjfb8856606 {
1226*2d9fd380Sjfb8856606 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1227*2d9fd380Sjfb8856606 	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
1228*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1229*2d9fd380Sjfb8856606 	int i, error = 0;
1230*2d9fd380Sjfb8856606 
1231*2d9fd380Sjfb8856606 	PMD_INIT_FUNC_TRACE();
1232*2d9fd380Sjfb8856606 	dev->dev_ops = &eth_igc_ops;
1233*2d9fd380Sjfb8856606 	dev->rx_descriptor_done	= eth_igc_rx_descriptor_done;
1234*2d9fd380Sjfb8856606 	dev->rx_queue_count = eth_igc_rx_queue_count;
1235*2d9fd380Sjfb8856606 	dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
1236*2d9fd380Sjfb8856606 	dev->tx_descriptor_status = eth_igc_tx_descriptor_status;
1237*2d9fd380Sjfb8856606 
1238*2d9fd380Sjfb8856606 	/*
1239*2d9fd380Sjfb8856606 	 * For secondary processes, we don't initialize any further, as the
1240*2d9fd380Sjfb8856606 	 * primary has already done this work. Only check that we don't need
1241*2d9fd380Sjfb8856606 	 * a different RX function.
1242*2d9fd380Sjfb8856606 	 */
1243*2d9fd380Sjfb8856606 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1244*2d9fd380Sjfb8856606 		return 0;
1245*2d9fd380Sjfb8856606 
1246*2d9fd380Sjfb8856606 	rte_eth_copy_pci_info(dev, pci_dev);
1247*2d9fd380Sjfb8856606 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1248*2d9fd380Sjfb8856606 
1249*2d9fd380Sjfb8856606 	hw->back = pci_dev;
1250*2d9fd380Sjfb8856606 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1251*2d9fd380Sjfb8856606 
1252*2d9fd380Sjfb8856606 	igc_identify_hardware(dev, pci_dev);
1253*2d9fd380Sjfb8856606 	if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
1254*2d9fd380Sjfb8856606 		error = -EIO;
1255*2d9fd380Sjfb8856606 		goto err_late;
1256*2d9fd380Sjfb8856606 	}
1257*2d9fd380Sjfb8856606 
1258*2d9fd380Sjfb8856606 	igc_get_bus_info(hw);
1259*2d9fd380Sjfb8856606 
1260*2d9fd380Sjfb8856606 	/* Reset any pending lock */
1261*2d9fd380Sjfb8856606 	if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
1262*2d9fd380Sjfb8856606 		error = -EIO;
1263*2d9fd380Sjfb8856606 		goto err_late;
1264*2d9fd380Sjfb8856606 	}
1265*2d9fd380Sjfb8856606 
1266*2d9fd380Sjfb8856606 	/* Finish initialization */
1267*2d9fd380Sjfb8856606 	if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
1268*2d9fd380Sjfb8856606 		error = -EIO;
1269*2d9fd380Sjfb8856606 		goto err_late;
1270*2d9fd380Sjfb8856606 	}
1271*2d9fd380Sjfb8856606 
1272*2d9fd380Sjfb8856606 	hw->mac.autoneg = 1;
1273*2d9fd380Sjfb8856606 	hw->phy.autoneg_wait_to_complete = 0;
1274*2d9fd380Sjfb8856606 	hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
1275*2d9fd380Sjfb8856606 
1276*2d9fd380Sjfb8856606 	/* Copper options */
1277*2d9fd380Sjfb8856606 	if (hw->phy.media_type == igc_media_type_copper) {
1278*2d9fd380Sjfb8856606 		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
1279*2d9fd380Sjfb8856606 		hw->phy.disable_polarity_correction = 0;
1280*2d9fd380Sjfb8856606 		hw->phy.ms_type = igc_ms_hw_default;
1281*2d9fd380Sjfb8856606 	}
1282*2d9fd380Sjfb8856606 
1283*2d9fd380Sjfb8856606 	/*
1284*2d9fd380Sjfb8856606 	 * Start from a known state; this is important for reading the NVM
1285*2d9fd380Sjfb8856606 	 * and the MAC address correctly.
1286*2d9fd380Sjfb8856606 	 */
1287*2d9fd380Sjfb8856606 	igc_reset_hw(hw);
1288*2d9fd380Sjfb8856606 
1289*2d9fd380Sjfb8856606 	/* Make sure we have a good EEPROM before we read from it */
1290*2d9fd380Sjfb8856606 	if (igc_validate_nvm_checksum(hw) < 0) {
1291*2d9fd380Sjfb8856606 		/*
1292*2d9fd380Sjfb8856606 		 * Some PCI-E parts fail the first check due to
1293*2d9fd380Sjfb8856606 		 * the link being in a sleep state; call it again.
1294*2d9fd380Sjfb8856606 		 * If it fails a second time, it is a real issue.
1295*2d9fd380Sjfb8856606 		 */
1296*2d9fd380Sjfb8856606 		if (igc_validate_nvm_checksum(hw) < 0) {
1297*2d9fd380Sjfb8856606 			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
1298*2d9fd380Sjfb8856606 			error = -EIO;
1299*2d9fd380Sjfb8856606 			goto err_late;
1300*2d9fd380Sjfb8856606 		}
1301*2d9fd380Sjfb8856606 	}
1302*2d9fd380Sjfb8856606 
1303*2d9fd380Sjfb8856606 	/* Read the permanent MAC address out of the EEPROM */
1304*2d9fd380Sjfb8856606 	if (igc_read_mac_addr(hw) != 0) {
1305*2d9fd380Sjfb8856606 		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
1306*2d9fd380Sjfb8856606 		error = -EIO;
1307*2d9fd380Sjfb8856606 		goto err_late;
1308*2d9fd380Sjfb8856606 	}
1309*2d9fd380Sjfb8856606 
1310*2d9fd380Sjfb8856606 	/* Allocate memory for storing MAC addresses */
1311*2d9fd380Sjfb8856606 	dev->data->mac_addrs = rte_zmalloc("igc",
1312*2d9fd380Sjfb8856606 		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
1313*2d9fd380Sjfb8856606 	if (dev->data->mac_addrs == NULL) {
1314*2d9fd380Sjfb8856606 		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
1315*2d9fd380Sjfb8856606 				RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
1316*2d9fd380Sjfb8856606 		error = -ENOMEM;
1317*2d9fd380Sjfb8856606 		goto err_late;
1318*2d9fd380Sjfb8856606 	}
1319*2d9fd380Sjfb8856606 
1320*2d9fd380Sjfb8856606 	/* Copy the permanent MAC address */
1321*2d9fd380Sjfb8856606 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1322*2d9fd380Sjfb8856606 			&dev->data->mac_addrs[0]);
1323*2d9fd380Sjfb8856606 
1324*2d9fd380Sjfb8856606 	/* Now initialize the hardware */
1325*2d9fd380Sjfb8856606 	if (igc_hardware_init(hw) != 0) {
1326*2d9fd380Sjfb8856606 		PMD_INIT_LOG(ERR, "Hardware initialization failed");
1327*2d9fd380Sjfb8856606 		rte_free(dev->data->mac_addrs);
1328*2d9fd380Sjfb8856606 		dev->data->mac_addrs = NULL;
1329*2d9fd380Sjfb8856606 		error = -ENODEV;
1330*2d9fd380Sjfb8856606 		goto err_late;
1331*2d9fd380Sjfb8856606 	}
1332*2d9fd380Sjfb8856606 
1333*2d9fd380Sjfb8856606 	hw->mac.get_link_status = 1;
1334*2d9fd380Sjfb8856606 	igc->stopped = 0;
1335*2d9fd380Sjfb8856606 
1336*2d9fd380Sjfb8856606 	/* Indicate SOL/IDER usage */
1337*2d9fd380Sjfb8856606 	if (igc_check_reset_block(hw) < 0)
1338*2d9fd380Sjfb8856606 		PMD_INIT_LOG(ERR,
1339*2d9fd380Sjfb8856606 			"PHY reset is blocked due to SOL/IDER session.");
1340*2d9fd380Sjfb8856606 
1341*2d9fd380Sjfb8856606 	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
1342*2d9fd380Sjfb8856606 			dev->data->port_id, pci_dev->id.vendor_id,
1343*2d9fd380Sjfb8856606 			pci_dev->id.device_id);
1344*2d9fd380Sjfb8856606 
1345*2d9fd380Sjfb8856606 	rte_intr_callback_register(&pci_dev->intr_handle,
1346*2d9fd380Sjfb8856606 			eth_igc_interrupt_handler, (void *)dev);
1347*2d9fd380Sjfb8856606 
1348*2d9fd380Sjfb8856606 	/* enable uio/vfio intr/eventfd mapping */
1349*2d9fd380Sjfb8856606 	rte_intr_enable(&pci_dev->intr_handle);
1350*2d9fd380Sjfb8856606 
1351*2d9fd380Sjfb8856606 	/* enable support intr */
1352*2d9fd380Sjfb8856606 	igc_intr_other_enable(dev);
1353*2d9fd380Sjfb8856606 
1354*2d9fd380Sjfb8856606 	/* initialize the per-queue stats mapping as unused */
1355*2d9fd380Sjfb8856606 	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1356*2d9fd380Sjfb8856606 		igc->txq_stats_map[i] = -1;
1357*2d9fd380Sjfb8856606 		igc->rxq_stats_map[i] = -1;
1358*2d9fd380Sjfb8856606 	}
1359*2d9fd380Sjfb8856606 
1360*2d9fd380Sjfb8856606 	igc_flow_init(dev);
1361*2d9fd380Sjfb8856606 	igc_clear_all_filter(dev);
1362*2d9fd380Sjfb8856606 	return 0;
1363*2d9fd380Sjfb8856606 
1364*2d9fd380Sjfb8856606 err_late:
1365*2d9fd380Sjfb8856606 	igc_hw_control_release(hw);
1366*2d9fd380Sjfb8856606 	return error;
1367*2d9fd380Sjfb8856606 }
1368*2d9fd380Sjfb8856606 
1369*2d9fd380Sjfb8856606 static int
1370*2d9fd380Sjfb8856606 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
1371*2d9fd380Sjfb8856606 {
1372*2d9fd380Sjfb8856606 	PMD_INIT_FUNC_TRACE();
1373*2d9fd380Sjfb8856606 	eth_igc_close(eth_dev);
1374*2d9fd380Sjfb8856606 	return 0;
1375*2d9fd380Sjfb8856606 }
1376*2d9fd380Sjfb8856606 
1377*2d9fd380Sjfb8856606 static int
1378*2d9fd380Sjfb8856606 eth_igc_reset(struct rte_eth_dev *dev)
1379*2d9fd380Sjfb8856606 {
1380*2d9fd380Sjfb8856606 	int ret;
1381*2d9fd380Sjfb8856606 
1382*2d9fd380Sjfb8856606 	PMD_INIT_FUNC_TRACE();
1383*2d9fd380Sjfb8856606 
1384*2d9fd380Sjfb8856606 	ret = eth_igc_dev_uninit(dev);
1385*2d9fd380Sjfb8856606 	if (ret)
1386*2d9fd380Sjfb8856606 		return ret;
1387*2d9fd380Sjfb8856606 
1388*2d9fd380Sjfb8856606 	return eth_igc_dev_init(dev);
1389*2d9fd380Sjfb8856606 }
1390*2d9fd380Sjfb8856606 
1391*2d9fd380Sjfb8856606 static int
1392*2d9fd380Sjfb8856606 eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
1393*2d9fd380Sjfb8856606 {
1394*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1395*2d9fd380Sjfb8856606 	uint32_t rctl;
1396*2d9fd380Sjfb8856606 
1397*2d9fd380Sjfb8856606 	rctl = IGC_READ_REG(hw, IGC_RCTL);
1398*2d9fd380Sjfb8856606 	rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
1399*2d9fd380Sjfb8856606 	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1400*2d9fd380Sjfb8856606 	return 0;
1401*2d9fd380Sjfb8856606 }
1402*2d9fd380Sjfb8856606 
1403*2d9fd380Sjfb8856606 static int
1404*2d9fd380Sjfb8856606 eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
1405*2d9fd380Sjfb8856606 {
1406*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1407*2d9fd380Sjfb8856606 	uint32_t rctl;
1408*2d9fd380Sjfb8856606 
1409*2d9fd380Sjfb8856606 	rctl = IGC_READ_REG(hw, IGC_RCTL);
1410*2d9fd380Sjfb8856606 	rctl &= (~IGC_RCTL_UPE);
1411*2d9fd380Sjfb8856606 	if (dev->data->all_multicast == 1)
1412*2d9fd380Sjfb8856606 		rctl |= IGC_RCTL_MPE;
1413*2d9fd380Sjfb8856606 	else
1414*2d9fd380Sjfb8856606 		rctl &= (~IGC_RCTL_MPE);
1415*2d9fd380Sjfb8856606 	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1416*2d9fd380Sjfb8856606 	return 0;
1417*2d9fd380Sjfb8856606 }
1418*2d9fd380Sjfb8856606 
1419*2d9fd380Sjfb8856606 static int
1420*2d9fd380Sjfb8856606 eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
1421*2d9fd380Sjfb8856606 {
1422*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1423*2d9fd380Sjfb8856606 	uint32_t rctl;
1424*2d9fd380Sjfb8856606 
1425*2d9fd380Sjfb8856606 	rctl = IGC_READ_REG(hw, IGC_RCTL);
1426*2d9fd380Sjfb8856606 	rctl |= IGC_RCTL_MPE;
1427*2d9fd380Sjfb8856606 	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1428*2d9fd380Sjfb8856606 	return 0;
1429*2d9fd380Sjfb8856606 }
1430*2d9fd380Sjfb8856606 
1431*2d9fd380Sjfb8856606 static int
1432*2d9fd380Sjfb8856606 eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
1433*2d9fd380Sjfb8856606 {
1434*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1435*2d9fd380Sjfb8856606 	uint32_t rctl;
1436*2d9fd380Sjfb8856606 
1437*2d9fd380Sjfb8856606 	if (dev->data->promiscuous == 1)
1438*2d9fd380Sjfb8856606 		return 0;	/* must remain in all_multicast mode */
1439*2d9fd380Sjfb8856606 
1440*2d9fd380Sjfb8856606 	rctl = IGC_READ_REG(hw, IGC_RCTL);
1441*2d9fd380Sjfb8856606 	rctl &= (~IGC_RCTL_MPE);
1442*2d9fd380Sjfb8856606 	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1443*2d9fd380Sjfb8856606 	return 0;
1444*2d9fd380Sjfb8856606 }
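
/*
 * Caller-side note (illustrative): in the standard ethdev dispatch these
 * four callbacks back rte_eth_promiscuous_enable/disable() and
 * rte_eth_allmulticast_enable/disable(). As the early return above shows,
 * disabling all-multicast while promiscuous mode is on is a no-op, since
 * MPE must stay set. port_id is a placeholder:
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	rte_eth_allmulticast_disable(port_id);
 */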
1445*2d9fd380Sjfb8856606 
1446*2d9fd380Sjfb8856606 static int
1447*2d9fd380Sjfb8856606 eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
1448*2d9fd380Sjfb8856606 		       size_t fw_size)
1449*2d9fd380Sjfb8856606 {
1450*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1451*2d9fd380Sjfb8856606 	struct igc_fw_version fw;
1452*2d9fd380Sjfb8856606 	int ret;
1453*2d9fd380Sjfb8856606 
1454*2d9fd380Sjfb8856606 	igc_get_fw_version(hw, &fw);
1455*2d9fd380Sjfb8856606 
1456*2d9fd380Sjfb8856606 	/* if option rom is valid, display its version too */
1457*2d9fd380Sjfb8856606 	if (fw.or_valid) {
1458*2d9fd380Sjfb8856606 		ret = snprintf(fw_version, fw_size,
1459*2d9fd380Sjfb8856606 			 "%d.%d, 0x%08x, %d.%d.%d",
1460*2d9fd380Sjfb8856606 			 fw.eep_major, fw.eep_minor, fw.etrack_id,
1461*2d9fd380Sjfb8856606 			 fw.or_major, fw.or_build, fw.or_patch);
1462*2d9fd380Sjfb8856606 	/* no option rom */
1463*2d9fd380Sjfb8856606 	} else {
1464*2d9fd380Sjfb8856606 		if (fw.etrack_id != 0x0000) {
1465*2d9fd380Sjfb8856606 			ret = snprintf(fw_version, fw_size,
1466*2d9fd380Sjfb8856606 				 "%d.%d, 0x%08x",
1467*2d9fd380Sjfb8856606 				 fw.eep_major, fw.eep_minor,
1468*2d9fd380Sjfb8856606 				 fw.etrack_id);
1469*2d9fd380Sjfb8856606 		} else {
1470*2d9fd380Sjfb8856606 			ret = snprintf(fw_version, fw_size,
1471*2d9fd380Sjfb8856606 				 "%d.%d.%d",
1472*2d9fd380Sjfb8856606 				 fw.eep_major, fw.eep_minor,
1473*2d9fd380Sjfb8856606 				 fw.eep_build);
1474*2d9fd380Sjfb8856606 		}
1475*2d9fd380Sjfb8856606 	}
1476*2d9fd380Sjfb8856606 
1477*2d9fd380Sjfb8856606 	ret += 1; /* add the size of '\0' */
1478*2d9fd380Sjfb8856606 	if (fw_size < (u32)ret)
1479*2d9fd380Sjfb8856606 		return ret;
1480*2d9fd380Sjfb8856606 	else
1481*2d9fd380Sjfb8856606 		return 0;
1482*2d9fd380Sjfb8856606 }
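
/*
 * Caller-side sketch (illustrative, port_id is a placeholder): a return
 * value greater than zero from the generic API is the buffer size the
 * callback above computed, so the caller can retry with a larger buffer.
 *
 *	char fw[64];
 *	int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *	if (ret == 0)
 *		printf("firmware: %s\n", fw);
 */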
1483*2d9fd380Sjfb8856606 
1484*2d9fd380Sjfb8856606 static int
1485*2d9fd380Sjfb8856606 eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1486*2d9fd380Sjfb8856606 {
1487*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1488*2d9fd380Sjfb8856606 
1489*2d9fd380Sjfb8856606 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1490*2d9fd380Sjfb8856606 	dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
1491*2d9fd380Sjfb8856606 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1492*2d9fd380Sjfb8856606 	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
1493*2d9fd380Sjfb8856606 	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
1494*2d9fd380Sjfb8856606 	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1495*2d9fd380Sjfb8856606 
1496*2d9fd380Sjfb8856606 	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
1497*2d9fd380Sjfb8856606 	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
1498*2d9fd380Sjfb8856606 	dev_info->max_vmdq_pools = 0;
1499*2d9fd380Sjfb8856606 
1500*2d9fd380Sjfb8856606 	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
1501*2d9fd380Sjfb8856606 	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
1502*2d9fd380Sjfb8856606 	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
1503*2d9fd380Sjfb8856606 
1504*2d9fd380Sjfb8856606 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1505*2d9fd380Sjfb8856606 		.rx_thresh = {
1506*2d9fd380Sjfb8856606 			.pthresh = IGC_DEFAULT_RX_PTHRESH,
1507*2d9fd380Sjfb8856606 			.hthresh = IGC_DEFAULT_RX_HTHRESH,
1508*2d9fd380Sjfb8856606 			.wthresh = IGC_DEFAULT_RX_WTHRESH,
1509*2d9fd380Sjfb8856606 		},
1510*2d9fd380Sjfb8856606 		.rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
1511*2d9fd380Sjfb8856606 		.rx_drop_en = 0,
1512*2d9fd380Sjfb8856606 		.offloads = 0,
1513*2d9fd380Sjfb8856606 	};
1514*2d9fd380Sjfb8856606 
1515*2d9fd380Sjfb8856606 	dev_info->default_txconf = (struct rte_eth_txconf) {
1516*2d9fd380Sjfb8856606 		.tx_thresh = {
1517*2d9fd380Sjfb8856606 			.pthresh = IGC_DEFAULT_TX_PTHRESH,
1518*2d9fd380Sjfb8856606 			.hthresh = IGC_DEFAULT_TX_HTHRESH,
1519*2d9fd380Sjfb8856606 			.wthresh = IGC_DEFAULT_TX_WTHRESH,
1520*2d9fd380Sjfb8856606 		},
1521*2d9fd380Sjfb8856606 		.offloads = 0,
1522*2d9fd380Sjfb8856606 	};
1523*2d9fd380Sjfb8856606 
1524*2d9fd380Sjfb8856606 	dev_info->rx_desc_lim = rx_desc_lim;
1525*2d9fd380Sjfb8856606 	dev_info->tx_desc_lim = tx_desc_lim;
1526*2d9fd380Sjfb8856606 
1527*2d9fd380Sjfb8856606 	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
1528*2d9fd380Sjfb8856606 			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
1529*2d9fd380Sjfb8856606 			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
1530*2d9fd380Sjfb8856606 
1531*2d9fd380Sjfb8856606 	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
1532*2d9fd380Sjfb8856606 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1533*2d9fd380Sjfb8856606 	return 0;
1534*2d9fd380Sjfb8856606 }
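
/*
 * Caller-side sketch (illustrative): applications read these limits via
 * the generic query and size their configuration accordingly; port_id
 * is a placeholder.
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t nb_rxq = 4;
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		nb_rxq = RTE_MIN(nb_rxq, info.max_rx_queues);
 */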
1535*2d9fd380Sjfb8856606 
1536*2d9fd380Sjfb8856606 static int
1537*2d9fd380Sjfb8856606 eth_igc_led_on(struct rte_eth_dev *dev)
1538*2d9fd380Sjfb8856606 {
1539*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1540*2d9fd380Sjfb8856606 
1541*2d9fd380Sjfb8856606 	return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
1542*2d9fd380Sjfb8856606 }
1543*2d9fd380Sjfb8856606 
1544*2d9fd380Sjfb8856606 static int
1545*2d9fd380Sjfb8856606 eth_igc_led_off(struct rte_eth_dev *dev)
1546*2d9fd380Sjfb8856606 {
1547*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1548*2d9fd380Sjfb8856606 
1549*2d9fd380Sjfb8856606 	return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
1550*2d9fd380Sjfb8856606 }
1551*2d9fd380Sjfb8856606 
1552*2d9fd380Sjfb8856606 static const uint32_t *
1553*2d9fd380Sjfb8856606 eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
1554*2d9fd380Sjfb8856606 {
1555*2d9fd380Sjfb8856606 	static const uint32_t ptypes[] = {
1556*2d9fd380Sjfb8856606 		/* refers to rx_desc_pkt_info_to_pkt_type() */
1557*2d9fd380Sjfb8856606 		RTE_PTYPE_L2_ETHER,
1558*2d9fd380Sjfb8856606 		RTE_PTYPE_L3_IPV4,
1559*2d9fd380Sjfb8856606 		RTE_PTYPE_L3_IPV4_EXT,
1560*2d9fd380Sjfb8856606 		RTE_PTYPE_L3_IPV6,
1561*2d9fd380Sjfb8856606 		RTE_PTYPE_L3_IPV6_EXT,
1562*2d9fd380Sjfb8856606 		RTE_PTYPE_L4_TCP,
1563*2d9fd380Sjfb8856606 		RTE_PTYPE_L4_UDP,
1564*2d9fd380Sjfb8856606 		RTE_PTYPE_L4_SCTP,
1565*2d9fd380Sjfb8856606 		RTE_PTYPE_TUNNEL_IP,
1566*2d9fd380Sjfb8856606 		RTE_PTYPE_INNER_L3_IPV6,
1567*2d9fd380Sjfb8856606 		RTE_PTYPE_INNER_L3_IPV6_EXT,
1568*2d9fd380Sjfb8856606 		RTE_PTYPE_INNER_L4_TCP,
1569*2d9fd380Sjfb8856606 		RTE_PTYPE_INNER_L4_UDP,
1570*2d9fd380Sjfb8856606 		RTE_PTYPE_UNKNOWN
1571*2d9fd380Sjfb8856606 	};
1572*2d9fd380Sjfb8856606 
1573*2d9fd380Sjfb8856606 	return ptypes;
1574*2d9fd380Sjfb8856606 }
1575*2d9fd380Sjfb8856606 
1576*2d9fd380Sjfb8856606 static int
1577*2d9fd380Sjfb8856606 eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1578*2d9fd380Sjfb8856606 {
1579*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1580*2d9fd380Sjfb8856606 	uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
1581*2d9fd380Sjfb8856606 	uint32_t rctl;
1582*2d9fd380Sjfb8856606 
1583*2d9fd380Sjfb8856606 	/* if extended VLAN has been enabled */
1584*2d9fd380Sjfb8856606 	if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
1585*2d9fd380Sjfb8856606 		frame_size += VLAN_TAG_SIZE;
1586*2d9fd380Sjfb8856606 
1587*2d9fd380Sjfb8856606 	/* check that mtu is within the allowed range */
1588*2d9fd380Sjfb8856606 	if (mtu < RTE_ETHER_MIN_MTU ||
1589*2d9fd380Sjfb8856606 		frame_size > MAX_RX_JUMBO_FRAME_SIZE)
1590*2d9fd380Sjfb8856606 		return -EINVAL;
1591*2d9fd380Sjfb8856606 
1592*2d9fd380Sjfb8856606 	/*
1593*2d9fd380Sjfb8856606 	 * Refuse an MTU that requires scattered-packet support when
1594*2d9fd380Sjfb8856606 	 * that feature has not been enabled beforehand.
1595*2d9fd380Sjfb8856606 	 */
1596*2d9fd380Sjfb8856606 	if (!dev->data->scattered_rx &&
1597*2d9fd380Sjfb8856606 	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
1598*2d9fd380Sjfb8856606 		return -EINVAL;
1599*2d9fd380Sjfb8856606 
1600*2d9fd380Sjfb8856606 	rctl = IGC_READ_REG(hw, IGC_RCTL);
1601*2d9fd380Sjfb8856606 
1602*2d9fd380Sjfb8856606 	/* switch to jumbo mode if needed */
1603*2d9fd380Sjfb8856606 	if (mtu > RTE_ETHER_MTU) {
1604*2d9fd380Sjfb8856606 		dev->data->dev_conf.rxmode.offloads |=
1605*2d9fd380Sjfb8856606 			DEV_RX_OFFLOAD_JUMBO_FRAME;
1606*2d9fd380Sjfb8856606 		rctl |= IGC_RCTL_LPE;
1607*2d9fd380Sjfb8856606 	} else {
1608*2d9fd380Sjfb8856606 		dev->data->dev_conf.rxmode.offloads &=
1609*2d9fd380Sjfb8856606 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
1610*2d9fd380Sjfb8856606 		rctl &= ~IGC_RCTL_LPE;
1611*2d9fd380Sjfb8856606 	}
1612*2d9fd380Sjfb8856606 	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1613*2d9fd380Sjfb8856606 
1614*2d9fd380Sjfb8856606 	/* update max frame size */
1615*2d9fd380Sjfb8856606 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1616*2d9fd380Sjfb8856606 
1617*2d9fd380Sjfb8856606 	IGC_WRITE_REG(hw, IGC_RLPML,
1618*2d9fd380Sjfb8856606 			dev->data->dev_conf.rxmode.max_rx_pkt_len);
1619*2d9fd380Sjfb8856606 
1620*2d9fd380Sjfb8856606 	return 0;
1621*2d9fd380Sjfb8856606 }
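
/*
 * Worked example (illustrative): with IGC_ETH_OVERHEAD = 14 (Ethernet
 * header) + 4 (CRC) + 4 (VLAN tag) = 22 bytes, an MTU of 1500 gives a
 * frame size of 1522, while an MTU of 9000 gives 9022 and switches the
 * port into jumbo mode. A caller uses the generic API (port_id is a
 * placeholder):
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		rte_exit(EXIT_FAILURE, "MTU rejected\n");
 */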
1622*2d9fd380Sjfb8856606 
1623*2d9fd380Sjfb8856606 static int
1624*2d9fd380Sjfb8856606 eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1625*2d9fd380Sjfb8856606 		uint32_t index, uint32_t pool)
1626*2d9fd380Sjfb8856606 {
1627*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1628*2d9fd380Sjfb8856606 
1629*2d9fd380Sjfb8856606 	igc_rar_set(hw, mac_addr->addr_bytes, index);
1630*2d9fd380Sjfb8856606 	RTE_SET_USED(pool);
1631*2d9fd380Sjfb8856606 	return 0;
1632*2d9fd380Sjfb8856606 }
1633*2d9fd380Sjfb8856606 
1634*2d9fd380Sjfb8856606 static void
1635*2d9fd380Sjfb8856606 eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
1636*2d9fd380Sjfb8856606 {
1637*2d9fd380Sjfb8856606 	uint8_t addr[RTE_ETHER_ADDR_LEN];
1638*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1639*2d9fd380Sjfb8856606 
1640*2d9fd380Sjfb8856606 	memset(addr, 0, sizeof(addr));
1641*2d9fd380Sjfb8856606 	igc_rar_set(hw, addr, index);
1642*2d9fd380Sjfb8856606 }
1643*2d9fd380Sjfb8856606 
1644*2d9fd380Sjfb8856606 static int
1645*2d9fd380Sjfb8856606 eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
1646*2d9fd380Sjfb8856606 			struct rte_ether_addr *addr)
1647*2d9fd380Sjfb8856606 {
1648*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1649*2d9fd380Sjfb8856606 	igc_rar_set(hw, addr->addr_bytes, 0);
1650*2d9fd380Sjfb8856606 	return 0;
1651*2d9fd380Sjfb8856606 }
1652*2d9fd380Sjfb8856606 
1653*2d9fd380Sjfb8856606 static int
1654*2d9fd380Sjfb8856606 eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
1655*2d9fd380Sjfb8856606 			 struct rte_ether_addr *mc_addr_set,
1656*2d9fd380Sjfb8856606 			 uint32_t nb_mc_addr)
1657*2d9fd380Sjfb8856606 {
1658*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1659*2d9fd380Sjfb8856606 	igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
1660*2d9fd380Sjfb8856606 	return 0;
1661*2d9fd380Sjfb8856606 }
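
/*
 * Caller-side sketch (illustrative): the generic API hands the whole
 * multicast list over at once, and passing nb_mc_addr = 0 clears it.
 * port_id and the address value are placeholders.
 *
 *	struct rte_ether_addr mc = {
 *		.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }
 *	};
 *	rte_eth_dev_set_mc_addr_list(port_id, &mc, 1);
 */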
1662*2d9fd380Sjfb8856606 
1663*2d9fd380Sjfb8856606 /*
1664*2d9fd380Sjfb8856606  * Read hardware registers
1665*2d9fd380Sjfb8856606  */
1666*2d9fd380Sjfb8856606 static void
1667*2d9fd380Sjfb8856606 igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
1668*2d9fd380Sjfb8856606 {
1669*2d9fd380Sjfb8856606 	int pause_frames;
1670*2d9fd380Sjfb8856606 
1671*2d9fd380Sjfb8856606 	uint64_t old_gprc  = stats->gprc;
1672*2d9fd380Sjfb8856606 	uint64_t old_gptc  = stats->gptc;
1673*2d9fd380Sjfb8856606 	uint64_t old_tpr   = stats->tpr;
1674*2d9fd380Sjfb8856606 	uint64_t old_tpt   = stats->tpt;
1675*2d9fd380Sjfb8856606 	uint64_t old_rpthc = stats->rpthc;
1676*2d9fd380Sjfb8856606 	uint64_t old_hgptc = stats->hgptc;
1677*2d9fd380Sjfb8856606 
1678*2d9fd380Sjfb8856606 	stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
1679*2d9fd380Sjfb8856606 	stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
1680*2d9fd380Sjfb8856606 	stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
1681*2d9fd380Sjfb8856606 	stats->mpc += IGC_READ_REG(hw, IGC_MPC);
1682*2d9fd380Sjfb8856606 	stats->scc += IGC_READ_REG(hw, IGC_SCC);
1683*2d9fd380Sjfb8856606 	stats->ecol += IGC_READ_REG(hw, IGC_ECOL);
1684*2d9fd380Sjfb8856606 
1685*2d9fd380Sjfb8856606 	stats->mcc += IGC_READ_REG(hw, IGC_MCC);
1686*2d9fd380Sjfb8856606 	stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
1687*2d9fd380Sjfb8856606 	stats->colc += IGC_READ_REG(hw, IGC_COLC);
1688*2d9fd380Sjfb8856606 
1689*2d9fd380Sjfb8856606 	stats->dc += IGC_READ_REG(hw, IGC_DC);
1690*2d9fd380Sjfb8856606 	stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
1691*2d9fd380Sjfb8856606 	stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
1692*2d9fd380Sjfb8856606 	stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
1693*2d9fd380Sjfb8856606 	stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
1694*2d9fd380Sjfb8856606 	stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);
1695*2d9fd380Sjfb8856606 
1696*2d9fd380Sjfb8856606 	/*
1697*2d9fd380Sjfb8856606 	 * For watchdog management we need to know if we have been
1698*2d9fd380Sjfb8856606 	 * paused during the last interval, so capture that here.
1699*2d9fd380Sjfb8856606 	 */
1700*2d9fd380Sjfb8856606 	pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
1701*2d9fd380Sjfb8856606 	stats->xoffrxc += pause_frames;
1702*2d9fd380Sjfb8856606 	stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
1703*2d9fd380Sjfb8856606 	stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
1704*2d9fd380Sjfb8856606 	stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
1705*2d9fd380Sjfb8856606 	stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
1706*2d9fd380Sjfb8856606 	stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
1707*2d9fd380Sjfb8856606 	stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
1708*2d9fd380Sjfb8856606 	stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
1709*2d9fd380Sjfb8856606 	stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
1710*2d9fd380Sjfb8856606 	stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
1711*2d9fd380Sjfb8856606 	stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
1712*2d9fd380Sjfb8856606 	stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
1713*2d9fd380Sjfb8856606 	stats->gptc += IGC_READ_REG(hw, IGC_GPTC);
1714*2d9fd380Sjfb8856606 
1715*2d9fd380Sjfb8856606 	/* For the 64-bit byte counters the low dword must be read first. */
1716*2d9fd380Sjfb8856606 	/* Both registers clear on the read of the high dword */
1717*2d9fd380Sjfb8856606 
1718*2d9fd380Sjfb8856606 	/* Workaround: CRC bytes are included in the size; subtract 4 bytes per packet */
1719*2d9fd380Sjfb8856606 	stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
1720*2d9fd380Sjfb8856606 	stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
1721*2d9fd380Sjfb8856606 	stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
1722*2d9fd380Sjfb8856606 	stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
1723*2d9fd380Sjfb8856606 	stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
1724*2d9fd380Sjfb8856606 	stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;
1725*2d9fd380Sjfb8856606 
1726*2d9fd380Sjfb8856606 	stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
1727*2d9fd380Sjfb8856606 	stats->ruc += IGC_READ_REG(hw, IGC_RUC);
1728*2d9fd380Sjfb8856606 	stats->rfc += IGC_READ_REG(hw, IGC_RFC);
1729*2d9fd380Sjfb8856606 	stats->roc += IGC_READ_REG(hw, IGC_ROC);
1730*2d9fd380Sjfb8856606 	stats->rjc += IGC_READ_REG(hw, IGC_RJC);
1731*2d9fd380Sjfb8856606 
1732*2d9fd380Sjfb8856606 	stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
1733*2d9fd380Sjfb8856606 	stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
1734*2d9fd380Sjfb8856606 	stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
1735*2d9fd380Sjfb8856606 	stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
1736*2d9fd380Sjfb8856606 	stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
1737*2d9fd380Sjfb8856606 	stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
1738*2d9fd380Sjfb8856606 	stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);
1739*2d9fd380Sjfb8856606 
1740*2d9fd380Sjfb8856606 	stats->tpr += IGC_READ_REG(hw, IGC_TPR);
1741*2d9fd380Sjfb8856606 	stats->tpt += IGC_READ_REG(hw, IGC_TPT);
1742*2d9fd380Sjfb8856606 
1743*2d9fd380Sjfb8856606 	stats->tor += IGC_READ_REG(hw, IGC_TORL);
1744*2d9fd380Sjfb8856606 	stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
1745*2d9fd380Sjfb8856606 	stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
1746*2d9fd380Sjfb8856606 	stats->tot += IGC_READ_REG(hw, IGC_TOTL);
1747*2d9fd380Sjfb8856606 	stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
1748*2d9fd380Sjfb8856606 	stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;
1749*2d9fd380Sjfb8856606 
1750*2d9fd380Sjfb8856606 	stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
1751*2d9fd380Sjfb8856606 	stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
1752*2d9fd380Sjfb8856606 	stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
1753*2d9fd380Sjfb8856606 	stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
1754*2d9fd380Sjfb8856606 	stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
1755*2d9fd380Sjfb8856606 	stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
1756*2d9fd380Sjfb8856606 	stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
1757*2d9fd380Sjfb8856606 	stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
1758*2d9fd380Sjfb8856606 	stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);
1759*2d9fd380Sjfb8856606 
1760*2d9fd380Sjfb8856606 	stats->iac += IGC_READ_REG(hw, IGC_IAC);
1761*2d9fd380Sjfb8856606 	stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);
1762*2d9fd380Sjfb8856606 	stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);
1763*2d9fd380Sjfb8856606 	stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);
1764*2d9fd380Sjfb8856606 
1765*2d9fd380Sjfb8856606 	/* Host to Card Statistics */
1766*2d9fd380Sjfb8856606 	stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);
1767*2d9fd380Sjfb8856606 	stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);
1768*2d9fd380Sjfb8856606 	stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
1769*2d9fd380Sjfb8856606 	stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);
1770*2d9fd380Sjfb8856606 	stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);
1771*2d9fd380Sjfb8856606 	stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
1772*2d9fd380Sjfb8856606 	stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);
1773*2d9fd380Sjfb8856606 }
1774*2d9fd380Sjfb8856606 
1775*2d9fd380Sjfb8856606 /*
1776*2d9fd380Sjfb8856606  * Write 0 to all queue status registers
1777*2d9fd380Sjfb8856606  */
1778*2d9fd380Sjfb8856606 static void
1779*2d9fd380Sjfb8856606 igc_reset_queue_stats_register(struct igc_hw *hw)
1780*2d9fd380Sjfb8856606 {
1781*2d9fd380Sjfb8856606 	int i;
1782*2d9fd380Sjfb8856606 
1783*2d9fd380Sjfb8856606 	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1784*2d9fd380Sjfb8856606 		IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);
1785*2d9fd380Sjfb8856606 		IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);
1786*2d9fd380Sjfb8856606 		IGC_WRITE_REG(hw, IGC_PQGORC(i), 0);
1787*2d9fd380Sjfb8856606 		IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);
1788*2d9fd380Sjfb8856606 		IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);
1789*2d9fd380Sjfb8856606 		IGC_WRITE_REG(hw, IGC_RQDPC(i), 0);
1790*2d9fd380Sjfb8856606 		IGC_WRITE_REG(hw, IGC_TQDPC(i), 0);
1791*2d9fd380Sjfb8856606 	}
1792*2d9fd380Sjfb8856606 }
1793*2d9fd380Sjfb8856606 
1794*2d9fd380Sjfb8856606 /*
1795*2d9fd380Sjfb8856606  * Read all hardware queue status registers
1796*2d9fd380Sjfb8856606  */
1797*2d9fd380Sjfb8856606 static void
1798*2d9fd380Sjfb8856606 igc_read_queue_stats_register(struct rte_eth_dev *dev)
1799*2d9fd380Sjfb8856606 {
1800*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1801*2d9fd380Sjfb8856606 	struct igc_hw_queue_stats *queue_stats =
1802*2d9fd380Sjfb8856606 				IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1803*2d9fd380Sjfb8856606 	int i;
1804*2d9fd380Sjfb8856606 
1805*2d9fd380Sjfb8856606 	/*
1806*2d9fd380Sjfb8856606 	 * These registers are not cleared on read. Furthermore, each register
1807*2d9fd380Sjfb8856606 	 * wraps back to 0x00000000 on the increment after reaching 0xFFFFFFFF
1808*2d9fd380Sjfb8856606 	 * and then continues normal count operation.
1809*2d9fd380Sjfb8856606 	 */
1810*2d9fd380Sjfb8856606 	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1811*2d9fd380Sjfb8856606 		union {
1812*2d9fd380Sjfb8856606 			u64 ddword;
1813*2d9fd380Sjfb8856606 			u32 dword[2];
1814*2d9fd380Sjfb8856606 		} value;
1815*2d9fd380Sjfb8856606 		u32 tmp;
1816*2d9fd380Sjfb8856606 
1817*2d9fd380Sjfb8856606 		/*
1818*2d9fd380Sjfb8856606 		 * Read the register first; if the value is smaller than the
1819*2d9fd380Sjfb8856606 		 * previous read, the register has wrapped around, so increment
1820*2d9fd380Sjfb8856606 		 * the high 4 bytes by 1 and replace the low 4 bytes with the
1821*2d9fd380Sjfb8856606 		 * new value.
1822*2d9fd380Sjfb8856606 		 */
1823*2d9fd380Sjfb8856606 		tmp = IGC_READ_REG(hw, IGC_PQGPRC(i));
1824*2d9fd380Sjfb8856606 		value.ddword = queue_stats->pqgprc[i];
1825*2d9fd380Sjfb8856606 		if (value.dword[U32_0_IN_U64] > tmp)
1826*2d9fd380Sjfb8856606 			value.dword[U32_1_IN_U64]++;
1827*2d9fd380Sjfb8856606 		value.dword[U32_0_IN_U64] = tmp;
1828*2d9fd380Sjfb8856606 		queue_stats->pqgprc[i] = value.ddword;
1829*2d9fd380Sjfb8856606 
1830*2d9fd380Sjfb8856606 		tmp = IGC_READ_REG(hw, IGC_PQGPTC(i));
1831*2d9fd380Sjfb8856606 		value.ddword = queue_stats->pqgptc[i];
1832*2d9fd380Sjfb8856606 		if (value.dword[U32_0_IN_U64] > tmp)
1833*2d9fd380Sjfb8856606 			value.dword[U32_1_IN_U64]++;
1834*2d9fd380Sjfb8856606 		value.dword[U32_0_IN_U64] = tmp;
1835*2d9fd380Sjfb8856606 		queue_stats->pqgptc[i] = value.ddword;
1836*2d9fd380Sjfb8856606 
1837*2d9fd380Sjfb8856606 		tmp = IGC_READ_REG(hw, IGC_PQGORC(i));
1838*2d9fd380Sjfb8856606 		value.ddword = queue_stats->pqgorc[i];
1839*2d9fd380Sjfb8856606 		if (value.dword[U32_0_IN_U64] > tmp)
1840*2d9fd380Sjfb8856606 			value.dword[U32_1_IN_U64]++;
1841*2d9fd380Sjfb8856606 		value.dword[U32_0_IN_U64] = tmp;
1842*2d9fd380Sjfb8856606 		queue_stats->pqgorc[i] = value.ddword;
1843*2d9fd380Sjfb8856606 
1844*2d9fd380Sjfb8856606 		tmp = IGC_READ_REG(hw, IGC_PQGOTC(i));
1845*2d9fd380Sjfb8856606 		value.ddword = queue_stats->pqgotc[i];
1846*2d9fd380Sjfb8856606 		if (value.dword[U32_0_IN_U64] > tmp)
1847*2d9fd380Sjfb8856606 			value.dword[U32_1_IN_U64]++;
1848*2d9fd380Sjfb8856606 		value.dword[U32_0_IN_U64] = tmp;
1849*2d9fd380Sjfb8856606 		queue_stats->pqgotc[i] = value.ddword;
1850*2d9fd380Sjfb8856606 
1851*2d9fd380Sjfb8856606 		tmp = IGC_READ_REG(hw, IGC_PQMPRC(i));
1852*2d9fd380Sjfb8856606 		value.ddword = queue_stats->pqmprc[i];
1853*2d9fd380Sjfb8856606 		if (value.dword[U32_0_IN_U64] > tmp)
1854*2d9fd380Sjfb8856606 			value.dword[U32_1_IN_U64]++;
1855*2d9fd380Sjfb8856606 		value.dword[U32_0_IN_U64] = tmp;
1856*2d9fd380Sjfb8856606 		queue_stats->pqmprc[i] = value.ddword;
1857*2d9fd380Sjfb8856606 
1858*2d9fd380Sjfb8856606 		tmp = IGC_READ_REG(hw, IGC_RQDPC(i));
1859*2d9fd380Sjfb8856606 		value.ddword = queue_stats->rqdpc[i];
1860*2d9fd380Sjfb8856606 		if (value.dword[U32_0_IN_U64] > tmp)
1861*2d9fd380Sjfb8856606 			value.dword[U32_1_IN_U64]++;
1862*2d9fd380Sjfb8856606 		value.dword[U32_0_IN_U64] = tmp;
1863*2d9fd380Sjfb8856606 		queue_stats->rqdpc[i] = value.ddword;
1864*2d9fd380Sjfb8856606 
1865*2d9fd380Sjfb8856606 		tmp = IGC_READ_REG(hw, IGC_TQDPC(i));
1866*2d9fd380Sjfb8856606 		value.ddword = queue_stats->tqdpc[i];
1867*2d9fd380Sjfb8856606 		if (value.dword[U32_0_IN_U64] > tmp)
1868*2d9fd380Sjfb8856606 			value.dword[U32_1_IN_U64]++;
1869*2d9fd380Sjfb8856606 		value.dword[U32_0_IN_U64] = tmp;
1870*2d9fd380Sjfb8856606 		queue_stats->tqdpc[i] = value.ddword;
1871*2d9fd380Sjfb8856606 	}
1872*2d9fd380Sjfb8856606 }
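
/*
 * The seven blocks above repeat one pattern: widen a wrapping 32-bit
 * hardware counter into a 64-bit software total. A hedged sketch of the
 * same logic factored into a helper (hypothetical, not in this driver):
 *
 *	static u64
 *	igc_extend_counter(u64 prev, u32 cur)
 *	{
 *		u64 high = prev & 0xffffffff00000000ULL;
 *
 *		if ((u32)prev > cur)
 *			high += 1ULL << 32;
 *		return high | cur;
 *	}
 */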
1873*2d9fd380Sjfb8856606 
1874*2d9fd380Sjfb8856606 static int
1875*2d9fd380Sjfb8856606 eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1876*2d9fd380Sjfb8856606 {
1877*2d9fd380Sjfb8856606 	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
1878*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1879*2d9fd380Sjfb8856606 	struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
1880*2d9fd380Sjfb8856606 	struct igc_hw_queue_stats *queue_stats =
1881*2d9fd380Sjfb8856606 			IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1882*2d9fd380Sjfb8856606 	int i;
1883*2d9fd380Sjfb8856606 
1884*2d9fd380Sjfb8856606 	/*
1885*2d9fd380Sjfb8856606 	 * Cancel status handler since it will read the queue status registers
1886*2d9fd380Sjfb8856606 	 */
1887*2d9fd380Sjfb8856606 	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
1888*2d9fd380Sjfb8856606 
1889*2d9fd380Sjfb8856606 	/* Read the stats registers */
1890*2d9fd380Sjfb8856606 	igc_read_queue_stats_register(dev);
1891*2d9fd380Sjfb8856606 	igc_read_stats_registers(hw, stats);
1892*2d9fd380Sjfb8856606 
1893*2d9fd380Sjfb8856606 	if (rte_stats == NULL) {
1894*2d9fd380Sjfb8856606 		/* Restart queue status handler */
1895*2d9fd380Sjfb8856606 		rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1896*2d9fd380Sjfb8856606 				igc_update_queue_stats_handler, dev);
1897*2d9fd380Sjfb8856606 		return -EINVAL;
1898*2d9fd380Sjfb8856606 	}
1899*2d9fd380Sjfb8856606 
1900*2d9fd380Sjfb8856606 	/* Rx Errors */
1901*2d9fd380Sjfb8856606 	rte_stats->imissed = stats->mpc;
1902*2d9fd380Sjfb8856606 	rte_stats->ierrors = stats->crcerrs +
1903*2d9fd380Sjfb8856606 			stats->rlec + stats->ruc + stats->roc +
1904*2d9fd380Sjfb8856606 			stats->rxerrc + stats->algnerrc;
1905*2d9fd380Sjfb8856606 
1906*2d9fd380Sjfb8856606 	/* Tx Errors */
1907*2d9fd380Sjfb8856606 	rte_stats->oerrors = stats->ecol + stats->latecol;
1908*2d9fd380Sjfb8856606 
1909*2d9fd380Sjfb8856606 	rte_stats->ipackets = stats->gprc;
1910*2d9fd380Sjfb8856606 	rte_stats->opackets = stats->gptc;
1911*2d9fd380Sjfb8856606 	rte_stats->ibytes   = stats->gorc;
1912*2d9fd380Sjfb8856606 	rte_stats->obytes   = stats->gotc;
1913*2d9fd380Sjfb8856606 
1914*2d9fd380Sjfb8856606 	/* Get per-queue statistics */
1915*2d9fd380Sjfb8856606 	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1916*2d9fd380Sjfb8856606 		/* Get TX queue statistics */
1917*2d9fd380Sjfb8856606 		int map_id = igc->txq_stats_map[i];
1918*2d9fd380Sjfb8856606 		if (map_id >= 0) {
1919*2d9fd380Sjfb8856606 			rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
1920*2d9fd380Sjfb8856606 			rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
1921*2d9fd380Sjfb8856606 		}
1922*2d9fd380Sjfb8856606 		/* Get RX queue statistics */
1923*2d9fd380Sjfb8856606 		map_id = igc->rxq_stats_map[i];
1924*2d9fd380Sjfb8856606 		if (map_id >= 0) {
1925*2d9fd380Sjfb8856606 			rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
1926*2d9fd380Sjfb8856606 			rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
1927*2d9fd380Sjfb8856606 			rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
1928*2d9fd380Sjfb8856606 		}
1929*2d9fd380Sjfb8856606 	}
1930*2d9fd380Sjfb8856606 
1931*2d9fd380Sjfb8856606 	/* Restart queue status handler */
1932*2d9fd380Sjfb8856606 	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1933*2d9fd380Sjfb8856606 			igc_update_queue_stats_handler, dev);
1934*2d9fd380Sjfb8856606 	return 0;
1935*2d9fd380Sjfb8856606 }
1936*2d9fd380Sjfb8856606 
1937*2d9fd380Sjfb8856606 static int
1938*2d9fd380Sjfb8856606 eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1939*2d9fd380Sjfb8856606 		   unsigned int n)
1940*2d9fd380Sjfb8856606 {
1941*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1942*2d9fd380Sjfb8856606 	struct igc_hw_stats *hw_stats =
1943*2d9fd380Sjfb8856606 			IGC_DEV_PRIVATE_STATS(dev);
1944*2d9fd380Sjfb8856606 	unsigned int i;
1945*2d9fd380Sjfb8856606 
1946*2d9fd380Sjfb8856606 	igc_read_stats_registers(hw, hw_stats);
1947*2d9fd380Sjfb8856606 
1948*2d9fd380Sjfb8856606 	if (n < IGC_NB_XSTATS)
1949*2d9fd380Sjfb8856606 		return IGC_NB_XSTATS;
1950*2d9fd380Sjfb8856606 
1951*2d9fd380Sjfb8856606 	/* If this is a reset, xstats is NULL and we have cleared the
1952*2d9fd380Sjfb8856606 	 * registers by reading them.
1953*2d9fd380Sjfb8856606 	 */
1954*2d9fd380Sjfb8856606 	if (!xstats)
1955*2d9fd380Sjfb8856606 		return 0;
1956*2d9fd380Sjfb8856606 
1957*2d9fd380Sjfb8856606 	/* Extended stats */
1958*2d9fd380Sjfb8856606 	for (i = 0; i < IGC_NB_XSTATS; i++) {
1959*2d9fd380Sjfb8856606 		xstats[i].id = i;
1960*2d9fd380Sjfb8856606 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1961*2d9fd380Sjfb8856606 			rte_igc_stats_strings[i].offset);
1962*2d9fd380Sjfb8856606 	}
1963*2d9fd380Sjfb8856606 
1964*2d9fd380Sjfb8856606 	return IGC_NB_XSTATS;
1965*2d9fd380Sjfb8856606 }
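
/*
 * The n < IGC_NB_XSTATS check above serves the standard two-call pattern
 * of the xstats API; a caller sketch (port_id is a placeholder):
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(nb * sizeof(*xs));
 *	if (xs != NULL)
 *		nb = rte_eth_xstats_get(port_id, xs, nb);
 */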
1966*2d9fd380Sjfb8856606 
1967*2d9fd380Sjfb8856606 static int
1968*2d9fd380Sjfb8856606 eth_igc_xstats_reset(struct rte_eth_dev *dev)
1969*2d9fd380Sjfb8856606 {
1970*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1971*2d9fd380Sjfb8856606 	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
1972*2d9fd380Sjfb8856606 	struct igc_hw_queue_stats *queue_stats =
1973*2d9fd380Sjfb8856606 			IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1974*2d9fd380Sjfb8856606 
1975*2d9fd380Sjfb8856606 	/* Cancel the queue stats handler to avoid conflicts */
1976*2d9fd380Sjfb8856606 	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
1977*2d9fd380Sjfb8856606 
1978*2d9fd380Sjfb8856606 	/* HW registers are cleared on read */
1979*2d9fd380Sjfb8856606 	igc_reset_queue_stats_register(hw);
1980*2d9fd380Sjfb8856606 	igc_read_stats_registers(hw, hw_stats);
1981*2d9fd380Sjfb8856606 
1982*2d9fd380Sjfb8856606 	/* Reset software totals */
1983*2d9fd380Sjfb8856606 	memset(hw_stats, 0, sizeof(*hw_stats));
1984*2d9fd380Sjfb8856606 	memset(queue_stats, 0, sizeof(*queue_stats));
1985*2d9fd380Sjfb8856606 
1986*2d9fd380Sjfb8856606 	/* Restart the queue status handler */
1987*2d9fd380Sjfb8856606 	rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
1988*2d9fd380Sjfb8856606 			dev);
1989*2d9fd380Sjfb8856606 
1990*2d9fd380Sjfb8856606 	return 0;
1991*2d9fd380Sjfb8856606 }
1992*2d9fd380Sjfb8856606 
1993*2d9fd380Sjfb8856606 static int
1994*2d9fd380Sjfb8856606 eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1995*2d9fd380Sjfb8856606 	struct rte_eth_xstat_name *xstats_names, unsigned int size)
1996*2d9fd380Sjfb8856606 {
1997*2d9fd380Sjfb8856606 	unsigned int i;
1998*2d9fd380Sjfb8856606 
1999*2d9fd380Sjfb8856606 	if (xstats_names == NULL)
2000*2d9fd380Sjfb8856606 		return IGC_NB_XSTATS;
2001*2d9fd380Sjfb8856606 
2002*2d9fd380Sjfb8856606 	if (size < IGC_NB_XSTATS) {
2003*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "not enough buffers!");
2004*2d9fd380Sjfb8856606 		return IGC_NB_XSTATS;
2005*2d9fd380Sjfb8856606 	}
2006*2d9fd380Sjfb8856606 
2007*2d9fd380Sjfb8856606 	for (i = 0; i < IGC_NB_XSTATS; i++)
2008*2d9fd380Sjfb8856606 		strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
2009*2d9fd380Sjfb8856606 			sizeof(xstats_names[i].name));
2010*2d9fd380Sjfb8856606 
2011*2d9fd380Sjfb8856606 	return IGC_NB_XSTATS;
2012*2d9fd380Sjfb8856606 }
2013*2d9fd380Sjfb8856606 
2014*2d9fd380Sjfb8856606 static int
2015*2d9fd380Sjfb8856606 eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
2016*2d9fd380Sjfb8856606 		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
2017*2d9fd380Sjfb8856606 		unsigned int limit)
2018*2d9fd380Sjfb8856606 {
2019*2d9fd380Sjfb8856606 	unsigned int i;
2020*2d9fd380Sjfb8856606 
2021*2d9fd380Sjfb8856606 	if (!ids)
2022*2d9fd380Sjfb8856606 		return eth_igc_xstats_get_names(dev, xstats_names, limit);
2023*2d9fd380Sjfb8856606 
2024*2d9fd380Sjfb8856606 	for (i = 0; i < limit; i++) {
2025*2d9fd380Sjfb8856606 		if (ids[i] >= IGC_NB_XSTATS) {
2026*2d9fd380Sjfb8856606 			PMD_DRV_LOG(ERR, "id value isn't valid");
2027*2d9fd380Sjfb8856606 			return -EINVAL;
2028*2d9fd380Sjfb8856606 		}
2029*2d9fd380Sjfb8856606 		strlcpy(xstats_names[i].name,
2030*2d9fd380Sjfb8856606 			rte_igc_stats_strings[ids[i]].name,
2031*2d9fd380Sjfb8856606 			sizeof(xstats_names[i].name));
2032*2d9fd380Sjfb8856606 	}
2033*2d9fd380Sjfb8856606 	return limit;
2034*2d9fd380Sjfb8856606 }
2035*2d9fd380Sjfb8856606 
2036*2d9fd380Sjfb8856606 static int
2037*2d9fd380Sjfb8856606 eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2038*2d9fd380Sjfb8856606 		uint64_t *values, unsigned int n)
2039*2d9fd380Sjfb8856606 {
2040*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2041*2d9fd380Sjfb8856606 	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
2042*2d9fd380Sjfb8856606 	unsigned int i;
2043*2d9fd380Sjfb8856606 
2044*2d9fd380Sjfb8856606 	igc_read_stats_registers(hw, hw_stats);
2045*2d9fd380Sjfb8856606 
2046*2d9fd380Sjfb8856606 	if (!ids) {
2047*2d9fd380Sjfb8856606 		if (n < IGC_NB_XSTATS)
2048*2d9fd380Sjfb8856606 			return IGC_NB_XSTATS;
2049*2d9fd380Sjfb8856606 
2050*2d9fd380Sjfb8856606 		/* If this is a reset, values is NULL and we have cleared the
2051*2d9fd380Sjfb8856606 		 * registers by reading them.
2052*2d9fd380Sjfb8856606 		 */
2053*2d9fd380Sjfb8856606 		if (!values)
2054*2d9fd380Sjfb8856606 			return 0;
2055*2d9fd380Sjfb8856606 
2056*2d9fd380Sjfb8856606 		/* Extended stats */
2057*2d9fd380Sjfb8856606 		for (i = 0; i < IGC_NB_XSTATS; i++)
2058*2d9fd380Sjfb8856606 			values[i] = *(uint64_t *)(((char *)hw_stats) +
2059*2d9fd380Sjfb8856606 					rte_igc_stats_strings[i].offset);
2060*2d9fd380Sjfb8856606 
2061*2d9fd380Sjfb8856606 		return IGC_NB_XSTATS;
2062*2d9fd380Sjfb8856606 
2063*2d9fd380Sjfb8856606 	} else {
2064*2d9fd380Sjfb8856606 		for (i = 0; i < n; i++) {
2065*2d9fd380Sjfb8856606 			if (ids[i] >= IGC_NB_XSTATS) {
2066*2d9fd380Sjfb8856606 				PMD_DRV_LOG(ERR, "id value isn't valid");
2067*2d9fd380Sjfb8856606 				return -EINVAL;
2068*2d9fd380Sjfb8856606 			}
2069*2d9fd380Sjfb8856606 			values[i] = *(uint64_t *)(((char *)hw_stats) +
2070*2d9fd380Sjfb8856606 					rte_igc_stats_strings[ids[i]].offset);
2071*2d9fd380Sjfb8856606 		}
2072*2d9fd380Sjfb8856606 		return n;
2073*2d9fd380Sjfb8856606 	}
2074*2d9fd380Sjfb8856606 }
2075*2d9fd380Sjfb8856606 
2076*2d9fd380Sjfb8856606 static int
2077*2d9fd380Sjfb8856606 eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
2078*2d9fd380Sjfb8856606 		uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
2079*2d9fd380Sjfb8856606 {
2080*2d9fd380Sjfb8856606 	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
2081*2d9fd380Sjfb8856606 
2082*2d9fd380Sjfb8856606 	/* check queue id is valid */
2083*2d9fd380Sjfb8856606 	if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
2084*2d9fd380Sjfb8856606 		PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
2085*2d9fd380Sjfb8856606 			queue_id, IGC_QUEUE_PAIRS_NUM - 1);
2086*2d9fd380Sjfb8856606 		return -EINVAL;
2087*2d9fd380Sjfb8856606 	}
2088*2d9fd380Sjfb8856606 
2089*2d9fd380Sjfb8856606 	/* store the stats mapping index */
2090*2d9fd380Sjfb8856606 	if (is_rx)
2091*2d9fd380Sjfb8856606 		igc->rxq_stats_map[queue_id] = stat_idx;
2092*2d9fd380Sjfb8856606 	else
2093*2d9fd380Sjfb8856606 		igc->txq_stats_map[queue_id] = stat_idx;
2094*2d9fd380Sjfb8856606 
2095*2d9fd380Sjfb8856606 	return 0;
2096*2d9fd380Sjfb8856606 }
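
/*
 * Caller-side sketch (illustrative): the ethdev layer reaches this
 * callback through the per-direction wrappers. Mapping RX queue 0 and
 * TX queue 0 of a hypothetical port_id to stats slot 0 makes their
 * counters show up in rte_eth_stats.q_ipackets[0] and q_opackets[0]:
 *
 *	rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0);
 *	rte_eth_dev_set_tx_queue_stats_mapping(port_id, 0, 0);
 */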
2097*2d9fd380Sjfb8856606 
2098*2d9fd380Sjfb8856606 static int
2099*2d9fd380Sjfb8856606 eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2100*2d9fd380Sjfb8856606 {
2101*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2102*2d9fd380Sjfb8856606 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2103*2d9fd380Sjfb8856606 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2104*2d9fd380Sjfb8856606 	uint32_t vec = IGC_MISC_VEC_ID;
2105*2d9fd380Sjfb8856606 
2106*2d9fd380Sjfb8856606 	if (rte_intr_allow_others(intr_handle))
2107*2d9fd380Sjfb8856606 		vec = IGC_RX_VEC_START;
2108*2d9fd380Sjfb8856606 
2109*2d9fd380Sjfb8856606 	uint32_t mask = 1u << (queue_id + vec);
2110*2d9fd380Sjfb8856606 
2111*2d9fd380Sjfb8856606 	IGC_WRITE_REG(hw, IGC_EIMC, mask);
2112*2d9fd380Sjfb8856606 	IGC_WRITE_FLUSH(hw);
2113*2d9fd380Sjfb8856606 
2114*2d9fd380Sjfb8856606 	return 0;
2115*2d9fd380Sjfb8856606 }
2116*2d9fd380Sjfb8856606 
2117*2d9fd380Sjfb8856606 static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev * dev,uint16_t queue_id)2118*2d9fd380Sjfb8856606 eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2119*2d9fd380Sjfb8856606 {
2120*2d9fd380Sjfb8856606 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2121*2d9fd380Sjfb8856606 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2122*2d9fd380Sjfb8856606 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2123*2d9fd380Sjfb8856606 	uint32_t vec = IGC_MISC_VEC_ID;
2124*2d9fd380Sjfb8856606 
2125*2d9fd380Sjfb8856606 	if (rte_intr_allow_others(intr_handle))
2126*2d9fd380Sjfb8856606 		vec = IGC_RX_VEC_START;
2127*2d9fd380Sjfb8856606 
2128*2d9fd380Sjfb8856606 	uint32_t mask = 1u << (queue_id + vec);
2129*2d9fd380Sjfb8856606 
2130*2d9fd380Sjfb8856606 	IGC_WRITE_REG(hw, IGC_EIMS, mask);
2131*2d9fd380Sjfb8856606 	IGC_WRITE_FLUSH(hw);
2132*2d9fd380Sjfb8856606 
2133*2d9fd380Sjfb8856606 	rte_intr_enable(intr_handle);
2134*2d9fd380Sjfb8856606 
2135*2d9fd380Sjfb8856606 	return 0;
2136*2d9fd380Sjfb8856606 }
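
/*
 * Illustrative sketch (not part of the driver): the usual interrupt-mode
 * RX loop that exercises the two callbacks above. The epoll plumbing is
 * done by rte_eth_dev_rx_intr_ctl_q(); "port_id" and "queue_id" are
 * assumed from the surrounding application.
 *
 *	// register the queue interrupt with the per-thread epoll fd
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
 *			RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
 *
 *	// arm the interrupt (sets the EIMS bit), then sleep until traffic
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	struct rte_epoll_event event;
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
 *
 *	// mask it again (sets the EIMC bit) and poll the queue dry
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	// ... rte_eth_rx_burst() until the queue is empty ...
 */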
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to the actual setting
	 * of the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	if (ctrl & IGC_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & IGC_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;
	int err;

	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;

	rx_buf_size = igc_get_rx_buffer_size(hw);
	PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* Reserve room for at least one Ethernet frame above the watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if (fc_conf->high_water > max_high_water ||
		fc_conf->high_water < fc_conf->low_water) {
		PMD_DRV_LOG(ERR,
			"Incorrect high(%u)/low(%u) water value, max is %u",
			fc_conf->high_water, fc_conf->low_water,
			max_high_water);
		return -EINVAL;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		hw->fc.requested_mode = igc_fc_none;
		break;
	case RTE_FC_RX_PAUSE:
		hw->fc.requested_mode = igc_fc_rx_pause;
		break;
	case RTE_FC_TX_PAUSE:
		hw->fc.requested_mode = igc_fc_tx_pause;
		break;
	case RTE_FC_FULL:
		hw->fc.requested_mode = igc_fc_full;
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
		return -EINVAL;
	}

	hw->fc.pause_time     = fc_conf->pause_time;
	hw->fc.high_water     = fc_conf->high_water;
	hw->fc.low_water      = fc_conf->low_water;
	hw->fc.send_xon	      = fc_conf->send_xon;

	err = igc_setup_link_generic(hw);
	if (err == IGC_SUCCESS) {
		/*
		 * Check whether MAC control frames should be forwarded - the
		 * hardware setup code has no native capability for that, so
		 * write the register ourselves.
		 */
		rctl = IGC_READ_REG(hw, IGC_RCTL);

		/* set or clear the RCTL.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= IGC_RCTL_PMCF;
		else
			rctl &= ~IGC_RCTL_PMCF;

		IGC_WRITE_REG(hw, IGC_RCTL, rctl);
		IGC_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
	return -EIO;
}
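
/*
 * Illustrative sketch (not part of the driver): configuring link flow
 * control through the two callbacks above. The watermark values are
 * examples only and must satisfy the rx_buf_size check in
 * eth_igc_flow_ctrl_set(); "port_id" is assumed.
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	memset(&fc_conf, 0, sizeof(fc_conf));
 *	rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *	fc_conf.mode = RTE_FC_FULL;	// pause in both directions
 *	fc_conf.high_water = 1500;	// example watermarks, in bytes
 *	fc_conf.low_water = 1000;
 *	fc_conf.pause_time = 0x680;	// matches IGC_FC_PAUSE_TIME
 *	if (rte_eth_dev_flow_ctrl_set(port_id, &fc_conf) != 0)
 *		printf("flow control setup failed\n");
 */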
static int
eth_igc_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured(%d) doesn't match the number supported by hardware(%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* set redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta, reg;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* if there is no need to update the register */
		if (!mask ||
		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* check whether the old register value must be read first */
		if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
			reg.dword = 0;
		else
			reg.dword = IGC_READ_REG_LE_VALUE(hw,
					IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));

		/* update the register */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta.bytes[j] =
					(uint8_t)reta_conf[idx].reta[shift + j];
			else
				reta.bytes[j] = reg.bytes[j];
		}
		IGC_WRITE_REG_LE_VALUE(hw,
			IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
	}

	return 0;
}
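
/*
 * Illustrative sketch (not part of the driver): building the reta_conf
 * array consumed by eth_igc_rss_reta_update() above. Each
 * rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64) entries, and
 * its mask selects which of them are actually written. "port_id" and
 * "nb_rxq" are assumed from the surrounding application.
 *
 *	struct rte_eth_rss_reta_entry64
 *		reta_conf[ETH_RSS_RETA_SIZE_128 / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *		// spread table entries round-robin over the RX queues
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta
 *			[i % RTE_RETA_GROUP_SIZE] = i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf,
 *			ETH_RSS_RETA_SIZE_128);
 */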
static int
eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured(%d) doesn't match the number supported by hardware(%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* read redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* if there is no need to read the register */
		if (!mask ||
		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* read the register and extract the queue indices */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		reta.dword = IGC_READ_REG_LE_VALUE(hw,
				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta_conf[idx].reta[shift + j] = reta.bytes[j];
		}
	}

	return 0;
}
static int
eth_igc_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_hw_rss_hash_set(hw, rss_conf);
	return 0;
}

static int
eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t rss_hf;

	if (hash_key != NULL) {
		int i;

		/* the key buffer size must match the hardware key size */
		if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
			PMD_DRV_LOG(ERR,
				"RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
				rss_conf->rss_key_len, IGC_HKEY_SIZE);
			return -EINVAL;
		}

		/* read RSS key from register */
		for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
			hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
	}

	/* get RSS functions configured in MRQC register */
	mrqc = IGC_READ_REG(hw, IGC_MRQC);
	if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
		return 0;

	rss_hf = 0;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
		rss_hf |= ETH_RSS_IPV4;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
		rss_hf |= ETH_RSS_IPV6;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
		rss_hf |= ETH_RSS_IPV6_UDP_EX;

	rss_conf->rss_hf |= rss_hf;
	return 0;
}
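
/*
 * Illustrative sketch (not part of the driver): querying the RSS
 * configuration through the callback above. The key buffer must match
 * IGC_HKEY_SIZE exactly (40 bytes is assumed here, from IGC_HKEY_MAX_INDEX
 * 32-bit registers) or the driver rejects it; "port_id" is assumed and
 * <inttypes.h> is needed for PRIx64.
 *
 *	uint8_t key[40];
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *		.rss_hf = 0,
 *	};
 *
 *	if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) == 0)
 *		printf("active hash functions: 0x%" PRIx64 "\n",
 *			rss_conf.rss_hf);
 */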
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
	vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
	vfta = shadow_vfta->vfta[vid_idx];
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
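
/*
 * Illustrative sketch (not part of the driver): the VFTA addressing used
 * above. The 4096-bit VLAN filter table is spread over 32-bit registers;
 * the upper bits of the VLAN id select the register and the low five bits
 * select the bit within it (assuming the usual shift of 5 and a mask
 * covering IGC_VFTA_SIZE registers). For vlan_id = 100:
 *
 *	vid_idx = (100 >> 5) & 0x7F;	// register 3
 *	vid_bit = 1u << (100 & 0x1F);	// bit 4, i.e. 0x10
 *
 * An application reaches this callback with:
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);
 */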
static void
igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_read_reg_check_clear_bits(hw, IGC_RCTL,
			IGC_RCTL_CFIEN | IGC_RCTL_VFE);
}

static void
igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t reg_val;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg_val = IGC_READ_REG(hw, IGC_RCTL);
	reg_val &= ~IGC_RCTL_CFIEN;
	reg_val |= IGC_RCTL_VFE;
	IGC_WRITE_REG(hw, IGC_RCTL, reg_val);

	/* restore VFTA table */
	for (i = 0; i < IGC_VFTA_SIZE; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
}

static void
igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static void
igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static int
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* if extended VLAN hasn't been enabled */
	if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
		return 0;

	if ((dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
		goto write_ext_vlan;

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
		RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
			dev->data->dev_conf.rxmode.max_rx_pkt_len,
			VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
		return -EINVAL;
	}
	dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

write_ext_vlan:
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

static int
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* if extended VLAN has already been enabled */
	if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
		return 0;

	if ((dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
		goto write_ext_vlan;

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
		MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
			VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

write_ext_vlan:
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

static int
eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			igc_vlan_hw_strip_enable(dev);
		else
			igc_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			igc_vlan_hw_filter_enable(dev);
		else
			igc_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			return igc_vlan_hw_extend_enable(dev);
		else
			return igc_vlan_hw_extend_disable(dev);
	}

	return 0;
}
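
/*
 * Illustrative sketch (not part of the driver): toggling the offloads
 * handled by eth_igc_vlan_offload_set() above. rte_eth_dev_set_vlan_offload()
 * takes the desired offload state as a bitmask, updates dev_conf, and passes
 * the driver a mask of the groups that changed; "port_id" is assumed.
 *
 *	int offloads = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	// request VLAN stripping on top of the current configuration
 *	offloads |= ETH_VLAN_STRIP_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, offloads);
 */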
static int
eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		      enum rte_vlan_type vlan_type,
		      uint16_t tpid)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t reg_val;

	/* only the outer TPID of a double VLAN can be configured */
	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		reg_val = IGC_READ_REG(hw, IGC_VET);
		reg_val = (reg_val & (~IGC_VET_EXT)) |
			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
		IGC_WRITE_REG(hw, IGC_VET, reg_val);

		return 0;
	}

	/* all other TPID values are read-only */
	PMD_DRV_LOG(ERR, "Not supported");
	return -ENOTSUP;
}

static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct igc_adapter), eth_igc_dev_init);
}

static int
eth_igc_pci_remove(struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
}

static struct rte_pci_driver rte_igc_pmd = {
	.id_table = pci_id_igc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igc_pci_probe,
	.remove = eth_igc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");