/* xref: /f-stack/dpdk/drivers/net/igc/igc_ethdev.c (revision 5edfaa42) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */

#include <stdint.h>
#include <string.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "igc_logs.h"
#include "igc_txrx.h"
#include "igc_filter.h"
#include "igc_flow.h"

#define IGC_INTEL_VENDOR_ID		0x8086

/*
 * The overhead from MTU to max frame size.
 * VLAN is considered, so the tag size needs to be counted.
 */
#define IGC_ETH_OVERHEAD		(RTE_ETHER_HDR_LEN + \
					RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)

#define IGC_FC_PAUSE_TIME		0x0680
#define IGC_LINK_UPDATE_CHECK_TIMEOUT	90  /* 9s */
#define IGC_LINK_UPDATE_CHECK_INTERVAL	100 /* ms */

#define IGC_MISC_VEC_ID			RTE_INTR_VEC_ZERO_OFFSET
#define IGC_RX_VEC_START		RTE_INTR_VEC_RXTX_OFFSET
#define IGC_MSIX_OTHER_INTR_VEC		0   /* MSI-X other interrupt vector */
#define IGC_FLAG_NEED_LINK_UPDATE	(1u << 0)	/* need to update link */

#define IGC_DEFAULT_RX_FREE_THRESH	32

#define IGC_DEFAULT_RX_PTHRESH		8
#define IGC_DEFAULT_RX_HTHRESH		8
#define IGC_DEFAULT_RX_WTHRESH		4

#define IGC_DEFAULT_TX_PTHRESH		8
#define IGC_DEFAULT_TX_HTHRESH		1
#define IGC_DEFAULT_TX_WTHRESH		16

/* External VLAN Enable bit mask */
#define IGC_CTRL_EXT_EXT_VLAN		(1u << 26)

/* Speed select */
#define IGC_CTRL_SPEED_MASK		(7u << 8)
#define IGC_CTRL_SPEED_2500		(6u << 8)

/* External VLAN Ether Type bit mask and shift */
#define IGC_VET_EXT			0xFFFF0000
#define IGC_VET_EXT_SHIFT		16

/* Force EEE Auto-negotiation */
#define IGC_EEER_EEE_FRC_AN		(1u << 28)

/* Per Queue Good Packets Received Count */
#define IGC_PQGPRC(idx)		(0x10010 + 0x100 * (idx))
/* Per Queue Good Octets Received Count */
#define IGC_PQGORC(idx)		(0x10018 + 0x100 * (idx))
/* Per Queue Good Octets Transmitted Count */
#define IGC_PQGOTC(idx)		(0x10034 + 0x100 * (idx))
/* Per Queue Multicast Packets Received Count */
#define IGC_PQMPRC(idx)		(0x10038 + 0x100 * (idx))
/* Transmit Queue Drop Packet Count */
#define IGC_TQDPC(idx)		(0xe030 + 0x40 * (idx))
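/*
 * Note: each per-queue counter above uses a fixed per-queue stride
 * (0x100 for the 0x10000-range counters, 0x40 for TQDPC), so (idx) is
 * the queue index in the range 0..IGC_QUEUE_PAIRS_NUM-1.
 */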

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define U32_0_IN_U64		0	/* lower bytes of u64 */
#define U32_1_IN_U64		1	/* higher bytes of u64 */
#else
#define U32_0_IN_U64		1
#define U32_1_IN_U64		0
#endif

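/*
 * These indices address the 32-bit halves of a u64 portably through a
 * union, e.g. dword[U32_0_IN_U64] is always the low 32 bits; see
 * igc_read_queue_stats_register() below for the usage.
 */
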
#define IGC_ALARM_INTERVAL	8000000u
/* us; some per-queue registers wrap around back to 0 after about 13.6s. */

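/*
 * Back-of-envelope check (illustrative): a 32-bit octet counter at 2.5Gbps
 * line rate wraps after roughly 2^32 / (2.5e9 / 8) ~= 13.7 seconds, so an
 * 8-second sampling interval observes each wrap at most once.
 */
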
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IGC_MAX_RXD,
	.nb_min = IGC_MIN_RXD,
	.nb_align = IGC_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IGC_MAX_TXD,
	.nb_min = IGC_MIN_TXD,
	.nb_align = IGC_TXD_ALIGN,
	.nb_seg_max = IGC_TX_MAX_SEG,
	.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
};

static const struct rte_pci_id pci_id_igc_map[] = {
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K)  },
	{ .vendor_id = 0, /* sentinel */ },
};

/* store statistic names and their offsets in the stats structure */
struct rte_igc_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)},
	{"rx_errors", offsetof(struct igc_hw_stats, rxerrc)},
	{"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct igc_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct igc_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)},
	{"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)},
	{"rx_length_errors", offsetof(struct igc_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)},
	{"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)},
	{"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct igc_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct igc_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct igc_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct igc_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)},
	{"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)},
	{"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)},
	{"rx_descriptor_lower_threshold",
		offsetof(struct igc_hw_stats, icrxdmtc)},
};

#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \
		sizeof(rte_igc_stats_strings[0]))

static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static int eth_igc_set_link_up(struct rte_eth_dev *dev);
static int eth_igc_set_link_down(struct rte_eth_dev *dev);
static int eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info);
static int eth_igc_led_on(struct rte_eth_dev *dev);
static int eth_igc_led_off(struct rte_eth_dev *dev);
static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igc_rar_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *addr);
static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr);
static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_igc_stats_get(struct rte_eth_dev *dev,
			struct rte_eth_stats *rte_stats);
static int eth_igc_xstats_get(struct rte_eth_dev *dev,
			struct rte_eth_xstat *xstats, unsigned int n);
static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values, unsigned int n);
static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int size);
static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
		unsigned int limit);
static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size);
static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf);
static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf);
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		      enum rte_vlan_type vlan_type, uint16_t tpid);

static const struct eth_dev_ops eth_igc_ops = {
	.dev_configure		= eth_igc_configure,
	.link_update		= eth_igc_link_update,
	.dev_stop		= eth_igc_stop,
	.dev_start		= eth_igc_start,
	.dev_close		= eth_igc_close,
	.dev_reset		= eth_igc_reset,
	.dev_set_link_up	= eth_igc_set_link_up,
	.dev_set_link_down	= eth_igc_set_link_down,
	.promiscuous_enable	= eth_igc_promiscuous_enable,
	.promiscuous_disable	= eth_igc_promiscuous_disable,
	.allmulticast_enable	= eth_igc_allmulticast_enable,
	.allmulticast_disable	= eth_igc_allmulticast_disable,
	.fw_version_get		= eth_igc_fw_version_get,
	.dev_infos_get		= eth_igc_infos_get,
	.dev_led_on		= eth_igc_led_on,
	.dev_led_off		= eth_igc_led_off,
	.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
	.mtu_set		= eth_igc_mtu_set,
	.mac_addr_add		= eth_igc_rar_set,
	.mac_addr_remove	= eth_igc_rar_clear,
	.mac_addr_set		= eth_igc_default_mac_addr_set,
	.set_mc_addr_list	= eth_igc_set_mc_addr_list,

	.rx_queue_setup		= eth_igc_rx_queue_setup,
	.rx_queue_release	= eth_igc_rx_queue_release,
	.tx_queue_setup		= eth_igc_tx_queue_setup,
	.tx_queue_release	= eth_igc_tx_queue_release,
	.tx_done_cleanup	= eth_igc_tx_done_cleanup,
	.rxq_info_get		= eth_igc_rxq_info_get,
	.txq_info_get		= eth_igc_txq_info_get,
	.stats_get		= eth_igc_stats_get,
	.xstats_get		= eth_igc_xstats_get,
	.xstats_get_by_id	= eth_igc_xstats_get_by_id,
	.xstats_get_names_by_id	= eth_igc_xstats_get_names_by_id,
	.xstats_get_names	= eth_igc_xstats_get_names,
	.stats_reset		= eth_igc_xstats_reset,
	.xstats_reset		= eth_igc_xstats_reset,
	.queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
	.rx_queue_intr_enable	= eth_igc_rx_queue_intr_enable,
	.rx_queue_intr_disable	= eth_igc_rx_queue_intr_disable,
	.flow_ctrl_get		= eth_igc_flow_ctrl_get,
	.flow_ctrl_set		= eth_igc_flow_ctrl_set,
	.reta_update		= eth_igc_rss_reta_update,
	.reta_query		= eth_igc_rss_reta_query,
	.rss_hash_update	= eth_igc_rss_hash_update,
	.rss_hash_conf_get	= eth_igc_rss_hash_conf_get,
	.vlan_filter_set	= eth_igc_vlan_filter_set,
	.vlan_offload_set	= eth_igc_vlan_offload_set,
	.vlan_tpid_set		= eth_igc_vlan_tpid_set,
	.vlan_strip_queue_set	= eth_igc_vlan_strip_queue_set,
	.filter_ctrl		= eth_igc_filter_ctrl,
};

/*
 * multiple queue mode checking
 */
static int
igc_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		PMD_INIT_LOG(ERR, "SRIOV is not supported.");
		return -EINVAL;
	}

	if (rx_mq_mode != ETH_MQ_RX_NONE &&
		rx_mq_mode != ETH_MQ_RX_RSS) {
		/* RSS together with VMDq not supported */
		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				rx_mq_mode);
		return -EINVAL;
	}

	/* To not break software that sets an invalid mode, only display
	 * a warning if an invalid mode is used.
	 */
	if (tx_mq_mode != ETH_MQ_TX_NONE)
		PMD_INIT_LOG(WARNING,
			"TX mode %d is not supported. It is meaningless for this driver, so it is ignored",
			tx_mq_mode);

	return 0;
}

static int
eth_igc_configure(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = igc_check_mq_mode(dev);
	if (ret != 0)
		return ret;

	intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
	return 0;
}

static int
eth_igc_set_link_up(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_up_phy(hw);
	else
		igc_power_up_fiber_serdes_link(hw);
	return 0;
}

static int
eth_igc_set_link_down(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_down_phy(hw);
	else
		igc_shutdown_fiber_serdes_link(hw);
	return 0;
}

/*
 * disable other interrupt
 */
static void
igc_intr_other_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMC, ~0);
	IGC_WRITE_FLUSH(hw);
}

/*
 * enable other interrupt
 */
static inline void
igc_intr_other_enable(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
	IGC_WRITE_FLUSH(hw);
}

/*
 * Read ICR to get the interrupt causes, check them, and set a flag bit
 * to update the link status if needed.
 */
static void
eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	/* read-on-clear nic registers here */
	icr = IGC_READ_REG(hw, IGC_ICR);

	intr->flags = 0;
	if (icr & IGC_ICR_LSC)
		intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
}

/* return 0 means link status changed, -1 means not changed */
static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case igc_media_type_copper:
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case igc_media_type_fiber:
			igc_check_for_link(hw);
			link_check = (IGC_READ_REG(hw, IGC_STATUS) &
				      IGC_STATUS_LU);
			break;

		case igc_media_type_internal_serdes:
			igc_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

		if (speed == SPEED_2500) {
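			/* ensure the inter-packet gap (TIPG.IPGT field) is
			 * 0x0b while running at 2.5Gbps
			 */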
			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
			if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
				tipg &= ~IGC_TIPG_IPGT_MASK;
				tipg |= 0x0b;
				IGC_WRITE_REG(hw, IGC_TIPG, tipg);
			}
		}
	} else {
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

/*
 * It executes link_update once an interrupt is known to be present.
 */
static void
eth_igc_interrupt_action(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		ret = eth_igc_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s",
				dev->data->port_id,
				(unsigned int)link.link_speed,
				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
		else
			PMD_DRV_LOG(INFO, " Port %d: Link Down",
				dev->data->port_id);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
}

/*
 * Interrupt handler, which shall be registered first.
 *
 * @handle
 *  Pointer to interrupt handle.
 * @param
 *  The address of the (struct rte_eth_dev *) parameter registered before.
 */
static void
eth_igc_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igc_interrupt_get_status(dev);
	eth_igc_interrupt_action(dev);
}

static void igc_read_queue_stats_register(struct rte_eth_dev *dev);

/*
 * Update the queue statistics every IGC_ALARM_INTERVAL microseconds.
 * @param
 *  The address of the (struct rte_eth_dev *) parameter registered before.
 */
static void
igc_update_queue_stats_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	igc_read_queue_stats_register(dev);
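	/* rte_eal_alarm_set() fires once, so re-arm it for the next interval */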
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
}

/*
 * rx,tx enable/disable
 */
static void
eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t tctl, rctl;

	tctl = IGC_READ_REG(hw, IGC_TCTL);
	rctl = IGC_READ_REG(hw, IGC_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= IGC_TCTL_EN;
		rctl |= IGC_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~IGC_TCTL_EN;
		rctl &= ~IGC_RCTL_EN;
	}
	IGC_WRITE_REG(hw, IGC_TCTL, tctl);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);
}

/*
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static int
eth_igc_stop(struct rte_eth_dev *dev)
{
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct rte_eth_link link;

	dev->data->dev_started = 0;
	adapter->stopped = 1;

	/* disable receive and transmit */
	eth_igc_rxtx_control(dev, false);

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	igc_intr_other_disable(dev);

	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* disable checking EEE operation in MAC loopback mode */
	igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);

	/* Set bit for Go Link disconnect */
	igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
			IGC_82580_PM_GO_LINKD);

	/* Power down the phy. Needed to make the link go down */
	eth_igc_set_link_down(dev);

	igc_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* restore the default interrupt handler */
		rte_intr_callback_register(intr_handle,
					   eth_igc_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	return 0;
}

/*
 * write interrupt vector allocation register
 * @hw
 *  board private structure
 * @queue_index
 *  queue index, valid 0,1,2,3
 * @tx
 *  tx:1, rx:0
 * @msix_vector
 *  msix-vector, valid 0,1,2,3,4
 */
static void
igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,
		bool tx, uint8_t msix_vector)
{
	uint8_t offset = 0;
	uint8_t reg_index = queue_index >> 1;
	uint32_t val;

	/*
	 * IVAR(0)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX1		RX1		TX0		RX0
	 *
	 * IVAR(1)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX3		RX3		TX2		RX2
	 */

	if (tx)
		offset = 8;

	if (queue_index & 1)
		offset += 16;

	val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset;

	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);
}
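/*
 * Worked example (illustrative): queue_index=3, tx=true, msix_vector=2
 * gives reg_index = 1 and offset = 8 + 16 = 24, so bits 31..24 of IVAR1
 * (the TX3 byte) are set to (2 | IGC_IVAR_VALID).
 */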

/* Sets up the hardware to generate MSI-X interrupts properly
 * @hw
 *  board private structure
 */
static void
igc_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	uint32_t intr_mask;
	uint32_t vec = IGC_MISC_VEC_ID;
	uint32_t base = IGC_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	int i;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = IGC_RX_VEC_START;
		vec = base;
		misc_shift = 1;
	}

	/* turn on MSI-X capability first */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
				IGC_GPIE_PBA | IGC_GPIE_EIAME |
				IGC_GPIE_NSICR);
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
		misc_shift;

	if (dev->data->dev_conf.intr_conf.lsc)
		intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);

	/* enable msix auto-clear */
	igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);

	/* set other cause interrupt vector */
	igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
		(uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);

	/* enable auto-mask */
	igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		igc_write_ivar(hw, i, 0, vec);
		intr_handle->intr_vec[i] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	IGC_WRITE_FLUSH(hw);
}

/**
 * Enable or disable the link status change bit in the interrupt mask.
 *
 * @dev
 *  Pointer to struct rte_eth_dev.
 * @on
 *  Enable or Disable
 */
static void
igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	if (on)
		intr->mask |= IGC_ICR_LSC;
	else
		intr->mask &= ~IGC_ICR_LSC;
}

/*
 * It enables the rx queue interrupt.
 * It is called only once, during NIC initialization.
 */
static void
igc_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t mask;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

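	/* e.g. nb_efd = 4 with misc_shift = 1 yields mask 0x1e (vectors 1..4) */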
	mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift;
	IGC_WRITE_REG(hw, IGC_EIMS, mask);
}

/*
 *  Get hardware rx-buffer size.
 */
static inline int
igc_get_rx_buffer_size(struct igc_hw *hw)
{
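	/* RXPBS[5:0] is in KB units; the shift by 10 converts KB to bytes */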
	return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
}

/*
 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igc_hw_control_acquire(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igc_hw_control_release(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT,
			ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

static int
igc_hardware_init(struct igc_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igc_hw_control_acquire(hw);

	/* Issue a global reset */
	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/*
	 * Hardware flow control
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 */
	rx_buf_size = igc_get_rx_buffer_size(hw);
	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
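	/* e.g. (illustrative) a 32KB Rx buffer gives high_water = 32768 -
	 * 3036 = 29732 bytes and low_water = 28232 bytes
	 */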
	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	hw->fc.requested_mode = igc_fc_full;

	diag = igc_init_hw(hw);
	if (diag < 0)
		return diag;

	igc_get_phy_info(hw);
	igc_check_for_link(hw);

	return 0;
}

static int
eth_igc_start(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t *speeds;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	/* disable uio/vfio intr/eventfd mapping */
	if (!adapter->stopped)
		rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go up */
	eth_igc_set_link_up(dev);

	/* Put the address into the Receive Address Array */
	igc_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igc_hardware_init(hw)) {
		PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
		dev->data->dev_conf.intr_conf.rxq) {
		uint32_t intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for rx interrupt */
	igc_configure_msix_intr(dev);

	igc_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = igc_rx_init(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
		igc_dev_clear_queues(dev);
		return ret;
	}

	igc_clear_hw_cntrs_base_generic(hw);

	/* VLAN Offload Settings */
	eth_igc_vlan_offload_set(dev,
		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
		hw->mac.autoneg = 1;
	} else {
		int num_speeds = 0;
		bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
				ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_2_5G) {
			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && num_speeds > 1))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	igc_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc)
			igc_lsc_interrupt_setup(dev, 1);
		else
			igc_lsc_interrupt_setup(dev, 0);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     eth_igc_interrupt_handler,
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc)
			PMD_DRV_LOG(INFO,
				"LSC interrupt won't be enabled: no interrupt multiplexing");
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq &&
			rte_intr_dp_is_en(intr_handle))
		igc_rxq_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	igc_intr_other_enable(dev);

	eth_igc_rxtx_control(dev, true);
	eth_igc_link_update(dev, 0);

	/* configure MAC-loopback mode */
	if (dev->data->dev_conf.lpbk_mode == 1) {
		uint32_t reg_val;

		reg_val = IGC_READ_REG(hw, IGC_CTRL);
		reg_val &= ~IGC_CTRL_SPEED_MASK;
		reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
			IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
		IGC_WRITE_REG(hw, IGC_CTRL, reg_val);

		igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
	}

	return 0;

error_invalid_config:
	PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	igc_dev_clear_queues(dev);
	return -EINVAL;
}

static int
igc_reset_swfw_lock(struct igc_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = igc_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (igc_get_hw_semaphore_generic(hw) < 0)
		PMD_DRV_LOG(DEBUG, "SMBI lock released");

	igc_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage.
		 * If this is the case, it is due to an improper exit of the
		 * application. So force the release of the faulty lock.
		 */
		mask = IGC_SWFW_PHY0_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if the lock cannot be taken it is due to an improper
		 * lock of the semaphore.
		 */
		mask = IGC_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return IGC_SUCCESS;
}

/*
 * free all rx/tx queues.
 */
static void
igc_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_igc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_igc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static int
eth_igc_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	int retry = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!adapter->stopped)
		ret = eth_igc_stop(dev);

	igc_flow_flush(dev, NULL);
	igc_clear_all_filter(dev);

	igc_intr_other_disable(dev);
	do {
		int ret = rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
			break;

		PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
		DELAY(200 * 1000); /* delay 200ms */
	} while (retry++ < 5);

	igc_phy_hw_reset(hw);
	igc_hw_control_release(hw);
	igc_dev_free_queues(dev);

	/* Reset any pending lock */
	igc_reset_swfw_lock(hw);

	return ret;
}

static void
igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
}

static int
eth_igc_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	int i, error = 0;

	PMD_INIT_FUNC_TRACE();
	dev->dev_ops = &eth_igc_ops;
	dev->rx_descriptor_done	= eth_igc_rx_descriptor_done;
	dev->rx_queue_count = eth_igc_rx_queue_count;
	dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
	dev->tx_descriptor_status = eth_igc_tx_descriptor_status;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->back = pci_dev;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igc_identify_hardware(dev, pci_dev);
	if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	igc_get_bus_info(hw);

	/* Reset any pending lock */
	if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = igc_ms_hw_default;
	}

	/*
	 * Start from a known state; this is important for reading the nvm
	 * and mac address from it.
	 */
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in a sleep state; call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc("igc",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
				RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Now initialize the hardware */
	if (igc_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}

	hw->mac.get_link_status = 1;
	igc->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (igc_check_reset_block(hw) < 0)
		PMD_INIT_LOG(ERR,
			"PHY reset is blocked due to SOL/IDER session.");

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
			dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
			eth_igc_interrupt_handler, (void *)dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igc_intr_other_enable(dev);

	/* initialize the per-queue stats mapping */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		igc->txq_stats_map[i] = -1;
		igc->rxq_stats_map[i] = -1;
	}

	igc_flow_init(dev);
	igc_clear_all_filter(dev);
	return 0;

err_late:
	igc_hw_control_release(hw);
	return error;
}

static int
eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	eth_igc_close(eth_dev);
	return 0;
}

static int
eth_igc_reset(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = eth_igc_dev_uninit(dev);
	if (ret)
		return ret;

	return eth_igc_dev_init(dev);
}

static int
eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= IGC_RCTL_MPE;
	else
		rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= IGC_RCTL_MPE;
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return 0;	/* must remain in all_multicast mode */

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		       size_t fw_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_fw_version fw;
	int ret;

	igc_get_fw_version(hw, &fw);

	/* if option rom is valid, display its version too */
	if (fw.or_valid) {
		ret = snprintf(fw_version, fw_size,
			 "%d.%d, 0x%08x, %d.%d.%d",
			 fw.eep_major, fw.eep_minor, fw.etrack_id,
			 fw.or_major, fw.or_build, fw.or_patch);
	/* no option rom */
	} else {
		if (fw.etrack_id != 0x0000) {
			ret = snprintf(fw_version, fw_size,
				 "%d.%d, 0x%08x",
				 fw.eep_major, fw.eep_minor,
				 fw.etrack_id);
		} else {
			ret = snprintf(fw_version, fw_size,
				 "%d.%d.%d",
				 fw.eep_major, fw.eep_minor,
				 fw.eep_build);
		}
	}

	ret += 1; /* add the size of '\0' */
	if (fw_size < (u32)ret)
		return ret;
	else
		return 0;
}

static int
eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_vmdq_pools = 0;

	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGC_DEFAULT_RX_PTHRESH,
			.hthresh = IGC_DEFAULT_RX_HTHRESH,
			.wthresh = IGC_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGC_DEFAULT_TX_PTHRESH,
			.hthresh = IGC_DEFAULT_TX_HTHRESH,
			.wthresh = IGC_DEFAULT_TX_WTHRESH,
		},
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;

	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	return 0;
}

static int
eth_igc_led_on(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_igc_led_off(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}

static const uint32_t *
eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rx_desc_pkt_info_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

static int
eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
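	/* e.g. mtu 1500 -> frame_size = 1500 + 14 (hdr) + 4 (CRC) + 4 (VLAN) = 1522 */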
	uint32_t rctl;

	/* if extended VLAN has been enabled */
	if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
		frame_size += VLAN_TAG_SIZE;

	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU ||
		frame_size > MAX_RX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	/*
	 * refuse mtu that requires the support of scattered packets when
	 * this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	rctl = IGC_READ_REG(hw, IGC_RCTL);

	/* switch to jumbo mode if needed */
	if (mtu > RTE_ETHER_MTU) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl |= IGC_RCTL_LPE;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl &= ~IGC_RCTL_LPE;
	}
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	IGC_WRITE_REG(hw, IGC_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len);

	return 0;
}

static int
eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_rar_set(hw, mac_addr->addr_bytes, index);
	RTE_SET_USED(pool);
	return 0;
}

static void
eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[RTE_ETHER_ADDR_LEN];
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	memset(addr, 0, sizeof(addr));
	igc_rar_set(hw, addr, index);
}

static int
eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_rar_set(hw, addr->addr_bytes, 0);
	return 0;
}

static int
eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}

/*
 * Read hardware registers
 */
static void
igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
{
	int pause_frames;

	uint64_t old_gprc  = stats->gprc;
	uint64_t old_gptc  = stats->gptc;
	uint64_t old_tpr   = stats->tpr;
	uint64_t old_tpt   = stats->tpt;
	uint64_t old_rpthc = stats->rpthc;
	uint64_t old_hgptc = stats->hgptc;

	stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
	stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
	stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
	stats->mpc += IGC_READ_REG(hw, IGC_MPC);
	stats->scc += IGC_READ_REG(hw, IGC_SCC);
	stats->ecol += IGC_READ_REG(hw, IGC_ECOL);

	stats->mcc += IGC_READ_REG(hw, IGC_MCC);
	stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
	stats->colc += IGC_READ_REG(hw, IGC_COLC);

	stats->dc += IGC_READ_REG(hw, IGC_DC);
	stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
	stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
	stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
	stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
	stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
	stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
	stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
	stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
	stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
	stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
	stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
	stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
	stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
	stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
	stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
	stats->gptc += IGC_READ_REG(hw, IGC_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	/* Workaround CRC bytes included in size, take away 4 bytes/packet */
	stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
	stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
	stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
	stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
	stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
	stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;

	stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
	stats->ruc += IGC_READ_REG(hw, IGC_RUC);
	stats->rfc += IGC_READ_REG(hw, IGC_RFC);
	stats->roc += IGC_READ_REG(hw, IGC_ROC);
	stats->rjc += IGC_READ_REG(hw, IGC_RJC);

	stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
	stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
	stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
	stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
	stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
	stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
	stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);

	stats->tpr += IGC_READ_REG(hw, IGC_TPR);
	stats->tpt += IGC_READ_REG(hw, IGC_TPT);

	stats->tor += IGC_READ_REG(hw, IGC_TORL);
	stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
	stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
	stats->tot += IGC_READ_REG(hw, IGC_TOTL);
	stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
	stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;

	stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
	stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
	stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
	stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
	stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
	stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
	stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
	stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
	stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);

	stats->iac += IGC_READ_REG(hw, IGC_IAC);
	stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);
	stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);
	stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);

	/* Host to Card Statistics */
	stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);
	stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);
	stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
	stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);
	stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);
	stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
	stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);
}

1775 /*
1776  * Write 0 to all queue status registers
1777  */
1778 static void
1779 igc_reset_queue_stats_register(struct igc_hw *hw)
1780 {
1781 	int i;
1782 
1783 	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1784 		IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);
1785 		IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);
1786 		IGC_WRITE_REG(hw, IGC_PQGORC(i), 0);
1787 		IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);
1788 		IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);
1789 		IGC_WRITE_REG(hw, IGC_RQDPC(i), 0);
1790 		IGC_WRITE_REG(hw, IGC_TQDPC(i), 0);
1791 	}
1792 }
1793 
1794 /*
1795  * Read all hardware queue status registers
1796  */
1797 static void
1798 igc_read_queue_stats_register(struct rte_eth_dev *dev)
1799 {
1800 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1801 	struct igc_hw_queue_stats *queue_stats =
1802 				IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1803 	int i;
1804 
1805 	/*
1806 	 * This register is not cleared on read. Furthermore, the register wraps
1807 	 * around back to 0x00000000 on the next increment when reaching a value
1808 	 * of 0xFFFFFFFF and then continues normal count operation.
1809 	 */
1810 	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1811 		union {
1812 			u64 ddword;
1813 			u32 dword[2];
1814 		} value;
1815 		u32 tmp;
1816 
1817 		/*
1818 		 * Read the register first, if the value is smaller than that
1819 		 * previous read, that mean the register has been overflowed,
1820 		 * then we add the high 4 bytes by 1 and replace the low 4
1821 		 * bytes by the new value.
1822 		 */
1823 		tmp = IGC_READ_REG(hw, IGC_PQGPRC(i));
1824 		value.ddword = queue_stats->pqgprc[i];
1825 		if (value.dword[U32_0_IN_U64] > tmp)
1826 			value.dword[U32_1_IN_U64]++;
1827 		value.dword[U32_0_IN_U64] = tmp;
1828 		queue_stats->pqgprc[i] = value.ddword;
1829 
1830 		tmp = IGC_READ_REG(hw, IGC_PQGPTC(i));
1831 		value.ddword = queue_stats->pqgptc[i];
1832 		if (value.dword[U32_0_IN_U64] > tmp)
1833 			value.dword[U32_1_IN_U64]++;
1834 		value.dword[U32_0_IN_U64] = tmp;
1835 		queue_stats->pqgptc[i] = value.ddword;
1836 
1837 		tmp = IGC_READ_REG(hw, IGC_PQGORC(i));
1838 		value.ddword = queue_stats->pqgorc[i];
1839 		if (value.dword[U32_0_IN_U64] > tmp)
1840 			value.dword[U32_1_IN_U64]++;
1841 		value.dword[U32_0_IN_U64] = tmp;
1842 		queue_stats->pqgorc[i] = value.ddword;
1843 
1844 		tmp = IGC_READ_REG(hw, IGC_PQGOTC(i));
1845 		value.ddword = queue_stats->pqgotc[i];
1846 		if (value.dword[U32_0_IN_U64] > tmp)
1847 			value.dword[U32_1_IN_U64]++;
1848 		value.dword[U32_0_IN_U64] = tmp;
1849 		queue_stats->pqgotc[i] = value.ddword;
1850 
1851 		tmp = IGC_READ_REG(hw, IGC_PQMPRC(i));
1852 		value.ddword = queue_stats->pqmprc[i];
1853 		if (value.dword[U32_0_IN_U64] > tmp)
1854 			value.dword[U32_1_IN_U64]++;
1855 		value.dword[U32_0_IN_U64] = tmp;
1856 		queue_stats->pqmprc[i] = value.ddword;
1857 
1858 		tmp = IGC_READ_REG(hw, IGC_RQDPC(i));
1859 		value.ddword = queue_stats->rqdpc[i];
1860 		if (value.dword[U32_0_IN_U64] > tmp)
1861 			value.dword[U32_1_IN_U64]++;
1862 		value.dword[U32_0_IN_U64] = tmp;
1863 		queue_stats->rqdpc[i] = value.ddword;
1864 
1865 		tmp = IGC_READ_REG(hw, IGC_TQDPC(i));
1866 		value.ddword = queue_stats->tqdpc[i];
1867 		if (value.dword[U32_0_IN_U64] > tmp)
1868 			value.dword[U32_1_IN_U64]++;
1869 		value.dword[U32_0_IN_U64] = tmp;
1870 		queue_stats->tqdpc[i] = value.ddword;
1871 	}
1872 }
1873 
1874 static int
1875 eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1876 {
1877 	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
1878 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1879 	struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
1880 	struct igc_hw_queue_stats *queue_stats =
1881 			IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1882 	int i;
1883 
1884 	/*
1885 	 * Cancel status handler since it will read the queue status registers
1886 	 */
1887 	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
1888 
	/* Read the stats registers */
1890 	igc_read_queue_stats_register(dev);
1891 	igc_read_stats_registers(hw, stats);
1892 
1893 	if (rte_stats == NULL) {
		/* Restart the queue stats handler */
1895 		rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1896 				igc_update_queue_stats_handler, dev);
1897 		return -EINVAL;
1898 	}
1899 
1900 	/* Rx Errors */
1901 	rte_stats->imissed = stats->mpc;
1902 	rte_stats->ierrors = stats->crcerrs +
1903 			stats->rlec + stats->ruc + stats->roc +
1904 			stats->rxerrc + stats->algnerrc;
1905 
1906 	/* Tx Errors */
1907 	rte_stats->oerrors = stats->ecol + stats->latecol;
1908 
1909 	rte_stats->ipackets = stats->gprc;
1910 	rte_stats->opackets = stats->gptc;
1911 	rte_stats->ibytes   = stats->gorc;
1912 	rte_stats->obytes   = stats->gotc;
1913 
	/* Get per-queue statistics */
1915 	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		/* Get TX queue statistics */
1917 		int map_id = igc->txq_stats_map[i];
1918 		if (map_id >= 0) {
1919 			rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
1920 			rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
1921 		}
		/* Get RX queue statistics */
1923 		map_id = igc->rxq_stats_map[i];
1924 		if (map_id >= 0) {
1925 			rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
1926 			rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
1927 			rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
1928 		}
1929 	}
1930 
	/* Restart the queue stats handler */
1932 	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1933 			igc_update_queue_stats_handler, dev);
1934 	return 0;
1935 }
1936 
1937 static int
1938 eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1939 		   unsigned int n)
1940 {
1941 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1942 	struct igc_hw_stats *hw_stats =
1943 			IGC_DEV_PRIVATE_STATS(dev);
1944 	unsigned int i;
1945 
1946 	igc_read_stats_registers(hw, hw_stats);
1947 
1948 	if (n < IGC_NB_XSTATS)
1949 		return IGC_NB_XSTATS;
1950 
	/* If this is a reset, xstats is NULL and we have already cleared
	 * the registers by reading them.
	 */
1954 	if (!xstats)
1955 		return 0;
1956 
1957 	/* Extended stats */
1958 	for (i = 0; i < IGC_NB_XSTATS; i++) {
1959 		xstats[i].id = i;
1960 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1961 			rte_igc_stats_strings[i].offset);
1962 	}
1963 
1964 	return IGC_NB_XSTATS;
1965 }
1966 
1967 static int
1968 eth_igc_xstats_reset(struct rte_eth_dev *dev)
1969 {
1970 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1971 	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
1972 	struct igc_hw_queue_stats *queue_stats =
1973 			IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1974 
	/* Cancel the queue stats handler to avoid a conflict */
1976 	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
1977 
1978 	/* HW registers are cleared on read */
1979 	igc_reset_queue_stats_register(hw);
1980 	igc_read_stats_registers(hw, hw_stats);
1981 
1982 	/* Reset software totals */
1983 	memset(hw_stats, 0, sizeof(*hw_stats));
1984 	memset(queue_stats, 0, sizeof(*queue_stats));
1985 
	/* Restart the queue stats handler */
1987 	rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
1988 			dev);
1989 
1990 	return 0;
1991 }
1992 
1993 static int
1994 eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1995 	struct rte_eth_xstat_name *xstats_names, unsigned int size)
1996 {
1997 	unsigned int i;
1998 
1999 	if (xstats_names == NULL)
2000 		return IGC_NB_XSTATS;
2001 
2002 	if (size < IGC_NB_XSTATS) {
2003 		PMD_DRV_LOG(ERR, "not enough buffers!");
2004 		return IGC_NB_XSTATS;
2005 	}
2006 
2007 	for (i = 0; i < IGC_NB_XSTATS; i++)
2008 		strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
2009 			sizeof(xstats_names[i].name));
2010 
2011 	return IGC_NB_XSTATS;
2012 }
2013 
2014 static int
2015 eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
2016 		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
2017 		unsigned int limit)
2018 {
2019 	unsigned int i;
2020 
2021 	if (!ids)
2022 		return eth_igc_xstats_get_names(dev, xstats_names, limit);
2023 
2024 	for (i = 0; i < limit; i++) {
2025 		if (ids[i] >= IGC_NB_XSTATS) {
2026 			PMD_DRV_LOG(ERR, "id value isn't valid");
2027 			return -EINVAL;
2028 		}
2029 		strlcpy(xstats_names[i].name,
2030 			rte_igc_stats_strings[ids[i]].name,
2031 			sizeof(xstats_names[i].name));
2032 	}
2033 	return limit;
2034 }
2035 
2036 static int
2037 eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2038 		uint64_t *values, unsigned int n)
2039 {
2040 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2041 	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
2042 	unsigned int i;
2043 
2044 	igc_read_stats_registers(hw, hw_stats);
2045 
2046 	if (!ids) {
2047 		if (n < IGC_NB_XSTATS)
2048 			return IGC_NB_XSTATS;
2049 
		/* If this is a reset, values is NULL and we have already
		 * cleared the registers by reading them.
		 */
2053 		if (!values)
2054 			return 0;
2055 
2056 		/* Extended stats */
2057 		for (i = 0; i < IGC_NB_XSTATS; i++)
2058 			values[i] = *(uint64_t *)(((char *)hw_stats) +
2059 					rte_igc_stats_strings[i].offset);
2060 
2061 		return IGC_NB_XSTATS;
2062 
2063 	} else {
2064 		for (i = 0; i < n; i++) {
2065 			if (ids[i] >= IGC_NB_XSTATS) {
2066 				PMD_DRV_LOG(ERR, "id value isn't valid");
2067 				return -EINVAL;
2068 			}
2069 			values[i] = *(uint64_t *)(((char *)hw_stats) +
2070 					rte_igc_stats_strings[ids[i]].offset);
2071 		}
2072 		return n;
2073 	}
2074 }
2075 
2076 static int
2077 eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
2078 		uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
2079 {
2080 	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
2081 
	/* check that the queue id is valid */
2083 	if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
2084 		PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
2085 			queue_id, IGC_QUEUE_PAIRS_NUM - 1);
2086 		return -EINVAL;
2087 	}
2088 
	/* store the stats mapping index */
2090 	if (is_rx)
2091 		igc->rxq_stats_map[queue_id] = stat_idx;
2092 	else
2093 		igc->txq_stats_map[queue_id] = stat_idx;
2094 
2095 	return 0;
2096 }
2097 
2098 static int
2099 eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2100 {
2101 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2102 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2103 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2104 	uint32_t vec = IGC_MISC_VEC_ID;
2105 
2106 	if (rte_intr_allow_others(intr_handle))
2107 		vec = IGC_RX_VEC_START;
2108 
2109 	uint32_t mask = 1u << (queue_id + vec);
2110 
2111 	IGC_WRITE_REG(hw, IGC_EIMC, mask);
2112 	IGC_WRITE_FLUSH(hw);
2113 
2114 	return 0;
2115 }
2116 
2117 static int
2118 eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2119 {
2120 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2121 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2122 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2123 	uint32_t vec = IGC_MISC_VEC_ID;
2124 
2125 	if (rte_intr_allow_others(intr_handle))
2126 		vec = IGC_RX_VEC_START;
2127 
2128 	uint32_t mask = 1u << (queue_id + vec);
2129 
2130 	IGC_WRITE_REG(hw, IGC_EIMS, mask);
2131 	IGC_WRITE_FLUSH(hw);
2132 
2133 	rte_intr_enable(intr_handle);
2134 
2135 	return 0;
2136 }
2137 
2138 static int
2139 eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2140 {
2141 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2142 	uint32_t ctrl;
2143 	int tx_pause;
2144 	int rx_pause;
2145 
2146 	fc_conf->pause_time = hw->fc.pause_time;
2147 	fc_conf->high_water = hw->fc.high_water;
2148 	fc_conf->low_water = hw->fc.low_water;
2149 	fc_conf->send_xon = hw->fc.send_xon;
2150 	fc_conf->autoneg = hw->mac.autoneg;
2151 
2152 	/*
2153 	 * Return rx_pause and tx_pause status according to actual setting of
2154 	 * the TFCE and RFCE bits in the CTRL register.
2155 	 */
2156 	ctrl = IGC_READ_REG(hw, IGC_CTRL);
2157 	if (ctrl & IGC_CTRL_TFCE)
2158 		tx_pause = 1;
2159 	else
2160 		tx_pause = 0;
2161 
2162 	if (ctrl & IGC_CTRL_RFCE)
2163 		rx_pause = 1;
2164 	else
2165 		rx_pause = 0;
2166 
2167 	if (rx_pause && tx_pause)
2168 		fc_conf->mode = RTE_FC_FULL;
2169 	else if (rx_pause)
2170 		fc_conf->mode = RTE_FC_RX_PAUSE;
2171 	else if (tx_pause)
2172 		fc_conf->mode = RTE_FC_TX_PAUSE;
2173 	else
2174 		fc_conf->mode = RTE_FC_NONE;
2175 
2176 	return 0;
2177 }
2178 
2179 static int
2180 eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2181 {
2182 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2183 	uint32_t rx_buf_size;
2184 	uint32_t max_high_water;
2185 	uint32_t rctl;
2186 	int err;
2187 
2188 	if (fc_conf->autoneg != hw->mac.autoneg)
2189 		return -ENOTSUP;
2190 
2191 	rx_buf_size = igc_get_rx_buffer_size(hw);
2192 	PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2193 
	/* Reserve at least one Ethernet frame for the watermark */
2195 	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
2196 	if (fc_conf->high_water > max_high_water ||
2197 		fc_conf->high_water < fc_conf->low_water) {
2198 		PMD_DRV_LOG(ERR,
2199 			"Incorrect high(%u)/low(%u) water value, max is %u",
2200 			fc_conf->high_water, fc_conf->low_water,
2201 			max_high_water);
2202 		return -EINVAL;
2203 	}
2204 
2205 	switch (fc_conf->mode) {
2206 	case RTE_FC_NONE:
2207 		hw->fc.requested_mode = igc_fc_none;
2208 		break;
2209 	case RTE_FC_RX_PAUSE:
2210 		hw->fc.requested_mode = igc_fc_rx_pause;
2211 		break;
2212 	case RTE_FC_TX_PAUSE:
2213 		hw->fc.requested_mode = igc_fc_tx_pause;
2214 		break;
2215 	case RTE_FC_FULL:
2216 		hw->fc.requested_mode = igc_fc_full;
2217 		break;
2218 	default:
2219 		PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
2220 		return -EINVAL;
2221 	}
2222 
2223 	hw->fc.pause_time     = fc_conf->pause_time;
2224 	hw->fc.high_water     = fc_conf->high_water;
2225 	hw->fc.low_water      = fc_conf->low_water;
2226 	hw->fc.send_xon	      = fc_conf->send_xon;
2227 
2228 	err = igc_setup_link_generic(hw);
2229 	if (err == IGC_SUCCESS) {
2230 		/**
2231 		 * check if we want to forward MAC frames - driver doesn't have
2232 		 * native capability to do that, so we'll write the registers
2233 		 * ourselves
2234 		 **/
2235 		rctl = IGC_READ_REG(hw, IGC_RCTL);
2236 
		/* set or clear RCTL.PMCF bit depending on configuration */
2238 		if (fc_conf->mac_ctrl_frame_fwd != 0)
2239 			rctl |= IGC_RCTL_PMCF;
2240 		else
2241 			rctl &= ~IGC_RCTL_PMCF;
2242 
2243 		IGC_WRITE_REG(hw, IGC_RCTL, rctl);
2244 		IGC_WRITE_FLUSH(hw);
2245 
2246 		return 0;
2247 	}
2248 
2249 	PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
2250 	return -EIO;
2251 }
2252 
2253 static int
2254 eth_igc_rss_reta_update(struct rte_eth_dev *dev,
2255 			struct rte_eth_rss_reta_entry64 *reta_conf,
2256 			uint16_t reta_size)
2257 {
2258 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2259 	uint16_t i;
2260 
2261 	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the configured RSS redirection table (%d) doesn't match the number supported by hardware (%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
2265 		return -EINVAL;
2266 	}
2267 
2268 	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
2269 
2270 	/* set redirection table */
2271 	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
2272 		union igc_rss_reta_reg reta, reg;
2273 		uint16_t idx, shift;
2274 		uint8_t j, mask;
2275 
2276 		idx = i / RTE_RETA_GROUP_SIZE;
2277 		shift = i % RTE_RETA_GROUP_SIZE;
2278 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2279 				IGC_RSS_RDT_REG_SIZE_MASK);
2280 
		/* skip if there is no need to update this register */
2282 		if (!mask ||
2283 		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
2284 			continue;
2285 
		/* check whether the old register value must be read first */
2287 		if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
2288 			reg.dword = 0;
2289 		else
2290 			reg.dword = IGC_READ_REG_LE_VALUE(hw,
2291 					IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
2292 
2293 		/* update the register */
2294 		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
2295 		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
2296 			if (mask & (1u << j))
2297 				reta.bytes[j] =
2298 					(uint8_t)reta_conf[idx].reta[shift + j];
2299 			else
2300 				reta.bytes[j] = reg.bytes[j];
2301 		}
2302 		IGC_WRITE_REG_LE_VALUE(hw,
2303 			IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
2304 	}
2305 
2306 	return 0;
2307 }
2308 
2309 static int
2310 eth_igc_rss_reta_query(struct rte_eth_dev *dev,
2311 		       struct rte_eth_rss_reta_entry64 *reta_conf,
2312 		       uint16_t reta_size)
2313 {
2314 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2315 	uint16_t i;
2316 
2317 	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the configured RSS redirection table (%d) doesn't match the number supported by hardware (%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
2321 		return -EINVAL;
2322 	}
2323 
2324 	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
2325 
2326 	/* read redirection table */
2327 	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
2328 		union igc_rss_reta_reg reta;
2329 		uint16_t idx, shift;
2330 		uint8_t j, mask;
2331 
2332 		idx = i / RTE_RETA_GROUP_SIZE;
2333 		shift = i % RTE_RETA_GROUP_SIZE;
2334 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2335 				IGC_RSS_RDT_REG_SIZE_MASK);
2336 
		/* skip if there is no need to read this register */
2338 		if (!mask ||
2339 		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
2340 			continue;
2341 
2342 		/* read register and get the queue index */
2343 		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
2344 		reta.dword = IGC_READ_REG_LE_VALUE(hw,
2345 				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
2346 		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
2347 			if (mask & (1u << j))
2348 				reta_conf[idx].reta[shift + j] = reta.bytes[j];
2349 		}
2350 	}
2351 
2352 	return 0;
2353 }
2354 
2355 static int
2356 eth_igc_rss_hash_update(struct rte_eth_dev *dev,
2357 			struct rte_eth_rss_conf *rss_conf)
2358 {
2359 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2360 	igc_hw_rss_hash_set(hw, rss_conf);
2361 	return 0;
2362 }
2363 
2364 static int
2365 eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
2366 			struct rte_eth_rss_conf *rss_conf)
2367 {
2368 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2369 	uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
2370 	uint32_t mrqc;
2371 	uint64_t rss_hf;
2372 
2373 	if (hash_key != NULL) {
2374 		int i;
2375 
		/* the key length must match the hardware hash key size */
2377 		if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
2378 			PMD_DRV_LOG(ERR,
2379 				"RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
2380 				rss_conf->rss_key_len, IGC_HKEY_SIZE);
2381 			return -EINVAL;
2382 		}
2383 
		/* read the RSS key from the registers */
2385 		for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
2386 			hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
2387 	}
2388 
2389 	/* get RSS functions configured in MRQC register */
2390 	mrqc = IGC_READ_REG(hw, IGC_MRQC);
2391 	if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
2392 		return 0;
2393 
2394 	rss_hf = 0;
2395 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
2396 		rss_hf |= ETH_RSS_IPV4;
2397 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
2398 		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2399 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
2400 		rss_hf |= ETH_RSS_IPV6;
2401 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
2402 		rss_hf |= ETH_RSS_IPV6_EX;
2403 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
2404 		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2405 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
2406 		rss_hf |= ETH_RSS_IPV6_TCP_EX;
2407 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
2408 		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2409 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
2410 		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2411 	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
2412 		rss_hf |= ETH_RSS_IPV6_UDP_EX;
2413 
2414 	rss_conf->rss_hf |= rss_hf;
2415 	return 0;
2416 }
2417 
2418 static int
2419 eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2420 {
2421 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2422 	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
2423 	uint32_t vfta;
2424 	uint32_t vid_idx;
2425 	uint32_t vid_bit;
2426 
2427 	vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
2428 	vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
2429 	vfta = shadow_vfta->vfta[vid_idx];
2430 	if (on)
2431 		vfta |= vid_bit;
2432 	else
2433 		vfta &= ~vid_bit;
2434 	IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);
2435 
2436 	/* update local VFTA copy */
2437 	shadow_vfta->vfta[vid_idx] = vfta;
2438 
2439 	return 0;
2440 }
2441 
2442 static void
2443 igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
2444 {
2445 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2446 	igc_read_reg_check_clear_bits(hw, IGC_RCTL,
2447 			IGC_RCTL_CFIEN | IGC_RCTL_VFE);
2448 }
2449 
2450 static void
2451 igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2452 {
2453 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2454 	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
2455 	uint32_t reg_val;
2456 	int i;
2457 
2458 	/* Filter Table Enable, CFI not used for packet acceptance */
2459 	reg_val = IGC_READ_REG(hw, IGC_RCTL);
2460 	reg_val &= ~IGC_RCTL_CFIEN;
2461 	reg_val |= IGC_RCTL_VFE;
2462 	IGC_WRITE_REG(hw, IGC_RCTL, reg_val);
2463 
2464 	/* restore VFTA table */
2465 	for (i = 0; i < IGC_VFTA_SIZE; i++)
2466 		IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
2467 }
2468 
2469 static void
2470 igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
2471 {
2472 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2473 
2474 	igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
2475 }
2476 
2477 static void
2478 igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
2479 {
2480 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2481 
2482 	igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
2483 }
2484 
2485 static int
2486 igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2487 {
2488 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2489 	uint32_t ctrl_ext;
2490 
2491 	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
2492 
	/* if extended VLAN hasn't been enabled */
2494 	if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
2495 		return 0;
2496 
2497 	if ((dev->data->dev_conf.rxmode.offloads &
2498 			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
2499 		goto write_ext_vlan;
2500 
2501 	/* Update maximum packet length */
2502 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
2503 		RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
2504 		PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
2505 			dev->data->dev_conf.rxmode.max_rx_pkt_len,
2506 			VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
2507 		return -EINVAL;
2508 	}
2509 	dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
2510 	IGC_WRITE_REG(hw, IGC_RLPML,
2511 		dev->data->dev_conf.rxmode.max_rx_pkt_len);
2512 
2513 write_ext_vlan:
2514 	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
2515 	return 0;
2516 }
2517 
2518 static int
2519 igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2520 {
2521 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2522 	uint32_t ctrl_ext;
2523 
2524 	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
2525 
	/* if extended VLAN has already been enabled */
2527 	if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
2528 		return 0;
2529 
2530 	if ((dev->data->dev_conf.rxmode.offloads &
2531 			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
2532 		goto write_ext_vlan;
2533 
2534 	/* Update maximum packet length */
2535 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
2536 		MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
2537 		PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
2538 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
2539 			VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
2540 		return -EINVAL;
2541 	}
2542 	dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
2543 	IGC_WRITE_REG(hw, IGC_RLPML,
2544 		dev->data->dev_conf.rxmode.max_rx_pkt_len);
2545 
2546 write_ext_vlan:
2547 	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
2548 	return 0;
2549 }
2550 
2551 static int
2552 eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2553 {
2554 	struct rte_eth_rxmode *rxmode;
2555 
2556 	rxmode = &dev->data->dev_conf.rxmode;
2557 	if (mask & ETH_VLAN_STRIP_MASK) {
2558 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2559 			igc_vlan_hw_strip_enable(dev);
2560 		else
2561 			igc_vlan_hw_strip_disable(dev);
2562 	}
2563 
2564 	if (mask & ETH_VLAN_FILTER_MASK) {
2565 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2566 			igc_vlan_hw_filter_enable(dev);
2567 		else
2568 			igc_vlan_hw_filter_disable(dev);
2569 	}
2570 
2571 	if (mask & ETH_VLAN_EXTEND_MASK) {
2572 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2573 			return igc_vlan_hw_extend_enable(dev);
2574 		else
2575 			return igc_vlan_hw_extend_disable(dev);
2576 	}
2577 
2578 	return 0;
2579 }
2580 
2581 static int
2582 eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
2583 		      enum rte_vlan_type vlan_type,
2584 		      uint16_t tpid)
2585 {
2586 	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2587 	uint32_t reg_val;
2588 
	/* only the outer TPID of double VLAN can be configured */
2590 	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
2591 		reg_val = IGC_READ_REG(hw, IGC_VET);
2592 		reg_val = (reg_val & (~IGC_VET_EXT)) |
2593 			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
2594 		IGC_WRITE_REG(hw, IGC_VET, reg_val);
2595 
2596 		return 0;
2597 	}
2598 
	/* all other TPID values are read-only */
2600 	PMD_DRV_LOG(ERR, "Not supported");
2601 	return -ENOTSUP;
2602 }
2603 
2604 static int
2605 eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2606 	struct rte_pci_device *pci_dev)
2607 {
2608 	PMD_INIT_FUNC_TRACE();
2609 	return rte_eth_dev_pci_generic_probe(pci_dev,
2610 		sizeof(struct igc_adapter), eth_igc_dev_init);
2611 }
2612 
2613 static int
2614 eth_igc_pci_remove(struct rte_pci_device *pci_dev)
2615 {
2616 	PMD_INIT_FUNC_TRACE();
2617 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
2618 }
2619 
2620 static struct rte_pci_driver rte_igc_pmd = {
2621 	.id_table = pci_id_igc_map,
2622 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2623 	.probe = eth_igc_pci_probe,
2624 	.remove = eth_igc_pci_remove,
2625 };
2626 
2627 RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
2628 RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
2629 RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");
2630