/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#include "igb_regs.h"

/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH  32

#define IGB_DEFAULT_RX_PTHRESH      ((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_DEFAULT_RX_HTHRESH      8
#define IGB_DEFAULT_RX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 4)

#define IGB_DEFAULT_TX_PTHRESH      ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_DEFAULT_TX_HTHRESH      1
#define IGB_DEFAULT_TX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 16)

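/*
 * Usage sketch (illustrative, not part of the driver): the threshold
 * macros above expand to expressions that read a local `struct e1000_hw
 * *hw`, so they are only usable inside functions that have such a
 * variable in scope. A typical consumer fills the default queue
 * configuration reported to applications roughly like this:
 *
 *	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 *		.rx_thresh = {
 *			.pthresh = IGB_DEFAULT_RX_PTHRESH,
 *			.hthresh = IGB_DEFAULT_RX_HTHRESH,
 *			.wthresh = IGB_DEFAULT_RX_WTHRESH,
 *		},
 *		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
 *	};
 */
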
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH  CHAR_BIT
#define IGB_8_BIT_MASK   UINT8_MAX
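
/*
 * Worked example (illustrative): RTE_LEN2MASK(n, t) builds a mask of the
 * n lowest bits in type t, so with CHAR_BIT == 8:
 *
 *	IGB_4_BIT_WIDTH == 4
 *	IGB_4_BIT_MASK  == RTE_LEN2MASK(4, uint8_t) == 0x0F
 *	IGB_8_BIT_MASK  == UINT8_MAX                == 0xFF
 */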

/* Additional timesync values. */
#define E1000_CYCLECOUNTER_MASK      0xffffffffffffffffULL
#define E1000_ETQF_FILTER_1588       3
#define IGB_82576_TSYNC_SHIFT        16
#define E1000_INCPERIOD_82576        (1 << E1000_TIMINCA_16NS_SHIFT)
#define E1000_INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
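
/*
 * Note (illustrative arithmetic, assuming the usual 82576 SYSTIM layout
 * where the low 16 bits hold fractional nanoseconds): an increment of
 * 16 ns per tick is then programmed as
 *
 *	E1000_INCVALUE_82576 == 16 << 16 == 0x100000
 *
 * and E1000_CYCLECOUNTER_MASK keeps all 64 bits of the cycle counter.
 */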

#define E1000_VTIVAR_MISC                0x01740
#define E1000_VTIVAR_MISC_MASK           0xFF
#define E1000_VTIVAR_VALID               0x80
#define E1000_VTIVAR_MISC_MAILBOX        0
#define E1000_VTIVAR_MISC_INTR_MASK      0x3

/* External VLAN Enable bit mask */
#define E1000_CTRL_EXT_EXT_VLAN      (1 << 26)

/* External VLAN Ether Type bit mask and shift */
#define E1000_VET_VET_EXT            0xFFFF0000
#define E1000_VET_VET_EXT_SHIFT      16

/* MSI-X other interrupt vector */
#define IGB_MSIX_OTHER_INTR_VEC      0

static int  eth_igb_configure(struct rte_eth_dev *dev);
static int  eth_igb_start(struct rte_eth_dev *dev);
static int  eth_igb_stop(struct rte_eth_dev *dev);
static int  eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
static int  eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static int eth_igb_close(struct rte_eth_dev *dev);
static int eth_igb_reset(struct rte_eth_dev *dev);
static int  eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static int  eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static int  eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static int  eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int  eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *xstats, unsigned n);
static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);
static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
		unsigned int limit);
static int eth_igb_stats_reset(struct rte_eth_dev *dev);
static int eth_igb_xstats_reset(struct rte_eth_dev *dev);
static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
				   char *fw_version, size_t fw_size);
static int eth_igb_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igbvf_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void eth_igb_interrupt_handler(void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int  eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
				 enum rte_vlan_type vlan_type,
				 uint16_t tpid_id);
static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct rte_eth_dev *dev);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_igb_rar_set(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr,
			   uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static int igbvf_dev_stop(struct rte_eth_dev *dev);
static int igbvf_dev_close(struct rte_eth_dev *dev);
static int igbvf_promiscuous_enable(struct rte_eth_dev *dev);
static int igbvf_promiscuous_disable(struct rte_eth_dev *dev);
static int igbvf_allmulticast_enable(struct rte_eth_dev *dev);
static int igbvf_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned limit);
static int eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);

static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_flow_ops_get(struct rte_eth_dev *dev,
				const struct rte_flow_ops **ops);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_get_module_info(struct rte_eth_dev *dev,
				   struct rte_eth_dev_module_info *modinfo);
static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
				     struct rte_dev_eeprom_info *info);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp,
					  uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int igb_timesync_read_time(struct rte_eth_dev *dev,
				  struct timespec *timestamp);
static int igb_timesync_write_time(struct rte_eth_dev *dev,
				   const struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
				       uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
			       uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
static int igb_filter_restore(struct rte_eth_dev *dev);

/*
 * Define the VF stats macro for registers that are not "clear on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += (latest - last) & UINT_MAX;        \
	last = latest;                            \
}
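
/*
 * Worked example (illustrative): because the subtraction is done in
 * unsigned 32-bit arithmetic, the delta stays correct across a counter
 * wrap. With hypothetical values:
 *
 *	last   = 0xFFFFFFF0;
 *	latest = 0x00000010;   // counter wrapped since the last read
 *	cur   += (0x00000010 - 0xFFFFFFF0) & UINT_MAX;   // += 0x20
 *
 * i.e. the 0x20 events counted across the wrap are still accumulated.
 */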

#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82576 & I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
	.nb_seg_max = IGB_TX_MAX_SEG,
	.nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
};
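
/*
 * Sketch of how an application-side setup would honour these limits
 * (illustrative; `nb_desc` and `port_id` are hypothetical):
 *
 *	struct rte_eth_dev_info info;
 *	rte_eth_dev_info_get(port_id, &info);
 *	if (nb_desc < info.rx_desc_lim.nb_min ||
 *	    nb_desc > info.rx_desc_lim.nb_max ||
 *	    nb_desc % info.rx_desc_lim.nb_align != 0)
 *		return -EINVAL;   // ring size would be rejected by the PMD
 */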
329 
330 static const struct eth_dev_ops eth_igb_ops = {
331 	.dev_configure        = eth_igb_configure,
332 	.dev_start            = eth_igb_start,
333 	.dev_stop             = eth_igb_stop,
334 	.dev_set_link_up      = eth_igb_dev_set_link_up,
335 	.dev_set_link_down    = eth_igb_dev_set_link_down,
336 	.dev_close            = eth_igb_close,
337 	.dev_reset            = eth_igb_reset,
338 	.promiscuous_enable   = eth_igb_promiscuous_enable,
339 	.promiscuous_disable  = eth_igb_promiscuous_disable,
340 	.allmulticast_enable  = eth_igb_allmulticast_enable,
341 	.allmulticast_disable = eth_igb_allmulticast_disable,
342 	.link_update          = eth_igb_link_update,
343 	.stats_get            = eth_igb_stats_get,
344 	.xstats_get           = eth_igb_xstats_get,
345 	.xstats_get_by_id     = eth_igb_xstats_get_by_id,
346 	.xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
347 	.xstats_get_names     = eth_igb_xstats_get_names,
348 	.stats_reset          = eth_igb_stats_reset,
349 	.xstats_reset         = eth_igb_xstats_reset,
350 	.fw_version_get       = eth_igb_fw_version_get,
351 	.dev_infos_get        = eth_igb_infos_get,
352 	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
353 	.mtu_set              = eth_igb_mtu_set,
354 	.vlan_filter_set      = eth_igb_vlan_filter_set,
355 	.vlan_tpid_set        = eth_igb_vlan_tpid_set,
356 	.vlan_offload_set     = eth_igb_vlan_offload_set,
357 	.rx_queue_setup       = eth_igb_rx_queue_setup,
358 	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
359 	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
360 	.rx_queue_release     = eth_igb_rx_queue_release,
361 	.tx_queue_setup       = eth_igb_tx_queue_setup,
362 	.tx_queue_release     = eth_igb_tx_queue_release,
363 	.tx_done_cleanup      = eth_igb_tx_done_cleanup,
364 	.dev_led_on           = eth_igb_led_on,
365 	.dev_led_off          = eth_igb_led_off,
366 	.flow_ctrl_get        = eth_igb_flow_ctrl_get,
367 	.flow_ctrl_set        = eth_igb_flow_ctrl_set,
368 	.mac_addr_add         = eth_igb_rar_set,
369 	.mac_addr_remove      = eth_igb_rar_clear,
370 	.mac_addr_set         = eth_igb_default_mac_addr_set,
371 	.reta_update          = eth_igb_rss_reta_update,
372 	.reta_query           = eth_igb_rss_reta_query,
373 	.rss_hash_update      = eth_igb_rss_hash_update,
374 	.rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
375 	.flow_ops_get         = eth_igb_flow_ops_get,
376 	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
377 	.rxq_info_get         = igb_rxq_info_get,
378 	.txq_info_get         = igb_txq_info_get,
379 	.timesync_enable      = igb_timesync_enable,
380 	.timesync_disable     = igb_timesync_disable,
381 	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
382 	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
383 	.get_reg              = eth_igb_get_regs,
384 	.get_eeprom_length    = eth_igb_get_eeprom_length,
385 	.get_eeprom           = eth_igb_get_eeprom,
386 	.set_eeprom           = eth_igb_set_eeprom,
387 	.get_module_info      = eth_igb_get_module_info,
388 	.get_module_eeprom    = eth_igb_get_module_eeprom,
389 	.timesync_adjust_time = igb_timesync_adjust_time,
390 	.timesync_read_time   = igb_timesync_read_time,
391 	.timesync_write_time  = igb_timesync_write_time,
392 };
393 
394 /*
395  * dev_ops for virtual function, bare necessities for basic vf
396  * operation have been implemented
397  */
398 static const struct eth_dev_ops igbvf_eth_dev_ops = {
399 	.dev_configure        = igbvf_dev_configure,
400 	.dev_start            = igbvf_dev_start,
401 	.dev_stop             = igbvf_dev_stop,
402 	.dev_close            = igbvf_dev_close,
403 	.promiscuous_enable   = igbvf_promiscuous_enable,
404 	.promiscuous_disable  = igbvf_promiscuous_disable,
405 	.allmulticast_enable  = igbvf_allmulticast_enable,
406 	.allmulticast_disable = igbvf_allmulticast_disable,
407 	.link_update          = eth_igb_link_update,
408 	.stats_get            = eth_igbvf_stats_get,
409 	.xstats_get           = eth_igbvf_xstats_get,
410 	.xstats_get_names     = eth_igbvf_xstats_get_names,
411 	.stats_reset          = eth_igbvf_stats_reset,
412 	.xstats_reset         = eth_igbvf_stats_reset,
413 	.vlan_filter_set      = igbvf_vlan_filter_set,
414 	.dev_infos_get        = eth_igbvf_infos_get,
415 	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
416 	.rx_queue_setup       = eth_igb_rx_queue_setup,
417 	.rx_queue_release     = eth_igb_rx_queue_release,
418 	.tx_queue_setup       = eth_igb_tx_queue_setup,
419 	.tx_queue_release     = eth_igb_tx_queue_release,
420 	.tx_done_cleanup      = eth_igb_tx_done_cleanup,
421 	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
422 	.rxq_info_get         = igb_rxq_info_get,
423 	.txq_info_get         = igb_txq_info_get,
424 	.mac_addr_set         = igbvf_default_mac_addr_set,
425 	.get_reg              = igbvf_get_regs,
426 };
427 
428 /* store statistics names and its offset in stats structure */
429 struct rte_igb_xstats_name_off {
430 	char name[RTE_ETH_XSTATS_NAME_SIZE];
431 	unsigned offset;
432 };
433 
static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
	{"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
	{"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
	{"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
	{"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
	{"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
	{"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
	{"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},

	{"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
};

#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
		sizeof(rte_igb_stats_strings[0]))
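
/*
 * Note: IGB_NB_XSTATS is equivalent to RTE_DIM(rte_igb_stats_strings).
 * Application-side retrieval sketch (illustrative; `port_id` is
 * hypothetical):
 *
 *	struct rte_eth_xstat xstats[IGB_NB_XSTATS];
 *	int n = rte_eth_xstats_get(port_id, xstats, IGB_NB_XSTATS);
 *	// each xstats[i].id indexes the name table exposed through
 *	// eth_igb_xstats_get_names()
 */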

static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
	{"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
	{"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
	{"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
	{"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
};

#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
		sizeof(rte_igbvf_stats_strings[0]))


static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc != 0) {
		E1000_WRITE_REG(hw, E1000_EIMS, 1 << IGB_MSIX_OTHER_INTR_VEC);
	}

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc != 0) {
		E1000_WRITE_REG(hw, E1000_EIMC, 1 << IGB_MSIX_OTHER_INTR_VEC);
	}

	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}

static inline void
igbvf_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* only for mailbox */
	E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_FLUSH(hw);
}

/* Only for the mailbox now. If RX/TX is needed, this function should be
 * extended. */
static void
igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	/* mailbox */
	tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
	tmp |= E1000_VTIVAR_VALID;
	E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
}
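
/*
 * Worked example (illustrative): for the mailbox vector
 * (msix_vector == E1000_VTIVAR_MISC_MAILBOX == 0) the register value
 * becomes
 *
 *	tmp = (0 & E1000_VTIVAR_MISC_INTR_MASK) | E1000_VTIVAR_VALID
 *	    = 0x80
 *
 * i.e. the low bits select the MSI-X vector and bit 7 marks the IVAR
 * entry as valid.
 */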

static void
eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure VF other cause ivar */
	igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
}

static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}

static void
igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}

static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * The SMBI lock should not fail at this early stage. If it does,
	 * it is due to an improper exit of the application, so force the
	 * release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * The PHY lock should not fail at this early stage. If it
		 * does, it is due to an improper exit of the application,
		 * so force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is trickier since it is common to all ports; but
		 * the swfw_sync retries last long enough (1 s) to make it
		 * almost certain that if the lock cannot be taken, it is due
		 * to an improperly released semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}
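
/*
 * Worked example (illustrative, assuming the usual base-driver values
 * such as E1000_SWFW_PHY0_SM == 0x02): for bus function 2 the PHY
 * semaphore mask is computed as
 *
 *	mask = 0x02 << 2;   // 0x08
 *	mask <<= 2;         // 0x20, the PHY2 semaphore bit
 *
 * i.e. the extra shift skips the unrelated SW_FW_SYNC bits that sit
 * between the PHY1 and PHY2 semaphore bits.
 */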

/* Remove all ntuple filters of the device */
static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
		TAILQ_REMOVE(&filter_info->twotuple_list,
			p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;

	return 0;
}

/* Remove all flex filters of the device */
static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_flex_filter *p_flex;

	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	return 0;
}

static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	uint32_t ctrl_ext;

	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_queue_count = eth_igb_rx_queue_count;
	eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
	eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* For secondary processes, we don't initialise any further as the
	 * primary process has already done this work. Only check whether we
	 * need a different RX function. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev, pci_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state; this is important for reading the NVM
	 * and MAC address from it.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to the link
		 * being in a sleep state. Call it again; if it fails a
		 * second time, it is a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
						"store MAC addresses",
				RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to an "
					"SOL/IDER session");
	}

	/* initialize the PF if max_vfs is not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(pci_dev->intr_handle,
				   eth_igb_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(pci_dev->intr_handle);

	/* enable support interrupts */
	igb_intr_enable(eth_dev);

	eth_igb_dev_set_link_down(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct e1000_filter_info));

	TAILQ_INIT(&filter_info->flex_list);
	TAILQ_INIT(&filter_info->twotuple_list);
	TAILQ_INIT(&filter_info->fivetuple_list);

	TAILQ_INIT(&igb_filter_ntuple_list);
	TAILQ_INIT(&igb_filter_ethertype_list);
	TAILQ_INIT(&igb_filter_syn_list);
	TAILQ_INIT(&igb_filter_flex_list);
	TAILQ_INIT(&igb_filter_rss_list);
	TAILQ_INIT(&igb_flow_list);

	return 0;

err_late:
	igb_hw_control_release(hw);

	return error;
}

static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_igb_close(eth_dev);

	return 0;
}

/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;
	struct rte_ether_addr *perm_addr =
		(struct rte_ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
	eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* For secondary processes, we don't initialise any further as the
	 * primary process has already done this work. Only check whether we
	 * need a different RX function. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by the PF. */
	if (rte_is_zero_ether_addr(perm_addr)) {
		rte_eth_random_addr(perm_addr->addr_bytes);
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     RTE_ETHER_ADDR_PRT_FMT,
			     RTE_ETHER_ADDR_BYTES(perm_addr));
	}

	diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
	if (diag) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return diag;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		     "mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");

	intr_handle = pci_dev->intr_handle;
	rte_intr_callback_register(intr_handle,
				   eth_igbvf_interrupt_handler, eth_dev);

	return 0;
}

static int
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	igbvf_dev_close(eth_dev);

	return 0;
}

static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igb_dev_init);
}

static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
}

static struct rte_pci_driver rte_igb_pmd = {
	.id_table = pci_id_igb_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igb_pci_probe,
	.remove = eth_igb_pci_remove,
};

static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igbvf_dev_init);
}

static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
}

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_igbvf_pmd = {
	.id_table = pci_id_igbvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_igbvf_pci_probe,
	.remove = eth_igbvf_pci_remove,
};

static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always uses the VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if ((rx_mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) ||
	    tx_mq_mode == RTE_ETH_MQ_TX_DCB ||
	    tx_mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}
	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* Check multi-queue mode.
		 * To avoid breaking software, we accept RTE_ETH_MQ_RX_NONE,
		 * as it might be used to turn off the VLAN filter.
		 */

		if (rx_mq_mode == RTE_ETH_MQ_RX_NONE ||
		    rx_mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
		} else {
			/* Only support one queue on VFs.
			 * RSS together with SRIOV is not supported.
			 */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					rx_mq_mode);
			return -EINVAL;
		}
		/* TX mode is not used here, so the mode might be ignored. */
		if (tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(WARNING, "SRIOV is active,"
					" TX mode %d is not supported."
					" Driver will behave as in mode %d.",
					tx_mq_mode, RTE_ETH_MQ_TX_VMDQ_ONLY);
		}

		/* check valid queue number */
		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" only support one queue on VFs.");
			return -EINVAL;
		}
	} else {
		/* To avoid breaking software that sets an invalid mode, only
		 * display a warning if an invalid mode is used.
		 */
		if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
		    rx_mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY &&
		    rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
			/* RSS together with VMDq is not supported */
			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				     rx_mq_mode);
			return -EINVAL;
		}

		if (tx_mq_mode != RTE_ETH_MQ_TX_NONE &&
		    tx_mq_mode != RTE_ETH_MQ_TX_VMDQ_ONLY) {
			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
					" Since txmode is meaningless in this"
					" driver, it is ignored.",
					tx_mq_mode);
		}
	}
	return 0;
}
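
/*
 * Example of a configuration that passes this check on a PF without
 * SR-IOV (illustrative; `port_id` and the queue counts are hypothetical):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
 *		.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
 *	};
 *	rte_eth_dev_configure(port_id, 4, 4, &conf);
 *
 * With SR-IOV active the same call must use a single RX/TX queue and
 * RTE_ETH_MQ_RX_NONE or RTE_ETH_MQ_RX_VMDQ_ONLY.
 */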

static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = igb_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
eth_igb_rxtx_control(struct rte_eth_dev *dev,
		     bool enable)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tctl, rctl;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	rctl = E1000_READ_REG(hw, E1000_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= E1000_TCTL_EN;
		rctl |= E1000_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~E1000_TCTL_EN;
		rctl &= ~E1000_RCTL_EN;
	}
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);
}

static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t ctrl_ext;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Power up the PHY. Needed to make the link go up */
	eth_igb_dev_set_link_up(dev);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	E1000_WRITE_REG(hw, E1000_VET,
			RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	/* Allocate the vector list */
	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for Rx interrupts */
	eth_igb_configure_msix_intr(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
			RTE_ETH_VLAN_EXTEND_MASK;
	ret = eth_igb_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
		igb_dev_clear_queues(dev);
		return ret;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always uses the VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
		(hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
		(hw->mac.type == e1000_i211)) {
		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		hw->mac.autoneg = 1;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			eth_igb_lsc_interrupt_setup(dev, TRUE);
		else
			eth_igb_lsc_interrupt_setup(dev, FALSE);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     eth_igb_interrupt_handler,
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "LSC won't be enabled because"
				     " no interrupt multiplexing is available");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		eth_igb_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled interrupts since the HW reset */
	igb_intr_enable(dev);

	/* restore all types of filters */
	igb_filter_restore(dev);

	eth_igb_rxtx_control(dev, true);
	eth_igb_link_update(dev, 0);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return -EINVAL;
}
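
/*
 * Example of the fixed-speed path above (illustrative): an application
 * requesting a forced 100 Mb/s full-duplex link would set
 *
 *	dev_conf.link_speeds = RTE_ETH_LINK_SPEED_100M |
 *			       RTE_ETH_LINK_SPEED_FIXED;
 *
 * Exactly one speed bit is set, so num_speeds == 1 and autoneg is false;
 * hw->mac.autoneg is cleared and hw->mac.forced_speed_duplex is taken
 * from the advertised value.
 */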
1407 
1408 /*********************************************************************
1409  *
1410  *  This routine disables all traffic on the adapter by issuing a
1411  *  global reset on the MAC.
1412  *
1413  **********************************************************************/
1414 static int
1415 eth_igb_stop(struct rte_eth_dev *dev)
1416 {
1417 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1418 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1419 	struct rte_eth_link link;
1420 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1421 	struct e1000_adapter *adapter =
1422 		E1000_DEV_PRIVATE(dev->data->dev_private);
1423 
1424 	if (adapter->stopped)
1425 		return 0;
1426 
1427 	eth_igb_rxtx_control(dev, false);
1428 
1429 	igb_intr_disable(dev);
1430 
1431 	/* disable intr eventfd mapping */
1432 	rte_intr_disable(intr_handle);
1433 
1434 	igb_pf_reset_hw(hw);
1435 	E1000_WRITE_REG(hw, E1000_WUC, 0);
1436 
1437 	/* Set bit for Go Link disconnect if PHY reset is not blocked */
1438 	if (hw->mac.type >= e1000_82580 &&
1439 	    (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
1440 		uint32_t phpm_reg;
1441 
1442 		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1443 		phpm_reg |= E1000_82580_PM_GO_LINKD;
1444 		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1445 	}
1446 
1447 	/* Power down the phy. Needed to make the link go Down */
1448 	eth_igb_dev_set_link_down(dev);
1449 
1450 	igb_dev_clear_queues(dev);
1451 
1452 	/* clear the recorded link status */
1453 	memset(&link, 0, sizeof(link));
1454 	rte_eth_linkstatus_set(dev, &link);
1455 
1456 	if (!rte_intr_allow_others(intr_handle))
1457 		/* resume to the default handler */
1458 		rte_intr_callback_register(intr_handle,
1459 					   eth_igb_interrupt_handler,
1460 					   (void *)dev);
1461 
1462 	/* Clean datapath event and queue/vec mapping */
1463 	rte_intr_efd_disable(intr_handle);
1464 	rte_intr_vec_list_free(intr_handle);
1465 
1466 	adapter->stopped = true;
1467 	dev->data->dev_started = 0;
1468 
1469 	return 0;
1470 }
1471 
1472 static int
1473 eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
1474 {
1475 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1476 
1477 	if (hw->phy.media_type == e1000_media_type_copper)
1478 		e1000_power_up_phy(hw);
1479 	else
1480 		e1000_power_up_fiber_serdes_link(hw);
1481 
1482 	return 0;
1483 }
1484 
1485 static int
1486 eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
1487 {
1488 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1489 
1490 	if (hw->phy.media_type == e1000_media_type_copper)
1491 		e1000_power_down_phy(hw);
1492 	else
1493 		e1000_shutdown_fiber_serdes_link(hw);
1494 
1495 	return 0;
1496 }
1497 
1498 static int
1499 eth_igb_close(struct rte_eth_dev *dev)
1500 {
1501 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1502 	struct rte_eth_link link;
1503 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1504 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1505 	struct e1000_filter_info *filter_info =
1506 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1507 	int ret;
1508 
1509 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1510 		return 0;
1511 
1512 	ret = eth_igb_stop(dev);
1513 
1514 	e1000_phy_hw_reset(hw);
1515 	igb_release_manageability(hw);
1516 	igb_hw_control_release(hw);
1517 
1518 	/* Clear bit for Go Link disconnect if PHY reset is not blocked */
1519 	if (hw->mac.type >= e1000_82580 &&
1520 	    (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
1521 		uint32_t phpm_reg;
1522 
1523 		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1524 		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1525 		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1526 	}
1527 
1528 	igb_dev_free_queues(dev);
1529 
1530 	/* Cleanup vector list */
1531 	rte_intr_vec_list_free(intr_handle);
1532 
1533 	memset(&link, 0, sizeof(link));
1534 	rte_eth_linkstatus_set(dev, &link);
1535 
1536 	/* Reset any pending lock */
1537 	igb_reset_swfw_lock(hw);
1538 
1539 	/* uninitialize PF if max_vfs not zero */
1540 	igb_pf_host_uninit(dev);
1541 
1542 	rte_intr_callback_unregister(intr_handle,
1543 				     eth_igb_interrupt_handler, dev);
1544 
1545 	/* clear the SYN filter info */
1546 	filter_info->syn_info = 0;
1547 
1548 	/* clear the ethertype filters info */
1549 	filter_info->ethertype_mask = 0;
1550 	memset(filter_info->ethertype_filters, 0,
1551 		E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
1552 
1553 	/* clear the rss filter info */
1554 	memset(&filter_info->rss_info, 0,
1555 		sizeof(struct igb_rte_flow_rss_conf));
1556 
1557 	/* remove all ntuple filters of the device */
1558 	igb_ntuple_filter_uninit(dev);
1559 
1560 	/* remove all flex filters of the device */
1561 	igb_flex_filter_uninit(dev);
1562 
1563 	/* clear all the filters list */
1564 	igb_filterlist_flush(dev);
1565 
1566 	return ret;
1567 }
1568 
1569 /*
1570  * Reset PF device.
1571  */
1572 static int
1573 eth_igb_reset(struct rte_eth_dev *dev)
1574 {
1575 	int ret;
1576 
1577 	/* When a DPDK PMD PF begins to reset the PF port, it should notify
1578 	 * all its VFs so that they stay aligned with it. The detailed
1579 	 * notification mechanism is PMD specific and currently not
1580 	 * implemented. To avoid unexpected behavior in VFs, resetting a
1581 	 * PF with SR-IOV active is currently not supported; it might be later.
1582 	 */
1583 	if (dev->data->sriov.active)
1584 		return -ENOTSUP;
1585 
1586 	ret = eth_igb_dev_uninit(dev);
1587 	if (ret)
1588 		return ret;
1589 
1590 	ret = eth_igb_dev_init(dev);
1591 
1592 	return ret;
1593 }
1594 
1595 
1596 static int
1597 igb_get_rx_buffer_size(struct e1000_hw *hw)
1598 {
1599 	uint32_t rx_buf_size;
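	/*
	 * Illustrative decode (example values, not from the code below):
	 * the registers report the packet buffer size in KB, so a raw
	 * value of 64 becomes 64 << 10 = 65536 bytes.  On 82580/i350 the
	 * 4-bit raw field is first mapped through
	 * e1000_rxpbs_adjust_82580() before the same KB-to-byte shift.
	 */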
1600 	if (hw->mac.type == e1000_82576) {
1601 		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
1602 	} else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
1603 		/* PBS needs to be translated according to a lookup table */
1604 		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
1605 		rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
1606 		rx_buf_size = (rx_buf_size << 10);
1607 	} else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
1608 		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
1609 	} else {
1610 		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
1611 	}
1612 
1613 	return rx_buf_size;
1614 }
1615 
1616 /*********************************************************************
1617  *
1618  *  Initialize the hardware
1619  *
1620  **********************************************************************/
1621 static int
1622 igb_hardware_init(struct e1000_hw *hw)
1623 {
1624 	uint32_t rx_buf_size;
1625 	int diag;
1626 
1627 	/* Let the firmware know the OS is in control */
1628 	igb_hw_control_acquire(hw);
1629 
1630 	/*
1631 	 * These parameters control the automatic generation (Tx) and
1632 	 * response (Rx) to Ethernet PAUSE frames.
1633 	 * - High water mark should allow for at least two standard size (1518)
1634 	 *   frames to be received after sending an XOFF.
1635 	 * - Low water mark works best when it is very near the high water mark.
1636 	 *   This allows the receiver to restart by sending XON when it has
1637 	 *   drained a bit. Here we use an arbitrary value of 1500 which will
1638 	 *   restart after one full frame is pulled from the buffer. There
1639 	 *   could be several smaller frames in the buffer and if so they will
1640 	 *   not trigger the XON until their total number reduces the buffer
1641 	 *   by 1500.
1642 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1643 	 */
1644 	rx_buf_size = igb_get_rx_buffer_size(hw);
1645 
1646 	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
1647 	hw->fc.low_water = hw->fc.high_water - 1500;
1648 	hw->fc.pause_time = IGB_FC_PAUSE_TIME;
1649 	hw->fc.send_xon = 1;
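
	/*
	 * Worked example with illustrative numbers: for a 64 KB Rx
	 * buffer, rx_buf_size = 65536, so high_water = 65536 -
	 * (1518 * 2) = 62500 bytes and low_water = 62500 - 1500 = 61000
	 * bytes; XOFF fires past ~61 KB of occupancy, leaving headroom
	 * for two in-flight full-size frames.
	 */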
1650 
1651 	/* Set Flow control, use the tunable location if sane */
1652 	if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
1653 		hw->fc.requested_mode = igb_fc_setting;
1654 	else
1655 		hw->fc.requested_mode = e1000_fc_none;
1656 
1657 	/* Issue a global reset */
1658 	igb_pf_reset_hw(hw);
1659 	E1000_WRITE_REG(hw, E1000_WUC, 0);
1660 
1661 	diag = e1000_init_hw(hw);
1662 	if (diag < 0)
1663 		return diag;
1664 
1665 	E1000_WRITE_REG(hw, E1000_VET,
1666 			RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
1667 	e1000_get_phy_info(hw);
1668 	e1000_check_for_link(hw);
1669 
1670 	return 0;
1671 }
1672 
1673 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */
1674 static void
1675 igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
1676 {
1677 	int pause_frames;
1678 
1679 	uint64_t old_gprc  = stats->gprc;
1680 	uint64_t old_gptc  = stats->gptc;
1681 	uint64_t old_tpr   = stats->tpr;
1682 	uint64_t old_tpt   = stats->tpt;
1683 	uint64_t old_rpthc = stats->rpthc;
1684 	uint64_t old_hgptc = stats->hgptc;
1685 
1686 	if (hw->phy.media_type == e1000_media_type_copper ||
1687 	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1688 		stats->symerrs +=
1689 		    E1000_READ_REG(hw, E1000_SYMERRS);
1690 		stats->sec += E1000_READ_REG(hw, E1000_SEC);
1691 	}
1692 
1693 	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
1694 	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
1695 	stats->scc += E1000_READ_REG(hw, E1000_SCC);
1696 	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
1697 
1698 	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
1699 	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
1700 	stats->colc += E1000_READ_REG(hw, E1000_COLC);
1701 	stats->dc += E1000_READ_REG(hw, E1000_DC);
1702 	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
1703 	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
1704 	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
1705 	/*
1706 	 * For watchdog management we need to know if we have been
1707 	 * paused during the last interval, so capture that here.
1708 	 */
1709 	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
1710 	stats->xoffrxc += pause_frames;
1711 	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
1712 	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
1713 	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
1714 	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
1715 	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
1716 	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
1717 	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
1718 	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
1719 	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
1720 	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
1721 	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
1722 	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
1723 
1724 	/* For the 64-bit byte counters the low dword must be read first. */
1725 	/* Both registers clear on the read of the high dword */
1726 
1727 	/* Workaround CRC bytes included in size, take away 4 bytes/packet */
1728 	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
1729 	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
1730 	stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
1731 	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
1732 	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
1733 	stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;
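
	/*
	 * Example of the CRC adjustment above (illustrative numbers):
	 * if 1000 good packets arrived since the last read, the byte
	 * counter is reduced by 1000 * RTE_ETHER_CRC_LEN = 4000 bytes,
	 * so gorc/gotc report frame lengths without the 4-byte FCS.
	 */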
1734 
1735 	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
1736 	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
1737 	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
1738 	stats->roc += E1000_READ_REG(hw, E1000_ROC);
1739 	stats->rjc += E1000_READ_REG(hw, E1000_RJC);
1740 
1741 	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
1742 	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
1743 
1744 	stats->tor += E1000_READ_REG(hw, E1000_TORL);
1745 	stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
1746 	stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
1747 	stats->tot += E1000_READ_REG(hw, E1000_TOTL);
1748 	stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
1749 	stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;
1750 
1751 	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
1752 	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
1753 	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
1754 	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
1755 	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
1756 	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
1757 	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
1758 	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
1759 
1760 	/* Interrupt Counts */
1761 
1762 	stats->iac += E1000_READ_REG(hw, E1000_IAC);
1763 	stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
1764 	stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
1765 	stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
1766 	stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
1767 	stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
1768 	stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
1769 	stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
1770 	stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
1771 
1772 	/* Host to Card Statistics */
1773 
1774 	stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
1775 	stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
1776 	stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
1777 	stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
1778 	stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
1779 	stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
1780 	stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
1781 	stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
1782 	stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
1783 	stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
1784 	stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
1785 	stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
1786 	stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
1787 	stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
1788 	stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
1789 	stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
1790 
1791 	stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
1792 	stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
1793 	stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
1794 	stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
1795 	stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
1796 	stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
1797 }
1798 
1799 static int
1800 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1801 {
1802 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1803 	struct e1000_hw_stats *stats =
1804 			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1805 
1806 	igb_read_stats_registers(hw, stats);
1807 
1808 	if (rte_stats == NULL)
1809 		return -EINVAL;
1810 
1811 	/* Rx Errors */
1812 	rte_stats->imissed = stats->mpc;
1813 	rte_stats->ierrors = stats->crcerrs + stats->rlec +
1814 	                     stats->rxerrc + stats->algnerrc + stats->cexterr;
1815 
1816 	/* Tx Errors */
1817 	rte_stats->oerrors = stats->ecol + stats->latecol;
1818 
1819 	rte_stats->ipackets = stats->gprc;
1820 	rte_stats->opackets = stats->gptc;
1821 	rte_stats->ibytes   = stats->gorc;
1822 	rte_stats->obytes   = stats->gotc;
1823 	return 0;
1824 }
1825 
1826 static int
1827 eth_igb_stats_reset(struct rte_eth_dev *dev)
1828 {
1829 	struct e1000_hw_stats *hw_stats =
1830 			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1831 
1832 	/* HW registers are cleared on read */
1833 	eth_igb_stats_get(dev, NULL);
1834 
1835 	/* Reset software totals */
1836 	memset(hw_stats, 0, sizeof(*hw_stats));
1837 
1838 	return 0;
1839 }
1840 
1841 static int
1842 eth_igb_xstats_reset(struct rte_eth_dev *dev)
1843 {
1844 	struct e1000_hw_stats *stats =
1845 			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1846 
1847 	/* HW registers are cleared on read */
1848 	eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);
1849 
1850 	/* Reset software totals */
1851 	memset(stats, 0, sizeof(*stats));
1852 
1853 	return 0;
1854 }
1855 
1856 static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1857 	struct rte_eth_xstat_name *xstats_names,
1858 	__rte_unused unsigned int size)
1859 {
1860 	unsigned i;
1861 
1862 	if (xstats_names == NULL)
1863 		return IGB_NB_XSTATS;
1864 
1865 	/* Note: limit checked in rte_eth_xstats_get_names() */
1866 
1867 	for (i = 0; i < IGB_NB_XSTATS; i++) {
1868 		strlcpy(xstats_names[i].name, rte_igb_stats_strings[i].name,
1869 			sizeof(xstats_names[i].name));
1870 	}
1871 
1872 	return IGB_NB_XSTATS;
1873 }
1874 
1875 static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
1876 		const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
1877 		unsigned int limit)
1878 {
1879 	unsigned int i;
1880 
1881 	if (!ids) {
1882 		if (xstats_names == NULL)
1883 			return IGB_NB_XSTATS;
1884 
1885 		for (i = 0; i < IGB_NB_XSTATS; i++)
1886 			strlcpy(xstats_names[i].name,
1887 				rte_igb_stats_strings[i].name,
1888 				sizeof(xstats_names[i].name));
1889 
1890 		return IGB_NB_XSTATS;
1891 
1892 	} else {
1893 		struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS];
1894 
1895 		eth_igb_xstats_get_names_by_id(dev, NULL, xstats_names_copy,
1896 				IGB_NB_XSTATS);
1897 
1898 		for (i = 0; i < limit; i++) {
1899 			if (ids[i] >= IGB_NB_XSTATS) {
1900 				PMD_INIT_LOG(ERR, "id value isn't valid");
1901 				return -1;
1902 			}
1903 			strcpy(xstats_names[i].name,
1904 					xstats_names_copy[ids[i]].name);
1905 		}
1906 		return limit;
1907 	}
1908 }
1909 
1910 static int
1911 eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1912 		   unsigned n)
1913 {
1914 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1915 	struct e1000_hw_stats *hw_stats =
1916 			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1917 	unsigned i;
1918 
1919 	if (n < IGB_NB_XSTATS)
1920 		return IGB_NB_XSTATS;
1921 
1922 	igb_read_stats_registers(hw, hw_stats);
1923 
1924 	/* If this is a reset xstats is NULL, and we have cleared the
1925 	 * registers by reading them.
1926 	 */
1927 	if (!xstats)
1928 		return 0;
1929 
1930 	/* Extended stats */
1931 	for (i = 0; i < IGB_NB_XSTATS; i++) {
1932 		xstats[i].id = i;
1933 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1934 			rte_igb_stats_strings[i].offset);
1935 	}
1936 
1937 	return IGB_NB_XSTATS;
1938 }
1939 
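/*
 * By-id retrieval pattern: with ids == NULL the full xstats array is
 * produced directly; otherwise the function calls itself once with
 * ids == NULL to snapshot every counter and then copies out only the
 * requested subset, validating each id against IGB_NB_XSTATS.
 */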
1940 static int
1941 eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1942 		uint64_t *values, unsigned int n)
1943 {
1944 	unsigned int i;
1945 
1946 	if (!ids) {
1947 		struct e1000_hw *hw =
1948 			E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1949 		struct e1000_hw_stats *hw_stats =
1950 			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1951 
1952 		if (n < IGB_NB_XSTATS)
1953 			return IGB_NB_XSTATS;
1954 
1955 		igb_read_stats_registers(hw, hw_stats);
1956 
1957 		/* If this is a reset xstats is NULL, and we have cleared the
1958 		 * registers by reading them.
1959 		 */
1960 		if (!values)
1961 			return 0;
1962 
1963 		/* Extended stats */
1964 		for (i = 0; i < IGB_NB_XSTATS; i++)
1965 			values[i] = *(uint64_t *)(((char *)hw_stats) +
1966 					rte_igb_stats_strings[i].offset);
1967 
1968 		return IGB_NB_XSTATS;
1969 
1970 	} else {
1971 		uint64_t values_copy[IGB_NB_XSTATS];
1972 
1973 		eth_igb_xstats_get_by_id(dev, NULL, values_copy,
1974 				IGB_NB_XSTATS);
1975 
1976 		for (i = 0; i < n; i++) {
1977 			if (ids[i] >= IGB_NB_XSTATS) {
1978 				PMD_INIT_LOG(ERR, "id value isn't valid");
1979 				return -1;
1980 			}
1981 			values[i] = values_copy[ids[i]];
1982 		}
1983 		return n;
1984 	}
1985 }
1986 
1987 static void
1988 igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
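	/*
	 * The UPDATE_VF_STAT() helper (defined earlier in this file)
	 * reads a 32-bit VF counter, accumulates the delta since the
	 * previous snapshot into the running total and stores the new
	 * snapshot, so the totals survive 32-bit register wrap-around.
	 */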
1989 {
1990 	/* Good Rx packets, include VF loopback */
1991 	UPDATE_VF_STAT(E1000_VFGPRC,
1992 	    hw_stats->last_gprc, hw_stats->gprc);
1993 
1994 	/* Good Rx octets, include VF loopback */
1995 	UPDATE_VF_STAT(E1000_VFGORC,
1996 	    hw_stats->last_gorc, hw_stats->gorc);
1997 
1998 	/* Good Tx packets, include VF loopback */
1999 	UPDATE_VF_STAT(E1000_VFGPTC,
2000 	    hw_stats->last_gptc, hw_stats->gptc);
2001 
2002 	/* Good Tx octets, include VF loopback */
2003 	UPDATE_VF_STAT(E1000_VFGOTC,
2004 	    hw_stats->last_gotc, hw_stats->gotc);
2005 
2006 	/* Rx Multicast packets */
2007 	UPDATE_VF_STAT(E1000_VFMPRC,
2008 	    hw_stats->last_mprc, hw_stats->mprc);
2009 
2010 	/* Good Rx loopback packets */
2011 	UPDATE_VF_STAT(E1000_VFGPRLBC,
2012 	    hw_stats->last_gprlbc, hw_stats->gprlbc);
2013 
2014 	/* Good Rx loopback octets */
2015 	UPDATE_VF_STAT(E1000_VFGORLBC,
2016 	    hw_stats->last_gorlbc, hw_stats->gorlbc);
2017 
2018 	/* Good Tx loopback packets */
2019 	UPDATE_VF_STAT(E1000_VFGPTLBC,
2020 	    hw_stats->last_gptlbc, hw_stats->gptlbc);
2021 
2022 	/* Good Tx loopback octets */
2023 	UPDATE_VF_STAT(E1000_VFGOTLBC,
2024 	    hw_stats->last_gotlbc, hw_stats->gotlbc);
2025 }
2026 
2027 static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2028 				     struct rte_eth_xstat_name *xstats_names,
2029 				     __rte_unused unsigned limit)
2030 {
2031 	unsigned i;
2032 
2033 	if (xstats_names != NULL)
2034 		for (i = 0; i < IGBVF_NB_XSTATS; i++) {
2035 			strlcpy(xstats_names[i].name,
2036 				rte_igbvf_stats_strings[i].name,
2037 				sizeof(xstats_names[i].name));
2038 		}
2039 	return IGBVF_NB_XSTATS;
2040 }
2041 
2042 static int
2043 eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2044 		     unsigned n)
2045 {
2046 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2047 	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
2048 			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2049 	unsigned i;
2050 
2051 	if (n < IGBVF_NB_XSTATS)
2052 		return IGBVF_NB_XSTATS;
2053 
2054 	igbvf_read_stats_registers(hw, hw_stats);
2055 
2056 	if (!xstats)
2057 		return 0;
2058 
2059 	for (i = 0; i < IGBVF_NB_XSTATS; i++) {
2060 		xstats[i].id = i;
2061 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
2062 			rte_igbvf_stats_strings[i].offset);
2063 	}
2064 
2065 	return IGBVF_NB_XSTATS;
2066 }
2067 
2068 static int
2069 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
2070 {
2071 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2072 	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
2073 			  E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2074 
2075 	igbvf_read_stats_registers(hw, hw_stats);
2076 
2077 	if (rte_stats == NULL)
2078 		return -EINVAL;
2079 
2080 	rte_stats->ipackets = hw_stats->gprc;
2081 	rte_stats->ibytes = hw_stats->gorc;
2082 	rte_stats->opackets = hw_stats->gptc;
2083 	rte_stats->obytes = hw_stats->gotc;
2084 	return 0;
2085 }
2086 
2087 static int
2088 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
2089 {
2090 	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
2091 			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2092 
2093 	/* Sync HW register to the last stats */
2094 	eth_igbvf_stats_get(dev, NULL);
2095 
2096 	/* reset running totals from gprc onward; the preceding last_* snapshots are preserved */
2097 	memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
2098 	       offsetof(struct e1000_vf_stats, gprc));
2099 
2100 	return 0;
2101 }
2102 
2103 static int
2104 eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
2105 		       size_t fw_size)
2106 {
2107 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2108 	struct e1000_fw_version fw;
2109 	int ret;
2110 
2111 	e1000_get_fw_version(hw, &fw);
2112 
2113 	switch (hw->mac.type) {
2114 	case e1000_i210:
2115 	case e1000_i211:
2116 		if (!(e1000_get_flash_presence_i210(hw))) {
2117 			ret = snprintf(fw_version, fw_size,
2118 				 "%2d.%2d-%d",
2119 				 fw.invm_major, fw.invm_minor,
2120 				 fw.invm_img_type);
2121 			break;
2122 		}
2123 		/* fall through */
2124 	default:
2125 		/* if option rom is valid, display its version too */
2126 		if (fw.or_valid) {
2127 			ret = snprintf(fw_version, fw_size,
2128 				 "%d.%d, 0x%08x, %d.%d.%d",
2129 				 fw.eep_major, fw.eep_minor, fw.etrack_id,
2130 				 fw.or_major, fw.or_build, fw.or_patch);
2131 		/* no option rom */
2132 		} else {
2133 			if (fw.etrack_id != 0x0000) {
2134 				ret = snprintf(fw_version, fw_size,
2135 					 "%d.%d, 0x%08x",
2136 					 fw.eep_major, fw.eep_minor,
2137 					 fw.etrack_id);
2138 			} else {
2139 				ret = snprintf(fw_version, fw_size,
2140 					 "%d.%d.%d",
2141 					 fw.eep_major, fw.eep_minor,
2142 					 fw.eep_build);
2143 			}
2144 		}
2145 		break;
2146 	}
2147 	if (ret < 0)
2148 		return -EINVAL;
2149 
2150 	ret += 1; /* add the size of '\0' */
2151 	if (fw_size < (size_t)ret)
2152 		return ret;
2153 	else
2154 		return 0;
2155 }
2156 
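/*
 * Usage sketch, not part of the original driver: how an application
 * would consume the size negotiation implemented above through the
 * generic ethdev API.  The function and its buffer size are
 * illustrative only; port_id is hypothetical.
 */
static void __rte_unused
igb_fw_version_usage_sketch(uint16_t port_id)
{
	char fw[32];
	int len;

	/* 0 on success; a positive return is the buffer size needed */
	len = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
	if (len == 0)
		PMD_DRV_LOG(INFO, "firmware: %s", fw);
	else if (len > 0)
		PMD_DRV_LOG(INFO, "need %d bytes for the version string", len);
}
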
2157 static int
2158 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2159 {
2160 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2161 
2162 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
2163 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
2164 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
2165 	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
2166 	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
2167 				    dev_info->rx_queue_offload_capa;
2168 	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
2169 	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
2170 				    dev_info->tx_queue_offload_capa;
2171 
2172 	switch (hw->mac.type) {
2173 	case e1000_82575:
2174 		dev_info->max_rx_queues = 4;
2175 		dev_info->max_tx_queues = 4;
2176 		dev_info->max_vmdq_pools = 0;
2177 		break;
2178 
2179 	case e1000_82576:
2180 		dev_info->max_rx_queues = 16;
2181 		dev_info->max_tx_queues = 16;
2182 		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
2183 		dev_info->vmdq_queue_num = 16;
2184 		break;
2185 
2186 	case e1000_82580:
2187 		dev_info->max_rx_queues = 8;
2188 		dev_info->max_tx_queues = 8;
2189 		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
2190 		dev_info->vmdq_queue_num = 8;
2191 		break;
2192 
2193 	case e1000_i350:
2194 		dev_info->max_rx_queues = 8;
2195 		dev_info->max_tx_queues = 8;
2196 		dev_info->max_vmdq_pools = RTE_ETH_8_POOLS;
2197 		dev_info->vmdq_queue_num = 8;
2198 		break;
2199 
2200 	case e1000_i354:
2201 		dev_info->max_rx_queues = 8;
2202 		dev_info->max_tx_queues = 8;
2203 		break;
2204 
2205 	case e1000_i210:
2206 		dev_info->max_rx_queues = 4;
2207 		dev_info->max_tx_queues = 4;
2208 		dev_info->max_vmdq_pools = 0;
2209 		break;
2210 
2211 	case e1000_i211:
2212 		dev_info->max_rx_queues = 2;
2213 		dev_info->max_tx_queues = 2;
2214 		dev_info->max_vmdq_pools = 0;
2215 		break;
2216 
2217 	default:
2218 		/* Should not happen */
2219 		return -EINVAL;
2220 	}
2221 	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
2222 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
2223 	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
2224 
2225 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
2226 		.rx_thresh = {
2227 			.pthresh = IGB_DEFAULT_RX_PTHRESH,
2228 			.hthresh = IGB_DEFAULT_RX_HTHRESH,
2229 			.wthresh = IGB_DEFAULT_RX_WTHRESH,
2230 		},
2231 		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
2232 		.rx_drop_en = 0,
2233 		.offloads = 0,
2234 	};
2235 
2236 	dev_info->default_txconf = (struct rte_eth_txconf) {
2237 		.tx_thresh = {
2238 			.pthresh = IGB_DEFAULT_TX_PTHRESH,
2239 			.hthresh = IGB_DEFAULT_TX_HTHRESH,
2240 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
2241 		},
2242 		.offloads = 0,
2243 	};
2244 
2245 	dev_info->rx_desc_lim = rx_desc_lim;
2246 	dev_info->tx_desc_lim = tx_desc_lim;
2247 
2248 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
2249 			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
2250 			RTE_ETH_LINK_SPEED_1G;
2251 
2252 	dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
2253 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2254 
2255 	return 0;
2256 }
2257 
2258 static const uint32_t *
2259 eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
2260 {
2261 	static const uint32_t ptypes[] = {
2262 		/* refers to igb_rxd_pkt_info_to_pkt_type() */
2263 		RTE_PTYPE_L2_ETHER,
2264 		RTE_PTYPE_L3_IPV4,
2265 		RTE_PTYPE_L3_IPV4_EXT,
2266 		RTE_PTYPE_L3_IPV6,
2267 		RTE_PTYPE_L3_IPV6_EXT,
2268 		RTE_PTYPE_L4_TCP,
2269 		RTE_PTYPE_L4_UDP,
2270 		RTE_PTYPE_L4_SCTP,
2271 		RTE_PTYPE_TUNNEL_IP,
2272 		RTE_PTYPE_INNER_L3_IPV6,
2273 		RTE_PTYPE_INNER_L3_IPV6_EXT,
2274 		RTE_PTYPE_INNER_L4_TCP,
2275 		RTE_PTYPE_INNER_L4_UDP,
2276 		RTE_PTYPE_UNKNOWN
2277 	};
2278 
2279 	if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
2280 	    dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
2281 		return ptypes;
2282 	return NULL;
2283 }
2284 
2285 static int
2286 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2287 {
2288 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2289 
2290 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
2291 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
2292 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
2293 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
2294 				RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
2295 				RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
2296 				RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
2297 				RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
2298 				RTE_ETH_TX_OFFLOAD_TCP_TSO;
2299 	switch (hw->mac.type) {
2300 	case e1000_vfadapt:
2301 		dev_info->max_rx_queues = 2;
2302 		dev_info->max_tx_queues = 2;
2303 		break;
2304 	case e1000_vfadapt_i350:
2305 		dev_info->max_rx_queues = 1;
2306 		dev_info->max_tx_queues = 1;
2307 		break;
2308 	default:
2309 		/* Should not happen */
2310 		return -EINVAL;
2311 	}
2312 
2313 	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
2314 	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
2315 				    dev_info->rx_queue_offload_capa;
2316 	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
2317 	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
2318 				    dev_info->tx_queue_offload_capa;
2319 
2320 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
2321 		.rx_thresh = {
2322 			.pthresh = IGB_DEFAULT_RX_PTHRESH,
2323 			.hthresh = IGB_DEFAULT_RX_HTHRESH,
2324 			.wthresh = IGB_DEFAULT_RX_WTHRESH,
2325 		},
2326 		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
2327 		.rx_drop_en = 0,
2328 		.offloads = 0,
2329 	};
2330 
2331 	dev_info->default_txconf = (struct rte_eth_txconf) {
2332 		.tx_thresh = {
2333 			.pthresh = IGB_DEFAULT_TX_PTHRESH,
2334 			.hthresh = IGB_DEFAULT_TX_HTHRESH,
2335 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
2336 		},
2337 		.offloads = 0,
2338 	};
2339 
2340 	dev_info->rx_desc_lim = rx_desc_lim;
2341 	dev_info->tx_desc_lim = tx_desc_lim;
2342 
2343 	return 0;
2344 }
2345 
2346 /* return 0 means link status changed, -1 means not changed */
2347 static int
2348 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2349 {
2350 	struct e1000_hw *hw =
2351 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2352 	struct rte_eth_link link;
2353 	int link_check, count;
2354 
2355 	link_check = 0;
2356 	hw->mac.get_link_status = 1;
2357 
2358 	/* possible wait-to-complete in up to 9 seconds */
2359 	for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
2360 		/* Read the real link status */
2361 		switch (hw->phy.media_type) {
2362 		case e1000_media_type_copper:
2363 			/* Do the work to read phy */
2364 			e1000_check_for_link(hw);
2365 			link_check = !hw->mac.get_link_status;
2366 			break;
2367 
2368 		case e1000_media_type_fiber:
2369 			e1000_check_for_link(hw);
2370 			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2371 				      E1000_STATUS_LU);
2372 			break;
2373 
2374 		case e1000_media_type_internal_serdes:
2375 			e1000_check_for_link(hw);
2376 			link_check = hw->mac.serdes_has_link;
2377 			break;
2378 
2379 		/* VF device is type_unknown */
2380 		case e1000_media_type_unknown:
2381 			eth_igbvf_link_update(hw);
2382 			link_check = !hw->mac.get_link_status;
2383 			break;
2384 
2385 		default:
2386 			break;
2387 		}
2388 		if (link_check || wait_to_complete == 0)
2389 			break;
2390 		rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
2391 	}
2392 	memset(&link, 0, sizeof(link));
2393 
2394 	/* Now we check if a transition has happened */
2395 	if (link_check) {
2396 		uint16_t duplex, speed;
2397 		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
2398 		link.link_duplex = (duplex == FULL_DUPLEX) ?
2399 				RTE_ETH_LINK_FULL_DUPLEX :
2400 				RTE_ETH_LINK_HALF_DUPLEX;
2401 		link.link_speed = speed;
2402 		link.link_status = RTE_ETH_LINK_UP;
2403 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2404 				RTE_ETH_LINK_SPEED_FIXED);
2405 	} else {
2406 		link.link_speed = 0;
2407 		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
2408 		link.link_status = RTE_ETH_LINK_DOWN;
2409 		link.link_autoneg = RTE_ETH_LINK_FIXED;
2410 	}
2411 
2412 	return rte_eth_linkstatus_set(dev, &link);
2413 }
2414 
2415 /*
2416  * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
2417  * For ASF and Pass Through versions of f/w this means
2418  * that the driver is loaded.
2419  */
2420 static void
2421 igb_hw_control_acquire(struct e1000_hw *hw)
2422 {
2423 	uint32_t ctrl_ext;
2424 
2425 	/* Let firmware know the driver has taken over */
2426 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2427 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2428 }
2429 
2430 /*
2431  * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
2432  * For ASF and Pass Through versions of f/w this means that the
2433  * driver is no longer loaded.
2434  */
2435 static void
2436 igb_hw_control_release(struct e1000_hw *hw)
2437 {
2438 	uint32_t ctrl_ext;
2439 
2440 	/* Let firmware take over control of h/w */
2441 	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2442 	E1000_WRITE_REG(hw, E1000_CTRL_EXT,
2443 			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2444 }
2445 
2446 /*
2447  * Bit of a misnomer; what this really means is
2448  * to enable OS management of the system... aka
2449  * to disable special hardware management features.
2450  */
2451 static void
2452 igb_init_manageability(struct e1000_hw *hw)
2453 {
2454 	if (e1000_enable_mng_pass_thru(hw)) {
2455 		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
2456 		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2457 
2458 		/* disable hardware interception of ARP */
2459 		manc &= ~(E1000_MANC_ARP_EN);
2460 
2461 		/* enable receiving management packets to the host */
2462 		manc |= E1000_MANC_EN_MNG2HOST;
2463 		manc2h |= 1 << 5;  /* Mng Port 623 */
2464 		manc2h |= 1 << 6;  /* Mng Port 664 */
2465 		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
2466 		E1000_WRITE_REG(hw, E1000_MANC, manc);
2467 	}
2468 }
2469 
2470 static void
2471 igb_release_manageability(struct e1000_hw *hw)
2472 {
2473 	if (e1000_enable_mng_pass_thru(hw)) {
2474 		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2475 
2476 		manc |= E1000_MANC_ARP_EN;
2477 		manc &= ~E1000_MANC_EN_MNG2HOST;
2478 
2479 		E1000_WRITE_REG(hw, E1000_MANC, manc);
2480 	}
2481 }
2482 
2483 static int
2484 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
2485 {
2486 	struct e1000_hw *hw =
2487 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2488 	uint32_t rctl;
2489 
2490 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2491 	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2492 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2493 
2494 	return 0;
2495 }
2496 
2497 static int
2498 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
2499 {
2500 	struct e1000_hw *hw =
2501 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2502 	uint32_t rctl;
2503 
2504 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2505 	rctl &= (~E1000_RCTL_UPE);
2506 	if (dev->data->all_multicast == 1)
2507 		rctl |= E1000_RCTL_MPE;
2508 	else
2509 		rctl &= (~E1000_RCTL_MPE);
2510 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2511 
2512 	return 0;
2513 }
2514 
2515 static int
2516 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
2517 {
2518 	struct e1000_hw *hw =
2519 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2520 	uint32_t rctl;
2521 
2522 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2523 	rctl |= E1000_RCTL_MPE;
2524 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2525 
2526 	return 0;
2527 }
2528 
2529 static int
2530 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
2531 {
2532 	struct e1000_hw *hw =
2533 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2534 	uint32_t rctl;
2535 
2536 	if (dev->data->promiscuous == 1)
2537 		return 0; /* must remain in all_multicast mode */
2538 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2539 	rctl &= (~E1000_RCTL_MPE);
2540 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2541 
2542 	return 0;
2543 }
2544 
2545 static int
2546 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2547 {
2548 	struct e1000_hw *hw =
2549 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2550 	struct e1000_vfta *shadow_vfta =
2551 		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2552 	uint32_t vfta;
2553 	uint32_t vid_idx;
2554 	uint32_t vid_bit;
2555 
2556 	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
2557 			      E1000_VFTA_ENTRY_MASK);
2558 	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
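	/*
	 * Worked example: for vlan_id 100, vid_idx = 100 >> 5 = 3 and
	 * vid_bit = 1 << (100 & 0x1f) = 0x10, i.e. bit 4 of the fourth
	 * 32-bit VFTA entry controls this VLAN.
	 */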
2559 	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
2560 	if (on)
2561 		vfta |= vid_bit;
2562 	else
2563 		vfta &= ~vid_bit;
2564 	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
2565 
2566 	/* update local VFTA copy */
2567 	shadow_vfta->vfta[vid_idx] = vfta;
2568 
2569 	return 0;
2570 }
2571 
2572 static int
2573 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
2574 		      enum rte_vlan_type vlan_type,
2575 		      uint16_t tpid)
2576 {
2577 	struct e1000_hw *hw =
2578 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2579 	uint32_t reg, qinq;
2580 
2581 	qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
2582 	qinq &= E1000_CTRL_EXT_EXT_VLAN;
2583 
2584 	/* only outer TPID of double VLAN can be configured */
2585 	if (qinq && vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
2586 		reg = E1000_READ_REG(hw, E1000_VET);
2587 		reg = (reg & (~E1000_VET_VET_EXT)) |
2588 			((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
2589 		E1000_WRITE_REG(hw, E1000_VET, reg);
2590 
2591 		return 0;
2592 	}
2593 
2594 	/* all other TPID values are read-only */
2595 	PMD_DRV_LOG(ERR, "Not supported");
2596 
2597 	return -ENOTSUP;
2598 }
2599 
2600 static void
2601 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
2602 {
2603 	struct e1000_hw *hw =
2604 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2605 	uint32_t reg;
2606 
2607 	/* Filter Table Disable */
2608 	reg = E1000_READ_REG(hw, E1000_RCTL);
2609 	reg &= ~E1000_RCTL_CFIEN;
2610 	reg &= ~E1000_RCTL_VFE;
2611 	E1000_WRITE_REG(hw, E1000_RCTL, reg);
2612 }
2613 
2614 static void
2615 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2616 {
2617 	struct e1000_hw *hw =
2618 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2619 	struct e1000_vfta *shadow_vfta =
2620 		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2621 	uint32_t reg;
2622 	int i;
2623 
2624 	/* Filter Table Enable, CFI not used for packet acceptance */
2625 	reg = E1000_READ_REG(hw, E1000_RCTL);
2626 	reg &= ~E1000_RCTL_CFIEN;
2627 	reg |= E1000_RCTL_VFE;
2628 	E1000_WRITE_REG(hw, E1000_RCTL, reg);
2629 
2630 	/* restore VFTA table */
2631 	for (i = 0; i < IGB_VFTA_SIZE; i++)
2632 		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
2633 }
2634 
2635 static void
2636 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
2637 {
2638 	struct e1000_hw *hw =
2639 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2640 	uint32_t reg;
2641 
2642 	/* VLAN Mode Disable */
2643 	reg = E1000_READ_REG(hw, E1000_CTRL);
2644 	reg &= ~E1000_CTRL_VME;
2645 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2646 }
2647 
2648 static void
2649 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
2650 {
2651 	struct e1000_hw *hw =
2652 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2653 	uint32_t reg;
2654 
2655 	/* VLAN Mode Enable */
2656 	reg = E1000_READ_REG(hw, E1000_CTRL);
2657 	reg |= E1000_CTRL_VME;
2658 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2659 }
2660 
2661 static void
2662 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2663 {
2664 	struct e1000_hw *hw =
2665 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2666 	uint32_t reg;
2667 
2668 	/* CTRL_EXT: Extended VLAN */
2669 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2670 	reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
2671 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2672 
2673 	/* Update maximum packet length */
2674 	E1000_WRITE_REG(hw, E1000_RLPML, dev->data->mtu + E1000_ETH_OVERHEAD);
2675 }
2676 
2677 static void
2678 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2679 {
2680 	struct e1000_hw *hw =
2681 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2682 	uint32_t reg;
2683 
2684 	/* CTRL_EXT: Extended VLAN */
2685 	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2686 	reg |= E1000_CTRL_EXT_EXTEND_VLAN;
2687 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2688 
2689 	/* Update maximum packet length to allow the extra outer VLAN tag */
2690 	E1000_WRITE_REG(hw, E1000_RLPML,
2691 		dev->data->mtu + E1000_ETH_OVERHEAD + VLAN_TAG_SIZE);
2692 }
2693 
2694 static int
2695 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2696 {
2697 	struct rte_eth_rxmode *rxmode;
2698 
2699 	rxmode = &dev->data->dev_conf.rxmode;
2700 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
2701 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
2702 			igb_vlan_hw_strip_enable(dev);
2703 		else
2704 			igb_vlan_hw_strip_disable(dev);
2705 	}
2706 
2707 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
2708 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
2709 			igb_vlan_hw_filter_enable(dev);
2710 		else
2711 			igb_vlan_hw_filter_disable(dev);
2712 	}
2713 
2714 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
2715 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
2716 			igb_vlan_hw_extend_enable(dev);
2717 		else
2718 			igb_vlan_hw_extend_disable(dev);
2719 	}
2720 
2721 	return 0;
2722 }
2723 
2724 
2725 /**
2726  * It sets or clears the link status change (LSC) bit in the interrupt mask.
2727  *
2728  * @param dev
2729  *  Pointer to struct rte_eth_dev.
2730  * @param on
2731  *  Enable or Disable
2732  *
2733  * @return
2734  *  - On success, zero.
2735  *  - On failure, a negative value.
2736  */
2737 static int
2738 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2739 {
2740 	struct e1000_interrupt *intr =
2741 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2742 
2743 	if (on)
2744 		intr->mask |= E1000_ICR_LSC;
2745 	else
2746 		intr->mask &= ~E1000_ICR_LSC;
2747 
2748 	return 0;
2749 }
2750 
2751 /* It clears the interrupt causes and enables the interrupt.
2752  * It will be called only once during NIC initialization.
2753  *
2754  * @param dev
2755  *  Pointer to struct rte_eth_dev.
2756  *
2757  * @return
2758  *  - On success, zero.
2759  *  - On failure, a negative value.
2760  */
2761 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
2762 {
2763 	uint32_t mask, regval;
2764 	int ret;
2765 	struct e1000_hw *hw =
2766 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2767 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2768 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2769 	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
2770 	struct rte_eth_dev_info dev_info;
2771 
2772 	memset(&dev_info, 0, sizeof(dev_info));
2773 	ret = eth_igb_infos_get(dev, &dev_info);
2774 	if (ret != 0)
2775 		return ret;
2776 
2777 	mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift;
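	/* Illustrative value: with 8 Rx queues and rte_intr_allow_others()
	 * true (misc_shift = 1), mask = 0xff << 1 = 0x1fe, enabling EIMS
	 * bits 1-8 for the queue vectors while bit 0 stays reserved for
	 * the misc/link interrupt.
	 */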
2778 	regval = E1000_READ_REG(hw, E1000_EIMS);
2779 	E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
2780 
2781 	return 0;
2782 }
2783 
2784 /*
2785  * It reads ICR to get the interrupt causes, checks them and sets a bit flag
2786  * to update link status.
2787  *
2788  * @param dev
2789  *  Pointer to struct rte_eth_dev.
2790  *
2791  * @return
2792  *  - On success, zero.
2793  *  - On failure, a negative value.
2794  */
2795 static int
2796 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2797 {
2798 	uint32_t icr;
2799 	struct e1000_hw *hw =
2800 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2801 	struct e1000_interrupt *intr =
2802 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2803 
2804 	igb_intr_disable(dev);
2805 
2806 	/* read-on-clear nic registers here */
2807 	icr = E1000_READ_REG(hw, E1000_ICR);
2808 
2809 	intr->flags = 0;
2810 	if (icr & E1000_ICR_LSC) {
2811 		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2812 	}
2813 
2814 	if (icr & E1000_ICR_VMMB)
2815 		intr->flags |= E1000_FLAG_MAILBOX;
2816 
2817 	return 0;
2818 }
2819 
2820 /*
2821  * It executes link_update after knowing an interrupt is present.
2822  *
2823  * @param dev
2824  *  Pointer to struct rte_eth_dev.
2825  *
2826  * @return
2827  *  - On success, zero.
2828  *  - On failure, a negative value.
2829  */
2830 static int
2831 eth_igb_interrupt_action(struct rte_eth_dev *dev,
2832 			 struct rte_intr_handle *intr_handle)
2833 {
2834 	struct e1000_hw *hw =
2835 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2836 	struct e1000_interrupt *intr =
2837 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2838 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2839 	struct rte_eth_link link;
2840 	int ret;
2841 
2842 	if (intr->flags & E1000_FLAG_MAILBOX) {
2843 		igb_pf_mbx_process(dev);
2844 		intr->flags &= ~E1000_FLAG_MAILBOX;
2845 	}
2846 
2847 	igb_intr_enable(dev);
2848 	rte_intr_ack(intr_handle);
2849 
2850 	if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2851 		intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2852 
2853 		/* set get_link_status to check register later */
2854 		hw->mac.get_link_status = 1;
2855 		ret = eth_igb_link_update(dev, 0);
2856 
2857 		/* check if link has changed */
2858 		if (ret < 0)
2859 			return 0;
2860 
2861 		rte_eth_linkstatus_get(dev, &link);
2862 		if (link.link_status) {
2863 			PMD_INIT_LOG(INFO,
2864 				     " Port %d: Link Up - speed %u Mbps - %s",
2865 				     dev->data->port_id,
2866 				     (unsigned)link.link_speed,
2867 				     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2868 				     "full-duplex" : "half-duplex");
2869 		} else {
2870 			PMD_INIT_LOG(INFO, " Port %d: Link Down",
2871 				     dev->data->port_id);
2872 		}
2873 
2874 		PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2875 			     pci_dev->addr.domain,
2876 			     pci_dev->addr.bus,
2877 			     pci_dev->addr.devid,
2878 			     pci_dev->addr.function);
2879 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2880 	}
2881 
2882 	return 0;
2883 }
2884 
2885 /**
2886  * Interrupt handler which shall be registered at first.
2887  *
2888  * @param handle
2889  *  Pointer to interrupt handle.
2890  * @param param
2891  *  The address of parameter (struct rte_eth_dev *) registered before.
2892  *
2893  * @return
2894  *  void
2895  */
2896 static void
2897 eth_igb_interrupt_handler(void *param)
2898 {
2899 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2900 
2901 	eth_igb_interrupt_get_status(dev);
2902 	eth_igb_interrupt_action(dev, dev->intr_handle);
2903 }
2904 
2905 static int
2906 eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
2907 {
2908 	uint32_t eicr;
2909 	struct e1000_hw *hw =
2910 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2911 	struct e1000_interrupt *intr =
2912 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2913 
2914 	igbvf_intr_disable(hw);
2915 
2916 	/* read-on-clear nic registers here */
2917 	eicr = E1000_READ_REG(hw, E1000_EICR);
2918 	intr->flags = 0;
2919 
2920 	if (eicr == E1000_VTIVAR_MISC_MAILBOX)
2921 		intr->flags |= E1000_FLAG_MAILBOX;
2922 
2923 	return 0;
2924 }
2925 
2926 void igbvf_mbx_process(struct rte_eth_dev *dev)
2927 {
2928 	struct e1000_hw *hw =
2929 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2930 	struct e1000_mbx_info *mbx = &hw->mbx;
2931 	u32 in_msg = 0;
2932 
2933 	/* peek the message first */
2934 	in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));
2935 
2936 	/* PF reset VF event */
2937 	if (in_msg == E1000_PF_CONTROL_MSG) {
2938 		/* dummy mbx read to ack pf */
2939 		if (mbx->ops.read(hw, &in_msg, 1, 0))
2940 			return;
2941 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
2942 					     NULL);
2943 	}
2944 }
2945 
2946 static int
2947 eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle)
2948 {
2949 	struct e1000_interrupt *intr =
2950 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2951 
2952 	if (intr->flags & E1000_FLAG_MAILBOX) {
2953 		igbvf_mbx_process(dev);
2954 		intr->flags &= ~E1000_FLAG_MAILBOX;
2955 	}
2956 
2957 	igbvf_intr_enable(dev);
2958 	rte_intr_ack(intr_handle);
2959 
2960 	return 0;
2961 }
2962 
2963 static void
2964 eth_igbvf_interrupt_handler(void *param)
2965 {
2966 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2967 
2968 	eth_igbvf_interrupt_get_status(dev);
2969 	eth_igbvf_interrupt_action(dev, dev->intr_handle);
2970 }
2971 
2972 static int
2973 eth_igb_led_on(struct rte_eth_dev *dev)
2974 {
2975 	struct e1000_hw *hw;
2976 
2977 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2978 	return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2979 }
2980 
2981 static int
2982 eth_igb_led_off(struct rte_eth_dev *dev)
2983 {
2984 	struct e1000_hw *hw;
2985 
2986 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2987 	return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2988 }
2989 
2990 static int
2991 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2992 {
2993 	struct e1000_hw *hw;
2994 	uint32_t ctrl;
2995 	int tx_pause;
2996 	int rx_pause;
2997 
2998 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2999 	fc_conf->pause_time = hw->fc.pause_time;
3000 	fc_conf->high_water = hw->fc.high_water;
3001 	fc_conf->low_water = hw->fc.low_water;
3002 	fc_conf->send_xon = hw->fc.send_xon;
3003 	fc_conf->autoneg = hw->mac.autoneg;
3004 
3005 	/*
3006 	 * Return rx_pause and tx_pause status according to actual setting of
3007 	 * the TFCE and RFCE bits in the CTRL register.
3008 	 */
3009 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3010 	if (ctrl & E1000_CTRL_TFCE)
3011 		tx_pause = 1;
3012 	else
3013 		tx_pause = 0;
3014 
3015 	if (ctrl & E1000_CTRL_RFCE)
3016 		rx_pause = 1;
3017 	else
3018 		rx_pause = 0;
3019 
3020 	if (rx_pause && tx_pause)
3021 		fc_conf->mode = RTE_ETH_FC_FULL;
3022 	else if (rx_pause)
3023 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
3024 	else if (tx_pause)
3025 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
3026 	else
3027 		fc_conf->mode = RTE_ETH_FC_NONE;
3028 
3029 	return 0;
3030 }
3031 
3032 static int
3033 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3034 {
3035 	struct e1000_hw *hw;
3036 	int err;
3037 	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
3038 		e1000_fc_none,
3039 		e1000_fc_rx_pause,
3040 		e1000_fc_tx_pause,
3041 		e1000_fc_full
3042 	};
3043 	uint32_t rx_buf_size;
3044 	uint32_t max_high_water;
3045 	uint32_t rctl;
3046 	uint32_t ctrl;
3047 
3048 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3049 	if (fc_conf->autoneg != hw->mac.autoneg)
3050 		return -ENOTSUP;
3051 	rx_buf_size = igb_get_rx_buffer_size(hw);
3052 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3053 
3054 	/* At least reserve one Ethernet frame for watermark */
3055 	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
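	/*
	 * Illustrative numbers: with a 36 KB (0x9000) Rx buffer,
	 * max_high_water = 36864 - 1518 = 35346 bytes; a requested
	 * high_water above that, or below low_water, is rejected below.
	 */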
3056 	if ((fc_conf->high_water > max_high_water) ||
3057 	    (fc_conf->high_water < fc_conf->low_water)) {
3058 		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
3059 		PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
3060 		return -EINVAL;
3061 	}
3062 
3063 	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
3064 	hw->fc.pause_time     = fc_conf->pause_time;
3065 	hw->fc.high_water     = fc_conf->high_water;
3066 	hw->fc.low_water      = fc_conf->low_water;
3067 	hw->fc.send_xon	      = fc_conf->send_xon;
3068 
3069 	err = e1000_setup_link_generic(hw);
3070 	if (err == E1000_SUCCESS) {
3071 
3072 		/* check if we want to forward MAC frames - driver doesn't have native
3073 		 * capability to do that, so we'll write the registers ourselves */
3074 
3075 		rctl = E1000_READ_REG(hw, E1000_RCTL);
3076 
3077 		/* set or clear MFLCN.PMCF bit depending on configuration */
3078 		/* set or clear RCTL.PMCF bit depending on configuration */
3079 			rctl |= E1000_RCTL_PMCF;
3080 		else
3081 			rctl &= ~E1000_RCTL_PMCF;
3082 
3083 		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3084 
3085 		/*
3086 		 * check if we want to change flow control mode - driver doesn't have native
3087 		 * capability to do that, so we'll write the registers ourselves
3088 		 */
3089 		ctrl = E1000_READ_REG(hw, E1000_CTRL);
3090 
3091 		/*
3092 		 * set or clear E1000_CTRL_RFCE and E1000_CTRL_TFCE bits depending
3093 		 * on configuration
3094 		 */
3095 		switch (fc_conf->mode) {
3096 		case RTE_ETH_FC_NONE:
3097 			ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE;
3098 			break;
3099 		case RTE_ETH_FC_RX_PAUSE:
3100 			ctrl |= E1000_CTRL_RFCE;
3101 			ctrl &= ~E1000_CTRL_TFCE;
3102 			break;
3103 		case RTE_ETH_FC_TX_PAUSE:
3104 			ctrl |= E1000_CTRL_TFCE;
3105 			ctrl &= ~E1000_CTRL_RFCE;
3106 			break;
3107 		case RTE_ETH_FC_FULL:
3108 			ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
3109 			break;
3110 		default:
3111 			PMD_INIT_LOG(ERR, "invalid flow control mode");
3112 			return -EINVAL;
3113 		}
3114 
3115 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3116 
3117 		E1000_WRITE_FLUSH(hw);
3118 
3119 		return 0;
3120 	}
3121 
3122 	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
3123 	return -EIO;
3124 }
3125 
3126 #define E1000_RAH_POOLSEL_SHIFT      (18)
3127 static int
3128 eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3129 		uint32_t index, uint32_t pool)
3130 {
3131 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3132 	uint32_t rah;
3133 
3134 	e1000_rar_set(hw, mac_addr->addr_bytes, index);
3135 	rah = E1000_READ_REG(hw, E1000_RAH(index));
3136 	rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
3137 	E1000_WRITE_REG(hw, E1000_RAH(index), rah);
3138 	return 0;
3139 }
3140 
3141 static void
3142 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
3143 {
3144 	uint8_t addr[RTE_ETHER_ADDR_LEN];
3145 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3146 
3147 	memset(addr, 0, sizeof(addr));
3148 
3149 	e1000_rar_set(hw, addr, index);
3150 }
3151 
3152 static int
3153 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
3154 				struct rte_ether_addr *addr)
3155 {
3156 	eth_igb_rar_clear(dev, 0);
3157 	eth_igb_rar_set(dev, (void *)addr, 0, 0);
3158 
3159 	return 0;
3160 }
3161 /*
3162  * Virtual Function operations
3163  */
3164 static void
3165 igbvf_intr_disable(struct e1000_hw *hw)
3166 {
3167 	PMD_INIT_FUNC_TRACE();
3168 
3169 	/* Clear interrupt mask to stop from interrupts being generated */
3170 	E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
3171 
3172 	E1000_WRITE_FLUSH(hw);
3173 }
3174 
3175 static void
3176 igbvf_stop_adapter(struct rte_eth_dev *dev)
3177 {
3178 	u32 reg_val;
3179 	u16 i;
3180 	struct rte_eth_dev_info dev_info;
3181 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3182 	int ret;
3183 
3184 	memset(&dev_info, 0, sizeof(dev_info));
3185 	ret = eth_igbvf_infos_get(dev, &dev_info);
3186 	if (ret != 0)
3187 		return;
3188 
3189 	/* Clear interrupt mask to stop from interrupts being generated */
3190 	igbvf_intr_disable(hw);
3191 
3192 	/* Clear any pending interrupts, flush previous writes */
3193 	E1000_READ_REG(hw, E1000_EICR);
3194 
3195 	/* Disable the transmit unit.  Each queue must be disabled. */
3196 	for (i = 0; i < dev_info.max_tx_queues; i++)
3197 		E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
3198 
3199 	/* Disable the receive unit by stopping each queue */
3200 	for (i = 0; i < dev_info.max_rx_queues; i++) {
3201 		reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
3202 		reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
3203 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
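		/* poll until the hardware acknowledges the queue disable */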
3204 		while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
3205 			;
3206 	}
3207 
3208 	/* flush all queues disables */
3209 	E1000_WRITE_FLUSH(hw);
3210 	msec_delay(2);
3211 }
3212 
3213 static int eth_igbvf_link_update(struct e1000_hw *hw)
3214 {
3215 	struct e1000_mbx_info *mbx = &hw->mbx;
3216 	struct e1000_mac_info *mac = &hw->mac;
3217 	int ret_val = E1000_SUCCESS;
3218 
3219 	PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
3220 
3221 	/*
3222 	 * We only want to run this if a reset has been asserted.
3223 	 * In this case that could mean a link change, device reset,
3224 	 * or a virtual function reset
3225 	 */
3226 
3227 	/* If we were hit with a reset or timeout drop the link */
3228 	if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
3229 		mac->get_link_status = TRUE;
3230 
3231 	if (!mac->get_link_status)
3232 		goto out;
3233 
3234 	/* if link status is down no point in checking to see if pf is up */
3235 	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
3236 		goto out;
3237 
3238 	/* if we passed all the tests above then the link is up and we no
3239 	 * longer need to check for link */
3240 	mac->get_link_status = FALSE;
3241 
3242 out:
3243 	return ret_val;
3244 }
3245 
3246 
3247 static int
3248 igbvf_dev_configure(struct rte_eth_dev *dev)
3249 {
3250 	struct rte_eth_conf *conf = &dev->data->dev_conf;
3251 
3252 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3253 		     dev->data->port_id);
3254 
3255 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
3256 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
3257 
3258 	/*
3259 	 * VF has no ability to enable/disable HW CRC
3260 	 * Keep the persistent behavior the same as Host PF
3261 	 */
3262 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
3263 	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
3264 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3265 		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
3266 	}
3267 #else
3268 	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
3269 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3270 		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
3271 	}
3272 #endif
3273 
3274 	return 0;
3275 }
3276 
3277 static int
3278 igbvf_dev_start(struct rte_eth_dev *dev)
3279 {
3280 	struct e1000_hw *hw =
3281 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3282 	struct e1000_adapter *adapter =
3283 		E1000_DEV_PRIVATE(dev->data->dev_private);
3284 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3285 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3286 	int ret;
3287 	uint32_t intr_vector = 0;
3288 
3289 	PMD_INIT_FUNC_TRACE();
3290 
3291 	hw->mac.ops.reset_hw(hw);
3292 	adapter->stopped = 0;
3293 
3294 	/* Set all vfta */
3295 	igbvf_set_vfta_all(dev, 1);
3296 
3297 	eth_igbvf_tx_init(dev);
3298 
3299 	/* This can fail when allocating mbufs for descriptor rings */
3300 	ret = eth_igbvf_rx_init(dev);
3301 	if (ret) {
3302 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
3303 		igb_dev_clear_queues(dev);
3304 		return ret;
3305 	}
3306 
3307 	/* check and configure queue intr-vector mapping */
3308 	if (rte_intr_cap_multiple(intr_handle) &&
3309 	    dev->data->dev_conf.intr_conf.rxq) {
3310 		intr_vector = dev->data->nb_rx_queues;
3311 		ret = rte_intr_efd_enable(intr_handle, intr_vector);
3312 		if (ret)
3313 			return ret;
3314 	}
3315 
3316 	/* Allocate the vector list */
3317 	if (rte_intr_dp_is_en(intr_handle)) {
3318 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
3319 						   dev->data->nb_rx_queues)) {
3320 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
3321 				     " intr_vec", dev->data->nb_rx_queues);
3322 			return -ENOMEM;
3323 		}
3324 	}
3325 
3326 	eth_igbvf_configure_msix_intr(dev);
3327 
3328 	/* enable uio/vfio intr/eventfd mapping */
3329 	rte_intr_enable(intr_handle);
3330 
3331 	/* Re-enable interrupts that were enabled before the HW reset */
3332 	igbvf_intr_enable(dev);
3333 
3334 	return 0;
3335 }
3336 
3337 static int
3338 igbvf_dev_stop(struct rte_eth_dev *dev)
3339 {
3340 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3341 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3342 	struct e1000_adapter *adapter =
3343 		E1000_DEV_PRIVATE(dev->data->dev_private);
3344 
3345 	if (adapter->stopped)
3346 		return 0;
3347 
3348 	PMD_INIT_FUNC_TRACE();
3349 
3350 	igbvf_stop_adapter(dev);
3351 
3352 	/*
3353 	 * Clear what we set, but keep shadow_vfta so it can be
3354 	 * restored after the device starts.
3355 	 */
3356 	igbvf_set_vfta_all(dev, 0);
3357 
3358 	igb_dev_clear_queues(dev);
3359 
3360 	/* disable intr eventfd mapping */
3361 	rte_intr_disable(intr_handle);
3362 
3363 	/* Clean datapath event and queue/vec mapping */
3364 	rte_intr_efd_disable(intr_handle);
3365 
3366 	/* Clean vector list */
3367 	rte_intr_vec_list_free(intr_handle);
3368 
3369 	adapter->stopped = true;
3370 	dev->data->dev_started = 0;
3371 
3372 	return 0;
3373 }
3374 
3375 static int
3376 igbvf_dev_close(struct rte_eth_dev *dev)
3377 {
3378 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3379 	struct rte_ether_addr addr;
3380 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3381 	int ret;
3382 
3383 	PMD_INIT_FUNC_TRACE();
3384 
3385 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3386 		return 0;
3387 
3388 	e1000_reset_hw(hw);
3389 
3390 	ret = igbvf_dev_stop(dev);
3391 	if (ret != 0)
3392 		return ret;
3393 
3394 	igb_dev_free_queues(dev);
3395 
3396 	/*
3397 	 * Reprogram the RAR with a zero MAC address to ensure that
3398 	 * VF traffic goes to the PF after stop, close and detach
3399 	 * of the VF.
3400 	 */
3401 
3402 	memset(&addr, 0, sizeof(addr));
3403 	igbvf_default_mac_addr_set(dev, &addr);
3404 
3405 	rte_intr_callback_unregister(pci_dev->intr_handle,
3406 				     eth_igbvf_interrupt_handler,
3407 				     (void *)dev);
3408 
3409 	return 0;
3410 }
3411 
3412 static int
3413 igbvf_promiscuous_enable(struct rte_eth_dev *dev)
3414 {
3415 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3416 
3417 	/* Set both unicast and multicast promisc */
3418 	e1000_promisc_set_vf(hw, e1000_promisc_enabled);
3419 
3420 	return 0;
3421 }
3422 
3423 static int
3424 igbvf_promiscuous_disable(struct rte_eth_dev *dev)
3425 {
3426 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3427 
3428 	/* If in allmulticast mode, leave multicast promisc enabled */
3429 	if (dev->data->all_multicast == 1)
3430 		e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3431 	else
3432 		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
3433 
3434 	return 0;
3435 }
3436 
3437 static int
3438 igbvf_allmulticast_enable(struct rte_eth_dev *dev)
3439 {
3440 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3441 
3442 	/* In promiscuous mode, multicast promisc is already set */
3443 	if (dev->data->promiscuous == 0)
3444 		e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3445 
3446 	return 0;
3447 }
3448 
3449 static int
3450 igbvf_allmulticast_disable(struct rte_eth_dev *dev)
3451 {
3452 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3453 
3454 	/* In promiscuous mode, leave multicast promisc enabled */
3455 	if (dev->data->promiscuous == 0)
3456 		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
3457 
3458 	return 0;
3459 }
3460 
3461 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
3462 {
3463 	struct e1000_mbx_info *mbx = &hw->mbx;
3464 	uint32_t msgbuf[2];
3465 	s32 err;
3466 
3467 	/* After setting a VLAN, VLAN stripping is also enabled by the igb driver */
3468 	msgbuf[0] = E1000_VF_SET_VLAN;
3469 	msgbuf[1] = vid;
3470 	/* Setting the 8-bit field MSG INFO to TRUE indicates "add" */
3471 	if (on)
3472 		msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
3473 
3474 	err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
3475 	if (err)
3476 		goto mbx_err;
3477 
3478 	err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
3479 	if (err)
3480 		goto mbx_err;
3481 
3482 	msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
3483 	if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
3484 		err = -EINVAL;
3485 
3486 mbx_err:
3487 	return err;
3488 }
3489 
3490 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3491 {
3492 	struct e1000_hw *hw =
3493 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3494 	struct e1000_vfta *shadow_vfta =
3495 		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3496 	int i = 0, j = 0, vfta = 0, mask = 1;
3497 
3498 	for (i = 0; i < IGB_VFTA_SIZE; i++) {
3499 		vfta = shadow_vfta->vfta[i];
3500 		if (vfta) {
3501 			mask = 1;
3502 			for (j = 0; j < 32; j++) {
3503 				if (vfta & mask)
3504 					igbvf_set_vfta(hw,
3505 						(uint16_t)((i << 5) + j), on);
3506 				mask <<= 1;
3507 			}
3508 		}
3509 	}
3510 
3511 }
3512 
3513 static int
3514 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3515 {
3516 	struct e1000_hw *hw =
3517 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3518 	struct e1000_vfta *shadow_vfta =
3519 		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3520 	uint32_t vid_idx = 0;
3521 	uint32_t vid_bit = 0;
3522 	int ret = 0;
3523 
3524 	PMD_INIT_FUNC_TRACE();
3525 
3526 	/* vind is not used in the VF driver; set it to 0 (cf. ixgbe_set_vfta_vf) */
3527 	ret = igbvf_set_vfta(hw, vlan_id, !!on);
3528 	if (ret) {
3529 		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3530 		return ret;
3531 	}
3532 	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3533 	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
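	/*
	 * Editorial note: vid_idx/vid_bit are the inverse of the
	 * (i << 5) + j mapping used in igbvf_set_vfta_all() above. For
	 * example, vlan_id 1029 gives vid_idx = (1029 >> 5) & 0x7F = 32
	 * and vid_bit = 1 << (1029 & 0x1F) = 1 << 5, i.e. bit 5 of
	 * shadow_vfta->vfta[32].
	 */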
3534 
3535 	/* Save what we set and restore it after device reset */
3536 	if (on)
3537 		shadow_vfta->vfta[vid_idx] |= vid_bit;
3538 	else
3539 		shadow_vfta->vfta[vid_idx] &= ~vid_bit;
3540 
3541 	return 0;
3542 }
3543 
3544 static int
3545 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3546 {
3547 	struct e1000_hw *hw =
3548 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3549 
3550 	/* index is not used by rar_set() */
3551 	hw->mac.ops.rar_set(hw, (void *)addr, 0);
3552 	return 0;
3553 }
3554 
3555 
3556 static int
3557 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
3558 			struct rte_eth_rss_reta_entry64 *reta_conf,
3559 			uint16_t reta_size)
3560 {
3561 	uint8_t i, j, mask;
3562 	uint32_t reta, r;
3563 	uint16_t idx, shift;
3564 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3565 
3566 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
3567 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
3568 			"(%d) doesn't match the number supported by hardware "
3569 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
3570 		return -EINVAL;
3571 	}
3572 
3573 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3574 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3575 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
3576 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3577 						IGB_4_BIT_MASK);
3578 		if (!mask)
3579 			continue;
3580 		if (mask == IGB_4_BIT_MASK)
3581 			r = 0;
3582 		else
3583 			r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3584 		for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
3585 			if (mask & (0x1 << j))
3586 				reta |= reta_conf[idx].reta[shift + j] <<
3587 							(CHAR_BIT * j);
3588 			else
3589 				reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
3590 		}
3591 		E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
3592 	}
3593 
3594 	return 0;
3595 }
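
/*
 * Editorial note: a worked example of the packing above, assuming
 * reta_conf[0].mask = 0xF and reta_conf[0].reta[0..3] = {1, 2, 3, 0}.
 * Each 4-bit mask group corresponds to one 32-bit RETA register holding
 * four queue indices, one per byte, so the loop produces
 * E1000_RETA(0) = 0x00030201 for the first group.
 */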
3596 
3597 static int
3598 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
3599 		       struct rte_eth_rss_reta_entry64 *reta_conf,
3600 		       uint16_t reta_size)
3601 {
3602 	uint8_t i, j, mask;
3603 	uint32_t reta;
3604 	uint16_t idx, shift;
3605 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3606 
3607 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
3608 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
3609 			"(%d) doesn't match the number supported by hardware "
3610 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
3611 		return -EINVAL;
3612 	}
3613 
3614 	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3615 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3616 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
3617 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3618 						IGB_4_BIT_MASK);
3619 		if (!mask)
3620 			continue;
3621 		reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3622 		for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
3623 			if (mask & (0x1 << j))
3624 				reta_conf[idx].reta[shift + j] =
3625 					((reta >> (CHAR_BIT * j)) &
3626 						IGB_8_BIT_MASK);
3627 		}
3628 	}
3629 
3630 	return 0;
3631 }
3632 
3633 int
3634 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
3635 			struct rte_eth_syn_filter *filter,
3636 			bool add)
3637 {
3638 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3639 	struct e1000_filter_info *filter_info =
3640 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3641 	uint32_t synqf, rfctl;
3642 
3643 	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3644 		return -EINVAL;
3645 
3646 	synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
3647 
3648 	if (add) {
3649 		if (synqf & E1000_SYN_FILTER_ENABLE)
3650 			return -EINVAL;
3651 
3652 		synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
3653 			E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
3654 
3655 		rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3656 		if (filter->hig_pri)
3657 			rfctl |= E1000_RFCTL_SYNQFP;
3658 		else
3659 			rfctl &= ~E1000_RFCTL_SYNQFP;
3660 
3661 		E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
3662 	} else {
3663 		if (!(synqf & E1000_SYN_FILTER_ENABLE))
3664 			return -ENOENT;
3665 		synqf = 0;
3666 	}
3667 
3668 	filter_info->syn_info = synqf;
3669 	E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
3670 	E1000_WRITE_FLUSH(hw);
3671 	return 0;
3672 }
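
/*
 * Usage sketch (editorial, not part of the driver): steering TCP SYN
 * packets to RX queue 1 with high priority could look like
 *
 *	struct rte_eth_syn_filter syn = { .hig_pri = 1, .queue = 1 };
 *	int rc = eth_igb_syn_filter_set(dev, &syn, true);
 *
 * Only one SYN filter exists (SYNQF(0)), so a second add without a
 * prior delete fails with -EINVAL.
 */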
3673 
3674 /* Translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info */
3675 static inline int
3676 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
3677 			struct e1000_2tuple_filter_info *filter_info)
3678 {
3679 	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3680 		return -EINVAL;
3681 	if (filter->priority > E1000_2TUPLE_MAX_PRI)
3682 		return -EINVAL;  /* priority is out of range. */
3683 	if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
3684 		return -EINVAL;  /* flags are invalid. */
3685 
3686 	switch (filter->dst_port_mask) {
3687 	case UINT16_MAX:
3688 		filter_info->dst_port_mask = 0;
3689 		filter_info->dst_port = filter->dst_port;
3690 		break;
3691 	case 0:
3692 		filter_info->dst_port_mask = 1;
3693 		break;
3694 	default:
3695 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3696 		return -EINVAL;
3697 	}
3698 
3699 	switch (filter->proto_mask) {
3700 	case UINT8_MAX:
3701 		filter_info->proto_mask = 0;
3702 		filter_info->proto = filter->proto;
3703 		break;
3704 	case 0:
3705 		filter_info->proto_mask = 1;
3706 		break;
3707 	default:
3708 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
3709 		return -EINVAL;
3710 	}
3711 
3712 	filter_info->priority = (uint8_t)filter->priority;
3713 	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3714 		filter_info->tcp_flags = filter->tcp_flags;
3715 	else
3716 		filter_info->tcp_flags = 0;
3717 
3718 	return 0;
3719 }
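
/*
 * Editorial note: the *_mask fields stored above use the inverted,
 * hardware-oriented sense: an all-ones API mask ("compare this field")
 * is recorded as 0, and a zero API mask ("ignore this field") as 1.
 * The inject routines below rely on this convention when they set the
 * IMIR/TTQF bypass bits.
 */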
3720 
3721 static inline struct e1000_2tuple_filter *
3722 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
3723 			struct e1000_2tuple_filter_info *key)
3724 {
3725 	struct e1000_2tuple_filter *it;
3726 
3727 	TAILQ_FOREACH(it, filter_list, entries) {
3728 		if (memcmp(key, &it->filter_info,
3729 			sizeof(struct e1000_2tuple_filter_info)) == 0) {
3730 			return it;
3731 		}
3732 	}
3733 	return NULL;
3734 }
3735 
3736 /* Inject an igb 2-tuple filter into HW */
3737 static inline void
3738 igb_inject_2uple_filter(struct rte_eth_dev *dev,
3739 			   struct e1000_2tuple_filter *filter)
3740 {
3741 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3742 	uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
3743 	uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3744 	int i;
3745 
3746 	i = filter->index;
3747 	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3748 	if (filter->filter_info.dst_port_mask == 1) /* 1b means do not compare. */
3749 		imir |= E1000_IMIR_PORT_BP;
3750 	else
3751 		imir &= ~E1000_IMIR_PORT_BP;
3752 
3753 	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3754 
3755 	ttqf |= E1000_TTQF_QUEUE_ENABLE;
3756 	ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
3757 	ttqf |= (uint32_t)(filter->filter_info.proto &
3758 						E1000_TTQF_PROTOCOL_MASK);
3759 	if (filter->filter_info.proto_mask == 0)
3760 		ttqf &= ~E1000_TTQF_MASK_ENABLE;
3761 
3762 	/* TCP flag bits setting. */
3763 	if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
3764 		if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
3765 			imir_ext |= E1000_IMIREXT_CTRL_URG;
3766 		if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
3767 			imir_ext |= E1000_IMIREXT_CTRL_ACK;
3768 		if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG)
3769 			imir_ext |= E1000_IMIREXT_CTRL_PSH;
3770 		if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG)
3771 			imir_ext |= E1000_IMIREXT_CTRL_RST;
3772 		if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG)
3773 			imir_ext |= E1000_IMIREXT_CTRL_SYN;
3774 		if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG)
3775 			imir_ext |= E1000_IMIREXT_CTRL_FIN;
3776 	} else {
3777 		imir_ext |= E1000_IMIREXT_CTRL_BP;
3778 	}
3779 	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3780 	E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
3781 	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3782 }
3783 
3784 /*
3785  * igb_add_2tuple_filter - add a 2tuple filter
3786  *
3787  * @param
3788  * dev: Pointer to struct rte_eth_dev.
3789  * ntuple_filter: ponter to the filter that will be added.
3790 	 * ntuple_filter: pointer to the filter that will be added.
3791  * @return
3792  *    - On success, zero.
3793  *    - On failure, a negative value.
3794  */
3795 static int
3796 igb_add_2tuple_filter(struct rte_eth_dev *dev,
3797 			struct rte_eth_ntuple_filter *ntuple_filter)
3798 {
3799 	struct e1000_filter_info *filter_info =
3800 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3801 	struct e1000_2tuple_filter *filter;
3802 	int i, ret;
3803 
3804 	filter = rte_zmalloc("e1000_2tuple_filter",
3805 			sizeof(struct e1000_2tuple_filter), 0);
3806 	if (filter == NULL)
3807 		return -ENOMEM;
3808 
3809 	ret = ntuple_filter_to_2tuple(ntuple_filter,
3810 				      &filter->filter_info);
3811 	if (ret < 0) {
3812 		rte_free(filter);
3813 		return ret;
3814 	}
3815 	if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3816 					 &filter->filter_info) != NULL) {
3817 		PMD_DRV_LOG(ERR, "filter exists.");
3818 		rte_free(filter);
3819 		return -EEXIST;
3820 	}
3821 	filter->queue = ntuple_filter->queue;
3822 
3823 	/*
3824 	 * look for an unused 2-tuple filter index,
3825 	 * and insert the filter into the list.
3826 	 */
3827 	for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
3828 		if (!(filter_info->twotuple_mask & (1 << i))) {
3829 			filter_info->twotuple_mask |= 1 << i;
3830 			filter->index = i;
3831 			TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
3832 					  filter,
3833 					  entries);
3834 			break;
3835 		}
3836 	}
3837 	if (i >= E1000_MAX_TTQF_FILTERS) {
3838 		PMD_DRV_LOG(ERR, "2tuple filters are full.");
3839 		rte_free(filter);
3840 		return -ENOSYS;
3841 	}
3842 
3843 	igb_inject_2uple_filter(dev, filter);
3844 	return 0;
3845 }
3846 
3847 int
3848 igb_delete_2tuple_filter(struct rte_eth_dev *dev,
3849 			struct e1000_2tuple_filter *filter)
3850 {
3851 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3852 	struct e1000_filter_info *filter_info =
3853 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3854 
3855 	filter_info->twotuple_mask &= ~(1 << filter->index);
3856 	TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
3857 	rte_free(filter);
3858 
3859 	E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
3860 	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3861 	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3862 	return 0;
3863 }
3864 
3865 /*
3866  * igb_remove_2tuple_filter - remove a 2tuple filter
3867  *
3868  * @param
3869  * dev: Pointer to struct rte_eth_dev.
3870  * ntuple_filter: ponter to the filter that will be removed.
3871 	 * ntuple_filter: pointer to the filter that will be removed.
3872  * @return
3873  *    - On success, zero.
3874  *    - On failure, a negative value.
3875  */
3876 static int
3877 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
3878 			struct rte_eth_ntuple_filter *ntuple_filter)
3879 {
3880 	struct e1000_filter_info *filter_info =
3881 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3882 	struct e1000_2tuple_filter_info filter_2tuple;
3883 	struct e1000_2tuple_filter *filter;
3884 	int ret;
3885 
3886 	memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
3887 	ret = ntuple_filter_to_2tuple(ntuple_filter,
3888 				      &filter_2tuple);
3889 	if (ret < 0)
3890 		return ret;
3891 
3892 	filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3893 					 &filter_2tuple);
3894 	if (filter == NULL) {
3895 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
3896 		return -ENOENT;
3897 	}
3898 
3899 	igb_delete_2tuple_filter(dev, filter);
3900 
3901 	return 0;
3902 }
3903 
3904 /* Inject an igb flex filter into HW */
3905 static inline void
3906 igb_inject_flex_filter(struct rte_eth_dev *dev,
3907 			   struct e1000_flex_filter *filter)
3908 {
3909 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3910 	uint32_t wufc, queueing;
3911 	uint32_t reg_off;
3912 	uint8_t i, j = 0;
3913 
3914 	wufc = E1000_READ_REG(hw, E1000_WUFC);
3915 	if (filter->index < E1000_MAX_FHFT)
3916 		reg_off = E1000_FHFT(filter->index);
3917 	else
3918 		reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
3919 
3920 	E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
3921 			(E1000_WUFC_FLX0 << filter->index));
3922 	queueing = filter->filter_info.len |
3923 		(filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
3924 		(filter->filter_info.priority <<
3925 			E1000_FHFT_QUEUEING_PRIO_SHIFT);
3926 	E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
3927 			queueing);
3928 
3929 	for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
3930 		E1000_WRITE_REG(hw, reg_off,
3931 				filter->filter_info.dwords[j]);
3932 		reg_off += sizeof(uint32_t);
3933 		E1000_WRITE_REG(hw, reg_off,
3934 				filter->filter_info.dwords[++j]);
3935 		reg_off += sizeof(uint32_t);
3936 		E1000_WRITE_REG(hw, reg_off,
3937 			(uint32_t)filter->filter_info.mask[i]);
3938 		reg_off += sizeof(uint32_t) * 2;
3939 		++j;
3940 	}
3941 }
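
/*
 * Editorial note on the loop above: each 16-byte FHFT row appears to
 * hold two dwords of pattern data followed by a dword whose low byte
 * masks those eight pattern bytes, with the fourth dword reserved;
 * hence the final "sizeof(uint32_t) * 2" stride. This is a reading of
 * the register layout, not a normative statement.
 */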
3942 
3943 static inline struct e1000_flex_filter *
3944 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
3945 			struct e1000_flex_filter_info *key)
3946 {
3947 	struct e1000_flex_filter *it;
3948 
3949 	TAILQ_FOREACH(it, filter_list, entries) {
3950 		if (memcmp(key, &it->filter_info,
3951 			sizeof(struct e1000_flex_filter_info)) == 0)
3952 			return it;
3953 	}
3954 
3955 	return NULL;
3956 }
3957 
3958 /* remove a flex byte filter
3959  * @param
3960  * dev: Pointer to struct rte_eth_dev.
3961 	 * filter: pointer to the filter that will be removed.
3962  */
3963 void
3964 igb_remove_flex_filter(struct rte_eth_dev *dev,
3965 			struct e1000_flex_filter *filter)
3966 {
3967 	struct e1000_filter_info *filter_info =
3968 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3969 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3970 	uint32_t wufc, i;
3971 	uint32_t reg_off;
3972 
3973 	wufc = E1000_READ_REG(hw, E1000_WUFC);
3974 	if (filter->index < E1000_MAX_FHFT)
3975 		reg_off = E1000_FHFT(filter->index);
3976 	else
3977 		reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
3978 
3979 	for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
3980 		E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
3981 
3982 	E1000_WRITE_REG(hw, E1000_WUFC, wufc &
3983 		(~(E1000_WUFC_FLX0 << filter->index)));
3984 
3985 	filter_info->flex_mask &= ~(1 << filter->index);
3986 	TAILQ_REMOVE(&filter_info->flex_list, filter, entries);
3987 	rte_free(filter);
3988 }
3989 
3990 int
3991 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
3992 			struct igb_flex_filter *filter,
3993 			bool add)
3994 {
3995 	struct e1000_filter_info *filter_info =
3996 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3997 	struct e1000_flex_filter *flex_filter, *it;
3998 	uint32_t mask;
3999 	uint8_t shift, i;
4000 
4001 	flex_filter = rte_zmalloc("e1000_flex_filter",
4002 			sizeof(struct e1000_flex_filter), 0);
4003 	if (flex_filter == NULL)
4004 		return -ENOMEM;
4005 
4006 	flex_filter->filter_info.len = filter->len;
4007 	flex_filter->filter_info.priority = filter->priority;
4008 	memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
4009 	for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
4010 		mask = 0;
4011 		/* reverse bits in flex filter's mask*/
4012 		for (shift = 0; shift < CHAR_BIT; shift++) {
4013 			if (filter->mask[i] & (0x01 << shift))
4014 				mask |= (0x80 >> shift);
4015 		}
4016 		flex_filter->filter_info.mask[i] = mask;
4017 	}
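	/*
	 * Editorial note: the reversal above flips the bit order within
	 * each mask byte; for example, an API mask byte of 0x01 (match
	 * the first byte of the 8-byte group) becomes 0x80 in the form
	 * the FHFT mask field expects.
	 */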
4018 
4019 	it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
4020 				&flex_filter->filter_info);
4021 	if (it == NULL && !add) {
4022 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
4023 		rte_free(flex_filter);
4024 		return -ENOENT;
4025 	}
4026 	if (it != NULL && add) {
4027 		PMD_DRV_LOG(ERR, "filter exists.");
4028 		rte_free(flex_filter);
4029 		return -EEXIST;
4030 	}
4031 
4032 	if (add) {
4033 		flex_filter->queue = filter->queue;
4034 		/*
4035 		 * look for an unused flex filter index
4036 		 * and insert the filter into the list.
4037 		 */
4038 		for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
4039 			if (!(filter_info->flex_mask & (1 << i))) {
4040 				filter_info->flex_mask |= 1 << i;
4041 				flex_filter->index = i;
4042 				TAILQ_INSERT_TAIL(&filter_info->flex_list,
4043 					flex_filter,
4044 					entries);
4045 				break;
4046 			}
4047 		}
4048 		if (i >= E1000_MAX_FLEX_FILTERS) {
4049 			PMD_DRV_LOG(ERR, "flex filters are full.");
4050 			rte_free(flex_filter);
4051 			return -ENOSYS;
4052 		}
4053 
4054 		igb_inject_flex_filter(dev, flex_filter);
4055 
4056 	} else {
4057 		igb_remove_flex_filter(dev, it);
4058 		rte_free(flex_filter);
4059 	}
4060 
4061 	return 0;
4062 }
4063 
4064 /* Translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info */
4065 static inline int
4066 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
4067 			struct e1000_5tuple_filter_info *filter_info)
4068 {
4069 	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
4070 		return -EINVAL;
4071 	if (filter->priority > E1000_2TUPLE_MAX_PRI)
4072 		return -EINVAL;  /* priority is out of range. */
4073 	if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
4074 		return -EINVAL;  /* flags are invalid. */
4075 
4076 	switch (filter->dst_ip_mask) {
4077 	case UINT32_MAX:
4078 		filter_info->dst_ip_mask = 0;
4079 		filter_info->dst_ip = filter->dst_ip;
4080 		break;
4081 	case 0:
4082 		filter_info->dst_ip_mask = 1;
4083 		break;
4084 	default:
4085 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
4086 		return -EINVAL;
4087 	}
4088 
4089 	switch (filter->src_ip_mask) {
4090 	case UINT32_MAX:
4091 		filter_info->src_ip_mask = 0;
4092 		filter_info->src_ip = filter->src_ip;
4093 		break;
4094 	case 0:
4095 		filter_info->src_ip_mask = 1;
4096 		break;
4097 	default:
4098 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
4099 		return -EINVAL;
4100 	}
4101 
4102 	switch (filter->dst_port_mask) {
4103 	case UINT16_MAX:
4104 		filter_info->dst_port_mask = 0;
4105 		filter_info->dst_port = filter->dst_port;
4106 		break;
4107 	case 0:
4108 		filter_info->dst_port_mask = 1;
4109 		break;
4110 	default:
4111 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
4112 		return -EINVAL;
4113 	}
4114 
4115 	switch (filter->src_port_mask) {
4116 	case UINT16_MAX:
4117 		filter_info->src_port_mask = 0;
4118 		filter_info->src_port = filter->src_port;
4119 		break;
4120 	case 0:
4121 		filter_info->src_port_mask = 1;
4122 		break;
4123 	default:
4124 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
4125 		return -EINVAL;
4126 	}
4127 
4128 	switch (filter->proto_mask) {
4129 	case UINT8_MAX:
4130 		filter_info->proto_mask = 0;
4131 		filter_info->proto = filter->proto;
4132 		break;
4133 	case 0:
4134 		filter_info->proto_mask = 1;
4135 		break;
4136 	default:
4137 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
4138 		return -EINVAL;
4139 	}
4140 
4141 	filter_info->priority = (uint8_t)filter->priority;
4142 	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
4143 		filter_info->tcp_flags = filter->tcp_flags;
4144 	else
4145 		filter_info->tcp_flags = 0;
4146 
4147 	return 0;
4148 }
4149 
4150 static inline struct e1000_5tuple_filter *
4151 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
4152 			struct e1000_5tuple_filter_info *key)
4153 {
4154 	struct e1000_5tuple_filter *it;
4155 
4156 	TAILQ_FOREACH(it, filter_list, entries) {
4157 		if (memcmp(key, &it->filter_info,
4158 			sizeof(struct e1000_5tuple_filter_info)) == 0) {
4159 			return it;
4160 		}
4161 	}
4162 	return NULL;
4163 }
4164 
4165 /* Inject an igb 5-tuple filter into HW */
4166 static inline void
4167 igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
4168 			   struct e1000_5tuple_filter *filter)
4169 {
4170 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4171 	uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
4172 	uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
4173 	uint8_t i;
4174 
4175 	i = filter->index;
4176 	ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
4177 	if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
4178 		ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
4179 	if (filter->filter_info.dst_ip_mask == 0)
4180 		ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
4181 	if (filter->filter_info.src_port_mask == 0)
4182 		ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
4183 	if (filter->filter_info.proto_mask == 0)
4184 		ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
4185 	ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
4186 		E1000_FTQF_QUEUE_MASK;
4187 	ftqf |= E1000_FTQF_QUEUE_ENABLE;
4188 	E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
4189 	E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
4190 	E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
4191 
4192 	spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
4193 	E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
4194 
4195 	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
4196 	if (filter->filter_info.dst_port_mask == 1) /* 1b means do not compare. */
4197 		imir |= E1000_IMIR_PORT_BP;
4198 	else
4199 		imir &= ~E1000_IMIR_PORT_BP;
4200 	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
4201 
4202 	/* TCP flag bits setting. */
4203 	if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
4204 		if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
4205 			imir_ext |= E1000_IMIREXT_CTRL_URG;
4206 		if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
4207 			imir_ext |= E1000_IMIREXT_CTRL_ACK;
4208 		if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG)
4209 			imir_ext |= E1000_IMIREXT_CTRL_PSH;
4210 		if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG)
4211 			imir_ext |= E1000_IMIREXT_CTRL_RST;
4212 		if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG)
4213 			imir_ext |= E1000_IMIREXT_CTRL_SYN;
4214 		if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG)
4215 			imir_ext |= E1000_IMIREXT_CTRL_FIN;
4216 	} else {
4217 		imir_ext |= E1000_IMIREXT_CTRL_BP;
4218 	}
4219 	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
4220 	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
4221 }
4222 
4223 /*
4224  * igb_add_5tuple_filter_82576 - add a 5tuple filter
4225  *
4226  * @param
4227  * dev: Pointer to struct rte_eth_dev.
4228 	 * ntuple_filter: pointer to the filter that will be added.
4229  *
4230  * @return
4231  *    - On success, zero.
4232  *    - On failure, a negative value.
4233  */
4234 static int
4235 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
4236 			struct rte_eth_ntuple_filter *ntuple_filter)
4237 {
4238 	struct e1000_filter_info *filter_info =
4239 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4240 	struct e1000_5tuple_filter *filter;
4241 	uint8_t i;
4242 	int ret;
4243 
4244 	filter = rte_zmalloc("e1000_5tuple_filter",
4245 			sizeof(struct e1000_5tuple_filter), 0);
4246 	if (filter == NULL)
4247 		return -ENOMEM;
4248 
4249 	ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4250 					    &filter->filter_info);
4251 	if (ret < 0) {
4252 		rte_free(filter);
4253 		return ret;
4254 	}
4255 
4256 	if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4257 					 &filter->filter_info) != NULL) {
4258 		PMD_DRV_LOG(ERR, "filter exists.");
4259 		rte_free(filter);
4260 		return -EEXIST;
4261 	}
4262 	filter->queue = ntuple_filter->queue;
4263 
4264 	/*
4265 	 * look for an unused 5-tuple filter index,
4266 	 * and insert the filter into the list.
4267 	 */
4268 	for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
4269 		if (!(filter_info->fivetuple_mask & (1 << i))) {
4270 			filter_info->fivetuple_mask |= 1 << i;
4271 			filter->index = i;
4272 			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
4273 					  filter,
4274 					  entries);
4275 			break;
4276 		}
4277 	}
4278 	if (i >= E1000_MAX_FTQF_FILTERS) {
4279 		PMD_DRV_LOG(ERR, "5tuple filters are full.");
4280 		rte_free(filter);
4281 		return -ENOSYS;
4282 	}
4283 
4284 	igb_inject_5tuple_filter_82576(dev, filter);
4285 	return 0;
4286 }
4287 
4288 int
4289 igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
4290 				struct e1000_5tuple_filter *filter)
4291 {
4292 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4293 	struct e1000_filter_info *filter_info =
4294 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4295 
4296 	filter_info->fivetuple_mask &= ~(1 << filter->index);
4297 	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
4298 	rte_free(filter);
4299 
4300 	E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
4301 			E1000_FTQF_VF_BP | E1000_FTQF_MASK);
4302 	E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
4303 	E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
4304 	E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
4305 	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
4306 	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
4307 	return 0;
4308 }
4309 
4310 /*
4311  * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
4312  *
4313  * @param
4314  * dev: Pointer to struct rte_eth_dev.
4315 	 * ntuple_filter: pointer to the filter that will be removed.
4316  *
4317  * @return
4318  *    - On success, zero.
4319  *    - On failure, a negative value.
4320  */
4321 static int
4322 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
4323 				struct rte_eth_ntuple_filter *ntuple_filter)
4324 {
4325 	struct e1000_filter_info *filter_info =
4326 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4327 	struct e1000_5tuple_filter_info filter_5tuple;
4328 	struct e1000_5tuple_filter *filter;
4329 	int ret;
4330 
4331 	memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
4332 	ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4333 					    &filter_5tuple);
4334 	if (ret < 0)
4335 		return ret;
4336 
4337 	filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4338 					 &filter_5tuple);
4339 	if (filter == NULL) {
4340 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
4341 		return -ENOENT;
4342 	}
4343 
4344 	igb_delete_5tuple_filter_82576(dev, filter);
4345 
4346 	return 0;
4347 }
4348 
4349 static int
4350 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4351 {
4352 	uint32_t rctl;
4353 	struct e1000_hw *hw;
4354 	uint32_t frame_size = mtu + E1000_ETH_OVERHEAD;
4355 
4356 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4357 
4358 #ifdef RTE_LIBRTE_82571_SUPPORT
4359 	/* XXX: not bigger than max_rx_pktlen */
4360 	if (hw->mac.type == e1000_82571)
4361 		return -ENOTSUP;
4362 #endif
4363 	/*
4364 	 * If device is started, refuse mtu that requires the support of
4365 	 * scattered packets when this feature has not been enabled before.
4366 	 */
4367 	if (dev->data->dev_started && !dev->data->scattered_rx &&
4368 	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
4369 		PMD_INIT_LOG(ERR, "Stop port first.");
4370 		return -EINVAL;
4371 	}
4372 
4373 	rctl = E1000_READ_REG(hw, E1000_RCTL);
4374 
4375 	/* switch to jumbo mode if needed */
4376 	if (mtu > RTE_ETHER_MTU)
4377 		rctl |= E1000_RCTL_LPE;
4378 	else
4379 		rctl &= ~E1000_RCTL_LPE;
4380 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
4381 
4382 	E1000_WRITE_REG(hw, E1000_RLPML, frame_size);
4383 
4384 	return 0;
4385 }
4386 
4387 /*
4388  * igb_add_del_ntuple_filter - add or delete a ntuple filter
4389  *
4390  * @param
4391  * dev: Pointer to struct rte_eth_dev.
4392  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4393  * add: if true, add filter, if false, remove filter
4394  *
4395  * @return
4396  *    - On success, zero.
4397  *    - On failure, a negative value.
4398  */
4399 int
4400 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
4401 			struct rte_eth_ntuple_filter *ntuple_filter,
4402 			bool add)
4403 {
4404 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4405 	int ret;
4406 
4407 	switch (ntuple_filter->flags) {
4408 	case RTE_5TUPLE_FLAGS:
4409 	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4410 		if (hw->mac.type != e1000_82576)
4411 			return -ENOTSUP;
4412 		if (add)
4413 			ret = igb_add_5tuple_filter_82576(dev,
4414 							  ntuple_filter);
4415 		else
4416 			ret = igb_remove_5tuple_filter_82576(dev,
4417 							     ntuple_filter);
4418 		break;
4419 	case RTE_2TUPLE_FLAGS:
4420 	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4421 		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
4422 			hw->mac.type != e1000_i210 &&
4423 			hw->mac.type != e1000_i211)
4424 			return -ENOTSUP;
4425 		if (add)
4426 			ret = igb_add_2tuple_filter(dev, ntuple_filter);
4427 		else
4428 			ret = igb_remove_2tuple_filter(dev, ntuple_filter);
4429 		break;
4430 	default:
4431 		ret = -EINVAL;
4432 		break;
4433 	}
4434 
4435 	return ret;
4436 }
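
/*
 * Editorial summary of the dispatch above: 5-tuple filters
 * (FTQF/DAQF/SAQF/SPQF) are implemented only for 82576, while 2-tuple
 * filters (TTQF/IMIR) cover 82580, i350, i210 and i211. Any other flag
 * combination is rejected with -EINVAL.
 */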
4437 
4438 static inline int
4439 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
4440 			uint16_t ethertype)
4441 {
4442 	int i;
4443 
4444 	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4445 		if (filter_info->ethertype_filters[i].ethertype == ethertype &&
4446 		    (filter_info->ethertype_mask & (1 << i)))
4447 			return i;
4448 	}
4449 	return -1;
4450 }
4451 
4452 static inline int
4453 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
4454 			uint16_t ethertype, uint32_t etqf)
4455 {
4456 	int i;
4457 
4458 	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4459 		if (!(filter_info->ethertype_mask & (1 << i))) {
4460 			filter_info->ethertype_mask |= 1 << i;
4461 			filter_info->ethertype_filters[i].ethertype = ethertype;
4462 			filter_info->ethertype_filters[i].etqf = etqf;
4463 			return i;
4464 		}
4465 	}
4466 	return -1;
4467 }
4468 
4469 int
4470 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
4471 			uint8_t idx)
4472 {
4473 	if (idx >= E1000_MAX_ETQF_FILTERS)
4474 		return -1;
4475 	filter_info->ethertype_mask &= ~(1 << idx);
4476 	filter_info->ethertype_filters[idx].ethertype = 0;
4477 	filter_info->ethertype_filters[idx].etqf = 0;
4478 	return idx;
4479 }
4480 
4481 
4482 int
4483 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
4484 			struct rte_eth_ethertype_filter *filter,
4485 			bool add)
4486 {
4487 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4488 	struct e1000_filter_info *filter_info =
4489 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4490 	uint32_t etqf = 0;
4491 	int ret;
4492 
4493 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
4494 		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
4495 		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4496 			" ethertype filter.", filter->ether_type);
4497 		return -EINVAL;
4498 	}
4499 
4500 	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4501 		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4502 		return -EINVAL;
4503 	}
4504 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4505 		PMD_DRV_LOG(ERR, "drop option is unsupported.");
4506 		return -EINVAL;
4507 	}
4508 
4509 	ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
4510 	if (ret >= 0 && add) {
4511 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4512 			    filter->ether_type);
4513 		return -EEXIST;
4514 	}
4515 	if (ret < 0 && !add) {
4516 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4517 			    filter->ether_type);
4518 		return -ENOENT;
4519 	}
4520 
4521 	if (add) {
4522 		etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
4523 		etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
4524 		etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
4525 		ret = igb_ethertype_filter_insert(filter_info,
4526 				filter->ether_type, etqf);
4527 		if (ret < 0) {
4528 			PMD_DRV_LOG(ERR, "ethertype filters are full.");
4529 			return -ENOSYS;
4530 		}
4531 	} else {
4532 		ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
4533 		if (ret < 0)
4534 			return -ENOSYS;
4535 	}
4536 	E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
4537 	E1000_WRITE_FLUSH(hw);
4538 
4539 	return 0;
4540 }
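
/*
 * Usage sketch (editorial, hypothetical values): adding a filter for
 * PTP over Ethernet (ether type 0x88F7) steered to queue 2 would set
 * the type in the ETQF ethertype field, the queue in the queue field,
 * and both enable bits:
 *
 *	struct rte_eth_ethertype_filter f = {
 *		.ether_type = 0x88F7, .flags = 0, .queue = 2 };
 *	int rc = igb_add_del_ethertype_filter(dev, &f, true);
 */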
4541 
4542 static int
4543 eth_igb_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
4544 		     const struct rte_flow_ops **ops)
4545 {
4546 	*ops = &igb_flow_ops;
4547 	return 0;
4548 }
4549 
4550 static int
4551 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
4552 			 struct rte_ether_addr *mc_addr_set,
4553 			 uint32_t nb_mc_addr)
4554 {
4555 	struct e1000_hw *hw;
4556 
4557 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4558 	e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
4559 	return 0;
4560 }
4561 
4562 static uint64_t
4563 igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
4564 {
4565 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4566 	uint64_t systime_cycles;
4567 
4568 	switch (hw->mac.type) {
4569 	case e1000_i210:
4570 	case e1000_i211:
4571 		/*
4572 		 * Need to read System Time Residue Register to be able
4573 		 * to read the other two registers.
4574 		 */
4575 		E1000_READ_REG(hw, E1000_SYSTIMR);
4576 		/* SYSTIML stores ns and SYSTIMH stores seconds. */
4577 		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4578 		systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4579 				* NSEC_PER_SEC;
4580 		break;
4581 	case e1000_82580:
4582 	case e1000_i350:
4583 	case e1000_i354:
4584 		/*
4585 		 * Need to read System Time Residue Register to be able
4586 		 * to read the other two registers.
4587 		 */
4588 		E1000_READ_REG(hw, E1000_SYSTIMR);
4589 		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4590 		/* Only the 8 LSB are valid. */
4591 		systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
4592 				& 0xff) << 32;
4593 		break;
4594 	default:
4595 		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4596 		systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4597 				<< 32;
4598 		break;
4599 	}
4600 
4601 	return systime_cycles;
4602 }
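
/*
 * Editorial note: on i210/i211 the two registers split nanoseconds and
 * seconds, so e.g. SYSTIML = 500000000 with SYSTIMH = 3 yields
 * 3 * NSEC_PER_SEC + 500000000 = 3500000000 cycles (one cycle is one
 * nanosecond there). On 82580/i350/i354 the value is instead a single
 * 40-bit counter spread across the two registers.
 */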
4603 
4604 static uint64_t
4605 igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4606 {
4607 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4608 	uint64_t rx_tstamp_cycles;
4609 
4610 	switch (hw->mac.type) {
4611 	case e1000_i210:
4612 	case e1000_i211:
4613 		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
4614 		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4615 		rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4616 				* NSEC_PER_SEC;
4617 		break;
4618 	case e1000_82580:
4619 	case e1000_i350:
4620 	case e1000_i354:
4621 		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4622 		/* Only the 8 LSB are valid. */
4623 		rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH)
4624 				& 0xff) << 32;
4625 		break;
4626 	default:
4627 		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4628 		rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4629 				<< 32;
4630 		break;
4631 	}
4632 
4633 	return rx_tstamp_cycles;
4634 }
4635 
4636 static uint64_t
4637 igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4638 {
4639 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4640 	uint64_t tx_tstamp_cycles;
4641 
4642 	switch (hw->mac.type) {
4643 	case e1000_i210:
4644 	case e1000_i211:
4645 		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
4646 		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4647 		tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4648 				* NSEC_PER_SEC;
4649 		break;
4650 	case e1000_82580:
4651 	case e1000_i350:
4652 	case e1000_i354:
4653 		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4654 		/* Only the 8 LSB are valid. */
4655 		tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH)
4656 				& 0xff) << 32;
4657 		break;
4658 	default:
4659 		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4660 		tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4661 				<< 32;
4662 		break;
4663 	}
4664 
4665 	return tx_tstamp_cycles;
4666 }
4667 
4668 static void
4669 igb_start_timecounters(struct rte_eth_dev *dev)
4670 {
4671 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4672 	struct e1000_adapter *adapter = dev->data->dev_private;
4673 	uint32_t incval = 1;
4674 	uint32_t shift = 0;
4675 	uint64_t mask = E1000_CYCLECOUNTER_MASK;
4676 
4677 	switch (hw->mac.type) {
4678 	case e1000_82580:
4679 	case e1000_i350:
4680 	case e1000_i354:
4681 		/* 32 LSB bits + 8 MSB bits = 40 bits */
4682 		mask = (1ULL << 40) - 1;
4683 		/* fall-through */
4684 	case e1000_i210:
4685 	case e1000_i211:
4686 		/*
4687 		 * Start incrementing the register
4688 		 * used to timestamp PTP packets.
4689 		 */
4690 		E1000_WRITE_REG(hw, E1000_TIMINCA, incval);
4691 		break;
4692 	case e1000_82576:
4693 		incval = E1000_INCVALUE_82576;
4694 		shift = IGB_82576_TSYNC_SHIFT;
4695 		E1000_WRITE_REG(hw, E1000_TIMINCA,
4696 				E1000_INCPERIOD_82576 | incval);
4697 		break;
4698 	default:
4699 		/* Not supported */
4700 		return;
4701 	}
4702 
4703 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4704 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4705 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4706 
4707 	adapter->systime_tc.cc_mask = mask;
4708 	adapter->systime_tc.cc_shift = shift;
4709 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4710 
4711 	adapter->rx_tstamp_tc.cc_mask = mask;
4712 	adapter->rx_tstamp_tc.cc_shift = shift;
4713 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4714 
4715 	adapter->tx_tstamp_tc.cc_mask = mask;
4716 	adapter->tx_tstamp_tc.cc_shift = shift;
4717 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4718 }
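
/*
 * Editorial note: for 82576, programming an increment period of 16 ns
 * with INCVALUE = 16 << 16 makes SYSTIM advance by 2^16 counts per
 * nanosecond, which is why cc_shift is IGB_82576_TSYNC_SHIFT (16):
 * the timecounter shifts those fractional bits back out when
 * converting cycles to nanoseconds. This is a reading of the constants
 * above, not a datasheet quote.
 */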
4719 
4720 static int
4721 igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4722 {
4723 	struct e1000_adapter *adapter = dev->data->dev_private;
4724 
4725 	adapter->systime_tc.nsec += delta;
4726 	adapter->rx_tstamp_tc.nsec += delta;
4727 	adapter->tx_tstamp_tc.nsec += delta;
4728 
4729 	return 0;
4730 }
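
/*
 * Editorial note: the adjustment above is purely software. It offsets
 * the three timecounters rather than touching SYSTIM, so the delta is
 * folded in during later cycles-to-nanoseconds conversions.
 */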
4731 
4732 static int
4733 igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4734 {
4735 	uint64_t ns;
4736 	struct e1000_adapter *adapter = dev->data->dev_private;
4737 
4738 	ns = rte_timespec_to_ns(ts);
4739 
4740 	/* Set the timecounters to a new value. */
4741 	adapter->systime_tc.nsec = ns;
4742 	adapter->rx_tstamp_tc.nsec = ns;
4743 	adapter->tx_tstamp_tc.nsec = ns;
4744 
4745 	return 0;
4746 }
4747 
4748 static int
4749 igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4750 {
4751 	uint64_t ns, systime_cycles;
4752 	struct e1000_adapter *adapter = dev->data->dev_private;
4753 
4754 	systime_cycles = igb_read_systime_cyclecounter(dev);
4755 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4756 	*ts = rte_ns_to_timespec(ns);
4757 
4758 	return 0;
4759 }
4760 
4761 static int
4762 igb_timesync_enable(struct rte_eth_dev *dev)
4763 {
4764 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4765 	uint32_t tsync_ctl;
4766 	uint32_t tsauxc;
4767 
4768 	/* Stop the timesync system time. */
4769 	E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0);
4770 	/* Reset the timesync system time value. */
4771 	switch (hw->mac.type) {
4772 	case e1000_82580:
4773 	case e1000_i350:
4774 	case e1000_i354:
4775 	case e1000_i210:
4776 	case e1000_i211:
4777 		E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
4778 		/* fall-through */
4779 	case e1000_82576:
4780 		E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
4781 		E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
4782 		break;
4783 	default:
4784 		/* Not supported. */
4785 		return -ENOTSUP;
4786 	}
4787 
4788 	/* Enable system time, since it isn't on by default. */
4789 	tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
4790 	tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
4791 	E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
4792 
4793 	igb_start_timecounters(dev);
4794 
4795 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4796 	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
4797 			(RTE_ETHER_TYPE_1588 |
4798 			 E1000_ETQF_FILTER_ENABLE |
4799 			 E1000_ETQF_1588));
4800 
4801 	/* Enable timestamping of received PTP packets. */
4802 	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4803 	tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
4804 	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
4805 
4806 	/* Enable timestamping of transmitted PTP packets. */
4807 	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4808 	tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
4809 	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
4810 
4811 	return 0;
4812 }
4813 
4814 static int
4815 igb_timesync_disable(struct rte_eth_dev *dev)
4816 {
4817 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4818 	uint32_t tsync_ctl;
4819 
4820 	/* Disable timestamping of transmitted PTP packets. */
4821 	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4822 	tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
4823 	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
4824 
4825 	/* Disable timestamping of received PTP packets. */
4826 	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4827 	tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
4828 	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
4829 
4830 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4831 	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
4832 
4833 	/* Stop incrementing the System Time registers. */
4834 	E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
4835 
4836 	return 0;
4837 }
4838 
4839 static int
4840 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4841 			       struct timespec *timestamp,
4842 			       uint32_t flags __rte_unused)
4843 {
4844 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4845 	struct e1000_adapter *adapter = dev->data->dev_private;
4846 	uint32_t tsync_rxctl;
4847 	uint64_t rx_tstamp_cycles;
4848 	uint64_t ns;
4849 
4850 	tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4851 	if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
4852 		return -EINVAL;
4853 
4854 	rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev);
4855 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4856 	*timestamp = rte_ns_to_timespec(ns);
4857 
4858 	return  0;
4859 }
4860 
4861 static int
4862 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4863 			       struct timespec *timestamp)
4864 {
4865 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4866 	struct e1000_adapter *adapter = dev->data->dev_private;
4867 	uint32_t tsync_txctl;
4868 	uint64_t tx_tstamp_cycles;
4869 	uint64_t ns;
4870 
4871 	tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4872 	if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
4873 		return -EINVAL;
4874 
4875 	tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev);
4876 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4877 	*timestamp = rte_ns_to_timespec(ns);
4878 
4879 	return  0;
4880 }
4881 
4882 static int
4883 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4884 {
4885 	int count = 0;
4886 	int g_ind = 0;
4887 	const struct reg_info *reg_group;
4888 
4889 	while ((reg_group = igb_regs[g_ind++]))
4890 		count += igb_reg_group_count(reg_group);
4891 
4892 	return count;
4893 }
4894 
4895 static int
4896 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4897 {
4898 	int count = 0;
4899 	int g_ind = 0;
4900 	const struct reg_info *reg_group;
4901 
4902 	while ((reg_group = igbvf_regs[g_ind++]))
4903 		count += igb_reg_group_count(reg_group);
4904 
4905 	return count;
4906 }
4907 
4908 static int
4909 eth_igb_get_regs(struct rte_eth_dev *dev,
4910 	struct rte_dev_reg_info *regs)
4911 {
4912 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4913 	uint32_t *data = regs->data;
4914 	int g_ind = 0;
4915 	int count = 0;
4916 	const struct reg_info *reg_group;
4917 
4918 	if (data == NULL) {
4919 		regs->length = eth_igb_get_reg_length(dev);
4920 		regs->width = sizeof(uint32_t);
4921 		return 0;
4922 	}
4923 
4924 	/* Support only full register dump */
4925 	if ((regs->length == 0) ||
4926 	    (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
4927 		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4928 			hw->device_id;
4929 		while ((reg_group = igb_regs[g_ind++]))
4930 			count += igb_read_regs_group(dev, &data[count],
4931 							reg_group);
4932 		return 0;
4933 	}
4934 
4935 	return -ENOTSUP;
4936 }
4937 
4938 static int
4939 igbvf_get_regs(struct rte_eth_dev *dev,
4940 	struct rte_dev_reg_info *regs)
4941 {
4942 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4943 	uint32_t *data = regs->data;
4944 	int g_ind = 0;
4945 	int count = 0;
4946 	const struct reg_info *reg_group;
4947 
4948 	if (data == NULL) {
4949 		regs->length = igbvf_get_reg_length(dev);
4950 		regs->width = sizeof(uint32_t);
4951 		return 0;
4952 	}
4953 
4954 	/* Support only full register dump */
4955 	if ((regs->length == 0) ||
4956 	    (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
4957 		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4958 			hw->device_id;
4959 		while ((reg_group = igbvf_regs[g_ind++]))
4960 			count += igb_read_regs_group(dev, &data[count],
4961 							reg_group);
4962 		return 0;
4963 	}
4964 
4965 	return -ENOTSUP;
4966 }
4967 
4968 static int
4969 eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
4970 {
4971 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4972 
4973 	/* Return unit is byte count */
4974 	return hw->nvm.word_size * 2;
4975 }
4976 
4977 static int
4978 eth_igb_get_eeprom(struct rte_eth_dev *dev,
4979 	struct rte_dev_eeprom_info *in_eeprom)
4980 {
4981 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4982 	struct e1000_nvm_info *nvm = &hw->nvm;
4983 	uint16_t *data = in_eeprom->data;
4984 	int first, length;
4985 
4986 	first = in_eeprom->offset >> 1;
4987 	length = in_eeprom->length >> 1;
4988 	if ((first >= hw->nvm.word_size) ||
4989 	    ((first + length) >= hw->nvm.word_size))
4990 		return -EINVAL;
4991 
4992 	in_eeprom->magic = hw->vendor_id |
4993 		((uint32_t)hw->device_id << 16);
4994 
4995 	if ((nvm->ops.read) == NULL)
4996 		return -ENOTSUP;
4997 
4998 	return nvm->ops.read(hw, first, length, data);
4999 }
5000 
5001 static int
5002 eth_igb_set_eeprom(struct rte_eth_dev *dev,
5003 	struct rte_dev_eeprom_info *in_eeprom)
5004 {
5005 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5006 	struct e1000_nvm_info *nvm = &hw->nvm;
5007 	uint16_t *data = in_eeprom->data;
5008 	int first, length;
5009 
5010 	first = in_eeprom->offset >> 1;
5011 	length = in_eeprom->length >> 1;
5012 	if ((first >= hw->nvm.word_size) ||
5013 	    ((first + length) >= hw->nvm.word_size))
5014 		return -EINVAL;
5015 
5016 	in_eeprom->magic = (uint32_t)hw->vendor_id |
5017 		((uint32_t)hw->device_id << 16);
5018 
5019 	if ((nvm->ops.write) == NULL)
5020 		return -ENOTSUP;
5021 	return nvm->ops.write(hw,  first, length, data);
5022 }
5023 
5024 static int
5025 eth_igb_get_module_info(struct rte_eth_dev *dev,
5026 			struct rte_eth_dev_module_info *modinfo)
5027 {
5028 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5029 
5030 	uint32_t status = 0;
5031 	uint16_t sff8472_rev, addr_mode;
5032 	bool page_swap = false;
5033 
5034 	if (hw->phy.media_type == e1000_media_type_copper ||
5035 	    hw->phy.media_type == e1000_media_type_unknown)
5036 		return -EOPNOTSUPP;
5037 
5038 	/* Check whether we support SFF-8472 or not */
5039 	status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
5040 	if (status)
5041 		return -EIO;
5042 
5043 	/* Read the addressing mode */
5044 	status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
5045 	if (status)
5046 		return -EIO;
5047 
5048 	/* addressing mode is not supported */
5049 	if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
5050 		PMD_DRV_LOG(ERR,
5051 			    "Address change required to access page 0xA2, "
5052 			    "but not supported. Please report the module "
5053 			    "type to the driver maintainers.\n");
5054 		page_swap = true;
5055 	}
5056 
5057 	if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
5058 		/* We have an SFP, but it does not support SFF-8472 */
5059 		modinfo->type = RTE_ETH_MODULE_SFF_8079;
5060 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
5061 	} else {
5062 		/* We have an SFP which supports a revision of SFF-8472 */
5063 		modinfo->type = RTE_ETH_MODULE_SFF_8472;
5064 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
5065 	}
5066 
5067 	return 0;
5068 }
5069 
5070 static int
5071 eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
5072 			  struct rte_dev_eeprom_info *info)
5073 {
5074 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5075 
5076 	uint32_t status = 0;
5077 	uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
5078 	u16 first_word, last_word;
5079 	int i = 0;
5080 
5081 	first_word = info->offset >> 1;
5082 	last_word = (info->offset + info->length - 1) >> 1;
5083 
5084 	/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
5085 	for (i = 0; i < last_word - first_word + 1; i++) {
5086 		status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
5087 						&dataword[i]);
5088 		if (status) {
5089 			/* Error occurred while reading module */
5090 			return -EIO;
5091 		}
5092 
5093 		dataword[i] = rte_be_to_cpu_16(dataword[i]);
5094 	}
5095 
5096 	memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);
5097 
5098 	return 0;
5099 }
5100 
5101 static int
5102 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5103 {
5104 	struct e1000_hw *hw =
5105 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5106 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5107 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
5108 	uint32_t vec = E1000_MISC_VEC_ID;
5109 
5110 	if (rte_intr_allow_others(intr_handle))
5111 		vec = E1000_RX_VEC_START;
5112 
5113 	uint32_t mask = 1 << (queue_id + vec);
5114 
5115 	E1000_WRITE_REG(hw, E1000_EIMC, mask);
5116 	E1000_WRITE_FLUSH(hw);
5117 
5118 	return 0;
5119 }
5120 
5121 static int
5122 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5123 {
5124 	struct e1000_hw *hw =
5125 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5126 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5127 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
5128 	uint32_t vec = E1000_MISC_VEC_ID;
5129 
5130 	if (rte_intr_allow_others(intr_handle))
5131 		vec = E1000_RX_VEC_START;
5132 
5133 	uint32_t mask = 1 << (queue_id + vec);
5134 	uint32_t regval;
5135 
5136 	regval = E1000_READ_REG(hw, E1000_EIMS);
5137 	E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
5138 	E1000_WRITE_FLUSH(hw);
5139 
5140 	rte_intr_ack(intr_handle);
5141 
5142 	return 0;
5143 }
5144 
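/* Program one byte-wide field of an IVAR register: each 32-bit IVAR
 * entry packs four vector/valid fields, selected by the bit offset.
 */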
5145 static void
5146 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t  msix_vector,
5147 		   uint8_t index, uint8_t offset)
5148 {
5149 	uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
5150 
5151 	/* clear bits */
5152 	val &= ~((uint32_t)0xFF << offset);
5153 
5154 	/* write vector and valid bit */
5155 	val |= (msix_vector | E1000_IVAR_VALID) << offset;
5156 
5157 	E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
5158 }
5159 
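/* Bind an Rx (direction 0) or Tx (direction 1) queue to an MSI-X
 * vector. The mapping scheme is MAC-specific: the 82575 uses
 * per-vector MSIXBM bitmaps, while the 82576 and later MACs place the
 * vector in a byte lane of an IVAR register chosen from the queue
 * number and the Rx/Tx direction.
 */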
5160 static void
5161 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
5162 			   uint8_t queue, uint8_t msix_vector)
5163 {
5164 	uint32_t tmp = 0;
5165 
5166 	if (hw->mac.type == e1000_82575) {
5167 		if (direction == 0)
5168 			tmp = E1000_EICR_RX_QUEUE0 << queue;
5169 		else if (direction == 1)
5170 			tmp = E1000_EICR_TX_QUEUE0 << queue;
5171 		E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
5172 	} else if (hw->mac.type == e1000_82576) {
5173 		if ((direction == 0) || (direction == 1))
5174 			eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
5175 					   ((queue & 0x8) << 1) +
5176 					   8 * direction);
5177 	} else if ((hw->mac.type == e1000_82580) ||
5178 			(hw->mac.type == e1000_i350) ||
5179 			(hw->mac.type == e1000_i354) ||
5180 			(hw->mac.type == e1000_i210) ||
5181 			(hw->mac.type == e1000_i211)) {
5182 		if ((direction == 0) || (direction == 1))
5183 			eth_igb_write_ivar(hw, msix_vector,
5184 					   queue >> 1,
5185 					   ((queue & 0x1) << 4) +
5186 					   8 * direction);
5187 	}
5188 }
5189 
5190 /* Sets up the hardware to generate MSI-X interrupts properly
5191  * @dev
5192  *  pointer to the Ethernet device structure
5193  */
5194 static void
5195 eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
5196 {
5197 	int queue_id;
5198 	uint32_t tmpval, regval, intr_mask;
5199 	struct e1000_hw *hw =
5200 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5201 	uint32_t vec = E1000_MISC_VEC_ID;
5202 	uint32_t base = E1000_MISC_VEC_ID;
5203 	uint32_t misc_shift = 0;
5204 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5205 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
5206 
5207 	/* don't configure the MSI-X registers if no mapping has been done
5208 	 * between interrupt vectors and event fds
5209 	 */
5210 	if (!rte_intr_dp_is_en(intr_handle))
5211 		return;
5212 
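	/* When a dedicated misc vector is available, queue vectors start
	 * at E1000_RX_VEC_START and the efd masks below are shifted past
	 * the misc vector.
	 */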
5213 	if (rte_intr_allow_others(intr_handle)) {
5214 		vec = base = E1000_RX_VEC_START;
5215 		misc_shift = 1;
5216 	}
5217 
5218 	/* set interrupt vector for other causes */
5219 	if (hw->mac.type == e1000_82575) {
5220 		tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
5221 		/* enable MSI-X PBA support */
5222 		tmpval |= E1000_CTRL_EXT_PBA_CLR;
5223 
5224 		/* Auto-Mask interrupts upon ICR read */
5225 		tmpval |= E1000_CTRL_EXT_EIAME;
5226 		tmpval |= E1000_CTRL_EXT_IRCA;
5227 
5228 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
5229 
5230 		/* enable msix_other interrupt */
5231 		E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
5232 		regval = E1000_READ_REG(hw, E1000_EIAC);
5233 		E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
5234 		regval = E1000_READ_REG(hw, E1000_EIAM);
5235 		E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
5236 	} else if ((hw->mac.type == e1000_82576) ||
5237 			(hw->mac.type == e1000_82580) ||
5238 			(hw->mac.type == e1000_i350) ||
5239 			(hw->mac.type == e1000_i354) ||
5240 			(hw->mac.type == e1000_i210) ||
5241 			(hw->mac.type == e1000_i211)) {
5242 		/* turn on MSI-X capability first */
5243 		E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
5244 					E1000_GPIE_PBA | E1000_GPIE_EIAME |
5245 					E1000_GPIE_NSICR);
5246 		intr_mask =
5247 			RTE_LEN2MASK(rte_intr_nb_efd_get(intr_handle),
5248 				     uint32_t) << misc_shift;
5249 
5250 		if (dev->data->dev_conf.intr_conf.lsc != 0)
5251 			intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);
5252 
5253 		regval = E1000_READ_REG(hw, E1000_EIAC);
5254 		E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
5255 
5256 		/* enable msix_other interrupt */
5257 		regval = E1000_READ_REG(hw, E1000_EIMS);
5258 		E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
5259 		tmpval = (IGB_MSIX_OTHER_INTR_VEC | E1000_IVAR_VALID) << 8;
5260 		E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
5261 	}
5262 
5263 	/* use EIAM to auto-mask when MSI-X interrupt
5264 	 * is asserted, this saves a register write for every interrupt
5265 	 */
5266 	intr_mask = RTE_LEN2MASK(rte_intr_nb_efd_get(intr_handle),
5267 				 uint32_t) << misc_shift;
5268 
5269 	if (dev->data->dev_conf.intr_conf.lsc != 0)
5270 		intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);
5271 
5272 	regval = E1000_READ_REG(hw, E1000_EIAM);
5273 	E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
5274 
5275 	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
5276 		eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
5277 		rte_intr_vec_list_index_set(intr_handle, queue_id, vec);
5278 		if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
5279 			vec++;
5280 	}
5281 
5282 	E1000_WRITE_FLUSH(hw);
5283 }
5284 
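/* igb caches its flow filters in filter_info so they can be re-injected
 * into hardware when the port is (re)started; the helpers below restore
 * each filter family from that cache.
 */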
5285 /* restore n-tuple filter */
5286 static inline void
5287 igb_ntuple_filter_restore(struct rte_eth_dev *dev)
5288 {
5289 	struct e1000_filter_info *filter_info =
5290 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5291 	struct e1000_5tuple_filter *p_5tuple;
5292 	struct e1000_2tuple_filter *p_2tuple;
5293 
5294 	TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
5295 		igb_inject_5tuple_filter_82576(dev, p_5tuple);
5296 	}
5297 
5298 	TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
5299 		igb_inject_2uple_filter(dev, p_2tuple);
5300 	}
5301 }
5302 
5303 /* restore SYN filter */
5304 static inline void
5305 igb_syn_filter_restore(struct rte_eth_dev *dev)
5306 {
5307 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5308 	struct e1000_filter_info *filter_info =
5309 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5310 	uint32_t synqf;
5311 
5312 	synqf = filter_info->syn_info;
5313 
5314 	if (synqf & E1000_SYN_FILTER_ENABLE) {
5315 		E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
5316 		E1000_WRITE_FLUSH(hw);
5317 	}
5318 }
5319 
5320 /* restore ethernet type filter */
5321 static inline void
5322 igb_ethertype_filter_restore(struct rte_eth_dev *dev)
5323 {
5324 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5325 	struct e1000_filter_info *filter_info =
5326 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5327 	int i;
5328 
5329 	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
5330 		if (filter_info->ethertype_mask & (1 << i)) {
5331 			E1000_WRITE_REG(hw, E1000_ETQF(i),
5332 				filter_info->ethertype_filters[i].etqf);
5333 			E1000_WRITE_FLUSH(hw);
5334 		}
5335 	}
5336 }
5337 
5338 /* restore flex byte filter */
5339 static inline void
5340 igb_flex_filter_restore(struct rte_eth_dev *dev)
5341 {
5342 	struct e1000_filter_info *filter_info =
5343 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5344 	struct e1000_flex_filter *flex_filter;
5345 
5346 	TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
5347 		igb_inject_flex_filter(dev, flex_filter);
5348 	}
5349 }
5350 
5351 /* restore rss filter */
5352 static inline void
5353 igb_rss_filter_restore(struct rte_eth_dev *dev)
5354 {
5355 	struct e1000_filter_info *filter_info =
5356 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5357 
5358 	if (filter_info->rss_info.conf.queue_num)
5359 		igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
5360 }
5361 
5362 /* restore all types of filters */
5363 static int
5364 igb_filter_restore(struct rte_eth_dev *dev)
5365 {
5366 	igb_ntuple_filter_restore(dev);
5367 	igb_ethertype_filter_restore(dev);
5368 	igb_syn_filter_restore(dev);
5369 	igb_flex_filter_restore(dev);
5370 	igb_rss_filter_restore(dev);
5371 
5372 	return 0;
5373 }
5374 
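/* Register the PF and VF PMDs with the PCI bus, export their device ID
 * tables for device binding, and declare the kernel modules they can
 * attach through.
 */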
5375 RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
5376 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
5377 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
5378 RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
5379 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
5380 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
5381