xref: /f-stack/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c (revision fa64a7ff)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <netinet/in.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17 
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <rte_bus_pci.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_kvargs.h>
26 #include <rte_eal.h>
27 #include <rte_alarm.h>
28 #include <rte_ether.h>
29 #include <rte_ethdev_driver.h>
30 #include <rte_ethdev_pci.h>
31 #include <rte_malloc.h>
32 #include <rte_random.h>
33 #include <rte_dev.h>
34 #include <rte_hash_crc.h>
35 #ifdef RTE_LIBRTE_SECURITY
36 #include <rte_security_driver.h>
37 #endif
38 
39 #include "ixgbe_logs.h"
40 #include "base/ixgbe_api.h"
41 #include "base/ixgbe_vf.h"
42 #include "base/ixgbe_common.h"
43 #include "ixgbe_ethdev.h"
44 #include "ixgbe_bypass.h"
45 #include "ixgbe_rxtx.h"
46 #include "base/ixgbe_type.h"
47 #include "base/ixgbe_phy.h"
48 #include "ixgbe_regs.h"
49 
50 /*
51  * High threshold controlling when to start sending XOFF frames. Must be at
52  * least 8 bytes less than receive packet buffer size. This value is in units
53  * of 1024 bytes.
54  */
55 #define IXGBE_FC_HI    0x80
56 
57 /*
58  * Low threshold controlling when to start sending XON frames. This value is
59  * in units of 1024 bytes.
60  */
61 #define IXGBE_FC_LO    0x40
62 
63 /* Timer value included in XOFF frames. */
64 #define IXGBE_FC_PAUSE 0x680
65 
66 /*Default value of Max Rx Queue*/
67 #define IXGBE_MAX_RX_QUEUE_NUM 128
68 
69 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
70 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
71 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
72 
73 #define IXGBE_MMW_SIZE_DEFAULT        0x4
74 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
75 #define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
76 
77 /*
78  *  Default values for RX/TX configuration
79  */
80 #define IXGBE_DEFAULT_RX_FREE_THRESH  32
81 #define IXGBE_DEFAULT_RX_PTHRESH      8
82 #define IXGBE_DEFAULT_RX_HTHRESH      8
83 #define IXGBE_DEFAULT_RX_WTHRESH      0
84 
85 #define IXGBE_DEFAULT_TX_FREE_THRESH  32
86 #define IXGBE_DEFAULT_TX_PTHRESH      32
87 #define IXGBE_DEFAULT_TX_HTHRESH      0
88 #define IXGBE_DEFAULT_TX_WTHRESH      0
89 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
90 
91 /* Bit shift and mask */
92 #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
93 #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
94 #define IXGBE_8_BIT_WIDTH  CHAR_BIT
95 #define IXGBE_8_BIT_MASK   UINT8_MAX
96 
97 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
98 
99 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
100 
101 /* Additional timesync values. */
102 #define NSEC_PER_SEC             1000000000L
103 #define IXGBE_INCVAL_10GB        0x66666666
104 #define IXGBE_INCVAL_1GB         0x40000000
105 #define IXGBE_INCVAL_100         0x50000000
106 #define IXGBE_INCVAL_SHIFT_10GB  28
107 #define IXGBE_INCVAL_SHIFT_1GB   24
108 #define IXGBE_INCVAL_SHIFT_100   21
109 #define IXGBE_INCVAL_SHIFT_82599 7
110 #define IXGBE_INCPER_SHIFT_82599 24
111 
112 #define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
113 
114 #define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
115 #define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
116 #define IXGBE_ETAG_ETYPE                       0x00005084
117 #define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
118 #define IXGBE_ETAG_ETYPE_VALID                 0x80000000
119 #define IXGBE_RAH_ADTYPE                       0x40000000
120 #define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
121 #define IXGBE_VMVIR_TAGA_MASK                  0x18000000
122 #define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
123 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
124 #define IXGBE_QDE_STRIP_TAG                    0x00000004
125 #define IXGBE_VTEICR_MASK                      0x07
126 
127 #define IXGBE_EXVET_VET_EXT_SHIFT              16
128 #define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
129 
130 #define IXGBEVF_DEVARG_PFLINK_FULLCHK		"pflink_fullchk"
131 
132 static const char * const ixgbevf_valid_arguments[] = {
133 	IXGBEVF_DEVARG_PFLINK_FULLCHK,
134 	NULL
135 };
136 
137 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
138 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
139 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
140 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
141 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
142 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
143 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
144 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
145 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
146 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
147 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
148 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
149 static void ixgbe_dev_close(struct rte_eth_dev *dev);
150 static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
151 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
152 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
153 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
154 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
155 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
156 				int wait_to_complete);
157 static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
158 				struct rte_eth_stats *stats);
159 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
160 				struct rte_eth_xstat *xstats, unsigned n);
161 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
162 				  struct rte_eth_xstat *xstats, unsigned n);
163 static int
164 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
165 		uint64_t *values, unsigned int n);
166 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
167 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
168 static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
169 	struct rte_eth_xstat_name *xstats_names,
170 	unsigned int size);
171 static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
172 	struct rte_eth_xstat_name *xstats_names, unsigned limit);
173 static int ixgbe_dev_xstats_get_names_by_id(
174 	struct rte_eth_dev *dev,
175 	struct rte_eth_xstat_name *xstats_names,
176 	const uint64_t *ids,
177 	unsigned int limit);
178 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
179 					     uint16_t queue_id,
180 					     uint8_t stat_idx,
181 					     uint8_t is_rx);
182 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
183 				 size_t fw_size);
184 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
185 			       struct rte_eth_dev_info *dev_info);
186 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
187 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
188 				 struct rte_eth_dev_info *dev_info);
189 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
190 
191 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
192 		uint16_t vlan_id, int on);
193 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
194 			       enum rte_vlan_type vlan_type,
195 			       uint16_t tpid_id);
196 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
197 		uint16_t queue, bool on);
198 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
199 		int on);
200 static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
201 						  int mask);
202 static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
203 static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
204 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
205 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
206 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
207 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
208 
209 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
210 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
211 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
212 			       struct rte_eth_fc_conf *fc_conf);
213 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
214 			       struct rte_eth_fc_conf *fc_conf);
215 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
216 		struct rte_eth_pfc_conf *pfc_conf);
217 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
218 			struct rte_eth_rss_reta_entry64 *reta_conf,
219 			uint16_t reta_size);
220 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
221 			struct rte_eth_rss_reta_entry64 *reta_conf,
222 			uint16_t reta_size);
223 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
224 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
225 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
226 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
227 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
228 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
229 static void ixgbe_dev_interrupt_handler(void *param);
230 static void ixgbe_dev_interrupt_delayed_handler(void *param);
231 static void ixgbe_dev_setup_link_alarm_handler(void *param);
232 
233 static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
234 			 uint32_t index, uint32_t pool);
235 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
236 static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
237 					   struct ether_addr *mac_addr);
238 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
239 static bool is_device_supported(struct rte_eth_dev *dev,
240 				struct rte_pci_driver *drv);
241 
242 /* For Virtual Function support */
243 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
244 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
245 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
246 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
247 static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
248 				   int wait_to_complete);
249 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
250 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
251 static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
252 static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
253 static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
254 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
255 		struct rte_eth_stats *stats);
256 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
257 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
258 		uint16_t vlan_id, int on);
259 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
260 		uint16_t queue, int on);
261 static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
262 static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
263 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
264 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
265 					    uint16_t queue_id);
266 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
267 					     uint16_t queue_id);
268 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
269 				 uint8_t queue, uint8_t msix_vector);
270 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
271 static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
272 static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
273 
274 /* For Eth VMDQ APIs support */
275 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
276 		ether_addr * mac_addr, uint8_t on);
277 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
278 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
279 		struct rte_eth_mirror_conf *mirror_conf,
280 		uint8_t rule_id, uint8_t on);
281 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
282 		uint8_t	rule_id);
283 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
284 					  uint16_t queue_id);
285 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
286 					   uint16_t queue_id);
287 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
288 			       uint8_t queue, uint8_t msix_vector);
289 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
290 
291 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
292 				struct ether_addr *mac_addr,
293 				uint32_t index, uint32_t pool);
294 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
295 static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
296 					     struct ether_addr *mac_addr);
297 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
298 			struct rte_eth_syn_filter *filter);
299 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
300 			enum rte_filter_op filter_op,
301 			void *arg);
302 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
303 			struct ixgbe_5tuple_filter *filter);
304 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
305 			struct ixgbe_5tuple_filter *filter);
306 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
307 				enum rte_filter_op filter_op,
308 				void *arg);
309 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
310 			struct rte_eth_ntuple_filter *filter);
311 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
312 				enum rte_filter_op filter_op,
313 				void *arg);
314 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
315 			struct rte_eth_ethertype_filter *filter);
316 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
317 		     enum rte_filter_type filter_type,
318 		     enum rte_filter_op filter_op,
319 		     void *arg);
320 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
321 
322 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
323 				      struct ether_addr *mc_addr_set,
324 				      uint32_t nb_mc_addr);
325 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
326 				   struct rte_eth_dcb_info *dcb_info);
327 
328 static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
329 static int ixgbe_get_regs(struct rte_eth_dev *dev,
330 			    struct rte_dev_reg_info *regs);
331 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
332 static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
333 				struct rte_dev_eeprom_info *eeprom);
334 static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
335 				struct rte_dev_eeprom_info *eeprom);
336 
337 static int ixgbe_get_module_info(struct rte_eth_dev *dev,
338 				 struct rte_eth_dev_module_info *modinfo);
339 static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
340 				   struct rte_dev_eeprom_info *info);
341 
342 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
343 static int ixgbevf_get_regs(struct rte_eth_dev *dev,
344 				struct rte_dev_reg_info *regs);
345 
346 static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
347 static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
348 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
349 					    struct timespec *timestamp,
350 					    uint32_t flags);
351 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
352 					    struct timespec *timestamp);
353 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
354 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
355 				   struct timespec *timestamp);
356 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
357 				   const struct timespec *timestamp);
358 static void ixgbevf_dev_interrupt_handler(void *param);
359 
360 static int ixgbe_dev_l2_tunnel_eth_type_conf
361 	(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
362 static int ixgbe_dev_l2_tunnel_offload_set
363 	(struct rte_eth_dev *dev,
364 	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
365 	 uint32_t mask,
366 	 uint8_t en);
367 static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
368 					     enum rte_filter_op filter_op,
369 					     void *arg);
370 
371 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
372 					 struct rte_eth_udp_tunnel *udp_tunnel);
373 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
374 					 struct rte_eth_udp_tunnel *udp_tunnel);
375 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
376 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
377 
378 /*
379  * Define VF Stats MACRO for Non "cleared on read" register
380  */
381 #define UPDATE_VF_STAT(reg, last, cur)                          \
382 {                                                               \
383 	uint32_t latest = IXGBE_READ_REG(hw, reg);              \
384 	cur += (latest - last) & UINT_MAX;                      \
385 	last = latest;                                          \
386 }
387 
388 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
389 {                                                                \
390 	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
391 	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
392 	u64 latest = ((new_msb << 32) | new_lsb);                \
393 	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
394 	last = latest;                                           \
395 }
396 
397 #define IXGBE_SET_HWSTRIP(h, q) do {\
398 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
399 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
400 		(h)->bitmap[idx] |= 1 << bit;\
401 	} while (0)
402 
403 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\
404 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
405 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
406 		(h)->bitmap[idx] &= ~(1 << bit);\
407 	} while (0)
408 
409 #define IXGBE_GET_HWSTRIP(h, q, r) do {\
410 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
411 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
412 		(r) = (h)->bitmap[idx] >> bit & 1;\
413 	} while (0)
414 
415 int ixgbe_logtype_init;
416 int ixgbe_logtype_driver;
417 
418 /*
419  * The set of PCI devices this driver supports
420  */
421 static const struct rte_pci_id pci_id_ixgbe_map[] = {
422 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
423 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
424 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
425 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
426 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
427 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
428 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
429 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
430 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
431 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
432 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
433 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
434 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
435 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
436 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
437 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
438 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
439 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
440 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
441 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
442 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
443 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
444 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
445 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
446 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
447 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
448 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
449 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
450 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
451 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
452 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
453 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
454 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
455 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
456 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
457 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
458 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
459 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
460 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
461 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
462 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
463 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
464 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
465 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
466 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
467 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
468 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
469 #ifdef RTE_LIBRTE_IXGBE_BYPASS
470 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
471 #endif
472 	{ .vendor_id = 0, /* sentinel */ },
473 };
474 
475 /*
476  * The set of PCI devices this driver supports (for 82599 VF)
477  */
478 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
479 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
480 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
481 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
482 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
483 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
484 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
485 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
486 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
487 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
488 	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
489 	{ .vendor_id = 0, /* sentinel */ },
490 };
491 
492 static const struct rte_eth_desc_lim rx_desc_lim = {
493 	.nb_max = IXGBE_MAX_RING_DESC,
494 	.nb_min = IXGBE_MIN_RING_DESC,
495 	.nb_align = IXGBE_RXD_ALIGN,
496 };
497 
498 static const struct rte_eth_desc_lim tx_desc_lim = {
499 	.nb_max = IXGBE_MAX_RING_DESC,
500 	.nb_min = IXGBE_MIN_RING_DESC,
501 	.nb_align = IXGBE_TXD_ALIGN,
502 	.nb_seg_max = IXGBE_TX_MAX_SEG,
503 	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
504 };
505 
506 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
507 	.dev_configure        = ixgbe_dev_configure,
508 	.dev_start            = ixgbe_dev_start,
509 	.dev_stop             = ixgbe_dev_stop,
510 	.dev_set_link_up    = ixgbe_dev_set_link_up,
511 	.dev_set_link_down  = ixgbe_dev_set_link_down,
512 	.dev_close            = ixgbe_dev_close,
513 	.dev_reset	      = ixgbe_dev_reset,
514 	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
515 	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
516 	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
517 	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
518 	.link_update          = ixgbe_dev_link_update,
519 	.stats_get            = ixgbe_dev_stats_get,
520 	.xstats_get           = ixgbe_dev_xstats_get,
521 	.xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
522 	.stats_reset          = ixgbe_dev_stats_reset,
523 	.xstats_reset         = ixgbe_dev_xstats_reset,
524 	.xstats_get_names     = ixgbe_dev_xstats_get_names,
525 	.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
526 	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
527 	.fw_version_get       = ixgbe_fw_version_get,
528 	.dev_infos_get        = ixgbe_dev_info_get,
529 	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
530 	.mtu_set              = ixgbe_dev_mtu_set,
531 	.vlan_filter_set      = ixgbe_vlan_filter_set,
532 	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
533 	.vlan_offload_set     = ixgbe_vlan_offload_set,
534 	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
535 	.rx_queue_start	      = ixgbe_dev_rx_queue_start,
536 	.rx_queue_stop        = ixgbe_dev_rx_queue_stop,
537 	.tx_queue_start	      = ixgbe_dev_tx_queue_start,
538 	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
539 	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
540 	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
541 	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
542 	.rx_queue_release     = ixgbe_dev_rx_queue_release,
543 	.rx_queue_count       = ixgbe_dev_rx_queue_count,
544 	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
545 	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
546 	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
547 	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
548 	.tx_queue_release     = ixgbe_dev_tx_queue_release,
549 	.dev_led_on           = ixgbe_dev_led_on,
550 	.dev_led_off          = ixgbe_dev_led_off,
551 	.flow_ctrl_get        = ixgbe_flow_ctrl_get,
552 	.flow_ctrl_set        = ixgbe_flow_ctrl_set,
553 	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
554 	.mac_addr_add         = ixgbe_add_rar,
555 	.mac_addr_remove      = ixgbe_remove_rar,
556 	.mac_addr_set         = ixgbe_set_default_mac_addr,
557 	.uc_hash_table_set    = ixgbe_uc_hash_table_set,
558 	.uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
559 	.mirror_rule_set      = ixgbe_mirror_rule_set,
560 	.mirror_rule_reset    = ixgbe_mirror_rule_reset,
561 	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
562 	.reta_update          = ixgbe_dev_rss_reta_update,
563 	.reta_query           = ixgbe_dev_rss_reta_query,
564 	.rss_hash_update      = ixgbe_dev_rss_hash_update,
565 	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
566 	.filter_ctrl          = ixgbe_dev_filter_ctrl,
567 	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
568 	.rxq_info_get         = ixgbe_rxq_info_get,
569 	.txq_info_get         = ixgbe_txq_info_get,
570 	.timesync_enable      = ixgbe_timesync_enable,
571 	.timesync_disable     = ixgbe_timesync_disable,
572 	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
573 	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
574 	.get_reg              = ixgbe_get_regs,
575 	.get_eeprom_length    = ixgbe_get_eeprom_length,
576 	.get_eeprom           = ixgbe_get_eeprom,
577 	.set_eeprom           = ixgbe_set_eeprom,
578 	.get_module_info      = ixgbe_get_module_info,
579 	.get_module_eeprom    = ixgbe_get_module_eeprom,
580 	.get_dcb_info         = ixgbe_dev_get_dcb_info,
581 	.timesync_adjust_time = ixgbe_timesync_adjust_time,
582 	.timesync_read_time   = ixgbe_timesync_read_time,
583 	.timesync_write_time  = ixgbe_timesync_write_time,
584 	.l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
585 	.l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
586 	.udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
587 	.udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
588 	.tm_ops_get           = ixgbe_tm_ops_get,
589 };
590 
591 /*
592  * dev_ops for virtual function, bare necessities for basic vf
593  * operation have been implemented
594  */
595 static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
596 	.dev_configure        = ixgbevf_dev_configure,
597 	.dev_start            = ixgbevf_dev_start,
598 	.dev_stop             = ixgbevf_dev_stop,
599 	.link_update          = ixgbevf_dev_link_update,
600 	.stats_get            = ixgbevf_dev_stats_get,
601 	.xstats_get           = ixgbevf_dev_xstats_get,
602 	.stats_reset          = ixgbevf_dev_stats_reset,
603 	.xstats_reset         = ixgbevf_dev_stats_reset,
604 	.xstats_get_names     = ixgbevf_dev_xstats_get_names,
605 	.dev_close            = ixgbevf_dev_close,
606 	.dev_reset	      = ixgbevf_dev_reset,
607 	.allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
608 	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
609 	.dev_infos_get        = ixgbevf_dev_info_get,
610 	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
611 	.mtu_set              = ixgbevf_dev_set_mtu,
612 	.vlan_filter_set      = ixgbevf_vlan_filter_set,
613 	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
614 	.vlan_offload_set     = ixgbevf_vlan_offload_set,
615 	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
616 	.rx_queue_release     = ixgbe_dev_rx_queue_release,
617 	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
618 	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
619 	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
620 	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
621 	.tx_queue_release     = ixgbe_dev_tx_queue_release,
622 	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
623 	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
624 	.mac_addr_add         = ixgbevf_add_mac_addr,
625 	.mac_addr_remove      = ixgbevf_remove_mac_addr,
626 	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
627 	.rxq_info_get         = ixgbe_rxq_info_get,
628 	.txq_info_get         = ixgbe_txq_info_get,
629 	.mac_addr_set         = ixgbevf_set_default_mac_addr,
630 	.get_reg              = ixgbevf_get_regs,
631 	.reta_update          = ixgbe_dev_rss_reta_update,
632 	.reta_query           = ixgbe_dev_rss_reta_query,
633 	.rss_hash_update      = ixgbe_dev_rss_hash_update,
634 	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
635 };
636 
637 /* store statistics names and its offset in stats structure */
638 struct rte_ixgbe_xstats_name_off {
639 	char name[RTE_ETH_XSTATS_NAME_SIZE];
640 	unsigned offset;
641 };
642 
643 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
644 	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
645 	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
646 	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
647 	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
648 	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
649 	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
650 	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
651 	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
652 	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
653 	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
654 	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
655 	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
656 	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
657 	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
658 	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
659 		prc1023)},
660 	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
661 		prc1522)},
662 	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
663 	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
664 	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
665 	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
666 	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
667 	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
668 	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
669 	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
670 	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
671 	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
672 	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
673 	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
674 	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
675 	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
676 	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
677 	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
678 	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
679 		ptc1023)},
680 	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
681 		ptc1522)},
682 	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
683 	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
684 	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
685 	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
686 
687 	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
688 		fdirustat_add)},
689 	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
690 		fdirustat_remove)},
691 	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
692 		fdirfstat_fadd)},
693 	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
694 		fdirfstat_fremove)},
695 	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
696 		fdirmatch)},
697 	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
698 		fdirmiss)},
699 
700 	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
701 	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
702 	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
703 		fclast)},
704 	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
705 	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
706 	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
707 	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
708 	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
709 		fcoe_noddp)},
710 	{"rx_fcoe_no_direct_data_placement_ext_buff",
711 		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
712 
713 	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
714 		lxontxc)},
715 	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
716 		lxonrxc)},
717 	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
718 		lxofftxc)},
719 	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
720 		lxoffrxc)},
721 	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
722 };
723 
724 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
725 			   sizeof(rte_ixgbe_stats_strings[0]))
726 
727 /* MACsec statistics */
728 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
729 	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
730 		out_pkts_untagged)},
731 	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
732 		out_pkts_encrypted)},
733 	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
734 		out_pkts_protected)},
735 	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
736 		out_octets_encrypted)},
737 	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
738 		out_octets_protected)},
739 	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
740 		in_pkts_untagged)},
741 	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
742 		in_pkts_badtag)},
743 	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
744 		in_pkts_nosci)},
745 	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
746 		in_pkts_unknownsci)},
747 	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
748 		in_octets_decrypted)},
749 	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
750 		in_octets_validated)},
751 	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
752 		in_pkts_unchecked)},
753 	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
754 		in_pkts_delayed)},
755 	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
756 		in_pkts_late)},
757 	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
758 		in_pkts_ok)},
759 	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
760 		in_pkts_invalid)},
761 	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
762 		in_pkts_notvalid)},
763 	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
764 		in_pkts_unusedsa)},
765 	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
766 		in_pkts_notusingsa)},
767 };
768 
769 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
770 			   sizeof(rte_ixgbe_macsec_strings[0]))
771 
772 /* Per-queue statistics */
773 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
774 	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
775 	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
776 	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
777 	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
778 };
779 
780 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
781 			   sizeof(rte_ixgbe_rxq_strings[0]))
782 #define IXGBE_NB_RXQ_PRIO_VALUES 8
783 
784 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
785 	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
786 	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
787 	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
788 		pxon2offc)},
789 };
790 
791 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
792 			   sizeof(rte_ixgbe_txq_strings[0]))
793 #define IXGBE_NB_TXQ_PRIO_VALUES 8
794 
795 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
796 	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
797 };
798 
799 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /	\
800 		sizeof(rte_ixgbevf_stats_strings[0]))
801 
802 /*
803  * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
804  */
805 static inline int
806 ixgbe_is_sfp(struct ixgbe_hw *hw)
807 {
808 	switch (hw->phy.type) {
809 	case ixgbe_phy_sfp_avago:
810 	case ixgbe_phy_sfp_ftl:
811 	case ixgbe_phy_sfp_intel:
812 	case ixgbe_phy_sfp_unknown:
813 	case ixgbe_phy_sfp_passive_tyco:
814 	case ixgbe_phy_sfp_passive_unknown:
815 		return 1;
816 	default:
817 		return 0;
818 	}
819 }
820 
821 static inline int32_t
822 ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
823 {
824 	uint32_t ctrl_ext;
825 	int32_t status;
826 
827 	status = ixgbe_reset_hw(hw);
828 
829 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
830 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
831 	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
832 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
833 	IXGBE_WRITE_FLUSH(hw);
834 
835 	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
836 		status = IXGBE_SUCCESS;
837 	return status;
838 }
839 
840 static inline void
841 ixgbe_enable_intr(struct rte_eth_dev *dev)
842 {
843 	struct ixgbe_interrupt *intr =
844 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
845 	struct ixgbe_hw *hw =
846 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
847 
848 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
849 	IXGBE_WRITE_FLUSH(hw);
850 }
851 
852 /*
853  * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
854  */
855 static void
856 ixgbe_disable_intr(struct ixgbe_hw *hw)
857 {
858 	PMD_INIT_FUNC_TRACE();
859 
860 	if (hw->mac.type == ixgbe_mac_82598EB) {
861 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
862 	} else {
863 		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
864 		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
865 		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
866 	}
867 	IXGBE_WRITE_FLUSH(hw);
868 }
869 
870 /*
871  * This function resets queue statistics mapping registers.
872  * From Niantic datasheet, Initialization of Statistics section:
873  * "...if software requires the queue counters, the RQSMR and TQSM registers
874  * must be re-programmed following a device reset.
875  */
876 static void
877 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
878 {
879 	uint32_t i;
880 
881 	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
882 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
883 		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
884 	}
885 }
886 
887 
888 static int
889 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
890 				  uint16_t queue_id,
891 				  uint8_t stat_idx,
892 				  uint8_t is_rx)
893 {
894 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
895 #define NB_QMAP_FIELDS_PER_QSM_REG 4
896 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
897 
898 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
899 	struct ixgbe_stat_mapping_registers *stat_mappings =
900 		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
901 	uint32_t qsmr_mask = 0;
902 	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
903 	uint32_t q_map;
904 	uint8_t n, offset;
905 
906 	if ((hw->mac.type != ixgbe_mac_82599EB) &&
907 		(hw->mac.type != ixgbe_mac_X540) &&
908 		(hw->mac.type != ixgbe_mac_X550) &&
909 		(hw->mac.type != ixgbe_mac_X550EM_x) &&
910 		(hw->mac.type != ixgbe_mac_X550EM_a))
911 		return -ENOSYS;
912 
913 	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
914 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
915 		     queue_id, stat_idx);
916 
917 	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
918 	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
919 		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
920 		return -EIO;
921 	}
922 	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
923 
924 	/* Now clear any previous stat_idx set */
925 	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
926 	if (!is_rx)
927 		stat_mappings->tqsm[n] &= ~clearing_mask;
928 	else
929 		stat_mappings->rqsmr[n] &= ~clearing_mask;
930 
931 	q_map = (uint32_t)stat_idx;
932 	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
933 	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
934 	if (!is_rx)
935 		stat_mappings->tqsm[n] |= qsmr_mask;
936 	else
937 		stat_mappings->rqsmr[n] |= qsmr_mask;
938 
939 	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
940 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
941 		     queue_id, stat_idx);
942 	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
943 		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
944 
945 	/* Now write the mapping in the appropriate register */
946 	if (is_rx) {
947 		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
948 			     stat_mappings->rqsmr[n], n);
949 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
950 	} else {
951 		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
952 			     stat_mappings->tqsm[n], n);
953 		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
954 	}
955 	return 0;
956 }
957 
958 static void
959 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
960 {
961 	struct ixgbe_stat_mapping_registers *stat_mappings =
962 		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
963 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
964 	int i;
965 
966 	/* write whatever was in stat mapping table to the NIC */
967 	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
968 		/* rx */
969 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
970 
971 		/* tx */
972 		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
973 	}
974 }
975 
976 static void
977 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
978 {
979 	uint8_t i;
980 	struct ixgbe_dcb_tc_config *tc;
981 	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
982 
983 	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
984 	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
985 	for (i = 0; i < dcb_max_tc; i++) {
986 		tc = &dcb_config->tc_config[i];
987 		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
988 		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
989 				 (uint8_t)(100/dcb_max_tc + (i & 1));
990 		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
991 		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
992 				 (uint8_t)(100/dcb_max_tc + (i & 1));
993 		tc->pfc = ixgbe_dcb_pfc_disabled;
994 	}
995 
996 	/* Initialize default user to priority mapping, UPx->TC0 */
997 	tc = &dcb_config->tc_config[0];
998 	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
999 	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
1000 	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
1001 		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
1002 		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
1003 	}
1004 	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
1005 	dcb_config->pfc_mode_enable = false;
1006 	dcb_config->vt_mode = true;
1007 	dcb_config->round_robin_enable = false;
1008 	/* support all DCB capabilities in 82599 */
1009 	dcb_config->support.capabilities = 0xFF;
1010 
1011 	/*we only support 4 Tcs for X540, X550 */
1012 	if (hw->mac.type == ixgbe_mac_X540 ||
1013 		hw->mac.type == ixgbe_mac_X550 ||
1014 		hw->mac.type == ixgbe_mac_X550EM_x ||
1015 		hw->mac.type == ixgbe_mac_X550EM_a) {
1016 		dcb_config->num_tcs.pg_tcs = 4;
1017 		dcb_config->num_tcs.pfc_tcs = 4;
1018 	}
1019 }
1020 
1021 /*
1022  * Ensure that all locks are released before first NVM or PHY access
1023  */
1024 static void
1025 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
1026 {
1027 	uint16_t mask;
1028 
1029 	/*
1030 	 * Phy lock should not fail in this early stage. If this is the case,
1031 	 * it is due to an improper exit of the application.
1032 	 * So force the release of the faulty lock. Release of common lock
1033 	 * is done automatically by swfw_sync function.
1034 	 */
1035 	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
1036 	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1037 		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
1038 	}
1039 	ixgbe_release_swfw_semaphore(hw, mask);
1040 
1041 	/*
1042 	 * These ones are more tricky since they are common to all ports; but
1043 	 * swfw_sync retries last long enough (1s) to be almost sure that if
1044 	 * lock can not be taken it is due to an improper lock of the
1045 	 * semaphore.
1046 	 */
1047 	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
1048 	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1049 		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
1050 	}
1051 	ixgbe_release_swfw_semaphore(hw, mask);
1052 }
1053 
1054 /*
1055  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
1056  * It returns 0 on success.
1057  */
1058 static int
1059 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
1060 {
1061 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1062 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1063 	struct ixgbe_hw *hw =
1064 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1065 	struct ixgbe_vfta *shadow_vfta =
1066 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1067 	struct ixgbe_hwstrip *hwstrip =
1068 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1069 	struct ixgbe_dcb_config *dcb_config =
1070 		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
1071 	struct ixgbe_filter_info *filter_info =
1072 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1073 	struct ixgbe_bw_conf *bw_conf =
1074 		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
1075 	uint32_t ctrl_ext;
1076 	uint16_t csum;
1077 	int diag, i;
1078 
1079 	PMD_INIT_FUNC_TRACE();
1080 
1081 	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
1082 	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1083 	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1084 	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
1085 
1086 	/*
1087 	 * For secondary processes, we don't initialise any further as primary
1088 	 * has already done this work. Only check we don't need a different
1089 	 * RX and TX function.
1090 	 */
1091 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1092 		struct ixgbe_tx_queue *txq;
1093 		/* TX queue function in primary, set by last queue initialized
1094 		 * Tx queue may not initialized by primary process
1095 		 */
1096 		if (eth_dev->data->tx_queues) {
1097 			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1098 			ixgbe_set_tx_function(eth_dev, txq);
1099 		} else {
1100 			/* Use default TX function if we get here */
1101 			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1102 				     "Using default TX function.");
1103 		}
1104 
1105 		ixgbe_set_rx_function(eth_dev);
1106 
1107 		return 0;
1108 	}
1109 
1110 	rte_eth_copy_pci_info(eth_dev, pci_dev);
1111 
1112 	/* Vendor and Device ID need to be set before init of shared code */
1113 	hw->device_id = pci_dev->id.device_id;
1114 	hw->vendor_id = pci_dev->id.vendor_id;
1115 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1116 	hw->allow_unsupported_sfp = 1;
1117 
1118 	/* Initialize the shared code (base driver) */
1119 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1120 	diag = ixgbe_bypass_init_shared_code(hw);
1121 #else
1122 	diag = ixgbe_init_shared_code(hw);
1123 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1124 
1125 	if (diag != IXGBE_SUCCESS) {
1126 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1127 		return -EIO;
1128 	}
1129 
1130 	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
1131 		PMD_INIT_LOG(ERR, "\nERROR: "
1132 			"Firmware recovery mode detected. Limiting functionality.\n"
1133 			"Refer to the Intel(R) Ethernet Adapters and Devices "
1134 			"User Guide for details on firmware recovery mode.");
1135 		return -EIO;
1136 	}
1137 
1138 	/* pick up the PCI bus settings for reporting later */
1139 	ixgbe_get_bus_info(hw);
1140 
1141 	/* Unlock any pending hardware semaphore */
1142 	ixgbe_swfw_lock_reset(hw);
1143 
1144 #ifdef RTE_LIBRTE_SECURITY
1145 	/* Initialize security_ctx only for primary process*/
1146 	if (ixgbe_ipsec_ctx_create(eth_dev))
1147 		return -ENOMEM;
1148 #endif
1149 
1150 	/* Initialize DCB configuration*/
1151 	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1152 	ixgbe_dcb_init(hw, dcb_config);
1153 	/* Get Hardware Flow Control setting */
1154 	hw->fc.requested_mode = ixgbe_fc_full;
1155 	hw->fc.current_mode = ixgbe_fc_full;
1156 	hw->fc.pause_time = IXGBE_FC_PAUSE;
1157 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1158 		hw->fc.low_water[i] = IXGBE_FC_LO;
1159 		hw->fc.high_water[i] = IXGBE_FC_HI;
1160 	}
1161 	hw->fc.send_xon = 1;
1162 
1163 	/* Make sure we have a good EEPROM before we read from it */
1164 	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1165 	if (diag != IXGBE_SUCCESS) {
1166 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1167 		return -EIO;
1168 	}
1169 
1170 #ifdef RTE_LIBRTE_IXGBE_BYPASS
1171 	diag = ixgbe_bypass_init_hw(hw);
1172 #else
1173 	diag = ixgbe_init_hw(hw);
1174 #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1175 
1176 	/*
1177 	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1178 	 * is called too soon after the kernel driver unbinding/binding occurs.
1179 	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1180 	 * but for non-copper devies, ixgbe_identify_sfp_module_generic() is
1181 	 * also called. See ixgbe_identify_phy_82599(). The reason for the
1182 	 * failure is not known, and only occuts when virtualisation features
1183 	 * are disabled in the bios. A delay of 100ms  was found to be enough by
1184 	 * trial-and-error, and is doubled to be safe.
1185 	 */
1186 	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1187 		rte_delay_ms(200);
1188 		diag = ixgbe_init_hw(hw);
1189 	}
1190 
1191 	if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
1192 		diag = IXGBE_SUCCESS;
1193 
1194 	if (diag == IXGBE_ERR_EEPROM_VERSION) {
1195 		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1196 			     "LOM.  Please be aware there may be issues associated "
1197 			     "with your hardware.");
1198 		PMD_INIT_LOG(ERR, "If you are experiencing problems "
1199 			     "please contact your Intel or hardware representative "
1200 			     "who provided you with this hardware.");
1201 	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1202 		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1203 	if (diag) {
1204 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1205 		return -EIO;
1206 	}
1207 
1208 	/* Reset the hw statistics */
1209 	ixgbe_dev_stats_reset(eth_dev);
1210 
1211 	/* disable interrupt */
1212 	ixgbe_disable_intr(hw);
1213 
1214 	/* reset mappings for queue statistics hw counters*/
1215 	ixgbe_reset_qstat_mappings(hw);
1216 
1217 	/* Allocate memory for storing MAC addresses */
1218 	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1219 					       hw->mac.num_rar_entries, 0);
1220 	if (eth_dev->data->mac_addrs == NULL) {
1221 		PMD_INIT_LOG(ERR,
1222 			     "Failed to allocate %u bytes needed to store "
1223 			     "MAC addresses",
1224 			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1225 		return -ENOMEM;
1226 	}
1227 	/* Copy the permanent MAC address */
1228 	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1229 			&eth_dev->data->mac_addrs[0]);
1230 
1231 	/* Allocate memory for storing hash filter MAC addresses */
1232 	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
1233 						    IXGBE_VMDQ_NUM_UC_MAC, 0);
1234 	if (eth_dev->data->hash_mac_addrs == NULL) {
1235 		PMD_INIT_LOG(ERR,
1236 			     "Failed to allocate %d bytes needed to store MAC addresses",
1237 			     ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1238 		return -ENOMEM;
1239 	}
1240 
1241 	/* initialize the vfta */
1242 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1243 
1244 	/* initialize the hw strip bitmap*/
1245 	memset(hwstrip, 0, sizeof(*hwstrip));
1246 
1247 	/* initialize PF if max_vfs not zero */
1248 	ixgbe_pf_host_init(eth_dev);
1249 
1250 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1251 	/* let hardware know driver is loaded */
1252 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1253 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
1254 	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1255 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1256 	IXGBE_WRITE_FLUSH(hw);
1257 
1258 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1259 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1260 			     (int) hw->mac.type, (int) hw->phy.type,
1261 			     (int) hw->phy.sfp_type);
1262 	else
1263 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1264 			     (int) hw->mac.type, (int) hw->phy.type);
1265 
1266 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1267 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
1268 		     pci_dev->id.device_id);
1269 
1270 	rte_intr_callback_register(intr_handle,
1271 				   ixgbe_dev_interrupt_handler, eth_dev);
1272 
1273 	/* enable uio/vfio intr/eventfd mapping */
1274 	rte_intr_enable(intr_handle);
1275 
1276 	/* enable support intr */
1277 	ixgbe_enable_intr(eth_dev);
1278 
1279 	/* initialize filter info */
1280 	memset(filter_info, 0,
1281 	       sizeof(struct ixgbe_filter_info));
1282 
1283 	/* initialize 5tuple filter list */
1284 	TAILQ_INIT(&filter_info->fivetuple_list);
1285 
1286 	/* initialize flow director filter list & hash */
1287 	ixgbe_fdir_filter_init(eth_dev);
1288 
1289 	/* initialize l2 tunnel filter list & hash */
1290 	ixgbe_l2_tn_filter_init(eth_dev);
1291 
1292 	/* initialize flow filter lists */
1293 	ixgbe_filterlist_init();
1294 
1295 	/* initialize bandwidth configuration info */
1296 	memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
1297 
1298 	/* initialize Traffic Manager configuration */
1299 	ixgbe_tm_conf_init(eth_dev);
1300 
1301 	return 0;
1302 }
1303 
1304 static int
1305 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1306 {
1307 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1308 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1309 	struct ixgbe_hw *hw;
1310 	int retries = 0;
1311 	int ret;
1312 
1313 	PMD_INIT_FUNC_TRACE();
1314 
1315 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1316 		return 0;
1317 
1318 	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1319 
1320 	if (hw->adapter_stopped == 0)
1321 		ixgbe_dev_close(eth_dev);
1322 
1323 	eth_dev->dev_ops = NULL;
1324 	eth_dev->rx_pkt_burst = NULL;
1325 	eth_dev->tx_pkt_burst = NULL;
1326 
1327 	/* Unlock any pending hardware semaphore */
1328 	ixgbe_swfw_lock_reset(hw);
1329 
1330 	/* disable uio intr before callback unregister */
1331 	rte_intr_disable(intr_handle);
1332 
1333 	do {
1334 		ret = rte_intr_callback_unregister(intr_handle,
1335 				ixgbe_dev_interrupt_handler, eth_dev);
1336 		if (ret >= 0) {
1337 			break;
1338 		} else if (ret != -EAGAIN) {
1339 			PMD_INIT_LOG(ERR,
1340 				"intr callback unregister failed: %d",
1341 				ret);
1342 			return ret;
1343 		}
1344 		rte_delay_ms(100);
1345 	} while (retries++ < (10 + IXGBE_LINK_UP_TIME));
1346 
1347 	/* cancel the delay handler before remove dev */
1348 	rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, eth_dev);
1349 
1350 	/* cancel the link handler before remove dev */
1351 	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, eth_dev);
1352 
1353 	/* uninitialize PF if max_vfs not zero */
1354 	ixgbe_pf_host_uninit(eth_dev);
1355 
1356 	/* remove all the fdir filters & hash */
1357 	ixgbe_fdir_filter_uninit(eth_dev);
1358 
1359 	/* remove all the L2 tunnel filters & hash */
1360 	ixgbe_l2_tn_filter_uninit(eth_dev);
1361 
1362 	/* Remove all ntuple filters of the device */
1363 	ixgbe_ntuple_filter_uninit(eth_dev);
1364 
1365 	/* clear all the filters list */
1366 	ixgbe_filterlist_flush();
1367 
1368 	/* Remove all Traffic Manager configuration */
1369 	ixgbe_tm_conf_uninit(eth_dev);
1370 
1371 #ifdef RTE_LIBRTE_SECURITY
1372 	rte_free(eth_dev->security_ctx);
1373 #endif
1374 
1375 	return 0;
1376 }
1377 
1378 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
1379 {
1380 	struct ixgbe_filter_info *filter_info =
1381 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
1382 	struct ixgbe_5tuple_filter *p_5tuple;
1383 
1384 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
1385 		TAILQ_REMOVE(&filter_info->fivetuple_list,
1386 			     p_5tuple,
1387 			     entries);
1388 		rte_free(p_5tuple);
1389 	}
1390 	memset(filter_info->fivetuple_mask, 0,
1391 	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
1392 
1393 	return 0;
1394 }
1395 
1396 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
1397 {
1398 	struct ixgbe_hw_fdir_info *fdir_info =
1399 		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1400 	struct ixgbe_fdir_filter *fdir_filter;
1401 
1402 		if (fdir_info->hash_map)
1403 		rte_free(fdir_info->hash_map);
1404 	if (fdir_info->hash_handle)
1405 		rte_hash_free(fdir_info->hash_handle);
1406 
1407 	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1408 		TAILQ_REMOVE(&fdir_info->fdir_list,
1409 			     fdir_filter,
1410 			     entries);
1411 		rte_free(fdir_filter);
1412 	}
1413 
1414 	return 0;
1415 }
1416 
1417 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
1418 {
1419 	struct ixgbe_l2_tn_info *l2_tn_info =
1420 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1421 	struct ixgbe_l2_tn_filter *l2_tn_filter;
1422 
1423 	if (l2_tn_info->hash_map)
1424 		rte_free(l2_tn_info->hash_map);
1425 	if (l2_tn_info->hash_handle)
1426 		rte_hash_free(l2_tn_info->hash_handle);
1427 
1428 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
1429 		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
1430 			     l2_tn_filter,
1431 			     entries);
1432 		rte_free(l2_tn_filter);
1433 	}
1434 
1435 	return 0;
1436 }
1437 
1438 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
1439 {
1440 	struct ixgbe_hw_fdir_info *fdir_info =
1441 		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
1442 	char fdir_hash_name[RTE_HASH_NAMESIZE];
1443 	struct rte_hash_parameters fdir_hash_params = {
1444 		.name = fdir_hash_name,
1445 		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
1446 		.key_len = sizeof(union ixgbe_atr_input),
1447 		.hash_func = rte_hash_crc,
1448 		.hash_func_init_val = 0,
1449 		.socket_id = rte_socket_id(),
1450 	};
1451 
1452 	TAILQ_INIT(&fdir_info->fdir_list);
1453 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1454 		 "fdir_%s", eth_dev->device->name);
1455 	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
1456 	if (!fdir_info->hash_handle) {
1457 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1458 		return -EINVAL;
1459 	}
1460 	fdir_info->hash_map = rte_zmalloc("ixgbe",
1461 					  sizeof(struct ixgbe_fdir_filter *) *
1462 					  IXGBE_MAX_FDIR_FILTER_NUM,
1463 					  0);
1464 	if (!fdir_info->hash_map) {
1465 		PMD_INIT_LOG(ERR,
1466 			     "Failed to allocate memory for fdir hash map!");
1467 		return -ENOMEM;
1468 	}
1469 	fdir_info->mask_added = FALSE;
1470 
1471 	return 0;
1472 }
1473 
1474 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
1475 {
1476 	struct ixgbe_l2_tn_info *l2_tn_info =
1477 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
1478 	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
1479 	struct rte_hash_parameters l2_tn_hash_params = {
1480 		.name = l2_tn_hash_name,
1481 		.entries = IXGBE_MAX_L2_TN_FILTER_NUM,
1482 		.key_len = sizeof(struct ixgbe_l2_tn_key),
1483 		.hash_func = rte_hash_crc,
1484 		.hash_func_init_val = 0,
1485 		.socket_id = rte_socket_id(),
1486 	};
1487 
1488 	TAILQ_INIT(&l2_tn_info->l2_tn_list);
1489 	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
1490 		 "l2_tn_%s", eth_dev->device->name);
1491 	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
1492 	if (!l2_tn_info->hash_handle) {
1493 		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
1494 		return -EINVAL;
1495 	}
1496 	l2_tn_info->hash_map = rte_zmalloc("ixgbe",
1497 				   sizeof(struct ixgbe_l2_tn_filter *) *
1498 				   IXGBE_MAX_L2_TN_FILTER_NUM,
1499 				   0);
1500 	if (!l2_tn_info->hash_map) {
1501 		PMD_INIT_LOG(ERR,
1502 			"Failed to allocate memory for L2 TN hash map!");
1503 		return -ENOMEM;
1504 	}
1505 	l2_tn_info->e_tag_en = FALSE;
1506 	l2_tn_info->e_tag_fwd_en = FALSE;
1507 	l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;
1508 
1509 	return 0;
1510 }
1511 /*
1512  * Negotiate mailbox API version with the PF.
1513  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
1514  * Then we try to negotiate starting with the most recent one.
1515  * If all negotiation attempts fail, then we will proceed with
1516  * the default one (ixgbe_mbox_api_10).
1517  */
1518 static void
1519 ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1520 {
1521 	int32_t i;
1522 
1523 	/* start with highest supported, proceed down */
1524 	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
1525 		ixgbe_mbox_api_12,
1526 		ixgbe_mbox_api_11,
1527 		ixgbe_mbox_api_10,
1528 	};
1529 
1530 	for (i = 0;
1531 			i != RTE_DIM(sup_ver) &&
1532 			ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1533 			i++)
1534 		;
1535 }
1536 
1537 static void
1538 generate_random_mac_addr(struct ether_addr *mac_addr)
1539 {
1540 	uint64_t random;
1541 
1542 	/* Set Organizationally Unique Identifier (OUI) prefix. */
1543 	mac_addr->addr_bytes[0] = 0x00;
1544 	mac_addr->addr_bytes[1] = 0x09;
1545 	mac_addr->addr_bytes[2] = 0xC0;
1546 	/* Force indication of locally assigned MAC address. */
1547 	mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
1548 	/* Generate the last 3 bytes of the MAC address with a random number. */
1549 	random = rte_rand();
1550 	memcpy(&mac_addr->addr_bytes[3], &random, 3);
1551 }
1552 
1553 static int
1554 devarg_handle_int(__rte_unused const char *key, const char *value,
1555 		  void *extra_args)
1556 {
1557 	uint16_t *n = extra_args;
1558 
1559 	if (value == NULL || extra_args == NULL)
1560 		return -EINVAL;
1561 
1562 	*n = (uint16_t)strtoul(value, NULL, 0);
1563 	if (*n == USHRT_MAX && errno == ERANGE)
1564 		return -1;
1565 
1566 	return 0;
1567 }
1568 
1569 static void
1570 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter,
1571 		      struct rte_devargs *devargs)
1572 {
1573 	struct rte_kvargs *kvlist;
1574 	uint16_t pflink_fullchk;
1575 
1576 	if (devargs == NULL)
1577 		return;
1578 
1579 	kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments);
1580 	if (kvlist == NULL)
1581 		return;
1582 
1583 	if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 &&
1584 	    rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK,
1585 			       devarg_handle_int, &pflink_fullchk) == 0 &&
1586 	    pflink_fullchk == 1)
1587 		adapter->pflink_fullchk = 1;
1588 
1589 	rte_kvargs_free(kvlist);
1590 }
1591 
1592 /*
1593  * Virtual Function device init
1594  */
1595 static int
1596 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1597 {
1598 	int diag;
1599 	uint32_t tc, tcs;
1600 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1601 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1602 	struct ixgbe_hw *hw =
1603 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1604 	struct ixgbe_vfta *shadow_vfta =
1605 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1606 	struct ixgbe_hwstrip *hwstrip =
1607 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1608 	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
1609 
1610 	PMD_INIT_FUNC_TRACE();
1611 
1612 	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1613 	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1614 	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1615 
1616 	/* for secondary processes, we don't initialise any further as primary
1617 	 * has already done this work. Only check we don't need a different
1618 	 * RX function
1619 	 */
1620 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1621 		struct ixgbe_tx_queue *txq;
1622 		/* TX queue function in primary, set by last queue initialized
1623 		 * Tx queue may not initialized by primary process
1624 		 */
1625 		if (eth_dev->data->tx_queues) {
1626 			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1627 			ixgbe_set_tx_function(eth_dev, txq);
1628 		} else {
1629 			/* Use default TX function if we get here */
1630 			PMD_INIT_LOG(NOTICE,
1631 				     "No TX queues configured yet. Using default TX function.");
1632 		}
1633 
1634 		ixgbe_set_rx_function(eth_dev);
1635 
1636 		return 0;
1637 	}
1638 
1639 	ixgbevf_parse_devargs(eth_dev->data->dev_private,
1640 			      pci_dev->device.devargs);
1641 
1642 	rte_eth_copy_pci_info(eth_dev, pci_dev);
1643 
1644 	hw->device_id = pci_dev->id.device_id;
1645 	hw->vendor_id = pci_dev->id.vendor_id;
1646 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1647 
1648 	/* initialize the vfta */
1649 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1650 
1651 	/* initialize the hw strip bitmap*/
1652 	memset(hwstrip, 0, sizeof(*hwstrip));
1653 
1654 	/* Initialize the shared code (base driver) */
1655 	diag = ixgbe_init_shared_code(hw);
1656 	if (diag != IXGBE_SUCCESS) {
1657 		PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1658 		return -EIO;
1659 	}
1660 
1661 	/* init_mailbox_params */
1662 	hw->mbx.ops.init_params(hw);
1663 
1664 	/* Reset the hw statistics */
1665 	ixgbevf_dev_stats_reset(eth_dev);
1666 
1667 	/* Disable the interrupts for VF */
1668 	ixgbevf_intr_disable(eth_dev);
1669 
1670 	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1671 	diag = hw->mac.ops.reset_hw(hw);
1672 
1673 	/*
1674 	 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1675 	 * the underlying PF driver has not assigned a MAC address to the VF.
1676 	 * In this case, assign a random MAC address.
1677 	 */
1678 	if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1679 		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1680 		/*
1681 		 * This error code will be propagated to the app by
1682 		 * rte_eth_dev_reset, so use a public error code rather than
1683 		 * the internal-only IXGBE_ERR_RESET_FAILED
1684 		 */
1685 		return -EAGAIN;
1686 	}
1687 
1688 	/* negotiate mailbox API version to use with the PF. */
1689 	ixgbevf_negotiate_api(hw);
1690 
1691 	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1692 	ixgbevf_get_queues(hw, &tcs, &tc);
1693 
1694 	/* Allocate memory for storing MAC addresses */
1695 	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
1696 					       hw->mac.num_rar_entries, 0);
1697 	if (eth_dev->data->mac_addrs == NULL) {
1698 		PMD_INIT_LOG(ERR,
1699 			     "Failed to allocate %u bytes needed to store "
1700 			     "MAC addresses",
1701 			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1702 		return -ENOMEM;
1703 	}
1704 
1705 	/* Generate a random MAC address, if none was assigned by PF. */
1706 	if (is_zero_ether_addr(perm_addr)) {
1707 		generate_random_mac_addr(perm_addr);
1708 		diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1709 		if (diag) {
1710 			rte_free(eth_dev->data->mac_addrs);
1711 			eth_dev->data->mac_addrs = NULL;
1712 			return diag;
1713 		}
1714 		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1715 		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1716 			     "%02x:%02x:%02x:%02x:%02x:%02x",
1717 			     perm_addr->addr_bytes[0],
1718 			     perm_addr->addr_bytes[1],
1719 			     perm_addr->addr_bytes[2],
1720 			     perm_addr->addr_bytes[3],
1721 			     perm_addr->addr_bytes[4],
1722 			     perm_addr->addr_bytes[5]);
1723 	}
1724 
1725 	/* Copy the permanent MAC address */
1726 	ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1727 
1728 	/* reset the hardware with the new settings */
1729 	diag = hw->mac.ops.start_hw(hw);
1730 	switch (diag) {
1731 	case  0:
1732 		break;
1733 
1734 	default:
1735 		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1736 		return -EIO;
1737 	}
1738 
1739 	rte_intr_callback_register(intr_handle,
1740 				   ixgbevf_dev_interrupt_handler, eth_dev);
1741 	rte_intr_enable(intr_handle);
1742 	ixgbevf_intr_enable(eth_dev);
1743 
1744 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1745 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
1746 		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1747 
1748 	return 0;
1749 }
1750 
1751 /* Virtual Function device uninit */
1752 
1753 static int
1754 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1755 {
1756 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1757 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1758 	struct ixgbe_hw *hw;
1759 
1760 	PMD_INIT_FUNC_TRACE();
1761 
1762 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1763 		return 0;
1764 
1765 	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1766 
1767 	if (hw->adapter_stopped == 0)
1768 		ixgbevf_dev_close(eth_dev);
1769 
1770 	eth_dev->dev_ops = NULL;
1771 	eth_dev->rx_pkt_burst = NULL;
1772 	eth_dev->tx_pkt_burst = NULL;
1773 
1774 	/* Disable the interrupts for VF */
1775 	ixgbevf_intr_disable(eth_dev);
1776 
1777 	rte_intr_disable(intr_handle);
1778 	rte_intr_callback_unregister(intr_handle,
1779 				     ixgbevf_dev_interrupt_handler, eth_dev);
1780 
1781 	return 0;
1782 }
1783 
1784 static int
1785 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1786 		struct rte_pci_device *pci_dev)
1787 {
1788 	char name[RTE_ETH_NAME_MAX_LEN];
1789 	struct rte_eth_dev *pf_ethdev;
1790 	struct rte_eth_devargs eth_da;
1791 	int i, retval;
1792 
1793 	if (pci_dev->device.devargs) {
1794 		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
1795 				&eth_da);
1796 		if (retval)
1797 			return retval;
1798 	} else
1799 		memset(&eth_da, 0, sizeof(eth_da));
1800 
1801 	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
1802 		sizeof(struct ixgbe_adapter),
1803 		eth_dev_pci_specific_init, pci_dev,
1804 		eth_ixgbe_dev_init, NULL);
1805 
1806 	if (retval || eth_da.nb_representor_ports < 1)
1807 		return retval;
1808 
1809 	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1810 	if (pf_ethdev == NULL)
1811 		return -ENODEV;
1812 
1813 	/* probe VF representor ports */
1814 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
1815 		struct ixgbe_vf_info *vfinfo;
1816 		struct ixgbe_vf_representor representor;
1817 
1818 		vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
1819 			pf_ethdev->data->dev_private);
1820 		if (vfinfo == NULL) {
1821 			PMD_DRV_LOG(ERR,
1822 				"no virtual functions supported by PF");
1823 			break;
1824 		}
1825 
1826 		representor.vf_id = eth_da.representor_ports[i];
1827 		representor.switch_domain_id = vfinfo->switch_domain_id;
1828 		representor.pf_ethdev = pf_ethdev;
1829 
1830 		/* representor port net_bdf_port */
1831 		snprintf(name, sizeof(name), "net_%s_representor_%d",
1832 			pci_dev->device.name,
1833 			eth_da.representor_ports[i]);
1834 
1835 		retval = rte_eth_dev_create(&pci_dev->device, name,
1836 			sizeof(struct ixgbe_vf_representor), NULL, NULL,
1837 			ixgbe_vf_representor_init, &representor);
1838 
1839 		if (retval)
1840 			PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
1841 				"representor %s.", name);
1842 	}
1843 
1844 	return 0;
1845 }
1846 
1847 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
1848 {
1849 	struct rte_eth_dev *ethdev;
1850 
1851 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1852 	if (!ethdev)
1853 		return -ENODEV;
1854 
1855 	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
1856 		return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit);
1857 	else
1858 		return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit);
1859 }
1860 
1861 static struct rte_pci_driver rte_ixgbe_pmd = {
1862 	.id_table = pci_id_ixgbe_map,
1863 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1864 		     RTE_PCI_DRV_IOVA_AS_VA,
1865 	.probe = eth_ixgbe_pci_probe,
1866 	.remove = eth_ixgbe_pci_remove,
1867 };
1868 
1869 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1870 	struct rte_pci_device *pci_dev)
1871 {
1872 	return rte_eth_dev_pci_generic_probe(pci_dev,
1873 		sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
1874 }
1875 
1876 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
1877 {
1878 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
1879 }
1880 
1881 /*
1882  * virtual function driver struct
1883  */
1884 static struct rte_pci_driver rte_ixgbevf_pmd = {
1885 	.id_table = pci_id_ixgbevf_map,
1886 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
1887 	.probe = eth_ixgbevf_pci_probe,
1888 	.remove = eth_ixgbevf_pci_remove,
1889 };
1890 
1891 static int
1892 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1893 {
1894 	struct ixgbe_hw *hw =
1895 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1896 	struct ixgbe_vfta *shadow_vfta =
1897 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1898 	uint32_t vfta;
1899 	uint32_t vid_idx;
1900 	uint32_t vid_bit;
1901 
1902 	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1903 	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
1904 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1905 	if (on)
1906 		vfta |= vid_bit;
1907 	else
1908 		vfta &= ~vid_bit;
1909 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1910 
1911 	/* update local VFTA copy */
1912 	shadow_vfta->vfta[vid_idx] = vfta;
1913 
1914 	return 0;
1915 }
1916 
1917 static void
1918 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1919 {
1920 	if (on)
1921 		ixgbe_vlan_hw_strip_enable(dev, queue);
1922 	else
1923 		ixgbe_vlan_hw_strip_disable(dev, queue);
1924 }
1925 
1926 static int
1927 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1928 		    enum rte_vlan_type vlan_type,
1929 		    uint16_t tpid)
1930 {
1931 	struct ixgbe_hw *hw =
1932 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1933 	int ret = 0;
1934 	uint32_t reg;
1935 	uint32_t qinq;
1936 
1937 	qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1938 	qinq &= IXGBE_DMATXCTL_GDV;
1939 
1940 	switch (vlan_type) {
1941 	case ETH_VLAN_TYPE_INNER:
1942 		if (qinq) {
1943 			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1944 			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1945 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1946 			reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1947 			reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1948 				| ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1949 			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1950 		} else {
1951 			ret = -ENOTSUP;
1952 			PMD_DRV_LOG(ERR, "Inner type is not supported"
1953 				    " by single VLAN");
1954 		}
1955 		break;
1956 	case ETH_VLAN_TYPE_OUTER:
1957 		if (qinq) {
1958 			/* Only the high 16-bits is valid */
1959 			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1960 					IXGBE_EXVET_VET_EXT_SHIFT);
1961 		} else {
1962 			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1963 			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1964 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1965 			reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1966 			reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1967 				| ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1968 			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1969 		}
1970 
1971 		break;
1972 	default:
1973 		ret = -EINVAL;
1974 		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1975 		break;
1976 	}
1977 
1978 	return ret;
1979 }
1980 
1981 void
1982 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1983 {
1984 	struct ixgbe_hw *hw =
1985 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1986 	uint32_t vlnctrl;
1987 
1988 	PMD_INIT_FUNC_TRACE();
1989 
1990 	/* Filter Table Disable */
1991 	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1992 	vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1993 
1994 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1995 }
1996 
1997 void
1998 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1999 {
2000 	struct ixgbe_hw *hw =
2001 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2002 	struct ixgbe_vfta *shadow_vfta =
2003 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2004 	uint32_t vlnctrl;
2005 	uint16_t i;
2006 
2007 	PMD_INIT_FUNC_TRACE();
2008 
2009 	/* Filter Table Enable */
2010 	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2011 	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2012 	vlnctrl |= IXGBE_VLNCTRL_VFE;
2013 
2014 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2015 
2016 	/* write whatever is in local vfta copy */
2017 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2018 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
2019 }
2020 
2021 static void
2022 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
2023 {
2024 	struct ixgbe_hwstrip *hwstrip =
2025 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
2026 	struct ixgbe_rx_queue *rxq;
2027 
2028 	if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
2029 		return;
2030 
2031 	if (on)
2032 		IXGBE_SET_HWSTRIP(hwstrip, queue);
2033 	else
2034 		IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
2035 
2036 	if (queue >= dev->data->nb_rx_queues)
2037 		return;
2038 
2039 	rxq = dev->data->rx_queues[queue];
2040 
2041 	if (on) {
2042 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
2043 		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2044 	} else {
2045 		rxq->vlan_flags = PKT_RX_VLAN;
2046 		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2047 	}
2048 }
2049 
2050 static void
2051 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
2052 {
2053 	struct ixgbe_hw *hw =
2054 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2055 	uint32_t ctrl;
2056 
2057 	PMD_INIT_FUNC_TRACE();
2058 
2059 	if (hw->mac.type == ixgbe_mac_82598EB) {
2060 		/* No queue level support */
2061 		PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
2062 		return;
2063 	}
2064 
2065 	/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
2066 	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2067 	ctrl &= ~IXGBE_RXDCTL_VME;
2068 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2069 
2070 	/* record those setting for HW strip per queue */
2071 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
2072 }
2073 
2074 static void
2075 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
2076 {
2077 	struct ixgbe_hw *hw =
2078 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2079 	uint32_t ctrl;
2080 
2081 	PMD_INIT_FUNC_TRACE();
2082 
2083 	if (hw->mac.type == ixgbe_mac_82598EB) {
2084 		/* No queue level supported */
2085 		PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
2086 		return;
2087 	}
2088 
2089 	/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
2090 	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2091 	ctrl |= IXGBE_RXDCTL_VME;
2092 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2093 
2094 	/* record those setting for HW strip per queue */
2095 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2096 }
2097 
2098 static void
2099 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2100 {
2101 	struct ixgbe_hw *hw =
2102 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2103 	uint32_t ctrl;
2104 
2105 	PMD_INIT_FUNC_TRACE();
2106 
2107 	/* DMATXCTRL: Geric Double VLAN Disable */
2108 	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2109 	ctrl &= ~IXGBE_DMATXCTL_GDV;
2110 	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2111 
2112 	/* CTRL_EXT: Global Double VLAN Disable */
2113 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2114 	ctrl &= ~IXGBE_EXTENDED_VLAN;
2115 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2116 
2117 }
2118 
2119 static void
2120 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2121 {
2122 	struct ixgbe_hw *hw =
2123 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2124 	uint32_t ctrl;
2125 
2126 	PMD_INIT_FUNC_TRACE();
2127 
2128 	/* DMATXCTRL: Geric Double VLAN Enable */
2129 	ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2130 	ctrl |= IXGBE_DMATXCTL_GDV;
2131 	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2132 
2133 	/* CTRL_EXT: Global Double VLAN Enable */
2134 	ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2135 	ctrl |= IXGBE_EXTENDED_VLAN;
2136 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2137 
2138 	/* Clear pooling mode of PFVTCTL. It's required by X550. */
2139 	if (hw->mac.type == ixgbe_mac_X550 ||
2140 	    hw->mac.type == ixgbe_mac_X550EM_x ||
2141 	    hw->mac.type == ixgbe_mac_X550EM_a) {
2142 		ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2143 		ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2144 		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2145 	}
2146 
2147 	/*
2148 	 * VET EXT field in the EXVET register = 0x8100 by default
2149 	 * So no need to change. Same to VT field of DMATXCTL register
2150 	 */
2151 }
2152 
2153 void
2154 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
2155 {
2156 	struct ixgbe_hw *hw =
2157 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2158 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2159 	uint32_t ctrl;
2160 	uint16_t i;
2161 	struct ixgbe_rx_queue *rxq;
2162 	bool on;
2163 
2164 	PMD_INIT_FUNC_TRACE();
2165 
2166 	if (hw->mac.type == ixgbe_mac_82598EB) {
2167 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
2168 			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2169 			ctrl |= IXGBE_VLNCTRL_VME;
2170 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2171 		} else {
2172 			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2173 			ctrl &= ~IXGBE_VLNCTRL_VME;
2174 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2175 		}
2176 	} else {
2177 		/*
2178 		 * Other 10G NIC, the VLAN strip can be setup
2179 		 * per queue in RXDCTL
2180 		 */
2181 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
2182 			rxq = dev->data->rx_queues[i];
2183 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2184 			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
2185 				ctrl |= IXGBE_RXDCTL_VME;
2186 				on = TRUE;
2187 			} else {
2188 				ctrl &= ~IXGBE_RXDCTL_VME;
2189 				on = FALSE;
2190 			}
2191 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2192 
2193 			/* record those setting for HW strip per queue */
2194 			ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
2195 		}
2196 	}
2197 }
2198 
2199 static void
2200 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
2201 {
2202 	uint16_t i;
2203 	struct rte_eth_rxmode *rxmode;
2204 	struct ixgbe_rx_queue *rxq;
2205 
2206 	if (mask & ETH_VLAN_STRIP_MASK) {
2207 		rxmode = &dev->data->dev_conf.rxmode;
2208 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2209 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
2210 				rxq = dev->data->rx_queues[i];
2211 				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2212 			}
2213 		else
2214 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
2215 				rxq = dev->data->rx_queues[i];
2216 				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2217 			}
2218 	}
2219 }
2220 
2221 static int
2222 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
2223 {
2224 	struct rte_eth_rxmode *rxmode;
2225 	rxmode = &dev->data->dev_conf.rxmode;
2226 
2227 	if (mask & ETH_VLAN_STRIP_MASK) {
2228 		ixgbe_vlan_hw_strip_config(dev);
2229 	}
2230 
2231 	if (mask & ETH_VLAN_FILTER_MASK) {
2232 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2233 			ixgbe_vlan_hw_filter_enable(dev);
2234 		else
2235 			ixgbe_vlan_hw_filter_disable(dev);
2236 	}
2237 
2238 	if (mask & ETH_VLAN_EXTEND_MASK) {
2239 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2240 			ixgbe_vlan_hw_extend_enable(dev);
2241 		else
2242 			ixgbe_vlan_hw_extend_disable(dev);
2243 	}
2244 
2245 	return 0;
2246 }
2247 
2248 static int
2249 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2250 {
2251 	ixgbe_config_vlan_strip_on_all_queues(dev, mask);
2252 
2253 	ixgbe_vlan_offload_config(dev, mask);
2254 
2255 	return 0;
2256 }
2257 
2258 static void
2259 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2260 {
2261 	struct ixgbe_hw *hw =
2262 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2263 	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2264 	uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2265 
2266 	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2267 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2268 }
2269 
2270 static int
2271 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2272 {
2273 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2274 
2275 	switch (nb_rx_q) {
2276 	case 1:
2277 	case 2:
2278 		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2279 		break;
2280 	case 4:
2281 		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2282 		break;
2283 	default:
2284 		return -EINVAL;
2285 	}
2286 
2287 	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
2288 		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2289 	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
2290 		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2291 	return 0;
2292 }
2293 
2294 static int
2295 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2296 {
2297 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2298 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2299 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
2300 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
2301 
2302 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2303 		/* check multi-queue mode */
2304 		switch (dev_conf->rxmode.mq_mode) {
2305 		case ETH_MQ_RX_VMDQ_DCB:
2306 			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
2307 			break;
2308 		case ETH_MQ_RX_VMDQ_DCB_RSS:
2309 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
2310 			PMD_INIT_LOG(ERR, "SRIOV active,"
2311 					" unsupported mq_mode rx %d.",
2312 					dev_conf->rxmode.mq_mode);
2313 			return -EINVAL;
2314 		case ETH_MQ_RX_RSS:
2315 		case ETH_MQ_RX_VMDQ_RSS:
2316 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2317 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2318 				if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2319 					PMD_INIT_LOG(ERR, "SRIOV is active,"
2320 						" invalid queue number"
2321 						" for VMDQ RSS, allowed"
2322 						" value are 1, 2 or 4.");
2323 					return -EINVAL;
2324 				}
2325 			break;
2326 		case ETH_MQ_RX_VMDQ_ONLY:
2327 		case ETH_MQ_RX_NONE:
2328 			/* if nothing mq mode configure, use default scheme */
2329 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2330 			break;
2331 		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2332 			/* SRIOV only works in VMDq enable mode */
2333 			PMD_INIT_LOG(ERR, "SRIOV is active,"
2334 					" wrong mq_mode rx %d.",
2335 					dev_conf->rxmode.mq_mode);
2336 			return -EINVAL;
2337 		}
2338 
2339 		switch (dev_conf->txmode.mq_mode) {
2340 		case ETH_MQ_TX_VMDQ_DCB:
2341 			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
2342 			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2343 			break;
2344 		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2345 			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2346 			break;
2347 		}
2348 
2349 		/* check valid queue number */
2350 		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2351 		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2352 			PMD_INIT_LOG(ERR, "SRIOV is active,"
2353 					" nb_rx_q=%d nb_tx_q=%d queue number"
2354 					" must be less than or equal to %d.",
2355 					nb_rx_q, nb_tx_q,
2356 					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2357 			return -EINVAL;
2358 		}
2359 	} else {
2360 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2361 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2362 					  " not supported.");
2363 			return -EINVAL;
2364 		}
2365 		/* check configuration for vmdb+dcb mode */
2366 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2367 			const struct rte_eth_vmdq_dcb_conf *conf;
2368 
2369 			if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2370 				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2371 						IXGBE_VMDQ_DCB_NB_QUEUES);
2372 				return -EINVAL;
2373 			}
2374 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2375 			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2376 			       conf->nb_queue_pools == ETH_32_POOLS)) {
2377 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2378 						" nb_queue_pools must be %d or %d.",
2379 						ETH_16_POOLS, ETH_32_POOLS);
2380 				return -EINVAL;
2381 			}
2382 		}
2383 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2384 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
2385 
2386 			if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2387 				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2388 						 IXGBE_VMDQ_DCB_NB_QUEUES);
2389 				return -EINVAL;
2390 			}
2391 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2392 			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2393 			       conf->nb_queue_pools == ETH_32_POOLS)) {
2394 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2395 						" nb_queue_pools != %d and"
2396 						" nb_queue_pools != %d.",
2397 						ETH_16_POOLS, ETH_32_POOLS);
2398 				return -EINVAL;
2399 			}
2400 		}
2401 
2402 		/* For DCB mode check our configuration before we go further */
2403 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2404 			const struct rte_eth_dcb_rx_conf *conf;
2405 
2406 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2407 			if (!(conf->nb_tcs == ETH_4_TCS ||
2408 			       conf->nb_tcs == ETH_8_TCS)) {
2409 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2410 						" and nb_tcs != %d.",
2411 						ETH_4_TCS, ETH_8_TCS);
2412 				return -EINVAL;
2413 			}
2414 		}
2415 
2416 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2417 			const struct rte_eth_dcb_tx_conf *conf;
2418 
2419 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2420 			if (!(conf->nb_tcs == ETH_4_TCS ||
2421 			       conf->nb_tcs == ETH_8_TCS)) {
2422 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2423 						" and nb_tcs != %d.",
2424 						ETH_4_TCS, ETH_8_TCS);
2425 				return -EINVAL;
2426 			}
2427 		}
2428 
2429 		/*
2430 		 * When DCB/VT is off, maximum number of queues changes,
2431 		 * except for 82598EB, which remains constant.
2432 		 */
2433 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2434 				hw->mac.type != ixgbe_mac_82598EB) {
2435 			if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2436 				PMD_INIT_LOG(ERR,
2437 					     "Neither VT nor DCB are enabled, "
2438 					     "nb_tx_q > %d.",
2439 					     IXGBE_NONE_MODE_TX_NB_QUEUES);
2440 				return -EINVAL;
2441 			}
2442 		}
2443 	}
2444 	return 0;
2445 }
2446 
2447 static int
2448 ixgbe_dev_configure(struct rte_eth_dev *dev)
2449 {
2450 	struct ixgbe_interrupt *intr =
2451 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2452 	struct ixgbe_adapter *adapter = dev->data->dev_private;
2453 	int ret;
2454 
2455 	PMD_INIT_FUNC_TRACE();
2456 	/* multipe queue mode checking */
2457 	ret  = ixgbe_check_mq_mode(dev);
2458 	if (ret != 0) {
2459 		PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2460 			    ret);
2461 		return ret;
2462 	}
2463 
2464 	/* set flag to update link status after init */
2465 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2466 
2467 	/*
2468 	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
2469 	 * allocation or vector Rx preconditions we will reset it.
2470 	 */
2471 	adapter->rx_bulk_alloc_allowed = true;
2472 	adapter->rx_vec_allowed = true;
2473 
2474 	return 0;
2475 }
2476 
2477 static void
2478 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2479 {
2480 	struct ixgbe_hw *hw =
2481 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2482 	struct ixgbe_interrupt *intr =
2483 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2484 	uint32_t gpie;
2485 
2486 	/* only set up it on X550EM_X */
2487 	if (hw->mac.type == ixgbe_mac_X550EM_x) {
2488 		gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2489 		gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2490 		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2491 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2492 			intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2493 	}
2494 }
2495 
2496 int
2497 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
2498 			uint16_t tx_rate, uint64_t q_msk)
2499 {
2500 	struct ixgbe_hw *hw;
2501 	struct ixgbe_vf_info *vfinfo;
2502 	struct rte_eth_link link;
2503 	uint8_t  nb_q_per_pool;
2504 	uint32_t queue_stride;
2505 	uint32_t queue_idx, idx = 0, vf_idx;
2506 	uint32_t queue_end;
2507 	uint16_t total_rate = 0;
2508 	struct rte_pci_device *pci_dev;
2509 
2510 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2511 	rte_eth_link_get_nowait(dev->data->port_id, &link);
2512 
2513 	if (vf >= pci_dev->max_vfs)
2514 		return -EINVAL;
2515 
2516 	if (tx_rate > link.link_speed)
2517 		return -EINVAL;
2518 
2519 	if (q_msk == 0)
2520 		return 0;
2521 
2522 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2523 	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
2524 	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2525 	queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
2526 	queue_idx = vf * queue_stride;
2527 	queue_end = queue_idx + nb_q_per_pool - 1;
2528 	if (queue_end >= hw->mac.max_tx_queues)
2529 		return -EINVAL;
2530 
2531 	if (vfinfo) {
2532 		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
2533 			if (vf_idx == vf)
2534 				continue;
2535 			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
2536 				idx++)
2537 				total_rate += vfinfo[vf_idx].tx_rate[idx];
2538 		}
2539 	} else {
2540 		return -EINVAL;
2541 	}
2542 
2543 	/* Store tx_rate for this vf. */
2544 	for (idx = 0; idx < nb_q_per_pool; idx++) {
2545 		if (((uint64_t)0x1 << idx) & q_msk) {
2546 			if (vfinfo[vf].tx_rate[idx] != tx_rate)
2547 				vfinfo[vf].tx_rate[idx] = tx_rate;
2548 			total_rate += tx_rate;
2549 		}
2550 	}
2551 
2552 	if (total_rate > dev->data->dev_link.link_speed) {
2553 		/* Reset stored TX rate of the VF if it causes exceed
2554 		 * link speed.
2555 		 */
2556 		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
2557 		return -EINVAL;
2558 	}
2559 
2560 	/* Set RTTBCNRC of each queue/pool for vf X  */
2561 	for (; queue_idx <= queue_end; queue_idx++) {
2562 		if (0x1 & q_msk)
2563 			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
2564 		q_msk = q_msk >> 1;
2565 	}
2566 
2567 	return 0;
2568 }
2569 
2570 /*
2571  * Configure device link speed and setup link.
2572  * It returns 0 on success.
2573  */
2574 static int
2575 ixgbe_dev_start(struct rte_eth_dev *dev)
2576 {
2577 	struct ixgbe_hw *hw =
2578 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2579 	struct ixgbe_vf_info *vfinfo =
2580 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2581 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2582 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2583 	uint32_t intr_vector = 0;
2584 	int err, link_up = 0, negotiate = 0;
2585 	uint32_t speed = 0;
2586 	uint32_t allowed_speeds = 0;
2587 	int mask = 0;
2588 	int status;
2589 	uint16_t vf, idx;
2590 	uint32_t *link_speeds;
2591 	struct ixgbe_tm_conf *tm_conf =
2592 		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2593 
2594 	PMD_INIT_FUNC_TRACE();
2595 
2596 	/* IXGBE devices don't support:
2597 	*    - half duplex (checked afterwards for valid speeds)
2598 	*    - fixed speed: TODO implement
2599 	*/
2600 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2601 		PMD_INIT_LOG(ERR,
2602 		"Invalid link_speeds for port %u, fix speed not supported",
2603 				dev->data->port_id);
2604 		return -EINVAL;
2605 	}
2606 
2607 	/* Stop the link setup handler before resetting the HW. */
2608 	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
2609 
2610 	/* disable uio/vfio intr/eventfd mapping */
2611 	rte_intr_disable(intr_handle);
2612 
2613 	/* stop adapter */
2614 	hw->adapter_stopped = 0;
2615 	ixgbe_stop_adapter(hw);
2616 
2617 	/* reinitialize adapter
2618 	 * this calls reset and start
2619 	 */
2620 	status = ixgbe_pf_reset_hw(hw);
2621 	if (status != 0)
2622 		return -1;
2623 	hw->mac.ops.start_hw(hw);
2624 	hw->mac.get_link_status = true;
2625 
2626 	/* configure PF module if SRIOV enabled */
2627 	ixgbe_pf_host_configure(dev);
2628 
2629 	ixgbe_dev_phy_intr_setup(dev);
2630 
2631 	/* check and configure queue intr-vector mapping */
2632 	if ((rte_intr_cap_multiple(intr_handle) ||
2633 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
2634 	    dev->data->dev_conf.intr_conf.rxq != 0) {
2635 		intr_vector = dev->data->nb_rx_queues;
2636 		if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2637 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2638 					IXGBE_MAX_INTR_QUEUE_NUM);
2639 			return -ENOTSUP;
2640 		}
2641 		if (rte_intr_efd_enable(intr_handle, intr_vector))
2642 			return -1;
2643 	}
2644 
2645 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2646 		intr_handle->intr_vec =
2647 			rte_zmalloc("intr_vec",
2648 				    dev->data->nb_rx_queues * sizeof(int), 0);
2649 		if (intr_handle->intr_vec == NULL) {
2650 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2651 				     " intr_vec", dev->data->nb_rx_queues);
2652 			return -ENOMEM;
2653 		}
2654 	}
2655 
2656 	/* confiugre msix for sleep until rx interrupt */
2657 	ixgbe_configure_msix(dev);
2658 
2659 	/* initialize transmission unit */
2660 	ixgbe_dev_tx_init(dev);
2661 
2662 	/* This can fail when allocating mbufs for descriptor rings */
2663 	err = ixgbe_dev_rx_init(dev);
2664 	if (err) {
2665 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2666 		goto error;
2667 	}
2668 
2669 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2670 		ETH_VLAN_EXTEND_MASK;
2671 	err = ixgbe_vlan_offload_config(dev, mask);
2672 	if (err) {
2673 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
2674 		goto error;
2675 	}
2676 
2677 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
2678 		/* Enable vlan filtering for VMDq */
2679 		ixgbe_vmdq_vlan_hw_filter_enable(dev);
2680 	}
2681 
2682 	/* Configure DCB hw */
2683 	ixgbe_configure_dcb(dev);
2684 
2685 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
2686 		err = ixgbe_fdir_configure(dev);
2687 		if (err)
2688 			goto error;
2689 	}
2690 
2691 	/* Restore vf rate limit */
2692 	if (vfinfo != NULL) {
2693 		for (vf = 0; vf < pci_dev->max_vfs; vf++)
2694 			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
2695 				if (vfinfo[vf].tx_rate[idx] != 0)
2696 					ixgbe_set_vf_rate_limit(
2697 						dev, vf,
2698 						vfinfo[vf].tx_rate[idx],
2699 						1 << idx);
2700 	}
2701 
2702 	ixgbe_restore_statistics_mapping(dev);
2703 
2704 	err = ixgbe_dev_rxtx_start(dev);
2705 	if (err < 0) {
2706 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2707 		goto error;
2708 	}
2709 
2710 	/* Skip link setup if loopback mode is enabled for 82599. */
2711 	if (hw->mac.type == ixgbe_mac_82599EB &&
2712 			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
2713 		goto skip_link_setup;
2714 
2715 	if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2716 		err = hw->mac.ops.setup_sfp(hw);
2717 		if (err)
2718 			goto error;
2719 	}
2720 
2721 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2722 		/* Turn on the copper */
2723 		ixgbe_set_phy_power(hw, true);
2724 	} else {
2725 		/* Turn on the laser */
2726 		ixgbe_enable_tx_laser(hw);
2727 	}
2728 
2729 	err = ixgbe_check_link(hw, &speed, &link_up, 0);
2730 	if (err)
2731 		goto error;
2732 	dev->data->dev_link.link_status = link_up;
2733 
2734 	err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2735 	if (err)
2736 		goto error;
2737 
2738 	switch (hw->mac.type) {
2739 	case ixgbe_mac_X550:
2740 	case ixgbe_mac_X550EM_x:
2741 	case ixgbe_mac_X550EM_a:
2742 		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2743 			ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
2744 			ETH_LINK_SPEED_10G;
2745 		break;
2746 	default:
2747 		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2748 			ETH_LINK_SPEED_10G;
2749 	}
2750 
2751 	link_speeds = &dev->data->dev_conf.link_speeds;
2752 	if (*link_speeds & ~allowed_speeds) {
2753 		PMD_INIT_LOG(ERR, "Invalid link setting");
2754 		goto error;
2755 	}
2756 
2757 	speed = 0x0;
2758 	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
2759 		switch (hw->mac.type) {
2760 		case ixgbe_mac_82598EB:
2761 			speed = IXGBE_LINK_SPEED_82598_AUTONEG;
2762 			break;
2763 		case ixgbe_mac_82599EB:
2764 		case ixgbe_mac_X540:
2765 			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2766 			break;
2767 		case ixgbe_mac_X550:
2768 		case ixgbe_mac_X550EM_x:
2769 		case ixgbe_mac_X550EM_a:
2770 			speed = IXGBE_LINK_SPEED_X550_AUTONEG;
2771 			break;
2772 		default:
2773 			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
2774 		}
2775 	} else {
2776 		if (*link_speeds & ETH_LINK_SPEED_10G)
2777 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
2778 		if (*link_speeds & ETH_LINK_SPEED_5G)
2779 			speed |= IXGBE_LINK_SPEED_5GB_FULL;
2780 		if (*link_speeds & ETH_LINK_SPEED_2_5G)
2781 			speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2782 		if (*link_speeds & ETH_LINK_SPEED_1G)
2783 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
2784 		if (*link_speeds & ETH_LINK_SPEED_100M)
2785 			speed |= IXGBE_LINK_SPEED_100_FULL;
2786 	}
2787 
2788 	err = ixgbe_setup_link(hw, speed, link_up);
2789 	if (err)
2790 		goto error;
2791 
2792 skip_link_setup:
2793 
2794 	if (rte_intr_allow_others(intr_handle)) {
2795 		/* check if lsc interrupt is enabled */
2796 		if (dev->data->dev_conf.intr_conf.lsc != 0)
2797 			ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
2798 		else
2799 			ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
2800 		ixgbe_dev_macsec_interrupt_setup(dev);
2801 	} else {
2802 		rte_intr_callback_unregister(intr_handle,
2803 					     ixgbe_dev_interrupt_handler, dev);
2804 		if (dev->data->dev_conf.intr_conf.lsc != 0)
2805 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
2806 				     " no intr multiplex");
2807 	}
2808 
2809 	/* check if rxq interrupt is enabled */
2810 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2811 	    rte_intr_dp_is_en(intr_handle))
2812 		ixgbe_dev_rxq_interrupt_setup(dev);
2813 
2814 	/* enable uio/vfio intr/eventfd mapping */
2815 	rte_intr_enable(intr_handle);
2816 
2817 	/* resume enabled intr since hw reset */
2818 	ixgbe_enable_intr(dev);
2819 	ixgbe_l2_tunnel_conf(dev);
2820 	ixgbe_filter_restore(dev);
2821 
2822 	if (tm_conf->root && !tm_conf->committed)
2823 		PMD_DRV_LOG(WARNING,
2824 			    "please call hierarchy_commit() "
2825 			    "before starting the port");
2826 
2827 	/*
2828 	 * Update link status right before return, because it may
2829 	 * start link configuration process in a separate thread.
2830 	 */
2831 	ixgbe_dev_link_update(dev, 0);
2832 
2833 	return 0;
2834 
2835 error:
2836 	PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2837 	ixgbe_dev_clear_queues(dev);
2838 	return -EIO;
2839 }
2840 
2841 /*
2842  * Stop device: disable rx and tx functions to allow for reconfiguring.
2843  */
2844 static void
2845 ixgbe_dev_stop(struct rte_eth_dev *dev)
2846 {
2847 	struct rte_eth_link link;
2848 	struct ixgbe_adapter *adapter = dev->data->dev_private;
2849 	struct ixgbe_hw *hw =
2850 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2851 	struct ixgbe_vf_info *vfinfo =
2852 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
2853 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2854 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2855 	int vf;
2856 	struct ixgbe_tm_conf *tm_conf =
2857 		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2858 
2859 	PMD_INIT_FUNC_TRACE();
2860 
2861 	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
2862 
2863 	/* disable interrupts */
2864 	ixgbe_disable_intr(hw);
2865 
2866 	/* reset the NIC */
2867 	ixgbe_pf_reset_hw(hw);
2868 	hw->adapter_stopped = 0;
2869 
2870 	/* stop adapter */
2871 	ixgbe_stop_adapter(hw);
2872 
2873 	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2874 		vfinfo[vf].clear_to_send = false;
2875 
2876 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2877 		/* Turn off the copper */
2878 		ixgbe_set_phy_power(hw, false);
2879 	} else {
2880 		/* Turn off the laser */
2881 		ixgbe_disable_tx_laser(hw);
2882 	}
2883 
2884 	ixgbe_dev_clear_queues(dev);
2885 
2886 	/* Clear stored conf */
2887 	dev->data->scattered_rx = 0;
2888 	dev->data->lro = 0;
2889 
2890 	/* Clear recorded link status */
2891 	memset(&link, 0, sizeof(link));
2892 	rte_eth_linkstatus_set(dev, &link);
2893 
2894 	if (!rte_intr_allow_others(intr_handle))
2895 		/* resume to the default handler */
2896 		rte_intr_callback_register(intr_handle,
2897 					   ixgbe_dev_interrupt_handler,
2898 					   (void *)dev);
2899 
2900 	/* Clean datapath event and queue/vec mapping */
2901 	rte_intr_efd_disable(intr_handle);
2902 	if (intr_handle->intr_vec != NULL) {
2903 		rte_free(intr_handle->intr_vec);
2904 		intr_handle->intr_vec = NULL;
2905 	}
2906 
2907 	/* reset hierarchy commit */
2908 	tm_conf->committed = false;
2909 
2910 	adapter->rss_reta_updated = 0;
2911 }
2912 
2913 /*
2914  * Set device link up: enable tx.
2915  */
2916 static int
2917 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2918 {
2919 	struct ixgbe_hw *hw =
2920 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2921 	if (hw->mac.type == ixgbe_mac_82599EB) {
2922 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2923 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2924 			/* Not suported in bypass mode */
2925 			PMD_INIT_LOG(ERR, "Set link up is not supported "
2926 				     "by device id 0x%x", hw->device_id);
2927 			return -ENOTSUP;
2928 		}
2929 #endif
2930 	}
2931 
2932 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2933 		/* Turn on the copper */
2934 		ixgbe_set_phy_power(hw, true);
2935 	} else {
2936 		/* Turn on the laser */
2937 		ixgbe_enable_tx_laser(hw);
2938 	}
2939 
2940 	return 0;
2941 }
2942 
2943 /*
2944  * Set device link down: disable tx.
2945  */
2946 static int
2947 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2948 {
2949 	struct ixgbe_hw *hw =
2950 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2951 	if (hw->mac.type == ixgbe_mac_82599EB) {
2952 #ifdef RTE_LIBRTE_IXGBE_BYPASS
2953 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2954 			/* Not suported in bypass mode */
2955 			PMD_INIT_LOG(ERR, "Set link down is not supported "
2956 				     "by device id 0x%x", hw->device_id);
2957 			return -ENOTSUP;
2958 		}
2959 #endif
2960 	}
2961 
2962 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2963 		/* Turn off the copper */
2964 		ixgbe_set_phy_power(hw, false);
2965 	} else {
2966 		/* Turn off the laser */
2967 		ixgbe_disable_tx_laser(hw);
2968 	}
2969 
2970 	return 0;
2971 }
2972 
2973 /*
2974  * Reset and stop device.
2975  */
2976 static void
2977 ixgbe_dev_close(struct rte_eth_dev *dev)
2978 {
2979 	struct ixgbe_hw *hw =
2980 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2981 
2982 	PMD_INIT_FUNC_TRACE();
2983 
2984 	ixgbe_pf_reset_hw(hw);
2985 
2986 	ixgbe_dev_stop(dev);
2987 	hw->adapter_stopped = 1;
2988 
2989 	ixgbe_dev_free_queues(dev);
2990 
2991 	ixgbe_disable_pcie_master(hw);
2992 
2993 	/* reprogram the RAR[0] in case user changed it. */
2994 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2995 }
2996 
2997 /*
2998  * Reset PF device.
2999  */
3000 static int
3001 ixgbe_dev_reset(struct rte_eth_dev *dev)
3002 {
3003 	int ret;
3004 
3005 	/* When a DPDK PMD PF begin to reset PF port, it should notify all
3006 	 * its VF to make them align with it. The detailed notification
3007 	 * mechanism is PMD specific. As to ixgbe PF, it is rather complex.
3008 	 * To avoid unexpected behavior in VF, currently reset of PF with
3009 	 * SR-IOV activation is not supported. It might be supported later.
3010 	 */
3011 	if (dev->data->sriov.active)
3012 		return -ENOTSUP;
3013 
3014 	ret = eth_ixgbe_dev_uninit(dev);
3015 	if (ret)
3016 		return ret;
3017 
3018 	ret = eth_ixgbe_dev_init(dev, NULL);
3019 
3020 	return ret;
3021 }
3022 
3023 static void
3024 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
3025 			   struct ixgbe_hw_stats *hw_stats,
3026 			   struct ixgbe_macsec_stats *macsec_stats,
3027 			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
3028 			   uint64_t *total_qprc, uint64_t *total_qprdc)
3029 {
3030 	uint32_t bprc, lxon, lxoff, total;
3031 	uint32_t delta_gprc = 0;
3032 	unsigned i;
3033 	/* Workaround for RX byte count not including CRC bytes when CRC
3034 	 * strip is enabled. CRC bytes are removed from counters when crc_strip
3035 	 * is disabled.
3036 	 */
3037 	int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
3038 			IXGBE_HLREG0_RXCRCSTRP);
3039 
3040 	hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3041 	hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3042 	hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3043 	hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3044 
3045 	for (i = 0; i < 8; i++) {
3046 		uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3047 
3048 		/* global total per queue */
3049 		hw_stats->mpc[i] += mp;
3050 		/* Running comprehensive total for stats display */
3051 		*total_missed_rx += hw_stats->mpc[i];
3052 		if (hw->mac.type == ixgbe_mac_82598EB) {
3053 			hw_stats->rnbc[i] +=
3054 			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3055 			hw_stats->pxonrxc[i] +=
3056 				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
3057 			hw_stats->pxoffrxc[i] +=
3058 				IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
3059 		} else {
3060 			hw_stats->pxonrxc[i] +=
3061 				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
3062 			hw_stats->pxoffrxc[i] +=
3063 				IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
3064 			hw_stats->pxon2offc[i] +=
3065 				IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
3066 		}
3067 		hw_stats->pxontxc[i] +=
3068 		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
3069 		hw_stats->pxofftxc[i] +=
3070 		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
3071 	}
3072 	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3073 		uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3074 		uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3075 		uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3076 
3077 		delta_gprc += delta_qprc;
3078 
3079 		hw_stats->qprc[i] += delta_qprc;
3080 		hw_stats->qptc[i] += delta_qptc;
3081 
3082 		hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
3083 		hw_stats->qbrc[i] +=
3084 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
3085 		if (crc_strip == 0)
3086 			hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
3087 
3088 		hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
3089 		hw_stats->qbtc[i] +=
3090 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
3091 
3092 		hw_stats->qprdc[i] += delta_qprdc;
3093 		*total_qprdc += hw_stats->qprdc[i];
3094 
3095 		*total_qprc += hw_stats->qprc[i];
3096 		*total_qbrc += hw_stats->qbrc[i];
3097 	}
3098 	hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3099 	hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3100 	hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3101 
3102 	/*
3103 	 * An errata states that gprc actually counts good + missed packets:
3104 	 * Workaround to set gprc to summated queue packet receives
3105 	 */
3106 	hw_stats->gprc = *total_qprc;
3107 
3108 	if (hw->mac.type != ixgbe_mac_82598EB) {
3109 		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
3110 		hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3111 		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
3112 		hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3113 		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
3114 		hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3115 		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3116 		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3117 	} else {
3118 		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3119 		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3120 		/* 82598 only has a counter in the high register */
3121 		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3122 		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3123 		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3124 	}
3125 	uint64_t old_tpr = hw_stats->tpr;
3126 
3127 	hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3128 	hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3129 
3130 	if (crc_strip == 0)
3131 		hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
3132 
3133 	uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
3134 	hw_stats->gptc += delta_gptc;
3135 	hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
3136 	hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
3137 
3138 	/*
3139 	 * Workaround: mprc hardware is incorrectly counting
3140 	 * broadcasts, so for now we subtract those.
3141 	 */
3142 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3143 	hw_stats->bprc += bprc;
3144 	hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3145 	if (hw->mac.type == ixgbe_mac_82598EB)
3146 		hw_stats->mprc -= bprc;
3147 
3148 	hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3149 	hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3150 	hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3151 	hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3152 	hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3153 	hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3154 
3155 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3156 	hw_stats->lxontxc += lxon;
3157 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3158 	hw_stats->lxofftxc += lxoff;
3159 	total = lxon + lxoff;
3160 
3161 	hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3162 	hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3163 	hw_stats->gptc -= total;
3164 	hw_stats->mptc -= total;
3165 	hw_stats->ptc64 -= total;
3166 	hw_stats->gotc -= total * ETHER_MIN_LEN;
3167 
3168 	hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3169 	hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3170 	hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3171 	hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3172 	hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3173 	hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3174 	hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3175 	hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3176 	hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3177 	hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3178 	hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3179 	hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3180 	hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3181 	hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3182 	hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3183 	hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3184 	/* Only read FCOE on 82599 */
3185 	if (hw->mac.type != ixgbe_mac_82598EB) {
3186 		hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3187 		hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3188 		hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3189 		hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3190 		hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3191 	}
3192 
3193 	/* Flow Director Stats registers */
3194 	if (hw->mac.type != ixgbe_mac_82598EB) {
3195 		hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3196 		hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3197 		hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
3198 					IXGBE_FDIRUSTAT) & 0xFFFF;
3199 		hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
3200 					IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
3201 		hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
3202 					IXGBE_FDIRFSTAT) & 0xFFFF;
3203 		hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
3204 					IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
3205 	}
3206 	/* MACsec Stats registers */
3207 	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
3208 	macsec_stats->out_pkts_encrypted +=
3209 		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
3210 	macsec_stats->out_pkts_protected +=
3211 		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
3212 	macsec_stats->out_octets_encrypted +=
3213 		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
3214 	macsec_stats->out_octets_protected +=
3215 		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
3216 	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
3217 	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
3218 	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
3219 	macsec_stats->in_pkts_unknownsci +=
3220 		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
3221 	macsec_stats->in_octets_decrypted +=
3222 		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
3223 	macsec_stats->in_octets_validated +=
3224 		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
3225 	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
3226 	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
3227 	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
3228 	for (i = 0; i < 2; i++) {
3229 		macsec_stats->in_pkts_ok +=
3230 			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
3231 		macsec_stats->in_pkts_invalid +=
3232 			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
3233 		macsec_stats->in_pkts_notvalid +=
3234 			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
3235 	}
3236 	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
3237 	macsec_stats->in_pkts_notusingsa +=
3238 		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3239 }
3240 
3241 /*
3242  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3243  */
3244 static int
3245 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3246 {
3247 	struct ixgbe_hw *hw =
3248 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3249 	struct ixgbe_hw_stats *hw_stats =
3250 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3251 	struct ixgbe_macsec_stats *macsec_stats =
3252 			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3253 				dev->data->dev_private);
3254 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3255 	unsigned i;
3256 
3257 	total_missed_rx = 0;
3258 	total_qbrc = 0;
3259 	total_qprc = 0;
3260 	total_qprdc = 0;
3261 
3262 	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3263 			&total_qbrc, &total_qprc, &total_qprdc);
3264 
3265 	if (stats == NULL)
3266 		return -EINVAL;
3267 
3268 	/* Fill out the rte_eth_stats statistics structure */
3269 	stats->ipackets = total_qprc;
3270 	stats->ibytes = total_qbrc;
3271 	stats->opackets = hw_stats->gptc;
3272 	stats->obytes = hw_stats->gotc;
3273 
3274 	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3275 		stats->q_ipackets[i] = hw_stats->qprc[i];
3276 		stats->q_opackets[i] = hw_stats->qptc[i];
3277 		stats->q_ibytes[i] = hw_stats->qbrc[i];
3278 		stats->q_obytes[i] = hw_stats->qbtc[i];
3279 		stats->q_errors[i] = hw_stats->qprdc[i];
3280 	}
3281 
3282 	/* Rx Errors */
3283 	stats->imissed  = total_missed_rx;
3284 	stats->ierrors  = hw_stats->crcerrs +
3285 			  hw_stats->mspdc +
3286 			  hw_stats->rlec +
3287 			  hw_stats->ruc +
3288 			  hw_stats->roc +
3289 			  hw_stats->illerrc +
3290 			  hw_stats->errbc +
3291 			  hw_stats->rfc +
3292 			  hw_stats->fccrc +
3293 			  hw_stats->fclast;
3294 
3295 	/* Tx Errors */
3296 	stats->oerrors  = 0;
3297 	return 0;
3298 }
3299 
3300 static void
3301 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3302 {
3303 	struct ixgbe_hw_stats *stats =
3304 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3305 
3306 	/* HW registers are cleared on read */
3307 	ixgbe_dev_stats_get(dev, NULL);
3308 
3309 	/* Reset software totals */
3310 	memset(stats, 0, sizeof(*stats));
3311 }
3312 
3313 /* This function calculates the number of xstats based on the current config */
3314 static unsigned
3315 ixgbe_xstats_calc_num(void) {
3316 	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3317 		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3318 		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3319 }
3320 
3321 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3322 	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3323 {
3324 	const unsigned cnt_stats = ixgbe_xstats_calc_num();
3325 	unsigned stat, i, count;
3326 
3327 	if (xstats_names != NULL) {
3328 		count = 0;
3329 
3330 		/* Note: limit >= cnt_stats checked upstream
3331 		 * in rte_eth_xstats_names()
3332 		 */
3333 
3334 		/* Extended stats from ixgbe_hw_stats */
3335 		for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3336 			snprintf(xstats_names[count].name,
3337 				sizeof(xstats_names[count].name),
3338 				"%s",
3339 				rte_ixgbe_stats_strings[i].name);
3340 			count++;
3341 		}
3342 
3343 		/* MACsec Stats */
3344 		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3345 			snprintf(xstats_names[count].name,
3346 				sizeof(xstats_names[count].name),
3347 				"%s",
3348 				rte_ixgbe_macsec_strings[i].name);
3349 			count++;
3350 		}
3351 
3352 		/* RX Priority Stats */
3353 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3354 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3355 				snprintf(xstats_names[count].name,
3356 					sizeof(xstats_names[count].name),
3357 					"rx_priority%u_%s", i,
3358 					rte_ixgbe_rxq_strings[stat].name);
3359 				count++;
3360 			}
3361 		}
3362 
3363 		/* TX Priority Stats */
3364 		for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3365 			for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3366 				snprintf(xstats_names[count].name,
3367 					sizeof(xstats_names[count].name),
3368 					"tx_priority%u_%s", i,
3369 					rte_ixgbe_txq_strings[stat].name);
3370 				count++;
3371 			}
3372 		}
3373 	}
3374 	return cnt_stats;
3375 }
3376 
3377 static int ixgbe_dev_xstats_get_names_by_id(
3378 	struct rte_eth_dev *dev,
3379 	struct rte_eth_xstat_name *xstats_names,
3380 	const uint64_t *ids,
3381 	unsigned int limit)
3382 {
3383 	if (!ids) {
3384 		const unsigned int cnt_stats = ixgbe_xstats_calc_num();
3385 		unsigned int stat, i, count;
3386 
3387 		if (xstats_names != NULL) {
3388 			count = 0;
3389 
3390 			/* Note: limit >= cnt_stats checked upstream
3391 			 * in rte_eth_xstats_names()
3392 			 */
3393 
3394 			/* Extended stats from ixgbe_hw_stats */
3395 			for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3396 				snprintf(xstats_names[count].name,
3397 					sizeof(xstats_names[count].name),
3398 					"%s",
3399 					rte_ixgbe_stats_strings[i].name);
3400 				count++;
3401 			}
3402 
3403 			/* MACsec Stats */
3404 			for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3405 				snprintf(xstats_names[count].name,
3406 					sizeof(xstats_names[count].name),
3407 					"%s",
3408 					rte_ixgbe_macsec_strings[i].name);
3409 				count++;
3410 			}
3411 
3412 			/* RX Priority Stats */
3413 			for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3414 				for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3415 					snprintf(xstats_names[count].name,
3416 					    sizeof(xstats_names[count].name),
3417 					    "rx_priority%u_%s", i,
3418 					    rte_ixgbe_rxq_strings[stat].name);
3419 					count++;
3420 				}
3421 			}
3422 
3423 			/* TX Priority Stats */
3424 			for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3425 				for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3426 					snprintf(xstats_names[count].name,
3427 					    sizeof(xstats_names[count].name),
3428 					    "tx_priority%u_%s", i,
3429 					    rte_ixgbe_txq_strings[stat].name);
3430 					count++;
3431 				}
3432 			}
3433 		}
3434 		return cnt_stats;
3435 	}
3436 
3437 	uint16_t i;
3438 	uint16_t size = ixgbe_xstats_calc_num();
3439 	struct rte_eth_xstat_name xstats_names_copy[size];
3440 
3441 	ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
3442 			size);
3443 
3444 	for (i = 0; i < limit; i++) {
3445 		if (ids[i] >= size) {
3446 			PMD_INIT_LOG(ERR, "id value isn't valid");
3447 			return -1;
3448 		}
3449 		strcpy(xstats_names[i].name,
3450 				xstats_names_copy[ids[i]].name);
3451 	}
3452 	return limit;
3453 }
3454 
3455 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3456 	struct rte_eth_xstat_name *xstats_names, unsigned limit)
3457 {
3458 	unsigned i;
3459 
3460 	if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3461 		return -ENOMEM;
3462 
3463 	if (xstats_names != NULL)
3464 		for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
3465 			snprintf(xstats_names[i].name,
3466 				sizeof(xstats_names[i].name),
3467 				"%s", rte_ixgbevf_stats_strings[i].name);
3468 	return IXGBEVF_NB_XSTATS;
3469 }
3470 
3471 static int
3472 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3473 					 unsigned n)
3474 {
3475 	struct ixgbe_hw *hw =
3476 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3477 	struct ixgbe_hw_stats *hw_stats =
3478 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3479 	struct ixgbe_macsec_stats *macsec_stats =
3480 			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3481 				dev->data->dev_private);
3482 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3483 	unsigned i, stat, count = 0;
3484 
3485 	count = ixgbe_xstats_calc_num();
3486 
3487 	if (n < count)
3488 		return count;
3489 
3490 	total_missed_rx = 0;
3491 	total_qbrc = 0;
3492 	total_qprc = 0;
3493 	total_qprdc = 0;
3494 
3495 	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
3496 			&total_qbrc, &total_qprc, &total_qprdc);
3497 
3498 	/* If this is a reset xstats is NULL, and we have cleared the
3499 	 * registers by reading them.
3500 	 */
3501 	if (!xstats)
3502 		return 0;
3503 
3504 	/* Extended stats from ixgbe_hw_stats */
3505 	count = 0;
3506 	for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3507 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3508 				rte_ixgbe_stats_strings[i].offset);
3509 		xstats[count].id = count;
3510 		count++;
3511 	}
3512 
3513 	/* MACsec Stats */
3514 	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3515 		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
3516 				rte_ixgbe_macsec_strings[i].offset);
3517 		xstats[count].id = count;
3518 		count++;
3519 	}
3520 
3521 	/* RX Priority Stats */
3522 	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3523 		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3524 			xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3525 					rte_ixgbe_rxq_strings[stat].offset +
3526 					(sizeof(uint64_t) * i));
3527 			xstats[count].id = count;
3528 			count++;
3529 		}
3530 	}
3531 
3532 	/* TX Priority Stats */
3533 	for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3534 		for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3535 			xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3536 					rte_ixgbe_txq_strings[stat].offset +
3537 					(sizeof(uint64_t) * i));
3538 			xstats[count].id = count;
3539 			count++;
3540 		}
3541 	}
3542 	return count;
3543 }
3544 
3545 static int
3546 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
3547 		uint64_t *values, unsigned int n)
3548 {
3549 	if (!ids) {
3550 		struct ixgbe_hw *hw =
3551 				IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3552 		struct ixgbe_hw_stats *hw_stats =
3553 				IXGBE_DEV_PRIVATE_TO_STATS(
3554 						dev->data->dev_private);
3555 		struct ixgbe_macsec_stats *macsec_stats =
3556 				IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3557 					dev->data->dev_private);
3558 		uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3559 		unsigned int i, stat, count = 0;
3560 
3561 		count = ixgbe_xstats_calc_num();
3562 
3563 		if (!ids && n < count)
3564 			return count;
3565 
3566 		total_missed_rx = 0;
3567 		total_qbrc = 0;
3568 		total_qprc = 0;
3569 		total_qprdc = 0;
3570 
3571 		ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
3572 				&total_missed_rx, &total_qbrc, &total_qprc,
3573 				&total_qprdc);
3574 
3575 		/* If this is a reset xstats is NULL, and we have cleared the
3576 		 * registers by reading them.
3577 		 */
3578 		if (!ids && !values)
3579 			return 0;
3580 
3581 		/* Extended stats from ixgbe_hw_stats */
3582 		count = 0;
3583 		for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3584 			values[count] = *(uint64_t *)(((char *)hw_stats) +
3585 					rte_ixgbe_stats_strings[i].offset);
3586 			count++;
3587 		}
3588 
3589 		/* MACsec Stats */
3590 		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
3591 			values[count] = *(uint64_t *)(((char *)macsec_stats) +
3592 					rte_ixgbe_macsec_strings[i].offset);
3593 			count++;
3594 		}
3595 
3596 		/* RX Priority Stats */
3597 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3598 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3599 				values[count] =
3600 					*(uint64_t *)(((char *)hw_stats) +
3601 					rte_ixgbe_rxq_strings[stat].offset +
3602 					(sizeof(uint64_t) * i));
3603 				count++;
3604 			}
3605 		}
3606 
3607 		/* TX Priority Stats */
3608 		for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3609 			for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3610 				values[count] =
3611 					*(uint64_t *)(((char *)hw_stats) +
3612 					rte_ixgbe_txq_strings[stat].offset +
3613 					(sizeof(uint64_t) * i));
3614 				count++;
3615 			}
3616 		}
3617 		return count;
3618 	}
3619 
3620 	uint16_t i;
3621 	uint16_t size = ixgbe_xstats_calc_num();
3622 	uint64_t values_copy[size];
3623 
3624 	ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
3625 
3626 	for (i = 0; i < n; i++) {
3627 		if (ids[i] >= size) {
3628 			PMD_INIT_LOG(ERR, "id value isn't valid");
3629 			return -1;
3630 		}
3631 		values[i] = values_copy[ids[i]];
3632 	}
3633 	return n;
3634 }
3635 
3636 static void
3637 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3638 {
3639 	struct ixgbe_hw_stats *stats =
3640 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3641 	struct ixgbe_macsec_stats *macsec_stats =
3642 			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
3643 				dev->data->dev_private);
3644 
3645 	unsigned count = ixgbe_xstats_calc_num();
3646 
3647 	/* HW registers are cleared on read */
3648 	ixgbe_dev_xstats_get(dev, NULL, count);
3649 
3650 	/* Reset software totals */
3651 	memset(stats, 0, sizeof(*stats));
3652 	memset(macsec_stats, 0, sizeof(*macsec_stats));
3653 }
3654 
3655 static void
3656 ixgbevf_update_stats(struct rte_eth_dev *dev)
3657 {
3658 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3659 	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3660 			  IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3661 
3662 	/* Good Rx packet, include VF loopback */
3663 	UPDATE_VF_STAT(IXGBE_VFGPRC,
3664 	    hw_stats->last_vfgprc, hw_stats->vfgprc);
3665 
3666 	/* Good Rx octets, include VF loopback */
3667 	UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3668 	    hw_stats->last_vfgorc, hw_stats->vfgorc);
3669 
3670 	/* Good Tx packet, include VF loopback */
3671 	UPDATE_VF_STAT(IXGBE_VFGPTC,
3672 	    hw_stats->last_vfgptc, hw_stats->vfgptc);
3673 
3674 	/* Good Tx octets, include VF loopback */
3675 	UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3676 	    hw_stats->last_vfgotc, hw_stats->vfgotc);
3677 
3678 	/* Rx Multicst Packet */
3679 	UPDATE_VF_STAT(IXGBE_VFMPRC,
3680 	    hw_stats->last_vfmprc, hw_stats->vfmprc);
3681 }
3682 
3683 static int
3684 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3685 		       unsigned n)
3686 {
3687 	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3688 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3689 	unsigned i;
3690 
3691 	if (n < IXGBEVF_NB_XSTATS)
3692 		return IXGBEVF_NB_XSTATS;
3693 
3694 	ixgbevf_update_stats(dev);
3695 
3696 	if (!xstats)
3697 		return 0;
3698 
3699 	/* Extended stats */
3700 	for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
3701 		xstats[i].id = i;
3702 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3703 			rte_ixgbevf_stats_strings[i].offset);
3704 	}
3705 
3706 	return IXGBEVF_NB_XSTATS;
3707 }
3708 
3709 static int
3710 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3711 {
3712 	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3713 			  IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3714 
3715 	ixgbevf_update_stats(dev);
3716 
3717 	if (stats == NULL)
3718 		return -EINVAL;
3719 
3720 	stats->ipackets = hw_stats->vfgprc;
3721 	stats->ibytes = hw_stats->vfgorc;
3722 	stats->opackets = hw_stats->vfgptc;
3723 	stats->obytes = hw_stats->vfgotc;
3724 	return 0;
3725 }
3726 
3727 static void
3728 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3729 {
3730 	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3731 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3732 
3733 	/* Sync HW register to the last stats */
3734 	ixgbevf_dev_stats_get(dev, NULL);
3735 
3736 	/* reset HW current stats*/
3737 	hw_stats->vfgprc = 0;
3738 	hw_stats->vfgorc = 0;
3739 	hw_stats->vfgptc = 0;
3740 	hw_stats->vfgotc = 0;
3741 }
3742 
3743 static int
3744 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3745 {
3746 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3747 	u16 eeprom_verh, eeprom_verl;
3748 	u32 etrack_id;
3749 	int ret;
3750 
3751 	ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
3752 	ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
3753 
3754 	etrack_id = (eeprom_verh << 16) | eeprom_verl;
3755 	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
3756 
3757 	ret += 1; /* add the size of '\0' */
3758 	if (fw_size < (u32)ret)
3759 		return ret;
3760 	else
3761 		return 0;
3762 }
3763 
3764 static void
3765 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3766 {
3767 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3768 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3769 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3770 
3771 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3772 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3773 	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3774 		/*
3775 		 * When DCB/VT is off, maximum number of queues changes,
3776 		 * except for 82598EB, which remains constant.
3777 		 */
3778 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3779 				hw->mac.type != ixgbe_mac_82598EB)
3780 			dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3781 	}
3782 	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3783 	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3784 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3785 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3786 	dev_info->max_vfs = pci_dev->max_vfs;
3787 	if (hw->mac.type == ixgbe_mac_82598EB)
3788 		dev_info->max_vmdq_pools = ETH_16_POOLS;
3789 	else
3790 		dev_info->max_vmdq_pools = ETH_64_POOLS;
3791 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3792 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
3793 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
3794 				     dev_info->rx_queue_offload_capa);
3795 	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
3796 	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
3797 
3798 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3799 		.rx_thresh = {
3800 			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3801 			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3802 			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3803 		},
3804 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3805 		.rx_drop_en = 0,
3806 		.offloads = 0,
3807 	};
3808 
3809 	dev_info->default_txconf = (struct rte_eth_txconf) {
3810 		.tx_thresh = {
3811 			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3812 			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3813 			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3814 		},
3815 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3816 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3817 		.offloads = 0,
3818 	};
3819 
3820 	dev_info->rx_desc_lim = rx_desc_lim;
3821 	dev_info->tx_desc_lim = tx_desc_lim;
3822 
3823 	dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3824 	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3825 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3826 
3827 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3828 	if (hw->mac.type == ixgbe_mac_X540 ||
3829 	    hw->mac.type == ixgbe_mac_X540_vf ||
3830 	    hw->mac.type == ixgbe_mac_X550 ||
3831 	    hw->mac.type == ixgbe_mac_X550_vf) {
3832 		dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3833 	}
3834 	if (hw->mac.type == ixgbe_mac_X550) {
3835 		dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
3836 		dev_info->speed_capa |= ETH_LINK_SPEED_5G;
3837 	}
3838 
3839 	/* Driver-preferred Rx/Tx parameters */
3840 	dev_info->default_rxportconf.burst_size = 32;
3841 	dev_info->default_txportconf.burst_size = 32;
3842 	dev_info->default_rxportconf.nb_queues = 1;
3843 	dev_info->default_txportconf.nb_queues = 1;
3844 	dev_info->default_rxportconf.ring_size = 256;
3845 	dev_info->default_txportconf.ring_size = 256;
3846 }
3847 
3848 static const uint32_t *
3849 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3850 {
3851 	static const uint32_t ptypes[] = {
3852 		/* For non-vec functions,
3853 		 * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3854 		 * for vec functions,
3855 		 * refers to _recv_raw_pkts_vec().
3856 		 */
3857 		RTE_PTYPE_L2_ETHER,
3858 		RTE_PTYPE_L3_IPV4,
3859 		RTE_PTYPE_L3_IPV4_EXT,
3860 		RTE_PTYPE_L3_IPV6,
3861 		RTE_PTYPE_L3_IPV6_EXT,
3862 		RTE_PTYPE_L4_SCTP,
3863 		RTE_PTYPE_L4_TCP,
3864 		RTE_PTYPE_L4_UDP,
3865 		RTE_PTYPE_TUNNEL_IP,
3866 		RTE_PTYPE_INNER_L3_IPV6,
3867 		RTE_PTYPE_INNER_L3_IPV6_EXT,
3868 		RTE_PTYPE_INNER_L4_TCP,
3869 		RTE_PTYPE_INNER_L4_UDP,
3870 		RTE_PTYPE_UNKNOWN
3871 	};
3872 
3873 	if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3874 	    dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3875 	    dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3876 	    dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3877 		return ptypes;
3878 
3879 #if defined(RTE_ARCH_X86)
3880 	if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
3881 	    dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
3882 		return ptypes;
3883 #endif
3884 	return NULL;
3885 }
3886 
3887 static void
3888 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3889 		     struct rte_eth_dev_info *dev_info)
3890 {
3891 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3892 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3893 
3894 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3895 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3896 	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
3897 	dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
3898 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3899 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
3900 	dev_info->max_vfs = pci_dev->max_vfs;
3901 	if (hw->mac.type == ixgbe_mac_82598EB)
3902 		dev_info->max_vmdq_pools = ETH_16_POOLS;
3903 	else
3904 		dev_info->max_vmdq_pools = ETH_64_POOLS;
3905 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
3906 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
3907 				     dev_info->rx_queue_offload_capa);
3908 	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
3909 	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
3910 	dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3911 	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3912 
3913 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3914 		.rx_thresh = {
3915 			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3916 			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3917 			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3918 		},
3919 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3920 		.rx_drop_en = 0,
3921 		.offloads = 0,
3922 	};
3923 
3924 	dev_info->default_txconf = (struct rte_eth_txconf) {
3925 		.tx_thresh = {
3926 			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3927 			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3928 			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3929 		},
3930 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3931 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3932 		.offloads = 0,
3933 	};
3934 
3935 	dev_info->rx_desc_lim = rx_desc_lim;
3936 	dev_info->tx_desc_lim = tx_desc_lim;
3937 }
3938 
3939 static int
3940 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3941 		   int *link_up, int wait_to_complete)
3942 {
3943 	struct ixgbe_adapter *adapter = container_of(hw,
3944 						     struct ixgbe_adapter, hw);
3945 	struct ixgbe_mbx_info *mbx = &hw->mbx;
3946 	struct ixgbe_mac_info *mac = &hw->mac;
3947 	uint32_t links_reg, in_msg;
3948 	int ret_val = 0;
3949 
3950 	/* If we were hit with a reset drop the link */
3951 	if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
3952 		mac->get_link_status = true;
3953 
3954 	if (!mac->get_link_status)
3955 		goto out;
3956 
3957 	/* if link status is down no point in checking to see if pf is up */
3958 	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3959 	if (!(links_reg & IXGBE_LINKS_UP))
3960 		goto out;
3961 
3962 	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
3963 	 * before the link status is correct
3964 	 */
3965 	if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
3966 		int i;
3967 
3968 		for (i = 0; i < 5; i++) {
3969 			rte_delay_us(100);
3970 			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
3971 
3972 			if (!(links_reg & IXGBE_LINKS_UP))
3973 				goto out;
3974 		}
3975 	}
3976 
3977 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3978 	case IXGBE_LINKS_SPEED_10G_82599:
3979 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
3980 		if (hw->mac.type >= ixgbe_mac_X550) {
3981 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3982 				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3983 		}
3984 		break;
3985 	case IXGBE_LINKS_SPEED_1G_82599:
3986 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
3987 		break;
3988 	case IXGBE_LINKS_SPEED_100_82599:
3989 		*speed = IXGBE_LINK_SPEED_100_FULL;
3990 		if (hw->mac.type == ixgbe_mac_X550) {
3991 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
3992 				*speed = IXGBE_LINK_SPEED_5GB_FULL;
3993 		}
3994 		break;
3995 	case IXGBE_LINKS_SPEED_10_X550EM_A:
3996 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
3997 		/* Since Reserved in older MAC's */
3998 		if (hw->mac.type >= ixgbe_mac_X550)
3999 			*speed = IXGBE_LINK_SPEED_10_FULL;
4000 		break;
4001 	default:
4002 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4003 	}
4004 
4005 	if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) {
4006 		if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
4007 			mac->get_link_status = true;
4008 		else
4009 			mac->get_link_status = false;
4010 
4011 		goto out;
4012 	}
4013 
4014 	/* if the read failed it could just be a mailbox collision, best wait
4015 	 * until we are called again and don't report an error
4016 	 */
4017 	if (mbx->ops.read(hw, &in_msg, 1, 0))
4018 		goto out;
4019 
4020 	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
4021 		/* msg is not CTS and is NACK we must have lost CTS status */
4022 		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
4023 			mac->get_link_status = false;
4024 		goto out;
4025 	}
4026 
4027 	/* the pf is talking, if we timed out in the past we reinit */
4028 	if (!mbx->timeout) {
4029 		ret_val = -1;
4030 		goto out;
4031 	}
4032 
4033 	/* if we passed all the tests above then the link is up and we no
4034 	 * longer need to check for link
4035 	 */
4036 	mac->get_link_status = false;
4037 
4038 out:
4039 	*link_up = !mac->get_link_status;
4040 	return ret_val;
4041 }
4042 
4043 static void
4044 ixgbe_dev_setup_link_alarm_handler(void *param)
4045 {
4046 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4047 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4048 	struct ixgbe_interrupt *intr =
4049 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4050 	u32 speed;
4051 	bool autoneg = false;
4052 
4053 	speed = hw->phy.autoneg_advertised;
4054 	if (!speed)
4055 		ixgbe_get_link_capabilities(hw, &speed, &autoneg);
4056 
4057 	ixgbe_setup_link(hw, speed, true);
4058 
4059 	intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4060 }
4061 
4062 /* return 0 means link status changed, -1 means not changed */
4063 int
4064 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
4065 			    int wait_to_complete, int vf)
4066 {
4067 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4068 	struct rte_eth_link link;
4069 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
4070 	struct ixgbe_interrupt *intr =
4071 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4072 	int link_up;
4073 	int diag;
4074 	int wait = 1;
4075 
4076 	memset(&link, 0, sizeof(link));
4077 	link.link_status = ETH_LINK_DOWN;
4078 	link.link_speed = ETH_SPEED_NUM_NONE;
4079 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
4080 	link.link_autoneg = ETH_LINK_AUTONEG;
4081 
4082 	hw->mac.get_link_status = true;
4083 
4084 	if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG)
4085 		return rte_eth_linkstatus_set(dev, &link);
4086 
4087 	/* check if it needs to wait to complete, if lsc interrupt is enabled */
4088 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
4089 		wait = 0;
4090 
4091 	if (vf)
4092 		diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
4093 	else
4094 		diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
4095 
4096 	if (diag != 0) {
4097 		link.link_speed = ETH_SPEED_NUM_100M;
4098 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
4099 		return rte_eth_linkstatus_set(dev, &link);
4100 	}
4101 
4102 	if (link_up == 0) {
4103 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
4104 			intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
4105 			rte_eal_alarm_set(10,
4106 				ixgbe_dev_setup_link_alarm_handler, dev);
4107 		}
4108 		return rte_eth_linkstatus_set(dev, &link);
4109 	}
4110 
4111 	link.link_status = ETH_LINK_UP;
4112 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
4113 
4114 	switch (link_speed) {
4115 	default:
4116 	case IXGBE_LINK_SPEED_UNKNOWN:
4117 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
4118 		link.link_speed = ETH_SPEED_NUM_100M;
4119 		break;
4120 
4121 	case IXGBE_LINK_SPEED_100_FULL:
4122 		link.link_speed = ETH_SPEED_NUM_100M;
4123 		break;
4124 
4125 	case IXGBE_LINK_SPEED_1GB_FULL:
4126 		link.link_speed = ETH_SPEED_NUM_1G;
4127 		break;
4128 
4129 	case IXGBE_LINK_SPEED_2_5GB_FULL:
4130 		link.link_speed = ETH_SPEED_NUM_2_5G;
4131 		break;
4132 
4133 	case IXGBE_LINK_SPEED_5GB_FULL:
4134 		link.link_speed = ETH_SPEED_NUM_5G;
4135 		break;
4136 
4137 	case IXGBE_LINK_SPEED_10GB_FULL:
4138 		link.link_speed = ETH_SPEED_NUM_10G;
4139 		break;
4140 	}
4141 
4142 	return rte_eth_linkstatus_set(dev, &link);
4143 }
4144 
4145 static int
4146 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4147 {
4148 	return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
4149 }
4150 
4151 static int
4152 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4153 {
4154 	return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
4155 }
4156 
4157 static void
4158 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
4159 {
4160 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4161 	uint32_t fctrl;
4162 
4163 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4164 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4165 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4166 }
4167 
4168 static void
4169 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
4170 {
4171 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4172 	uint32_t fctrl;
4173 
4174 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4175 	fctrl &= (~IXGBE_FCTRL_UPE);
4176 	if (dev->data->all_multicast == 1)
4177 		fctrl |= IXGBE_FCTRL_MPE;
4178 	else
4179 		fctrl &= (~IXGBE_FCTRL_MPE);
4180 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4181 }
4182 
4183 static void
4184 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4185 {
4186 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4187 	uint32_t fctrl;
4188 
4189 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4190 	fctrl |= IXGBE_FCTRL_MPE;
4191 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4192 }
4193 
4194 static void
4195 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4196 {
4197 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4198 	uint32_t fctrl;
4199 
4200 	if (dev->data->promiscuous == 1)
4201 		return; /* must remain in all_multicast mode */
4202 
4203 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4204 	fctrl &= (~IXGBE_FCTRL_MPE);
4205 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4206 }
4207 
4208 /**
4209  * It clears the interrupt causes and enables the interrupt.
4210  * It will be called once only during nic initialized.
4211  *
4212  * @param dev
4213  *  Pointer to struct rte_eth_dev.
4214  * @param on
4215  *  Enable or Disable.
4216  *
4217  * @return
4218  *  - On success, zero.
4219  *  - On failure, a negative value.
4220  */
4221 static int
4222 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4223 {
4224 	struct ixgbe_interrupt *intr =
4225 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4226 
4227 	ixgbe_dev_link_status_print(dev);
4228 	if (on)
4229 		intr->mask |= IXGBE_EICR_LSC;
4230 	else
4231 		intr->mask &= ~IXGBE_EICR_LSC;
4232 
4233 	return 0;
4234 }
4235 
4236 /**
4237  * It clears the interrupt causes and enables the interrupt.
4238  * It will be called once only during nic initialized.
4239  *
4240  * @param dev
4241  *  Pointer to struct rte_eth_dev.
4242  *
4243  * @return
4244  *  - On success, zero.
4245  *  - On failure, a negative value.
4246  */
4247 static int
4248 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4249 {
4250 	struct ixgbe_interrupt *intr =
4251 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4252 
4253 	intr->mask |= IXGBE_EICR_RTX_QUEUE;
4254 
4255 	return 0;
4256 }
4257 
4258 /**
4259  * It clears the interrupt causes and enables the interrupt.
4260  * It will be called once only during nic initialized.
4261  *
4262  * @param dev
4263  *  Pointer to struct rte_eth_dev.
4264  *
4265  * @return
4266  *  - On success, zero.
4267  *  - On failure, a negative value.
4268  */
4269 static int
4270 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4271 {
4272 	struct ixgbe_interrupt *intr =
4273 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4274 
4275 	intr->mask |= IXGBE_EICR_LINKSEC;
4276 
4277 	return 0;
4278 }
4279 
4280 /*
4281  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
4282  *
4283  * @param dev
4284  *  Pointer to struct rte_eth_dev.
4285  *
4286  * @return
4287  *  - On success, zero.
4288  *  - On failure, a negative value.
4289  */
4290 static int
4291 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4292 {
4293 	uint32_t eicr;
4294 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4295 	struct ixgbe_interrupt *intr =
4296 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4297 
4298 	/* clear all cause mask */
4299 	ixgbe_disable_intr(hw);
4300 
4301 	/* read-on-clear nic registers here */
4302 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4303 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4304 
4305 	intr->flags = 0;
4306 
4307 	/* set flag for async link update */
4308 	if (eicr & IXGBE_EICR_LSC)
4309 		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4310 
4311 	if (eicr & IXGBE_EICR_MAILBOX)
4312 		intr->flags |= IXGBE_FLAG_MAILBOX;
4313 
4314 	if (eicr & IXGBE_EICR_LINKSEC)
4315 		intr->flags |= IXGBE_FLAG_MACSEC;
4316 
4317 	if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
4318 	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
4319 	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4320 		intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4321 
4322 	return 0;
4323 }
4324 
4325 /**
4326  * It gets and then prints the link status.
4327  *
4328  * @param dev
4329  *  Pointer to struct rte_eth_dev.
4330  *
4331  * @return
4332  *  - On success, zero.
4333  *  - On failure, a negative value.
4334  */
4335 static void
4336 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4337 {
4338 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4339 	struct rte_eth_link link;
4340 
4341 	rte_eth_linkstatus_get(dev, &link);
4342 
4343 	if (link.link_status) {
4344 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4345 					(int)(dev->data->port_id),
4346 					(unsigned)link.link_speed,
4347 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4348 					"full-duplex" : "half-duplex");
4349 	} else {
4350 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
4351 				(int)(dev->data->port_id));
4352 	}
4353 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
4354 				pci_dev->addr.domain,
4355 				pci_dev->addr.bus,
4356 				pci_dev->addr.devid,
4357 				pci_dev->addr.function);
4358 }
4359 
4360 /*
4361  * It executes link_update after knowing an interrupt occurred.
4362  *
4363  * @param dev
4364  *  Pointer to struct rte_eth_dev.
4365  *
4366  * @return
4367  *  - On success, zero.
4368  *  - On failure, a negative value.
4369  */
4370 static int
4371 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
4372 {
4373 	struct ixgbe_interrupt *intr =
4374 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4375 	int64_t timeout;
4376 	struct ixgbe_hw *hw =
4377 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4378 
4379 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4380 
4381 	if (intr->flags & IXGBE_FLAG_MAILBOX) {
4382 		ixgbe_pf_mbx_process(dev);
4383 		intr->flags &= ~IXGBE_FLAG_MAILBOX;
4384 	}
4385 
4386 	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4387 		ixgbe_handle_lasi(hw);
4388 		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4389 	}
4390 
4391 	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4392 		struct rte_eth_link link;
4393 
4394 		/* get the link status before link update, for predicting later */
4395 		rte_eth_linkstatus_get(dev, &link);
4396 
4397 		ixgbe_dev_link_update(dev, 0);
4398 
4399 		/* likely to up */
4400 		if (!link.link_status)
4401 			/* handle it 1 sec later, wait it being stable */
4402 			timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4403 		/* likely to down */
4404 		else
4405 			/* handle it 4 sec later, wait it being stable */
4406 			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4407 
4408 		ixgbe_dev_link_status_print(dev);
4409 		if (rte_eal_alarm_set(timeout * 1000,
4410 				      ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4411 			PMD_DRV_LOG(ERR, "Error setting alarm");
4412 		else {
4413 			/* remember original mask */
4414 			intr->mask_original = intr->mask;
4415 			/* only disable lsc interrupt */
4416 			intr->mask &= ~IXGBE_EIMS_LSC;
4417 		}
4418 	}
4419 
4420 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
4421 	ixgbe_enable_intr(dev);
4422 
4423 	return 0;
4424 }
4425 
4426 /**
4427  * Interrupt handler which shall be registered for alarm callback for delayed
4428  * handling specific interrupt to wait for the stable nic state. As the
4429  * NIC interrupt state is not stable for ixgbe after link is just down,
4430  * it needs to wait 4 seconds to get the stable status.
4431  *
4432  * @param handle
4433  *  Pointer to interrupt handle.
4434  * @param param
4435  *  The address of parameter (struct rte_eth_dev *) regsitered before.
4436  *
4437  * @return
4438  *  void
4439  */
4440 static void
4441 ixgbe_dev_interrupt_delayed_handler(void *param)
4442 {
4443 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4444 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4445 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4446 	struct ixgbe_interrupt *intr =
4447 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4448 	struct ixgbe_hw *hw =
4449 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4450 	uint32_t eicr;
4451 
4452 	ixgbe_disable_intr(hw);
4453 
4454 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4455 	if (eicr & IXGBE_EICR_MAILBOX)
4456 		ixgbe_pf_mbx_process(dev);
4457 
4458 	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4459 		ixgbe_handle_lasi(hw);
4460 		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4461 	}
4462 
4463 	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4464 		ixgbe_dev_link_update(dev, 0);
4465 		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4466 		ixgbe_dev_link_status_print(dev);
4467 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
4468 					      NULL);
4469 	}
4470 
4471 	if (intr->flags & IXGBE_FLAG_MACSEC) {
4472 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
4473 					      NULL);
4474 		intr->flags &= ~IXGBE_FLAG_MACSEC;
4475 	}
4476 
4477 	/* restore original mask */
4478 	intr->mask = intr->mask_original;
4479 	intr->mask_original = 0;
4480 
4481 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4482 	ixgbe_enable_intr(dev);
4483 	rte_intr_enable(intr_handle);
4484 }
4485 
4486 /**
4487  * Interrupt handler triggered by NIC  for handling
4488  * specific interrupt.
4489  *
4490  * @param handle
4491  *  Pointer to interrupt handle.
4492  * @param param
4493  *  The address of parameter (struct rte_eth_dev *) regsitered before.
4494  *
4495  * @return
4496  *  void
4497  */
4498 static void
4499 ixgbe_dev_interrupt_handler(void *param)
4500 {
4501 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4502 
4503 	ixgbe_dev_interrupt_get_status(dev);
4504 	ixgbe_dev_interrupt_action(dev);
4505 }
4506 
4507 static int
4508 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4509 {
4510 	struct ixgbe_hw *hw;
4511 
4512 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4513 	return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4514 }
4515 
4516 static int
4517 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4518 {
4519 	struct ixgbe_hw *hw;
4520 
4521 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4522 	return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4523 }
4524 
4525 static int
4526 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4527 {
4528 	struct ixgbe_hw *hw;
4529 	uint32_t mflcn_reg;
4530 	uint32_t fccfg_reg;
4531 	int rx_pause;
4532 	int tx_pause;
4533 
4534 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4535 
4536 	fc_conf->pause_time = hw->fc.pause_time;
4537 	fc_conf->high_water = hw->fc.high_water[0];
4538 	fc_conf->low_water = hw->fc.low_water[0];
4539 	fc_conf->send_xon = hw->fc.send_xon;
4540 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4541 
4542 	/*
4543 	 * Return rx_pause status according to actual setting of
4544 	 * MFLCN register.
4545 	 */
4546 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4547 	if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4548 		rx_pause = 1;
4549 	else
4550 		rx_pause = 0;
4551 
4552 	/*
4553 	 * Return tx_pause status according to actual setting of
4554 	 * FCCFG register.
4555 	 */
4556 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4557 	if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4558 		tx_pause = 1;
4559 	else
4560 		tx_pause = 0;
4561 
4562 	if (rx_pause && tx_pause)
4563 		fc_conf->mode = RTE_FC_FULL;
4564 	else if (rx_pause)
4565 		fc_conf->mode = RTE_FC_RX_PAUSE;
4566 	else if (tx_pause)
4567 		fc_conf->mode = RTE_FC_TX_PAUSE;
4568 	else
4569 		fc_conf->mode = RTE_FC_NONE;
4570 
4571 	return 0;
4572 }
4573 
4574 static int
4575 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4576 {
4577 	struct ixgbe_hw *hw;
4578 	int err;
4579 	uint32_t rx_buf_size;
4580 	uint32_t max_high_water;
4581 	uint32_t mflcn;
4582 	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4583 		ixgbe_fc_none,
4584 		ixgbe_fc_rx_pause,
4585 		ixgbe_fc_tx_pause,
4586 		ixgbe_fc_full
4587 	};
4588 
4589 	PMD_INIT_FUNC_TRACE();
4590 
4591 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4592 	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4593 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4594 
4595 	/*
4596 	 * At least reserve one Ethernet frame for watermark
4597 	 * high_water/low_water in kilo bytes for ixgbe
4598 	 */
4599 	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4600 	if ((fc_conf->high_water > max_high_water) ||
4601 		(fc_conf->high_water < fc_conf->low_water)) {
4602 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4603 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4604 		return -EINVAL;
4605 	}
4606 
4607 	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4608 	hw->fc.pause_time     = fc_conf->pause_time;
4609 	hw->fc.high_water[0]  = fc_conf->high_water;
4610 	hw->fc.low_water[0]   = fc_conf->low_water;
4611 	hw->fc.send_xon       = fc_conf->send_xon;
4612 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
4613 
4614 	err = ixgbe_fc_enable(hw);
4615 
4616 	/* Not negotiated is not an error case */
4617 	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
4618 
4619 		/* check if we want to forward MAC frames - driver doesn't have native
4620 		 * capability to do that, so we'll write the registers ourselves */
4621 
4622 		mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4623 
4624 		/* set or clear MFLCN.PMCF bit depending on configuration */
4625 		if (fc_conf->mac_ctrl_frame_fwd != 0)
4626 			mflcn |= IXGBE_MFLCN_PMCF;
4627 		else
4628 			mflcn &= ~IXGBE_MFLCN_PMCF;
4629 
4630 		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
4631 		IXGBE_WRITE_FLUSH(hw);
4632 
4633 		return 0;
4634 	}
4635 
4636 	PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
4637 	return -EIO;
4638 }
4639 
4640 /**
4641  *  ixgbe_pfc_enable_generic - Enable flow control
4642  *  @hw: pointer to hardware structure
4643  *  @tc_num: traffic class number
4644  *  Enable flow control according to the current settings.
4645  */
4646 static int
4647 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4648 {
4649 	int ret_val = 0;
4650 	uint32_t mflcn_reg, fccfg_reg;
4651 	uint32_t reg;
4652 	uint32_t fcrtl, fcrth;
4653 	uint8_t i;
4654 	uint8_t nb_rx_en;
4655 
4656 	/* Validate the water mark configuration */
4657 	if (!hw->fc.pause_time) {
4658 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4659 		goto out;
4660 	}
4661 
4662 	/* Low water mark of zero causes XOFF floods */
4663 	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4664 		 /* High/Low water can not be 0 */
4665 		if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4666 			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4667 			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4668 			goto out;
4669 		}
4670 
4671 		if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4672 			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4673 			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4674 			goto out;
4675 		}
4676 	}
4677 	/* Negotiate the fc mode to use */
4678 	ixgbe_fc_autoneg(hw);
4679 
4680 	/* Disable any previous flow control settings */
4681 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4682 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4683 
4684 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4685 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4686 
4687 	switch (hw->fc.current_mode) {
4688 	case ixgbe_fc_none:
4689 		/*
4690 		 * If the count of enabled RX Priority Flow control >1,
4691 		 * and the TX pause can not be disabled
4692 		 */
4693 		nb_rx_en = 0;
4694 		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4695 			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4696 			if (reg & IXGBE_FCRTH_FCEN)
4697 				nb_rx_en++;
4698 		}
4699 		if (nb_rx_en > 1)
4700 			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4701 		break;
4702 	case ixgbe_fc_rx_pause:
4703 		/*
4704 		 * Rx Flow control is enabled and Tx Flow control is
4705 		 * disabled by software override. Since there really
4706 		 * isn't a way to advertise that we are capable of RX
4707 		 * Pause ONLY, we will advertise that we support both
4708 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
4709 		 * disable the adapter's ability to send PAUSE frames.
4710 		 */
4711 		mflcn_reg |= IXGBE_MFLCN_RPFCE;
4712 		/*
4713 		 * If the count of enabled RX Priority Flow control >1,
4714 		 * and the TX pause can not be disabled
4715 		 */
4716 		nb_rx_en = 0;
4717 		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4718 			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4719 			if (reg & IXGBE_FCRTH_FCEN)
4720 				nb_rx_en++;
4721 		}
4722 		if (nb_rx_en > 1)
4723 			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4724 		break;
4725 	case ixgbe_fc_tx_pause:
4726 		/*
4727 		 * Tx Flow control is enabled, and Rx Flow control is
4728 		 * disabled by software override.
4729 		 */
4730 		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4731 		break;
4732 	case ixgbe_fc_full:
4733 		/* Flow control (both Rx and Tx) is enabled by SW override. */
4734 		mflcn_reg |= IXGBE_MFLCN_RPFCE;
4735 		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4736 		break;
4737 	default:
4738 		PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4739 		ret_val = IXGBE_ERR_CONFIG;
4740 		goto out;
4741 	}
4742 
4743 	/* Set 802.3x based flow control settings. */
4744 	mflcn_reg |= IXGBE_MFLCN_DPF;
4745 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4746 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4747 
4748 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
4749 	if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4750 		hw->fc.high_water[tc_num]) {
4751 		fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4752 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4753 		fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4754 	} else {
4755 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4756 		/*
4757 		 * In order to prevent Tx hangs when the internal Tx
4758 		 * switch is enabled we must set the high water mark
4759 		 * to the maximum FCRTH value.  This allows the Tx
4760 		 * switch to function even under heavy Rx workloads.
4761 		 */
4762 		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4763 	}
4764 	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4765 
4766 	/* Configure pause time (2 TCs per register) */
4767 	reg = hw->fc.pause_time * 0x00010001;
4768 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4769 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4770 
4771 	/* Configure flow control refresh threshold value */
4772 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4773 
4774 out:
4775 	return ret_val;
4776 }
4777 
4778 static int
4779 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4780 {
4781 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4782 	int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4783 
4784 	if (hw->mac.type != ixgbe_mac_82598EB) {
4785 		ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4786 	}
4787 	return ret_val;
4788 }
4789 
4790 static int
4791 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4792 {
4793 	int err;
4794 	uint32_t rx_buf_size;
4795 	uint32_t max_high_water;
4796 	uint8_t tc_num;
4797 	uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4798 	struct ixgbe_hw *hw =
4799 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4800 	struct ixgbe_dcb_config *dcb_config =
4801 		IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4802 
4803 	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4804 		ixgbe_fc_none,
4805 		ixgbe_fc_rx_pause,
4806 		ixgbe_fc_tx_pause,
4807 		ixgbe_fc_full
4808 	};
4809 
4810 	PMD_INIT_FUNC_TRACE();
4811 
4812 	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4813 	tc_num = map[pfc_conf->priority];
4814 	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4815 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4816 	/*
4817 	 * At least reserve one Ethernet frame for watermark
4818 	 * high_water/low_water in kilo bytes for ixgbe
4819 	 */
4820 	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4821 	if ((pfc_conf->fc.high_water > max_high_water) ||
4822 	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4823 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4824 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4825 		return -EINVAL;
4826 	}
4827 
4828 	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4829 	hw->fc.pause_time = pfc_conf->fc.pause_time;
4830 	hw->fc.send_xon = pfc_conf->fc.send_xon;
4831 	hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4832 	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4833 
4834 	err = ixgbe_dcb_pfc_enable(dev, tc_num);
4835 
4836 	/* Not negotiated is not an error case */
4837 	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4838 		return 0;
4839 
4840 	PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4841 	return -EIO;
4842 }
4843 
4844 static int
4845 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
4846 			  struct rte_eth_rss_reta_entry64 *reta_conf,
4847 			  uint16_t reta_size)
4848 {
4849 	uint16_t i, sp_reta_size;
4850 	uint8_t j, mask;
4851 	uint32_t reta, r;
4852 	uint16_t idx, shift;
4853 	struct ixgbe_adapter *adapter = dev->data->dev_private;
4854 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4855 	uint32_t reta_reg;
4856 
4857 	PMD_INIT_FUNC_TRACE();
4858 
4859 	if (!ixgbe_rss_update_sp(hw->mac.type)) {
4860 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
4861 			"NIC.");
4862 		return -ENOTSUP;
4863 	}
4864 
4865 	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4866 	if (reta_size != sp_reta_size) {
4867 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4868 			"(%d) doesn't match the number hardware can supported "
4869 			"(%d)", reta_size, sp_reta_size);
4870 		return -EINVAL;
4871 	}
4872 
4873 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4874 		idx = i / RTE_RETA_GROUP_SIZE;
4875 		shift = i % RTE_RETA_GROUP_SIZE;
4876 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4877 						IXGBE_4_BIT_MASK);
4878 		if (!mask)
4879 			continue;
4880 		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4881 		if (mask == IXGBE_4_BIT_MASK)
4882 			r = 0;
4883 		else
4884 			r = IXGBE_READ_REG(hw, reta_reg);
4885 		for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4886 			if (mask & (0x1 << j))
4887 				reta |= reta_conf[idx].reta[shift + j] <<
4888 							(CHAR_BIT * j);
4889 			else
4890 				reta |= r & (IXGBE_8_BIT_MASK <<
4891 						(CHAR_BIT * j));
4892 		}
4893 		IXGBE_WRITE_REG(hw, reta_reg, reta);
4894 	}
4895 	adapter->rss_reta_updated = 1;
4896 
4897 	return 0;
4898 }
4899 
4900 static int
4901 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
4902 			 struct rte_eth_rss_reta_entry64 *reta_conf,
4903 			 uint16_t reta_size)
4904 {
4905 	uint16_t i, sp_reta_size;
4906 	uint8_t j, mask;
4907 	uint32_t reta;
4908 	uint16_t idx, shift;
4909 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4910 	uint32_t reta_reg;
4911 
4912 	PMD_INIT_FUNC_TRACE();
4913 	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
4914 	if (reta_size != sp_reta_size) {
4915 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
4916 			"(%d) doesn't match the number hardware can supported "
4917 			"(%d)", reta_size, sp_reta_size);
4918 		return -EINVAL;
4919 	}
4920 
4921 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
4922 		idx = i / RTE_RETA_GROUP_SIZE;
4923 		shift = i % RTE_RETA_GROUP_SIZE;
4924 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
4925 						IXGBE_4_BIT_MASK);
4926 		if (!mask)
4927 			continue;
4928 
4929 		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
4930 		reta = IXGBE_READ_REG(hw, reta_reg);
4931 		for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
4932 			if (mask & (0x1 << j))
4933 				reta_conf[idx].reta[shift + j] =
4934 					((reta >> (CHAR_BIT * j)) &
4935 						IXGBE_8_BIT_MASK);
4936 		}
4937 	}
4938 
4939 	return 0;
4940 }
4941 
4942 static int
4943 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
4944 				uint32_t index, uint32_t pool)
4945 {
4946 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4947 	uint32_t enable_addr = 1;
4948 
4949 	return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
4950 			     pool, enable_addr);
4951 }
4952 
4953 static void
4954 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
4955 {
4956 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4957 
4958 	ixgbe_clear_rar(hw, index);
4959 }
4960 
4961 static int
4962 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
4963 {
4964 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4965 
4966 	ixgbe_remove_rar(dev, 0);
4967 	ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
4968 
4969 	return 0;
4970 }
4971 
4972 static bool
4973 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
4974 {
4975 	if (strcmp(dev->device->driver->name, drv->driver.name))
4976 		return false;
4977 
4978 	return true;
4979 }
4980 
4981 bool
4982 is_ixgbe_supported(struct rte_eth_dev *dev)
4983 {
4984 	return is_device_supported(dev, &rte_ixgbe_pmd);
4985 }
4986 
4987 static int
4988 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4989 {
4990 	uint32_t hlreg0;
4991 	uint32_t maxfrs;
4992 	struct ixgbe_hw *hw;
4993 	struct rte_eth_dev_info dev_info;
4994 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
4995 	struct rte_eth_dev_data *dev_data = dev->data;
4996 
4997 	ixgbe_dev_info_get(dev, &dev_info);
4998 
4999 	/* check that mtu is within the allowed range */
5000 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
5001 		return -EINVAL;
5002 
5003 	/* If device is started, refuse mtu that requires the support of
5004 	 * scattered packets when this feature has not been enabled before.
5005 	 */
5006 	if (dev_data->dev_started && !dev_data->scattered_rx &&
5007 	    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
5008 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
5009 		PMD_INIT_LOG(ERR, "Stop port first.");
5010 		return -EINVAL;
5011 	}
5012 
5013 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5014 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5015 
5016 	/* switch to jumbo mode if needed */
5017 	if (frame_size > ETHER_MAX_LEN) {
5018 		dev->data->dev_conf.rxmode.offloads |=
5019 			DEV_RX_OFFLOAD_JUMBO_FRAME;
5020 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
5021 	} else {
5022 		dev->data->dev_conf.rxmode.offloads &=
5023 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
5024 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
5025 	}
5026 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5027 
5028 	/* update max frame size */
5029 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
5030 
5031 	maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
5032 	maxfrs &= 0x0000FFFF;
5033 	maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
5034 	IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
5035 
5036 	return 0;
5037 }
5038 
5039 /*
5040  * Virtual Function operations
5041  */
5042 static void
5043 ixgbevf_intr_disable(struct rte_eth_dev *dev)
5044 {
5045 	struct ixgbe_interrupt *intr =
5046 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5047 	struct ixgbe_hw *hw =
5048 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5049 
5050 	PMD_INIT_FUNC_TRACE();
5051 
5052 	/* Clear interrupt mask to stop from interrupts being generated */
5053 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
5054 
5055 	IXGBE_WRITE_FLUSH(hw);
5056 
5057 	/* Clear mask value. */
5058 	intr->mask = 0;
5059 }
5060 
5061 static void
5062 ixgbevf_intr_enable(struct rte_eth_dev *dev)
5063 {
5064 	struct ixgbe_interrupt *intr =
5065 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5066 	struct ixgbe_hw *hw =
5067 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5068 
5069 	PMD_INIT_FUNC_TRACE();
5070 
5071 	/* VF enable interrupt autoclean */
5072 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
5073 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
5074 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
5075 
5076 	IXGBE_WRITE_FLUSH(hw);
5077 
5078 	/* Save IXGBE_VTEIMS value to mask. */
5079 	intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
5080 }
5081 
5082 static int
5083 ixgbevf_dev_configure(struct rte_eth_dev *dev)
5084 {
5085 	struct rte_eth_conf *conf = &dev->data->dev_conf;
5086 	struct ixgbe_adapter *adapter = dev->data->dev_private;
5087 
5088 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
5089 		     dev->data->port_id);
5090 
5091 	/*
5092 	 * VF has no ability to enable/disable HW CRC
5093 	 * Keep the persistent behavior the same as Host PF
5094 	 */
5095 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
5096 	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
5097 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
5098 		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
5099 	}
5100 #else
5101 	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
5102 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
5103 		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
5104 	}
5105 #endif
5106 
5107 	/*
5108 	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
5109 	 * allocation or vector Rx preconditions we will reset it.
5110 	 */
5111 	adapter->rx_bulk_alloc_allowed = true;
5112 	adapter->rx_vec_allowed = true;
5113 
5114 	return 0;
5115 }
5116 
5117 static int
5118 ixgbevf_dev_start(struct rte_eth_dev *dev)
5119 {
5120 	struct ixgbe_hw *hw =
5121 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5122 	uint32_t intr_vector = 0;
5123 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5124 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5125 
5126 	int err, mask = 0;
5127 
5128 	PMD_INIT_FUNC_TRACE();
5129 
5130 	/* Stop the link setup handler before resetting the HW. */
5131 	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
5132 
5133 	err = hw->mac.ops.reset_hw(hw);
5134 	if (err) {
5135 		PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
5136 		return err;
5137 	}
5138 	hw->mac.get_link_status = true;
5139 
5140 	/* negotiate mailbox API version to use with the PF. */
5141 	ixgbevf_negotiate_api(hw);
5142 
5143 	ixgbevf_dev_tx_init(dev);
5144 
5145 	/* This can fail when allocating mbufs for descriptor rings */
5146 	err = ixgbevf_dev_rx_init(dev);
5147 	if (err) {
5148 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
5149 		ixgbe_dev_clear_queues(dev);
5150 		return err;
5151 	}
5152 
5153 	/* Set vfta */
5154 	ixgbevf_set_vfta_all(dev, 1);
5155 
5156 	/* Set HW strip */
5157 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
5158 		ETH_VLAN_EXTEND_MASK;
5159 	err = ixgbevf_vlan_offload_config(dev, mask);
5160 	if (err) {
5161 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
5162 		ixgbe_dev_clear_queues(dev);
5163 		return err;
5164 	}
5165 
5166 	ixgbevf_dev_rxtx_start(dev);
5167 
5168 	/* check and configure queue intr-vector mapping */
5169 	if (rte_intr_cap_multiple(intr_handle) &&
5170 	    dev->data->dev_conf.intr_conf.rxq) {
5171 		/* According to datasheet, only vector 0/1/2 can be used,
5172 		 * now only one vector is used for Rx queue
5173 		 */
5174 		intr_vector = 1;
5175 		if (rte_intr_efd_enable(intr_handle, intr_vector))
5176 			return -1;
5177 	}
5178 
5179 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
5180 		intr_handle->intr_vec =
5181 			rte_zmalloc("intr_vec",
5182 				    dev->data->nb_rx_queues * sizeof(int), 0);
5183 		if (intr_handle->intr_vec == NULL) {
5184 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
5185 				     " intr_vec", dev->data->nb_rx_queues);
5186 			return -ENOMEM;
5187 		}
5188 	}
5189 	ixgbevf_configure_msix(dev);
5190 
5191 	/* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
5192 	 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
5193 	 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
5194 	 * is not cleared, it will fail when following rte_intr_enable( ) tries
5195 	 * to map Rx queue interrupt to other VFIO vectors.
5196 	 * So clear uio/vfio intr/evevnfd first to avoid failure.
5197 	 */
5198 	rte_intr_disable(intr_handle);
5199 
5200 	rte_intr_enable(intr_handle);
5201 
5202 	/* Re-enable interrupt for VF */
5203 	ixgbevf_intr_enable(dev);
5204 
5205 	/*
5206 	 * Update link status right before return, because it may
5207 	 * start link configuration process in a separate thread.
5208 	 */
5209 	ixgbevf_dev_link_update(dev, 0);
5210 
5211 	return 0;
5212 }
5213 
5214 static void
5215 ixgbevf_dev_stop(struct rte_eth_dev *dev)
5216 {
5217 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5218 	struct ixgbe_adapter *adapter = dev->data->dev_private;
5219 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5220 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5221 
5222 	PMD_INIT_FUNC_TRACE();
5223 
5224 	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
5225 
5226 	ixgbevf_intr_disable(dev);
5227 
5228 	hw->adapter_stopped = 1;
5229 	ixgbe_stop_adapter(hw);
5230 
5231 	/*
5232 	  * Clear what we set, but we still keep shadow_vfta to
5233 	  * restore after device starts
5234 	  */
5235 	ixgbevf_set_vfta_all(dev, 0);
5236 
5237 	/* Clear stored conf */
5238 	dev->data->scattered_rx = 0;
5239 
5240 	ixgbe_dev_clear_queues(dev);
5241 
5242 	/* Clean datapath event and queue/vec mapping */
5243 	rte_intr_efd_disable(intr_handle);
5244 	if (intr_handle->intr_vec != NULL) {
5245 		rte_free(intr_handle->intr_vec);
5246 		intr_handle->intr_vec = NULL;
5247 	}
5248 
5249 	adapter->rss_reta_updated = 0;
5250 }
5251 
5252 static void
5253 ixgbevf_dev_close(struct rte_eth_dev *dev)
5254 {
5255 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5256 
5257 	PMD_INIT_FUNC_TRACE();
5258 
5259 	ixgbe_reset_hw(hw);
5260 
5261 	ixgbevf_dev_stop(dev);
5262 
5263 	ixgbe_dev_free_queues(dev);
5264 
5265 	/**
5266 	 * Remove the VF MAC address ro ensure
5267 	 * that the VF traffic goes to the PF
5268 	 * after stop, close and detach of the VF
5269 	 **/
5270 	ixgbevf_remove_mac_addr(dev, 0);
5271 }
5272 
5273 /*
5274  * Reset VF device
5275  */
5276 static int
5277 ixgbevf_dev_reset(struct rte_eth_dev *dev)
5278 {
5279 	int ret;
5280 
5281 	ret = eth_ixgbevf_dev_uninit(dev);
5282 	if (ret)
5283 		return ret;
5284 
5285 	ret = eth_ixgbevf_dev_init(dev);
5286 
5287 	return ret;
5288 }
5289 
5290 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5291 {
5292 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5293 	struct ixgbe_vfta *shadow_vfta =
5294 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5295 	int i = 0, j = 0, vfta = 0, mask = 1;
5296 
5297 	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5298 		vfta = shadow_vfta->vfta[i];
5299 		if (vfta) {
5300 			mask = 1;
5301 			for (j = 0; j < 32; j++) {
5302 				if (vfta & mask)
5303 					ixgbe_set_vfta(hw, (i<<5)+j, 0,
5304 						       on, false);
5305 				mask <<= 1;
5306 			}
5307 		}
5308 	}
5309 
5310 }
5311 
5312 static int
5313 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5314 {
5315 	struct ixgbe_hw *hw =
5316 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5317 	struct ixgbe_vfta *shadow_vfta =
5318 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5319 	uint32_t vid_idx = 0;
5320 	uint32_t vid_bit = 0;
5321 	int ret = 0;
5322 
5323 	PMD_INIT_FUNC_TRACE();
5324 
5325 	/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
5326 	ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5327 	if (ret) {
5328 		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5329 		return ret;
5330 	}
5331 	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5332 	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5333 
5334 	/* Save what we set and retore it after device reset */
5335 	if (on)
5336 		shadow_vfta->vfta[vid_idx] |= vid_bit;
5337 	else
5338 		shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5339 
5340 	return 0;
5341 }
5342 
5343 static void
5344 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5345 {
5346 	struct ixgbe_hw *hw =
5347 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5348 	uint32_t ctrl;
5349 
5350 	PMD_INIT_FUNC_TRACE();
5351 
5352 	if (queue >= hw->mac.max_rx_queues)
5353 		return;
5354 
5355 	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5356 	if (on)
5357 		ctrl |= IXGBE_RXDCTL_VME;
5358 	else
5359 		ctrl &= ~IXGBE_RXDCTL_VME;
5360 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5361 
5362 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5363 }
5364 
5365 static int
5366 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
5367 {
5368 	struct ixgbe_rx_queue *rxq;
5369 	uint16_t i;
5370 	int on = 0;
5371 
5372 	/* VF function only support hw strip feature, others are not support */
5373 	if (mask & ETH_VLAN_STRIP_MASK) {
5374 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
5375 			rxq = dev->data->rx_queues[i];
5376 			on = !!(rxq->offloads &	DEV_RX_OFFLOAD_VLAN_STRIP);
5377 			ixgbevf_vlan_strip_queue_set(dev, i, on);
5378 		}
5379 	}
5380 
5381 	return 0;
5382 }
5383 
5384 static int
5385 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5386 {
5387 	ixgbe_config_vlan_strip_on_all_queues(dev, mask);
5388 
5389 	ixgbevf_vlan_offload_config(dev, mask);
5390 
5391 	return 0;
5392 }
5393 
5394 int
5395 ixgbe_vt_check(struct ixgbe_hw *hw)
5396 {
5397 	uint32_t reg_val;
5398 
5399 	/* if Virtualization Technology is enabled */
5400 	reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5401 	if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
5402 		PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5403 		return -1;
5404 	}
5405 
5406 	return 0;
5407 }
5408 
5409 static uint32_t
5410 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
5411 {
5412 	uint32_t vector = 0;
5413 
5414 	switch (hw->mac.mc_filter_type) {
5415 	case 0:   /* use bits [47:36] of the address */
5416 		vector = ((uc_addr->addr_bytes[4] >> 4) |
5417 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
5418 		break;
5419 	case 1:   /* use bits [46:35] of the address */
5420 		vector = ((uc_addr->addr_bytes[4] >> 3) |
5421 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
5422 		break;
5423 	case 2:   /* use bits [45:34] of the address */
5424 		vector = ((uc_addr->addr_bytes[4] >> 2) |
5425 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
5426 		break;
5427 	case 3:   /* use bits [43:32] of the address */
5428 		vector = ((uc_addr->addr_bytes[4]) |
5429 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
5430 		break;
5431 	default:  /* Invalid mc_filter_type */
5432 		break;
5433 	}
5434 
5435 	/* vector can only be 12-bits or boundary will be exceeded */
5436 	vector &= 0xFFF;
5437 	return vector;
5438 }
5439 
5440 static int
5441 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
5442 			uint8_t on)
5443 {
5444 	uint32_t vector;
5445 	uint32_t uta_idx;
5446 	uint32_t reg_val;
5447 	uint32_t uta_shift;
5448 	uint32_t rc;
5449 	const uint32_t ixgbe_uta_idx_mask = 0x7F;
5450 	const uint32_t ixgbe_uta_bit_shift = 5;
5451 	const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5452 	const uint32_t bit1 = 0x1;
5453 
5454 	struct ixgbe_hw *hw =
5455 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5456 	struct ixgbe_uta_info *uta_info =
5457 		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5458 
5459 	/* The UTA table only exists on 82599 hardware and newer */
5460 	if (hw->mac.type < ixgbe_mac_82599EB)
5461 		return -ENOTSUP;
5462 
5463 	vector = ixgbe_uta_vector(hw, mac_addr);
5464 	uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5465 	uta_shift = vector & ixgbe_uta_bit_mask;
5466 
5467 	rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5468 	if (rc == on)
5469 		return 0;
5470 
5471 	reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5472 	if (on) {
5473 		uta_info->uta_in_use++;
5474 		reg_val |= (bit1 << uta_shift);
5475 		uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5476 	} else {
5477 		uta_info->uta_in_use--;
5478 		reg_val &= ~(bit1 << uta_shift);
5479 		uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5480 	}
5481 
5482 	IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5483 
5484 	if (uta_info->uta_in_use > 0)
5485 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5486 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5487 	else
5488 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5489 
5490 	return 0;
5491 }
5492 
5493 static int
5494 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5495 {
5496 	int i;
5497 	struct ixgbe_hw *hw =
5498 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5499 	struct ixgbe_uta_info *uta_info =
5500 		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5501 
5502 	/* The UTA table only exists on 82599 hardware and newer */
5503 	if (hw->mac.type < ixgbe_mac_82599EB)
5504 		return -ENOTSUP;
5505 
5506 	if (on) {
5507 		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5508 			uta_info->uta_shadow[i] = ~0;
5509 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5510 		}
5511 	} else {
5512 		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5513 			uta_info->uta_shadow[i] = 0;
5514 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5515 		}
5516 	}
5517 	return 0;
5518 
5519 }
5520 
5521 uint32_t
5522 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5523 {
5524 	uint32_t new_val = orig_val;
5525 
5526 	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5527 		new_val |= IXGBE_VMOLR_AUPE;
5528 	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5529 		new_val |= IXGBE_VMOLR_ROMPE;
5530 	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5531 		new_val |= IXGBE_VMOLR_ROPE;
5532 	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5533 		new_val |= IXGBE_VMOLR_BAM;
5534 	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5535 		new_val |= IXGBE_VMOLR_MPE;
5536 
5537 	return new_val;
5538 }
5539 
5540 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5541 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5542 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5543 #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5544 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5545 	((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5546 	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5547 
5548 static int
5549 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5550 		      struct rte_eth_mirror_conf *mirror_conf,
5551 		      uint8_t rule_id, uint8_t on)
5552 {
5553 	uint32_t mr_ctl, vlvf;
5554 	uint32_t mp_lsb = 0;
5555 	uint32_t mv_msb = 0;
5556 	uint32_t mv_lsb = 0;
5557 	uint32_t mp_msb = 0;
5558 	uint8_t i = 0;
5559 	int reg_index = 0;
5560 	uint64_t vlan_mask = 0;
5561 
5562 	const uint8_t pool_mask_offset = 32;
5563 	const uint8_t vlan_mask_offset = 32;
5564 	const uint8_t dst_pool_offset = 8;
5565 	const uint8_t rule_mr_offset  = 4;
5566 	const uint8_t mirror_rule_mask = 0x0F;
5567 
5568 	struct ixgbe_mirror_info *mr_info =
5569 			(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5570 	struct ixgbe_hw *hw =
5571 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5572 	uint8_t mirror_type = 0;
5573 
5574 	if (ixgbe_vt_check(hw) < 0)
5575 		return -ENOTSUP;
5576 
5577 	if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5578 		return -EINVAL;
5579 
5580 	if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5581 		PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5582 			    mirror_conf->rule_type);
5583 		return -EINVAL;
5584 	}
5585 
5586 	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5587 		mirror_type |= IXGBE_MRCTL_VLME;
5588 		/* Check if vlan id is valid and find conresponding VLAN ID
5589 		 * index in VLVF
5590 		 */
5591 		for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5592 			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
5593 				/* search vlan id related pool vlan filter
5594 				 * index
5595 				 */
5596 				reg_index = ixgbe_find_vlvf_slot(
5597 						hw,
5598 						mirror_conf->vlan.vlan_id[i],
5599 						false);
5600 				if (reg_index < 0)
5601 					return -EINVAL;
5602 				vlvf = IXGBE_READ_REG(hw,
5603 						      IXGBE_VLVF(reg_index));
5604 				if ((vlvf & IXGBE_VLVF_VIEN) &&
5605 				    ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5606 				      mirror_conf->vlan.vlan_id[i]))
5607 					vlan_mask |= (1ULL << reg_index);
5608 				else
5609 					return -EINVAL;
5610 			}
5611 		}
5612 
5613 		if (on) {
5614 			mv_lsb = vlan_mask & 0xFFFFFFFF;
5615 			mv_msb = vlan_mask >> vlan_mask_offset;
5616 
5617 			mr_info->mr_conf[rule_id].vlan.vlan_mask =
5618 						mirror_conf->vlan.vlan_mask;
5619 			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5620 				if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5621 					mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5622 						mirror_conf->vlan.vlan_id[i];
5623 			}
5624 		} else {
5625 			mv_lsb = 0;
5626 			mv_msb = 0;
5627 			mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5628 			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5629 				mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5630 		}
5631 	}
5632 
5633 	/**
5634 	 * if enable pool mirror, write related pool mask register,if disable
5635 	 * pool mirror, clear PFMRVM register
5636 	 */
5637 	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5638 		mirror_type |= IXGBE_MRCTL_VPME;
5639 		if (on) {
5640 			mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5641 			mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5642 			mr_info->mr_conf[rule_id].pool_mask =
5643 					mirror_conf->pool_mask;
5644 
5645 		} else {
5646 			mp_lsb = 0;
5647 			mp_msb = 0;
5648 			mr_info->mr_conf[rule_id].pool_mask = 0;
5649 		}
5650 	}
5651 	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5652 		mirror_type |= IXGBE_MRCTL_UPME;
5653 	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5654 		mirror_type |= IXGBE_MRCTL_DPME;
5655 
5656 	/* read  mirror control register and recalculate it */
5657 	mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5658 
5659 	if (on) {
5660 		mr_ctl |= mirror_type;
5661 		mr_ctl &= mirror_rule_mask;
5662 		mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
5663 	} else {
5664 		mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
5665 	}
5666 
5667 	mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5668 	mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5669 
5670 	/* write mirrror control  register */
5671 	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5672 
5673 	/* write pool mirrror control  register */
5674 	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5675 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5676 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5677 				mp_msb);
5678 	}
5679 	/* write VLAN mirrror control  register */
5680 	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5681 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5682 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5683 				mv_msb);
5684 	}
5685 
5686 	return 0;
5687 }
5688 
5689 static int
5690 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5691 {
5692 	int mr_ctl = 0;
5693 	uint32_t lsb_val = 0;
5694 	uint32_t msb_val = 0;
5695 	const uint8_t rule_mr_offset = 4;
5696 
5697 	struct ixgbe_hw *hw =
5698 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5699 	struct ixgbe_mirror_info *mr_info =
5700 		(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5701 
5702 	if (ixgbe_vt_check(hw) < 0)
5703 		return -ENOTSUP;
5704 
5705 	if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5706 		return -EINVAL;
5707 
5708 	memset(&mr_info->mr_conf[rule_id], 0,
5709 	       sizeof(struct rte_eth_mirror_conf));
5710 
5711 	/* clear PFVMCTL register */
5712 	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5713 
5714 	/* clear pool mask register */
5715 	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5716 	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5717 
5718 	/* clear vlan mask register */
5719 	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5720 	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5721 
5722 	return 0;
5723 }
5724 
5725 static int
5726 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5727 {
5728 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5729 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5730 	struct ixgbe_interrupt *intr =
5731 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5732 	struct ixgbe_hw *hw =
5733 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5734 	uint32_t vec = IXGBE_MISC_VEC_ID;
5735 
5736 	if (rte_intr_allow_others(intr_handle))
5737 		vec = IXGBE_RX_VEC_START;
5738 	intr->mask |= (1 << vec);
5739 	RTE_SET_USED(queue_id);
5740 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
5741 
5742 	rte_intr_enable(intr_handle);
5743 
5744 	return 0;
5745 }
5746 
5747 static int
5748 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5749 {
5750 	struct ixgbe_interrupt *intr =
5751 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5752 	struct ixgbe_hw *hw =
5753 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5754 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5755 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5756 	uint32_t vec = IXGBE_MISC_VEC_ID;
5757 
5758 	if (rte_intr_allow_others(intr_handle))
5759 		vec = IXGBE_RX_VEC_START;
5760 	intr->mask &= ~(1 << vec);
5761 	RTE_SET_USED(queue_id);
5762 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
5763 
5764 	return 0;
5765 }
5766 
5767 static int
5768 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5769 {
5770 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5771 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5772 	uint32_t mask;
5773 	struct ixgbe_hw *hw =
5774 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5775 	struct ixgbe_interrupt *intr =
5776 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5777 
5778 	if (queue_id < 16) {
5779 		ixgbe_disable_intr(hw);
5780 		intr->mask |= (1 << queue_id);
5781 		ixgbe_enable_intr(dev);
5782 	} else if (queue_id < 32) {
5783 		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5784 		mask &= (1 << queue_id);
5785 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5786 	} else if (queue_id < 64) {
5787 		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5788 		mask &= (1 << (queue_id - 32));
5789 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5790 	}
5791 	rte_intr_enable(intr_handle);
5792 
5793 	return 0;
5794 }
5795 
5796 static int
5797 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5798 {
5799 	uint32_t mask;
5800 	struct ixgbe_hw *hw =
5801 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5802 	struct ixgbe_interrupt *intr =
5803 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5804 
5805 	if (queue_id < 16) {
5806 		ixgbe_disable_intr(hw);
5807 		intr->mask &= ~(1 << queue_id);
5808 		ixgbe_enable_intr(dev);
5809 	} else if (queue_id < 32) {
5810 		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5811 		mask &= ~(1 << queue_id);
5812 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5813 	} else if (queue_id < 64) {
5814 		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5815 		mask &= ~(1 << (queue_id - 32));
5816 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5817 	}
5818 
5819 	return 0;
5820 }
5821 
5822 static void
5823 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5824 		     uint8_t queue, uint8_t msix_vector)
5825 {
5826 	uint32_t tmp, idx;
5827 
5828 	if (direction == -1) {
5829 		/* other causes */
5830 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5831 		tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
5832 		tmp &= ~0xFF;
5833 		tmp |= msix_vector;
5834 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
5835 	} else {
5836 		/* rx or tx cause */
5837 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5838 		idx = ((16 * (queue & 1)) + (8 * direction));
5839 		tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
5840 		tmp &= ~(0xFF << idx);
5841 		tmp |= (msix_vector << idx);
5842 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
5843 	}
5844 }
5845 
5846 /**
5847  * set the IVAR registers, mapping interrupt causes to vectors
5848  * @param hw
5849  *  pointer to ixgbe_hw struct
5850  * @direction
5851  *  0 for Rx, 1 for Tx, -1 for other causes
5852  * @queue
5853  *  queue to map the corresponding interrupt to
5854  * @msix_vector
5855  *  the vector to map to the corresponding queue
5856  */
5857 static void
5858 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
5859 		   uint8_t queue, uint8_t msix_vector)
5860 {
5861 	uint32_t tmp, idx;
5862 
5863 	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5864 	if (hw->mac.type == ixgbe_mac_82598EB) {
5865 		if (direction == -1)
5866 			direction = 0;
5867 		idx = (((direction * 64) + queue) >> 2) & 0x1F;
5868 		tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
5869 		tmp &= ~(0xFF << (8 * (queue & 0x3)));
5870 		tmp |= (msix_vector << (8 * (queue & 0x3)));
5871 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
5872 	} else if ((hw->mac.type == ixgbe_mac_82599EB) ||
5873 			(hw->mac.type == ixgbe_mac_X540) ||
5874 			(hw->mac.type == ixgbe_mac_X550)) {
5875 		if (direction == -1) {
5876 			/* other causes */
5877 			idx = ((queue & 1) * 8);
5878 			tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5879 			tmp &= ~(0xFF << idx);
5880 			tmp |= (msix_vector << idx);
5881 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
5882 		} else {
5883 			/* rx or tx causes */
5884 			idx = ((16 * (queue & 1)) + (8 * direction));
5885 			tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
5886 			tmp &= ~(0xFF << idx);
5887 			tmp |= (msix_vector << idx);
5888 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
5889 		}
5890 	}
5891 }
5892 
5893 static void
5894 ixgbevf_configure_msix(struct rte_eth_dev *dev)
5895 {
5896 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5897 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5898 	struct ixgbe_hw *hw =
5899 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5900 	uint32_t q_idx;
5901 	uint32_t vector_idx = IXGBE_MISC_VEC_ID;
5902 	uint32_t base = IXGBE_MISC_VEC_ID;
5903 
5904 	/* Configure VF other cause ivar */
5905 	ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
5906 
5907 	/* won't configure msix register if no mapping is done
5908 	 * between intr vector and event fd.
5909 	 */
5910 	if (!rte_intr_dp_is_en(intr_handle))
5911 		return;
5912 
5913 	if (rte_intr_allow_others(intr_handle)) {
5914 		base = IXGBE_RX_VEC_START;
5915 		vector_idx = IXGBE_RX_VEC_START;
5916 	}
5917 
5918 	/* Configure all RX queues of VF */
5919 	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
5920 		/* Force all queue use vector 0,
5921 		 * as IXGBE_VF_MAXMSIVECOTR = 1
5922 		 */
5923 		ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
5924 		intr_handle->intr_vec[q_idx] = vector_idx;
5925 		if (vector_idx < base + intr_handle->nb_efd - 1)
5926 			vector_idx++;
5927 	}
5928 
5929 	/* As RX queue setting above show, all queues use the vector 0.
5930 	 * Set only the ITR value of IXGBE_MISC_VEC_ID.
5931 	 */
5932 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
5933 			IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
5934 			| IXGBE_EITR_CNT_WDIS);
5935 }
5936 
5937 /**
5938  * Sets up the hardware to properly generate MSI-X interrupts
5939  * @hw
5940  *  board private structure
5941  */
5942 static void
5943 ixgbe_configure_msix(struct rte_eth_dev *dev)
5944 {
5945 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5946 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5947 	struct ixgbe_hw *hw =
5948 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5949 	uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
5950 	uint32_t vec = IXGBE_MISC_VEC_ID;
5951 	uint32_t mask;
5952 	uint32_t gpie;
5953 
5954 	/* won't configure msix register if no mapping is done
5955 	 * between intr vector and event fd
5956 	 * but if misx has been enabled already, need to configure
5957 	 * auto clean, auto mask and throttling.
5958 	 */
5959 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5960 	if (!rte_intr_dp_is_en(intr_handle) &&
5961 	    !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
5962 		return;
5963 
5964 	if (rte_intr_allow_others(intr_handle))
5965 		vec = base = IXGBE_RX_VEC_START;
5966 
5967 	/* setup GPIE for MSI-x mode */
5968 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
5969 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
5970 		IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
5971 	/* auto clearing and auto setting corresponding bits in EIMS
5972 	 * when MSI-X interrupt is triggered
5973 	 */
5974 	if (hw->mac.type == ixgbe_mac_82598EB) {
5975 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
5976 	} else {
5977 		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
5978 		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
5979 	}
5980 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
5981 
5982 	/* Populate the IVAR table and set the ITR values to the
5983 	 * corresponding register.
5984 	 */
5985 	if (rte_intr_dp_is_en(intr_handle)) {
5986 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
5987 			queue_id++) {
5988 			/* by default, 1:1 mapping */
5989 			ixgbe_set_ivar_map(hw, 0, queue_id, vec);
5990 			intr_handle->intr_vec[queue_id] = vec;
5991 			if (vec < base + intr_handle->nb_efd - 1)
5992 				vec++;
5993 		}
5994 
5995 		switch (hw->mac.type) {
5996 		case ixgbe_mac_82598EB:
5997 			ixgbe_set_ivar_map(hw, -1,
5998 					   IXGBE_IVAR_OTHER_CAUSES_INDEX,
5999 					   IXGBE_MISC_VEC_ID);
6000 			break;
6001 		case ixgbe_mac_82599EB:
6002 		case ixgbe_mac_X540:
6003 		case ixgbe_mac_X550:
6004 			ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
6005 			break;
6006 		default:
6007 			break;
6008 		}
6009 	}
6010 	IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
6011 			IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
6012 			| IXGBE_EITR_CNT_WDIS);
6013 
6014 	/* set up to autoclear timer, and the vectors */
6015 	mask = IXGBE_EIMS_ENABLE_MASK;
6016 	mask &= ~(IXGBE_EIMS_OTHER |
6017 		  IXGBE_EIMS_MAILBOX |
6018 		  IXGBE_EIMS_LSC);
6019 
6020 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
6021 }
6022 
6023 int
6024 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
6025 			   uint16_t queue_idx, uint16_t tx_rate)
6026 {
6027 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6028 	struct rte_eth_rxmode *rxmode;
6029 	uint32_t rf_dec, rf_int;
6030 	uint32_t bcnrc_val;
6031 	uint16_t link_speed = dev->data->dev_link.link_speed;
6032 
6033 	if (queue_idx >= hw->mac.max_tx_queues)
6034 		return -EINVAL;
6035 
6036 	if (tx_rate != 0) {
6037 		/* Calculate the rate factor values to set */
6038 		rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
6039 		rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
6040 		rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
6041 
6042 		bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
6043 		bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
6044 				IXGBE_RTTBCNRC_RF_INT_MASK_M);
6045 		bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
6046 	} else {
6047 		bcnrc_val = 0;
6048 	}
6049 
6050 	rxmode = &dev->data->dev_conf.rxmode;
6051 	/*
6052 	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
6053 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
6054 	 * set as 0x4.
6055 	 */
6056 	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
6057 	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
6058 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
6059 			IXGBE_MMW_SIZE_JUMBO_FRAME);
6060 	else
6061 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
6062 			IXGBE_MMW_SIZE_DEFAULT);
6063 
6064 	/* Set RTTBCNRC of queue X */
6065 	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
6066 	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
6067 	IXGBE_WRITE_FLUSH(hw);
6068 
6069 	return 0;
6070 }
6071 
6072 static int
6073 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
6074 		     __attribute__((unused)) uint32_t index,
6075 		     __attribute__((unused)) uint32_t pool)
6076 {
6077 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6078 	int diag;
6079 
6080 	/*
6081 	 * On a 82599 VF, adding again the same MAC addr is not an idempotent
6082 	 * operation. Trap this case to avoid exhausting the [very limited]
6083 	 * set of PF resources used to store VF MAC addresses.
6084 	 */
6085 	if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
6086 		return -1;
6087 	diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
6088 	if (diag != 0)
6089 		PMD_DRV_LOG(ERR, "Unable to add MAC address "
6090 			    "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
6091 			    mac_addr->addr_bytes[0],
6092 			    mac_addr->addr_bytes[1],
6093 			    mac_addr->addr_bytes[2],
6094 			    mac_addr->addr_bytes[3],
6095 			    mac_addr->addr_bytes[4],
6096 			    mac_addr->addr_bytes[5],
6097 			    diag);
6098 	return diag;
6099 }
6100 
6101 static void
6102 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
6103 {
6104 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6105 	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
6106 	struct ether_addr *mac_addr;
6107 	uint32_t i;
6108 	int diag;
6109 
6110 	/*
6111 	 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
6112 	 * not support the deletion of a given MAC address.
6113 	 * Instead, it imposes to delete all MAC addresses, then to add again
6114 	 * all MAC addresses with the exception of the one to be deleted.
6115 	 */
6116 	(void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
6117 
6118 	/*
6119 	 * Add again all MAC addresses, with the exception of the deleted one
6120 	 * and of the permanent MAC address.
6121 	 */
6122 	for (i = 0, mac_addr = dev->data->mac_addrs;
6123 	     i < hw->mac.num_rar_entries; i++, mac_addr++) {
6124 		/* Skip the deleted MAC address */
6125 		if (i == index)
6126 			continue;
6127 		/* Skip NULL MAC addresses */
6128 		if (is_zero_ether_addr(mac_addr))
6129 			continue;
6130 		/* Skip the permanent MAC address */
6131 		if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
6132 			continue;
6133 		diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
6134 		if (diag != 0)
6135 			PMD_DRV_LOG(ERR,
6136 				    "Adding again MAC address "
6137 				    "%02x:%02x:%02x:%02x:%02x:%02x failed "
6138 				    "diag=%d",
6139 				    mac_addr->addr_bytes[0],
6140 				    mac_addr->addr_bytes[1],
6141 				    mac_addr->addr_bytes[2],
6142 				    mac_addr->addr_bytes[3],
6143 				    mac_addr->addr_bytes[4],
6144 				    mac_addr->addr_bytes[5],
6145 				    diag);
6146 	}
6147 }
6148 
6149 static int
6150 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
6151 {
6152 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6153 
6154 	hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
6155 
6156 	return 0;
6157 }
6158 
6159 int
6160 ixgbe_syn_filter_set(struct rte_eth_dev *dev,
6161 			struct rte_eth_syn_filter *filter,
6162 			bool add)
6163 {
6164 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6165 	struct ixgbe_filter_info *filter_info =
6166 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6167 	uint32_t syn_info;
6168 	uint32_t synqf;
6169 
6170 	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6171 		return -EINVAL;
6172 
6173 	syn_info = filter_info->syn_info;
6174 
6175 	if (add) {
6176 		if (syn_info & IXGBE_SYN_FILTER_ENABLE)
6177 			return -EINVAL;
6178 		synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
6179 			IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
6180 
6181 		if (filter->hig_pri)
6182 			synqf |= IXGBE_SYN_FILTER_SYNQFP;
6183 		else
6184 			synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
6185 	} else {
6186 		synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6187 		if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
6188 			return -ENOENT;
6189 		synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
6190 	}
6191 
6192 	filter_info->syn_info = synqf;
6193 	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
6194 	IXGBE_WRITE_FLUSH(hw);
6195 	return 0;
6196 }
6197 
6198 static int
6199 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
6200 			struct rte_eth_syn_filter *filter)
6201 {
6202 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6203 	uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
6204 
6205 	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
6206 		filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
6207 		filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
6208 		return 0;
6209 	}
6210 	return -ENOENT;
6211 }
6212 
6213 static int
6214 ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
6215 			enum rte_filter_op filter_op,
6216 			void *arg)
6217 {
6218 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6219 	int ret;
6220 
6221 	MAC_TYPE_FILTER_SUP(hw->mac.type);
6222 
6223 	if (filter_op == RTE_ETH_FILTER_NOP)
6224 		return 0;
6225 
6226 	if (arg == NULL) {
6227 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
6228 			    filter_op);
6229 		return -EINVAL;
6230 	}
6231 
6232 	switch (filter_op) {
6233 	case RTE_ETH_FILTER_ADD:
6234 		ret = ixgbe_syn_filter_set(dev,
6235 				(struct rte_eth_syn_filter *)arg,
6236 				TRUE);
6237 		break;
6238 	case RTE_ETH_FILTER_DELETE:
6239 		ret = ixgbe_syn_filter_set(dev,
6240 				(struct rte_eth_syn_filter *)arg,
6241 				FALSE);
6242 		break;
6243 	case RTE_ETH_FILTER_GET:
6244 		ret = ixgbe_syn_filter_get(dev,
6245 				(struct rte_eth_syn_filter *)arg);
6246 		break;
6247 	default:
6248 		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
6249 		ret = -EINVAL;
6250 		break;
6251 	}
6252 
6253 	return ret;
6254 }
6255 
6256 
6257 static inline enum ixgbe_5tuple_protocol
6258 convert_protocol_type(uint8_t protocol_value)
6259 {
6260 	if (protocol_value == IPPROTO_TCP)
6261 		return IXGBE_FILTER_PROTOCOL_TCP;
6262 	else if (protocol_value == IPPROTO_UDP)
6263 		return IXGBE_FILTER_PROTOCOL_UDP;
6264 	else if (protocol_value == IPPROTO_SCTP)
6265 		return IXGBE_FILTER_PROTOCOL_SCTP;
6266 	else
6267 		return IXGBE_FILTER_PROTOCOL_NONE;
6268 }
6269 
6270 /* inject a 5-tuple filter to HW */
6271 static inline void
6272 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
6273 			   struct ixgbe_5tuple_filter *filter)
6274 {
6275 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6276 	int i;
6277 	uint32_t ftqf, sdpqf;
6278 	uint32_t l34timir = 0;
6279 	uint8_t mask = 0xff;
6280 
6281 	i = filter->index;
6282 
6283 	sdpqf = (uint32_t)(filter->filter_info.dst_port <<
6284 				IXGBE_SDPQF_DSTPORT_SHIFT);
6285 	sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
6286 
6287 	ftqf = (uint32_t)(filter->filter_info.proto &
6288 		IXGBE_FTQF_PROTOCOL_MASK);
6289 	ftqf |= (uint32_t)((filter->filter_info.priority &
6290 		IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
6291 	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
6292 		mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
6293 	if (filter->filter_info.dst_ip_mask == 0)
6294 		mask &= IXGBE_FTQF_DEST_ADDR_MASK;
6295 	if (filter->filter_info.src_port_mask == 0)
6296 		mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
6297 	if (filter->filter_info.dst_port_mask == 0)
6298 		mask &= IXGBE_FTQF_DEST_PORT_MASK;
6299 	if (filter->filter_info.proto_mask == 0)
6300 		mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
6301 	ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
6302 	ftqf |= IXGBE_FTQF_POOL_MASK_EN;
6303 	ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
6304 
6305 	IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
6306 	IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
6307 	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
6308 	IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
6309 
6310 	l34timir |= IXGBE_L34T_IMIR_RESERVE;
6311 	l34timir |= (uint32_t)(filter->queue <<
6312 				IXGBE_L34T_IMIR_QUEUE_SHIFT);
6313 	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
6314 }
6315 
6316 /*
6317  * add a 5tuple filter
6318  *
6319  * @param
6320  * dev: Pointer to struct rte_eth_dev.
6321  * index: the index the filter allocates.
6322  * filter: ponter to the filter that will be added.
6323  * rx_queue: the queue id the filter assigned to.
6324  *
6325  * @return
6326  *    - On success, zero.
6327  *    - On failure, a negative value.
6328  */
6329 static int
6330 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
6331 			struct ixgbe_5tuple_filter *filter)
6332 {
6333 	struct ixgbe_filter_info *filter_info =
6334 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6335 	int i, idx, shift;
6336 
6337 	/*
6338 	 * look for an unused 5tuple filter index,
6339 	 * and insert the filter to list.
6340 	 */
6341 	for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
6342 		idx = i / (sizeof(uint32_t) * NBBY);
6343 		shift = i % (sizeof(uint32_t) * NBBY);
6344 		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
6345 			filter_info->fivetuple_mask[idx] |= 1 << shift;
6346 			filter->index = i;
6347 			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
6348 					  filter,
6349 					  entries);
6350 			break;
6351 		}
6352 	}
6353 	if (i >= IXGBE_MAX_FTQF_FILTERS) {
6354 		PMD_DRV_LOG(ERR, "5tuple filters are full.");
6355 		return -ENOSYS;
6356 	}
6357 
6358 	ixgbe_inject_5tuple_filter(dev, filter);
6359 
6360 	return 0;
6361 }
6362 
6363 /*
6364  * remove a 5tuple filter
6365  *
6366  * @param
6367  * dev: Pointer to struct rte_eth_dev.
6368  * filter: the pointer of the filter will be removed.
6369  */
6370 static void
6371 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
6372 			struct ixgbe_5tuple_filter *filter)
6373 {
6374 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6375 	struct ixgbe_filter_info *filter_info =
6376 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6377 	uint16_t index = filter->index;
6378 
6379 	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
6380 				~(1 << (index % (sizeof(uint32_t) * NBBY)));
6381 	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
6382 	rte_free(filter);
6383 
6384 	IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
6385 	IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
6386 	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
6387 	IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
6388 	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
6389 }
6390 
6391 static int
6392 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
6393 {
6394 	struct ixgbe_hw *hw;
6395 	uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
6396 	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
6397 
6398 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6399 
6400 	if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
6401 		return -EINVAL;
6402 
6403 	/* refuse mtu that requires the support of scattered packets when this
6404 	 * feature has not been enabled before.
6405 	 */
6406 	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
6407 	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
6408 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
6409 		return -EINVAL;
6410 
6411 	/*
6412 	 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
6413 	 * request of the version 2.0 of the mailbox API.
6414 	 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
6415 	 * of the mailbox API.
6416 	 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
6417 	 * prior to 3.11.33 which contains the following change:
6418 	 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
6419 	 */
6420 	ixgbevf_rlpml_set_vf(hw, max_frame);
6421 
6422 	/* update max frame size */
6423 	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
6424 	return 0;
6425 }
6426 
6427 static inline struct ixgbe_5tuple_filter *
6428 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
6429 			struct ixgbe_5tuple_filter_info *key)
6430 {
6431 	struct ixgbe_5tuple_filter *it;
6432 
6433 	TAILQ_FOREACH(it, filter_list, entries) {
6434 		if (memcmp(key, &it->filter_info,
6435 			sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
6436 			return it;
6437 		}
6438 	}
6439 	return NULL;
6440 }
6441 
6442 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/
6443 static inline int
6444 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
6445 			struct ixgbe_5tuple_filter_info *filter_info)
6446 {
6447 	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
6448 		filter->priority > IXGBE_5TUPLE_MAX_PRI ||
6449 		filter->priority < IXGBE_5TUPLE_MIN_PRI)
6450 		return -EINVAL;
6451 
6452 	switch (filter->dst_ip_mask) {
6453 	case UINT32_MAX:
6454 		filter_info->dst_ip_mask = 0;
6455 		filter_info->dst_ip = filter->dst_ip;
6456 		break;
6457 	case 0:
6458 		filter_info->dst_ip_mask = 1;
6459 		break;
6460 	default:
6461 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
6462 		return -EINVAL;
6463 	}
6464 
6465 	switch (filter->src_ip_mask) {
6466 	case UINT32_MAX:
6467 		filter_info->src_ip_mask = 0;
6468 		filter_info->src_ip = filter->src_ip;
6469 		break;
6470 	case 0:
6471 		filter_info->src_ip_mask = 1;
6472 		break;
6473 	default:
6474 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6475 		return -EINVAL;
6476 	}
6477 
6478 	switch (filter->dst_port_mask) {
6479 	case UINT16_MAX:
6480 		filter_info->dst_port_mask = 0;
6481 		filter_info->dst_port = filter->dst_port;
6482 		break;
6483 	case 0:
6484 		filter_info->dst_port_mask = 1;
6485 		break;
6486 	default:
6487 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6488 		return -EINVAL;
6489 	}
6490 
6491 	switch (filter->src_port_mask) {
6492 	case UINT16_MAX:
6493 		filter_info->src_port_mask = 0;
6494 		filter_info->src_port = filter->src_port;
6495 		break;
6496 	case 0:
6497 		filter_info->src_port_mask = 1;
6498 		break;
6499 	default:
6500 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
6501 		return -EINVAL;
6502 	}
6503 
6504 	switch (filter->proto_mask) {
6505 	case UINT8_MAX:
6506 		filter_info->proto_mask = 0;
6507 		filter_info->proto =
6508 			convert_protocol_type(filter->proto);
6509 		break;
6510 	case 0:
6511 		filter_info->proto_mask = 1;
6512 		break;
6513 	default:
6514 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
6515 		return -EINVAL;
6516 	}
6517 
6518 	filter_info->priority = (uint8_t)filter->priority;
6519 	return 0;
6520 }
6521 
6522 /*
6523  * add or delete a ntuple filter
6524  *
6525  * @param
6526  * dev: Pointer to struct rte_eth_dev.
6527  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6528  * add: if true, add filter, if false, remove filter
6529  *
6530  * @return
6531  *    - On success, zero.
6532  *    - On failure, a negative value.
6533  */
6534 int
6535 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6536 			struct rte_eth_ntuple_filter *ntuple_filter,
6537 			bool add)
6538 {
6539 	struct ixgbe_filter_info *filter_info =
6540 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6541 	struct ixgbe_5tuple_filter_info filter_5tuple;
6542 	struct ixgbe_5tuple_filter *filter;
6543 	int ret;
6544 
6545 	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6546 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6547 		return -EINVAL;
6548 	}
6549 
6550 	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6551 	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6552 	if (ret < 0)
6553 		return ret;
6554 
6555 	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6556 					 &filter_5tuple);
6557 	if (filter != NULL && add) {
6558 		PMD_DRV_LOG(ERR, "filter exists.");
6559 		return -EEXIST;
6560 	}
6561 	if (filter == NULL && !add) {
6562 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
6563 		return -ENOENT;
6564 	}
6565 
6566 	if (add) {
6567 		filter = rte_zmalloc("ixgbe_5tuple_filter",
6568 				sizeof(struct ixgbe_5tuple_filter), 0);
6569 		if (filter == NULL)
6570 			return -ENOMEM;
6571 		rte_memcpy(&filter->filter_info,
6572 				 &filter_5tuple,
6573 				 sizeof(struct ixgbe_5tuple_filter_info));
6574 		filter->queue = ntuple_filter->queue;
6575 		ret = ixgbe_add_5tuple_filter(dev, filter);
6576 		if (ret < 0) {
6577 			rte_free(filter);
6578 			return ret;
6579 		}
6580 	} else
6581 		ixgbe_remove_5tuple_filter(dev, filter);
6582 
6583 	return 0;
6584 }
6585 
6586 /*
6587  * get a ntuple filter
6588  *
6589  * @param
6590  * dev: Pointer to struct rte_eth_dev.
6591  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6592  *
6593  * @return
6594  *    - On success, zero.
6595  *    - On failure, a negative value.
6596  */
6597 static int
6598 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
6599 			struct rte_eth_ntuple_filter *ntuple_filter)
6600 {
6601 	struct ixgbe_filter_info *filter_info =
6602 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6603 	struct ixgbe_5tuple_filter_info filter_5tuple;
6604 	struct ixgbe_5tuple_filter *filter;
6605 	int ret;
6606 
6607 	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6608 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6609 		return -EINVAL;
6610 	}
6611 
6612 	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6613 	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6614 	if (ret < 0)
6615 		return ret;
6616 
6617 	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6618 					 &filter_5tuple);
6619 	if (filter == NULL) {
6620 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
6621 		return -ENOENT;
6622 	}
6623 	ntuple_filter->queue = filter->queue;
6624 	return 0;
6625 }
6626 
6627 /*
6628  * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
6629  * @dev: pointer to rte_eth_dev structure
6630  * @filter_op:operation will be taken.
6631  * @arg: a pointer to specific structure corresponding to the filter_op
6632  *
6633  * @return
6634  *    - On success, zero.
6635  *    - On failure, a negative value.
6636  */
6637 static int
6638 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
6639 				enum rte_filter_op filter_op,
6640 				void *arg)
6641 {
6642 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6643 	int ret;
6644 
6645 	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
6646 
6647 	if (filter_op == RTE_ETH_FILTER_NOP)
6648 		return 0;
6649 
6650 	if (arg == NULL) {
6651 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6652 			    filter_op);
6653 		return -EINVAL;
6654 	}
6655 
6656 	switch (filter_op) {
6657 	case RTE_ETH_FILTER_ADD:
6658 		ret = ixgbe_add_del_ntuple_filter(dev,
6659 			(struct rte_eth_ntuple_filter *)arg,
6660 			TRUE);
6661 		break;
6662 	case RTE_ETH_FILTER_DELETE:
6663 		ret = ixgbe_add_del_ntuple_filter(dev,
6664 			(struct rte_eth_ntuple_filter *)arg,
6665 			FALSE);
6666 		break;
6667 	case RTE_ETH_FILTER_GET:
6668 		ret = ixgbe_get_ntuple_filter(dev,
6669 			(struct rte_eth_ntuple_filter *)arg);
6670 		break;
6671 	default:
6672 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6673 		ret = -EINVAL;
6674 		break;
6675 	}
6676 	return ret;
6677 }
6678 
6679 int
6680 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6681 			struct rte_eth_ethertype_filter *filter,
6682 			bool add)
6683 {
6684 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6685 	struct ixgbe_filter_info *filter_info =
6686 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6687 	uint32_t etqf = 0;
6688 	uint32_t etqs = 0;
6689 	int ret;
6690 	struct ixgbe_ethertype_filter ethertype_filter;
6691 
6692 	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6693 		return -EINVAL;
6694 
6695 	if (filter->ether_type == ETHER_TYPE_IPv4 ||
6696 		filter->ether_type == ETHER_TYPE_IPv6) {
6697 		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6698 			" ethertype filter.", filter->ether_type);
6699 		return -EINVAL;
6700 	}
6701 
6702 	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6703 		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6704 		return -EINVAL;
6705 	}
6706 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6707 		PMD_DRV_LOG(ERR, "drop option is unsupported.");
6708 		return -EINVAL;
6709 	}
6710 
6711 	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6712 	if (ret >= 0 && add) {
6713 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6714 			    filter->ether_type);
6715 		return -EEXIST;
6716 	}
6717 	if (ret < 0 && !add) {
6718 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6719 			    filter->ether_type);
6720 		return -ENOENT;
6721 	}
6722 
6723 	if (add) {
6724 		etqf = IXGBE_ETQF_FILTER_EN;
6725 		etqf |= (uint32_t)filter->ether_type;
6726 		etqs |= (uint32_t)((filter->queue <<
6727 				    IXGBE_ETQS_RX_QUEUE_SHIFT) &
6728 				    IXGBE_ETQS_RX_QUEUE);
6729 		etqs |= IXGBE_ETQS_QUEUE_EN;
6730 
6731 		ethertype_filter.ethertype = filter->ether_type;
6732 		ethertype_filter.etqf = etqf;
6733 		ethertype_filter.etqs = etqs;
6734 		ethertype_filter.conf = FALSE;
6735 		ret = ixgbe_ethertype_filter_insert(filter_info,
6736 						    &ethertype_filter);
6737 		if (ret < 0) {
6738 			PMD_DRV_LOG(ERR, "ethertype filters are full.");
6739 			return -ENOSPC;
6740 		}
6741 	} else {
6742 		ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6743 		if (ret < 0)
6744 			return -ENOSYS;
6745 	}
6746 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6747 	IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6748 	IXGBE_WRITE_FLUSH(hw);
6749 
6750 	return 0;
6751 }
6752 
6753 static int
6754 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
6755 			struct rte_eth_ethertype_filter *filter)
6756 {
6757 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6758 	struct ixgbe_filter_info *filter_info =
6759 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6760 	uint32_t etqf, etqs;
6761 	int ret;
6762 
6763 	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6764 	if (ret < 0) {
6765 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6766 			    filter->ether_type);
6767 		return -ENOENT;
6768 	}
6769 
6770 	etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
6771 	if (etqf & IXGBE_ETQF_FILTER_EN) {
6772 		etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
6773 		filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
6774 		filter->flags = 0;
6775 		filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
6776 			       IXGBE_ETQS_RX_QUEUE_SHIFT;
6777 		return 0;
6778 	}
6779 	return -ENOENT;
6780 }
6781 
6782 /*
6783  * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
6784  * @dev: pointer to rte_eth_dev structure
6785  * @filter_op:operation will be taken.
6786  * @arg: a pointer to specific structure corresponding to the filter_op
6787  */
6788 static int
6789 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
6790 				enum rte_filter_op filter_op,
6791 				void *arg)
6792 {
6793 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6794 	int ret;
6795 
6796 	MAC_TYPE_FILTER_SUP(hw->mac.type);
6797 
6798 	if (filter_op == RTE_ETH_FILTER_NOP)
6799 		return 0;
6800 
6801 	if (arg == NULL) {
6802 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
6803 			    filter_op);
6804 		return -EINVAL;
6805 	}
6806 
6807 	switch (filter_op) {
6808 	case RTE_ETH_FILTER_ADD:
6809 		ret = ixgbe_add_del_ethertype_filter(dev,
6810 			(struct rte_eth_ethertype_filter *)arg,
6811 			TRUE);
6812 		break;
6813 	case RTE_ETH_FILTER_DELETE:
6814 		ret = ixgbe_add_del_ethertype_filter(dev,
6815 			(struct rte_eth_ethertype_filter *)arg,
6816 			FALSE);
6817 		break;
6818 	case RTE_ETH_FILTER_GET:
6819 		ret = ixgbe_get_ethertype_filter(dev,
6820 			(struct rte_eth_ethertype_filter *)arg);
6821 		break;
6822 	default:
6823 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
6824 		ret = -EINVAL;
6825 		break;
6826 	}
6827 	return ret;
6828 }
6829 
6830 static int
6831 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
6832 		     enum rte_filter_type filter_type,
6833 		     enum rte_filter_op filter_op,
6834 		     void *arg)
6835 {
6836 	int ret = 0;
6837 
6838 	switch (filter_type) {
6839 	case RTE_ETH_FILTER_NTUPLE:
6840 		ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
6841 		break;
6842 	case RTE_ETH_FILTER_ETHERTYPE:
6843 		ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
6844 		break;
6845 	case RTE_ETH_FILTER_SYN:
6846 		ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
6847 		break;
6848 	case RTE_ETH_FILTER_FDIR:
6849 		ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
6850 		break;
6851 	case RTE_ETH_FILTER_L2_TUNNEL:
6852 		ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
6853 		break;
6854 	case RTE_ETH_FILTER_GENERIC:
6855 		if (filter_op != RTE_ETH_FILTER_GET)
6856 			return -EINVAL;
6857 		*(const void **)arg = &ixgbe_flow_ops;
6858 		break;
6859 	default:
6860 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
6861 							filter_type);
6862 		ret = -EINVAL;
6863 		break;
6864 	}
6865 
6866 	return ret;
6867 }
6868 
6869 static u8 *
6870 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
6871 			u8 **mc_addr_ptr, u32 *vmdq)
6872 {
6873 	u8 *mc_addr;
6874 
6875 	*vmdq = 0;
6876 	mc_addr = *mc_addr_ptr;
6877 	*mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
6878 	return mc_addr;
6879 }
6880 
6881 static int
6882 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
6883 			  struct ether_addr *mc_addr_set,
6884 			  uint32_t nb_mc_addr)
6885 {
6886 	struct ixgbe_hw *hw;
6887 	u8 *mc_addr_list;
6888 
6889 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6890 	mc_addr_list = (u8 *)mc_addr_set;
6891 	return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
6892 					 ixgbe_dev_addr_list_itr, TRUE);
6893 }
6894 
6895 static uint64_t
6896 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
6897 {
6898 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6899 	uint64_t systime_cycles;
6900 
6901 	switch (hw->mac.type) {
6902 	case ixgbe_mac_X550:
6903 	case ixgbe_mac_X550EM_x:
6904 	case ixgbe_mac_X550EM_a:
6905 		/* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
6906 		systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6907 		systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6908 				* NSEC_PER_SEC;
6909 		break;
6910 	default:
6911 		systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6912 		systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6913 				<< 32;
6914 	}
6915 
6916 	return systime_cycles;
6917 }
6918 
6919 static uint64_t
6920 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6921 {
6922 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6923 	uint64_t rx_tstamp_cycles;
6924 
6925 	switch (hw->mac.type) {
6926 	case ixgbe_mac_X550:
6927 	case ixgbe_mac_X550EM_x:
6928 	case ixgbe_mac_X550EM_a:
6929 		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
6930 		rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6931 		rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6932 				* NSEC_PER_SEC;
6933 		break;
6934 	default:
6935 		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
6936 		rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6937 		rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6938 				<< 32;
6939 	}
6940 
6941 	return rx_tstamp_cycles;
6942 }
6943 
6944 static uint64_t
6945 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6946 {
6947 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6948 	uint64_t tx_tstamp_cycles;
6949 
6950 	switch (hw->mac.type) {
6951 	case ixgbe_mac_X550:
6952 	case ixgbe_mac_X550EM_x:
6953 	case ixgbe_mac_X550EM_a:
6954 		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
6955 		tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6956 		tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6957 				* NSEC_PER_SEC;
6958 		break;
6959 	default:
6960 		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
6961 		tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6962 		tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6963 				<< 32;
6964 	}
6965 
6966 	return tx_tstamp_cycles;
6967 }
6968 
6969 static void
6970 ixgbe_start_timecounters(struct rte_eth_dev *dev)
6971 {
6972 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6973 	struct ixgbe_adapter *adapter = dev->data->dev_private;
6974 	struct rte_eth_link link;
6975 	uint32_t incval = 0;
6976 	uint32_t shift = 0;
6977 
6978 	/* Get current link speed. */
6979 	ixgbe_dev_link_update(dev, 1);
6980 	rte_eth_linkstatus_get(dev, &link);
6981 
6982 	switch (link.link_speed) {
6983 	case ETH_SPEED_NUM_100M:
6984 		incval = IXGBE_INCVAL_100;
6985 		shift = IXGBE_INCVAL_SHIFT_100;
6986 		break;
6987 	case ETH_SPEED_NUM_1G:
6988 		incval = IXGBE_INCVAL_1GB;
6989 		shift = IXGBE_INCVAL_SHIFT_1GB;
6990 		break;
6991 	case ETH_SPEED_NUM_10G:
6992 	default:
6993 		incval = IXGBE_INCVAL_10GB;
6994 		shift = IXGBE_INCVAL_SHIFT_10GB;
6995 		break;
6996 	}
6997 
6998 	switch (hw->mac.type) {
6999 	case ixgbe_mac_X550:
7000 	case ixgbe_mac_X550EM_x:
7001 	case ixgbe_mac_X550EM_a:
7002 		/* Independent of link speed. */
7003 		incval = 1;
7004 		/* Cycles read will be interpreted as ns. */
7005 		shift = 0;
7006 		/* Fall-through */
7007 	case ixgbe_mac_X540:
7008 		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
7009 		break;
7010 	case ixgbe_mac_82599EB:
7011 		incval >>= IXGBE_INCVAL_SHIFT_82599;
7012 		shift -= IXGBE_INCVAL_SHIFT_82599;
7013 		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
7014 				(1 << IXGBE_INCPER_SHIFT_82599) | incval);
7015 		break;
7016 	default:
7017 		/* Not supported. */
7018 		return;
7019 	}
7020 
7021 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
7022 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
7023 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
7024 
7025 	adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7026 	adapter->systime_tc.cc_shift = shift;
7027 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
7028 
7029 	adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7030 	adapter->rx_tstamp_tc.cc_shift = shift;
7031 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
7032 
7033 	adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
7034 	adapter->tx_tstamp_tc.cc_shift = shift;
7035 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
7036 }
7037 
7038 static int
7039 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
7040 {
7041 	struct ixgbe_adapter *adapter = dev->data->dev_private;
7042 
7043 	adapter->systime_tc.nsec += delta;
7044 	adapter->rx_tstamp_tc.nsec += delta;
7045 	adapter->tx_tstamp_tc.nsec += delta;
7046 
7047 	return 0;
7048 }
7049 
7050 static int
7051 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
7052 {
7053 	uint64_t ns;
7054 	struct ixgbe_adapter *adapter = dev->data->dev_private;
7055 
7056 	ns = rte_timespec_to_ns(ts);
7057 	/* Set the timecounters to a new value. */
7058 	adapter->systime_tc.nsec = ns;
7059 	adapter->rx_tstamp_tc.nsec = ns;
7060 	adapter->tx_tstamp_tc.nsec = ns;
7061 
7062 	return 0;
7063 }
7064 
7065 static int
7066 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
7067 {
7068 	uint64_t ns, systime_cycles;
7069 	struct ixgbe_adapter *adapter = dev->data->dev_private;
7070 
7071 	systime_cycles = ixgbe_read_systime_cyclecounter(dev);
7072 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
7073 	*ts = rte_ns_to_timespec(ns);
7074 
7075 	return 0;
7076 }
7077 
7078 static int
7079 ixgbe_timesync_enable(struct rte_eth_dev *dev)
7080 {
7081 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7082 	uint32_t tsync_ctl;
7083 	uint32_t tsauxc;
7084 
7085 	/* Stop the timesync system time. */
7086 	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
7087 	/* Reset the timesync system time value. */
7088 	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
7089 	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
7090 
7091 	/* Enable system time for platforms where it isn't on by default. */
7092 	tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
7093 	tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
7094 	IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
7095 
7096 	ixgbe_start_timecounters(dev);
7097 
7098 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7099 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
7100 			(ETHER_TYPE_1588 |
7101 			 IXGBE_ETQF_FILTER_EN |
7102 			 IXGBE_ETQF_1588));
7103 
7104 	/* Enable timestamping of received PTP packets. */
7105 	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7106 	tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
7107 	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7108 
7109 	/* Enable timestamping of transmitted PTP packets. */
7110 	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7111 	tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
7112 	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7113 
7114 	IXGBE_WRITE_FLUSH(hw);
7115 
7116 	return 0;
7117 }
7118 
7119 static int
7120 ixgbe_timesync_disable(struct rte_eth_dev *dev)
7121 {
7122 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7123 	uint32_t tsync_ctl;
7124 
7125 	/* Disable timestamping of transmitted PTP packets. */
7126 	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7127 	tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
7128 	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7129 
7130 	/* Disable timestamping of received PTP packets. */
7131 	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7132 	tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
7133 	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7134 
7135 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7136 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
7137 
7138 	/* Stop incrementating the System Time registers. */
7139 	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
7140 
7141 	return 0;
7142 }
7143 
7144 static int
7145 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
7146 				 struct timespec *timestamp,
7147 				 uint32_t flags __rte_unused)
7148 {
7149 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7150 	struct ixgbe_adapter *adapter = dev->data->dev_private;
7151 	uint32_t tsync_rxctl;
7152 	uint64_t rx_tstamp_cycles;
7153 	uint64_t ns;
7154 
7155 	tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7156 	if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
7157 		return -EINVAL;
7158 
7159 	rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
7160 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
7161 	*timestamp = rte_ns_to_timespec(ns);
7162 
7163 	return  0;
7164 }
7165 
7166 static int
7167 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
7168 				 struct timespec *timestamp)
7169 {
7170 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7171 	struct ixgbe_adapter *adapter = dev->data->dev_private;
7172 	uint32_t tsync_txctl;
7173 	uint64_t tx_tstamp_cycles;
7174 	uint64_t ns;
7175 
7176 	tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7177 	if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
7178 		return -EINVAL;
7179 
7180 	tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
7181 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
7182 	*timestamp = rte_ns_to_timespec(ns);
7183 
7184 	return 0;
7185 }
7186 
7187 static int
7188 ixgbe_get_reg_length(struct rte_eth_dev *dev)
7189 {
7190 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7191 	int count = 0;
7192 	int g_ind = 0;
7193 	const struct reg_info *reg_group;
7194 	const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7195 				    ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7196 
7197 	while ((reg_group = reg_set[g_ind++]))
7198 		count += ixgbe_regs_group_count(reg_group);
7199 
7200 	return count;
7201 }
7202 
7203 static int
7204 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
7205 {
7206 	int count = 0;
7207 	int g_ind = 0;
7208 	const struct reg_info *reg_group;
7209 
7210 	while ((reg_group = ixgbevf_regs[g_ind++]))
7211 		count += ixgbe_regs_group_count(reg_group);
7212 
7213 	return count;
7214 }
7215 
7216 static int
7217 ixgbe_get_regs(struct rte_eth_dev *dev,
7218 	      struct rte_dev_reg_info *regs)
7219 {
7220 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7221 	uint32_t *data = regs->data;
7222 	int g_ind = 0;
7223 	int count = 0;
7224 	const struct reg_info *reg_group;
7225 	const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7226 				    ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7227 
7228 	if (data == NULL) {
7229 		regs->length = ixgbe_get_reg_length(dev);
7230 		regs->width = sizeof(uint32_t);
7231 		return 0;
7232 	}
7233 
7234 	/* Support only full register dump */
7235 	if ((regs->length == 0) ||
7236 	    (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
7237 		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7238 			hw->device_id;
7239 		while ((reg_group = reg_set[g_ind++]))
7240 			count += ixgbe_read_regs_group(dev, &data[count],
7241 				reg_group);
7242 		return 0;
7243 	}
7244 
7245 	return -ENOTSUP;
7246 }
7247 
7248 static int
7249 ixgbevf_get_regs(struct rte_eth_dev *dev,
7250 		struct rte_dev_reg_info *regs)
7251 {
7252 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7253 	uint32_t *data = regs->data;
7254 	int g_ind = 0;
7255 	int count = 0;
7256 	const struct reg_info *reg_group;
7257 
7258 	if (data == NULL) {
7259 		regs->length = ixgbevf_get_reg_length(dev);
7260 		regs->width = sizeof(uint32_t);
7261 		return 0;
7262 	}
7263 
7264 	/* Support only full register dump */
7265 	if ((regs->length == 0) ||
7266 	    (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
7267 		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7268 			hw->device_id;
7269 		while ((reg_group = ixgbevf_regs[g_ind++]))
7270 			count += ixgbe_read_regs_group(dev, &data[count],
7271 						      reg_group);
7272 		return 0;
7273 	}
7274 
7275 	return -ENOTSUP;
7276 }
7277 
7278 static int
7279 ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
7280 {
7281 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7282 
7283 	/* Return unit is byte count */
7284 	return hw->eeprom.word_size * 2;
7285 }
7286 
7287 static int
7288 ixgbe_get_eeprom(struct rte_eth_dev *dev,
7289 		struct rte_dev_eeprom_info *in_eeprom)
7290 {
7291 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7292 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7293 	uint16_t *data = in_eeprom->data;
7294 	int first, length;
7295 
7296 	first = in_eeprom->offset >> 1;
7297 	length = in_eeprom->length >> 1;
7298 	if ((first > hw->eeprom.word_size) ||
7299 	    ((first + length) > hw->eeprom.word_size))
7300 		return -EINVAL;
7301 
7302 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7303 
7304 	return eeprom->ops.read_buffer(hw, first, length, data);
7305 }
7306 
7307 static int
7308 ixgbe_set_eeprom(struct rte_eth_dev *dev,
7309 		struct rte_dev_eeprom_info *in_eeprom)
7310 {
7311 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7312 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7313 	uint16_t *data = in_eeprom->data;
7314 	int first, length;
7315 
7316 	first = in_eeprom->offset >> 1;
7317 	length = in_eeprom->length >> 1;
7318 	if ((first > hw->eeprom.word_size) ||
7319 	    ((first + length) > hw->eeprom.word_size))
7320 		return -EINVAL;
7321 
7322 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7323 
7324 	return eeprom->ops.write_buffer(hw,  first, length, data);
7325 }
7326 
7327 static int
7328 ixgbe_get_module_info(struct rte_eth_dev *dev,
7329 		      struct rte_eth_dev_module_info *modinfo)
7330 {
7331 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7332 	uint32_t status;
7333 	uint8_t sff8472_rev, addr_mode;
7334 	bool page_swap = false;
7335 
7336 	/* Check whether we support SFF-8472 or not */
7337 	status = hw->phy.ops.read_i2c_eeprom(hw,
7338 					     IXGBE_SFF_SFF_8472_COMP,
7339 					     &sff8472_rev);
7340 	if (status != 0)
7341 		return -EIO;
7342 
7343 	/* addressing mode is not supported */
7344 	status = hw->phy.ops.read_i2c_eeprom(hw,
7345 					     IXGBE_SFF_SFF_8472_SWAP,
7346 					     &addr_mode);
7347 	if (status != 0)
7348 		return -EIO;
7349 
7350 	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
7351 		PMD_DRV_LOG(ERR,
7352 			    "Address change required to access page 0xA2, "
7353 			    "but not supported. Please report the module "
7354 			    "type to the driver maintainers.");
7355 		page_swap = true;
7356 	}
7357 
7358 	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
7359 		/* We have a SFP, but it does not support SFF-8472 */
7360 		modinfo->type = RTE_ETH_MODULE_SFF_8079;
7361 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
7362 	} else {
7363 		/* We have a SFP which supports a revision of SFF-8472. */
7364 		modinfo->type = RTE_ETH_MODULE_SFF_8472;
7365 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
7366 	}
7367 
7368 	return 0;
7369 }
7370 
7371 static int
7372 ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
7373 			struct rte_dev_eeprom_info *info)
7374 {
7375 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7376 	uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
7377 	uint8_t databyte = 0xFF;
7378 	uint8_t *data = info->data;
7379 	uint32_t i = 0;
7380 
7381 	if (info->length == 0)
7382 		return -EINVAL;
7383 
7384 	for (i = info->offset; i < info->offset + info->length; i++) {
7385 		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
7386 			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
7387 		else
7388 			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
7389 
7390 		if (status != 0)
7391 			return -EIO;
7392 
7393 		data[i - info->offset] = databyte;
7394 	}
7395 
7396 	return 0;
7397 }
7398 
7399 uint16_t
7400 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
7401 	switch (mac_type) {
7402 	case ixgbe_mac_X550:
7403 	case ixgbe_mac_X550EM_x:
7404 	case ixgbe_mac_X550EM_a:
7405 		return ETH_RSS_RETA_SIZE_512;
7406 	case ixgbe_mac_X550_vf:
7407 	case ixgbe_mac_X550EM_x_vf:
7408 	case ixgbe_mac_X550EM_a_vf:
7409 		return ETH_RSS_RETA_SIZE_64;
7410 	case ixgbe_mac_X540_vf:
7411 	case ixgbe_mac_82599_vf:
7412 		return 0;
7413 	default:
7414 		return ETH_RSS_RETA_SIZE_128;
7415 	}
7416 }
7417 
7418 uint32_t
7419 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
7420 	switch (mac_type) {
7421 	case ixgbe_mac_X550:
7422 	case ixgbe_mac_X550EM_x:
7423 	case ixgbe_mac_X550EM_a:
7424 		if (reta_idx < ETH_RSS_RETA_SIZE_128)
7425 			return IXGBE_RETA(reta_idx >> 2);
7426 		else
7427 			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
7428 	case ixgbe_mac_X550_vf:
7429 	case ixgbe_mac_X550EM_x_vf:
7430 	case ixgbe_mac_X550EM_a_vf:
7431 		return IXGBE_VFRETA(reta_idx >> 2);
7432 	default:
7433 		return IXGBE_RETA(reta_idx >> 2);
7434 	}
7435 }
7436 
7437 uint32_t
7438 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
7439 	switch (mac_type) {
7440 	case ixgbe_mac_X550_vf:
7441 	case ixgbe_mac_X550EM_x_vf:
7442 	case ixgbe_mac_X550EM_a_vf:
7443 		return IXGBE_VFMRQC;
7444 	default:
7445 		return IXGBE_MRQC;
7446 	}
7447 }
7448 
7449 uint32_t
7450 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
7451 	switch (mac_type) {
7452 	case ixgbe_mac_X550_vf:
7453 	case ixgbe_mac_X550EM_x_vf:
7454 	case ixgbe_mac_X550EM_a_vf:
7455 		return IXGBE_VFRSSRK(i);
7456 	default:
7457 		return IXGBE_RSSRK(i);
7458 	}
7459 }
7460 
7461 bool
7462 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
7463 	switch (mac_type) {
7464 	case ixgbe_mac_82599_vf:
7465 	case ixgbe_mac_X540_vf:
7466 		return 0;
7467 	default:
7468 		return 1;
7469 	}
7470 }
7471 
7472 static int
7473 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
7474 			struct rte_eth_dcb_info *dcb_info)
7475 {
7476 	struct ixgbe_dcb_config *dcb_config =
7477 			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
7478 	struct ixgbe_dcb_tc_config *tc;
7479 	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
7480 	uint8_t nb_tcs;
7481 	uint8_t i, j;
7482 
7483 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
7484 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
7485 	else
7486 		dcb_info->nb_tcs = 1;
7487 
7488 	tc_queue = &dcb_info->tc_queue;
7489 	nb_tcs = dcb_info->nb_tcs;
7490 
7491 	if (dcb_config->vt_mode) { /* vt is enabled*/
7492 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
7493 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
7494 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7495 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
7496 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
7497 			for (j = 0; j < nb_tcs; j++) {
7498 				tc_queue->tc_rxq[0][j].base = j;
7499 				tc_queue->tc_rxq[0][j].nb_queue = 1;
7500 				tc_queue->tc_txq[0][j].base = j;
7501 				tc_queue->tc_txq[0][j].nb_queue = 1;
7502 			}
7503 		} else {
7504 			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
7505 				for (j = 0; j < nb_tcs; j++) {
7506 					tc_queue->tc_rxq[i][j].base =
7507 						i * nb_tcs + j;
7508 					tc_queue->tc_rxq[i][j].nb_queue = 1;
7509 					tc_queue->tc_txq[i][j].base =
7510 						i * nb_tcs + j;
7511 					tc_queue->tc_txq[i][j].nb_queue = 1;
7512 				}
7513 			}
7514 		}
7515 	} else { /* vt is disabled*/
7516 		struct rte_eth_dcb_rx_conf *rx_conf =
7517 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7518 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7519 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
7520 		if (dcb_info->nb_tcs == ETH_4_TCS) {
7521 			for (i = 0; i < dcb_info->nb_tcs; i++) {
7522 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
7523 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7524 			}
7525 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
7526 			dcb_info->tc_queue.tc_txq[0][1].base = 64;
7527 			dcb_info->tc_queue.tc_txq[0][2].base = 96;
7528 			dcb_info->tc_queue.tc_txq[0][3].base = 112;
7529 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7530 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7531 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7532 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7533 		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
7534 			for (i = 0; i < dcb_info->nb_tcs; i++) {
7535 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7536 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7537 			}
7538 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
7539 			dcb_info->tc_queue.tc_txq[0][1].base = 32;
7540 			dcb_info->tc_queue.tc_txq[0][2].base = 64;
7541 			dcb_info->tc_queue.tc_txq[0][3].base = 80;
7542 			dcb_info->tc_queue.tc_txq[0][4].base = 96;
7543 			dcb_info->tc_queue.tc_txq[0][5].base = 104;
7544 			dcb_info->tc_queue.tc_txq[0][6].base = 112;
7545 			dcb_info->tc_queue.tc_txq[0][7].base = 120;
7546 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7547 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7548 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7549 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7550 			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7551 			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7552 			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7553 			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7554 		}
7555 	}
7556 	for (i = 0; i < dcb_info->nb_tcs; i++) {
7557 		tc = &dcb_config->tc_config[i];
7558 		dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7559 	}
7560 	return 0;
7561 }
7562 
7563 /* Update e-tag ether type */
7564 static int
7565 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7566 			    uint16_t ether_type)
7567 {
7568 	uint32_t etag_etype;
7569 
7570 	if (hw->mac.type != ixgbe_mac_X550 &&
7571 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7572 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7573 		return -ENOTSUP;
7574 	}
7575 
7576 	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7577 	etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7578 	etag_etype |= ether_type;
7579 	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7580 	IXGBE_WRITE_FLUSH(hw);
7581 
7582 	return 0;
7583 }
7584 
7585 /* Config l2 tunnel ether type */
7586 static int
7587 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
7588 				  struct rte_eth_l2_tunnel_conf *l2_tunnel)
7589 {
7590 	int ret = 0;
7591 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7592 	struct ixgbe_l2_tn_info *l2_tn_info =
7593 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7594 
7595 	if (l2_tunnel == NULL)
7596 		return -EINVAL;
7597 
7598 	switch (l2_tunnel->l2_tunnel_type) {
7599 	case RTE_L2_TUNNEL_TYPE_E_TAG:
7600 		l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
7601 		ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
7602 		break;
7603 	default:
7604 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
7605 		ret = -EINVAL;
7606 		break;
7607 	}
7608 
7609 	return ret;
7610 }
7611 
7612 /* Enable e-tag tunnel */
7613 static int
7614 ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7615 {
7616 	uint32_t etag_etype;
7617 
7618 	if (hw->mac.type != ixgbe_mac_X550 &&
7619 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7620 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7621 		return -ENOTSUP;
7622 	}
7623 
7624 	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7625 	etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7626 	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7627 	IXGBE_WRITE_FLUSH(hw);
7628 
7629 	return 0;
7630 }
7631 
7632 /* Enable l2 tunnel */
7633 static int
7634 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
7635 			   enum rte_eth_tunnel_type l2_tunnel_type)
7636 {
7637 	int ret = 0;
7638 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7639 	struct ixgbe_l2_tn_info *l2_tn_info =
7640 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7641 
7642 	switch (l2_tunnel_type) {
7643 	case RTE_L2_TUNNEL_TYPE_E_TAG:
7644 		l2_tn_info->e_tag_en = TRUE;
7645 		ret = ixgbe_e_tag_enable(hw);
7646 		break;
7647 	default:
7648 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
7649 		ret = -EINVAL;
7650 		break;
7651 	}
7652 
7653 	return ret;
7654 }
7655 
7656 /* Disable e-tag tunnel */
7657 static int
7658 ixgbe_e_tag_disable(struct ixgbe_hw *hw)
7659 {
7660 	uint32_t etag_etype;
7661 
7662 	if (hw->mac.type != ixgbe_mac_X550 &&
7663 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7664 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7665 		return -ENOTSUP;
7666 	}
7667 
7668 	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7669 	etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
7670 	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7671 	IXGBE_WRITE_FLUSH(hw);
7672 
7673 	return 0;
7674 }
7675 
7676 /* Disable l2 tunnel */
7677 static int
7678 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
7679 			    enum rte_eth_tunnel_type l2_tunnel_type)
7680 {
7681 	int ret = 0;
7682 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7683 	struct ixgbe_l2_tn_info *l2_tn_info =
7684 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7685 
7686 	switch (l2_tunnel_type) {
7687 	case RTE_L2_TUNNEL_TYPE_E_TAG:
7688 		l2_tn_info->e_tag_en = FALSE;
7689 		ret = ixgbe_e_tag_disable(hw);
7690 		break;
7691 	default:
7692 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
7693 		ret = -EINVAL;
7694 		break;
7695 	}
7696 
7697 	return ret;
7698 }
7699 
7700 static int
7701 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7702 		       struct rte_eth_l2_tunnel_conf *l2_tunnel)
7703 {
7704 	int ret = 0;
7705 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7706 	uint32_t i, rar_entries;
7707 	uint32_t rar_low, rar_high;
7708 
7709 	if (hw->mac.type != ixgbe_mac_X550 &&
7710 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7711 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7712 		return -ENOTSUP;
7713 	}
7714 
7715 	rar_entries = ixgbe_get_num_rx_addrs(hw);
7716 
7717 	for (i = 1; i < rar_entries; i++) {
7718 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7719 		rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7720 		if ((rar_high & IXGBE_RAH_AV) &&
7721 		    (rar_high & IXGBE_RAH_ADTYPE) &&
7722 		    ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7723 		     l2_tunnel->tunnel_id)) {
7724 			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7725 			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7726 
7727 			ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7728 
7729 			return ret;
7730 		}
7731 	}
7732 
7733 	return ret;
7734 }
7735 
7736 static int
7737 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7738 		       struct rte_eth_l2_tunnel_conf *l2_tunnel)
7739 {
7740 	int ret = 0;
7741 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7742 	uint32_t i, rar_entries;
7743 	uint32_t rar_low, rar_high;
7744 
7745 	if (hw->mac.type != ixgbe_mac_X550 &&
7746 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7747 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7748 		return -ENOTSUP;
7749 	}
7750 
7751 	/* One entry for one tunnel. Try to remove potential existing entry. */
7752 	ixgbe_e_tag_filter_del(dev, l2_tunnel);
7753 
7754 	rar_entries = ixgbe_get_num_rx_addrs(hw);
7755 
7756 	for (i = 1; i < rar_entries; i++) {
7757 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7758 		if (rar_high & IXGBE_RAH_AV) {
7759 			continue;
7760 		} else {
7761 			ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7762 			rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7763 			rar_low = l2_tunnel->tunnel_id;
7764 
7765 			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7766 			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7767 
7768 			return ret;
7769 		}
7770 	}
7771 
7772 	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
7773 		     " Please remove a rule before adding a new one.");
7774 	return -EINVAL;
7775 }
7776 
7777 static inline struct ixgbe_l2_tn_filter *
7778 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
7779 			  struct ixgbe_l2_tn_key *key)
7780 {
7781 	int ret;
7782 
7783 	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
7784 	if (ret < 0)
7785 		return NULL;
7786 
7787 	return l2_tn_info->hash_map[ret];
7788 }
7789 
7790 static inline int
7791 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7792 			  struct ixgbe_l2_tn_filter *l2_tn_filter)
7793 {
7794 	int ret;
7795 
7796 	ret = rte_hash_add_key(l2_tn_info->hash_handle,
7797 			       &l2_tn_filter->key);
7798 
7799 	if (ret < 0) {
7800 		PMD_DRV_LOG(ERR,
7801 			    "Failed to insert L2 tunnel filter"
7802 			    " to hash table %d!",
7803 			    ret);
7804 		return ret;
7805 	}
7806 
7807 	l2_tn_info->hash_map[ret] = l2_tn_filter;
7808 
7809 	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7810 
7811 	return 0;
7812 }
7813 
7814 static inline int
7815 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
7816 			  struct ixgbe_l2_tn_key *key)
7817 {
7818 	int ret;
7819 	struct ixgbe_l2_tn_filter *l2_tn_filter;
7820 
7821 	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
7822 
7823 	if (ret < 0) {
7824 		PMD_DRV_LOG(ERR,
7825 			    "No such L2 tunnel filter to delete %d!",
7826 			    ret);
7827 		return ret;
7828 	}
7829 
7830 	l2_tn_filter = l2_tn_info->hash_map[ret];
7831 	l2_tn_info->hash_map[ret] = NULL;
7832 
7833 	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
7834 	rte_free(l2_tn_filter);
7835 
7836 	return 0;
7837 }
7838 
7839 /* Add l2 tunnel filter */
7840 int
7841 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
7842 			       struct rte_eth_l2_tunnel_conf *l2_tunnel,
7843 			       bool restore)
7844 {
7845 	int ret;
7846 	struct ixgbe_l2_tn_info *l2_tn_info =
7847 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7848 	struct ixgbe_l2_tn_key key;
7849 	struct ixgbe_l2_tn_filter *node;
7850 
7851 	if (!restore) {
7852 		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7853 		key.tn_id = l2_tunnel->tunnel_id;
7854 
7855 		node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
7856 
7857 		if (node) {
7858 			PMD_DRV_LOG(ERR,
7859 				    "The L2 tunnel filter already exists!");
7860 			return -EINVAL;
7861 		}
7862 
7863 		node = rte_zmalloc("ixgbe_l2_tn",
7864 				   sizeof(struct ixgbe_l2_tn_filter),
7865 				   0);
7866 		if (!node)
7867 			return -ENOMEM;
7868 
7869 		rte_memcpy(&node->key,
7870 				 &key,
7871 				 sizeof(struct ixgbe_l2_tn_key));
7872 		node->pool = l2_tunnel->pool;
7873 		ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
7874 		if (ret < 0) {
7875 			rte_free(node);
7876 			return ret;
7877 		}
7878 	}
7879 
7880 	switch (l2_tunnel->l2_tunnel_type) {
7881 	case RTE_L2_TUNNEL_TYPE_E_TAG:
7882 		ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
7883 		break;
7884 	default:
7885 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
7886 		ret = -EINVAL;
7887 		break;
7888 	}
7889 
7890 	if ((!restore) && (ret < 0))
7891 		(void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7892 
7893 	return ret;
7894 }
7895 
7896 /* Delete l2 tunnel filter */
7897 int
7898 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
7899 			       struct rte_eth_l2_tunnel_conf *l2_tunnel)
7900 {
7901 	int ret;
7902 	struct ixgbe_l2_tn_info *l2_tn_info =
7903 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7904 	struct ixgbe_l2_tn_key key;
7905 
7906 	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
7907 	key.tn_id = l2_tunnel->tunnel_id;
7908 	ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
7909 	if (ret < 0)
7910 		return ret;
7911 
7912 	switch (l2_tunnel->l2_tunnel_type) {
7913 	case RTE_L2_TUNNEL_TYPE_E_TAG:
7914 		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
7915 		break;
7916 	default:
7917 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
7918 		ret = -EINVAL;
7919 		break;
7920 	}
7921 
7922 	return ret;
7923 }
7924 
7925 /**
7926  * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
7927  * @dev: pointer to rte_eth_dev structure
7928  * @filter_op:operation will be taken.
7929  * @arg: a pointer to specific structure corresponding to the filter_op
7930  */
7931 static int
7932 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
7933 				  enum rte_filter_op filter_op,
7934 				  void *arg)
7935 {
7936 	int ret;
7937 
7938 	if (filter_op == RTE_ETH_FILTER_NOP)
7939 		return 0;
7940 
7941 	if (arg == NULL) {
7942 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
7943 			    filter_op);
7944 		return -EINVAL;
7945 	}
7946 
7947 	switch (filter_op) {
7948 	case RTE_ETH_FILTER_ADD:
7949 		ret = ixgbe_dev_l2_tunnel_filter_add
7950 			(dev,
7951 			 (struct rte_eth_l2_tunnel_conf *)arg,
7952 			 FALSE);
7953 		break;
7954 	case RTE_ETH_FILTER_DELETE:
7955 		ret = ixgbe_dev_l2_tunnel_filter_del
7956 			(dev,
7957 			 (struct rte_eth_l2_tunnel_conf *)arg);
7958 		break;
7959 	default:
7960 		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
7961 		ret = -EINVAL;
7962 		break;
7963 	}
7964 	return ret;
7965 }
7966 
7967 static int
7968 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
7969 {
7970 	int ret = 0;
7971 	uint32_t ctrl;
7972 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7973 
7974 	if (hw->mac.type != ixgbe_mac_X550 &&
7975 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7976 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7977 		return -ENOTSUP;
7978 	}
7979 
7980 	ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
7981 	ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
7982 	if (en)
7983 		ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
7984 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
7985 
7986 	return ret;
7987 }
7988 
7989 /* Enable l2 tunnel forwarding */
7990 static int
7991 ixgbe_dev_l2_tunnel_forwarding_enable
7992 	(struct rte_eth_dev *dev,
7993 	 enum rte_eth_tunnel_type l2_tunnel_type)
7994 {
7995 	struct ixgbe_l2_tn_info *l2_tn_info =
7996 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
7997 	int ret = 0;
7998 
7999 	switch (l2_tunnel_type) {
8000 	case RTE_L2_TUNNEL_TYPE_E_TAG:
8001 		l2_tn_info->e_tag_fwd_en = TRUE;
8002 		ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
8003 		break;
8004 	default:
8005 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8006 		ret = -EINVAL;
8007 		break;
8008 	}
8009 
8010 	return ret;
8011 }
8012 
8013 /* Disable l2 tunnel forwarding */
8014 static int
8015 ixgbe_dev_l2_tunnel_forwarding_disable
8016 	(struct rte_eth_dev *dev,
8017 	 enum rte_eth_tunnel_type l2_tunnel_type)
8018 {
8019 	struct ixgbe_l2_tn_info *l2_tn_info =
8020 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8021 	int ret = 0;
8022 
8023 	switch (l2_tunnel_type) {
8024 	case RTE_L2_TUNNEL_TYPE_E_TAG:
8025 		l2_tn_info->e_tag_fwd_en = FALSE;
8026 		ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
8027 		break;
8028 	default:
8029 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8030 		ret = -EINVAL;
8031 		break;
8032 	}
8033 
8034 	return ret;
8035 }
8036 
8037 static int
8038 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
8039 			     struct rte_eth_l2_tunnel_conf *l2_tunnel,
8040 			     bool en)
8041 {
8042 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
8043 	int ret = 0;
8044 	uint32_t vmtir, vmvir;
8045 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8046 
8047 	if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
8048 		PMD_DRV_LOG(ERR,
8049 			    "VF id %u should be less than %u",
8050 			    l2_tunnel->vf_id,
8051 			    pci_dev->max_vfs);
8052 		return -EINVAL;
8053 	}
8054 
8055 	if (hw->mac.type != ixgbe_mac_X550 &&
8056 	    hw->mac.type != ixgbe_mac_X550EM_x &&
8057 	    hw->mac.type != ixgbe_mac_X550EM_a) {
8058 		return -ENOTSUP;
8059 	}
8060 
8061 	if (en)
8062 		vmtir = l2_tunnel->tunnel_id;
8063 	else
8064 		vmtir = 0;
8065 
8066 	IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
8067 
8068 	vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
8069 	vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
8070 	if (en)
8071 		vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
8072 	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
8073 
8074 	return ret;
8075 }
8076 
8077 /* Enable l2 tunnel tag insertion */
8078 static int
8079 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
8080 				     struct rte_eth_l2_tunnel_conf *l2_tunnel)
8081 {
8082 	int ret = 0;
8083 
8084 	switch (l2_tunnel->l2_tunnel_type) {
8085 	case RTE_L2_TUNNEL_TYPE_E_TAG:
8086 		ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
8087 		break;
8088 	default:
8089 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8090 		ret = -EINVAL;
8091 		break;
8092 	}
8093 
8094 	return ret;
8095 }
8096 
8097 /* Disable l2 tunnel tag insertion */
8098 static int
8099 ixgbe_dev_l2_tunnel_insertion_disable
8100 	(struct rte_eth_dev *dev,
8101 	 struct rte_eth_l2_tunnel_conf *l2_tunnel)
8102 {
8103 	int ret = 0;
8104 
8105 	switch (l2_tunnel->l2_tunnel_type) {
8106 	case RTE_L2_TUNNEL_TYPE_E_TAG:
8107 		ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
8108 		break;
8109 	default:
8110 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8111 		ret = -EINVAL;
8112 		break;
8113 	}
8114 
8115 	return ret;
8116 }
8117 
8118 static int
8119 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
8120 			     bool en)
8121 {
8122 	int ret = 0;
8123 	uint32_t qde;
8124 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8125 
8126 	if (hw->mac.type != ixgbe_mac_X550 &&
8127 	    hw->mac.type != ixgbe_mac_X550EM_x &&
8128 	    hw->mac.type != ixgbe_mac_X550EM_a) {
8129 		return -ENOTSUP;
8130 	}
8131 
8132 	qde = IXGBE_READ_REG(hw, IXGBE_QDE);
8133 	if (en)
8134 		qde |= IXGBE_QDE_STRIP_TAG;
8135 	else
8136 		qde &= ~IXGBE_QDE_STRIP_TAG;
8137 	qde &= ~IXGBE_QDE_READ;
8138 	qde |= IXGBE_QDE_WRITE;
8139 	IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
8140 
8141 	return ret;
8142 }
8143 
8144 /* Enable l2 tunnel tag stripping */
8145 static int
8146 ixgbe_dev_l2_tunnel_stripping_enable
8147 	(struct rte_eth_dev *dev,
8148 	 enum rte_eth_tunnel_type l2_tunnel_type)
8149 {
8150 	int ret = 0;
8151 
8152 	switch (l2_tunnel_type) {
8153 	case RTE_L2_TUNNEL_TYPE_E_TAG:
8154 		ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
8155 		break;
8156 	default:
8157 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8158 		ret = -EINVAL;
8159 		break;
8160 	}
8161 
8162 	return ret;
8163 }
8164 
8165 /* Disable l2 tunnel tag stripping */
8166 static int
8167 ixgbe_dev_l2_tunnel_stripping_disable
8168 	(struct rte_eth_dev *dev,
8169 	 enum rte_eth_tunnel_type l2_tunnel_type)
8170 {
8171 	int ret = 0;
8172 
8173 	switch (l2_tunnel_type) {
8174 	case RTE_L2_TUNNEL_TYPE_E_TAG:
8175 		ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
8176 		break;
8177 	default:
8178 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8179 		ret = -EINVAL;
8180 		break;
8181 	}
8182 
8183 	return ret;
8184 }
8185 
8186 /* Enable/disable l2 tunnel offload functions */
8187 static int
8188 ixgbe_dev_l2_tunnel_offload_set
8189 	(struct rte_eth_dev *dev,
8190 	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
8191 	 uint32_t mask,
8192 	 uint8_t en)
8193 {
8194 	int ret = 0;
8195 
8196 	if (l2_tunnel == NULL)
8197 		return -EINVAL;
8198 
8199 	ret = -EINVAL;
8200 	if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
8201 		if (en)
8202 			ret = ixgbe_dev_l2_tunnel_enable(
8203 				dev,
8204 				l2_tunnel->l2_tunnel_type);
8205 		else
8206 			ret = ixgbe_dev_l2_tunnel_disable(
8207 				dev,
8208 				l2_tunnel->l2_tunnel_type);
8209 	}
8210 
8211 	if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
8212 		if (en)
8213 			ret = ixgbe_dev_l2_tunnel_insertion_enable(
8214 				dev,
8215 				l2_tunnel);
8216 		else
8217 			ret = ixgbe_dev_l2_tunnel_insertion_disable(
8218 				dev,
8219 				l2_tunnel);
8220 	}
8221 
8222 	if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
8223 		if (en)
8224 			ret = ixgbe_dev_l2_tunnel_stripping_enable(
8225 				dev,
8226 				l2_tunnel->l2_tunnel_type);
8227 		else
8228 			ret = ixgbe_dev_l2_tunnel_stripping_disable(
8229 				dev,
8230 				l2_tunnel->l2_tunnel_type);
8231 	}
8232 
8233 	if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
8234 		if (en)
8235 			ret = ixgbe_dev_l2_tunnel_forwarding_enable(
8236 				dev,
8237 				l2_tunnel->l2_tunnel_type);
8238 		else
8239 			ret = ixgbe_dev_l2_tunnel_forwarding_disable(
8240 				dev,
8241 				l2_tunnel->l2_tunnel_type);
8242 	}
8243 
8244 	return ret;
8245 }
8246 
8247 static int
8248 ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
8249 			uint16_t port)
8250 {
8251 	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
8252 	IXGBE_WRITE_FLUSH(hw);
8253 
8254 	return 0;
8255 }
8256 
8257 /* There's only one register for VxLAN UDP port.
8258  * So, we cannot add several ports. Will update it.
8259  */
8260 static int
8261 ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
8262 		     uint16_t port)
8263 {
8264 	if (port == 0) {
8265 		PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
8266 		return -EINVAL;
8267 	}
8268 
8269 	return ixgbe_update_vxlan_port(hw, port);
8270 }
8271 
8272 /* We cannot delete the VxLAN port. For there's a register for VxLAN
8273  * UDP port, it must have a value.
8274  * So, will reset it to the original value 0.
8275  */
8276 static int
8277 ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
8278 		     uint16_t port)
8279 {
8280 	uint16_t cur_port;
8281 
8282 	cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
8283 
8284 	if (cur_port != port) {
8285 		PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
8286 		return -EINVAL;
8287 	}
8288 
8289 	return ixgbe_update_vxlan_port(hw, 0);
8290 }
8291 
8292 /* Add UDP tunneling port */
8293 static int
8294 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8295 			      struct rte_eth_udp_tunnel *udp_tunnel)
8296 {
8297 	int ret = 0;
8298 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8299 
8300 	if (hw->mac.type != ixgbe_mac_X550 &&
8301 	    hw->mac.type != ixgbe_mac_X550EM_x &&
8302 	    hw->mac.type != ixgbe_mac_X550EM_a) {
8303 		return -ENOTSUP;
8304 	}
8305 
8306 	if (udp_tunnel == NULL)
8307 		return -EINVAL;
8308 
8309 	switch (udp_tunnel->prot_type) {
8310 	case RTE_TUNNEL_TYPE_VXLAN:
8311 		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
8312 		break;
8313 
8314 	case RTE_TUNNEL_TYPE_GENEVE:
8315 	case RTE_TUNNEL_TYPE_TEREDO:
8316 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8317 		ret = -EINVAL;
8318 		break;
8319 
8320 	default:
8321 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8322 		ret = -EINVAL;
8323 		break;
8324 	}
8325 
8326 	return ret;
8327 }
8328 
8329 /* Remove UDP tunneling port */
8330 static int
8331 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8332 			      struct rte_eth_udp_tunnel *udp_tunnel)
8333 {
8334 	int ret = 0;
8335 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8336 
8337 	if (hw->mac.type != ixgbe_mac_X550 &&
8338 	    hw->mac.type != ixgbe_mac_X550EM_x &&
8339 	    hw->mac.type != ixgbe_mac_X550EM_a) {
8340 		return -ENOTSUP;
8341 	}
8342 
8343 	if (udp_tunnel == NULL)
8344 		return -EINVAL;
8345 
8346 	switch (udp_tunnel->prot_type) {
8347 	case RTE_TUNNEL_TYPE_VXLAN:
8348 		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
8349 		break;
8350 	case RTE_TUNNEL_TYPE_GENEVE:
8351 	case RTE_TUNNEL_TYPE_TEREDO:
8352 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8353 		ret = -EINVAL;
8354 		break;
8355 	default:
8356 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8357 		ret = -EINVAL;
8358 		break;
8359 	}
8360 
8361 	return ret;
8362 }
8363 
8364 static void
8365 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
8366 {
8367 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8368 
8369 	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
8370 }
8371 
8372 static void
8373 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
8374 {
8375 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8376 
8377 	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
8378 }
8379 
8380 static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
8381 {
8382 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8383 	u32 in_msg = 0;
8384 
8385 	/* peek the message first */
8386 	in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
8387 
8388 	/* PF reset VF event */
8389 	if (in_msg == IXGBE_PF_CONTROL_MSG) {
8390 		/* dummy mbx read to ack pf */
8391 		if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
8392 			return;
8393 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
8394 					      NULL);
8395 	}
8396 }
8397 
8398 static int
8399 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
8400 {
8401 	uint32_t eicr;
8402 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8403 	struct ixgbe_interrupt *intr =
8404 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8405 	ixgbevf_intr_disable(dev);
8406 
8407 	/* read-on-clear nic registers here */
8408 	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
8409 	intr->flags = 0;
8410 
8411 	/* only one misc vector supported - mailbox */
8412 	eicr &= IXGBE_VTEICR_MASK;
8413 	if (eicr == IXGBE_MISC_VEC_ID)
8414 		intr->flags |= IXGBE_FLAG_MAILBOX;
8415 
8416 	return 0;
8417 }
8418 
8419 static int
8420 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
8421 {
8422 	struct ixgbe_interrupt *intr =
8423 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8424 
8425 	if (intr->flags & IXGBE_FLAG_MAILBOX) {
8426 		ixgbevf_mbx_process(dev);
8427 		intr->flags &= ~IXGBE_FLAG_MAILBOX;
8428 	}
8429 
8430 	ixgbevf_intr_enable(dev);
8431 
8432 	return 0;
8433 }
8434 
8435 static void
8436 ixgbevf_dev_interrupt_handler(void *param)
8437 {
8438 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
8439 
8440 	ixgbevf_dev_interrupt_get_status(dev);
8441 	ixgbevf_dev_interrupt_action(dev);
8442 }
8443 
8444 /**
8445  *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
8446  *  @hw: pointer to hardware structure
8447  *
8448  *  Stops the transmit data path and waits for the HW to internally empty
8449  *  the Tx security block
8450  **/
8451 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
8452 {
8453 #define IXGBE_MAX_SECTX_POLL 40
8454 
8455 	int i;
8456 	int sectxreg;
8457 
8458 	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8459 	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
8460 	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8461 	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
8462 		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
8463 		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
8464 			break;
8465 		/* Use interrupt-safe sleep just in case */
8466 		usec_delay(1000);
8467 	}
8468 
8469 	/* For informational purposes only */
8470 	if (i >= IXGBE_MAX_SECTX_POLL)
8471 		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
8472 			 "path fully disabled.  Continuing with init.");
8473 
8474 	return IXGBE_SUCCESS;
8475 }
8476 
8477 /**
8478  *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
8479  *  @hw: pointer to hardware structure
8480  *
8481  *  Enables the transmit data path.
8482  **/
8483 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
8484 {
8485 	uint32_t sectxreg;
8486 
8487 	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
8488 	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
8489 	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
8490 	IXGBE_WRITE_FLUSH(hw);
8491 
8492 	return IXGBE_SUCCESS;
8493 }
8494 
8495 /* restore n-tuple filter */
8496 static inline void
8497 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
8498 {
8499 	struct ixgbe_filter_info *filter_info =
8500 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8501 	struct ixgbe_5tuple_filter *node;
8502 
8503 	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
8504 		ixgbe_inject_5tuple_filter(dev, node);
8505 	}
8506 }
8507 
8508 /* restore ethernet type filter */
8509 static inline void
8510 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
8511 {
8512 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8513 	struct ixgbe_filter_info *filter_info =
8514 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8515 	int i;
8516 
8517 	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8518 		if (filter_info->ethertype_mask & (1 << i)) {
8519 			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
8520 					filter_info->ethertype_filters[i].etqf);
8521 			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
8522 					filter_info->ethertype_filters[i].etqs);
8523 			IXGBE_WRITE_FLUSH(hw);
8524 		}
8525 	}
8526 }
8527 
8528 /* restore SYN filter */
8529 static inline void
8530 ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
8531 {
8532 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8533 	struct ixgbe_filter_info *filter_info =
8534 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8535 	uint32_t synqf;
8536 
8537 	synqf = filter_info->syn_info;
8538 
8539 	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
8540 		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
8541 		IXGBE_WRITE_FLUSH(hw);
8542 	}
8543 }
8544 
8545 /* restore L2 tunnel filter */
8546 static inline void
8547 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
8548 {
8549 	struct ixgbe_l2_tn_info *l2_tn_info =
8550 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8551 	struct ixgbe_l2_tn_filter *node;
8552 	struct rte_eth_l2_tunnel_conf l2_tn_conf;
8553 
8554 	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
8555 		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
8556 		l2_tn_conf.tunnel_id      = node->key.tn_id;
8557 		l2_tn_conf.pool           = node->pool;
8558 		(void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
8559 	}
8560 }
8561 
8562 /* restore rss filter */
8563 static inline void
8564 ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
8565 {
8566 	struct ixgbe_filter_info *filter_info =
8567 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8568 
8569 	if (filter_info->rss_info.conf.queue_num)
8570 		ixgbe_config_rss_filter(dev,
8571 			&filter_info->rss_info, TRUE);
8572 }
8573 
8574 static int
8575 ixgbe_filter_restore(struct rte_eth_dev *dev)
8576 {
8577 	ixgbe_ntuple_filter_restore(dev);
8578 	ixgbe_ethertype_filter_restore(dev);
8579 	ixgbe_syn_filter_restore(dev);
8580 	ixgbe_fdir_filter_restore(dev);
8581 	ixgbe_l2_tn_filter_restore(dev);
8582 	ixgbe_rss_filter_restore(dev);
8583 
8584 	return 0;
8585 }
8586 
8587 static void
8588 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
8589 {
8590 	struct ixgbe_l2_tn_info *l2_tn_info =
8591 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8592 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8593 
8594 	if (l2_tn_info->e_tag_en)
8595 		(void)ixgbe_e_tag_enable(hw);
8596 
8597 	if (l2_tn_info->e_tag_fwd_en)
8598 		(void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
8599 
8600 	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
8601 }
8602 
8603 /* remove all the n-tuple filters */
8604 void
8605 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
8606 {
8607 	struct ixgbe_filter_info *filter_info =
8608 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8609 	struct ixgbe_5tuple_filter *p_5tuple;
8610 
8611 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
8612 		ixgbe_remove_5tuple_filter(dev, p_5tuple);
8613 }
8614 
8615 /* remove all the ether type filters */
8616 void
8617 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
8618 {
8619 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8620 	struct ixgbe_filter_info *filter_info =
8621 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8622 	int i;
8623 
8624 	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
8625 		if (filter_info->ethertype_mask & (1 << i) &&
8626 		    !filter_info->ethertype_filters[i].conf) {
8627 			(void)ixgbe_ethertype_filter_remove(filter_info,
8628 							    (uint8_t)i);
8629 			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
8630 			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
8631 			IXGBE_WRITE_FLUSH(hw);
8632 		}
8633 	}
8634 }
8635 
8636 /* remove the SYN filter */
8637 void
8638 ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
8639 {
8640 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8641 	struct ixgbe_filter_info *filter_info =
8642 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8643 
8644 	if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
8645 		filter_info->syn_info = 0;
8646 
8647 		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
8648 		IXGBE_WRITE_FLUSH(hw);
8649 	}
8650 }
8651 
8652 /* remove all the L2 tunnel filters */
8653 int
8654 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
8655 {
8656 	struct ixgbe_l2_tn_info *l2_tn_info =
8657 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
8658 	struct ixgbe_l2_tn_filter *l2_tn_filter;
8659 	struct rte_eth_l2_tunnel_conf l2_tn_conf;
8660 	int ret = 0;
8661 
8662 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
8663 		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
8664 		l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
8665 		l2_tn_conf.pool           = l2_tn_filter->pool;
8666 		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
8667 		if (ret < 0)
8668 			return ret;
8669 	}
8670 
8671 	return 0;
8672 }
8673 
8674 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
8675 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
8676 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
8677 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
8678 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
8679 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
8680 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
8681 			      IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
8682 
8683 RTE_INIT(ixgbe_init_log)
8684 {
8685 	ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
8686 	if (ixgbe_logtype_init >= 0)
8687 		rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
8688 	ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
8689 	if (ixgbe_logtype_driver >= 0)
8690 		rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
8691 }
8692