xref: /dpdk/drivers/net/i40e/i40e_ethdev.c (revision ff4e52ef)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <stdarg.h>
11 #include <inttypes.h>
12 #include <assert.h>
13 
14 #include <rte_common.h>
15 #include <rte_eal.h>
16 #include <rte_string_fns.h>
17 #include <rte_pci.h>
18 #include <rte_bus_pci.h>
19 #include <rte_ether.h>
20 #include <ethdev_driver.h>
21 #include <ethdev_pci.h>
22 #include <rte_memzone.h>
23 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_alarm.h>
26 #include <rte_dev.h>
27 #include <rte_tailq.h>
28 #include <rte_hash_crc.h>
29 #include <rte_bitmap.h>
30 #include <rte_os_shim.h>
31 
32 #include "i40e_logs.h"
33 #include "base/i40e_prototype.h"
34 #include "base/i40e_adminq_cmd.h"
35 #include "base/i40e_type.h"
36 #include "base/i40e_register.h"
37 #include "base/i40e_dcb.h"
38 #include "i40e_ethdev.h"
39 #include "i40e_rxtx.h"
40 #include "i40e_pf.h"
41 #include "i40e_regs.h"
42 #include "rte_pmd_i40e.h"
43 #include "i40e_hash.h"
44 
45 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
46 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
47 #define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
48 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG	"queue-num-per-vf"
49 #define ETH_I40E_VF_MSG_CFG		"vf_msg_cfg"
50 
51 #define I40E_CLEAR_PXE_WAIT_MS     200
52 #define I40E_VSI_TSR_QINQ_STRIP		0x4010
53 #define I40E_VSI_TSR(_i)	(0x00050800 + ((_i) * 4))
54 
55 /* Maximum number of capability elements */
56 #define I40E_MAX_CAP_ELE_NUM       128
57 
58 /* Wait count and interval */
59 #define I40E_CHK_Q_ENA_COUNT       1000
60 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
61 
62 /* Maximum number of VSIs */
63 #define I40E_MAX_NUM_VSIS          (384UL)
64 
65 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
66 
67 /* Flow control default timer */
68 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
69 
70 /* Flow control enable fwd bit */
71 #define I40E_PRTMAC_FWD_CTRL   0x00000001
72 
73 /* Receive Packet Buffer size */
74 #define I40E_RXPBSIZE (968 * 1024)
75 
76 /* Kilobytes shift */
77 #define I40E_KILOSHIFT 10
78 
79 /* Flow control default high water */
80 #define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
81 
82 /* Flow control default low water */
83 #define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
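/* Note: the default high and low water marks above both evaluate to 968 KB
 * (0xF2000 bytes >> I40E_KILOSHIFT), i.e. the full I40E_RXPBSIZE packet buffer.
 */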
84 
85 /* Receive average packet size in bytes */
86 #define I40E_PACKET_AVERAGE_SIZE 128
87 
88 /* Mask of PF interrupt causes */
89 #define I40E_PFINT_ICR0_ENA_MASK ( \
90 		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
91 		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
92 		I40E_PFINT_ICR0_ENA_GRST_MASK | \
93 		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
94 		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
95 		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
96 		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
97 		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
98 		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
99 
100 #define I40E_FLOW_TYPES ( \
101 	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
102 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
103 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
104 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
105 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
106 	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
107 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
108 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
109 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
110 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
111 	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
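/* I40E_FLOW_TYPES is simply the bitmap of every RTE flow type listed above:
 * fragmented and non-fragmented IPv4/IPv6 (TCP, UDP, SCTP, other) plus
 * L2 payload.
 */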
112 
113 /* Additional timesync values. */
114 #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
115 #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
116 #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
117 #define I40E_PRTTSYN_TSYNENA     0x80000000
118 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
119 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
120 
121 /**
122  * Below are the values for writing un-exposed registers, as
123  * suggested by silicon experts.
124  */
125 /* Destination MAC address */
126 #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
127 /* Source MAC address */
128 #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
129 /* Outer (S-Tag) VLAN tag in the outer L2 header */
130 #define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
131 /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
132 #define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
133 /* Single VLAN tag in the inner L2 header */
134 #define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
135 /* Source IPv4 address */
136 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
137 /* Destination IPv4 address */
138 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
139 /* Source IPv4 address for X722 */
140 #define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
141 /* Destination IPv4 address for X722 */
142 #define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
143 /* IPv4 Protocol for X722 */
144 #define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
145 /* IPv4 Time to Live for X722 */
146 #define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
147 /* IPv4 Type of Service (TOS) */
148 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
149 /* IPv4 Protocol */
150 #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
151 /* IPv4 Time to Live */
152 #define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
153 /* Source IPv6 address */
154 #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
155 /* Destination IPv6 address */
156 #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
157 /* IPv6 Traffic Class (TC) */
158 #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
159 /* IPv6 Next Header */
160 #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
161 /* IPv6 Hop Limit */
162 #define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
163 /* Source L4 port */
164 #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
165 /* Destination L4 port */
166 #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
167 /* SCTP verification tag */
168 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
169 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
170 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
171 /* Source port of tunneling UDP */
172 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
173 /* Destination port of tunneling UDP */
174 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
175 /* UDP Tunneling ID, NVGRE/GRE key */
176 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
177 /* Last ether type */
178 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
179 /* Tunneling outer destination IPv4 address */
180 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
181 /* Tunneling outer destination IPv6 address */
182 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
183 /* 1st word of flex payload */
184 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
185 /* 2nd word of flex payload */
186 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
187 /* 3rd word of flex payload */
188 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
189 /* 4th word of flex payload */
190 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
191 /* 5th word of flex payload */
192 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
193 /* 6th word of flex payload */
194 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
195 /* 7th word of flex payload */
196 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
197 /* 8th word of flex payload */
198 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
199 /* All 8 words of flex payload */
200 #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
201 #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
202 
203 #define I40E_TRANSLATE_INSET 0
204 #define I40E_TRANSLATE_REG   1
205 
206 #define I40E_INSET_IPV4_TOS_MASK        0x0000FF00UL
207 #define I40E_INSET_IPV4_TTL_MASK        0x000000FFUL
208 #define I40E_INSET_IPV4_PROTO_MASK      0x0000FF00UL
209 #define I40E_INSET_IPV6_TC_MASK         0x0000F00FUL
210 #define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x0000FF00UL
211 #define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000000FFUL
212 
213 /* PCI offset for querying capability */
214 #define PCI_DEV_CAP_REG            0xA4
215 /* PCI offset for enabling/disabling Extended Tag */
216 #define PCI_DEV_CTRL_REG           0xA8
217 /* Bit mask of Extended Tag capability */
218 #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
219 /* Bit shift of Extended Tag enable/disable */
220 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
221 /* Bit mask of Extended Tag enable/disable */
222 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
223 
224 #define I40E_GLQF_PIT_IPV4_START	2
225 #define I40E_GLQF_PIT_IPV4_COUNT	2
226 #define I40E_GLQF_PIT_IPV6_START	4
227 #define I40E_GLQF_PIT_IPV6_COUNT	2
228 
229 #define I40E_GLQF_PIT_SOURCE_OFF_GET(a)	\
230 				(((a) & I40E_GLQF_PIT_SOURCE_OFF_MASK) >> \
231 				 I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
232 
233 #define I40E_GLQF_PIT_DEST_OFF_GET(a) \
234 				(((a) & I40E_GLQF_PIT_DEST_OFF_MASK) >> \
235 				 I40E_GLQF_PIT_DEST_OFF_SHIFT)
236 
237 #define I40E_GLQF_PIT_FSIZE_GET(a)	(((a) & I40E_GLQF_PIT_FSIZE_MASK) >> \
238 					 I40E_GLQF_PIT_FSIZE_SHIFT)
239 
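/* As read from the macros below: a GLQF_PIT entry packs a field offset in
 * its upper 16 bits and a field mask in its lower 16 bits, and FDIR field
 * offsets are expressed in 16-bit words (hence the halving).
 */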
240 #define I40E_GLQF_PIT_BUILD(off, mask)	(((off) << 16) | (mask))
241 #define I40E_FDIR_FIELD_OFFSET(a)	((a) >> 1)
242 
243 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
244 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
245 static int i40e_dev_configure(struct rte_eth_dev *dev);
246 static int i40e_dev_start(struct rte_eth_dev *dev);
247 static int i40e_dev_stop(struct rte_eth_dev *dev);
248 static int i40e_dev_close(struct rte_eth_dev *dev);
249 static int  i40e_dev_reset(struct rte_eth_dev *dev);
250 static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
251 static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
252 static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
253 static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
254 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
255 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
256 static int i40e_dev_stats_get(struct rte_eth_dev *dev,
257 			       struct rte_eth_stats *stats);
258 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
259 			       struct rte_eth_xstat *xstats, unsigned n);
260 static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
261 				     struct rte_eth_xstat_name *xstats_names,
262 				     unsigned limit);
263 static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
264 static int i40e_fw_version_get(struct rte_eth_dev *dev,
265 				char *fw_version, size_t fw_size);
266 static int i40e_dev_info_get(struct rte_eth_dev *dev,
267 			     struct rte_eth_dev_info *dev_info);
268 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
269 				uint16_t vlan_id,
270 				int on);
271 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
272 			      enum rte_vlan_type vlan_type,
273 			      uint16_t tpid);
274 static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
275 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
276 				      uint16_t queue,
277 				      int on);
278 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
279 static int i40e_dev_led_on(struct rte_eth_dev *dev);
280 static int i40e_dev_led_off(struct rte_eth_dev *dev);
281 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
282 			      struct rte_eth_fc_conf *fc_conf);
283 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
284 			      struct rte_eth_fc_conf *fc_conf);
285 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
286 				       struct rte_eth_pfc_conf *pfc_conf);
287 static int i40e_macaddr_add(struct rte_eth_dev *dev,
288 			    struct rte_ether_addr *mac_addr,
289 			    uint32_t index,
290 			    uint32_t pool);
291 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
292 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
293 				    struct rte_eth_rss_reta_entry64 *reta_conf,
294 				    uint16_t reta_size);
295 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
296 				   struct rte_eth_rss_reta_entry64 *reta_conf,
297 				   uint16_t reta_size);
298 
299 static int i40e_get_cap(struct i40e_hw *hw);
300 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
301 static int i40e_pf_setup(struct i40e_pf *pf);
302 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
303 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
304 static int i40e_dcb_setup(struct rte_eth_dev *dev);
305 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
306 		bool offset_loaded, uint64_t *offset, uint64_t *stat);
307 static void i40e_stat_update_48(struct i40e_hw *hw,
308 			       uint32_t hireg,
309 			       uint32_t loreg,
310 			       bool offset_loaded,
311 			       uint64_t *offset,
312 			       uint64_t *stat);
313 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
314 static void i40e_dev_interrupt_handler(void *param);
315 static void i40e_dev_alarm_handler(void *param);
316 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
317 				uint32_t base, uint32_t num);
318 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
319 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
320 			uint32_t base);
321 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
322 			uint16_t num);
323 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
324 static int i40e_veb_release(struct i40e_veb *veb);
325 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
326 						struct i40e_vsi *vsi);
327 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
328 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
329 					     struct i40e_macvlan_filter *mv_f,
330 					     int num,
331 					     uint16_t vlan);
332 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
333 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
334 				    struct rte_eth_rss_conf *rss_conf);
335 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
336 				      struct rte_eth_rss_conf *rss_conf);
337 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
338 					struct rte_eth_udp_tunnel *udp_tunnel);
339 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
340 					struct rte_eth_udp_tunnel *udp_tunnel);
341 static void i40e_filter_input_set_init(struct i40e_pf *pf);
342 static int i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
343 				 const struct rte_flow_ops **ops);
344 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
345 				  struct rte_eth_dcb_info *dcb_info);
346 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
347 static void i40e_configure_registers(struct i40e_hw *hw);
348 static void i40e_hw_init(struct rte_eth_dev *dev);
349 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
350 
351 static int i40e_timesync_enable(struct rte_eth_dev *dev);
352 static int i40e_timesync_disable(struct rte_eth_dev *dev);
353 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
354 					   struct timespec *timestamp,
355 					   uint32_t flags);
356 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
357 					   struct timespec *timestamp);
358 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
359 
360 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
361 
362 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
363 				   struct timespec *timestamp);
364 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
365 				    const struct timespec *timestamp);
366 
367 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
368 					 uint16_t queue_id);
369 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
370 					  uint16_t queue_id);
371 
372 static int i40e_get_regs(struct rte_eth_dev *dev,
373 			 struct rte_dev_reg_info *regs);
374 
375 static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
376 
377 static int i40e_get_eeprom(struct rte_eth_dev *dev,
378 			   struct rte_dev_eeprom_info *eeprom);
379 
380 static int i40e_get_module_info(struct rte_eth_dev *dev,
381 				struct rte_eth_dev_module_info *modinfo);
382 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
383 				  struct rte_dev_eeprom_info *info);
384 
385 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
386 				      struct rte_ether_addr *mac_addr);
387 
388 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
389 
390 static int i40e_ethertype_filter_convert(
391 	const struct rte_eth_ethertype_filter *input,
392 	struct i40e_ethertype_filter *filter);
393 static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
394 				   struct i40e_ethertype_filter *filter);
395 
396 static int i40e_tunnel_filter_convert(
397 	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
398 	struct i40e_tunnel_filter *tunnel_filter);
399 static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
400 				struct i40e_tunnel_filter *tunnel_filter);
401 static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
402 
403 static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
404 static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
405 static void i40e_filter_restore(struct i40e_pf *pf);
406 static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
407 
408 static const char *const valid_keys[] = {
409 	ETH_I40E_FLOATING_VEB_ARG,
410 	ETH_I40E_FLOATING_VEB_LIST_ARG,
411 	ETH_I40E_SUPPORT_MULTI_DRIVER,
412 	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
413 	ETH_I40E_VF_MSG_CFG,
414 	NULL};
415 
416 static const struct rte_pci_id pci_id_i40e_map[] = {
417 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
418 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
419 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
420 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
421 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
422 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
423 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
424 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
425 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
426 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
427 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
428 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
429 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
430 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
431 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
432 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
433 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
434 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
435 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
436 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
437 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
438 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
439 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
440 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC) },
441 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B) },
442 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP) },
443 	{ .vendor_id = 0, /* sentinel */ },
444 };
445 
446 static const struct eth_dev_ops i40e_eth_dev_ops = {
447 	.dev_configure                = i40e_dev_configure,
448 	.dev_start                    = i40e_dev_start,
449 	.dev_stop                     = i40e_dev_stop,
450 	.dev_close                    = i40e_dev_close,
451 	.dev_reset		      = i40e_dev_reset,
452 	.promiscuous_enable           = i40e_dev_promiscuous_enable,
453 	.promiscuous_disable          = i40e_dev_promiscuous_disable,
454 	.allmulticast_enable          = i40e_dev_allmulticast_enable,
455 	.allmulticast_disable         = i40e_dev_allmulticast_disable,
456 	.dev_set_link_up              = i40e_dev_set_link_up,
457 	.dev_set_link_down            = i40e_dev_set_link_down,
458 	.link_update                  = i40e_dev_link_update,
459 	.stats_get                    = i40e_dev_stats_get,
460 	.xstats_get                   = i40e_dev_xstats_get,
461 	.xstats_get_names             = i40e_dev_xstats_get_names,
462 	.stats_reset                  = i40e_dev_stats_reset,
463 	.xstats_reset                 = i40e_dev_stats_reset,
464 	.fw_version_get               = i40e_fw_version_get,
465 	.dev_infos_get                = i40e_dev_info_get,
466 	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
467 	.vlan_filter_set              = i40e_vlan_filter_set,
468 	.vlan_tpid_set                = i40e_vlan_tpid_set,
469 	.vlan_offload_set             = i40e_vlan_offload_set,
470 	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
471 	.vlan_pvid_set                = i40e_vlan_pvid_set,
472 	.rx_queue_start               = i40e_dev_rx_queue_start,
473 	.rx_queue_stop                = i40e_dev_rx_queue_stop,
474 	.tx_queue_start               = i40e_dev_tx_queue_start,
475 	.tx_queue_stop                = i40e_dev_tx_queue_stop,
476 	.rx_queue_setup               = i40e_dev_rx_queue_setup,
477 	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
478 	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
479 	.rx_queue_release             = i40e_dev_rx_queue_release,
480 	.tx_queue_setup               = i40e_dev_tx_queue_setup,
481 	.tx_queue_release             = i40e_dev_tx_queue_release,
482 	.dev_led_on                   = i40e_dev_led_on,
483 	.dev_led_off                  = i40e_dev_led_off,
484 	.flow_ctrl_get                = i40e_flow_ctrl_get,
485 	.flow_ctrl_set                = i40e_flow_ctrl_set,
486 	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
487 	.mac_addr_add                 = i40e_macaddr_add,
488 	.mac_addr_remove              = i40e_macaddr_remove,
489 	.reta_update                  = i40e_dev_rss_reta_update,
490 	.reta_query                   = i40e_dev_rss_reta_query,
491 	.rss_hash_update              = i40e_dev_rss_hash_update,
492 	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
493 	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
494 	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
495 	.flow_ops_get                 = i40e_dev_flow_ops_get,
496 	.rxq_info_get                 = i40e_rxq_info_get,
497 	.txq_info_get                 = i40e_txq_info_get,
498 	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
499 	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
500 	.timesync_enable              = i40e_timesync_enable,
501 	.timesync_disable             = i40e_timesync_disable,
502 	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
503 	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
504 	.get_dcb_info                 = i40e_dev_get_dcb_info,
505 	.timesync_adjust_time         = i40e_timesync_adjust_time,
506 	.timesync_read_time           = i40e_timesync_read_time,
507 	.timesync_write_time          = i40e_timesync_write_time,
508 	.get_reg                      = i40e_get_regs,
509 	.get_eeprom_length            = i40e_get_eeprom_length,
510 	.get_eeprom                   = i40e_get_eeprom,
511 	.get_module_info              = i40e_get_module_info,
512 	.get_module_eeprom            = i40e_get_module_eeprom,
513 	.mac_addr_set                 = i40e_set_default_mac_addr,
514 	.mtu_set                      = i40e_dev_mtu_set,
515 	.tm_ops_get                   = i40e_tm_ops_get,
516 	.tx_done_cleanup              = i40e_tx_done_cleanup,
517 	.get_monitor_addr             = i40e_get_monitor_addr,
518 };
519 
520 /* Store statistics names and their offsets in the stats structure */
521 struct rte_i40e_xstats_name_off {
522 	char name[RTE_ETH_XSTATS_NAME_SIZE];
523 	unsigned offset;
524 };
525 
526 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
527 	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
528 	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
529 	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
530 	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
531 	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
532 		rx_unknown_protocol)},
533 	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
534 	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
535 	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
536 	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
537 };
538 
539 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
540 		sizeof(rte_i40e_stats_strings[0]))
541 
542 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
543 	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
544 		tx_dropped_link_down)},
545 	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
546 	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
547 		illegal_bytes)},
548 	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
549 	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
550 		mac_local_faults)},
551 	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
552 		mac_remote_faults)},
553 	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
554 		rx_length_errors)},
555 	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
556 	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
557 	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
558 	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
559 	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
560 	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
561 		rx_size_127)},
562 	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
563 		rx_size_255)},
564 	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
565 		rx_size_511)},
566 	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
567 		rx_size_1023)},
568 	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
569 		rx_size_1522)},
570 	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
571 		rx_size_big)},
572 	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
573 		rx_undersize)},
574 	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
575 		rx_oversize)},
576 	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
577 		mac_short_packet_dropped)},
578 	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
579 		rx_fragments)},
580 	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
581 	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
582 	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
583 		tx_size_127)},
584 	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
585 		tx_size_255)},
586 	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
587 		tx_size_511)},
588 	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
589 		tx_size_1023)},
590 	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
591 		tx_size_1522)},
592 	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
593 		tx_size_big)},
594 	{"rx_flow_director_atr_match_packets",
595 		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
596 	{"rx_flow_director_sb_match_packets",
597 		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
598 	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
599 		tx_lpi_status)},
600 	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
601 		rx_lpi_status)},
602 	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
603 		tx_lpi_count)},
604 	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
605 		rx_lpi_count)},
606 };
607 
608 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
609 		sizeof(rte_i40e_hw_port_strings[0]))
610 
611 static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
612 	{"xon_packets", offsetof(struct i40e_hw_port_stats,
613 		priority_xon_rx)},
614 	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
615 		priority_xoff_rx)},
616 };
617 
618 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
619 		sizeof(rte_i40e_rxq_prio_strings[0]))
620 
621 static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
622 	{"xon_packets", offsetof(struct i40e_hw_port_stats,
623 		priority_xon_tx)},
624 	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
625 		priority_xoff_tx)},
626 	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
627 		priority_xon_2_xoff)},
628 };
629 
630 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
631 		sizeof(rte_i40e_txq_prio_strings[0]))
632 
633 static int
634 eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
635 	struct rte_pci_device *pci_dev)
636 {
637 	char name[RTE_ETH_NAME_MAX_LEN];
638 	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
639 	int i, retval;
640 
641 	if (pci_dev->device.devargs) {
642 		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
643 				&eth_da);
644 		if (retval)
645 			return retval;
646 	}
647 
648 	if (eth_da.nb_representor_ports > 0 &&
649 	    eth_da.type != RTE_ETH_REPRESENTOR_VF) {
650 		PMD_DRV_LOG(ERR, "unsupported representor type: %s\n",
651 			    pci_dev->device.devargs->args);
652 		return -ENOTSUP;
653 	}
654 
655 	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
656 		sizeof(struct i40e_adapter),
657 		eth_dev_pci_specific_init, pci_dev,
658 		eth_i40e_dev_init, NULL);
659 
660 	if (retval || eth_da.nb_representor_ports < 1)
661 		return retval;
662 
663 	/* probe VF representor ports */
664 	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
665 		pci_dev->device.name);
666 
667 	if (pf_ethdev == NULL)
668 		return -ENODEV;
669 
670 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
671 		struct i40e_vf_representor representor = {
672 			.vf_id = eth_da.representor_ports[i],
673 			.switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
674 				pf_ethdev->data->dev_private)->switch_domain_id,
675 			.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
676 				pf_ethdev->data->dev_private)
677 		};
678 
679 		/* representor port name: net_<bdf>_representor_<vf id> */
680 		snprintf(name, sizeof(name), "net_%s_representor_%d",
681 			pci_dev->device.name, eth_da.representor_ports[i]);
682 
683 		retval = rte_eth_dev_create(&pci_dev->device, name,
684 			sizeof(struct i40e_vf_representor), NULL, NULL,
685 			i40e_vf_representor_init, &representor);
686 
687 		if (retval)
688 			PMD_DRV_LOG(ERR, "failed to create i40e vf "
689 				"representor %s.", name);
690 	}
691 
692 	return 0;
693 }
694 
695 static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
696 {
697 	struct rte_eth_dev *ethdev;
698 
699 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
700 	if (!ethdev)
701 		return 0;
702 
703 	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
704 		return rte_eth_dev_pci_generic_remove(pci_dev,
705 					i40e_vf_representor_uninit);
706 	else
707 		return rte_eth_dev_pci_generic_remove(pci_dev,
708 						eth_i40e_dev_uninit);
709 }
710 
711 static struct rte_pci_driver rte_i40e_pmd = {
712 	.id_table = pci_id_i40e_map,
713 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
714 	.probe = eth_i40e_pci_probe,
715 	.remove = eth_i40e_pci_remove,
716 };
717 
718 static inline void
719 i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
720 			 uint32_t reg_val)
721 {
722 	uint32_t ori_reg_val;
723 	struct rte_eth_dev_data *dev_data =
724 		((struct i40e_adapter *)hw->back)->pf.dev_data;
725 	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
726 
727 	ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
728 	i40e_write_rx_ctl(hw, reg_addr, reg_val);
729 	if (ori_reg_val != reg_val)
730 		PMD_DRV_LOG(WARNING,
731 			    "i40e device %s changed global register [0x%08x]."
732 			    " original: 0x%08x, new: 0x%08x",
733 			    dev->device->name, reg_addr, ori_reg_val, reg_val);
734 }
735 
736 RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
737 RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
738 RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
739 
740 #ifndef I40E_GLQF_ORT
741 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
742 #endif
743 #ifndef I40E_GLQF_PIT
744 #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
745 #endif
746 #ifndef I40E_GLQF_L3_MAP
747 #define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
748 #endif
749 
750 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
751 {
752 	/*
753 	 * Initialize registers for parsing the packet type of QinQ.
754 	 * This should be removed once a proper configuration API is
755 	 * added, to avoid configuration conflicts between ports of the
756 	 * same device.
757 	 */
758 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
759 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
760 }
761 
762 static inline void i40e_config_automask(struct i40e_pf *pf)
763 {
764 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
765 	uint32_t val;
766 
767 	/* INTENA flag is not auto-cleared for interrupt */
768 	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
769 	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
770 		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
771 
772 	/* If multi-driver support is enabled, the PF will use INT0. */
773 	if (!pf->support_multi_driver)
774 		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
775 
776 	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
777 }
778 
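/* Counterpart of i40e_config_automask(): re-enable interrupt auto-masking
 * by clearing the DIS_AUTOMASK bits that were set above.
 */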
779 static inline void i40e_clear_automask(struct i40e_pf *pf)
780 {
781 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
782 	uint32_t val;
783 
784 	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
785 	val &= ~(I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
786 		 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK);
787 
788 	if (!pf->support_multi_driver)
789 		val &= ~I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
790 
791 	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
792 }
793 
794 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
795 
796 /*
797  * Add an ethertype filter to drop all flow control frames transmitted
798  * from VSIs.
799  */
800 static void
801 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
802 {
803 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
804 	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
805 			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
806 			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
807 	int ret;
808 
809 	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
810 				I40E_FLOW_CONTROL_ETHERTYPE, flags,
811 				pf->main_vsi_seid, 0,
812 				TRUE, NULL, NULL);
813 	if (ret)
814 		PMD_INIT_LOG(ERR,
815 			"Failed to add filter to drop flow control frames from VSIs.");
816 }
817 
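/* Parse the floating_veb_list devargs value: a ';'-separated list of VF ids
 * and id ranges, e.g. floating_veb_list=0;3-5;7 would mark VFs 0, 3, 4, 5
 * and 7 as attached to the floating VEB (example format inferred from the
 * parsing logic below).
 */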
818 static int
819 floating_veb_list_handler(__rte_unused const char *key,
820 			  const char *floating_veb_value,
821 			  void *opaque)
822 {
823 	int idx = 0;
824 	unsigned int count = 0;
825 	char *end = NULL;
826 	int min, max;
827 	bool *vf_floating_veb = opaque;
828 
829 	while (isblank(*floating_veb_value))
830 		floating_veb_value++;
831 
832 	/* Reset floating VEB configuration for VFs */
833 	for (idx = 0; idx < I40E_MAX_VF; idx++)
834 		vf_floating_veb[idx] = false;
835 
836 	min = I40E_MAX_VF;
837 	do {
838 		while (isblank(*floating_veb_value))
839 			floating_veb_value++;
840 		if (*floating_veb_value == '\0')
841 			return -1;
842 		errno = 0;
843 		idx = strtoul(floating_veb_value, &end, 10);
844 		if (errno || end == NULL)
845 			return -1;
846 		if (idx < 0)
847 			return -1;
848 		while (isblank(*end))
849 			end++;
850 		if (*end == '-') {
851 			min = idx;
852 		} else if ((*end == ';') || (*end == '\0')) {
853 			max = idx;
854 			if (min == I40E_MAX_VF)
855 				min = idx;
856 			if (max >= I40E_MAX_VF)
857 				max = I40E_MAX_VF - 1;
858 			for (idx = min; idx <= max; idx++) {
859 				vf_floating_veb[idx] = true;
860 				count++;
861 			}
862 			min = I40E_MAX_VF;
863 		} else {
864 			return -1;
865 		}
866 		floating_veb_value = end + 1;
867 	} while (*end != '\0');
868 
869 	if (count == 0)
870 		return -1;
871 
872 	return 0;
873 }
874 
875 static void
876 config_vf_floating_veb(struct rte_devargs *devargs,
877 		       uint16_t floating_veb,
878 		       bool *vf_floating_veb)
879 {
880 	struct rte_kvargs *kvlist;
881 	int i;
882 	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
883 
884 	if (!floating_veb)
885 		return;
886 	/* All the VFs attach to the floating VEB by default
887 	 * when the floating VEB is enabled.
888 	 */
889 	for (i = 0; i < I40E_MAX_VF; i++)
890 		vf_floating_veb[i] = true;
891 
892 	if (devargs == NULL)
893 		return;
894 
895 	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
896 	if (kvlist == NULL)
897 		return;
898 
899 	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
900 		rte_kvargs_free(kvlist);
901 		return;
902 	}
903 	/* When the floating_veb_list parameter exists, all the VFs
904 	 * attach to the legacy VEB first and are then moved to the
905 	 * floating VEB according to the floating_veb_list.
906 	 */
907 	if (rte_kvargs_process(kvlist, floating_veb_list,
908 			       floating_veb_list_handler,
909 			       vf_floating_veb) < 0) {
910 		rte_kvargs_free(kvlist);
911 		return;
912 	}
913 	rte_kvargs_free(kvlist);
914 }
915 
916 static int
917 i40e_check_floating_handler(__rte_unused const char *key,
918 			    const char *value,
919 			    __rte_unused void *opaque)
920 {
921 	if (strcmp(value, "1"))
922 		return -1;
923 
924 	return 0;
925 }
926 
927 static int
928 is_floating_veb_supported(struct rte_devargs *devargs)
929 {
930 	struct rte_kvargs *kvlist;
931 	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
932 
933 	if (devargs == NULL)
934 		return 0;
935 
936 	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
937 	if (kvlist == NULL)
938 		return 0;
939 
940 	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
941 		rte_kvargs_free(kvlist);
942 		return 0;
943 	}
944 	/* Floating VEB is enabled when there's key-value:
945 	 * enable_floating_veb=1
946 	 */
947 	if (rte_kvargs_process(kvlist, floating_veb_key,
948 			       i40e_check_floating_handler, NULL) < 0) {
949 		rte_kvargs_free(kvlist);
950 		return 0;
951 	}
952 	rte_kvargs_free(kvlist);
953 
954 	return 1;
955 }
956 
957 static void
958 config_floating_veb(struct rte_eth_dev *dev)
959 {
960 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
961 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
962 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
963 
964 	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
965 
966 	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
967 		pf->floating_veb =
968 			is_floating_veb_supported(pci_dev->device.devargs);
969 		config_vf_floating_veb(pci_dev->device.devargs,
970 				       pf->floating_veb,
971 				       pf->floating_veb_list);
972 	} else {
973 		pf->floating_veb = false;
974 	}
975 }
976 
977 #define I40E_L2_TAGS_S_TAG_SHIFT 1
978 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
979 
980 static int
981 i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
982 {
983 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
984 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
985 	char ethertype_hash_name[RTE_HASH_NAMESIZE];
986 	int ret;
987 
988 	struct rte_hash_parameters ethertype_hash_params = {
989 		.name = ethertype_hash_name,
990 		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
991 		.key_len = sizeof(struct i40e_ethertype_filter_input),
992 		.hash_func = rte_hash_crc,
993 		.hash_func_init_val = 0,
994 		.socket_id = rte_socket_id(),
995 	};
996 
997 	/* Initialize ethertype filter rule list and hash */
998 	TAILQ_INIT(&ethertype_rule->ethertype_list);
999 	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
1000 		 "ethertype_%s", dev->device->name);
1001 	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
1002 	if (!ethertype_rule->hash_table) {
1003 		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
1004 		return -EINVAL;
1005 	}
1006 	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
1007 				       sizeof(struct i40e_ethertype_filter *) *
1008 				       I40E_MAX_ETHERTYPE_FILTER_NUM,
1009 				       0);
1010 	if (!ethertype_rule->hash_map) {
1011 		PMD_INIT_LOG(ERR,
1012 			     "Failed to allocate memory for ethertype hash map!");
1013 		ret = -ENOMEM;
1014 		goto err_ethertype_hash_map_alloc;
1015 	}
1016 
1017 	return 0;
1018 
1019 err_ethertype_hash_map_alloc:
1020 	rte_hash_free(ethertype_rule->hash_table);
1021 
1022 	return ret;
1023 }
1024 
1025 static int
1026 i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
1027 {
1028 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1029 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1030 	char tunnel_hash_name[RTE_HASH_NAMESIZE];
1031 	int ret;
1032 
1033 	struct rte_hash_parameters tunnel_hash_params = {
1034 		.name = tunnel_hash_name,
1035 		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
1036 		.key_len = sizeof(struct i40e_tunnel_filter_input),
1037 		.hash_func = rte_hash_crc,
1038 		.hash_func_init_val = 0,
1039 		.socket_id = rte_socket_id(),
1040 	};
1041 
1042 	/* Initialize tunnel filter rule list and hash */
1043 	TAILQ_INIT(&tunnel_rule->tunnel_list);
1044 	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
1045 		 "tunnel_%s", dev->device->name);
1046 	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
1047 	if (!tunnel_rule->hash_table) {
1048 		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
1049 		return -EINVAL;
1050 	}
1051 	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
1052 				    sizeof(struct i40e_tunnel_filter *) *
1053 				    I40E_MAX_TUNNEL_FILTER_NUM,
1054 				    0);
1055 	if (!tunnel_rule->hash_map) {
1056 		PMD_INIT_LOG(ERR,
1057 			     "Failed to allocate memory for tunnel hash map!");
1058 		ret = -ENOMEM;
1059 		goto err_tunnel_hash_map_alloc;
1060 	}
1061 
1062 	return 0;
1063 
1064 err_tunnel_hash_map_alloc:
1065 	rte_hash_free(tunnel_rule->hash_table);
1066 
1067 	return ret;
1068 }
1069 
1070 static int
1071 i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
1072 {
1073 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1074 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1075 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1076 	char fdir_hash_name[RTE_HASH_NAMESIZE];
1077 	uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
1078 	uint32_t best = hw->func_caps.fd_filters_best_effort;
1079 	enum i40e_filter_pctype pctype;
1080 	struct rte_bitmap *bmp = NULL;
1081 	uint32_t bmp_size;
1082 	void *mem = NULL;
1083 	uint32_t i = 0;
1084 	int ret;
1085 
1086 	struct rte_hash_parameters fdir_hash_params = {
1087 		.name = fdir_hash_name,
1088 		.entries = I40E_MAX_FDIR_FILTER_NUM,
1089 		.key_len = sizeof(struct i40e_fdir_input),
1090 		.hash_func = rte_hash_crc,
1091 		.hash_func_init_val = 0,
1092 		.socket_id = rte_socket_id(),
1093 	};
1094 
1095 	/* Initialize flow director filter rule list and hash */
1096 	TAILQ_INIT(&fdir_info->fdir_list);
1097 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1098 		 "fdir_%s", dev->device->name);
1099 	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
1100 	if (!fdir_info->hash_table) {
1101 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1102 		return -EINVAL;
1103 	}
1104 
1105 	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
1106 					  sizeof(struct i40e_fdir_filter *) *
1107 					  I40E_MAX_FDIR_FILTER_NUM,
1108 					  0);
1109 	if (!fdir_info->hash_map) {
1110 		PMD_INIT_LOG(ERR,
1111 			     "Failed to allocate memory for fdir hash map!");
1112 		ret = -ENOMEM;
1113 		goto err_fdir_hash_map_alloc;
1114 	}
1115 
1116 	fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
1117 			sizeof(struct i40e_fdir_filter) *
1118 			I40E_MAX_FDIR_FILTER_NUM,
1119 			0);
1120 
1121 	if (!fdir_info->fdir_filter_array) {
1122 		PMD_INIT_LOG(ERR,
1123 			     "Failed to allocate memory for fdir filter array!");
1124 		ret = -ENOMEM;
1125 		goto err_fdir_filter_array_alloc;
1126 	}
1127 
1128 	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
1129 	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
1130 		pf->fdir.flow_count[pctype] = 0;
1131 
1132 	fdir_info->fdir_space_size = alloc + best;
1133 	fdir_info->fdir_actual_cnt = 0;
1134 	fdir_info->fdir_guarantee_total_space = alloc;
1135 	fdir_info->fdir_guarantee_free_space =
1136 		fdir_info->fdir_guarantee_total_space;
1137 
1138 	PMD_DRV_LOG(INFO, "FDIR guaranteed space: %u, best-effort space: %u.", alloc, best);
1139 
1140 	fdir_info->fdir_flow_pool.pool =
1141 			rte_zmalloc("i40e_fdir_entry",
1142 				sizeof(struct i40e_fdir_entry) *
1143 				fdir_info->fdir_space_size,
1144 				0);
1145 
1146 	if (!fdir_info->fdir_flow_pool.pool) {
1147 		PMD_INIT_LOG(ERR,
1148 			     "Failed to allocate memory for bitmap flow!");
1149 		ret = -ENOMEM;
1150 		goto err_fdir_bitmap_flow_alloc;
1151 	}
1152 
1153 	for (i = 0; i < fdir_info->fdir_space_size; i++)
1154 		fdir_info->fdir_flow_pool.pool[i].idx = i;
1155 
1156 	bmp_size =
1157 		rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
1158 	mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
1159 	if (mem == NULL) {
1160 		PMD_INIT_LOG(ERR,
1161 			     "Failed to allocate memory for fdir bitmap!");
1162 		ret = -ENOMEM;
1163 		goto err_fdir_mem_alloc;
1164 	}
1165 	bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
1166 	if (bmp == NULL) {
1167 		PMD_INIT_LOG(ERR,
1168 			     "Failed to initialize fdir bitmap!");
1169 		ret = -ENOMEM;
1170 		goto err_fdir_bmp_alloc;
1171 	}
1172 	for (i = 0; i < fdir_info->fdir_space_size; i++)
1173 		rte_bitmap_set(bmp, i);
1174 
1175 	fdir_info->fdir_flow_pool.bitmap = bmp;
1176 
1177 	return 0;
1178 
1179 err_fdir_bmp_alloc:
1180 	rte_free(mem);
1181 err_fdir_mem_alloc:
1182 	rte_free(fdir_info->fdir_flow_pool.pool);
1183 err_fdir_bitmap_flow_alloc:
1184 	rte_free(fdir_info->fdir_filter_array);
1185 err_fdir_filter_array_alloc:
1186 	rte_free(fdir_info->hash_map);
1187 err_fdir_hash_map_alloc:
1188 	rte_hash_free(fdir_info->hash_table);
1189 
1190 	return ret;
1191 }
1192 
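/* Reset the table of customized packet types (GTPC, ..., up to
 * I40E_CUSTOMIZED_MAX). Entries start out invalid; they are presumably
 * marked valid later when a DDP profile providing those packet types is
 * loaded (assumption based on the gtp/esp support flags below).
 */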
1193 static void
1194 i40e_init_customized_info(struct i40e_pf *pf)
1195 {
1196 	int i;
1197 
1198 	/* Initialize customized pctype */
1199 	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1200 		pf->customized_pctype[i].index = i;
1201 		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1202 		pf->customized_pctype[i].valid = false;
1203 	}
1204 
1205 	pf->gtp_support = false;
1206 	pf->esp_support = false;
1207 }
1208 
1209 static void
1210 i40e_init_filter_invalidation(struct i40e_pf *pf)
1211 {
1212 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1213 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1214 	uint32_t glqf_ctl_reg = 0;
1215 
1216 	glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
1217 	if (!pf->support_multi_driver) {
1218 		fdir_info->fdir_invalprio = 1;
1219 		glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
1220 		PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
1221 		i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
1222 	} else {
1223 		if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
1224 			fdir_info->fdir_invalprio = 1;
1225 			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
1226 		} else {
1227 			fdir_info->fdir_invalprio = 0;
1228 			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
1229 		}
1230 	}
1231 }
1232 
1233 void
1234 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1235 {
1236 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1237 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1238 	struct i40e_queue_regions *info = &pf->queue_region;
1239 	uint16_t i;
1240 
1241 	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1242 		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1243 
1244 	memset(info, 0, sizeof(struct i40e_queue_regions));
1245 }
1246 
1247 static int
1248 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1249 			       const char *value,
1250 			       void *opaque)
1251 {
1252 	struct i40e_pf *pf;
1253 	unsigned long support_multi_driver;
1254 	char *end;
1255 
1256 	pf = (struct i40e_pf *)opaque;
1257 
1258 	errno = 0;
1259 	support_multi_driver = strtoul(value, &end, 10);
1260 	if (errno != 0 || end == value || *end != 0) {
1261 		PMD_DRV_LOG(WARNING, "Wrong global configuration");
1262 		return -(EINVAL);
1263 	}
1264 
1265 	if (support_multi_driver == 1 || support_multi_driver == 0)
1266 		pf->support_multi_driver = (bool)support_multi_driver;
1267 	else
1268 		PMD_DRV_LOG(WARNING,
1269 			    "%s must be 1 or 0, enable global configuration by default.",
1270 			    ETH_I40E_SUPPORT_MULTI_DRIVER);
1271 	return 0;
1272 }
1273 
1274 static int
1275 i40e_support_multi_driver(struct rte_eth_dev *dev)
1276 {
1277 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1278 	struct rte_kvargs *kvlist;
1279 	int kvargs_count;
1280 
1281 	/* Enable global configuration by default */
1282 	pf->support_multi_driver = false;
1283 
1284 	if (!dev->device->devargs)
1285 		return 0;
1286 
1287 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1288 	if (!kvlist)
1289 		return -EINVAL;
1290 
1291 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1292 	if (!kvargs_count) {
1293 		rte_kvargs_free(kvlist);
1294 		return 0;
1295 	}
1296 
1297 	if (kvargs_count > 1)
1298 		PMD_DRV_LOG(WARNING, "More than one argument \"%s\"; only "
1299 			    "the first invalid or the last valid one is used!",
1300 			    ETH_I40E_SUPPORT_MULTI_DRIVER);
1301 
1302 	if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1303 			       i40e_parse_multi_drv_handler, pf) < 0) {
1304 		rte_kvargs_free(kvlist);
1305 		return -EINVAL;
1306 	}
1307 
1308 	rte_kvargs_free(kvlist);
1309 	return 0;
1310 }
1311 
1312 static int
1313 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1314 				    uint32_t reg_addr, uint64_t reg_val,
1315 				    struct i40e_asq_cmd_details *cmd_details)
1316 {
1317 	uint64_t ori_reg_val;
1318 	struct rte_eth_dev_data *dev_data =
1319 		((struct i40e_adapter *)hw->back)->pf.dev_data;
1320 	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
1321 	int ret;
1322 
1323 	ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1324 	if (ret != I40E_SUCCESS) {
1325 		PMD_DRV_LOG(ERR,
1326 			    "Fail to debug read from 0x%08x",
1327 			    reg_addr);
1328 		return -EIO;
1329 	}
1330 
1331 	if (ori_reg_val != reg_val)
1332 		PMD_DRV_LOG(WARNING,
1333 			    "i40e device %s changed global register [0x%08x]."
1334 			    " original: 0x%"PRIx64", after: 0x%"PRIx64,
1335 			    dev->device->name, reg_addr, ori_reg_val, reg_val);
1336 
1337 	return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1338 }
1339 
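/* Parse the vf_msg_cfg devargs value in the form "max_msg@period:ignore_second"
 * (for example 60@120:180), as matched by the sscanf() format string below.
 */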
1340 static int
1341 read_vf_msg_config(__rte_unused const char *key,
1342 			       const char *value,
1343 			       void *opaque)
1344 {
1345 	struct i40e_vf_msg_cfg *cfg = opaque;
1346 
1347 	if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1348 			&cfg->ignore_second) != 3) {
1349 		memset(cfg, 0, sizeof(*cfg));
1350 		PMD_DRV_LOG(ERR, "format error! example: "
1351 				"%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1352 		return -EINVAL;
1353 	}
1354 
1355 	/*
1356 	 * If the message validation function has been enabled, 'period'
1357 	 * and 'ignore_second' must be greater than 0.
1358 	 */
1359 	if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
1360 		memset(cfg, 0, sizeof(*cfg));
1361 		PMD_DRV_LOG(ERR, "%s error! the second and third"
1362 				" number must be greater than 0!",
1363 				ETH_I40E_VF_MSG_CFG);
1364 		return -EINVAL;
1365 	}
1366 
1367 	return 0;
1368 }
1369 
1370 static int
1371 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1372 		struct i40e_vf_msg_cfg *msg_cfg)
1373 {
1374 	struct rte_kvargs *kvlist;
1375 	int kvargs_count;
1376 	int ret = 0;
1377 
1378 	memset(msg_cfg, 0, sizeof(*msg_cfg));
1379 
1380 	if (!dev->device->devargs)
1381 		return ret;
1382 
1383 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1384 	if (!kvlist)
1385 		return -EINVAL;
1386 
1387 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1388 	if (!kvargs_count)
1389 		goto free_end;
1390 
1391 	if (kvargs_count > 1) {
1392 		PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1393 				ETH_I40E_VF_MSG_CFG);
1394 		ret = -EINVAL;
1395 		goto free_end;
1396 	}
1397 
1398 	if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1399 			read_vf_msg_config, msg_cfg) < 0)
1400 		ret = -EINVAL;
1401 
1402 free_end:
1403 	rte_kvargs_free(kvlist);
1404 	return ret;
1405 }
1406 
1407 #define I40E_ALARM_INTERVAL 50000 /* us */
1408 
1409 static int
1410 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1411 {
1412 	struct rte_pci_device *pci_dev;
1413 	struct rte_intr_handle *intr_handle;
1414 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1415 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1416 	struct i40e_vsi *vsi;
1417 	int ret;
1418 	uint32_t len, val;
1419 	uint8_t aq_fail = 0;
1420 
1421 	PMD_INIT_FUNC_TRACE();
1422 
1423 	dev->dev_ops = &i40e_eth_dev_ops;
1424 	dev->rx_queue_count = i40e_dev_rx_queue_count;
1425 	dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
1426 	dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
1427 	dev->rx_pkt_burst = i40e_recv_pkts;
1428 	dev->tx_pkt_burst = i40e_xmit_pkts;
1429 	dev->tx_pkt_prepare = i40e_prep_pkts;
1430 
1431 	/* For secondary processes, don't initialise any further, as the primary
1432 	 * process has already done this work. Only check whether a different
1433 	 * RX function is needed. */
1434 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1435 		i40e_set_rx_function(dev);
1436 		i40e_set_tx_function(dev);
1437 		return 0;
1438 	}
1439 	i40e_set_default_ptype_table(dev);
1440 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1441 	intr_handle = &pci_dev->intr_handle;
1442 
1443 	rte_eth_copy_pci_info(dev, pci_dev);
1444 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1445 
1446 	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1447 	pf->dev_data = dev->data;
1448 
1449 	hw->back = I40E_PF_TO_ADAPTER(pf);
1450 	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1451 	if (!hw->hw_addr) {
1452 		PMD_INIT_LOG(ERR,
1453 			"Hardware is not available, as address is NULL");
1454 		return -ENODEV;
1455 	}
1456 
1457 	hw->vendor_id = pci_dev->id.vendor_id;
1458 	hw->device_id = pci_dev->id.device_id;
1459 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1460 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1461 	hw->bus.device = pci_dev->addr.devid;
1462 	hw->bus.func = pci_dev->addr.function;
1463 	hw->adapter_stopped = 0;
1464 	hw->adapter_closed = 0;
1465 
1466 	/* Init switch device pointer */
1467 	hw->switch_dev = NULL;
1468 
1469 	/*
1470 	 * Switch Tag value should not be identical to either the First Tag
1471 	 * or Second Tag values. So set something other than common Ethertype
1472 	 * for internal switching.
1473 	 */
1474 	hw->switch_tag = 0xffff;
1475 
1476 	val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1477 	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1478 		PMD_INIT_LOG(ERR, "\nERROR: "
1479 			"Firmware recovery mode detected. Limiting functionality.\n"
1480 			"Refer to the Intel(R) Ethernet Adapters and Devices "
1481 			"User Guide for details on firmware recovery mode.");
1482 		return -EIO;
1483 	}
1484 
1485 	i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1486 	/* Check whether multi-driver support is needed */
1487 	i40e_support_multi_driver(dev);
1488 
1489 	/* Make sure all is clean before doing PF reset */
1490 	i40e_clear_hw(hw);
1491 
1492 	/* Reset here to make sure all is clean for each PF */
1493 	ret = i40e_pf_reset(hw);
1494 	if (ret) {
1495 		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1496 		return ret;
1497 	}
1498 
1499 	/* Initialize the shared code (base driver) */
1500 	ret = i40e_init_shared_code(hw);
1501 	if (ret) {
1502 		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1503 		return ret;
1504 	}
1505 
1506 	/* Initialize the parameters for adminq */
1507 	i40e_init_adminq_parameter(hw);
1508 	ret = i40e_init_adminq(hw);
1509 	if (ret != I40E_SUCCESS) {
1510 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1511 		return -EIO;
1512 	}
1513 	/* Firmware on SFP X722 does not support 802.1ad frames */
1514 	if (hw->device_id == I40E_DEV_ID_SFP_X722 ||
1515 		hw->device_id == I40E_DEV_ID_SFP_I_X722)
1516 		hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1517 
1518 	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1519 		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1520 		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
1521 		     ((hw->nvm.version >> 12) & 0xf),
1522 		     ((hw->nvm.version >> 4) & 0xff),
1523 		     (hw->nvm.version & 0xf), hw->nvm.eetrack);
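	/*
	 * For reference, the decode above treats hw->nvm.version as a packed
	 * word: bits 15:12 hold the NVM major version, bits 11:4 the minor
	 * version and bits 3:0 the patch level. A version word of 0x6180, for
	 * example, is logged as NVM 06.24.00 (0x18 == 24 decimal).
	 */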
1524 
1525 	/* Initialize the hardware */
1526 	i40e_hw_init(dev);
1527 
1528 	i40e_config_automask(pf);
1529 
1530 	i40e_set_default_pctype_table(dev);
1531 
1532 	/*
1533 	 * To work around an NVM issue, initialize the QinQ packet type
1534 	 * registers in software.
1535 	 * This should be removed once the issue is fixed in the NVM.
1536 	 */
1537 	if (!pf->support_multi_driver)
1538 		i40e_GLQF_reg_init(hw);
1539 
1540 	/* Initialize the input set for filters (hash and fd) to default value */
1541 	i40e_filter_input_set_init(pf);
1542 
1543 	/* initialise the L3_MAP register */
1544 	if (!pf->support_multi_driver) {
1545 		ret = i40e_aq_debug_write_global_register(hw,
1546 						   I40E_GLQF_L3_MAP(40),
1547 						   0x00000028,	NULL);
1548 		if (ret)
1549 			PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1550 				     ret);
1551 		PMD_INIT_LOG(DEBUG,
1552 			     "Global register 0x%08x is changed with 0x28",
1553 			     I40E_GLQF_L3_MAP(40));
1554 	}
1555 
1556 	/* Need the special FW version to support floating VEB */
1557 	config_floating_veb(dev);
1558 	/* Clear PXE mode */
1559 	i40e_clear_pxe_mode(hw);
1560 	i40e_dev_sync_phy_type(hw);
1561 
1562 	/*
1563 	 * On X710, performance numbers fall far short of expectations with
1564 	 * recent firmware versions, and the fix may not be integrated in the
1565 	 * next firmware release. A workaround in the software driver is
1566 	 * therefore needed: it modifies the initial values of 3 internal-only
1567 	 * registers. Note that the workaround can be removed once this is
1568 	 * fixed in firmware.
1569 	 */
1570 	i40e_configure_registers(hw);
1571 
1572 	/* Get hw capabilities */
1573 	ret = i40e_get_cap(hw);
1574 	if (ret != I40E_SUCCESS) {
1575 		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1576 		goto err_get_capabilities;
1577 	}
1578 
1579 	/* Initialize parameters for PF */
1580 	ret = i40e_pf_parameter_init(dev);
1581 	if (ret != 0) {
1582 		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1583 		goto err_parameter_init;
1584 	}
1585 
1586 	/* Initialize the queue management */
1587 	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1588 	if (ret < 0) {
1589 		PMD_INIT_LOG(ERR, "Failed to init queue pool");
1590 		goto err_qp_pool_init;
1591 	}
1592 	ret = i40e_res_pool_init(&pf->msix_pool, 1,
1593 				hw->func_caps.num_msix_vectors - 1);
1594 	if (ret < 0) {
1595 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1596 		goto err_msix_pool_init;
1597 	}
1598 
1599 	/* Initialize lan hmc */
1600 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1601 				hw->func_caps.num_rx_qp, 0, 0);
1602 	if (ret != I40E_SUCCESS) {
1603 		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1604 		goto err_init_lan_hmc;
1605 	}
1606 
1607 	/* Configure lan hmc */
1608 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1609 	if (ret != I40E_SUCCESS) {
1610 		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1611 		goto err_configure_lan_hmc;
1612 	}
1613 
1614 	/* Get and check the mac address */
1615 	i40e_get_mac_addr(hw, hw->mac.addr);
1616 	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1617 		PMD_INIT_LOG(ERR, "mac address is not valid");
1618 		ret = -EIO;
1619 		goto err_get_mac_addr;
1620 	}
1621 	/* Copy the permanent MAC address */
1622 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1623 			(struct rte_ether_addr *)hw->mac.perm_addr);
1624 
1625 	/* Disable flow control */
1626 	hw->fc.requested_mode = I40E_FC_NONE;
1627 	i40e_set_fc(hw, &aq_fail, TRUE);
1628 
1629 	/* Set the global registers with default ether type value */
1630 	if (!pf->support_multi_driver) {
1631 		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1632 					 RTE_ETHER_TYPE_VLAN);
1633 		if (ret != I40E_SUCCESS) {
1634 			PMD_INIT_LOG(ERR,
1635 				     "Failed to set the default outer "
1636 				     "VLAN ether type");
1637 			goto err_setup_pf_switch;
1638 		}
1639 	}
1640 
1641 	/* PF setup, which includes VSI setup */
1642 	ret = i40e_pf_setup(pf);
1643 	if (ret) {
1644 		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1645 		goto err_setup_pf_switch;
1646 	}
1647 
1648 	vsi = pf->main_vsi;
1649 
1650 	/* Disable double vlan by default */
1651 	i40e_vsi_config_double_vlan(vsi, FALSE);
1652 
1653 	/* Disable S-TAG identification when floating_veb is disabled */
1654 	if (!pf->floating_veb) {
1655 		ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1656 		if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1657 			ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1658 			I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1659 		}
1660 	}
1661 
1662 	if (!vsi->max_macaddrs)
1663 		len = RTE_ETHER_ADDR_LEN;
1664 	else
1665 		len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1666 
1667 	/* Must be done after the VSI is initialized */
1668 	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1669 	if (!dev->data->mac_addrs) {
1670 		PMD_INIT_LOG(ERR,
1671 			"Failed to allocate memory for storing MAC address");
1672 		goto err_mac_alloc;
1673 	}
1674 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1675 					&dev->data->mac_addrs[0]);
1676 
1677 	/* Init dcb to sw mode by default */
1678 	ret = i40e_dcb_init_configure(dev, TRUE);
1679 	if (ret != I40E_SUCCESS) {
1680 		PMD_INIT_LOG(INFO, "Failed to init dcb.");
1681 		pf->flags &= ~I40E_FLAG_DCB;
1682 	}
1683 	/* Update HW struct after DCB configuration */
1684 	i40e_get_cap(hw);
1685 
1686 	/* initialize pf host driver to set up SR-IOV resources if applicable */
1687 	i40e_pf_host_init(dev);
1688 
1689 	/* register callback func to eal lib */
1690 	rte_intr_callback_register(intr_handle,
1691 				   i40e_dev_interrupt_handler, dev);
1692 
1693 	/* configure and enable device interrupt */
1694 	i40e_pf_config_irq0(hw, TRUE);
1695 	i40e_pf_enable_irq0(hw);
1696 
1697 	/* enable uio intr after callback register */
1698 	rte_intr_enable(intr_handle);
1699 
1700 	/* By default disable flexible payload in global configuration */
1701 	if (!pf->support_multi_driver)
1702 		i40e_flex_payload_reg_set_default(hw);
1703 
1704 	/*
1705 	 * Add an ethertype filter to drop all flow control frames transmitted
1706 	 * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1707 	 * frames to wire.
1708 	 */
1709 	i40e_add_tx_flow_control_drop_filter(pf);
1710 
1711 	/* Set the max frame size to 0x2600 by default,
1712 	 * in case other drivers changed the default value.
1713 	 */
1714 	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
1715 
1716 	/* initialize RSS rule list */
1717 	TAILQ_INIT(&pf->rss_config_list);
1718 
1719 	/* initialize Traffic Manager configuration */
1720 	i40e_tm_conf_init(dev);
1721 
1722 	/* Initialize customized information */
1723 	i40e_init_customized_info(pf);
1724 
1725 	/* Initialize the filter invalidation configuration */
1726 	i40e_init_filter_invalidation(pf);
1727 
1728 	ret = i40e_init_ethtype_filter_list(dev);
1729 	if (ret < 0)
1730 		goto err_init_ethtype_filter_list;
1731 	ret = i40e_init_tunnel_filter_list(dev);
1732 	if (ret < 0)
1733 		goto err_init_tunnel_filter_list;
1734 	ret = i40e_init_fdir_filter_list(dev);
1735 	if (ret < 0)
1736 		goto err_init_fdir_filter_list;
1737 
1738 	/* initialize queue region configuration */
1739 	i40e_init_queue_region_conf(dev);
1740 
1741 	/* reset all stats of the device, including pf and main vsi */
1742 	i40e_dev_stats_reset(dev);
1743 
1744 	return 0;
1745 
1746 err_init_fdir_filter_list:
1747 	rte_hash_free(pf->tunnel.hash_table);
1748 	rte_free(pf->tunnel.hash_map);
1749 err_init_tunnel_filter_list:
1750 	rte_hash_free(pf->ethertype.hash_table);
1751 	rte_free(pf->ethertype.hash_map);
1752 err_init_ethtype_filter_list:
1753 	rte_intr_callback_unregister(intr_handle,
1754 		i40e_dev_interrupt_handler, dev);
1755 	rte_free(dev->data->mac_addrs);
1756 	dev->data->mac_addrs = NULL;
1757 err_mac_alloc:
1758 	i40e_vsi_release(pf->main_vsi);
1759 err_setup_pf_switch:
1760 err_get_mac_addr:
1761 err_configure_lan_hmc:
1762 	(void)i40e_shutdown_lan_hmc(hw);
1763 err_init_lan_hmc:
1764 	i40e_res_pool_destroy(&pf->msix_pool);
1765 err_msix_pool_init:
1766 	i40e_res_pool_destroy(&pf->qp_pool);
1767 err_qp_pool_init:
1768 err_parameter_init:
1769 err_get_capabilities:
1770 	(void)i40e_shutdown_adminq(hw);
1771 
1772 	return ret;
1773 }
1774 
1775 static void
1776 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1777 {
1778 	struct i40e_ethertype_filter *p_ethertype;
1779 	struct i40e_ethertype_rule *ethertype_rule;
1780 
1781 	ethertype_rule = &pf->ethertype;
1782 	/* Remove all ethertype filter rules and hash */
1783 	if (ethertype_rule->hash_map)
1784 		rte_free(ethertype_rule->hash_map);
1785 	if (ethertype_rule->hash_table)
1786 		rte_hash_free(ethertype_rule->hash_table);
1787 
1788 	while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1789 		TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1790 			     p_ethertype, rules);
1791 		rte_free(p_ethertype);
1792 	}
1793 }
1794 
1795 static void
1796 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1797 {
1798 	struct i40e_tunnel_filter *p_tunnel;
1799 	struct i40e_tunnel_rule *tunnel_rule;
1800 
1801 	tunnel_rule = &pf->tunnel;
1802 	/* Remove all tunnel filter rules and hash */
1803 	if (tunnel_rule->hash_map)
1804 		rte_free(tunnel_rule->hash_map);
1805 	if (tunnel_rule->hash_table)
1806 		rte_hash_free(tunnel_rule->hash_table);
1807 
1808 	while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1809 		TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1810 		rte_free(p_tunnel);
1811 	}
1812 }
1813 
1814 static void
1815 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1816 {
1817 	struct i40e_fdir_filter *p_fdir;
1818 	struct i40e_fdir_info *fdir_info;
1819 
1820 	fdir_info = &pf->fdir;
1821 
1822 	/* Remove all flow director rules */
1823 	while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
1824 		TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1825 }
1826 
1827 static void
1828 i40e_fdir_memory_cleanup(struct i40e_pf *pf)
1829 {
1830 	struct i40e_fdir_info *fdir_info;
1831 
1832 	fdir_info = &pf->fdir;
1833 
1834 	/* flow director memory cleanup */
1835 	if (fdir_info->hash_map)
1836 		rte_free(fdir_info->hash_map);
1837 	if (fdir_info->hash_table)
1838 		rte_hash_free(fdir_info->hash_table);
1839 	if (fdir_info->fdir_flow_pool.bitmap)
1840 		rte_free(fdir_info->fdir_flow_pool.bitmap);
1841 	if (fdir_info->fdir_flow_pool.pool)
1842 		rte_free(fdir_info->fdir_flow_pool.pool);
1843 	if (fdir_info->fdir_filter_array)
1844 		rte_free(fdir_info->fdir_filter_array);
1845 }
1846 
1847 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1848 {
1849 	/*
1850 	 * Disable flexible payload by default
1851 	 * for the corresponding L2/L3/L4 layers.
1852 	 */
1853 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1854 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1855 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1856 }
1857 
1858 static int
1859 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1860 {
1861 	struct i40e_hw *hw;
1862 
1863 	PMD_INIT_FUNC_TRACE();
1864 
1865 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1866 		return 0;
1867 
1868 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1869 
1870 	if (hw->adapter_closed == 0)
1871 		i40e_dev_close(dev);
1872 
1873 	return 0;
1874 }
1875 
1876 static int
1877 i40e_dev_configure(struct rte_eth_dev *dev)
1878 {
1879 	struct i40e_adapter *ad =
1880 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1881 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1882 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1883 	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1884 	int i, ret;
1885 
1886 	ret = i40e_dev_sync_phy_type(hw);
1887 	if (ret)
1888 		return ret;
1889 
1890 	/* Initialize to TRUE. If any Rx queue doesn't meet the bulk
1891 	 * allocation or vector Rx preconditions, we will reset it.
1892 	 */
1893 	ad->rx_bulk_alloc_allowed = true;
1894 	ad->rx_vec_allowed = true;
1895 	ad->tx_simple_allowed = true;
1896 	ad->tx_vec_allowed = true;
1897 
1898 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1899 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1900 
1901 	/* Only the legacy filter API needs the following fdir config. So when
1902 	 * the legacy filter API is deprecated, the following code should also
1903 	 * be removed.
1904 	 */
1905 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1906 		ret = i40e_fdir_setup(pf);
1907 		if (ret != I40E_SUCCESS) {
1908 			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1909 			return -ENOTSUP;
1910 		}
1911 		ret = i40e_fdir_configure(dev);
1912 		if (ret < 0) {
1913 			PMD_DRV_LOG(ERR, "failed to configure fdir.");
1914 			goto err;
1915 		}
1916 	} else
1917 		i40e_fdir_teardown(pf);
1918 
1919 	ret = i40e_dev_init_vlan(dev);
1920 	if (ret < 0)
1921 		goto err;
1922 
1923 	/* VMDQ setup.
1924 	 *  The general PMD call sequence is NIC init, configure,
1925 	 *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up the
1926 	 *  VSI that a specific queue belongs to when VMDQ is applicable, so
1927 	 *  VMDQ setup has to be done before rx/tx_queue_setup(); this function
1928 	 *  is a good place for vmdq_setup.
1929 	 *  RSS setup needs the actual number of configured RX queues, which is
1930 	 *  only known after rx_queue_setup(), so dev_start() is a good place
1931 	 *  for RSS setup.
1932 	 */
1933 	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1934 		ret = i40e_vmdq_setup(dev);
1935 		if (ret)
1936 			goto err;
1937 	}
1938 
1939 	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1940 		ret = i40e_dcb_setup(dev);
1941 		if (ret) {
1942 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
1943 			goto err_dcb;
1944 		}
1945 	}
1946 
1947 	TAILQ_INIT(&pf->flow_list);
1948 
1949 	return 0;
1950 
1951 err_dcb:
1952 	/* need to release VMDQ resources if they exist */
1953 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1954 		i40e_vsi_release(pf->vmdq[i].vsi);
1955 		pf->vmdq[i].vsi = NULL;
1956 	}
1957 	rte_free(pf->vmdq);
1958 	pf->vmdq = NULL;
1959 err:
1960 	/* Need to release fdir resources if they exist.
1961 	 * Only the legacy filter API needs the following fdir config. So when
1962 	 * the legacy filter API is deprecated, the following code should also
1963 	 * be removed.
1964 	 */
1965 	i40e_fdir_teardown(pf);
1966 	return ret;
1967 }
1968 
1969 void
1970 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1971 {
1972 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
1973 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1974 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1975 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1976 	uint16_t msix_vect = vsi->msix_intr;
1977 	uint16_t i;
1978 
1979 	for (i = 0; i < vsi->nb_qps; i++) {
1980 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1981 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1982 		rte_wmb();
1983 	}
1984 
1985 	if (vsi->type != I40E_VSI_SRIOV) {
1986 		if (!rte_intr_allow_others(intr_handle)) {
1987 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1988 				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1989 			I40E_WRITE_REG(hw,
1990 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1991 				       0);
1992 		} else {
1993 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1994 				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1995 			I40E_WRITE_REG(hw,
1996 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1997 						       msix_vect - 1), 0);
1998 		}
1999 	} else {
2000 		uint32_t reg;
2001 		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2002 			vsi->user_param + (msix_vect - 1);
2003 
2004 		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2005 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
2006 	}
2007 	I40E_WRITE_FLUSH(hw);
2008 }
2009 
2010 static void
2011 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
2012 		       int base_queue, int nb_queue,
2013 		       uint16_t itr_idx)
2014 {
2015 	int i;
2016 	uint32_t val;
2017 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2018 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2019 
2020 	/* Bind all RX queues to allocated MSIX interrupt */
2021 	for (i = 0; i < nb_queue; i++) {
2022 		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2023 			itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
2024 			((base_queue + i + 1) <<
2025 			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2026 			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2027 			I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2028 
2029 		if (i == nb_queue - 1)
2030 			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
2031 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
2032 	}
2033 
2034 	/* Write first RX queue to Link list register as the head element */
2035 	if (vsi->type != I40E_VSI_SRIOV) {
2036 		uint16_t interval =
2037 			i40e_calc_itr_interval(1, pf->support_multi_driver);
2038 
2039 		if (msix_vect == I40E_MISC_VEC_ID) {
2040 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
2041 				       (base_queue <<
2042 					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2043 				       (0x0 <<
2044 					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2045 			I40E_WRITE_REG(hw,
2046 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
2047 				       interval);
2048 		} else {
2049 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
2050 				       (base_queue <<
2051 					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2052 				       (0x0 <<
2053 					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2054 			I40E_WRITE_REG(hw,
2055 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
2056 						       msix_vect - 1),
2057 				       interval);
2058 		}
2059 	} else {
2060 		uint32_t reg;
2061 
2062 		if (msix_vect == I40E_MISC_VEC_ID) {
2063 			I40E_WRITE_REG(hw,
2064 				       I40E_VPINT_LNKLST0(vsi->user_param),
2065 				       (base_queue <<
2066 					I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2067 				       (0x0 <<
2068 					I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
2069 		} else {
2070 			/* num_msix_vectors_vf includes irq0, which must be excluded */
2071 			reg = (hw->func_caps.num_msix_vectors_vf - 1) *
2072 				vsi->user_param + (msix_vect - 1);
2073 
2074 			I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
2075 				       (base_queue <<
2076 					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2077 				       (0x0 <<
2078 					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2079 		}
2080 	}
2081 
2082 	I40E_WRITE_FLUSH(hw);
2083 }
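/*
 * Worked example of the chaining performed by __vsi_queues_bind_intr()
 * (illustrative values): binding vector 3 to queues 8..10 of a PF VSI
 * programs
 *
 *   QINT_RQCTL(8):  MSIX_INDX=3, NEXTQ_INDX=9,  CAUSE_ENA
 *   QINT_RQCTL(9):  MSIX_INDX=3, NEXTQ_INDX=10, CAUSE_ENA
 *   QINT_RQCTL(10): MSIX_INDX=3, NEXTQ_INDX terminated (end of list)
 *
 * and then writes queue 8 into PFINT_LNKLSTN(2) (vector index minus one) as
 * the head of that vector's cause list, along with the default ITR interval.
 */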
2084 
2085 int
2086 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2087 {
2088 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2089 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2090 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2091 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2092 	uint16_t msix_vect = vsi->msix_intr;
2093 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2094 	uint16_t queue_idx = 0;
2095 	int record = 0;
2096 	int i;
2097 
2098 	for (i = 0; i < vsi->nb_qps; i++) {
2099 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2100 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2101 	}
2102 
2103 	/* VF bind interrupt */
2104 	if (vsi->type == I40E_VSI_SRIOV) {
2105 		if (vsi->nb_msix == 0) {
2106 			PMD_DRV_LOG(ERR, "No msix resource");
2107 			return -EINVAL;
2108 		}
2109 		__vsi_queues_bind_intr(vsi, msix_vect,
2110 				       vsi->base_queue, vsi->nb_qps,
2111 				       itr_idx);
2112 		return 0;
2113 	}
2114 
2115 	/* PF & VMDq bind interrupt */
2116 	if (rte_intr_dp_is_en(intr_handle)) {
2117 		if (vsi->type == I40E_VSI_MAIN) {
2118 			queue_idx = 0;
2119 			record = 1;
2120 		} else if (vsi->type == I40E_VSI_VMDQ2) {
2121 			struct i40e_vsi *main_vsi =
2122 				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2123 			queue_idx = vsi->base_queue - main_vsi->nb_qps;
2124 			record = 1;
2125 		}
2126 	}
2127 
2128 	for (i = 0; i < vsi->nb_used_qps; i++) {
2129 		if (vsi->nb_msix == 0) {
2130 			PMD_DRV_LOG(ERR, "No msix resource");
2131 			return -EINVAL;
2132 		} else if (nb_msix <= 1) {
2133 			if (!rte_intr_allow_others(intr_handle))
2134 				/* allow sharing MISC_VEC_ID */
2135 				msix_vect = I40E_MISC_VEC_ID;
2136 
2137 			/* not enough msix vectors, map all remaining queues to one */
2138 			__vsi_queues_bind_intr(vsi, msix_vect,
2139 					       vsi->base_queue + i,
2140 					       vsi->nb_used_qps - i,
2141 					       itr_idx);
2142 			for (; !!record && i < vsi->nb_used_qps; i++)
2143 				intr_handle->intr_vec[queue_idx + i] =
2144 					msix_vect;
2145 			break;
2146 		}
2147 		/* 1:1 queue/msix_vect mapping */
2148 		__vsi_queues_bind_intr(vsi, msix_vect,
2149 				       vsi->base_queue + i, 1,
2150 				       itr_idx);
2151 		if (!!record)
2152 			intr_handle->intr_vec[queue_idx + i] = msix_vect;
2153 
2154 		msix_vect++;
2155 		nb_msix--;
2156 	}
2157 
2158 	return 0;
2159 }
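/*
 * In short, i40e_vsi_queues_bind_intr() uses two mapping strategies: while
 * more than one MSI-X vector remains, each queue gets its own vector (1:1);
 * once only one vector is left, all remaining queues are chained onto it.
 * A rough sketch of the resulting intr_vec[] for a main VSI with 4 queues
 * and nb_efd == 2 (assumed values, for illustration only):
 *
 *   intr_vec[0]    = msix_intr      (queue 0 on its own vector)
 *   intr_vec[1..3] = msix_intr + 1  (queues 1-3 share the last vector)
 */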
2160 
2161 void
2162 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2163 {
2164 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2165 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2166 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2167 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2168 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2169 	uint16_t msix_intr, i;
2170 
2171 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2172 		for (i = 0; i < vsi->nb_msix; i++) {
2173 			msix_intr = vsi->msix_intr + i;
2174 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2175 				I40E_PFINT_DYN_CTLN_INTENA_MASK |
2176 				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2177 				I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2178 		}
2179 	else
2180 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2181 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
2182 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2183 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2184 
2185 	I40E_WRITE_FLUSH(hw);
2186 }
2187 
2188 void
2189 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2190 {
2191 	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
2192 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2193 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2194 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2195 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2196 	uint16_t msix_intr, i;
2197 
2198 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2199 		for (i = 0; i < vsi->nb_msix; i++) {
2200 			msix_intr = vsi->msix_intr + i;
2201 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2202 				       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2203 		}
2204 	else
2205 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2206 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2207 
2208 	I40E_WRITE_FLUSH(hw);
2209 }
2210 
2211 static inline uint8_t
2212 i40e_parse_link_speeds(uint16_t link_speeds)
2213 {
2214 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2215 
2216 	if (link_speeds & ETH_LINK_SPEED_40G)
2217 		link_speed |= I40E_LINK_SPEED_40GB;
2218 	if (link_speeds & ETH_LINK_SPEED_25G)
2219 		link_speed |= I40E_LINK_SPEED_25GB;
2220 	if (link_speeds & ETH_LINK_SPEED_20G)
2221 		link_speed |= I40E_LINK_SPEED_20GB;
2222 	if (link_speeds & ETH_LINK_SPEED_10G)
2223 		link_speed |= I40E_LINK_SPEED_10GB;
2224 	if (link_speeds & ETH_LINK_SPEED_1G)
2225 		link_speed |= I40E_LINK_SPEED_1GB;
2226 	if (link_speeds & ETH_LINK_SPEED_100M)
2227 		link_speed |= I40E_LINK_SPEED_100MB;
2228 
2229 	return link_speed;
2230 }
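/*
 * For example (illustrative), configuring
 *   dev_conf.link_speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G
 * is translated by i40e_parse_link_speeds() into
 *   I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB,
 * which i40e_phy_conf_link() below then checks against the speeds reported
 * by the PHY before programming the link.
 */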
2231 
2232 static int
2233 i40e_phy_conf_link(struct i40e_hw *hw,
2234 		   uint8_t abilities,
2235 		   uint8_t force_speed,
2236 		   bool is_up)
2237 {
2238 	enum i40e_status_code status;
2239 	struct i40e_aq_get_phy_abilities_resp phy_ab;
2240 	struct i40e_aq_set_phy_config phy_conf;
2241 	enum i40e_aq_phy_type cnt;
2242 	uint8_t avail_speed;
2243 	uint32_t phy_type_mask = 0;
2244 
2245 	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2246 			I40E_AQ_PHY_FLAG_PAUSE_RX |
2247 			I40E_AQ_PHY_FLAG_PAUSE_RX |
2248 			I40E_AQ_PHY_FLAG_LOW_POWER;
2249 	int ret = -ENOTSUP;
2250 
2251 	/* Get the PHY capabilities to learn the available speeds. */
2252 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2253 					      NULL);
2254 	if (status) {
2255 		PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2256 				status);
2257 		return ret;
2258 	}
2259 	avail_speed = phy_ab.link_speed;
2260 
2261 	/* Get the current PHY config. */
2262 	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2263 					      NULL);
2264 	if (status) {
2265 		PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2266 				status);
2267 		return ret;
2268 	}
2269 
2270 	/* If the link needs to go up, it is in autoneg mode and the speed is
2271 	 * OK, there is no need to set it up again.
2272 	 */
2273 	if (is_up && phy_ab.phy_type != 0 &&
2274 		     abilities & I40E_AQ_PHY_AN_ENABLED &&
2275 		     phy_ab.link_speed != 0)
2276 		return I40E_SUCCESS;
2277 
2278 	memset(&phy_conf, 0, sizeof(phy_conf));
2279 
2280 	/* bits 0-2 use the values from get_phy_abilities_resp */
2281 	abilities &= ~mask;
2282 	abilities |= phy_ab.abilities & mask;
2283 
2284 	phy_conf.abilities = abilities;
2285 
2286 	/* If the link needs to go up but the forced speed is not supported,
2287 	 * warn the user and configure the default available speeds.
2288 	 */
2289 	if (is_up && !(force_speed & avail_speed)) {
2290 		PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2291 		phy_conf.link_speed = avail_speed;
2292 	} else {
2293 		phy_conf.link_speed = is_up ? force_speed : avail_speed;
2294 	}
2295 
2296 	/* PHY type mask needs to include each type except PHY type extension */
2297 	for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2298 		phy_type_mask |= 1 << cnt;
2299 
2300 	/* use get_phy_abilities_resp value for the rest */
2301 	phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2302 	phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2303 		I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2304 		I40E_AQ_PHY_TYPE_EXT_25G_LR | I40E_AQ_PHY_TYPE_EXT_25G_AOC |
2305 		I40E_AQ_PHY_TYPE_EXT_25G_ACC) : 0;
2306 	phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2307 	phy_conf.eee_capability = phy_ab.eee_capability;
2308 	phy_conf.eeer = phy_ab.eeer_val;
2309 	phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2310 
2311 	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2312 		    phy_ab.abilities, phy_ab.link_speed);
2313 	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2314 		    phy_conf.abilities, phy_conf.link_speed);
2315 
2316 	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2317 	if (status)
2318 		return ret;
2319 
2320 	return I40E_SUCCESS;
2321 }
2322 
2323 static int
2324 i40e_apply_link_speed(struct rte_eth_dev *dev)
2325 {
2326 	uint8_t speed;
2327 	uint8_t abilities = 0;
2328 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2329 	struct rte_eth_conf *conf = &dev->data->dev_conf;
2330 
2331 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2332 		     I40E_AQ_PHY_LINK_ENABLED;
2333 
2334 	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2335 		conf->link_speeds = ETH_LINK_SPEED_40G |
2336 				    ETH_LINK_SPEED_25G |
2337 				    ETH_LINK_SPEED_20G |
2338 				    ETH_LINK_SPEED_10G |
2339 				    ETH_LINK_SPEED_1G |
2340 				    ETH_LINK_SPEED_100M;
2341 
2342 		abilities |= I40E_AQ_PHY_AN_ENABLED;
2343 	} else {
2344 		abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2345 	}
2346 	speed = i40e_parse_link_speeds(conf->link_speeds);
2347 
2348 	return i40e_phy_conf_link(hw, abilities, speed, true);
2349 }
2350 
2351 static int
2352 i40e_dev_start(struct rte_eth_dev *dev)
2353 {
2354 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2355 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2356 	struct i40e_vsi *main_vsi = pf->main_vsi;
2357 	int ret, i;
2358 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2359 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2360 	uint32_t intr_vector = 0;
2361 	struct i40e_vsi *vsi;
2362 	uint16_t nb_rxq, nb_txq;
2363 
2364 	hw->adapter_stopped = 0;
2365 
2366 	rte_intr_disable(intr_handle);
2367 
2368 	if ((rte_intr_cap_multiple(intr_handle) ||
2369 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
2370 	    dev->data->dev_conf.intr_conf.rxq != 0) {
2371 		intr_vector = dev->data->nb_rx_queues;
2372 		ret = rte_intr_efd_enable(intr_handle, intr_vector);
2373 		if (ret)
2374 			return ret;
2375 	}
2376 
2377 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2378 		intr_handle->intr_vec =
2379 			rte_zmalloc("intr_vec",
2380 				    dev->data->nb_rx_queues * sizeof(int),
2381 				    0);
2382 		if (!intr_handle->intr_vec) {
2383 			PMD_INIT_LOG(ERR,
2384 				"Failed to allocate %d rx_queues intr_vec",
2385 				dev->data->nb_rx_queues);
2386 			return -ENOMEM;
2387 		}
2388 	}
2389 
2390 	/* Initialize VSI */
2391 	ret = i40e_dev_rxtx_init(pf);
2392 	if (ret != I40E_SUCCESS) {
2393 		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2394 		return ret;
2395 	}
2396 
2397 	/* Map queues with MSIX interrupt */
2398 	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2399 		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2400 	ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2401 	if (ret < 0)
2402 		return ret;
2403 	i40e_vsi_enable_queues_intr(main_vsi);
2404 
2405 	/* Map VMDQ VSI queues with MSIX interrupt */
2406 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2407 		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2408 		ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2409 						I40E_ITR_INDEX_DEFAULT);
2410 		if (ret < 0)
2411 			return ret;
2412 		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2413 	}
2414 
2415 	/* Enable all queues which have been configured */
2416 	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
2417 		ret = i40e_dev_rx_queue_start(dev, nb_rxq);
2418 		if (ret)
2419 			goto rx_err;
2420 	}
2421 
2422 	for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
2423 		ret = i40e_dev_tx_queue_start(dev, nb_txq);
2424 		if (ret)
2425 			goto tx_err;
2426 	}
2427 
2428 	/* Enable receiving broadcast packets */
2429 	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2430 	if (ret != I40E_SUCCESS)
2431 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2432 
2433 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2434 		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2435 						true, NULL);
2436 		if (ret != I40E_SUCCESS)
2437 			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2438 	}
2439 
2440 	/* Enable the VLAN promiscuous mode. */
2441 	if (pf->vfs) {
2442 		for (i = 0; i < pf->vf_num; i++) {
2443 			vsi = pf->vfs[i].vsi;
2444 			i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2445 						     true, NULL);
2446 		}
2447 	}
2448 
2449 	/* Enable mac loopback mode */
2450 	if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2451 	    dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2452 		ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2453 		if (ret != I40E_SUCCESS) {
2454 			PMD_DRV_LOG(ERR, "fail to set loopback link");
2455 			goto tx_err;
2456 		}
2457 	}
2458 
2459 	/* Apply link configuration */
2460 	ret = i40e_apply_link_speed(dev);
2461 	if (I40E_SUCCESS != ret) {
2462 		PMD_DRV_LOG(ERR, "Fail to apply link setting");
2463 		goto tx_err;
2464 	}
2465 
2466 	if (!rte_intr_allow_others(intr_handle)) {
2467 		rte_intr_callback_unregister(intr_handle,
2468 					     i40e_dev_interrupt_handler,
2469 					     (void *)dev);
2470 		/* configure and enable device interrupt */
2471 		i40e_pf_config_irq0(hw, FALSE);
2472 		i40e_pf_enable_irq0(hw);
2473 
2474 		if (dev->data->dev_conf.intr_conf.lsc != 0)
2475 			PMD_INIT_LOG(INFO,
2476 				"lsc won't be enabled because interrupt multiplexing is unavailable");
2477 	} else {
2478 		ret = i40e_aq_set_phy_int_mask(hw,
2479 					       ~(I40E_AQ_EVENT_LINK_UPDOWN |
2480 					       I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2481 					       I40E_AQ_EVENT_MEDIA_NA), NULL);
2482 		if (ret != I40E_SUCCESS)
2483 			PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2484 
2485 		/* Call the get_link_info AQ command to enable/disable LSE */
2486 		i40e_dev_link_update(dev, 0);
2487 	}
2488 
2489 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2490 		rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2491 				  i40e_dev_alarm_handler, dev);
2492 	} else {
2493 		/* enable uio intr after callback register */
2494 		rte_intr_enable(intr_handle);
2495 	}
2496 
2497 	i40e_filter_restore(pf);
2498 
2499 	if (pf->tm_conf.root && !pf->tm_conf.committed)
2500 		PMD_DRV_LOG(WARNING,
2501 			    "please call hierarchy_commit() "
2502 			    "before starting the port");
2503 
2504 	return I40E_SUCCESS;
2505 
2506 tx_err:
2507 	for (i = 0; i < nb_txq; i++)
2508 		i40e_dev_tx_queue_stop(dev, i);
2509 rx_err:
2510 	for (i = 0; i < nb_rxq; i++)
2511 		i40e_dev_rx_queue_stop(dev, i);
2512 
2513 	return ret;
2514 }
2515 
2516 static int
2517 i40e_dev_stop(struct rte_eth_dev *dev)
2518 {
2519 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2520 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2521 	struct i40e_vsi *main_vsi = pf->main_vsi;
2522 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2523 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2524 	int i;
2525 
2526 	if (hw->adapter_stopped == 1)
2527 		return 0;
2528 
2529 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2530 		rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2531 		rte_intr_enable(intr_handle);
2532 	}
2533 
2534 	/* Disable all queues */
2535 	for (i = 0; i < dev->data->nb_tx_queues; i++)
2536 		i40e_dev_tx_queue_stop(dev, i);
2537 
2538 	for (i = 0; i < dev->data->nb_rx_queues; i++)
2539 		i40e_dev_rx_queue_stop(dev, i);
2540 
2541 	/* un-map queues with interrupt registers */
2542 	i40e_vsi_disable_queues_intr(main_vsi);
2543 	i40e_vsi_queues_unbind_intr(main_vsi);
2544 
2545 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2546 		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2547 		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2548 	}
2549 
2550 	/* Clear all queues and release memory */
2551 	i40e_dev_clear_queues(dev);
2552 
2553 	/* Set link down */
2554 	i40e_dev_set_link_down(dev);
2555 
2556 	if (!rte_intr_allow_others(intr_handle))
2557 		/* resume to the default handler */
2558 		rte_intr_callback_register(intr_handle,
2559 					   i40e_dev_interrupt_handler,
2560 					   (void *)dev);
2561 
2562 	/* Clean datapath event and queue/vec mapping */
2563 	rte_intr_efd_disable(intr_handle);
2564 	if (intr_handle->intr_vec) {
2565 		rte_free(intr_handle->intr_vec);
2566 		intr_handle->intr_vec = NULL;
2567 	}
2568 
2569 	/* reset hierarchy commit */
2570 	pf->tm_conf.committed = false;
2571 
2572 	hw->adapter_stopped = 1;
2573 	dev->data->dev_started = 0;
2574 
2575 	pf->adapter->rss_reta_updated = 0;
2576 
2577 	return 0;
2578 }
2579 
2580 static int
2581 i40e_dev_close(struct rte_eth_dev *dev)
2582 {
2583 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2584 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2585 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2586 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2587 	struct i40e_filter_control_settings settings;
2588 	struct rte_flow *p_flow;
2589 	uint32_t reg;
2590 	int i;
2591 	int ret;
2592 	uint8_t aq_fail = 0;
2593 	int retries = 0;
2594 
2595 	PMD_INIT_FUNC_TRACE();
2596 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2597 		return 0;
2598 
2599 	ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2600 	if (ret)
2601 		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2602 
2603 
2604 	ret = i40e_dev_stop(dev);
2605 
2606 	i40e_dev_free_queues(dev);
2607 
2608 	/* Disable interrupt */
2609 	i40e_pf_disable_irq0(hw);
2610 	rte_intr_disable(intr_handle);
2611 
2612 	/*
2613 	 * Only legacy filter API needs the following fdir config. So when the
2614 	 * legacy filter API is deprecated, the following code should also be
2615 	 * removed.
2616 	 */
2617 	i40e_fdir_teardown(pf);
2618 
2619 	/* shutdown and destroy the HMC */
2620 	i40e_shutdown_lan_hmc(hw);
2621 
2622 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2623 		i40e_vsi_release(pf->vmdq[i].vsi);
2624 		pf->vmdq[i].vsi = NULL;
2625 	}
2626 	rte_free(pf->vmdq);
2627 	pf->vmdq = NULL;
2628 
2629 	/* release all the existing VSIs and VEBs */
2630 	i40e_vsi_release(pf->main_vsi);
2631 
2632 	/* shutdown the adminq */
2633 	i40e_aq_queue_shutdown(hw, true);
2634 	i40e_shutdown_adminq(hw);
2635 
2636 	i40e_res_pool_destroy(&pf->qp_pool);
2637 	i40e_res_pool_destroy(&pf->msix_pool);
2638 
2639 	/* Disable flexible payload in global configuration */
2640 	if (!pf->support_multi_driver)
2641 		i40e_flex_payload_reg_set_default(hw);
2642 
2643 	/* force a PF reset to clean anything leftover */
2644 	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2645 	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2646 			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2647 	I40E_WRITE_FLUSH(hw);
2648 
2649 	/* Clear PXE mode */
2650 	i40e_clear_pxe_mode(hw);
2651 
2652 	/* Unconfigure filter control */
2653 	memset(&settings, 0, sizeof(settings));
2654 	ret = i40e_set_filter_control(hw, &settings);
2655 	if (ret)
2656 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2657 					ret);
2658 
2659 	/* Disable flow control */
2660 	hw->fc.requested_mode = I40E_FC_NONE;
2661 	i40e_set_fc(hw, &aq_fail, TRUE);
2662 
2663 	/* uninitialize pf host driver */
2664 	i40e_pf_host_uninit(dev);
2665 
2666 	do {
2667 		ret = rte_intr_callback_unregister(intr_handle,
2668 				i40e_dev_interrupt_handler, dev);
2669 		if (ret >= 0 || ret == -ENOENT) {
2670 			break;
2671 		} else if (ret != -EAGAIN) {
2672 			PMD_INIT_LOG(ERR,
2673 				 "intr callback unregister failed: %d",
2674 				 ret);
2675 		}
2676 		i40e_msec_delay(500);
2677 	} while (retries++ < 5);
2678 
2679 	i40e_rm_ethtype_filter_list(pf);
2680 	i40e_rm_tunnel_filter_list(pf);
2681 	i40e_rm_fdir_filter_list(pf);
2682 
2683 	/* Remove all flows */
2684 	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2685 		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2686 		/* Do not free FDIR flows since they are statically allocated */
2687 		if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
2688 			rte_free(p_flow);
2689 	}
2690 
2691 	/* release the statically allocated fdir memory */
2692 	i40e_fdir_memory_cleanup(pf);
2693 
2694 	/* Remove all Traffic Manager configuration */
2695 	i40e_tm_conf_uninit(dev);
2696 
2697 	i40e_clear_automask(pf);
2698 
2699 	hw->adapter_closed = 1;
2700 	return ret;
2701 }
2702 
2703 /*
2704  * Reset PF device only to re-initialize resources in PMD layer
2705  */
2706 static int
2707 i40e_dev_reset(struct rte_eth_dev *dev)
2708 {
2709 	int ret;
2710 
2711 	/* When a DPDK PMD PF begins to reset the PF port, it should notify
2712 	 * all of its VFs so that they stay aligned with it. The notification
2713 	 * mechanism is PMD specific and, for the i40e PF, rather complex.
2714 	 * To avoid unexpected VF behavior, resetting the PF while SR-IOV is
2715 	 * active is currently not supported. It might be supported later.
2716 	 */
2717 	if (dev->data->sriov.active)
2718 		return -ENOTSUP;
2719 
2720 	ret = eth_i40e_dev_uninit(dev);
2721 	if (ret)
2722 		return ret;
2723 
2724 	ret = eth_i40e_dev_init(dev, NULL);
2725 
2726 	return ret;
2727 }
2728 
2729 static int
2730 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2731 {
2732 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2733 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2734 	struct i40e_vsi *vsi = pf->main_vsi;
2735 	int status;
2736 
2737 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2738 						     true, NULL, true);
2739 	if (status != I40E_SUCCESS) {
2740 		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2741 		return -EAGAIN;
2742 	}
2743 
2744 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2745 							TRUE, NULL);
2746 	if (status != I40E_SUCCESS) {
2747 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2748 		/* Rollback unicast promiscuous mode */
2749 		i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2750 						    false, NULL, true);
2751 		return -EAGAIN;
2752 	}
2753 
2754 	return 0;
2755 }
2756 
2757 static int
2758 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2759 {
2760 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2761 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2762 	struct i40e_vsi *vsi = pf->main_vsi;
2763 	int status;
2764 
2765 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2766 						     false, NULL, true);
2767 	if (status != I40E_SUCCESS) {
2768 		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2769 		return -EAGAIN;
2770 	}
2771 
2772 	/* must remain in all_multicast mode */
2773 	if (dev->data->all_multicast == 1)
2774 		return 0;
2775 
2776 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2777 							false, NULL);
2778 	if (status != I40E_SUCCESS) {
2779 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2780 		/* Rollback unicast promiscuous mode */
2781 		i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2782 						    true, NULL, true);
2783 		return -EAGAIN;
2784 	}
2785 
2786 	return 0;
2787 }
2788 
2789 static int
2790 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2791 {
2792 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2793 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2794 	struct i40e_vsi *vsi = pf->main_vsi;
2795 	int ret;
2796 
2797 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2798 	if (ret != I40E_SUCCESS) {
2799 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2800 		return -EAGAIN;
2801 	}
2802 
2803 	return 0;
2804 }
2805 
2806 static int
2807 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2808 {
2809 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2810 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2811 	struct i40e_vsi *vsi = pf->main_vsi;
2812 	int ret;
2813 
2814 	if (dev->data->promiscuous == 1)
2815 		return 0; /* must remain in all_multicast mode */
2816 
2817 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2818 				vsi->seid, FALSE, NULL);
2819 	if (ret != I40E_SUCCESS) {
2820 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2821 		return -EAGAIN;
2822 	}
2823 
2824 	return 0;
2825 }
2826 
2827 /*
2828  * Set device link up.
2829  */
2830 static int
2831 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2832 {
2833 	/* re-apply link speed setting */
2834 	return i40e_apply_link_speed(dev);
2835 }
2836 
2837 /*
2838  * Set device link down.
2839  */
2840 static int
2841 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2842 {
2843 	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2844 	uint8_t abilities = 0;
2845 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2846 
2847 	abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2848 	return i40e_phy_conf_link(hw, abilities, speed, false);
2849 }
2850 
2851 static __rte_always_inline void
2852 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2853 {
2854 /* Link status registers and values */
2855 #define I40E_PRTMAC_LINKSTA		0x001E2420
2856 #define I40E_REG_LINK_UP		0x40000080
2857 #define I40E_PRTMAC_MACC		0x001E24E0
2858 #define I40E_REG_MACC_25GB		0x00020000
2859 #define I40E_REG_SPEED_MASK		0x38000000
2860 #define I40E_REG_SPEED_0		0x00000000
2861 #define I40E_REG_SPEED_1		0x08000000
2862 #define I40E_REG_SPEED_2		0x10000000
2863 #define I40E_REG_SPEED_3		0x18000000
2864 #define I40E_REG_SPEED_4		0x20000000
2865 	uint32_t link_speed;
2866 	uint32_t reg_val;
2867 
2868 	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2869 	link_speed = reg_val & I40E_REG_SPEED_MASK;
2870 	reg_val &= I40E_REG_LINK_UP;
2871 	link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2872 
2873 	if (unlikely(link->link_status == 0))
2874 		return;
2875 
2876 	/* Parse the link status */
2877 	switch (link_speed) {
2878 	case I40E_REG_SPEED_0:
2879 		link->link_speed = ETH_SPEED_NUM_100M;
2880 		break;
2881 	case I40E_REG_SPEED_1:
2882 		link->link_speed = ETH_SPEED_NUM_1G;
2883 		break;
2884 	case I40E_REG_SPEED_2:
2885 		if (hw->mac.type == I40E_MAC_X722)
2886 			link->link_speed = ETH_SPEED_NUM_2_5G;
2887 		else
2888 			link->link_speed = ETH_SPEED_NUM_10G;
2889 		break;
2890 	case I40E_REG_SPEED_3:
2891 		if (hw->mac.type == I40E_MAC_X722) {
2892 			link->link_speed = ETH_SPEED_NUM_5G;
2893 		} else {
2894 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2895 
2896 			if (reg_val & I40E_REG_MACC_25GB)
2897 				link->link_speed = ETH_SPEED_NUM_25G;
2898 			else
2899 				link->link_speed = ETH_SPEED_NUM_40G;
2900 		}
2901 		break;
2902 	case I40E_REG_SPEED_4:
2903 		if (hw->mac.type == I40E_MAC_X722)
2904 			link->link_speed = ETH_SPEED_NUM_10G;
2905 		else
2906 			link->link_speed = ETH_SPEED_NUM_20G;
2907 		break;
2908 	default:
2909 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2910 		break;
2911 	}
2912 }
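/*
 * Summarizing the decode above, the PRTMAC_LINKSTA speed field maps as
 * follows (X722 parts differ for codes 2-4):
 *   code 0 -> 100M, code 1 -> 1G,
 *   code 2 -> 10G (2.5G on X722),
 *   code 3 -> 40G, or 25G when PRTMAC_MACC reports it (5G on X722),
 *   code 4 -> 20G (10G on X722).
 */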
2913 
2914 static __rte_always_inline void
2915 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2916 	bool enable_lse, int wait_to_complete)
2917 {
2918 #define CHECK_INTERVAL             100  /* 100ms */
2919 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2920 	uint32_t rep_cnt = MAX_REPEAT_TIME;
2921 	struct i40e_link_status link_status;
2922 	int status;
2923 
2924 	memset(&link_status, 0, sizeof(link_status));
2925 
2926 	do {
2927 		memset(&link_status, 0, sizeof(link_status));
2928 
2929 		/* Get link status information from hardware */
2930 		status = i40e_aq_get_link_info(hw, enable_lse,
2931 						&link_status, NULL);
2932 		if (unlikely(status != I40E_SUCCESS)) {
2933 			link->link_speed = ETH_SPEED_NUM_NONE;
2934 			link->link_duplex = ETH_LINK_FULL_DUPLEX;
2935 			PMD_DRV_LOG(ERR, "Failed to get link info");
2936 			return;
2937 		}
2938 
2939 		link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2940 		if (!wait_to_complete || link->link_status)
2941 			break;
2942 
2943 		rte_delay_ms(CHECK_INTERVAL);
2944 	} while (--rep_cnt);
2945 
2946 	/* Parse the link status */
2947 	switch (link_status.link_speed) {
2948 	case I40E_LINK_SPEED_100MB:
2949 		link->link_speed = ETH_SPEED_NUM_100M;
2950 		break;
2951 	case I40E_LINK_SPEED_1GB:
2952 		link->link_speed = ETH_SPEED_NUM_1G;
2953 		break;
2954 	case I40E_LINK_SPEED_10GB:
2955 		link->link_speed = ETH_SPEED_NUM_10G;
2956 		break;
2957 	case I40E_LINK_SPEED_20GB:
2958 		link->link_speed = ETH_SPEED_NUM_20G;
2959 		break;
2960 	case I40E_LINK_SPEED_25GB:
2961 		link->link_speed = ETH_SPEED_NUM_25G;
2962 		break;
2963 	case I40E_LINK_SPEED_40GB:
2964 		link->link_speed = ETH_SPEED_NUM_40G;
2965 		break;
2966 	default:
2967 		if (link->link_status)
2968 			link->link_speed = ETH_SPEED_NUM_UNKNOWN;
2969 		else
2970 			link->link_speed = ETH_SPEED_NUM_NONE;
2971 		break;
2972 	}
2973 }
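/*
 * Note: when wait_to_complete is set and the link is still down, the loop
 * above re-reads the link status up to MAX_REPEAT_TIME (10) times with
 * CHECK_INTERVAL (100 ms) spacing, i.e. it waits at most about one second
 * for the link to come up before giving up.
 */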
2974 
2975 int
2976 i40e_dev_link_update(struct rte_eth_dev *dev,
2977 		     int wait_to_complete)
2978 {
2979 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2980 	struct rte_eth_link link;
2981 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2982 	int ret;
2983 
2984 	memset(&link, 0, sizeof(link));
2985 
2986 	/* i40e uses full duplex only */
2987 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
2988 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2989 			ETH_LINK_SPEED_FIXED);
2990 
2991 	if (!wait_to_complete && !enable_lse)
2992 		update_link_reg(hw, &link);
2993 	else
2994 		update_link_aq(hw, &link, enable_lse, wait_to_complete);
2995 
2996 	if (hw->switch_dev)
2997 		rte_eth_linkstatus_get(hw->switch_dev, &link);
2998 
2999 	ret = rte_eth_linkstatus_set(dev, &link);
3000 	i40e_notify_all_vfs_link_status(dev);
3001 
3002 	return ret;
3003 }
3004 
3005 static void
3006 i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
3007 			  uint32_t loreg, bool offset_loaded, uint64_t *offset,
3008 			  uint64_t *stat, uint64_t *prev_stat)
3009 {
3010 	i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
3011 	/* extend the 48-bit hardware counter to 64 bits and handle wraparound */
3012 	if (offset_loaded) {
3013 		if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
3014 			*stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
3015 		*stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
3016 	}
3017 	*prev_stat = *stat;
3018 }
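/*
 * Worked example of the wrap handling above (illustrative numbers): with
 * *prev_stat == 0x0000FFFFFFFFFFF0 and a fresh 48-bit reading that yields
 * *stat == 0x10, the low 48 bits of the previous value exceed the new one,
 * so 2^48 is added and the upper 16 bits of *prev_stat are carried over,
 * keeping the accumulated 64-bit counter monotonically increasing.
 */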
3019 
3020 /* Get all the statistics of a VSI */
3021 void
3022 i40e_update_vsi_stats(struct i40e_vsi *vsi)
3023 {
3024 	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
3025 	struct i40e_eth_stats *nes = &vsi->eth_stats;
3026 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3027 	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
3028 
3029 	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
3030 				  vsi->offset_loaded, &oes->rx_bytes,
3031 				  &nes->rx_bytes, &vsi->prev_rx_bytes);
3032 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
3033 			    vsi->offset_loaded, &oes->rx_unicast,
3034 			    &nes->rx_unicast);
3035 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
3036 			    vsi->offset_loaded, &oes->rx_multicast,
3037 			    &nes->rx_multicast);
3038 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
3039 			    vsi->offset_loaded, &oes->rx_broadcast,
3040 			    &nes->rx_broadcast);
3041 	/* exclude CRC bytes */
3042 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3043 		nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3044 
3045 	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
3046 			    &oes->rx_discards, &nes->rx_discards);
3047 	/* GLV_REPC not supported */
3048 	/* GLV_RMPC not supported */
3049 	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
3050 			    &oes->rx_unknown_protocol,
3051 			    &nes->rx_unknown_protocol);
3052 	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
3053 				  vsi->offset_loaded, &oes->tx_bytes,
3054 				  &nes->tx_bytes, &vsi->prev_tx_bytes);
3055 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
3056 			    vsi->offset_loaded, &oes->tx_unicast,
3057 			    &nes->tx_unicast);
3058 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
3059 			    vsi->offset_loaded, &oes->tx_multicast,
3060 			    &nes->tx_multicast);
3061 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
3062 			    vsi->offset_loaded,  &oes->tx_broadcast,
3063 			    &nes->tx_broadcast);
3064 	/* GLV_TDPC not supported */
3065 	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
3066 			    &oes->tx_errors, &nes->tx_errors);
3067 	vsi->offset_loaded = true;
3068 
3069 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
3070 		    vsi->vsi_id);
3071 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
3072 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
3073 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
3074 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
3075 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
3076 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3077 		    nes->rx_unknown_protocol);
3078 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
3079 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
3080 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
3081 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
3082 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
3083 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
3084 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
3085 		    vsi->vsi_id);
3086 }
3087 
3088 static void
3089 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
3090 {
3091 	unsigned int i;
3092 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3093 	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
3094 
3095 	/* Get rx/tx bytes of internal transfer packets */
3096 	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
3097 				  I40E_GLV_GORCL(hw->port),
3098 				  pf->offset_loaded,
3099 				  &pf->internal_stats_offset.rx_bytes,
3100 				  &pf->internal_stats.rx_bytes,
3101 				  &pf->internal_prev_rx_bytes);
3102 	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
3103 				  I40E_GLV_GOTCL(hw->port),
3104 				  pf->offset_loaded,
3105 				  &pf->internal_stats_offset.tx_bytes,
3106 				  &pf->internal_stats.tx_bytes,
3107 				  &pf->internal_prev_tx_bytes);
3108 	/* Get total internal rx packet count */
3109 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3110 			    I40E_GLV_UPRCL(hw->port),
3111 			    pf->offset_loaded,
3112 			    &pf->internal_stats_offset.rx_unicast,
3113 			    &pf->internal_stats.rx_unicast);
3114 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3115 			    I40E_GLV_MPRCL(hw->port),
3116 			    pf->offset_loaded,
3117 			    &pf->internal_stats_offset.rx_multicast,
3118 			    &pf->internal_stats.rx_multicast);
3119 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3120 			    I40E_GLV_BPRCL(hw->port),
3121 			    pf->offset_loaded,
3122 			    &pf->internal_stats_offset.rx_broadcast,
3123 			    &pf->internal_stats.rx_broadcast);
3124 	/* Get total internal tx packet count */
3125 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3126 			    I40E_GLV_UPTCL(hw->port),
3127 			    pf->offset_loaded,
3128 			    &pf->internal_stats_offset.tx_unicast,
3129 			    &pf->internal_stats.tx_unicast);
3130 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3131 			    I40E_GLV_MPTCL(hw->port),
3132 			    pf->offset_loaded,
3133 			    &pf->internal_stats_offset.tx_multicast,
3134 			    &pf->internal_stats.tx_multicast);
3135 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3136 			    I40E_GLV_BPTCL(hw->port),
3137 			    pf->offset_loaded,
3138 			    &pf->internal_stats_offset.tx_broadcast,
3139 			    &pf->internal_stats.tx_broadcast);
3140 
3141 	/* exclude CRC size */
3142 	pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3143 		pf->internal_stats.rx_multicast +
3144 		pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3145 
3146 	/* Get statistics of struct i40e_eth_stats */
3147 	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
3148 				  I40E_GLPRT_GORCL(hw->port),
3149 				  pf->offset_loaded, &os->eth.rx_bytes,
3150 				  &ns->eth.rx_bytes, &pf->prev_rx_bytes);
3151 	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3152 			    I40E_GLPRT_UPRCL(hw->port),
3153 			    pf->offset_loaded, &os->eth.rx_unicast,
3154 			    &ns->eth.rx_unicast);
3155 	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3156 			    I40E_GLPRT_MPRCL(hw->port),
3157 			    pf->offset_loaded, &os->eth.rx_multicast,
3158 			    &ns->eth.rx_multicast);
3159 	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3160 			    I40E_GLPRT_BPRCL(hw->port),
3161 			    pf->offset_loaded, &os->eth.rx_broadcast,
3162 			    &ns->eth.rx_broadcast);
3163 	/* Workaround: CRC size should not be included in byte statistics,
3164 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3165 	 * packet.
3166 	 */
3167 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3168 		ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3169 
3170 	/* Exclude internal rx bytes.
3171 	 * Workaround: I40E_GLV_GORC[H/L] may be updated before
3172 	 * I40E_GLPRT_GORC[H/L], so there is a small window in which the
3173 	 * difference would be negative; clamp it to zero in that case.
3174 	 * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
3175 	 */
3176 	if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3177 		ns->eth.rx_bytes = 0;
3178 	else
3179 		ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3180 
3181 	if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3182 		ns->eth.rx_unicast = 0;
3183 	else
3184 		ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3185 
3186 	if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3187 		ns->eth.rx_multicast = 0;
3188 	else
3189 		ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3190 
3191 	if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3192 		ns->eth.rx_broadcast = 0;
3193 	else
3194 		ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3195 
3196 	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3197 			    pf->offset_loaded, &os->eth.rx_discards,
3198 			    &ns->eth.rx_discards);
3199 	/* GLPRT_REPC not supported */
3200 	/* GLPRT_RMPC not supported */
3201 	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3202 			    pf->offset_loaded,
3203 			    &os->eth.rx_unknown_protocol,
3204 			    &ns->eth.rx_unknown_protocol);
3205 	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
3206 				  I40E_GLPRT_GOTCL(hw->port),
3207 				  pf->offset_loaded, &os->eth.tx_bytes,
3208 				  &ns->eth.tx_bytes, &pf->prev_tx_bytes);
3209 	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3210 			    I40E_GLPRT_UPTCL(hw->port),
3211 			    pf->offset_loaded, &os->eth.tx_unicast,
3212 			    &ns->eth.tx_unicast);
3213 	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3214 			    I40E_GLPRT_MPTCL(hw->port),
3215 			    pf->offset_loaded, &os->eth.tx_multicast,
3216 			    &ns->eth.tx_multicast);
3217 	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3218 			    I40E_GLPRT_BPTCL(hw->port),
3219 			    pf->offset_loaded, &os->eth.tx_broadcast,
3220 			    &ns->eth.tx_broadcast);
3221 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3222 		ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3223 
3224 	/* Exclude internal tx bytes.
3225 	 * Workaround: I40E_GLV_GOTC[H/L] may be updated before
3226 	 * I40E_GLPRT_GOTC[H/L], so there is a small window in which the
3227 	 * difference would be negative; clamp it to zero in that case.
3228 	 * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
3229 	 */
3230 	if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3231 		ns->eth.tx_bytes = 0;
3232 	else
3233 		ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3234 
3235 	if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3236 		ns->eth.tx_unicast = 0;
3237 	else
3238 		ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3239 
3240 	if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3241 		ns->eth.tx_multicast = 0;
3242 	else
3243 		ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3244 
3245 	if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3246 		ns->eth.tx_broadcast = 0;
3247 	else
3248 		ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3249 
3250 	/* GLPRT_TEPC not supported */
3251 
3252 	/* additional port specific stats */
3253 	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3254 			    pf->offset_loaded, &os->tx_dropped_link_down,
3255 			    &ns->tx_dropped_link_down);
3256 	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3257 			    pf->offset_loaded, &os->crc_errors,
3258 			    &ns->crc_errors);
3259 	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3260 			    pf->offset_loaded, &os->illegal_bytes,
3261 			    &ns->illegal_bytes);
3262 	/* GLPRT_ERRBC not supported */
3263 	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3264 			    pf->offset_loaded, &os->mac_local_faults,
3265 			    &ns->mac_local_faults);
3266 	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3267 			    pf->offset_loaded, &os->mac_remote_faults,
3268 			    &ns->mac_remote_faults);
3269 	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3270 			    pf->offset_loaded, &os->rx_length_errors,
3271 			    &ns->rx_length_errors);
3272 	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3273 			    pf->offset_loaded, &os->link_xon_rx,
3274 			    &ns->link_xon_rx);
3275 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3276 			    pf->offset_loaded, &os->link_xoff_rx,
3277 			    &ns->link_xoff_rx);
3278 	for (i = 0; i < 8; i++) {
3279 		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3280 				    pf->offset_loaded,
3281 				    &os->priority_xon_rx[i],
3282 				    &ns->priority_xon_rx[i]);
3283 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3284 				    pf->offset_loaded,
3285 				    &os->priority_xoff_rx[i],
3286 				    &ns->priority_xoff_rx[i]);
3287 	}
3288 	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3289 			    pf->offset_loaded, &os->link_xon_tx,
3290 			    &ns->link_xon_tx);
3291 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3292 			    pf->offset_loaded, &os->link_xoff_tx,
3293 			    &ns->link_xoff_tx);
3294 	for (i = 0; i < 8; i++) {
3295 		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3296 				    pf->offset_loaded,
3297 				    &os->priority_xon_tx[i],
3298 				    &ns->priority_xon_tx[i]);
3299 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3300 				    pf->offset_loaded,
3301 				    &os->priority_xoff_tx[i],
3302 				    &ns->priority_xoff_tx[i]);
3303 		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3304 				    pf->offset_loaded,
3305 				    &os->priority_xon_2_xoff[i],
3306 				    &ns->priority_xon_2_xoff[i]);
3307 	}
3308 	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3309 			    I40E_GLPRT_PRC64L(hw->port),
3310 			    pf->offset_loaded, &os->rx_size_64,
3311 			    &ns->rx_size_64);
3312 	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3313 			    I40E_GLPRT_PRC127L(hw->port),
3314 			    pf->offset_loaded, &os->rx_size_127,
3315 			    &ns->rx_size_127);
3316 	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3317 			    I40E_GLPRT_PRC255L(hw->port),
3318 			    pf->offset_loaded, &os->rx_size_255,
3319 			    &ns->rx_size_255);
3320 	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3321 			    I40E_GLPRT_PRC511L(hw->port),
3322 			    pf->offset_loaded, &os->rx_size_511,
3323 			    &ns->rx_size_511);
3324 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3325 			    I40E_GLPRT_PRC1023L(hw->port),
3326 			    pf->offset_loaded, &os->rx_size_1023,
3327 			    &ns->rx_size_1023);
3328 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3329 			    I40E_GLPRT_PRC1522L(hw->port),
3330 			    pf->offset_loaded, &os->rx_size_1522,
3331 			    &ns->rx_size_1522);
3332 	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3333 			    I40E_GLPRT_PRC9522L(hw->port),
3334 			    pf->offset_loaded, &os->rx_size_big,
3335 			    &ns->rx_size_big);
3336 	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3337 			    pf->offset_loaded, &os->rx_undersize,
3338 			    &ns->rx_undersize);
3339 	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3340 			    pf->offset_loaded, &os->rx_fragments,
3341 			    &ns->rx_fragments);
3342 	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3343 			    pf->offset_loaded, &os->rx_oversize,
3344 			    &ns->rx_oversize);
3345 	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3346 			    pf->offset_loaded, &os->rx_jabber,
3347 			    &ns->rx_jabber);
3348 	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3349 			    I40E_GLPRT_PTC64L(hw->port),
3350 			    pf->offset_loaded, &os->tx_size_64,
3351 			    &ns->tx_size_64);
3352 	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3353 			    I40E_GLPRT_PTC127L(hw->port),
3354 			    pf->offset_loaded, &os->tx_size_127,
3355 			    &ns->tx_size_127);
3356 	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3357 			    I40E_GLPRT_PTC255L(hw->port),
3358 			    pf->offset_loaded, &os->tx_size_255,
3359 			    &ns->tx_size_255);
3360 	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3361 			    I40E_GLPRT_PTC511L(hw->port),
3362 			    pf->offset_loaded, &os->tx_size_511,
3363 			    &ns->tx_size_511);
3364 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3365 			    I40E_GLPRT_PTC1023L(hw->port),
3366 			    pf->offset_loaded, &os->tx_size_1023,
3367 			    &ns->tx_size_1023);
3368 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3369 			    I40E_GLPRT_PTC1522L(hw->port),
3370 			    pf->offset_loaded, &os->tx_size_1522,
3371 			    &ns->tx_size_1522);
3372 	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3373 			    I40E_GLPRT_PTC9522L(hw->port),
3374 			    pf->offset_loaded, &os->tx_size_big,
3375 			    &ns->tx_size_big);
3376 	i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3377 			   pf->offset_loaded,
3378 			   &os->fd_sb_match, &ns->fd_sb_match);
3379 	/* GLPRT_MSPDC not supported */
3380 	/* GLPRT_XEC not supported */
3381 
3382 	pf->offset_loaded = true;
3383 
3384 	if (pf->main_vsi)
3385 		i40e_update_vsi_stats(pf->main_vsi);
3386 }
3387 
3388 /* Get all statistics of a port */
3389 static int
3390 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3391 {
3392 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3393 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3394 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3395 	struct i40e_vsi *vsi;
3396 	unsigned i;
3397 
3398 	/* Read the registers to update the counters, then fill the stats struct */
3399 	i40e_read_stats_registers(pf, hw);
3400 
3401 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3402 			pf->main_vsi->eth_stats.rx_multicast +
3403 			pf->main_vsi->eth_stats.rx_broadcast -
3404 			pf->main_vsi->eth_stats.rx_discards;
3405 	stats->opackets = ns->eth.tx_unicast +
3406 			ns->eth.tx_multicast +
3407 			ns->eth.tx_broadcast;
3408 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3409 	stats->obytes   = ns->eth.tx_bytes;
3410 	stats->oerrors  = ns->eth.tx_errors +
3411 			pf->main_vsi->eth_stats.tx_errors;
3412 
3413 	/* Rx Errors */
3414 	stats->imissed  = ns->eth.rx_discards +
3415 			pf->main_vsi->eth_stats.rx_discards;
3416 	stats->ierrors  = ns->crc_errors +
3417 			ns->rx_length_errors + ns->rx_undersize +
3418 			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3419 
3420 	if (pf->vfs) {
3421 		for (i = 0; i < pf->vf_num; i++) {
3422 			vsi = pf->vfs[i].vsi;
3423 			i40e_update_vsi_stats(vsi);
3424 
3425 			stats->ipackets += (vsi->eth_stats.rx_unicast +
3426 					vsi->eth_stats.rx_multicast +
3427 					vsi->eth_stats.rx_broadcast -
3428 					vsi->eth_stats.rx_discards);
3429 			stats->ibytes   += vsi->eth_stats.rx_bytes;
3430 			stats->oerrors  += vsi->eth_stats.tx_errors;
3431 			stats->imissed  += vsi->eth_stats.rx_discards;
3432 		}
3433 	}
3434 
3435 	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3436 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3437 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3438 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3439 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3440 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3441 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3442 		    ns->eth.rx_unknown_protocol);
3443 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3444 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3445 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3446 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3447 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3448 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3449 
3450 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3451 		    ns->tx_dropped_link_down);
3452 	PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3453 	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3454 		    ns->illegal_bytes);
3455 	PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3456 	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3457 		    ns->mac_local_faults);
3458 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3459 		    ns->mac_remote_faults);
3460 	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3461 		    ns->rx_length_errors);
3462 	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3463 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3464 	for (i = 0; i < 8; i++) {
3465 		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3466 				i, ns->priority_xon_rx[i]);
3467 		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3468 				i, ns->priority_xoff_rx[i]);
3469 	}
3470 	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3471 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3472 	for (i = 0; i < 8; i++) {
3473 		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3474 				i, ns->priority_xon_tx[i]);
3475 		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3476 				i, ns->priority_xoff_tx[i]);
3477 		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3478 				i, ns->priority_xon_2_xoff[i]);
3479 	}
3480 	PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3481 	PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3482 	PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3483 	PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3484 	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3485 	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3486 	PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3487 	PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3488 	PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3489 	PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3490 	PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3491 	PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3492 	PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3493 	PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3494 	PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3495 	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3496 	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3497 	PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3498 	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3499 			ns->mac_short_packet_dropped);
3500 	PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3501 		    ns->checksum_error);
3502 	PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3503 	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3504 	return 0;
3505 }
3506 
3507 /* Reset the statistics */
3508 static int
3509 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3510 {
3511 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3512 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3513 
3514 	/* Mark PF and VSI stats to update the offset, aka "reset" */
3515 	pf->offset_loaded = false;
3516 	if (pf->main_vsi)
3517 		pf->main_vsi->offset_loaded = false;
3518 
3519 	/* read the stats, reading current register values into offset */
3520 	i40e_read_stats_registers(pf, hw);
3521 
3522 	return 0;
3523 }
3524 
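/*
 * Total number of extended statistics: the per-port ethernet stats, the
 * additional hardware port stats, plus one rx and one tx priority stat set
 * replicated for each of the 8 traffic-class priorities.
 */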
3525 static uint32_t
3526 i40e_xstats_calc_num(void)
3527 {
3528 	return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3529 		(I40E_NB_RXQ_PRIO_XSTATS * 8) +
3530 		(I40E_NB_TXQ_PRIO_XSTATS * 8);
3531 }
3532 
3533 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3534 				     struct rte_eth_xstat_name *xstats_names,
3535 				     __rte_unused unsigned limit)
3536 {
3537 	unsigned count = 0;
3538 	unsigned i, prio;
3539 
3540 	if (xstats_names == NULL)
3541 		return i40e_xstats_calc_num();
3542 
3543 	/* Note: limit checked in rte_eth_xstats_get_names() */
3544 
3545 	/* Get stats from i40e_eth_stats struct */
3546 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3547 		strlcpy(xstats_names[count].name,
3548 			rte_i40e_stats_strings[i].name,
3549 			sizeof(xstats_names[count].name));
3550 		count++;
3551 	}
3552 
3553 	/* Get individual stats from the i40e_hw_port struct */
3554 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3555 		strlcpy(xstats_names[count].name,
3556 			rte_i40e_hw_port_strings[i].name,
3557 			sizeof(xstats_names[count].name));
3558 		count++;
3559 	}
3560 
3561 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3562 		for (prio = 0; prio < 8; prio++) {
3563 			snprintf(xstats_names[count].name,
3564 				 sizeof(xstats_names[count].name),
3565 				 "rx_priority%u_%s", prio,
3566 				 rte_i40e_rxq_prio_strings[i].name);
3567 			count++;
3568 		}
3569 	}
3570 
3571 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3572 		for (prio = 0; prio < 8; prio++) {
3573 			snprintf(xstats_names[count].name,
3574 				 sizeof(xstats_names[count].name),
3575 				 "tx_priority%u_%s", prio,
3576 				 rte_i40e_txq_prio_strings[i].name);
3577 			count++;
3578 		}
3579 	}
3580 	return count;
3581 }
3582 
3583 static int
3584 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3585 		    unsigned n)
3586 {
3587 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3588 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3589 	unsigned i, count, prio;
3590 	struct i40e_hw_port_stats *hw_stats = &pf->stats;
3591 
3592 	count = i40e_xstats_calc_num();
3593 	if (n < count)
3594 		return count;
3595 
3596 	i40e_read_stats_registers(pf, hw);
3597 
3598 	if (xstats == NULL)
3599 		return 0;
3600 
3601 	count = 0;
3602 
3603 	/* Get stats from i40e_eth_stats struct */
3604 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3605 		xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3606 			rte_i40e_stats_strings[i].offset);
3607 		xstats[count].id = count;
3608 		count++;
3609 	}
3610 
3611 	/* Get individual stats from the i40e_hw_port struct */
3612 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3613 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3614 			rte_i40e_hw_port_strings[i].offset);
3615 		xstats[count].id = count;
3616 		count++;
3617 	}
3618 
3619 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3620 		for (prio = 0; prio < 8; prio++) {
3621 			xstats[count].value =
3622 				*(uint64_t *)(((char *)hw_stats) +
3623 				rte_i40e_rxq_prio_strings[i].offset +
3624 				(sizeof(uint64_t) * prio));
3625 			xstats[count].id = count;
3626 			count++;
3627 		}
3628 	}
3629 
3630 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3631 		for (prio = 0; prio < 8; prio++) {
3632 			xstats[count].value =
3633 				*(uint64_t *)(((char *)hw_stats) +
3634 				rte_i40e_txq_prio_strings[i].offset +
3635 				(sizeof(uint64_t) * prio));
3636 			xstats[count].id = count;
3637 			count++;
3638 		}
3639 	}
3640 
3641 	return count;
3642 }
3643 
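/*
 * Format the firmware/NVM version string as
 * "<nvm major>.<nvm minor> 0x<eetrack> <oem ver>.<oem build>.<oem patch>",
 * e.g. "6.01 0x00012345 1.25.0" (illustrative values only).
 * Returns 0 on success, -EINVAL on formatting failure, or the required
 * buffer size (including the terminating '\0') if fw_size is too small.
 */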
3644 static int
3645 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3646 {
3647 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3648 	u32 full_ver;
3649 	u8 ver, patch;
3650 	u16 build;
3651 	int ret;
3652 
3653 	full_ver = hw->nvm.oem_ver;
3654 	ver = (u8)(full_ver >> 24);
3655 	build = (u16)((full_ver >> 8) & 0xffff);
3656 	patch = (u8)(full_ver & 0xff);
3657 
3658 	ret = snprintf(fw_version, fw_size,
3659 		 "%d.%d%d 0x%08x %d.%d.%d",
3660 		 ((hw->nvm.version >> 12) & 0xf),
3661 		 ((hw->nvm.version >> 4) & 0xff),
3662 		 (hw->nvm.version & 0xf), hw->nvm.eetrack,
3663 		 ver, build, patch);
3664 	if (ret < 0)
3665 		return -EINVAL;
3666 
3667 	ret += 1; /* add the size of '\0' */
3668 	if (fw_size < (size_t)ret)
3669 		return ret;
3670 	else
3671 		return 0;
3672 }
3673 
3674 /*
3675  * With NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
3676  * the Rx data path does not hang when the FW LLDP agent is stopped.
3677  * Return true if FW LLDP needs to be stopped;
3678  * return false if LLDP cannot be disabled without risking an Rx data path hang.
3679  */
3680 static bool
3681 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3682 {
3683 	double nvm_ver;
3684 	char ver_str[64] = {0};
3685 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3686 
3687 	i40e_fw_version_get(dev, ver_str, 64);
3688 	nvm_ver = atof(ver_str);
3689 	if ((hw->mac.type == I40E_MAC_X722 ||
3690 	     hw->mac.type == I40E_MAC_X722_VF) &&
3691 	     ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3692 		return true;
3693 	else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3694 		return true;
3695 
3696 	return false;
3697 }
3698 
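/*
 * Report PF device capabilities: queue limits, supported rx/tx offloads,
 * RSS parameters, descriptor limits and preferred default configuration.
 * The advertised speed capability and preferred ring sizes depend on the
 * PHY type (40G XL710, 25G XXV710, or 1G/10G X710).
 */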
3699 static int
3700 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3701 {
3702 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3703 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3704 	struct i40e_vsi *vsi = pf->main_vsi;
3705 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3706 
3707 	dev_info->max_rx_queues = vsi->nb_qps;
3708 	dev_info->max_tx_queues = vsi->nb_qps;
3709 	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3710 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3711 	dev_info->max_mac_addrs = vsi->max_macaddrs;
3712 	dev_info->max_vfs = pci_dev->max_vfs;
3713 	dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3714 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3715 	dev_info->rx_queue_offload_capa = 0;
3716 	dev_info->rx_offload_capa =
3717 		DEV_RX_OFFLOAD_VLAN_STRIP |
3718 		DEV_RX_OFFLOAD_QINQ_STRIP |
3719 		DEV_RX_OFFLOAD_IPV4_CKSUM |
3720 		DEV_RX_OFFLOAD_UDP_CKSUM |
3721 		DEV_RX_OFFLOAD_TCP_CKSUM |
3722 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3723 		DEV_RX_OFFLOAD_KEEP_CRC |
3724 		DEV_RX_OFFLOAD_SCATTER |
3725 		DEV_RX_OFFLOAD_VLAN_EXTEND |
3726 		DEV_RX_OFFLOAD_VLAN_FILTER |
3727 		DEV_RX_OFFLOAD_JUMBO_FRAME |
3728 		DEV_RX_OFFLOAD_RSS_HASH;
3729 
3730 	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3731 	dev_info->tx_offload_capa =
3732 		DEV_TX_OFFLOAD_VLAN_INSERT |
3733 		DEV_TX_OFFLOAD_QINQ_INSERT |
3734 		DEV_TX_OFFLOAD_IPV4_CKSUM |
3735 		DEV_TX_OFFLOAD_UDP_CKSUM |
3736 		DEV_TX_OFFLOAD_TCP_CKSUM |
3737 		DEV_TX_OFFLOAD_SCTP_CKSUM |
3738 		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3739 		DEV_TX_OFFLOAD_TCP_TSO |
3740 		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3741 		DEV_TX_OFFLOAD_GRE_TNL_TSO |
3742 		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3743 		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3744 		DEV_TX_OFFLOAD_MULTI_SEGS |
3745 		dev_info->tx_queue_offload_capa;
3746 	dev_info->dev_capa =
3747 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3748 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3749 
3750 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3751 						sizeof(uint32_t);
3752 	dev_info->reta_size = pf->hash_lut_size;
3753 	dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3754 
3755 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3756 		.rx_thresh = {
3757 			.pthresh = I40E_DEFAULT_RX_PTHRESH,
3758 			.hthresh = I40E_DEFAULT_RX_HTHRESH,
3759 			.wthresh = I40E_DEFAULT_RX_WTHRESH,
3760 		},
3761 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3762 		.rx_drop_en = 0,
3763 		.offloads = 0,
3764 	};
3765 
3766 	dev_info->default_txconf = (struct rte_eth_txconf) {
3767 		.tx_thresh = {
3768 			.pthresh = I40E_DEFAULT_TX_PTHRESH,
3769 			.hthresh = I40E_DEFAULT_TX_HTHRESH,
3770 			.wthresh = I40E_DEFAULT_TX_WTHRESH,
3771 		},
3772 		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3773 		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3774 		.offloads = 0,
3775 	};
3776 
3777 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3778 		.nb_max = I40E_MAX_RING_DESC,
3779 		.nb_min = I40E_MIN_RING_DESC,
3780 		.nb_align = I40E_ALIGN_RING_DESC,
3781 	};
3782 
3783 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3784 		.nb_max = I40E_MAX_RING_DESC,
3785 		.nb_min = I40E_MIN_RING_DESC,
3786 		.nb_align = I40E_ALIGN_RING_DESC,
3787 		.nb_seg_max = I40E_TX_MAX_SEG,
3788 		.nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3789 	};
3790 
3791 	if (pf->flags & I40E_FLAG_VMDQ) {
3792 		dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3793 		dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3794 		dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3795 						pf->max_nb_vmdq_vsi;
3796 		dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3797 		dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3798 		dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3799 	}
3800 
3801 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3802 		/* For XL710 */
3803 		dev_info->speed_capa = ETH_LINK_SPEED_40G;
3804 		dev_info->default_rxportconf.nb_queues = 2;
3805 		dev_info->default_txportconf.nb_queues = 2;
3806 		if (dev->data->nb_rx_queues == 1)
3807 			dev_info->default_rxportconf.ring_size = 2048;
3808 		else
3809 			dev_info->default_rxportconf.ring_size = 1024;
3810 		if (dev->data->nb_tx_queues == 1)
3811 			dev_info->default_txportconf.ring_size = 1024;
3812 		else
3813 			dev_info->default_txportconf.ring_size = 512;
3814 
3815 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3816 		/* For XXV710 */
3817 		dev_info->speed_capa = ETH_LINK_SPEED_25G;
3818 		dev_info->default_rxportconf.nb_queues = 1;
3819 		dev_info->default_txportconf.nb_queues = 1;
3820 		dev_info->default_rxportconf.ring_size = 256;
3821 		dev_info->default_txportconf.ring_size = 256;
3822 	} else {
3823 		/* For X710 */
3824 		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3825 		dev_info->default_rxportconf.nb_queues = 1;
3826 		dev_info->default_txportconf.nb_queues = 1;
3827 		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3828 			dev_info->default_rxportconf.ring_size = 512;
3829 			dev_info->default_txportconf.ring_size = 256;
3830 		} else {
3831 			dev_info->default_rxportconf.ring_size = 256;
3832 			dev_info->default_txportconf.ring_size = 256;
3833 		}
3834 	}
3835 	dev_info->default_rxportconf.burst_size = 32;
3836 	dev_info->default_txportconf.burst_size = 32;
3837 
3838 	return 0;
3839 }
3840 
3841 static int
3842 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3843 {
3844 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3845 	struct i40e_vsi *vsi = pf->main_vsi;
3846 	PMD_INIT_FUNC_TRACE();
3847 
3848 	if (on)
3849 		return i40e_vsi_add_vlan(vsi, vlan_id);
3850 	else
3851 		return i40e_vsi_delete_vlan(vsi, vlan_id);
3852 }
3853 
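/*
 * Program the outer/inner VLAN TPID through a read-modify-write of the
 * ethertype field in GL_SWT_L2TAGCTRL. Register index 2 holds the outer tag
 * in QinQ mode; index 3 is used otherwise. This path is used when the
 * firmware does not expose the 802.1ad switch-config capability.
 */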
3854 static int
3855 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3856 				enum rte_vlan_type vlan_type,
3857 				uint16_t tpid, int qinq)
3858 {
3859 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3860 	uint64_t reg_r = 0;
3861 	uint64_t reg_w = 0;
3862 	uint16_t reg_id = 3;
3863 	int ret;
3864 
3865 	if (qinq) {
3866 		if (vlan_type == ETH_VLAN_TYPE_OUTER)
3867 			reg_id = 2;
3868 	}
3869 
3870 	ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3871 					  &reg_r, NULL);
3872 	if (ret != I40E_SUCCESS) {
3873 		PMD_DRV_LOG(ERR,
3874 			   "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3875 			   reg_id);
3876 		return -EIO;
3877 	}
3878 	PMD_DRV_LOG(DEBUG,
3879 		    "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3880 		    reg_id, reg_r);
3881 
3882 	reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3883 	reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3884 	if (reg_r == reg_w) {
3885 		PMD_DRV_LOG(DEBUG, "No need to write");
3886 		return 0;
3887 	}
3888 
3889 	ret = i40e_aq_debug_write_global_register(hw,
3890 					   I40E_GL_SWT_L2TAGCTRL(reg_id),
3891 					   reg_w, NULL);
3892 	if (ret != I40E_SUCCESS) {
3893 		PMD_DRV_LOG(ERR,
3894 			    "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3895 			    reg_id);
3896 		return -EIO;
3897 	}
3898 	PMD_DRV_LOG(DEBUG,
3899 		    "Global register 0x%08x is changed with value 0x%08x",
3900 		    I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3901 
3902 	return 0;
3903 }
3904 
3905 static int
3906 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3907 		   enum rte_vlan_type vlan_type,
3908 		   uint16_t tpid)
3909 {
3910 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3911 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3912 	int qinq = dev->data->dev_conf.rxmode.offloads &
3913 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
3914 	int ret = 0;
3915 
3916 	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3917 	     vlan_type != ETH_VLAN_TYPE_OUTER) ||
3918 	    (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3919 		PMD_DRV_LOG(ERR,
3920 			    "Unsupported vlan type.");
3921 		return -EINVAL;
3922 	}
3923 
3924 	if (pf->support_multi_driver) {
3925 		PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3926 		return -ENOTSUP;
3927 	}
3928 
3929 	/* Support for 802.1ad frames was added in NVM API 1.7 */
3930 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3931 		if (qinq) {
3932 			if (vlan_type == ETH_VLAN_TYPE_OUTER)
3933 				hw->first_tag = rte_cpu_to_le_16(tpid);
3934 			else if (vlan_type == ETH_VLAN_TYPE_INNER)
3935 				hw->second_tag = rte_cpu_to_le_16(tpid);
3936 		} else {
3937 			if (vlan_type == ETH_VLAN_TYPE_OUTER)
3938 				hw->second_tag = rte_cpu_to_le_16(tpid);
3939 		}
3940 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3941 		if (ret != I40E_SUCCESS) {
3942 			PMD_DRV_LOG(ERR,
3943 				    "Set switch config failed aq_err: %d",
3944 				    hw->aq.asq_last_status);
3945 			ret = -EIO;
3946 		}
3947 	} else
3948 		/* If NVM API < 1.7, keep using the register-based setting */
3949 		ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3950 						      tpid, qinq);
3951 
3952 	return ret;
3953 }
3954 
3955 /* Configure outer vlan stripping on or off in QinQ mode */
3956 static int
3957 i40e_vsi_config_outer_vlan_stripping(struct i40e_vsi *vsi, bool on)
3958 {
3959 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3960 	int ret = I40E_SUCCESS;
3961 	uint32_t reg;
3962 
3963 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
3964 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
3965 		return -EINVAL;
3966 	}
3967 
3968 	/* Configure for outer VLAN RX stripping */
3969 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
3970 
3971 	if (on)
3972 		reg |= I40E_VSI_TSR_QINQ_STRIP;
3973 	else
3974 		reg &= ~I40E_VSI_TSR_QINQ_STRIP;
3975 
3976 	ret = i40e_aq_debug_write_register(hw,
3977 						   I40E_VSI_TSR(vsi->vsi_id),
3978 						   reg, NULL);
3979 	if (ret < 0) {
3980 		PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
3981 				    vsi->vsi_id);
3982 		return I40E_ERR_CONFIG;
3983 	}
3984 
3985 	return ret;
3986 }
3987 
3988 static int
3989 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3990 {
3991 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3992 	struct i40e_vsi *vsi = pf->main_vsi;
3993 	struct rte_eth_rxmode *rxmode;
3994 
3995 	rxmode = &dev->data->dev_conf.rxmode;
3996 	if (mask & ETH_VLAN_FILTER_MASK) {
3997 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3998 			i40e_vsi_config_vlan_filter(vsi, TRUE);
3999 		else
4000 			i40e_vsi_config_vlan_filter(vsi, FALSE);
4001 	}
4002 
4003 	if (mask & ETH_VLAN_STRIP_MASK) {
4004 		/* Enable or disable VLAN stripping */
4005 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4006 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
4007 		else
4008 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
4009 	}
4010 
4011 	if (mask & ETH_VLAN_EXTEND_MASK) {
4012 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
4013 			i40e_vsi_config_double_vlan(vsi, TRUE);
4014 			/* Set global registers with default ethertype. */
4015 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
4016 					   RTE_ETHER_TYPE_VLAN);
4017 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
4018 					   RTE_ETHER_TYPE_VLAN);
4019 		}
4020 		else
4021 			i40e_vsi_config_double_vlan(vsi, FALSE);
4022 	}
4023 
4024 	if (mask & ETH_QINQ_STRIP_MASK) {
4025 		/* Enable or disable outer VLAN stripping */
4026 		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
4027 			i40e_vsi_config_outer_vlan_stripping(vsi, TRUE);
4028 		else
4029 			i40e_vsi_config_outer_vlan_stripping(vsi, FALSE);
4030 	}
4031 
4032 	return 0;
4033 }
4034 
4035 static void
4036 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
4037 			  __rte_unused uint16_t queue,
4038 			  __rte_unused int on)
4039 {
4040 	PMD_INIT_FUNC_TRACE();
4041 }
4042 
4043 static int
4044 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4045 {
4046 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4047 	struct i40e_vsi *vsi = pf->main_vsi;
4048 	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
4049 	struct i40e_vsi_vlan_pvid_info info;
4050 
4051 	memset(&info, 0, sizeof(info));
4052 	info.on = on;
4053 	if (info.on)
4054 		info.config.pvid = pvid;
4055 	else {
4056 		info.config.reject.tagged =
4057 				data->dev_conf.txmode.hw_vlan_reject_tagged;
4058 		info.config.reject.untagged =
4059 				data->dev_conf.txmode.hw_vlan_reject_untagged;
4060 	}
4061 
4062 	return i40e_vsi_vlan_pvid_set(vsi, &info);
4063 }
4064 
4065 static int
4066 i40e_dev_led_on(struct rte_eth_dev *dev)
4067 {
4068 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4069 	uint32_t mode = i40e_led_get(hw);
4070 
4071 	if (mode == 0)
4072 		i40e_led_set(hw, 0xf, true); /* 0xf means the LED is always on */
4073 
4074 	return 0;
4075 }
4076 
4077 static int
4078 i40e_dev_led_off(struct rte_eth_dev *dev)
4079 {
4080 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4081 	uint32_t mode = i40e_led_get(hw);
4082 
4083 	if (mode != 0)
4084 		i40e_led_set(hw, 0, false);
4085 
4086 	return 0;
4087 }
4088 
4089 static int
4090 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4091 {
4092 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4093 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4094 
4095 	fc_conf->pause_time = pf->fc_conf.pause_time;
4096 
4097 	/* Read out from registers, in case they were modified by another port */
4098 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
4099 		I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
4100 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
4101 		I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
4102 
4103 	fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
4104 	fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
4105 
4106 	/* Return the current mode according to the actual setting */
4107 	switch (hw->fc.current_mode) {
4108 	case I40E_FC_FULL:
4109 		fc_conf->mode = RTE_FC_FULL;
4110 		break;
4111 	case I40E_FC_TX_PAUSE:
4112 		fc_conf->mode = RTE_FC_TX_PAUSE;
4113 		break;
4114 	case I40E_FC_RX_PAUSE:
4115 		fc_conf->mode = RTE_FC_RX_PAUSE;
4116 		break;
4117 	case I40E_FC_NONE:
4118 	default:
4119 		fc_conf->mode = RTE_FC_NONE;
4120 	}
4121 
4122 	return 0;
4123 }
4124 
4125 static int
4126 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4127 {
4128 	uint32_t mflcn_reg, fctrl_reg, reg;
4129 	uint32_t max_high_water;
4130 	uint8_t i, aq_failure;
4131 	int err;
4132 	struct i40e_hw *hw;
4133 	struct i40e_pf *pf;
4134 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
4135 		[RTE_FC_NONE] = I40E_FC_NONE,
4136 		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
4137 		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
4138 		[RTE_FC_FULL] = I40E_FC_FULL
4139 	};
4140 
4141 	/* The high_water field in struct rte_eth_fc_conf is in kilobyte units */
4142 
4143 	max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4144 	if ((fc_conf->high_water > max_high_water) ||
4145 			(fc_conf->high_water < fc_conf->low_water)) {
4146 		PMD_INIT_LOG(ERR,
4147 			"Invalid high/low water setup value in KB, High_water must be <= %d.",
4148 			max_high_water);
4149 		return -EINVAL;
4150 	}
4151 
4152 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4153 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4154 	hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4155 
4156 	pf->fc_conf.pause_time = fc_conf->pause_time;
4157 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4158 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4159 
4160 	PMD_INIT_FUNC_TRACE();
4161 
4162 	/* All the link flow control related enable/disable register
4163 	 * configuration is handled by the F/W
4164 	 */
4165 	err = i40e_set_fc(hw, &aq_failure, true);
4166 	if (err < 0)
4167 		return -ENOSYS;
4168 
4169 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4170 		/* Configure flow control refresh threshold,
4171 		 * the value for stat_tx_pause_refresh_timer[8]
4172 		 * is used for global pause operation.
4173 		 */
4174 
4175 		I40E_WRITE_REG(hw,
4176 			       I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4177 			       pf->fc_conf.pause_time);
4178 
4179 		/* configure the timer value included in transmitted pause
4180 		 * frame,
4181 		 * the value for stat_tx_pause_quanta[8] is used for global
4182 		 * pause operation
4183 		 */
4184 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4185 			       pf->fc_conf.pause_time);
4186 
4187 		fctrl_reg = I40E_READ_REG(hw,
4188 					  I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4189 
4190 		if (fc_conf->mac_ctrl_frame_fwd != 0)
4191 			fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4192 		else
4193 			fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4194 
4195 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4196 			       fctrl_reg);
4197 	} else {
4198 		/* Configure pause time (2 TCs per register) */
4199 		reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4200 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4201 			I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4202 
4203 		/* Configure flow control refresh threshold value */
4204 		I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4205 			       pf->fc_conf.pause_time / 2);
4206 
4207 		mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4208 
4209 		/* set or clear MFLCN.PMCF & MFLCN.DPF bits
4210 		 * depending on configuration
4211 		 */
4212 		if (fc_conf->mac_ctrl_frame_fwd != 0) {
4213 			mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4214 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4215 		} else {
4216 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4217 			mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4218 		}
4219 
4220 		I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4221 	}
4222 
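	/* The GLRPB_PHW/PLW registers are programmed in units of average-sized
	 * packets, while GLRPB_GHW/GLW are programmed in bytes, so the kilobyte
	 * values from the API are shifted by I40E_KILOSHIFT and, for the
	 * packet-based registers, divided by I40E_PACKET_AVERAGE_SIZE.
	 * As an illustrative example, a 100 KB high-water mark becomes
	 * (100 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE packet credits.
	 */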
4223 	if (!pf->support_multi_driver) {
4224 		/* Configure watermarks based on both packet and byte counts */
4225 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4226 				 (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4227 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4228 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4229 				  (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4230 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4231 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4232 				  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4233 				  << I40E_KILOSHIFT);
4234 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4235 				   pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4236 				   << I40E_KILOSHIFT);
4237 	} else {
4238 		PMD_DRV_LOG(ERR,
4239 			    "Watermark configuration is not supported.");
4240 	}
4241 
4242 	I40E_WRITE_FLUSH(hw);
4243 
4244 	return 0;
4245 }
4246 
4247 static int
4248 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4249 			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4250 {
4251 	PMD_INIT_FUNC_TRACE();
4252 
4253 	return -ENOSYS;
4254 }
4255 
4256 /* Add a MAC address, and update filters */
4257 static int
4258 i40e_macaddr_add(struct rte_eth_dev *dev,
4259 		 struct rte_ether_addr *mac_addr,
4260 		 __rte_unused uint32_t index,
4261 		 uint32_t pool)
4262 {
4263 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4264 	struct i40e_mac_filter_info mac_filter;
4265 	struct i40e_vsi *vsi;
4266 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4267 	int ret;
4268 
4269 	/* If VMDQ not enabled or configured, return */
4270 	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4271 			  !pf->nb_cfg_vmdq_vsi)) {
4272 		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4273 			pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4274 			pool);
4275 		return -ENOTSUP;
4276 	}
4277 
4278 	if (pool > pf->nb_cfg_vmdq_vsi) {
4279 		PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4280 				pool, pf->nb_cfg_vmdq_vsi);
4281 		return -EINVAL;
4282 	}
4283 
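	/* pool 0 addresses the main VSI; pool N (N >= 1) addresses VMDq VSI
	 * N - 1 (see the VSI selection below).
	 */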
4284 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4285 	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4286 		mac_filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
4287 	else
4288 		mac_filter.filter_type = I40E_MAC_PERFECT_MATCH;
4289 
4290 	if (pool == 0)
4291 		vsi = pf->main_vsi;
4292 	else
4293 		vsi = pf->vmdq[pool - 1].vsi;
4294 
4295 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
4296 	if (ret != I40E_SUCCESS) {
4297 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4298 		return -ENODEV;
4299 	}
4300 	return 0;
4301 }
4302 
4303 /* Remove a MAC address, and update filters */
4304 static void
4305 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4306 {
4307 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4308 	struct i40e_vsi *vsi;
4309 	struct rte_eth_dev_data *data = dev->data;
4310 	struct rte_ether_addr *macaddr;
4311 	int ret;
4312 	uint32_t i;
4313 	uint64_t pool_sel;
4314 
4315 	macaddr = &(data->mac_addrs[index]);
4316 
4317 	pool_sel = dev->data->mac_pool_sel[index];
4318 
4319 	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4320 		if (pool_sel & (1ULL << i)) {
4321 			if (i == 0)
4322 				vsi = pf->main_vsi;
4323 			else {
4324 				/* No VMDQ pool enabled or configured */
4325 				if (!(pf->flags & I40E_FLAG_VMDQ) ||
4326 					(i > pf->nb_cfg_vmdq_vsi)) {
4327 					PMD_DRV_LOG(ERR,
4328 						"No VMDQ pool enabled/configured");
4329 					return;
4330 				}
4331 				vsi = pf->vmdq[i - 1].vsi;
4332 			}
4333 			ret = i40e_vsi_delete_mac(vsi, macaddr);
4334 
4335 			if (ret) {
4336 				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4337 				return;
4338 			}
4339 		}
4340 	}
4341 }
4342 
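/*
 * Read the RSS lookup table of a VSI into 'lut', which must be at least
 * lut_size bytes long. The admin queue is used when the adapter supports
 * RSS configuration through it; otherwise the HLUT registers are read
 * directly (VF registers for SRIOV VSIs, PF registers otherwise).
 */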
4343 static int
4344 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4345 {
4346 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4347 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4348 	uint32_t reg;
4349 	int ret;
4350 
4351 	if (!lut)
4352 		return -EINVAL;
4353 
4354 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4355 		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4356 					  vsi->type != I40E_VSI_SRIOV,
4357 					  lut, lut_size);
4358 		if (ret) {
4359 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4360 			return ret;
4361 		}
4362 	} else {
4363 		uint32_t *lut_dw = (uint32_t *)lut;
4364 		uint16_t i, lut_size_dw = lut_size / 4;
4365 
4366 		if (vsi->type == I40E_VSI_SRIOV) {
4367 			for (i = 0; i < lut_size_dw; i++) {
4368 				reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4369 				lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4370 			}
4371 		} else {
4372 			for (i = 0; i < lut_size_dw; i++)
4373 				lut_dw[i] = I40E_READ_REG(hw,
4374 							  I40E_PFQF_HLUT(i));
4375 		}
4376 	}
4377 
4378 	return 0;
4379 }
4380 
4381 int
4382 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4383 {
4384 	struct i40e_pf *pf;
4385 	struct i40e_hw *hw;
4386 
4387 	if (!vsi || !lut)
4388 		return -EINVAL;
4389 
4390 	pf = I40E_VSI_TO_PF(vsi);
4391 	hw = I40E_VSI_TO_HW(vsi);
4392 
4393 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4394 		enum i40e_status_code status;
4395 
4396 		status = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4397 					     vsi->type != I40E_VSI_SRIOV,
4398 					     lut, lut_size);
4399 		if (status) {
4400 			PMD_DRV_LOG(ERR,
4401 				    "Failed to update RSS lookup table, error status: %d",
4402 				    status);
4403 			return -EIO;
4404 		}
4405 	} else {
4406 		uint32_t *lut_dw = (uint32_t *)lut;
4407 		uint16_t i, lut_size_dw = lut_size / 4;
4408 
4409 		if (vsi->type == I40E_VSI_SRIOV) {
4410 			for (i = 0; i < lut_size_dw; i++)
4411 				I40E_WRITE_REG(
4412 					hw,
4413 					I40E_VFQF_HLUT1(i, vsi->user_param),
4414 					lut_dw[i]);
4415 		} else {
4416 			for (i = 0; i < lut_size_dw; i++)
4417 				I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4418 					       lut_dw[i]);
4419 		}
4420 		I40E_WRITE_FLUSH(hw);
4421 	}
4422 
4423 	return 0;
4424 }
4425 
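/*
 * Update the RSS redirection table. reta_conf is an array of 64-entry
 * groups: for entry i, idx = i / RTE_RETA_GROUP_SIZE selects the group and
 * shift = i % RTE_RETA_GROUP_SIZE the slot within it (e.g. entry 130 lives
 * in reta_conf[2], slot 2, assuming the usual 64-entry group size). Only
 * entries whose mask bit is set are modified; the rest keep their current
 * value because the existing LUT is read back first.
 */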
4426 static int
4427 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4428 			 struct rte_eth_rss_reta_entry64 *reta_conf,
4429 			 uint16_t reta_size)
4430 {
4431 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4432 	uint16_t i, lut_size = pf->hash_lut_size;
4433 	uint16_t idx, shift;
4434 	uint8_t *lut;
4435 	int ret;
4436 
4437 	if (reta_size != lut_size ||
4438 		reta_size > ETH_RSS_RETA_SIZE_512) {
4439 		PMD_DRV_LOG(ERR,
4440 			"The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
4441 			reta_size, lut_size);
4442 		return -EINVAL;
4443 	}
4444 
4445 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4446 	if (!lut) {
4447 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4448 		return -ENOMEM;
4449 	}
4450 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4451 	if (ret)
4452 		goto out;
4453 	for (i = 0; i < reta_size; i++) {
4454 		idx = i / RTE_RETA_GROUP_SIZE;
4455 		shift = i % RTE_RETA_GROUP_SIZE;
4456 		if (reta_conf[idx].mask & (1ULL << shift))
4457 			lut[i] = reta_conf[idx].reta[shift];
4458 	}
4459 	ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4460 
4461 	pf->adapter->rss_reta_updated = 1;
4462 
4463 out:
4464 	rte_free(lut);
4465 
4466 	return ret;
4467 }
4468 
4469 static int
4470 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4471 			struct rte_eth_rss_reta_entry64 *reta_conf,
4472 			uint16_t reta_size)
4473 {
4474 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4475 	uint16_t i, lut_size = pf->hash_lut_size;
4476 	uint16_t idx, shift;
4477 	uint8_t *lut;
4478 	int ret;
4479 
4480 	if (reta_size != lut_size ||
4481 		reta_size > ETH_RSS_RETA_SIZE_512) {
4482 		PMD_DRV_LOG(ERR,
4483 			"The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)",
4484 			reta_size, lut_size);
4485 		return -EINVAL;
4486 	}
4487 
4488 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4489 	if (!lut) {
4490 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4491 		return -ENOMEM;
4492 	}
4493 
4494 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4495 	if (ret)
4496 		goto out;
4497 	for (i = 0; i < reta_size; i++) {
4498 		idx = i / RTE_RETA_GROUP_SIZE;
4499 		shift = i % RTE_RETA_GROUP_SIZE;
4500 		if (reta_conf[idx].mask & (1ULL << shift))
4501 			reta_conf[idx].reta[shift] = lut[i];
4502 	}
4503 
4504 out:
4505 	rte_free(lut);
4506 
4507 	return ret;
4508 }
4509 
4510 /**
4511  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4512  * @hw:   pointer to the HW structure
4513  * @mem:  pointer to mem struct to fill out
4514  * @size: size of memory requested
4515  * @alignment: what to align the allocation to
4516  **/
4517 enum i40e_status_code
4518 i40e_allocate_dma_mem_d(__rte_unused struct i40e_hw *hw,
4519 			struct i40e_dma_mem *mem,
4520 			u64 size,
4521 			u32 alignment)
4522 {
4523 	static uint64_t i40e_dma_memzone_id;
4524 	const struct rte_memzone *mz = NULL;
4525 	char z_name[RTE_MEMZONE_NAMESIZE];
4526 
4527 	if (!mem)
4528 		return I40E_ERR_PARAM;
4529 
4530 	snprintf(z_name, sizeof(z_name), "i40e_dma_%" PRIu64,
4531 		__atomic_fetch_add(&i40e_dma_memzone_id, 1, __ATOMIC_RELAXED));
4532 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4533 			RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4534 	if (!mz)
4535 		return I40E_ERR_NO_MEMORY;
4536 
4537 	mem->size = size;
4538 	mem->va = mz->addr;
4539 	mem->pa = mz->iova;
4540 	mem->zone = (const void *)mz;
4541 	PMD_DRV_LOG(DEBUG,
4542 		"memzone %s allocated with physical address: %"PRIu64,
4543 		mz->name, mem->pa);
4544 
4545 	return I40E_SUCCESS;
4546 }
4547 
4548 /**
4549  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4550  * @hw:   pointer to the HW structure
4551  * @mem:  ptr to mem struct to free
4552  **/
4553 enum i40e_status_code
4554 i40e_free_dma_mem_d(__rte_unused struct i40e_hw *hw,
4555 		    struct i40e_dma_mem *mem)
4556 {
4557 	if (!mem)
4558 		return I40E_ERR_PARAM;
4559 
4560 	PMD_DRV_LOG(DEBUG,
4561 		"memzone %s to be freed with physical address: %"PRIu64,
4562 		((const struct rte_memzone *)mem->zone)->name, mem->pa);
4563 	rte_memzone_free((const struct rte_memzone *)mem->zone);
4564 	mem->zone = NULL;
4565 	mem->va = NULL;
4566 	mem->pa = (u64)0;
4567 
4568 	return I40E_SUCCESS;
4569 }
4570 
4571 /**
4572  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4573  * @hw:   pointer to the HW structure
4574  * @mem:  pointer to mem struct to fill out
4575  * @size: size of memory requested
4576  **/
4577 enum i40e_status_code
4578 i40e_allocate_virt_mem_d(__rte_unused struct i40e_hw *hw,
4579 			 struct i40e_virt_mem *mem,
4580 			 u32 size)
4581 {
4582 	if (!mem)
4583 		return I40E_ERR_PARAM;
4584 
4585 	mem->size = size;
4586 	mem->va = rte_zmalloc("i40e", size, 0);
4587 
4588 	if (mem->va)
4589 		return I40E_SUCCESS;
4590 	else
4591 		return I40E_ERR_NO_MEMORY;
4592 }
4593 
4594 /**
4595  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4596  * @hw:   pointer to the HW structure
4597  * @mem:  pointer to mem struct to free
4598  **/
4599 enum i40e_status_code
4600 i40e_free_virt_mem_d(__rte_unused struct i40e_hw *hw,
4601 		     struct i40e_virt_mem *mem)
4602 {
4603 	if (!mem)
4604 		return I40E_ERR_PARAM;
4605 
4606 	rte_free(mem->va);
4607 	mem->va = NULL;
4608 
4609 	return I40E_SUCCESS;
4610 }
4611 
4612 void
4613 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4614 {
4615 	rte_spinlock_init(&sp->spinlock);
4616 }
4617 
4618 void
4619 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4620 {
4621 	rte_spinlock_lock(&sp->spinlock);
4622 }
4623 
4624 void
4625 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4626 {
4627 	rte_spinlock_unlock(&sp->spinlock);
4628 }
4629 
4630 void
4631 i40e_destroy_spinlock_d(__rte_unused struct i40e_spinlock *sp)
4632 {
4633 	return;
4634 }
4635 
4636 /**
4637  * Get the hardware capabilities, which will be parsed
4638  * and saved into struct i40e_hw.
4639  */
4640 static int
4641 i40e_get_cap(struct i40e_hw *hw)
4642 {
4643 	struct i40e_aqc_list_capabilities_element_resp *buf;
4644 	uint16_t len, size = 0;
4645 	int ret;
4646 
4647 	/* Calculate a buffer size large enough to temporarily hold the response data */
4648 	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4649 						I40E_MAX_CAP_ELE_NUM;
4650 	buf = rte_zmalloc("i40e", len, 0);
4651 	if (!buf) {
4652 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4653 		return I40E_ERR_NO_MEMORY;
4654 	}
4655 
4656 	/* Get, parse the capabilities and save it to hw */
4657 	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4658 			i40e_aqc_opc_list_func_capabilities, NULL);
4659 	if (ret != I40E_SUCCESS)
4660 		PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4661 
4662 	/* Free the temporary buffer after being used */
4663 	rte_free(buf);
4664 
4665 	return ret;
4666 }
4667 
4668 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF	4
4669 
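/*
 * Devargs handler for the ETH_I40E_QUEUE_NUM_PER_VF_ARG argument. Accepted
 * values are powers of two no larger than I40E_MAX_QP_NUM_PER_VF; anything
 * else leaves the previously configured maximum untouched.
 */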
4670 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4671 		const char *value,
4672 		void *opaque)
4673 {
4674 	struct i40e_pf *pf;
4675 	unsigned long num;
4676 	char *end;
4677 
4678 	pf = (struct i40e_pf *)opaque;
4679 	RTE_SET_USED(key);
4680 
4681 	errno = 0;
4682 	num = strtoul(value, &end, 0);
4683 	if (errno != 0 || end == value || *end != 0) {
4684 		PMD_DRV_LOG(WARNING, "Invalid VF queue number = %s, keeping "
4685 			    "the current value = %hu", value, pf->vf_nb_qp_max);
4686 		return -(EINVAL);
4687 	}
4688 
4689 	if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4690 		pf->vf_nb_qp_max = (uint16_t)num;
4691 	else
4692 		/* Return 0 so a later valid occurrence of the same argument still works */
4693 		PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu, it must be "
4694 			    "a power of 2 and no greater than 16; keeping "
4695 			    "the current value = %hu", num, pf->vf_nb_qp_max);
4696 
4697 	return 0;
4698 }
4699 
4700 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4701 {
4702 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4703 	struct rte_kvargs *kvlist;
4704 	int kvargs_count;
4705 
4706 	/* set default queue number per VF as 4 */
4707 	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4708 
4709 	if (dev->device->devargs == NULL)
4710 		return 0;
4711 
4712 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4713 	if (kvlist == NULL)
4714 		return -(EINVAL);
4715 
4716 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4717 	if (!kvargs_count) {
4718 		rte_kvargs_free(kvlist);
4719 		return 0;
4720 	}
4721 
4722 	if (kvargs_count > 1)
4723 		PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; only "
4724 			    "the first invalid or the last valid one is used!",
4725 			    ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4726 
4727 	rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4728 			   i40e_pf_parse_vf_queue_number_handler, pf);
4729 
4730 	rte_kvargs_free(kvlist);
4731 
4732 	return 0;
4733 }
4734 
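/*
 * Partition the PF's queue and VSI budget. Queues are laid out as contiguous
 * ranges in the order FDIR, LAN, VF and VMDq (tracked by the corresponding
 * *_qp_offset fields), and the resulting totals are validated against the
 * number of tx queues and VSIs reported by the hardware capabilities.
 */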
4735 static int
4736 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4737 {
4738 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4739 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4740 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4741 	uint16_t qp_count = 0, vsi_count = 0;
4742 
4743 	if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4744 		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4745 		return -EINVAL;
4746 	}
4747 
4748 	i40e_pf_config_vf_rxq_number(dev);
4749 
4750 	/* Initialize the link flow control (LFC) parameters */
4751 	pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4752 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4753 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4754 
4755 	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4756 	pf->max_num_vsi = hw->func_caps.num_vsis;
4757 	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4758 	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4759 
4760 	/* FDir queue/VSI allocation */
4761 	pf->fdir_qp_offset = 0;
4762 	if (hw->func_caps.fd) {
4763 		pf->flags |= I40E_FLAG_FDIR;
4764 		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4765 	} else {
4766 		pf->fdir_nb_qps = 0;
4767 	}
4768 	qp_count += pf->fdir_nb_qps;
4769 	vsi_count += 1;
4770 
4771 	/* LAN queue/VSI allocation */
4772 	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4773 	if (!hw->func_caps.rss) {
4774 		pf->lan_nb_qps = 1;
4775 	} else {
4776 		pf->flags |= I40E_FLAG_RSS;
4777 		if (hw->mac.type == I40E_MAC_X722)
4778 			pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4779 		pf->lan_nb_qps = pf->lan_nb_qp_max;
4780 	}
4781 	qp_count += pf->lan_nb_qps;
4782 	vsi_count += 1;
4783 
4784 	/* VF queue/VSI allocation */
4785 	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4786 	if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4787 		pf->flags |= I40E_FLAG_SRIOV;
4788 		pf->vf_nb_qps = pf->vf_nb_qp_max;
4789 		pf->vf_num = pci_dev->max_vfs;
4790 		PMD_DRV_LOG(DEBUG,
4791 			"%u VF VSIs, %u queues per VF VSI, in total %u queues",
4792 			pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4793 	} else {
4794 		pf->vf_nb_qps = 0;
4795 		pf->vf_num = 0;
4796 	}
4797 	qp_count += pf->vf_nb_qps * pf->vf_num;
4798 	vsi_count += pf->vf_num;
4799 
4800 	/* VMDq queue/VSI allocation */
4801 	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4802 	pf->vmdq_nb_qps = 0;
4803 	pf->max_nb_vmdq_vsi = 0;
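	/*
	 * VMDq gets whatever queue pairs and VSIs remain after the FDIR,
	 * LAN and VF allocations, capped at ETH_64_POOLS pools.
	 */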
4804 	if (hw->func_caps.vmdq) {
4805 		if (qp_count < hw->func_caps.num_tx_qp &&
4806 			vsi_count < hw->func_caps.num_vsis) {
4807 			pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4808 				qp_count) / pf->vmdq_nb_qp_max;
4809 
4810 			/* Limit the maximum number of VMDq vsi to the maximum
4811 			 * ethdev can support
4812 			 */
4813 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4814 				hw->func_caps.num_vsis - vsi_count);
4815 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4816 				ETH_64_POOLS);
4817 			if (pf->max_nb_vmdq_vsi) {
4818 				pf->flags |= I40E_FLAG_VMDQ;
4819 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4820 				PMD_DRV_LOG(DEBUG,
4821 					"%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4822 					pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4823 					pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4824 			} else {
4825 				PMD_DRV_LOG(INFO,
4826 					"No enough queues left for VMDq");
4827 			}
4828 		} else {
4829 			PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4830 		}
4831 	}
4832 	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4833 	vsi_count += pf->max_nb_vmdq_vsi;
4834 
4835 	if (hw->func_caps.dcb)
4836 		pf->flags |= I40E_FLAG_DCB;
4837 
4838 	if (qp_count > hw->func_caps.num_tx_qp) {
4839 		PMD_DRV_LOG(ERR,
4840 			"Failed to allocate %u queues, which exceeds the hardware maximum %u",
4841 			qp_count, hw->func_caps.num_tx_qp);
4842 		return -EINVAL;
4843 	}
4844 	if (vsi_count > hw->func_caps.num_vsis) {
4845 		PMD_DRV_LOG(ERR,
4846 			"Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4847 			vsi_count, hw->func_caps.num_vsis);
4848 		return -EINVAL;
4849 	}
4850 
4851 	return 0;
4852 }
4853 
4854 static int
4855 i40e_pf_get_switch_config(struct i40e_pf *pf)
4856 {
4857 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4858 	struct i40e_aqc_get_switch_config_resp *switch_config;
4859 	struct i40e_aqc_switch_config_element_resp *element;
4860 	uint16_t start_seid = 0, num_reported;
4861 	int ret;
4862 
4863 	switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4864 			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4865 	if (!switch_config) {
4866 		PMD_DRV_LOG(ERR, "Failed to allocated memory");
4867 		return -ENOMEM;
4868 	}
4869 
4870 	/* Get the switch configurations */
4871 	ret = i40e_aq_get_switch_config(hw, switch_config,
4872 		I40E_AQ_LARGE_BUF, &start_seid, NULL);
4873 	if (ret != I40E_SUCCESS) {
4874 		PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4875 		goto fail;
4876 	}
4877 	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4878 	if (num_reported != 1) { /* The number should be 1 */
4879 		PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4880 		goto fail;
4881 	}
4882 
4883 	/* Parse the switch configuration elements */
4884 	element = &(switch_config->element[0]);
4885 	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4886 		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4887 		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4888 	} else
4889 		PMD_DRV_LOG(INFO, "Unknown element type");
4890 
4891 fail:
4892 	rte_free(switch_config);
4893 
4894 	return ret;
4895 }
4896 
4897 static int
4898 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
4899 			uint32_t num)
4900 {
4901 	struct pool_entry *entry;
4902 
4903 	if (pool == NULL || num == 0)
4904 		return -EINVAL;
4905 
4906 	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4907 	if (entry == NULL) {
4908 		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4909 		return -ENOMEM;
4910 	}
4911 
4912 	/* queue heap initialize */
4913 	pool->num_free = num;
4914 	pool->num_alloc = 0;
4915 	pool->base = base;
4916 	LIST_INIT(&pool->alloc_list);
4917 	LIST_INIT(&pool->free_list);
4918 
4919 	/* Initialize element  */
4920 	entry->base = 0;
4921 	entry->len = num;
4922 
4923 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
4924 	return 0;
4925 }
4926 
4927 static void
4928 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4929 {
4930 	struct pool_entry *entry, *next_entry;
4931 
4932 	if (pool == NULL)
4933 		return;
4934 
4935 	for (entry = LIST_FIRST(&pool->alloc_list);
4936 			entry && (next_entry = LIST_NEXT(entry, next), 1);
4937 			entry = next_entry) {
4938 		LIST_REMOVE(entry, next);
4939 		rte_free(entry);
4940 	}
4941 
4942 	for (entry = LIST_FIRST(&pool->free_list);
4943 			entry && (next_entry = LIST_NEXT(entry, next), 1);
4944 			entry = next_entry) {
4945 		LIST_REMOVE(entry, next);
4946 		rte_free(entry);
4947 	}
4948 
4949 	pool->num_free = 0;
4950 	pool->num_alloc = 0;
4951 	pool->base = 0;
4952 	LIST_INIT(&pool->alloc_list);
4953 	LIST_INIT(&pool->free_list);
4954 }
4955 
4956 static int
4957 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4958 		       uint32_t base)
4959 {
4960 	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4961 	uint32_t pool_offset;
4962 	uint16_t len;
4963 	int insert;
4964 
4965 	if (pool == NULL) {
4966 		PMD_DRV_LOG(ERR, "Invalid parameter");
4967 		return -EINVAL;
4968 	}
4969 
4970 	pool_offset = base - pool->base;
4971 	/* Lookup in alloc list */
4972 	LIST_FOREACH(entry, &pool->alloc_list, next) {
4973 		if (entry->base == pool_offset) {
4974 			valid_entry = entry;
4975 			LIST_REMOVE(entry, next);
4976 			break;
4977 		}
4978 	}
4979 
4980 	/* Not found, return */
4981 	if (valid_entry == NULL) {
4982 		PMD_DRV_LOG(ERR, "Failed to find entry");
4983 		return -EINVAL;
4984 	}
4985 
4986 	/**
4987 	 * Found it; move it to the free list and try to merge.
4988 	 * To make merging easier, the free list is always kept sorted by queue base.
4989 	 * Find the adjacent previous and next entries.
4990 	 */
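	/*
	 * For example, freeing [8..11] next to a free entry [4..7] merges
	 * them into [4..11]; freeing [8..11] next to [12..15] merges them
	 * into [8..15].
	 */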
4991 	prev = next = NULL;
4992 	LIST_FOREACH(entry, &pool->free_list, next) {
4993 		if (entry->base > valid_entry->base) {
4994 			next = entry;
4995 			break;
4996 		}
4997 		prev = entry;
4998 	}
4999 
5000 	insert = 0;
5001 	len = valid_entry->len;
5002 	/* Try to merge with the next one */
5003 	if (next != NULL) {
5004 		/* Merge with next one */
5005 		if (valid_entry->base + len == next->base) {
5006 			next->base = valid_entry->base;
5007 			next->len += len;
5008 			rte_free(valid_entry);
5009 			valid_entry = next;
5010 			insert = 1;
5011 		}
5012 	}
5013 
5014 	if (prev != NULL) {
5015 		/* Merge with previous one */
5016 		if (prev->base + prev->len == valid_entry->base) {
5017 			prev->len += len;
5018 			/* If it merged with the next one, remove that node */
5019 			if (insert == 1) {
5020 				LIST_REMOVE(valid_entry, next);
5021 				rte_free(valid_entry);
5022 				valid_entry = NULL;
5023 			} else {
5024 				rte_free(valid_entry);
5025 				valid_entry = NULL;
5026 				insert = 1;
5027 			}
5028 		}
5029 	}
5030 
5031 	/* No entry found to merge with, insert it */
5032 	if (insert == 0) {
5033 		if (prev != NULL)
5034 			LIST_INSERT_AFTER(prev, valid_entry, next);
5035 		else if (next != NULL)
5036 			LIST_INSERT_BEFORE(next, valid_entry, next);
5037 		else /* It's empty list, insert to head */
5038 			LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5039 	}
5040 
5041 	pool->num_free += len;
5042 	pool->num_alloc -= len;
5043 
5044 	return 0;
5045 }
5046 
5047 static int
5048 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5049 		       uint16_t num)
5050 {
5051 	struct pool_entry *entry, *valid_entry;
5052 
5053 	if (pool == NULL || num == 0) {
5054 		PMD_DRV_LOG(ERR, "Invalid parameter");
5055 		return -EINVAL;
5056 	}
5057 
5058 	if (pool->num_free < num) {
5059 		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5060 			    num, pool->num_free);
5061 		return -ENOMEM;
5062 	}
5063 
5064 	valid_entry = NULL;
5065 	/* Look up in the free list and find the best-fit entry */
5066 	LIST_FOREACH(entry, &pool->free_list, next) {
5067 		if (entry->len >= num) {
5068 			/* Find best one */
5069 			if (entry->len == num) {
5070 				valid_entry = entry;
5071 				break;
5072 			}
5073 			if (valid_entry == NULL || valid_entry->len > entry->len)
5074 				valid_entry = entry;
5075 		}
5076 	}
5077 
5078 	/* No entry found to satisfy the request, return */
5079 	if (valid_entry == NULL) {
5080 		PMD_DRV_LOG(ERR, "No valid entry found");
5081 		return -ENOMEM;
5082 	}
5083 	/**
5084 	 * The entry has exactly the requested number of queues;
5085 	 * remove it from the free_list.
5086 	 */
5087 	if (valid_entry->len == num) {
5088 		LIST_REMOVE(valid_entry, next);
5089 	} else {
5090 		/**
5091 		 * The entry has more queues than requested;
5092 		 * create a new entry for the alloc_list and adjust the
5093 		 * base and length of the entry remaining in the free_list.
5094 		 */
5095 		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5096 		if (entry == NULL) {
5097 			PMD_DRV_LOG(ERR,
5098 				"Failed to allocate memory for resource pool");
5099 			return -ENOMEM;
5100 		}
5101 		entry->base = valid_entry->base;
5102 		entry->len = num;
5103 		valid_entry->base += num;
5104 		valid_entry->len -= num;
5105 		valid_entry = entry;
5106 	}
5107 
5108 	/* Insert it into alloc list, not sorted */
5109 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5110 
5111 	pool->num_free -= valid_entry->len;
5112 	pool->num_alloc += valid_entry->len;
5113 
5114 	return valid_entry->base + pool->base;
5115 }
5116 
5117 /**
5118  * bitmap_is_subset - Check whether src2 is a subset of src1
5119  **/
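/* For example, with src1 = 0x0F: src2 = 0x05 is a subset (returns non-zero),
 * while src2 = 0x10 is not (returns 0).
 */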
5120 static inline int
5121 bitmap_is_subset(uint8_t src1, uint8_t src2)
5122 {
5123 	return !((src1 ^ src2) & src2);
5124 }
5125 
5126 static enum i40e_status_code
5127 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5128 {
5129 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5130 
5131 	/* If DCB is not supported, only default TC is supported */
5132 	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5133 		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5134 		return I40E_NOT_SUPPORTED;
5135 	}
5136 
5137 	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5138 		PMD_DRV_LOG(ERR,
5139 			"Enabled TC map 0x%x not applicable to HW support 0x%x",
5140 			hw->func_caps.enabled_tcmap, enabled_tcmap);
5141 		return I40E_NOT_SUPPORTED;
5142 	}
5143 	return I40E_SUCCESS;
5144 }
5145 
5146 int
5147 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5148 				struct i40e_vsi_vlan_pvid_info *info)
5149 {
5150 	struct i40e_hw *hw;
5151 	struct i40e_vsi_context ctxt;
5152 	uint8_t vlan_flags = 0;
5153 	int ret;
5154 
5155 	if (vsi == NULL || info == NULL) {
5156 		PMD_DRV_LOG(ERR, "invalid parameters");
5157 		return I40E_ERR_PARAM;
5158 	}
5159 
5160 	if (info->on) {
5161 		vsi->info.pvid = info->config.pvid;
5162 		/**
5163 		 * If PVID insertion is enabled, only tagged packets are
5164 		 * allowed to be sent out.
5165 		 */
5166 		vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5167 				I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5168 	} else {
5169 		vsi->info.pvid = 0;
5170 		if (info->config.reject.tagged == 0)
5171 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5172 
5173 		if (info->config.reject.untagged == 0)
5174 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5175 	}
5176 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5177 					I40E_AQ_VSI_PVLAN_MODE_MASK);
5178 	vsi->info.port_vlan_flags |= vlan_flags;
5179 	vsi->info.valid_sections =
5180 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5181 	memset(&ctxt, 0, sizeof(ctxt));
5182 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5183 	ctxt.seid = vsi->seid;
5184 
5185 	hw = I40E_VSI_TO_HW(vsi);
5186 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5187 	if (ret != I40E_SUCCESS)
5188 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
5189 
5190 	return ret;
5191 }
5192 
5193 static int
5194 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5195 {
5196 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5197 	int i, ret;
5198 	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5199 
5200 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5201 	if (ret != I40E_SUCCESS)
5202 		return ret;
5203 
5204 	if (!vsi->seid) {
5205 		PMD_DRV_LOG(ERR, "seid not valid");
5206 		return -EINVAL;
5207 	}
5208 
5209 	memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5210 	tc_bw_data.tc_valid_bits = enabled_tcmap;
5211 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5212 		tc_bw_data.tc_bw_credits[i] =
5213 			(enabled_tcmap & (1 << i)) ? 1 : 0;
5214 
5215 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5216 	if (ret != I40E_SUCCESS) {
5217 		PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5218 		return ret;
5219 	}
5220 
5221 	rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5222 					sizeof(vsi->info.qs_handle));
5223 	return I40E_SUCCESS;
5224 }
5225 
5226 static enum i40e_status_code
5227 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5228 				 struct i40e_aqc_vsi_properties_data *info,
5229 				 uint8_t enabled_tcmap)
5230 {
5231 	enum i40e_status_code ret;
5232 	int i, total_tc = 0;
5233 	uint16_t qpnum_per_tc, bsf, qp_idx;
5234 
5235 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5236 	if (ret != I40E_SUCCESS)
5237 		return ret;
5238 
5239 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5240 		if (enabled_tcmap & (1 << i))
5241 			total_tc++;
5242 	if (total_tc == 0)
5243 		total_tc = 1;
5244 	vsi->enabled_tc = enabled_tcmap;
5245 
5246 	/* Number of queues per enabled TC */
5247 	qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5248 	qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5249 	bsf = rte_bsf32(qpnum_per_tc);
5250 
5251 	/* Adjust the queue number to actual queues that can be applied */
5252 	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5253 		vsi->nb_qps = qpnum_per_tc * total_tc;
5254 
5255 	/**
5256 	 * Configure TC and queue mapping parameters. For each enabled TC,
5257 	 * allocate qpnum_per_tc queues to that traffic class; disabled TCs
5258 	 * are served by the default queue.
5259 	 */
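	/*
	 * Each tc_mapping word packs the first queue index of the TC and
	 * its queue count expressed as a power-of-two exponent (bsf).
	 */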
5260 	qp_idx = 0;
5261 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5262 		if (vsi->enabled_tc & (1 << i)) {
5263 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5264 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5265 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5266 			qp_idx += qpnum_per_tc;
5267 		} else
5268 			info->tc_mapping[i] = 0;
5269 	}
5270 
5271 	/* Associate queue number with VSI */
5272 	if (vsi->type == I40E_VSI_SRIOV) {
5273 		info->mapping_flags |=
5274 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5275 		for (i = 0; i < vsi->nb_qps; i++)
5276 			info->queue_mapping[i] =
5277 				rte_cpu_to_le_16(vsi->base_queue + i);
5278 	} else {
5279 		info->mapping_flags |=
5280 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5281 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5282 	}
5283 	info->valid_sections |=
5284 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5285 
5286 	return I40E_SUCCESS;
5287 }
5288 
5289 static int
5290 i40e_veb_release(struct i40e_veb *veb)
5291 {
5292 	struct i40e_vsi *vsi;
5293 	struct i40e_hw *hw;
5294 
5295 	if (veb == NULL)
5296 		return -EINVAL;
5297 
5298 	if (!TAILQ_EMPTY(&veb->head)) {
5299 		PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5300 		return -EACCES;
5301 	}
5302 	/* associate_vsi field is NULL for floating VEB */
5303 	if (veb->associate_vsi != NULL) {
5304 		vsi = veb->associate_vsi;
5305 		hw = I40E_VSI_TO_HW(vsi);
5306 
5307 		vsi->uplink_seid = veb->uplink_seid;
5308 		vsi->veb = NULL;
5309 	} else {
5310 		veb->associate_pf->main_vsi->floating_veb = NULL;
5311 		hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5312 	}
5313 
5314 	i40e_aq_delete_element(hw, veb->seid, NULL);
5315 	rte_free(veb);
5316 	return I40E_SUCCESS;
5317 }
5318 
5319 /* Setup a veb */
5320 static struct i40e_veb *
5321 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5322 {
5323 	struct i40e_veb *veb;
5324 	int ret;
5325 	struct i40e_hw *hw;
5326 
5327 	if (pf == NULL) {
5328 		PMD_DRV_LOG(ERR,
5329 			    "veb setup failed, associated PF shouldn't null");
5330 		return NULL;
5331 	}
5332 	hw = I40E_PF_TO_HW(pf);
5333 
5334 	veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5335 	if (!veb) {
5336 		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5337 		goto fail;
5338 	}
5339 
5340 	veb->associate_vsi = vsi;
5341 	veb->associate_pf = pf;
5342 	TAILQ_INIT(&veb->head);
5343 	veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5344 
5345 	/* create floating veb if vsi is NULL */
5346 	if (vsi != NULL) {
5347 		ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5348 				      I40E_DEFAULT_TCMAP, false,
5349 				      &veb->seid, false, NULL);
5350 	} else {
5351 		ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5352 				      true, &veb->seid, false, NULL);
5353 	}
5354 
5355 	if (ret != I40E_SUCCESS) {
5356 		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5357 			    hw->aq.asq_last_status);
5358 		goto fail;
5359 	}
5360 	veb->enabled_tc = I40E_DEFAULT_TCMAP;
5361 
5362 	/* get statistics index */
5363 	ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5364 				&veb->stats_idx, NULL, NULL, NULL);
5365 	if (ret != I40E_SUCCESS) {
5366 		PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5367 			    hw->aq.asq_last_status);
5368 		goto fail;
5369 	}
5370 	/* Get VEB bandwidth, to be implemented */
5371 	/* Now associated vsi binding to the VEB, set uplink to this VEB */
5372 	if (vsi)
5373 		vsi->uplink_seid = veb->seid;
5374 
5375 	return veb;
5376 fail:
5377 	rte_free(veb);
5378 	return NULL;
5379 }
5380 
5381 int
5382 i40e_vsi_release(struct i40e_vsi *vsi)
5383 {
5384 	struct i40e_pf *pf;
5385 	struct i40e_hw *hw;
5386 	struct i40e_vsi_list *vsi_list;
5387 	void *temp;
5388 	int ret;
5389 	struct i40e_mac_filter *f;
5390 	uint16_t user_param;
5391 
5392 	if (!vsi)
5393 		return I40E_SUCCESS;
5394 
5395 	if (!vsi->adapter)
5396 		return -EFAULT;
5397 
5398 	user_param = vsi->user_param;
5399 
5400 	pf = I40E_VSI_TO_PF(vsi);
5401 	hw = I40E_VSI_TO_HW(vsi);
5402 
5403 	/* VSI has children attached, release the children first */
5404 	if (vsi->veb) {
5405 		RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5406 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5407 				return -1;
5408 		}
5409 		i40e_veb_release(vsi->veb);
5410 	}
5411 
5412 	if (vsi->floating_veb) {
5413 		RTE_TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head,
5414 			list, temp) {
5415 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5416 				return -1;
5417 		}
5418 	}
5419 
5420 	/* Remove all macvlan filters of the VSI */
5421 	i40e_vsi_remove_all_macvlan_filter(vsi);
5422 	RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5423 		rte_free(f);
5424 
5425 	if (vsi->type != I40E_VSI_MAIN &&
5426 	    ((vsi->type != I40E_VSI_SRIOV) ||
5427 	    !pf->floating_veb_list[user_param])) {
5428 		/* Remove vsi from parent's sibling list */
5429 		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5430 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5431 			return I40E_ERR_PARAM;
5432 		}
5433 		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5434 				&vsi->sib_vsi_list, list);
5435 
5436 		/* Remove all switch element of the VSI */
5437 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5438 		if (ret != I40E_SUCCESS)
5439 			PMD_DRV_LOG(ERR, "Failed to delete element");
5440 	}
5441 
5442 	if ((vsi->type == I40E_VSI_SRIOV) &&
5443 	    pf->floating_veb_list[user_param]) {
5444 		/* Remove vsi from parent's sibling list */
5445 		if (vsi->parent_vsi == NULL ||
5446 		    vsi->parent_vsi->floating_veb == NULL) {
5447 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5448 			return I40E_ERR_PARAM;
5449 		}
5450 		TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5451 			     &vsi->sib_vsi_list, list);
5452 
5453 		/* Remove all switch element of the VSI */
5454 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5455 		if (ret != I40E_SUCCESS)
5456 			PMD_DRV_LOG(ERR, "Failed to delete element");
5457 	}
5458 
5459 	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5460 
5461 	if (vsi->type != I40E_VSI_SRIOV)
5462 		i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5463 	rte_free(vsi);
5464 
5465 	return I40E_SUCCESS;
5466 }
5467 
5468 static int
5469 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5470 {
5471 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5472 	struct i40e_aqc_remove_macvlan_element_data def_filter;
5473 	struct i40e_mac_filter_info filter;
5474 	int ret;
5475 
5476 	if (vsi->type != I40E_VSI_MAIN)
5477 		return I40E_ERR_CONFIG;
5478 	memset(&def_filter, 0, sizeof(def_filter));
5479 	rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5480 					ETH_ADDR_LEN);
5481 	def_filter.vlan_tag = 0;
5482 	def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5483 				I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5484 	ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5485 	if (ret != I40E_SUCCESS) {
5486 		struct i40e_mac_filter *f;
5487 		struct rte_ether_addr *mac;
5488 
5489 		PMD_DRV_LOG(DEBUG,
5490 			    "Cannot remove the default macvlan filter");
5491 		/* The permanent MAC needs to be added to the mac list */
5492 		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5493 		if (f == NULL) {
5494 			PMD_DRV_LOG(ERR, "failed to allocate memory");
5495 			return I40E_ERR_NO_MEMORY;
5496 		}
5497 		mac = &f->mac_info.mac_addr;
5498 		rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5499 				ETH_ADDR_LEN);
5500 		f->mac_info.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5501 		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5502 		vsi->mac_num++;
5503 
5504 		return ret;
5505 	}
5506 	rte_memcpy(&filter.mac_addr,
5507 		(struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5508 	filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5509 	return i40e_vsi_add_mac(vsi, &filter);
5510 }
5511 
5512 /*
5513  * i40e_vsi_get_bw_config - Query VSI BW Information
5514  * @vsi: the VSI to be queried
5515  *
5516  * Returns 0 on success, negative value on failure
5517  */
5518 static enum i40e_status_code
5519 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5520 {
5521 	struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5522 	struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5523 	struct i40e_hw *hw = &vsi->adapter->hw;
5524 	i40e_status ret;
5525 	int i;
5526 	uint32_t bw_max;
5527 
5528 	memset(&bw_config, 0, sizeof(bw_config));
5529 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5530 	if (ret != I40E_SUCCESS) {
5531 		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5532 			    hw->aq.asq_last_status);
5533 		return ret;
5534 	}
5535 
5536 	memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5537 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5538 					&ets_sla_config, NULL);
5539 	if (ret != I40E_SUCCESS) {
5540 		PMD_DRV_LOG(ERR,
5541 			"VSI failed to get TC bandwdith configuration %u",
5542 			hw->aq.asq_last_status);
5543 		return ret;
5544 	}
5545 
5546 	/* store and print out BW info */
5547 	vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5548 	vsi->bw_info.bw_max = bw_config.max_bw;
5549 	PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5550 	PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5551 	bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5552 		    (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5553 		     I40E_16_BIT_WIDTH);
5554 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5555 		vsi->bw_info.bw_ets_share_credits[i] =
5556 				ets_sla_config.share_credits[i];
5557 		vsi->bw_info.bw_ets_credits[i] =
5558 				rte_le_to_cpu_16(ets_sla_config.credits[i]);
5559 		/* 4 bits per TC, 4th bit is reserved */
5560 		vsi->bw_info.bw_ets_max[i] =
5561 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5562 				  RTE_LEN2MASK(3, uint8_t));
5563 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5564 			    vsi->bw_info.bw_ets_share_credits[i]);
5565 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5566 			    vsi->bw_info.bw_ets_credits[i]);
5567 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5568 			    vsi->bw_info.bw_ets_max[i]);
5569 	}
5570 
5571 	return I40E_SUCCESS;
5572 }
5573 
5574 /* i40e_enable_pf_lb
5575  * @pf: pointer to the pf structure
5576  *
5577  * allow loopback on pf
5578  */
5579 static inline void
5580 i40e_enable_pf_lb(struct i40e_pf *pf)
5581 {
5582 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5583 	struct i40e_vsi_context ctxt;
5584 	int ret;
5585 
5586 	/* Use the FW API if FW >= v5.0 */
5587 	if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5588 		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5589 		return;
5590 	}
5591 
5592 	memset(&ctxt, 0, sizeof(ctxt));
5593 	ctxt.seid = pf->main_vsi_seid;
5594 	ctxt.pf_num = hw->pf_id;
5595 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5596 	if (ret) {
5597 		PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5598 			    ret, hw->aq.asq_last_status);
5599 		return;
5600 	}
5601 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5602 	ctxt.info.valid_sections =
5603 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5604 	ctxt.info.switch_id |=
5605 		rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5606 
5607 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5608 	if (ret)
5609 		PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5610 			    hw->aq.asq_last_status);
5611 }
5612 
5613 /* Setup a VSI */
5614 struct i40e_vsi *
5615 i40e_vsi_setup(struct i40e_pf *pf,
5616 	       enum i40e_vsi_type type,
5617 	       struct i40e_vsi *uplink_vsi,
5618 	       uint16_t user_param)
5619 {
5620 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5621 	struct i40e_vsi *vsi;
5622 	struct i40e_mac_filter_info filter;
5623 	int ret;
5624 	struct i40e_vsi_context ctxt;
5625 	struct rte_ether_addr broadcast =
5626 		{.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5627 
5628 	if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5629 	    uplink_vsi == NULL) {
5630 		PMD_DRV_LOG(ERR,
5631 			"VSI setup failed, VSI link shouldn't be NULL");
5632 		return NULL;
5633 	}
5634 
5635 	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5636 		PMD_DRV_LOG(ERR,
5637 			"VSI setup failed, MAIN VSI uplink VSI should be NULL");
5638 		return NULL;
5639 	}
5640 
5641 	/* Two situations:
5642 	 * 1. type is not MAIN and uplink vsi is not NULL:
5643 	 *    if the uplink vsi has not set up a VEB yet, create one under its veb field.
5644 	 * 2. type is SRIOV and the uplink is NULL:
5645 	 *    if the floating VEB is NULL, create one under the floating_veb field.
5646 	 */
5647 
5648 	if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5649 	    uplink_vsi->veb == NULL) {
5650 		uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5651 
5652 		if (uplink_vsi->veb == NULL) {
5653 			PMD_DRV_LOG(ERR, "VEB setup failed");
5654 			return NULL;
5655 		}
5656 		/* set ALLOWLOOPBACK on pf when the veb is created */
5657 		i40e_enable_pf_lb(pf);
5658 	}
5659 
5660 	if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5661 	    pf->main_vsi->floating_veb == NULL) {
5662 		pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5663 
5664 		if (pf->main_vsi->floating_veb == NULL) {
5665 			PMD_DRV_LOG(ERR, "VEB setup failed");
5666 			return NULL;
5667 		}
5668 	}
5669 
5670 	vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5671 	if (!vsi) {
5672 		PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5673 		return NULL;
5674 	}
5675 	TAILQ_INIT(&vsi->mac_list);
5676 	vsi->type = type;
5677 	vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5678 	vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5679 	vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5680 	vsi->user_param = user_param;
5681 	vsi->vlan_anti_spoof_on = 0;
5682 	vsi->vlan_filter_on = 0;
5683 	/* Allocate queues */
5684 	switch (vsi->type) {
5685 	case I40E_VSI_MAIN  :
5686 		vsi->nb_qps = pf->lan_nb_qps;
5687 		break;
5688 	case I40E_VSI_SRIOV :
5689 		vsi->nb_qps = pf->vf_nb_qps;
5690 		break;
5691 	case I40E_VSI_VMDQ2:
5692 		vsi->nb_qps = pf->vmdq_nb_qps;
5693 		break;
5694 	case I40E_VSI_FDIR:
5695 		vsi->nb_qps = pf->fdir_nb_qps;
5696 		break;
5697 	default:
5698 		goto fail_mem;
5699 	}
5700 	/*
5701 	 * The filter status descriptor is reported on rx queue 0,
5702 	 * while the tx queue used for fdir filter programming has no
5703 	 * such constraint and may be any queue.
5704 	 * To keep it simple, the FDIR VSI always uses queue pair 0.
5705 	 * To guarantee that, queue allocation must be done before
5706 	 * this function is called.
5707 	 */
5708 	if (type != I40E_VSI_FDIR) {
5709 		ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5710 		if (ret < 0) {
5711 			PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5712 					vsi->seid, ret);
5713 			goto fail_mem;
5714 		}
5715 		vsi->base_queue = ret;
5716 	} else
5717 		vsi->base_queue = I40E_FDIR_QUEUE_ID;
5718 
5719 	/* VF has MSIX interrupt in VF range, don't allocate here */
5720 	if (type == I40E_VSI_MAIN) {
5721 		if (pf->support_multi_driver) {
5722 			/* If multi-driver support is enabled, INT0 must be used
5723 			 * instead of allocating from the msix pool. The msix pool
5724 			 * starts from INT1, so it is fine to just set msix_intr to 0
5725 			 * and nb_msix to 1 without calling i40e_res_pool_alloc.
5726 			 */
5727 			vsi->msix_intr = 0;
5728 			vsi->nb_msix = 1;
5729 		} else {
5730 			ret = i40e_res_pool_alloc(&pf->msix_pool,
5731 						  RTE_MIN(vsi->nb_qps,
5732 						     RTE_MAX_RXTX_INTR_VEC_ID));
5733 			if (ret < 0) {
5734 				PMD_DRV_LOG(ERR,
5735 					    "VSI MAIN %d get heap failed %d",
5736 					    vsi->seid, ret);
5737 				goto fail_queue_alloc;
5738 			}
5739 			vsi->msix_intr = ret;
5740 			vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5741 					       RTE_MAX_RXTX_INTR_VEC_ID);
5742 		}
5743 	} else if (type != I40E_VSI_SRIOV) {
5744 		ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5745 		if (ret < 0) {
5746 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5747 			if (type != I40E_VSI_FDIR)
5748 				goto fail_queue_alloc;
5749 			vsi->msix_intr = 0;
5750 			vsi->nb_msix = 0;
5751 		} else {
5752 			vsi->msix_intr = ret;
5753 			vsi->nb_msix = 1;
5754 		}
5755 	} else {
5756 		vsi->msix_intr = 0;
5757 		vsi->nb_msix = 0;
5758 	}
5759 
5760 	/* Add VSI */
5761 	if (type == I40E_VSI_MAIN) {
5762 		/* For main VSI, no need to add since it's default one */
5763 		vsi->uplink_seid = pf->mac_seid;
5764 		vsi->seid = pf->main_vsi_seid;
5765 		/* Bind queues with specific MSIX interrupt */
5766 		/**
5767 		 * At least 2 interrupts are needed: one for misc causes,
5768 		 * enabled from the OS side, and another for binding queue
5769 		 * interrupts from the device side only.
5770 		 */
5771 
5772 		/* Get default VSI parameters from hardware */
5773 		memset(&ctxt, 0, sizeof(ctxt));
5774 		ctxt.seid = vsi->seid;
5775 		ctxt.pf_num = hw->pf_id;
5776 		ctxt.uplink_seid = vsi->uplink_seid;
5777 		ctxt.vf_num = 0;
5778 		ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5779 		if (ret != I40E_SUCCESS) {
5780 			PMD_DRV_LOG(ERR, "Failed to get VSI params");
5781 			goto fail_msix_alloc;
5782 		}
5783 		rte_memcpy(&vsi->info, &ctxt.info,
5784 			sizeof(struct i40e_aqc_vsi_properties_data));
5785 		vsi->vsi_id = ctxt.vsi_number;
5786 		vsi->info.valid_sections = 0;
5787 
5788 		/* Configure tc, enabled TC0 only */
5789 		if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5790 			I40E_SUCCESS) {
5791 			PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5792 			goto fail_msix_alloc;
5793 		}
5794 
5795 		/* TC, queue mapping */
5796 		memset(&ctxt, 0, sizeof(ctxt));
5797 		vsi->info.valid_sections |=
5798 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5799 		vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5800 					I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5801 		rte_memcpy(&ctxt.info, &vsi->info,
5802 			sizeof(struct i40e_aqc_vsi_properties_data));
5803 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5804 						I40E_DEFAULT_TCMAP);
5805 		if (ret != I40E_SUCCESS) {
5806 			PMD_DRV_LOG(ERR,
5807 				"Failed to configure TC queue mapping");
5808 			goto fail_msix_alloc;
5809 		}
5810 		ctxt.seid = vsi->seid;
5811 		ctxt.pf_num = hw->pf_id;
5812 		ctxt.uplink_seid = vsi->uplink_seid;
5813 		ctxt.vf_num = 0;
5814 
5815 		/* Update VSI parameters */
5816 		ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5817 		if (ret != I40E_SUCCESS) {
5818 			PMD_DRV_LOG(ERR, "Failed to update VSI params");
5819 			goto fail_msix_alloc;
5820 		}
5821 
5822 		rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5823 						sizeof(vsi->info.tc_mapping));
5824 		rte_memcpy(&vsi->info.queue_mapping,
5825 				&ctxt.info.queue_mapping,
5826 			sizeof(vsi->info.queue_mapping));
5827 		vsi->info.mapping_flags = ctxt.info.mapping_flags;
5828 		vsi->info.valid_sections = 0;
5829 
5830 		rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5831 				ETH_ADDR_LEN);
5832 
5833 		/**
5834 		 * Updating the default filter settings is necessary to prevent
5835 		 * reception of tagged packets.
5836 		 * Some old firmware configurations load a default macvlan
5837 		 * filter which accepts both tagged and untagged packets.
5838 		 * The update replaces it with a normal filter if needed.
5839 		 * For NVM 4.2.2 or later, the update is no longer needed:
5840 		 * firmware with correct configurations loads the default
5841 		 * macvlan filter, which is expected and cannot be removed.
5842 		 */
5843 		i40e_update_default_filter_setting(vsi);
5844 		i40e_config_qinq(hw, vsi);
5845 	} else if (type == I40E_VSI_SRIOV) {
5846 		memset(&ctxt, 0, sizeof(ctxt));
5847 		/**
5848 		 * For other VSIs, the uplink_seid equals the uplink VSI's
5849 		 * uplink_seid since they share the same VEB
5850 		 */
5851 		if (uplink_vsi == NULL)
5852 			vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5853 		else
5854 			vsi->uplink_seid = uplink_vsi->uplink_seid;
5855 		ctxt.pf_num = hw->pf_id;
5856 		ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5857 		ctxt.uplink_seid = vsi->uplink_seid;
5858 		ctxt.connection_type = 0x1;
5859 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5860 
5861 		/* Use the VEB configuration if FW >= v5.0 */
5862 		if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5863 			/* Configure switch ID */
5864 			ctxt.info.valid_sections |=
5865 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5866 			ctxt.info.switch_id =
5867 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5868 		}
5869 
5870 		/* Configure port/vlan */
5871 		ctxt.info.valid_sections |=
5872 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5873 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5874 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5875 						hw->func_caps.enabled_tcmap);
5876 		if (ret != I40E_SUCCESS) {
5877 			PMD_DRV_LOG(ERR,
5878 				"Failed to configure TC queue mapping");
5879 			goto fail_msix_alloc;
5880 		}
5881 
5882 		ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5883 		ctxt.info.valid_sections |=
5884 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5885 		/**
5886 		 * Since the VSI is not created yet, only configure its
5887 		 * parameters here; the VSI is added below.
5888 		 */
5889 
5890 		i40e_config_qinq(hw, vsi);
5891 	} else if (type == I40E_VSI_VMDQ2) {
5892 		memset(&ctxt, 0, sizeof(ctxt));
5893 		/*
5894 		 * For other VSIs, the uplink_seid equals the uplink VSI's
5895 		 * uplink_seid since they share the same VEB
5896 		 */
5897 		vsi->uplink_seid = uplink_vsi->uplink_seid;
5898 		ctxt.pf_num = hw->pf_id;
5899 		ctxt.vf_num = 0;
5900 		ctxt.uplink_seid = vsi->uplink_seid;
5901 		ctxt.connection_type = 0x1;
5902 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5903 
5904 		ctxt.info.valid_sections |=
5905 				rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5906 		/* user_param carries flag to enable loop back */
5907 		if (user_param) {
5908 			ctxt.info.switch_id =
5909 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5910 			ctxt.info.switch_id |=
5911 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5912 		}
5913 
5914 		/* Configure port/vlan */
5915 		ctxt.info.valid_sections |=
5916 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5917 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5918 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5919 						I40E_DEFAULT_TCMAP);
5920 		if (ret != I40E_SUCCESS) {
5921 			PMD_DRV_LOG(ERR,
5922 				"Failed to configure TC queue mapping");
5923 			goto fail_msix_alloc;
5924 		}
5925 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5926 		ctxt.info.valid_sections |=
5927 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5928 	} else if (type == I40E_VSI_FDIR) {
5929 		memset(&ctxt, 0, sizeof(ctxt));
5930 		vsi->uplink_seid = uplink_vsi->uplink_seid;
5931 		ctxt.pf_num = hw->pf_id;
5932 		ctxt.vf_num = 0;
5933 		ctxt.uplink_seid = vsi->uplink_seid;
5934 		ctxt.connection_type = 0x1;     /* regular data port */
5935 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5936 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5937 						I40E_DEFAULT_TCMAP);
5938 		if (ret != I40E_SUCCESS) {
5939 			PMD_DRV_LOG(ERR,
5940 				"Failed to configure TC queue mapping.");
5941 			goto fail_msix_alloc;
5942 		}
5943 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5944 		ctxt.info.valid_sections |=
5945 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5946 	} else {
5947 		PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
5948 		goto fail_msix_alloc;
5949 	}
5950 
5951 	if (vsi->type != I40E_VSI_MAIN) {
5952 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5953 		if (ret != I40E_SUCCESS) {
5954 			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5955 				    hw->aq.asq_last_status);
5956 			goto fail_msix_alloc;
5957 		}
5958 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5959 		vsi->info.valid_sections = 0;
5960 		vsi->seid = ctxt.seid;
5961 		vsi->vsi_id = ctxt.vsi_number;
5962 		vsi->sib_vsi_list.vsi = vsi;
5963 		if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5964 			TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5965 					  &vsi->sib_vsi_list, list);
5966 		} else {
5967 			TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5968 					  &vsi->sib_vsi_list, list);
5969 		}
5970 	}
5971 
5972 	/* MAC/VLAN configuration */
5973 	rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
5974 	filter.filter_type = I40E_MACVLAN_PERFECT_MATCH;
5975 
5976 	ret = i40e_vsi_add_mac(vsi, &filter);
5977 	if (ret != I40E_SUCCESS) {
5978 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5979 		goto fail_msix_alloc;
5980 	}
5981 
5982 	/* Get VSI BW information */
5983 	i40e_vsi_get_bw_config(vsi);
5984 	return vsi;
5985 fail_msix_alloc:
5986 	i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
5987 fail_queue_alloc:
5988 	i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
5989 fail_mem:
5990 	rte_free(vsi);
5991 	return NULL;
5992 }
5993 
5994 /* Configure vlan filter on or off */
5995 int
5996 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5997 {
5998 	int i, num;
5999 	struct i40e_mac_filter *f;
6000 	void *temp;
6001 	struct i40e_mac_filter_info *mac_filter;
6002 	enum i40e_mac_filter_type desired_filter;
6003 	int ret = I40E_SUCCESS;
6004 
6005 	if (on) {
6006 		/* Filter to match MAC and VLAN */
6007 		desired_filter = I40E_MACVLAN_PERFECT_MATCH;
6008 	} else {
6009 		/* Filter to match only MAC */
6010 		desired_filter = I40E_MAC_PERFECT_MATCH;
6011 	}
6012 
6013 	num = vsi->mac_num;
6014 
6015 	mac_filter = rte_zmalloc("mac_filter_info_data",
6016 				 num * sizeof(*mac_filter), 0);
6017 	if (mac_filter == NULL) {
6018 		PMD_DRV_LOG(ERR, "failed to allocate memory");
6019 		return I40E_ERR_NO_MEMORY;
6020 	}
6021 
6022 	i = 0;
6023 
6024 	/* Remove all existing MAC filters */
6025 	RTE_TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
6026 		mac_filter[i] = f->mac_info;
6027 		ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
6028 		if (ret) {
6029 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6030 				    on ? "enable" : "disable");
6031 			goto DONE;
6032 		}
6033 		i++;
6034 	}
6035 
6036 	/* Override with new filter */
6037 	for (i = 0; i < num; i++) {
6038 		mac_filter[i].filter_type = desired_filter;
6039 		ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
6040 		if (ret) {
6041 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6042 				    on ? "enable" : "disable");
6043 			goto DONE;
6044 		}
6045 	}
6046 
6047 DONE:
6048 	rte_free(mac_filter);
6049 	return ret;
6050 }
6051 
6052 /* Configure vlan stripping on or off */
6053 int
6054 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6055 {
6056 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6057 	struct i40e_vsi_context ctxt;
6058 	uint8_t vlan_flags;
6059 	int ret = I40E_SUCCESS;
6060 
6061 	/* Check if it has been already on or off */
6062 	if (vsi->info.valid_sections &
6063 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6064 		if (on) {
6065 			if ((vsi->info.port_vlan_flags &
6066 				I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6067 				return 0; /* already on */
6068 		} else {
6069 			if ((vsi->info.port_vlan_flags &
6070 				I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6071 				I40E_AQ_VSI_PVLAN_EMOD_MASK)
6072 				return 0; /* already off */
6073 		}
6074 	}
6075 
6076 	if (on)
6077 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6078 	else
6079 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6080 	vsi->info.valid_sections =
6081 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6082 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6083 	vsi->info.port_vlan_flags |= vlan_flags;
6084 	ctxt.seid = vsi->seid;
6085 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6086 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6087 	if (ret)
6088 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6089 			    on ? "enable" : "disable");
6090 
6091 	return ret;
6092 }
6093 
6094 static int
6095 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6096 {
6097 	struct rte_eth_dev_data *data = dev->data;
6098 	int ret;
6099 	int mask = 0;
6100 
6101 	/* Apply vlan offload setting */
6102 	mask = ETH_VLAN_STRIP_MASK |
6103 	       ETH_QINQ_STRIP_MASK |
6104 	       ETH_VLAN_FILTER_MASK |
6105 	       ETH_VLAN_EXTEND_MASK;
6106 	ret = i40e_vlan_offload_set(dev, mask);
6107 	if (ret) {
6108 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6109 		return ret;
6110 	}
6111 
6112 	/* Apply pvid setting */
6113 	ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6114 				data->dev_conf.txmode.hw_vlan_insert_pvid);
6115 	if (ret)
6116 		PMD_DRV_LOG(INFO, "Failed to update VSI params");
6117 
6118 	return ret;
6119 }
6120 
6121 static int
6122 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6123 {
6124 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6125 
6126 	return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6127 }
6128 
6129 static int
6130 i40e_update_flow_control(struct i40e_hw *hw)
6131 {
6132 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6133 	struct i40e_link_status link_status;
6134 	uint32_t rxfc = 0, txfc = 0, reg;
6135 	uint8_t an_info;
6136 	int ret;
6137 
6138 	memset(&link_status, 0, sizeof(link_status));
6139 	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6140 	if (ret != I40E_SUCCESS) {
6141 		PMD_DRV_LOG(ERR, "Failed to get link status information");
6142 		goto write_reg; /* Disable flow control */
6143 	}
6144 
6145 	an_info = hw->phy.link_info.an_info;
6146 	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6147 		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6148 		ret = I40E_ERR_NOT_READY;
6149 		goto write_reg; /* Disable flow control */
6150 	}
6151 	/**
6152 	 * If link auto negotiation is enabled, flow control needs to
6153 	 * be configured according to it
6154 	 */
6155 	switch (an_info & I40E_LINK_PAUSE_RXTX) {
6156 	case I40E_LINK_PAUSE_RXTX:
6157 		rxfc = 1;
6158 		txfc = 1;
6159 		hw->fc.current_mode = I40E_FC_FULL;
6160 		break;
6161 	case I40E_AQ_LINK_PAUSE_RX:
6162 		rxfc = 1;
6163 		hw->fc.current_mode = I40E_FC_RX_PAUSE;
6164 		break;
6165 	case I40E_AQ_LINK_PAUSE_TX:
6166 		txfc = 1;
6167 		hw->fc.current_mode = I40E_FC_TX_PAUSE;
6168 		break;
6169 	default:
6170 		hw->fc.current_mode = I40E_FC_NONE;
6171 		break;
6172 	}
6173 
6174 write_reg:
6175 	I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6176 		txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6177 	reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6178 	reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6179 	reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6180 	I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6181 
6182 	return ret;
6183 }
6184 
6185 /* PF setup */
6186 static int
6187 i40e_pf_setup(struct i40e_pf *pf)
6188 {
6189 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6190 	struct i40e_filter_control_settings settings;
6191 	struct i40e_vsi *vsi;
6192 	int ret;
6193 
6194 	/* Clear all stats counters */
6195 	pf->offset_loaded = FALSE;
6196 	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6197 	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6198 	memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6199 	memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6200 
6201 	ret = i40e_pf_get_switch_config(pf);
6202 	if (ret != I40E_SUCCESS) {
6203 		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6204 		return ret;
6205 	}
6206 
6207 	ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6208 	if (ret)
6209 		PMD_INIT_LOG(WARNING,
6210 			"failed to allocate switch domain for device %d", ret);
6211 
6212 	if (pf->flags & I40E_FLAG_FDIR) {
6213 		/* make sure the queue is allocated first, so FDIR uses queue pair 0 */
6214 		ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6215 		if (ret != I40E_FDIR_QUEUE_ID) {
6216 			PMD_DRV_LOG(ERR,
6217 				"queue allocation fails for FDIR: ret =%d",
6218 				ret);
6219 			pf->flags &= ~I40E_FLAG_FDIR;
6220 		}
6221 	}
6222 	/*  main VSI setup */
6223 	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6224 	if (!vsi) {
6225 		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6226 		return I40E_ERR_NOT_READY;
6227 	}
6228 	pf->main_vsi = vsi;
6229 
6230 	/* Configure filter control */
6231 	memset(&settings, 0, sizeof(settings));
6232 	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6233 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6234 	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6235 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6236 	else {
6237 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6238 			hw->func_caps.rss_table_size);
6239 		return I40E_ERR_PARAM;
6240 	}
6241 	PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6242 		hw->func_caps.rss_table_size);
6243 	pf->hash_lut_size = hw->func_caps.rss_table_size;
6244 
6245 	/* Enable ethtype and macvlan filters */
6246 	settings.enable_ethtype = TRUE;
6247 	settings.enable_macvlan = TRUE;
6248 	ret = i40e_set_filter_control(hw, &settings);
6249 	if (ret)
6250 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6251 								ret);
6252 
6253 	/* Update flow control according to the auto negotiation */
6254 	i40e_update_flow_control(hw);
6255 
6256 	return I40E_SUCCESS;
6257 }
6258 
6259 int
6260 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6261 {
6262 	uint32_t reg;
6263 	uint16_t j;
6264 
6265 	/**
6266 	 * Set or clear TX Queue Disable flags,
6267 	 * which is required by hardware.
6268 	 */
6269 	i40e_pre_tx_queue_cfg(hw, q_idx, on);
6270 	rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6271 
6272 	/* Wait until the request is finished */
6273 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6274 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6275 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6276 		if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6277 			((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6278 							& 0x1))) {
6279 			break;
6280 		}
6281 	}
6282 	if (on) {
6283 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6284 			return I40E_SUCCESS; /* already on, skip next steps */
6285 
6286 		I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6287 		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6288 	} else {
6289 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6290 			return I40E_SUCCESS; /* already off, skip next steps */
6291 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6292 	}
6293 	/* Write the register */
6294 	I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6295 	/* Check the result */
6296 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6297 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6298 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6299 		if (on) {
6300 			if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6301 				(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6302 				break;
6303 		} else {
6304 			if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6305 				!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6306 				break;
6307 		}
6308 	}
6309 	/* Check whether it timed out */
6310 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6311 		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6312 			    (on ? "enable" : "disable"), q_idx);
6313 		return I40E_ERR_TIMEOUT;
6314 	}
6315 
6316 	return I40E_SUCCESS;
6317 }
6318 
6319 int
6320 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6321 {
6322 	uint32_t reg;
6323 	uint16_t j;
6324 
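	/*
	 * QENA_REQ is the driver's enable request bit and QENA_STAT is the
	 * hardware state; the queue state is stable once the two agree.
	 */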
6325 	/* Wait until the request is finished */
6326 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6327 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6328 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6329 		if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6330 			((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6331 			break;
6332 	}
6333 
6334 	if (on) {
6335 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6336 			return I40E_SUCCESS; /* Already on, skip next steps */
6337 		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6338 	} else {
6339 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6340 			return I40E_SUCCESS; /* Already off, skip next steps */
6341 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6342 	}
6343 
6344 	/* Write the register */
6345 	I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6346 	/* Check the result */
6347 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6348 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6349 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6350 		if (on) {
6351 			if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6352 				(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6353 				break;
6354 		} else {
6355 			if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6356 				!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6357 				break;
6358 		}
6359 	}
6360 
6361 	/* Check whether it timed out */
6362 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6363 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6364 			    (on ? "enable" : "disable"), q_idx);
6365 		return I40E_ERR_TIMEOUT;
6366 	}
6367 
6368 	return I40E_SUCCESS;
6369 }
6370 
6371 /* Initialize VSI for TX */
6372 static int
6373 i40e_dev_tx_init(struct i40e_pf *pf)
6374 {
6375 	struct rte_eth_dev_data *data = pf->dev_data;
6376 	uint16_t i;
6377 	uint32_t ret = I40E_SUCCESS;
6378 	struct i40e_tx_queue *txq;
6379 
6380 	for (i = 0; i < data->nb_tx_queues; i++) {
6381 		txq = data->tx_queues[i];
6382 		if (!txq || !txq->q_set)
6383 			continue;
6384 		ret = i40e_tx_queue_init(txq);
6385 		if (ret != I40E_SUCCESS)
6386 			break;
6387 	}
6388 	if (ret == I40E_SUCCESS)
6389 		i40e_set_tx_function(&rte_eth_devices[pf->dev_data->port_id]);
6390 
6391 	return ret;
6392 }
6393 
6394 /* Initialize VSI for RX */
6395 static int
6396 i40e_dev_rx_init(struct i40e_pf *pf)
6397 {
6398 	struct rte_eth_dev_data *data = pf->dev_data;
6399 	int ret = I40E_SUCCESS;
6400 	uint16_t i;
6401 	struct i40e_rx_queue *rxq;
6402 
6403 	i40e_pf_config_rss(pf);
6404 	for (i = 0; i < data->nb_rx_queues; i++) {
6405 		rxq = data->rx_queues[i];
6406 		if (!rxq || !rxq->q_set)
6407 			continue;
6408 
6409 		ret = i40e_rx_queue_init(rxq);
6410 		if (ret != I40E_SUCCESS) {
6411 			PMD_DRV_LOG(ERR,
6412 				"Failed to do RX queue initialization");
6413 			break;
6414 		}
6415 	}
6416 	if (ret == I40E_SUCCESS)
6417 		i40e_set_rx_function(&rte_eth_devices[pf->dev_data->port_id]);
6418 
6419 	return ret;
6420 }
6421 
6422 static int
6423 i40e_dev_rxtx_init(struct i40e_pf *pf)
6424 {
6425 	int err;
6426 
6427 	err = i40e_dev_tx_init(pf);
6428 	if (err) {
6429 		PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6430 		return err;
6431 	}
6432 	err = i40e_dev_rx_init(pf);
6433 	if (err) {
6434 		PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6435 		return err;
6436 	}
6437 
6438 	return err;
6439 }
6440 
6441 static int
6442 i40e_vmdq_setup(struct rte_eth_dev *dev)
6443 {
6444 	struct rte_eth_conf *conf = &dev->data->dev_conf;
6445 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6446 	int i, err, conf_vsis, j, loop;
6447 	struct i40e_vsi *vsi;
6448 	struct i40e_vmdq_info *vmdq_info;
6449 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
6450 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6451 
6452 	/*
6453 	 * Disable interrupts to avoid messages from VFs. This also avoids
6454 	 * a race condition during VSI creation/destruction.
6455 	 */
6456 	i40e_pf_disable_irq0(hw);
6457 
6458 	if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6459 		PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6460 		return -ENOTSUP;
6461 	}
6462 
6463 	conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6464 	if (conf_vsis > pf->max_nb_vmdq_vsi) {
6465 		PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6466 			conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6467 			pf->max_nb_vmdq_vsi);
6468 		return -ENOTSUP;
6469 	}
6470 
6471 	if (pf->vmdq != NULL) {
6472 		PMD_INIT_LOG(INFO, "VMDQ already configured");
6473 		return 0;
6474 	}
6475 
6476 	pf->vmdq = rte_zmalloc("vmdq_info_struct",
6477 				sizeof(*vmdq_info) * conf_vsis, 0);
6478 
6479 	if (pf->vmdq == NULL) {
6480 		PMD_INIT_LOG(ERR, "Failed to allocate memory");
6481 		return -ENOMEM;
6482 	}
6483 
6484 	vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6485 
6486 	/* Create VMDQ VSI */
6487 	for (i = 0; i < conf_vsis; i++) {
6488 		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6489 				vmdq_conf->enable_loop_back);
6490 		if (vsi == NULL) {
6491 			PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6492 			err = -1;
6493 			goto err_vsi_setup;
6494 		}
6495 		vmdq_info = &pf->vmdq[i];
6496 		vmdq_info->pf = pf;
6497 		vmdq_info->vsi = vsi;
6498 	}
6499 	pf->nb_cfg_vmdq_vsi = conf_vsis;
6500 
6501 	/* Configure Vlan */
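	/* Each pool_map entry carries a VLAN id and a 64-bit bitmask of the pools it belongs to */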
6502 	loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6503 	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6504 		for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6505 			if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6506 				PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6507 					vmdq_conf->pool_map[i].vlan_id, j);
6508 
6509 				err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6510 						vmdq_conf->pool_map[i].vlan_id);
6511 				if (err) {
6512 					PMD_INIT_LOG(ERR, "Failed to add vlan");
6513 					err = -1;
6514 					goto err_vsi_setup;
6515 				}
6516 			}
6517 		}
6518 	}
6519 
6520 	i40e_pf_enable_irq0(hw);
6521 
6522 	return 0;
6523 
6524 err_vsi_setup:
6525 	for (i = 0; i < conf_vsis; i++)
6526 		if (pf->vmdq[i].vsi == NULL)
6527 			break;
6528 		else
6529 			i40e_vsi_release(pf->vmdq[i].vsi);
6530 
6531 	rte_free(pf->vmdq);
6532 	pf->vmdq = NULL;
6533 	i40e_pf_enable_irq0(hw);
6534 	return err;
6535 }
6536 
6537 static void
6538 i40e_stat_update_32(struct i40e_hw *hw,
6539 		   uint32_t reg,
6540 		   bool offset_loaded,
6541 		   uint64_t *offset,
6542 		   uint64_t *stat)
6543 {
6544 	uint64_t new_data;
6545 
6546 	new_data = (uint64_t)I40E_READ_REG(hw, reg);
6547 	if (!offset_loaded)
6548 		*offset = new_data;
6549 
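	/* The 32-bit counter may have wrapped since the last snapshot; handle the rollover */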
6550 	if (new_data >= *offset)
6551 		*stat = (uint64_t)(new_data - *offset);
6552 	else
6553 		*stat = (uint64_t)((new_data +
6554 			((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6555 }
6556 
6557 static void
6558 i40e_stat_update_48(struct i40e_hw *hw,
6559 		   uint32_t hireg,
6560 		   uint32_t loreg,
6561 		   bool offset_loaded,
6562 		   uint64_t *offset,
6563 		   uint64_t *stat)
6564 {
6565 	uint64_t new_data;
6566 
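	/*
	 * Stats counters are 48 bits wide; on the QEMU-emulated device read
	 * the low and high halves separately, otherwise use one 64-bit read.
	 */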
6567 	if (hw->device_id == I40E_DEV_ID_QEMU) {
6568 		new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6569 		new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6570 				I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6571 	} else {
6572 		new_data = I40E_READ_REG64(hw, loreg);
6573 	}
6574 
6575 	if (!offset_loaded)
6576 		*offset = new_data;
6577 
6578 	if (new_data >= *offset)
6579 		*stat = new_data - *offset;
6580 	else
6581 		*stat = (uint64_t)((new_data +
6582 			((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6583 
6584 	*stat &= I40E_48_BIT_MASK;
6585 }
6586 
6587 /* Disable IRQ0 */
6588 void
6589 i40e_pf_disable_irq0(struct i40e_hw *hw)
6590 {
6591 	/* Disable all interrupt types */
6592 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6593 		       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6594 	I40E_WRITE_FLUSH(hw);
6595 }
6596 
6597 /* Enable IRQ0 */
6598 void
6599 i40e_pf_enable_irq0(struct i40e_hw *hw)
6600 {
6601 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6602 		I40E_PFINT_DYN_CTL0_INTENA_MASK |
6603 		I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6604 		I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6605 	I40E_WRITE_FLUSH(hw);
6606 }
6607 
6608 static void
6609 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6610 {
6611 	/* read pending request and disable first */
6612 	i40e_pf_disable_irq0(hw);
6613 	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6614 	I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6615 		I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6616 
6617 	if (no_queue)
6618 		/* Link no queues with irq0 */
6619 		I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6620 			       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6621 }
6622 
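/*
 * Handle VF reset (VFLR) events: scan the global VFLR status registers,
 * clear the bit for each VF that requested a reset and notify the PF host
 * code without triggering another SW reset.
 */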
6623 static void
6624 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6625 {
6626 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6627 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6628 	int i;
6629 	uint16_t abs_vf_id;
6630 	uint32_t index, offset, val;
6631 
6632 	if (!pf->vfs)
6633 		return;
6634 	/**
6635 	 * Try to find which VF triggered a reset; use the absolute VF id to
6636 	 * access it, since the register is a global register.
6637 	 */
6638 	for (i = 0; i < pf->vf_num; i++) {
6639 		abs_vf_id = hw->func_caps.vf_base_id + i;
6640 		index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6641 		offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6642 		val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6643 		/* VFR event occurred */
6644 		if (val & (0x1 << offset)) {
6645 			int ret;
6646 
6647 			/* Clear the event first */
6648 			I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6649 							(0x1 << offset));
6650 			PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6651 			/**
6652 			 * Only notify that a VF reset event occurred;
6653 			 * don't trigger another SW reset.
6654 			 */
6655 			ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6656 			if (ret != I40E_SUCCESS)
6657 				PMD_DRV_LOG(ERR, "Failed to do VF reset");
6658 		}
6659 	}
6660 }
6661 
6662 static void
6663 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6664 {
6665 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6666 	int i;
6667 
6668 	for (i = 0; i < pf->vf_num; i++)
6669 		i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6670 }
6671 
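/*
 * Drain the admin receive queue and dispatch each event: VF mailbox
 * messages go to the PF host handler, link status changes trigger the
 * LSC callback, everything else is only logged.
 */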
6672 static void
6673 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6674 {
6675 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6676 	struct i40e_arq_event_info info;
6677 	uint16_t pending, opcode;
6678 	int ret;
6679 
6680 	info.buf_len = I40E_AQ_BUF_SZ;
6681 	info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6682 	if (!info.msg_buf) {
6683 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
6684 		return;
6685 	}
6686 
6687 	pending = 1;
6688 	while (pending) {
6689 		ret = i40e_clean_arq_element(hw, &info, &pending);
6690 
6691 		if (ret != I40E_SUCCESS) {
6692 			PMD_DRV_LOG(INFO,
6693 				"Failed to read msg from AdminQ, aq_err: %u",
6694 				hw->aq.asq_last_status);
6695 			break;
6696 		}
6697 		opcode = rte_le_to_cpu_16(info.desc.opcode);
6698 
6699 		switch (opcode) {
6700 		case i40e_aqc_opc_send_msg_to_pf:
6701 			/* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6702 			i40e_pf_host_handle_vf_msg(dev,
6703 					rte_le_to_cpu_16(info.desc.retval),
6704 					rte_le_to_cpu_32(info.desc.cookie_high),
6705 					rte_le_to_cpu_32(info.desc.cookie_low),
6706 					info.msg_buf,
6707 					info.msg_len);
6708 			break;
6709 		case i40e_aqc_opc_get_link_status:
6710 			ret = i40e_dev_link_update(dev, 0);
6711 			if (!ret)
6712 				rte_eth_dev_callback_process(dev,
6713 					RTE_ETH_EVENT_INTR_LSC, NULL);
6714 			break;
6715 		default:
6716 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6717 				    opcode);
6718 			break;
6719 		}
6720 	}
6721 	rte_free(info.msg_buf);
6722 }
6723 
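/*
 * Handle a Malicious Driver Detection event: log and clear the TX/RX
 * detection registers for the PF and for every VF that triggered one,
 * counting the events per VF.
 */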
6724 static void
6725 i40e_handle_mdd_event(struct rte_eth_dev *dev)
6726 {
6727 #define I40E_MDD_CLEAR32 0xFFFFFFFF
6728 #define I40E_MDD_CLEAR16 0xFFFF
6729 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6730 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6731 	bool mdd_detected = false;
6732 	struct i40e_pf_vf *vf;
6733 	uint32_t reg;
6734 	int i;
6735 
6736 	/* find what triggered the MDD event */
6737 	reg = I40E_READ_REG(hw, I40E_GL_MDET_TX);
6738 	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6739 		uint8_t pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6740 				I40E_GL_MDET_TX_PF_NUM_SHIFT;
6741 		uint16_t vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6742 				I40E_GL_MDET_TX_VF_NUM_SHIFT;
6743 		uint8_t event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6744 				I40E_GL_MDET_TX_EVENT_SHIFT;
6745 		uint16_t queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6746 				I40E_GL_MDET_TX_QUEUE_SHIFT) -
6747 					hw->func_caps.base_queue;
6748 		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX "
6749 			"queue %d PF number 0x%02x VF number 0x%02x device %s\n",
6750 				event, queue, pf_num, vf_num, dev->data->name);
6751 		I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32);
6752 		mdd_detected = true;
6753 	}
6754 	reg = I40E_READ_REG(hw, I40E_GL_MDET_RX);
6755 	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6756 		uint8_t func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6757 				I40E_GL_MDET_RX_FUNCTION_SHIFT;
6758 		uint8_t event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6759 				I40E_GL_MDET_RX_EVENT_SHIFT;
6760 		uint16_t queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6761 				I40E_GL_MDET_RX_QUEUE_SHIFT) -
6762 					hw->func_caps.base_queue;
6763 
6764 		PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX "
6765 				"queue %d of function 0x%02x device %s\n",
6766 					event, queue, func, dev->data->name);
6767 		I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32);
6768 		mdd_detected = true;
6769 	}
6770 
6771 	if (mdd_detected) {
6772 		reg = I40E_READ_REG(hw, I40E_PF_MDET_TX);
6773 		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6774 			I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16);
6775 			PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n");
6776 		}
6777 		reg = I40E_READ_REG(hw, I40E_PF_MDET_RX);
6778 		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6779 			I40E_WRITE_REG(hw, I40E_PF_MDET_RX,
6780 					I40E_MDD_CLEAR16);
6781 			PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n");
6782 		}
6783 	}
6784 
6785 	/* see if one of the VFs needs its hand slapped */
6786 	for (i = 0; i < pf->vf_num && mdd_detected; i++) {
6787 		vf = &pf->vfs[i];
6788 		reg = I40E_READ_REG(hw, I40E_VP_MDET_TX(i));
6789 		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6790 			I40E_WRITE_REG(hw, I40E_VP_MDET_TX(i),
6791 					I40E_MDD_CLEAR16);
6792 			vf->num_mdd_events++;
6793 			PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %"
6794 					PRIu64 " times\n",
6795 					i, vf->num_mdd_events);
6796 		}
6797 
6798 		reg = I40E_READ_REG(hw, I40E_VP_MDET_RX(i));
6799 		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6800 			I40E_WRITE_REG(hw, I40E_VP_MDET_RX(i),
6801 					I40E_MDD_CLEAR16);
6802 			vf->num_mdd_events++;
6803 			PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %"
6804 					PRIu64 " times\n",
6805 					i, vf->num_mdd_events);
6806 		}
6807 	}
6808 }
6809 
6810 /**
6811  * Interrupt handler triggered by the NIC for handling
6812  * a specific interrupt.
6813  *
6814  * @param handle
6815  *  Pointer to interrupt handle.
6816  * @param param
6817  *  The address of the parameter (struct rte_eth_dev *) registered before.
6818  *
6819  * @return
6820  *  void
6821  */
6822 static void
6823 i40e_dev_interrupt_handler(void *param)
6824 {
6825 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6826 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6827 	uint32_t icr0;
6828 
6829 	/* Disable interrupt */
6830 	i40e_pf_disable_irq0(hw);
6831 
6832 	/* read out interrupt causes */
6833 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6834 
6835 	/* No interrupt event indicated */
6836 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6837 		PMD_DRV_LOG(INFO, "No interrupt event");
6838 		goto done;
6839 	}
6840 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6841 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6842 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6843 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6844 		i40e_handle_mdd_event(dev);
6845 	}
6846 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6847 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6848 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6849 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6850 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6851 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6852 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6853 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
6854 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6855 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6856 
6857 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6858 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6859 		i40e_dev_handle_vfr_event(dev);
6860 	}
6861 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6862 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
6863 		i40e_dev_handle_aq_msg(dev);
6864 	}
6865 
6866 done:
6867 	/* Enable interrupt */
6868 	i40e_pf_enable_irq0(hw);
6869 }
6870 
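/*
 * Alarm-based variant of the interrupt handler: poll ICR0 for the same
 * causes, service them, then re-arm the alarm.
 */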
6871 static void
6872 i40e_dev_alarm_handler(void *param)
6873 {
6874 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6875 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6876 	uint32_t icr0;
6877 
6878 	/* Disable interrupt */
6879 	i40e_pf_disable_irq0(hw);
6880 
6881 	/* read out interrupt causes */
6882 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6883 
6884 	/* No interrupt event indicated */
6885 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6886 		goto done;
6887 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6888 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6889 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
6890 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6891 		i40e_handle_mdd_event(dev);
6892 	}
6893 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6894 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6895 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6896 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6897 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6898 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6899 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6900 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
6901 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6902 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6903 
6904 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6905 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6906 		i40e_dev_handle_vfr_event(dev);
6907 	}
6908 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6909 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
6910 		i40e_dev_handle_aq_msg(dev);
6911 	}
6912 
6913 done:
6914 	/* Enable interrupt */
6915 	i40e_pf_enable_irq0(hw);
6916 	rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6917 			  i40e_dev_alarm_handler, dev);
6918 }
6919 
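/*
 * Add a list of MAC/VLAN filters through the admin queue, splitting the
 * request into chunks that fit within the ASQ buffer size.
 */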
6920 int
6921 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6922 			 struct i40e_macvlan_filter *filter,
6923 			 int total)
6924 {
6925 	int ele_num, ele_buff_size;
6926 	int num, actual_num, i;
6927 	uint16_t flags;
6928 	int ret = I40E_SUCCESS;
6929 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6930 	struct i40e_aqc_add_macvlan_element_data *req_list;
6931 
6932 	if (filter == NULL || total == 0)
6933 		return I40E_ERR_PARAM;
6934 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6935 	ele_buff_size = hw->aq.asq_buf_size;
6936 
6937 	req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6938 	if (req_list == NULL) {
6939 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
6940 		return I40E_ERR_NO_MEMORY;
6941 	}
6942 
6943 	num = 0;
6944 	do {
6945 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6946 		memset(req_list, 0, ele_buff_size);
6947 
6948 		for (i = 0; i < actual_num; i++) {
6949 			rte_memcpy(req_list[i].mac_addr,
6950 				&filter[num + i].macaddr, ETH_ADDR_LEN);
6951 			req_list[i].vlan_tag =
6952 				rte_cpu_to_le_16(filter[num + i].vlan_id);
6953 
6954 			switch (filter[num + i].filter_type) {
6955 			case I40E_MAC_PERFECT_MATCH:
6956 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6957 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6958 				break;
6959 			case I40E_MACVLAN_PERFECT_MATCH:
6960 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6961 				break;
6962 			case I40E_MAC_HASH_MATCH:
6963 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6964 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6965 				break;
6966 			case I40E_MACVLAN_HASH_MATCH:
6967 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6968 				break;
6969 			default:
6970 				PMD_DRV_LOG(ERR, "Invalid MAC match type");
6971 				ret = I40E_ERR_PARAM;
6972 				goto DONE;
6973 			}
6974 
6975 			req_list[i].queue_number = 0;
6976 
6977 			req_list[i].flags = rte_cpu_to_le_16(flags);
6978 		}
6979 
6980 		ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6981 						actual_num, NULL);
6982 		if (ret != I40E_SUCCESS) {
6983 			PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6984 			goto DONE;
6985 		}
6986 		num += actual_num;
6987 	} while (num < total);
6988 
6989 DONE:
6990 	rte_free(req_list);
6991 	return ret;
6992 }
6993 
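/*
 * Remove a list of MAC/VLAN filters through the admin queue, chunked to
 * the ASQ buffer size like i40e_add_macvlan_filters().
 */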
6994 int
6995 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6996 			    struct i40e_macvlan_filter *filter,
6997 			    int total)
6998 {
6999 	int ele_num, ele_buff_size;
7000 	int num, actual_num, i;
7001 	uint16_t flags;
7002 	int ret = I40E_SUCCESS;
7003 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7004 	struct i40e_aqc_remove_macvlan_element_data *req_list;
7005 
7006 	if (filter == NULL || total == 0)
7007 		return I40E_ERR_PARAM;
7008 
7009 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
7010 	ele_buff_size = hw->aq.asq_buf_size;
7011 
7012 	req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
7013 	if (req_list == NULL) {
7014 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
7015 		return I40E_ERR_NO_MEMORY;
7016 	}
7017 
7018 	num = 0;
7019 	do {
7020 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
7021 		memset(req_list, 0, ele_buff_size);
7022 
7023 		for (i = 0; i < actual_num; i++) {
7024 			rte_memcpy(req_list[i].mac_addr,
7025 				&filter[num + i].macaddr, ETH_ADDR_LEN);
7026 			req_list[i].vlan_tag =
7027 				rte_cpu_to_le_16(filter[num + i].vlan_id);
7028 
7029 			switch (filter[num + i].filter_type) {
7030 			case I40E_MAC_PERFECT_MATCH:
7031 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
7032 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7033 				break;
7034 			case I40E_MACVLAN_PERFECT_MATCH:
7035 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7036 				break;
7037 			case I40E_MAC_HASH_MATCH:
7038 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
7039 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7040 				break;
7041 			case I40E_MACVLAN_HASH_MATCH:
7042 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
7043 				break;
7044 			default:
7045 				PMD_DRV_LOG(ERR, "Invalid MAC filter type");
7046 				ret = I40E_ERR_PARAM;
7047 				goto DONE;
7048 			}
7049 			req_list[i].flags = rte_cpu_to_le_16(flags);
7050 		}
7051 
7052 		ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
7053 						actual_num, NULL);
7054 		if (ret != I40E_SUCCESS) {
7055 			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7056 			goto DONE;
7057 		}
7058 		num += actual_num;
7059 	} while (num < total);
7060 
7061 DONE:
7062 	rte_free(req_list);
7063 	return ret;
7064 }
7065 
7066 /* Find out specific MAC filter */
7067 static struct i40e_mac_filter *
7068 i40e_find_mac_filter(struct i40e_vsi *vsi,
7069 			 struct rte_ether_addr *macaddr)
7070 {
7071 	struct i40e_mac_filter *f;
7072 
7073 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
7074 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7075 			return f;
7076 	}
7077 
7078 	return NULL;
7079 }
7080 
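/* Check the software VLAN table (VFTA) for a VLAN id on this VSI */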
7081 static bool
7082 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7083 			 uint16_t vlan_id)
7084 {
7085 	uint32_t vid_idx, vid_bit;
7086 
7087 	if (vlan_id > ETH_VLAN_ID_MAX)
7088 		return 0;
7089 
7090 	vid_idx = I40E_VFTA_IDX(vlan_id);
7091 	vid_bit = I40E_VFTA_BIT(vlan_id);
7092 
7093 	if (vsi->vfta[vid_idx] & vid_bit)
7094 		return 1;
7095 	else
7096 		return 0;
7097 }
7098 
7099 static void
7100 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7101 		       uint16_t vlan_id, bool on)
7102 {
7103 	uint32_t vid_idx, vid_bit;
7104 
7105 	vid_idx = I40E_VFTA_IDX(vlan_id);
7106 	vid_bit = I40E_VFTA_BIT(vlan_id);
7107 
7108 	if (on)
7109 		vsi->vfta[vid_idx] |= vid_bit;
7110 	else
7111 		vsi->vfta[vid_idx] &= ~vid_bit;
7112 }
7113 
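/*
 * Update the software VLAN table and, when VLAN anti-spoofing or VLAN
 * filtering is enabled, program the hardware filter through the admin queue.
 */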
7114 void
7115 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7116 		     uint16_t vlan_id, bool on)
7117 {
7118 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7119 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7120 	int ret;
7121 
7122 	if (vlan_id > ETH_VLAN_ID_MAX)
7123 		return;
7124 
7125 	i40e_store_vlan_filter(vsi, vlan_id, on);
7126 
7127 	if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7128 		return;
7129 
7130 	vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7131 
7132 	if (on) {
7133 		ret = i40e_aq_add_vlan(hw, vsi->seid,
7134 				       &vlan_data, 1, NULL);
7135 		if (ret != I40E_SUCCESS)
7136 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7137 	} else {
7138 		ret = i40e_aq_remove_vlan(hw, vsi->seid,
7139 					  &vlan_data, 1, NULL);
7140 		if (ret != I40E_SUCCESS)
7141 			PMD_DRV_LOG(ERR,
7142 				    "Failed to remove vlan filter");
7143 	}
7144 }
7145 
7146 /**
7147  * Find all vlan options for a specific mac addr,
7148  * and return the vlans actually found in mv_f.
7149  */
7150 int
7151 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7152 			   struct i40e_macvlan_filter *mv_f,
7153 			   int num, struct rte_ether_addr *addr)
7154 {
7155 	int i;
7156 	uint32_t j, k;
7157 
7158 	/**
7159 	 * Do not use i40e_find_vlan_filter() here, to reduce the loop time,
7160 	 * even though the code looks more complex.
7161 	 */
7162 	if (num < vsi->vlan_num)
7163 		return I40E_ERR_PARAM;
7164 
7165 	i = 0;
7166 	for (j = 0; j < I40E_VFTA_SIZE; j++) {
7167 		if (vsi->vfta[j]) {
7168 			for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7169 				if (vsi->vfta[j] & (1 << k)) {
7170 					if (i > num - 1) {
7171 						PMD_DRV_LOG(ERR,
7172 							"vlan number doesn't match");
7173 						return I40E_ERR_PARAM;
7174 					}
7175 					rte_memcpy(&mv_f[i].macaddr,
7176 							addr, ETH_ADDR_LEN);
7177 					mv_f[i].vlan_id =
7178 						j * I40E_UINT32_BIT_SIZE + k;
7179 					i++;
7180 				}
7181 			}
7182 		}
7183 	}
7184 	return I40E_SUCCESS;
7185 }
7186 
7187 static inline int
7188 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7189 			   struct i40e_macvlan_filter *mv_f,
7190 			   int num,
7191 			   uint16_t vlan)
7192 {
7193 	int i = 0;
7194 	struct i40e_mac_filter *f;
7195 
7196 	if (num < vsi->mac_num)
7197 		return I40E_ERR_PARAM;
7198 
7199 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
7200 		if (i > num - 1) {
7201 			PMD_DRV_LOG(ERR, "buffer number doesn't match");
7202 			return I40E_ERR_PARAM;
7203 		}
7204 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7205 				ETH_ADDR_LEN);
7206 		mv_f[i].vlan_id = vlan;
7207 		mv_f[i].filter_type = f->mac_info.filter_type;
7208 		i++;
7209 	}
7210 
7211 	return I40E_SUCCESS;
7212 }
7213 
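/*
 * Remove every MAC/VLAN filter of the VSI by flattening all configured
 * (MAC, VLAN) combinations into one array and removing them in one pass.
 */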
7214 static int
7215 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7216 {
7217 	int i, j, num;
7218 	struct i40e_mac_filter *f;
7219 	struct i40e_macvlan_filter *mv_f;
7220 	int ret = I40E_SUCCESS;
7221 
7222 	if (vsi == NULL || vsi->mac_num == 0)
7223 		return I40E_ERR_PARAM;
7224 
7225 	/* Case that no vlan is set */
7226 	if (vsi->vlan_num == 0)
7227 		num = vsi->mac_num;
7228 	else
7229 		num = vsi->mac_num * vsi->vlan_num;
7230 
7231 	mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7232 	if (mv_f == NULL) {
7233 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7234 		return I40E_ERR_NO_MEMORY;
7235 	}
7236 
7237 	i = 0;
7238 	if (vsi->vlan_num == 0) {
7239 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7240 			rte_memcpy(&mv_f[i].macaddr,
7241 				&f->mac_info.mac_addr, ETH_ADDR_LEN);
7242 			mv_f[i].filter_type = f->mac_info.filter_type;
7243 			mv_f[i].vlan_id = 0;
7244 			i++;
7245 		}
7246 	} else {
7247 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7248 			ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
7249 					vsi->vlan_num, &f->mac_info.mac_addr);
7250 			if (ret != I40E_SUCCESS)
7251 				goto DONE;
7252 			for (j = i; j < i + vsi->vlan_num; j++)
7253 				mv_f[j].filter_type = f->mac_info.filter_type;
7254 			i += vsi->vlan_num;
7255 		}
7256 	}
7257 
7258 	ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7259 DONE:
7260 	rte_free(mv_f);
7261 
7262 	return ret;
7263 }
7264 
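/*
 * Add a VLAN filter by pairing the VLAN with every MAC address already
 * configured on the VSI.
 */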
7265 int
7266 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7267 {
7268 	struct i40e_macvlan_filter *mv_f;
7269 	int mac_num;
7270 	int ret = I40E_SUCCESS;
7271 
7272 	if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7273 		return I40E_ERR_PARAM;
7274 
7275 	/* If it's already set, just return */
7276 	if (i40e_find_vlan_filter(vsi, vlan))
7277 		return I40E_SUCCESS;
7278 
7279 	mac_num = vsi->mac_num;
7280 
7281 	if (mac_num == 0) {
7282 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7283 		return I40E_ERR_PARAM;
7284 	}
7285 
7286 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7287 
7288 	if (mv_f == NULL) {
7289 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7290 		return I40E_ERR_NO_MEMORY;
7291 	}
7292 
7293 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7294 
7295 	if (ret != I40E_SUCCESS)
7296 		goto DONE;
7297 
7298 	ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7299 
7300 	if (ret != I40E_SUCCESS)
7301 		goto DONE;
7302 
7303 	i40e_set_vlan_filter(vsi, vlan, 1);
7304 
7305 	vsi->vlan_num++;
7306 	ret = I40E_SUCCESS;
7307 DONE:
7308 	rte_free(mv_f);
7309 	return ret;
7310 }
7311 
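/*
 * Remove a VLAN filter from every MAC address on the VSI; if it was the
 * last VLAN, re-add the MAC filters with VLAN 0 so untagged traffic still
 * matches.
 */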
7312 int
7313 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7314 {
7315 	struct i40e_macvlan_filter *mv_f;
7316 	int mac_num;
7317 	int ret = I40E_SUCCESS;
7318 
7319 	/**
7320 	 * Vlan 0 is the generic filter for untagged packets
7321 	 * and can't be removed.
7322 	 */
7323 	if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7324 		return I40E_ERR_PARAM;
7325 
7326 	/* If can't find it, just return */
7327 	if (!i40e_find_vlan_filter(vsi, vlan))
7328 		return I40E_ERR_PARAM;
7329 
7330 	mac_num = vsi->mac_num;
7331 
7332 	if (mac_num == 0) {
7333 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7334 		return I40E_ERR_PARAM;
7335 	}
7336 
7337 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7338 
7339 	if (mv_f == NULL) {
7340 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7341 		return I40E_ERR_NO_MEMORY;
7342 	}
7343 
7344 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7345 
7346 	if (ret != I40E_SUCCESS)
7347 		goto DONE;
7348 
7349 	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7350 
7351 	if (ret != I40E_SUCCESS)
7352 		goto DONE;
7353 
7354 	/* This is the last vlan to remove; replace all mac filters with vlan 0 */
7355 	if (vsi->vlan_num == 1) {
7356 		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7357 		if (ret != I40E_SUCCESS)
7358 			goto DONE;
7359 
7360 		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7361 		if (ret != I40E_SUCCESS)
7362 			goto DONE;
7363 	}
7364 
7365 	i40e_set_vlan_filter(vsi, vlan, 0);
7366 
7367 	vsi->vlan_num--;
7368 	ret = I40E_SUCCESS;
7369 DONE:
7370 	rte_free(mv_f);
7371 	return ret;
7372 }
7373 
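/*
 * Add a MAC filter to the VSI, expanded to one MAC/VLAN entry per
 * configured VLAN (or a single entry for MAC-only filter types).
 */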
7374 int
7375 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7376 {
7377 	struct i40e_mac_filter *f;
7378 	struct i40e_macvlan_filter *mv_f;
7379 	int i, vlan_num = 0;
7380 	int ret = I40E_SUCCESS;
7381 
7382 	/* If the MAC address has already been added and configured, return */
7383 	f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7384 	if (f != NULL)
7385 		return I40E_SUCCESS;
7386 	if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7387 		mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7388 
7389 		/**
7390 		 * If vlan_num is 0, this is the first time to add a mac;
7391 		 * set the mask for vlan_id 0.
7392 		 */
7393 		if (vsi->vlan_num == 0) {
7394 			i40e_set_vlan_filter(vsi, 0, 1);
7395 			vsi->vlan_num = 1;
7396 		}
7397 		vlan_num = vsi->vlan_num;
7398 	} else if (mac_filter->filter_type == I40E_MAC_PERFECT_MATCH ||
7399 			mac_filter->filter_type == I40E_MAC_HASH_MATCH)
7400 		vlan_num = 1;
7401 
7402 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7403 	if (mv_f == NULL) {
7404 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7405 		return I40E_ERR_NO_MEMORY;
7406 	}
7407 
7408 	for (i = 0; i < vlan_num; i++) {
7409 		mv_f[i].filter_type = mac_filter->filter_type;
7410 		rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7411 				ETH_ADDR_LEN);
7412 	}
7413 
7414 	if (mac_filter->filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7415 		mac_filter->filter_type == I40E_MACVLAN_HASH_MATCH) {
7416 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7417 					&mac_filter->mac_addr);
7418 		if (ret != I40E_SUCCESS)
7419 			goto DONE;
7420 	}
7421 
7422 	ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7423 	if (ret != I40E_SUCCESS)
7424 		goto DONE;
7425 
7426 	/* Add the mac addr into the mac list */
7427 	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7428 	if (f == NULL) {
7429 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7430 		ret = I40E_ERR_NO_MEMORY;
7431 		goto DONE;
7432 	}
7433 	rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7434 			ETH_ADDR_LEN);
7435 	f->mac_info.filter_type = mac_filter->filter_type;
7436 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7437 	vsi->mac_num++;
7438 
7439 	ret = I40E_SUCCESS;
7440 DONE:
7441 	rte_free(mv_f);
7442 
7443 	return ret;
7444 }
7445 
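/*
 * Remove a MAC filter from the VSI together with all of its MAC/VLAN
 * combinations.
 */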
7446 int
7447 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7448 {
7449 	struct i40e_mac_filter *f;
7450 	struct i40e_macvlan_filter *mv_f;
7451 	int i, vlan_num;
7452 	enum i40e_mac_filter_type filter_type;
7453 	int ret = I40E_SUCCESS;
7454 
7455 	/* Can't find it, return an error */
7456 	f = i40e_find_mac_filter(vsi, addr);
7457 	if (f == NULL)
7458 		return I40E_ERR_PARAM;
7459 
7460 	vlan_num = vsi->vlan_num;
7461 	filter_type = f->mac_info.filter_type;
7462 	if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7463 		filter_type == I40E_MACVLAN_HASH_MATCH) {
7464 		if (vlan_num == 0) {
7465 			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7466 			return I40E_ERR_PARAM;
7467 		}
7468 	} else if (filter_type == I40E_MAC_PERFECT_MATCH ||
7469 			filter_type == I40E_MAC_HASH_MATCH)
7470 		vlan_num = 1;
7471 
7472 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7473 	if (mv_f == NULL) {
7474 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7475 		return I40E_ERR_NO_MEMORY;
7476 	}
7477 
7478 	for (i = 0; i < vlan_num; i++) {
7479 		mv_f[i].filter_type = filter_type;
7480 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7481 				ETH_ADDR_LEN);
7482 	}
7483 	if (filter_type == I40E_MACVLAN_PERFECT_MATCH ||
7484 			filter_type == I40E_MACVLAN_HASH_MATCH) {
7485 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7486 		if (ret != I40E_SUCCESS)
7487 			goto DONE;
7488 	}
7489 
7490 	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7491 	if (ret != I40E_SUCCESS)
7492 		goto DONE;
7493 
7494 	/* Remove the mac addr from the mac list */
7495 	TAILQ_REMOVE(&vsi->mac_list, f, next);
7496 	rte_free(f);
7497 	vsi->mac_num--;
7498 
7499 	ret = I40E_SUCCESS;
7500 DONE:
7501 	rte_free(mv_f);
7502 	return ret;
7503 }
7504 
7505 /* Configure hash enable flags for RSS */
7506 uint64_t
7507 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7508 {
7509 	uint64_t hena = 0;
7510 	int i;
7511 
7512 	if (!flags)
7513 		return hena;
7514 
7515 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7516 		if (flags & (1ULL << i))
7517 			hena |= adapter->pctypes_tbl[i];
7518 	}
7519 
7520 	return hena;
7521 }
7522 
7523 /* Parse the hash enable flags */
7524 uint64_t
7525 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7526 {
7527 	uint64_t rss_hf = 0;
7528 
7529 	if (!flags)
7530 		return rss_hf;
7531 	int i;
7532 
7533 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7534 		if (flags & adapter->pctypes_tbl[i])
7535 			rss_hf |= (1ULL << i);
7536 	}
7537 	return rss_hf;
7538 }
7539 
7540 /* Disable RSS */
7541 void
7542 i40e_pf_disable_rss(struct i40e_pf *pf)
7543 {
7544 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7545 
7546 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7547 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7548 	I40E_WRITE_FLUSH(hw);
7549 }
7550 
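/*
 * Program the RSS hash key, either through the admin queue when the
 * device supports it or by writing the HKEY registers directly.
 */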
7551 int
7552 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7553 {
7554 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7555 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7556 	uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7557 			   I40E_VFQF_HKEY_MAX_INDEX :
7558 			   I40E_PFQF_HKEY_MAX_INDEX;
7559 
7560 	if (!key || key_len == 0) {
7561 		PMD_DRV_LOG(DEBUG, "No key to be configured");
7562 		return 0;
7563 	} else if (key_len != (key_idx + 1) *
7564 		sizeof(uint32_t)) {
7565 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7566 		return -EINVAL;
7567 	}
7568 
7569 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7570 		struct i40e_aqc_get_set_rss_key_data *key_dw =
7571 				(struct i40e_aqc_get_set_rss_key_data *)key;
7572 		enum i40e_status_code status =
7573 				i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7574 
7575 		if (status) {
7576 			PMD_DRV_LOG(ERR,
7577 				    "Failed to configure RSS key via AQ, error status: %d",
7578 				    status);
7579 			return -EIO;
7580 		}
7581 	} else {
7582 		uint32_t *hash_key = (uint32_t *)key;
7583 		uint16_t i;
7584 
7585 		if (vsi->type == I40E_VSI_SRIOV) {
7586 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7587 				I40E_WRITE_REG(
7588 					hw,
7589 					I40E_VFQF_HKEY1(i, vsi->user_param),
7590 					hash_key[i]);
7591 
7592 		} else {
7593 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7594 				I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7595 					       hash_key[i]);
7596 		}
7597 		I40E_WRITE_FLUSH(hw);
7598 	}
7599 
7600 	return 0;
7601 }
7602 
7603 static int
7604 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7605 {
7606 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7607 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7608 	uint32_t reg;
7609 	int ret;
7610 
7611 	if (!key || !key_len)
7612 		return 0;
7613 
7614 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7615 		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7616 			(struct i40e_aqc_get_set_rss_key_data *)key);
7617 		if (ret) {
7618 			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7619 			return ret;
7620 		}
7621 	} else {
7622 		uint32_t *key_dw = (uint32_t *)key;
7623 		uint16_t i;
7624 
7625 		if (vsi->type == I40E_VSI_SRIOV) {
7626 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7627 				reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7628 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7629 			}
7630 			*key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7631 				   sizeof(uint32_t);
7632 		} else {
7633 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7634 				reg = I40E_PFQF_HKEY(i);
7635 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7636 			}
7637 			*key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7638 				   sizeof(uint32_t);
7639 		}
7640 	}
7641 	return 0;
7642 }
7643 
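/*
 * Apply an RSS configuration: set the hash key and translate the
 * requested flow types into the PFQF_HENA enable bits.
 */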
7644 static int
7645 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7646 {
7647 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7648 	uint64_t hena;
7649 	int ret;
7650 
7651 	ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7652 			       rss_conf->rss_key_len);
7653 	if (ret)
7654 		return ret;
7655 
7656 	hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7657 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7658 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7659 	I40E_WRITE_FLUSH(hw);
7660 
7661 	return 0;
7662 }
7663 
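/*
 * .rss_hash_update handler: update the key and hash types, but refuse
 * requests that would turn RSS on while it is disabled or off while it
 * is enabled.
 */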
7664 static int
7665 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7666 			 struct rte_eth_rss_conf *rss_conf)
7667 {
7668 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7669 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7670 	uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7671 	uint64_t hena;
7672 
7673 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7674 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7675 
7676 	if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7677 		if (rss_hf != 0) /* Enable RSS */
7678 			return -EINVAL;
7679 		return 0; /* Nothing to do */
7680 	}
7681 	/* RSS enabled */
7682 	if (rss_hf == 0) /* Disable RSS */
7683 		return -EINVAL;
7684 
7685 	return i40e_hw_rss_hash_set(pf, rss_conf);
7686 }
7687 
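/*
 * .rss_hash_conf_get handler: report the current key and the flow types
 * derived from the HENA registers.
 */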
7688 static int
7689 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7690 			   struct rte_eth_rss_conf *rss_conf)
7691 {
7692 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7693 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7694 	uint64_t hena;
7695 	int ret;
7696 
7697 	if (!rss_conf)
7698 		return -EINVAL;
7699 
7700 	ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7701 			 &rss_conf->rss_key_len);
7702 	if (ret)
7703 		return ret;
7704 
7705 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7706 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7707 	rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7708 
7709 	return 0;
7710 }
7711 
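/* Translate a tunnel filter type into the matching AQ cloud filter flag */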
7712 static int
7713 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7714 {
7715 	switch (filter_type) {
7716 	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7717 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7718 		break;
7719 	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7720 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7721 		break;
7722 	case RTE_TUNNEL_FILTER_IMAC_TENID:
7723 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7724 		break;
7725 	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7726 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7727 		break;
7728 	case ETH_TUNNEL_FILTER_IMAC:
7729 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7730 		break;
7731 	case ETH_TUNNEL_FILTER_OIP:
7732 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7733 		break;
7734 	case ETH_TUNNEL_FILTER_IIP:
7735 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7736 		break;
7737 	default:
7738 		PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7739 		return -EINVAL;
7740 	}
7741 
7742 	return 0;
7743 }
7744 
7745 /* Convert tunnel filter structure */
7746 static int
7747 i40e_tunnel_filter_convert(
7748 	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7749 	struct i40e_tunnel_filter *tunnel_filter)
7750 {
7751 	rte_ether_addr_copy((struct rte_ether_addr *)
7752 			&cld_filter->element.outer_mac,
7753 		(struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7754 	rte_ether_addr_copy((struct rte_ether_addr *)
7755 			&cld_filter->element.inner_mac,
7756 		(struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7757 	tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7758 	if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7759 	     I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7760 	    I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7761 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7762 	else
7763 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7764 	tunnel_filter->input.flags = cld_filter->element.flags;
7765 	tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7766 	tunnel_filter->queue = cld_filter->element.queue_number;
7767 	rte_memcpy(tunnel_filter->input.general_fields,
7768 		   cld_filter->general_fields,
7769 		   sizeof(cld_filter->general_fields));
7770 
7771 	return 0;
7772 }
7773 
7774 /* Check whether the tunnel filter exists */
7775 struct i40e_tunnel_filter *
7776 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7777 			     const struct i40e_tunnel_filter_input *input)
7778 {
7779 	int ret;
7780 
7781 	ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7782 	if (ret < 0)
7783 		return NULL;
7784 
7785 	return tunnel_rule->hash_map[ret];
7786 }
7787 
7788 /* Add a tunnel filter into the SW list */
7789 static int
7790 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7791 			     struct i40e_tunnel_filter *tunnel_filter)
7792 {
7793 	struct i40e_tunnel_rule *rule = &pf->tunnel;
7794 	int ret;
7795 
7796 	ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7797 	if (ret < 0) {
7798 		PMD_DRV_LOG(ERR,
7799 			    "Failed to insert tunnel filter into hash table %d!",
7800 			    ret);
7801 		return ret;
7802 	}
7803 	rule->hash_map[ret] = tunnel_filter;
7804 
7805 	TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7806 
7807 	return 0;
7808 }
7809 
7810 /* Delete a tunnel filter from the SW list */
7811 int
7812 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7813 			  struct i40e_tunnel_filter_input *input)
7814 {
7815 	struct i40e_tunnel_rule *rule = &pf->tunnel;
7816 	struct i40e_tunnel_filter *tunnel_filter;
7817 	int ret;
7818 
7819 	ret = rte_hash_del_key(rule->hash_table, input);
7820 	if (ret < 0) {
7821 		PMD_DRV_LOG(ERR,
7822 			    "Failed to delete tunnel filter from hash table %d!",
7823 			    ret);
7824 		return ret;
7825 	}
7826 	tunnel_filter = rule->hash_map[ret];
7827 	rule->hash_map[ret] = NULL;
7828 
7829 	TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7830 	rte_free(tunnel_filter);
7831 
7832 	return 0;
7833 }
7834 
7835 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7836 #define I40E_TR_VXLAN_GRE_KEY_MASK		0x4
7837 #define I40E_TR_GENEVE_KEY_MASK			0x8
7838 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK		0x40
7839 #define I40E_TR_GRE_KEY_MASK			0x400
7840 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK		0x800
7841 #define I40E_TR_GRE_NO_KEY_MASK			0x8000
7842 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
7843 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
7844 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
7845 #define I40E_DIRECTION_INGRESS_KEY		0x8000
7846 #define I40E_TR_L4_TYPE_TCP			0x2
7847 #define I40E_TR_L4_TYPE_UDP			0x4
7848 #define I40E_TR_L4_TYPE_SCTP			0x8
7849 
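/*
 * Use the firmware "replace cloud filters" command to create the
 * customized L1 filter (type 0x11) that MPLSoUDP/MPLSoGRE tunnel filters
 * rely on.
 */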
7850 static enum
7851 i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7852 {
7853 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7854 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7855 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7856 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
7857 	enum i40e_status_code status = I40E_SUCCESS;
7858 
7859 	if (pf->support_multi_driver) {
7860 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7861 		return I40E_NOT_SUPPORTED;
7862 	}
7863 
7864 	memset(&filter_replace, 0,
7865 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7866 	memset(&filter_replace_buf, 0,
7867 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7868 
7869 	/* create L1 filter */
7870 	filter_replace.old_filter_type =
7871 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7872 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7873 	filter_replace.tr_bit = 0;
7874 
7875 	/* Prepare the buffer, 3 entries */
7876 	filter_replace_buf.data[0] =
7877 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7878 	filter_replace_buf.data[0] |=
7879 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7880 	filter_replace_buf.data[2] = 0xFF;
7881 	filter_replace_buf.data[3] = 0xFF;
7882 	filter_replace_buf.data[4] =
7883 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7884 	filter_replace_buf.data[4] |=
7885 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7886 	filter_replace_buf.data[7] = 0xF0;
7887 	filter_replace_buf.data[8]
7888 		= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7889 	filter_replace_buf.data[8] |=
7890 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7891 	filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7892 		I40E_TR_GENEVE_KEY_MASK |
7893 		I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7894 	filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7895 		I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7896 		I40E_TR_GRE_NO_KEY_MASK) >> 8;
7897 
7898 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7899 					       &filter_replace_buf);
7900 	if (!status && (filter_replace.old_filter_type !=
7901 			filter_replace.new_filter_type))
7902 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7903 			    " original: 0x%x, new: 0x%x",
7904 			    dev->device->name,
7905 			    filter_replace.old_filter_type,
7906 			    filter_replace.new_filter_type);
7907 
7908 	return status;
7909 }
7910 
7911 static enum
7912 i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7913 {
7914 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7915 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7916 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7917 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
7918 	enum i40e_status_code status = I40E_SUCCESS;
7919 
7920 	if (pf->support_multi_driver) {
7921 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7922 		return I40E_NOT_SUPPORTED;
7923 	}
7924 
7925 	/* For MPLSoUDP */
7926 	memset(&filter_replace, 0,
7927 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7928 	memset(&filter_replace_buf, 0,
7929 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7930 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7931 		I40E_AQC_MIRROR_CLOUD_FILTER;
7932 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7933 	filter_replace.new_filter_type =
7934 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
7935 	/* Prepare the buffer, 2 entries */
7936 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7937 	filter_replace_buf.data[0] |=
7938 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7939 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7940 	filter_replace_buf.data[4] |=
7941 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7942 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7943 					       &filter_replace_buf);
7944 	if (status < 0)
7945 		return status;
7946 	if (filter_replace.old_filter_type !=
7947 	    filter_replace.new_filter_type)
7948 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7949 			    " original: 0x%x, new: 0x%x",
7950 			    dev->device->name,
7951 			    filter_replace.old_filter_type,
7952 			    filter_replace.new_filter_type);
7953 
7954 	/* For MPLSoGRE */
7955 	memset(&filter_replace, 0,
7956 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7957 	memset(&filter_replace_buf, 0,
7958 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7959 
7960 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7961 		I40E_AQC_MIRROR_CLOUD_FILTER;
7962 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7963 	filter_replace.new_filter_type =
7964 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
7965 	/* Prepare the buffer, 2 entries */
7966 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7967 	filter_replace_buf.data[0] |=
7968 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7969 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7970 	filter_replace_buf.data[4] |=
7971 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7972 
7973 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7974 					       &filter_replace_buf);
7975 	if (!status && (filter_replace.old_filter_type !=
7976 			filter_replace.new_filter_type))
7977 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7978 			    " original: 0x%x, new: 0x%x",
7979 			    dev->device->name,
7980 			    filter_replace.old_filter_type,
7981 			    filter_replace.new_filter_type);
7982 
7983 	return status;
7984 }
7985 
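/*
 * Create the customized L1 filters (types 0x12 and 0x13) used to match
 * the GTP-C and GTP-U TEID, again via the firmware filter replace command.
 */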
7986 static enum i40e_status_code
7987 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7988 {
7989 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7990 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7991 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7992 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
7993 	enum i40e_status_code status = I40E_SUCCESS;
7994 
7995 	if (pf->support_multi_driver) {
7996 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7997 		return I40E_NOT_SUPPORTED;
7998 	}
7999 
8000 	/* For GTP-C */
8001 	memset(&filter_replace, 0,
8002 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8003 	memset(&filter_replace_buf, 0,
8004 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8005 	/* create L1 filter */
8006 	filter_replace.old_filter_type =
8007 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8008 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8009 	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8010 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8011 	/* Prepare the buffer, 2 entries */
8012 	filter_replace_buf.data[0] =
8013 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8014 	filter_replace_buf.data[0] |=
8015 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8016 	filter_replace_buf.data[2] = 0xFF;
8017 	filter_replace_buf.data[3] = 0xFF;
8018 	filter_replace_buf.data[4] =
8019 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8020 	filter_replace_buf.data[4] |=
8021 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8022 	filter_replace_buf.data[6] = 0xFF;
8023 	filter_replace_buf.data[7] = 0xFF;
8024 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8025 					       &filter_replace_buf);
8026 	if (status < 0)
8027 		return status;
8028 	if (filter_replace.old_filter_type !=
8029 	    filter_replace.new_filter_type)
8030 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8031 			    " original: 0x%x, new: 0x%x",
8032 			    dev->device->name,
8033 			    filter_replace.old_filter_type,
8034 			    filter_replace.new_filter_type);
8035 
8036 	/* for GTP-U */
8037 	memset(&filter_replace, 0,
8038 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8039 	memset(&filter_replace_buf, 0,
8040 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8041 	/* create L1 filter */
8042 	filter_replace.old_filter_type =
8043 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8044 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8045 	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8046 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8047 	/* Prepare the buffer, 2 entries */
8048 	filter_replace_buf.data[0] =
8049 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8050 	filter_replace_buf.data[0] |=
8051 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8052 	filter_replace_buf.data[2] = 0xFF;
8053 	filter_replace_buf.data[3] = 0xFF;
8054 	filter_replace_buf.data[4] =
8055 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8056 	filter_replace_buf.data[4] |=
8057 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8058 	filter_replace_buf.data[6] = 0xFF;
8059 	filter_replace_buf.data[7] = 0xFF;
8060 
8061 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8062 					       &filter_replace_buf);
8063 	if (!status && (filter_replace.old_filter_type !=
8064 			filter_replace.new_filter_type))
8065 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8066 			    " original: 0x%x, new: 0x%x",
8067 			    dev->device->name,
8068 			    filter_replace.old_filter_type,
8069 			    filter_replace.new_filter_type);
8070 
8071 	return status;
8072 }
8073 
8074 static enum
8075 i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8076 {
8077 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8078 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8079 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8080 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8081 	enum i40e_status_code status = I40E_SUCCESS;
8082 
8083 	if (pf->support_multi_driver) {
8084 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8085 		return I40E_NOT_SUPPORTED;
8086 	}
8087 
8088 	/* for GTP-C */
8089 	memset(&filter_replace, 0,
8090 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8091 	memset(&filter_replace_buf, 0,
8092 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8093 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8094 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8095 	filter_replace.new_filter_type =
8096 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
8097 	/* Prepare the buffer, 2 entries */
8098 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8099 	filter_replace_buf.data[0] |=
8100 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8101 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8102 	filter_replace_buf.data[4] |=
8103 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8104 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8105 					       &filter_replace_buf);
8106 	if (status < 0)
8107 		return status;
8108 	if (filter_replace.old_filter_type !=
8109 	    filter_replace.new_filter_type)
8110 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8111 			    " original: 0x%x, new: 0x%x",
8112 			    dev->device->name,
8113 			    filter_replace.old_filter_type,
8114 			    filter_replace.new_filter_type);
8115 
8116 	/* for GTP-U */
8117 	memset(&filter_replace, 0,
8118 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8119 	memset(&filter_replace_buf, 0,
8120 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8121 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8122 	filter_replace.old_filter_type =
8123 		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8124 	filter_replace.new_filter_type =
8125 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
8126 	/* Prepare the buffer, 2 entries */
8127 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8128 	filter_replace_buf.data[0] |=
8129 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8130 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8131 	filter_replace_buf.data[4] |=
8132 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8133 
8134 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8135 					       &filter_replace_buf);
8136 	if (!status && (filter_replace.old_filter_type !=
8137 			filter_replace.new_filter_type))
8138 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8139 			    " original: 0x%x, new: 0x%x",
8140 			    dev->device->name,
8141 			    filter_replace.old_filter_type,
8142 			    filter_replace.new_filter_type);
8143 
8144 	return status;
8145 }
8146 
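/*
 * Create the customized L1 filter that matches an outer L4 source or
 * destination port (TCP/UDP/SCTP) via the firmware filter replace command.
 */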
8147 static enum i40e_status_code
8148 i40e_replace_port_l1_filter(struct i40e_pf *pf,
8149 			    enum i40e_l4_port_type l4_port_type)
8150 {
8151 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8152 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8153 	enum i40e_status_code status = I40E_SUCCESS;
8154 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8155 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8156 
8157 	if (pf->support_multi_driver) {
8158 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8159 		return I40E_NOT_SUPPORTED;
8160 	}
8161 
8162 	memset(&filter_replace, 0,
8163 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8164 	memset(&filter_replace_buf, 0,
8165 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8166 
8167 	/* create L1 filter */
8168 	if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8169 		filter_replace.old_filter_type =
8170 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8171 		filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
8172 		filter_replace_buf.data[8] =
8173 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
8174 	} else {
8175 		filter_replace.old_filter_type =
8176 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
8177 		filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X10;
8178 		filter_replace_buf.data[8] =
8179 			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
8180 	}
8181 
8182 	filter_replace.tr_bit = 0;
8183 	/* Prepare the buffer, 3 entries */
8184 	filter_replace_buf.data[0] =
8185 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
8186 	filter_replace_buf.data[0] |=
8187 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8188 	filter_replace_buf.data[2] = 0x00;
8189 	filter_replace_buf.data[3] =
8190 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
8191 	filter_replace_buf.data[4] =
8192 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
8193 	filter_replace_buf.data[4] |=
8194 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8195 	filter_replace_buf.data[5] = 0x00;
8196 	filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
8197 		I40E_TR_L4_TYPE_TCP |
8198 		I40E_TR_L4_TYPE_SCTP;
8199 	filter_replace_buf.data[7] = 0x00;
8200 	filter_replace_buf.data[8] |=
8201 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8202 	filter_replace_buf.data[9] = 0x00;
8203 	filter_replace_buf.data[10] = 0xFF;
8204 	filter_replace_buf.data[11] = 0xFF;
8205 
8206 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8207 					       &filter_replace_buf);
8208 	if (!status && filter_replace.old_filter_type !=
8209 	    filter_replace.new_filter_type)
8210 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8211 			    " original: 0x%x, new: 0x%x",
8212 			    dev->device->name,
8213 			    filter_replace.old_filter_type,
8214 			    filter_replace.new_filter_type);
8215 
8216 	return status;
8217 }
8218 
8219 static enum i40e_status_code
8220 i40e_replace_port_cloud_filter(struct i40e_pf *pf,
8221 			       enum i40e_l4_port_type l4_port_type)
8222 {
8223 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8224 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8225 	enum i40e_status_code status = I40E_SUCCESS;
8226 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8227 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
8228 
8229 	if (pf->support_multi_driver) {
8230 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8231 		return I40E_NOT_SUPPORTED;
8232 	}
8233 
8234 	memset(&filter_replace, 0,
8235 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8236 	memset(&filter_replace_buf, 0,
8237 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8238 
8239 	if (l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8240 		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8241 		filter_replace.new_filter_type =
8242 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8243 		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
8244 	} else {
8245 		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
8246 		filter_replace.new_filter_type =
8247 			I40E_AQC_ADD_CLOUD_FILTER_0X10;
8248 		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
8249 	}
8250 
8251 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8252 	filter_replace.tr_bit = 0;
8253 	/* Prepare the buffer, 2 entries */
8254 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8255 	filter_replace_buf.data[0] |=
8256 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8257 	filter_replace_buf.data[4] |=
8258 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8259 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8260 					       &filter_replace_buf);
8261 
8262 	if (!status && filter_replace.old_filter_type !=
8263 	    filter_replace.new_filter_type)
8264 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8265 			    " original: 0x%x, new: 0x%x",
8266 			    dev->device->name,
8267 			    filter_replace.old_filter_type,
8268 			    filter_replace.new_filter_type);
8269 
8270 	return status;
8271 }
8272 
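/*
 * Add or remove a cloud (tunnel) filter: convert the generic tunnel
 * filter configuration into the admin queue cloud filter element, first
 * installing any customized (replaced) filter types the tunnel type needs.
 */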
8273 int
8274 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8275 		      struct i40e_tunnel_filter_conf *tunnel_filter,
8276 		      uint8_t add)
8277 {
8278 	uint16_t ip_type;
8279 	uint32_t ipv4_addr, ipv4_addr_le;
8280 	uint8_t i, tun_type = 0;
8281 	/* internal variable to convert ipv6 byte order */
8282 	uint32_t convert_ipv6[4];
8283 	int val, ret = 0;
8284 	struct i40e_pf_vf *vf = NULL;
8285 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8286 	struct i40e_vsi *vsi;
8287 	struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8288 	struct i40e_aqc_cloud_filters_element_bb *pfilter;
8289 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8290 	struct i40e_tunnel_filter *tunnel, *node;
8291 	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8292 	uint32_t teid_le;
8293 	bool big_buffer = 0;
8294 
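	/*
	 * The filter is allocated with the extended element size so that
	 * the general_fields array is available for the big-buffer tunnel
	 * types handled below.
	 */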
8295 	cld_filter = rte_zmalloc("tunnel_filter",
8296 			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8297 			 0);
8298 
8299 	if (cld_filter == NULL) {
8300 		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8301 		return -ENOMEM;
8302 	}
8303 	pfilter = cld_filter;
8304 
8305 	rte_ether_addr_copy(&tunnel_filter->outer_mac,
8306 			(struct rte_ether_addr *)&pfilter->element.outer_mac);
8307 	rte_ether_addr_copy(&tunnel_filter->inner_mac,
8308 			(struct rte_ether_addr *)&pfilter->element.inner_mac);
8309 
8310 	pfilter->element.inner_vlan =
8311 		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8312 	if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8313 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8314 		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8315 		ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8316 		rte_memcpy(&pfilter->element.ipaddr.v4.data,
8317 				&ipv4_addr_le,
8318 				sizeof(pfilter->element.ipaddr.v4.data));
8319 	} else {
8320 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8321 		for (i = 0; i < 4; i++) {
8322 			convert_ipv6[i] =
8323 			rte_cpu_to_le_32(rte_be_to_cpu_32(
8324 					 tunnel_filter->ip_addr.ipv6_addr[i]));
8325 		}
8326 		rte_memcpy(&pfilter->element.ipaddr.v6.data,
8327 			   &convert_ipv6,
8328 			   sizeof(pfilter->element.ipaddr.v6.data));
8329 	}
8330 
8331 	/* check tunneled type */
8332 	switch (tunnel_filter->tunnel_type) {
8333 	case I40E_TUNNEL_TYPE_VXLAN:
8334 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8335 		break;
8336 	case I40E_TUNNEL_TYPE_NVGRE:
8337 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8338 		break;
8339 	case I40E_TUNNEL_TYPE_IP_IN_GRE:
8340 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8341 		break;
8342 	case I40E_TUNNEL_TYPE_MPLSoUDP:
8343 		if (!pf->mpls_replace_flag) {
8344 			i40e_replace_mpls_l1_filter(pf);
8345 			i40e_replace_mpls_cloud_filter(pf);
8346 			pf->mpls_replace_flag = 1;
8347 		}
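		/*
		 * The tenant_id carries the 20-bit MPLS label: its upper
		 * 16 bits go into general-field word 0 and its low 4 bits
		 * into the top nibble of word 1; word 2 is 0x40 for
		 * MPLSoUDP (0x0 in the MPLSoGRE case below).
		 */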
8348 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8349 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8350 			teid_le >> 4;
8351 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8352 			(teid_le & 0xF) << 12;
8353 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8354 			0x40;
8355 		big_buffer = 1;
8356 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8357 		break;
8358 	case I40E_TUNNEL_TYPE_MPLSoGRE:
8359 		if (!pf->mpls_replace_flag) {
8360 			i40e_replace_mpls_l1_filter(pf);
8361 			i40e_replace_mpls_cloud_filter(pf);
8362 			pf->mpls_replace_flag = 1;
8363 		}
8364 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8365 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8366 			teid_le >> 4;
8367 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8368 			(teid_le & 0xF) << 12;
8369 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8370 			0x0;
8371 		big_buffer = 1;
8372 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8373 		break;
8374 	case I40E_TUNNEL_TYPE_GTPC:
8375 		if (!pf->gtp_replace_flag) {
8376 			i40e_replace_gtp_l1_filter(pf);
8377 			i40e_replace_gtp_cloud_filter(pf);
8378 			pf->gtp_replace_flag = 1;
8379 		}
8380 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8381 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8382 			(teid_le >> 16) & 0xFFFF;
8383 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8384 			teid_le & 0xFFFF;
8385 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8386 			0x0;
8387 		big_buffer = 1;
8388 		break;
8389 	case I40E_TUNNEL_TYPE_GTPU:
8390 		if (!pf->gtp_replace_flag) {
8391 			i40e_replace_gtp_l1_filter(pf);
8392 			i40e_replace_gtp_cloud_filter(pf);
8393 			pf->gtp_replace_flag = 1;
8394 		}
8395 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8396 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8397 			(teid_le >> 16) & 0xFFFF;
8398 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8399 			teid_le & 0xFFFF;
8400 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8401 			0x0;
8402 		big_buffer = 1;
8403 		break;
8404 	case I40E_TUNNEL_TYPE_QINQ:
8405 		if (!pf->qinq_replace_flag) {
8406 			ret = i40e_cloud_filter_qinq_create(pf);
8407 			if (ret < 0)
8408 				PMD_DRV_LOG(DEBUG,
8409 					    "QinQ tunnel filter already created.");
8410 			pf->qinq_replace_flag = 1;
8411 		}
8412 		/* Put the values of the outer and inner VLAN into the
8413 		 * general fields. Big Buffer must be used for this
8414 		 * filter; see the handling in
8415 		 * i40e_aq_add_cloud_filters.
8416 		 */
8417 		pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8418 		pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8419 		big_buffer = 1;
8420 		break;
8421 	case I40E_CLOUD_TYPE_UDP:
8422 	case I40E_CLOUD_TYPE_TCP:
8423 	case I40E_CLOUD_TYPE_SCTP:
8424 		if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC) {
8425 			if (!pf->sport_replace_flag) {
8426 				i40e_replace_port_l1_filter(pf,
8427 						tunnel_filter->l4_port_type);
8428 				i40e_replace_port_cloud_filter(pf,
8429 						tunnel_filter->l4_port_type);
8430 				pf->sport_replace_flag = 1;
8431 			}
8432 			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8433 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8434 				I40E_DIRECTION_INGRESS_KEY;
8435 
8436 			if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8437 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8438 					I40E_TR_L4_TYPE_UDP;
8439 			else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8440 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8441 					I40E_TR_L4_TYPE_TCP;
8442 			else
8443 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8444 					I40E_TR_L4_TYPE_SCTP;
8445 
8446 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8447 				(teid_le >> 16) & 0xFFFF;
8448 			big_buffer = 1;
8449 		} else {
8450 			if (!pf->dport_replace_flag) {
8451 				i40e_replace_port_l1_filter(pf,
8452 						tunnel_filter->l4_port_type);
8453 				i40e_replace_port_cloud_filter(pf,
8454 						tunnel_filter->l4_port_type);
8455 				pf->dport_replace_flag = 1;
8456 			}
8457 			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8458 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
8459 				I40E_DIRECTION_INGRESS_KEY;
8460 
8461 			if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP)
8462 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8463 					I40E_TR_L4_TYPE_UDP;
8464 			else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP)
8465 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8466 					I40E_TR_L4_TYPE_TCP;
8467 			else
8468 				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
8469 					I40E_TR_L4_TYPE_SCTP;
8470 
8471 			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
8472 				(teid_le >> 16) & 0xFFFF;
8473 			big_buffer = 1;
8474 		}
8475 
8476 		break;
8477 	default:
8478 		/* Other tunnel types are not supported. */
8479 		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
8480 		rte_free(cld_filter);
8481 		return -EINVAL;
8482 	}
8483 
8484 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8485 		pfilter->element.flags =
8486 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8487 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8488 		pfilter->element.flags =
8489 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
8490 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8491 		pfilter->element.flags =
8492 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8493 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8494 		pfilter->element.flags =
8495 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
8496 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8497 		pfilter->element.flags |=
8498 			I40E_AQC_ADD_CLOUD_FILTER_0X10;
8499 	else if (tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_UDP ||
8500 		 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_TCP ||
8501 		 tunnel_filter->tunnel_type == I40E_CLOUD_TYPE_SCTP) {
8502 		if (tunnel_filter->l4_port_type == I40E_L4_PORT_TYPE_SRC)
8503 			pfilter->element.flags |=
8504 				I40E_AQC_ADD_CLOUD_FILTER_0X11;
8505 		else
8506 			pfilter->element.flags |=
8507 				I40E_AQC_ADD_CLOUD_FILTER_0X10;
8508 	} else {
8509 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8510 						&pfilter->element.flags);
8511 		if (val < 0) {
8512 			rte_free(cld_filter);
8513 			return -EINVAL;
8514 		}
8515 	}
8516 
8517 	pfilter->element.flags |= rte_cpu_to_le_16(
8518 		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8519 		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8520 	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8521 	pfilter->element.queue_number =
8522 		rte_cpu_to_le_16(tunnel_filter->queue_id);
8523 
8524 	if (!tunnel_filter->is_to_vf)
8525 		vsi = pf->main_vsi;
8526 	else {
8527 		if (tunnel_filter->vf_id >= pf->vf_num) {
8528 			PMD_DRV_LOG(ERR, "Invalid argument.");
8529 			rte_free(cld_filter);
8530 			return -EINVAL;
8531 		}
8532 		vf = &pf->vfs[tunnel_filter->vf_id];
8533 		vsi = vf->vsi;
8534 	}
8535 
8536 	/* Check if the filter already exists in the SW list */
8537 	memset(&check_filter, 0, sizeof(check_filter));
8538 	i40e_tunnel_filter_convert(cld_filter, &check_filter);
8539 	check_filter.is_to_vf = tunnel_filter->is_to_vf;
8540 	check_filter.vf_id = tunnel_filter->vf_id;
8541 	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8542 	if (add && node) {
8543 		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8544 		rte_free(cld_filter);
8545 		return -EINVAL;
8546 	}
8547 
8548 	if (!add && !node) {
8549 		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8550 		rte_free(cld_filter);
8551 		return -EINVAL;
8552 	}
8553 
8554 	if (add) {
8555 		if (big_buffer)
8556 			ret = i40e_aq_add_cloud_filters_bb(hw,
8557 						   vsi->seid, cld_filter, 1);
8558 		else
8559 			ret = i40e_aq_add_cloud_filters(hw,
8560 					vsi->seid, &cld_filter->element, 1);
8561 		if (ret < 0) {
8562 			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8563 			rte_free(cld_filter);
8564 			return -ENOTSUP;
8565 		}
8566 		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8567 		if (tunnel == NULL) {
8568 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8569 			rte_free(cld_filter);
8570 			return -ENOMEM;
8571 		}
8572 
8573 		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8574 		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8575 		if (ret < 0)
8576 			rte_free(tunnel);
8577 	} else {
8578 		if (big_buffer)
8579 			ret = i40e_aq_rem_cloud_filters_bb(
8580 				hw, vsi->seid, cld_filter, 1);
8581 		else
8582 			ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8583 						&cld_filter->element, 1);
8584 		if (ret < 0) {
8585 			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8586 			rte_free(cld_filter);
8587 			return -ENOTSUP;
8588 		}
8589 		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8590 	}
8591 
8592 	rte_free(cld_filter);
8593 	return ret;
8594 }
8595 
8596 static int
8597 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8598 {
8599 	uint8_t i;
8600 
8601 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8602 		if (pf->vxlan_ports[i] == port)
8603 			return i;
8604 	}
8605 
8606 	return -1;
8607 }
8608 
8609 static int
8610 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8611 {
8612 	int  idx, ret;
8613 	uint8_t filter_idx = 0;
8614 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8615 
8616 	idx = i40e_get_vxlan_port_idx(pf, port);
8617 
8618 	/* Check if port already exists */
8619 	if (idx >= 0) {
8620 		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8621 		return -EINVAL;
8622 	}
8623 
8624 	/* Now check if there is space to add the new port */
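	/* A stored port value of 0 marks a free slot, so looking up port 0
	 * returns the first available index.
	 */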
8625 	idx = i40e_get_vxlan_port_idx(pf, 0);
8626 	if (idx < 0) {
8627 		PMD_DRV_LOG(ERR,
8628 			"Maximum number of UDP ports reached, not adding port %d",
8629 			port);
8630 		return -ENOSPC;
8631 	}
8632 
8633 	ret =  i40e_aq_add_udp_tunnel(hw, port, udp_type,
8634 					&filter_idx, NULL);
8635 	if (ret < 0) {
8636 		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8637 		return -1;
8638 	}
8639 
8640 	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
8641 			 port,  filter_idx);
8642 
8643 	/* New port: add it and mark its index in the bitmap */
8644 	pf->vxlan_ports[idx] = port;
8645 	pf->vxlan_bitmap |= (1 << idx);
8646 
8647 	if (!(pf->flags & I40E_FLAG_VXLAN))
8648 		pf->flags |= I40E_FLAG_VXLAN;
8649 
8650 	return 0;
8651 }
8652 
8653 static int
8654 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8655 {
8656 	int idx;
8657 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8658 
8659 	if (!(pf->flags & I40E_FLAG_VXLAN)) {
8660 		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8661 		return -EINVAL;
8662 	}
8663 
8664 	idx = i40e_get_vxlan_port_idx(pf, port);
8665 
8666 	if (idx < 0) {
8667 		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8668 		return -EINVAL;
8669 	}
8670 
8671 	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8672 		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8673 		return -1;
8674 	}
8675 
8676 	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
8677 			port, idx);
8678 
8679 	pf->vxlan_ports[idx] = 0;
8680 	pf->vxlan_bitmap &= ~(1 << idx);
8681 
8682 	if (!pf->vxlan_bitmap)
8683 		pf->flags &= ~I40E_FLAG_VXLAN;
8684 
8685 	return 0;
8686 }
8687 
8688 /* Add UDP tunneling port */
8689 static int
8690 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8691 			     struct rte_eth_udp_tunnel *udp_tunnel)
8692 {
8693 	int ret = 0;
8694 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8695 
8696 	if (udp_tunnel == NULL)
8697 		return -EINVAL;
8698 
8699 	switch (udp_tunnel->prot_type) {
8700 	case RTE_TUNNEL_TYPE_VXLAN:
8701 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8702 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
8703 		break;
8704 	case RTE_TUNNEL_TYPE_VXLAN_GPE:
8705 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8706 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8707 		break;
8708 	case RTE_TUNNEL_TYPE_GENEVE:
8709 	case RTE_TUNNEL_TYPE_TEREDO:
8710 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8711 		ret = -1;
8712 		break;
8713 
8714 	default:
8715 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8716 		ret = -1;
8717 		break;
8718 	}
8719 
8720 	return ret;
8721 }
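
/*
 * Usage sketch (an illustration, not part of the driver): applications
 * normally reach the udp_tunnel_port_add/del callbacks through the
 * generic ethdev API. Assuming port_id identifies an i40e port, adding
 * the standard VXLAN UDP port could look like:
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	int rc = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */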
8722 
8723 /* Remove UDP tunneling port */
8724 static int
8725 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8726 			     struct rte_eth_udp_tunnel *udp_tunnel)
8727 {
8728 	int ret = 0;
8729 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8730 
8731 	if (udp_tunnel == NULL)
8732 		return -EINVAL;
8733 
8734 	switch (udp_tunnel->prot_type) {
8735 	case RTE_TUNNEL_TYPE_VXLAN:
8736 	case RTE_TUNNEL_TYPE_VXLAN_GPE:
8737 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8738 		break;
8739 	case RTE_TUNNEL_TYPE_GENEVE:
8740 	case RTE_TUNNEL_TYPE_TEREDO:
8741 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8742 		ret = -1;
8743 		break;
8744 	default:
8745 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8746 		ret = -1;
8747 		break;
8748 	}
8749 
8750 	return ret;
8751 }
8752 
8753 /* Calculate the maximum number of contiguous PF queues that are configured */
8754 int
8755 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8756 {
8757 	struct rte_eth_dev_data *data = pf->dev_data;
8758 	int i, num;
8759 	struct i40e_rx_queue *rxq;
8760 
8761 	num = 0;
8762 	for (i = 0; i < pf->lan_nb_qps; i++) {
8763 		rxq = data->rx_queues[i];
8764 		if (rxq && rxq->q_set)
8765 			num++;
8766 		else
8767 			break;
8768 	}
8769 
8770 	return num;
8771 }
8772 
8773 /* Reset the global configuration of the hash function and input sets */
8774 static void
8775 i40e_pf_global_rss_reset(struct i40e_pf *pf)
8776 {
8777 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8778 	uint32_t reg, reg_val;
8779 	int i;
8780 
8781 	/* Reset global RSS function sets */
8782 	reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8783 	if (!(reg_val & I40E_GLQF_CTL_HTOEP_MASK)) {
8784 		reg_val |= I40E_GLQF_CTL_HTOEP_MASK;
8785 		i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg_val);
8786 	}
8787 
8788 	for (i = 0; i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) {
8789 		uint64_t inset;
8790 		int j, pctype;
8791 
8792 		if (hw->mac.type == I40E_MAC_X722)
8793 			pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(i));
8794 		else
8795 			pctype = i;
8796 
8797 		/* Reset pctype insets */
8798 		inset = i40e_get_default_input_set(i);
8799 		if (inset) {
8800 			pf->hash_input_set[pctype] = inset;
8801 			inset = i40e_translate_input_set_reg(hw->mac.type,
8802 							     inset);
8803 
8804 			reg = I40E_GLQF_HASH_INSET(0, pctype);
8805 			i40e_check_write_global_reg(hw, reg, (uint32_t)inset);
8806 			reg = I40E_GLQF_HASH_INSET(1, pctype);
8807 			i40e_check_write_global_reg(hw, reg,
8808 						    (uint32_t)(inset >> 32));
8809 
8810 			/* Clear unused mask registers of the pctype */
8811 			for (j = 0; j < I40E_INSET_MASK_NUM_REG; j++) {
8812 				reg = I40E_GLQF_HASH_MSK(j, pctype);
8813 				i40e_check_write_global_reg(hw, reg, 0);
8814 			}
8815 		}
8816 
8817 		/* Reset pctype symmetric sets */
8818 		reg = I40E_GLQF_HSYM(pctype);
8819 		reg_val = i40e_read_rx_ctl(hw, reg);
8820 		if (reg_val & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8821 			reg_val &= ~I40E_GLQF_HSYM_SYMH_ENA_MASK;
8822 			i40e_write_global_rx_ctl(hw, reg, reg_val);
8823 		}
8824 	}
8825 	I40E_WRITE_FLUSH(hw);
8826 }
8827 
8828 int
8829 i40e_pf_reset_rss_reta(struct i40e_pf *pf)
8830 {
8831 	struct i40e_hw *hw = &pf->adapter->hw;
8832 	uint8_t lut[ETH_RSS_RETA_SIZE_512];
8833 	uint32_t i;
8834 	int num;
8835 
8836 	/* If both VMDQ and RSS are enabled, not all of the PF queues are
8837 	 * configured. It's necessary to calculate the number of PF
8838 	 * queues that are actually configured.
8839 	 */
8840 	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8841 		num = i40e_pf_calc_configured_queues_num(pf);
8842 	else
8843 		num = pf->dev_data->nb_rx_queues;
8844 
8845 	num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8846 	if (num <= 0)
8847 		return 0;
8848 
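	/* Spread the configured queues round-robin across the redirection
	 * table; the loop counter ends at the full RSS table size, which
	 * is then passed to i40e_set_rss_lut().
	 */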
8849 	for (i = 0; i < hw->func_caps.rss_table_size; i++)
8850 		lut[i] = (uint8_t)(i % (uint32_t)num);
8851 
8852 	return i40e_set_rss_lut(pf->main_vsi, lut, (uint16_t)i);
8853 }
8854 
8855 int
8856 i40e_pf_reset_rss_key(struct i40e_pf *pf)
8857 {
8858 	const uint8_t key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8859 			sizeof(uint32_t);
8860 	uint8_t *rss_key;
8861 
8862 	/* Reset key */
8863 	rss_key = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key;
8864 	if (!rss_key ||
8865 	    pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_key_len < key_len) {
8866 		static uint32_t rss_key_default[] = {0x6b793944,
8867 			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8868 			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8869 			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8870 
8871 		rss_key = (uint8_t *)rss_key_default;
8872 	}
8873 
8874 	return i40e_set_rss_key(pf->main_vsi, rss_key, key_len);
8875 }
8876 
8877 static int
8878 i40e_pf_rss_reset(struct i40e_pf *pf)
8879 {
8880 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8881 
8882 	int ret;
8883 
8884 	pf->hash_filter_enabled = 0;
8885 	i40e_pf_disable_rss(pf);
8886 	i40e_set_symmetric_hash_enable_per_port(hw, 0);
8887 
8888 	if (!pf->support_multi_driver)
8889 		i40e_pf_global_rss_reset(pf);
8890 
8891 	/* Reset RETA table */
8892 	if (pf->adapter->rss_reta_updated == 0) {
8893 		ret = i40e_pf_reset_rss_reta(pf);
8894 		if (ret)
8895 			return ret;
8896 	}
8897 
8898 	return i40e_pf_reset_rss_key(pf);
8899 }
8900 
8901 /* Configure RSS */
8902 int
8903 i40e_pf_config_rss(struct i40e_pf *pf)
8904 {
8905 	struct i40e_hw *hw;
8906 	enum rte_eth_rx_mq_mode mq_mode;
8907 	uint64_t rss_hf, hena;
8908 	int ret;
8909 
8910 	ret = i40e_pf_rss_reset(pf);
8911 	if (ret) {
8912 		PMD_DRV_LOG(ERR, "Reset RSS failed, RSS has been disabled");
8913 		return ret;
8914 	}
8915 
8916 	rss_hf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
8917 	mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8918 	if (!(rss_hf & pf->adapter->flow_types_mask) ||
8919 	    !(mq_mode & ETH_MQ_RX_RSS_FLAG))
8920 		return 0;
8921 
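	/* HENA is a 64-bit bitmap of enabled hash PCTYPEs derived from the
	 * requested rss_hf; its low and high halves are written to
	 * PFQF_HENA(0) and PFQF_HENA(1).
	 */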
8922 	hw = I40E_PF_TO_HW(pf);
8923 	hena = i40e_config_hena(pf->adapter, rss_hf);
8924 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
8925 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
8926 	I40E_WRITE_FLUSH(hw);
8927 
8928 	return 0;
8929 }
8930 
8931 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8932 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8933 int
8934 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8935 {
8936 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8937 	uint32_t val, reg;
8938 	int ret = -EINVAL;
8939 
8940 	if (pf->support_multi_driver) {
8941 		PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8942 		return -ENOTSUP;
8943 	}
8944 
8945 	val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8946 	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8947 
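	/* Setting the MSK_ENA bit selects a 3-byte GRE key; clearing it
	 * selects a 4-byte key.
	 */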
8948 	if (len == 3) {
8949 		reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8950 	} else if (len == 4) {
8951 		reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8952 	} else {
8953 		PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8954 		return ret;
8955 	}
8956 
8957 	if (reg != val) {
8958 		ret = i40e_aq_debug_write_global_register(hw,
8959 						   I40E_GL_PRS_FVBM(2),
8960 						   reg, NULL);
8961 		if (ret != 0)
8962 			return ret;
8963 		PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8964 			    "with value 0x%08x",
8965 			    I40E_GL_PRS_FVBM(2), reg);
8966 	} else {
8967 		ret = 0;
8968 	}
8969 	PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8970 		    I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8971 
8972 	return ret;
8973 }
8974 
8975 /* Set the symmetric hash enable configurations per port */
8976 void
8977 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8978 {
8979 	uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8980 
8981 	if (enable > 0) {
8982 		if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)
8983 			return;
8984 
8985 		reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8986 	} else {
8987 		if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK))
8988 			return;
8989 
8990 		reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8991 	}
8992 	i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8993 	I40E_WRITE_FLUSH(hw);
8994 }
8995 
8996 /**
8997  * Valid input sets for hash and flow director filters per PCTYPE
8998  */
8999 static uint64_t
9000 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9001 		enum rte_filter_type filter)
9002 {
9003 	uint64_t valid;
9004 
9005 	static const uint64_t valid_hash_inset_table[] = {
9006 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9007 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9008 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9009 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9010 			I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9011 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9012 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9013 			I40E_INSET_FLEX_PAYLOAD,
9014 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9015 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9016 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9017 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9018 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9019 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9020 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9021 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9022 			I40E_INSET_FLEX_PAYLOAD,
9023 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9024 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9025 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9026 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9027 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9028 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9029 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9030 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9031 			I40E_INSET_FLEX_PAYLOAD,
9032 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9033 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9034 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9035 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9036 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9037 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9038 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9039 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9040 			I40E_INSET_FLEX_PAYLOAD,
9041 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9042 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9043 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9044 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9045 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9046 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9047 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9048 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9049 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9050 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9051 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9052 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9053 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9054 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9055 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9056 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9057 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9058 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9059 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9060 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9061 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9062 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9063 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9064 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9065 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9066 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9067 			I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9068 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9069 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9070 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9071 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9072 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9073 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9074 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9075 			I40E_INSET_FLEX_PAYLOAD,
9076 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9077 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9078 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9079 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9080 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9081 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9082 			I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9083 			I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9084 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9085 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9086 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9087 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9088 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9089 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9090 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9091 			I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9092 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9093 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9094 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9095 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9096 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9097 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9098 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9099 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9100 			I40E_INSET_FLEX_PAYLOAD,
9101 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9102 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9103 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9104 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9105 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9106 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9107 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9108 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9109 			I40E_INSET_FLEX_PAYLOAD,
9110 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9111 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9112 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9113 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9114 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9115 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9116 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9117 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9118 			I40E_INSET_FLEX_PAYLOAD,
9119 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9120 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9121 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9122 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9123 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9124 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9125 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9126 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9127 			I40E_INSET_FLEX_PAYLOAD,
9128 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9129 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9130 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9131 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9132 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9133 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9134 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9135 			I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9136 			I40E_INSET_FLEX_PAYLOAD,
9137 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9138 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9139 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9140 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9141 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9142 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9143 			I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9144 			I40E_INSET_FLEX_PAYLOAD,
9145 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9146 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9147 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9148 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9149 			I40E_INSET_FLEX_PAYLOAD,
9150 	};
9151 
9152 	/**
9153 	 * Flow director supports only fields defined in
9154 	 * union rte_eth_fdir_flow.
9155 	 */
9156 	static const uint64_t valid_fdir_inset_table[] = {
9157 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9158 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9159 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9160 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9161 		I40E_INSET_IPV4_TTL,
9162 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9163 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9164 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9165 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9166 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9167 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9168 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9169 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9170 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9171 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9172 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9173 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9174 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9175 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9176 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9177 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9178 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9179 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9180 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9181 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9182 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9183 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9184 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9185 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9186 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9187 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9188 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9189 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9190 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9191 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9192 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9193 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9194 		I40E_INSET_SCTP_VT,
9195 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9196 		I40E_INSET_DMAC | I40E_INSET_SMAC |
9197 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9198 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9199 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9200 		I40E_INSET_IPV4_TTL,
9201 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9202 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9203 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9204 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9205 		I40E_INSET_IPV6_HOP_LIMIT,
9206 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9207 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9208 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9209 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9210 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9211 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9212 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9213 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9214 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9215 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9216 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9217 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9218 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9219 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9220 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9221 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9222 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9223 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9224 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9225 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9226 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9227 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9228 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9229 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9230 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9231 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9232 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9233 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9234 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9235 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9236 		I40E_INSET_SCTP_VT,
9237 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9238 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9239 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9240 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9241 		I40E_INSET_IPV6_HOP_LIMIT,
9242 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9243 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9244 		I40E_INSET_LAST_ETHER_TYPE,
9245 	};
9246 
9247 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9248 		return 0;
9249 	if (filter == RTE_ETH_FILTER_HASH)
9250 		valid = valid_hash_inset_table[pctype];
9251 	else
9252 		valid = valid_fdir_inset_table[pctype];
9253 
9254 	return valid;
9255 }
9256 
9257 /**
9258  * Validate if the input set is allowed for a specific PCTYPE
9259  */
9260 int
9261 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9262 		enum rte_filter_type filter, uint64_t inset)
9263 {
9264 	uint64_t valid;
9265 
9266 	valid = i40e_get_valid_input_set(pctype, filter);
9267 	if (inset & (~valid))
9268 		return -EINVAL;
9269 
9270 	return 0;
9271 }
9272 
9273 /* Default input set field combinations per pctype */
9274 uint64_t
9275 i40e_get_default_input_set(uint16_t pctype)
9276 {
9277 	static const uint64_t default_inset_table[] = {
9278 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9279 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9280 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9281 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9282 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9283 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9284 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9285 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9286 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9287 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9288 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9289 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9290 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9291 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9292 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9293 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9294 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9295 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9296 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9297 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9298 			I40E_INSET_SCTP_VT,
9299 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9300 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9301 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9302 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9303 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9304 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9305 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9306 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9307 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9308 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9309 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9310 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9311 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9312 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9313 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9314 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9315 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9316 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9317 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9318 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9319 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9320 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9321 			I40E_INSET_SCTP_VT,
9322 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9323 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9324 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9325 			I40E_INSET_LAST_ETHER_TYPE,
9326 	};
9327 
9328 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9329 		return 0;
9330 
9331 	return default_inset_table[pctype];
9332 }
9333 
9334 /**
9335  * Translate the input set from bit masks to register aware bit masks
9336  * Translate the input set from bit masks to register-aware bit masks
9337  */
9338 uint64_t
9339 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9340 {
9341 	uint64_t val = 0;
9342 	uint16_t i;
9343 
9344 	struct inset_map {
9345 		uint64_t inset;
9346 		uint64_t inset_reg;
9347 	};
9348 
9349 	static const struct inset_map inset_map_common[] = {
9350 		{I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9351 		{I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9352 		{I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9353 		{I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9354 		{I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9355 		{I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9356 		{I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9357 		{I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9358 		{I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9359 		{I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9360 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9361 		{I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9362 		{I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9363 		{I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9364 		{I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9365 		{I40E_INSET_TUNNEL_DMAC,
9366 			I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9367 		{I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9368 		{I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9369 		{I40E_INSET_TUNNEL_SRC_PORT,
9370 			I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9371 		{I40E_INSET_TUNNEL_DST_PORT,
9372 			I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9373 		{I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9374 		{I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9375 		{I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9376 		{I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9377 		{I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9378 		{I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9379 		{I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9380 		{I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9381 		{I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9382 	};
9383 
9384 	/* Some registers map differently on X722 */
9385 	static const struct inset_map inset_map_diff_x722[] = {
9386 		{I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9387 		{I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9388 		{I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9389 		{I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9390 	};
9391 
9392 	static const struct inset_map inset_map_diff_not_x722[] = {
9393 		{I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9394 		{I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9395 		{I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9396 		{I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9397 	};
9398 
9399 	if (input == 0)
9400 		return val;
9401 
9402 	/* Translate input set to register aware inset */
9403 	if (type == I40E_MAC_X722) {
9404 		for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9405 			if (input & inset_map_diff_x722[i].inset)
9406 				val |= inset_map_diff_x722[i].inset_reg;
9407 		}
9408 	} else {
9409 		for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9410 			if (input & inset_map_diff_not_x722[i].inset)
9411 				val |= inset_map_diff_not_x722[i].inset_reg;
9412 		}
9413 	}
9414 
9415 	for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9416 		if (input & inset_map_common[i].inset)
9417 			val |= inset_map_common[i].inset_reg;
9418 	}
9419 
9420 	return val;
9421 }
9422 
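/*
 * Scan the given range of GLQF_PIT registers for an entry whose source
 * window covers the requested header field offset and return the
 * matching destination offset within that window, or -1 if no entry
 * covers the field.
 */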
9423 static int
9424 i40e_get_inset_field_offset(struct i40e_hw *hw, uint32_t pit_reg_start,
9425 			    uint32_t pit_reg_count, uint32_t hdr_off)
9426 {
9427 	const uint32_t pit_reg_end = pit_reg_start + pit_reg_count;
9428 	uint32_t field_off = I40E_FDIR_FIELD_OFFSET(hdr_off);
9429 	uint32_t i, reg_val, src_off, count;
9430 
9431 	for (i = pit_reg_start; i < pit_reg_end; i++) {
9432 		reg_val = i40e_read_rx_ctl(hw, I40E_GLQF_PIT(i));
9433 
9434 		src_off = I40E_GLQF_PIT_SOURCE_OFF_GET(reg_val);
9435 		count = I40E_GLQF_PIT_FSIZE_GET(reg_val);
9436 
9437 		if (src_off <= field_off && (src_off + count) > field_off)
9438 			break;
9439 	}
9440 
9441 	if (i >= pit_reg_end) {
9442 		PMD_DRV_LOG(ERR,
9443 			    "Hardware GLQF_PIT configuration does not support this field mask");
9444 		return -1;
9445 	}
9446 
9447 	return I40E_GLQF_PIT_DEST_OFF_GET(reg_val) + field_off - src_off;
9448 }
9449 
9450 int
9451 i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset,
9452 			     uint32_t *mask, uint8_t nb_elem)
9453 {
9454 	static const uint64_t mask_inset[] = {
9455 		I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL,
9456 		I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT };
9457 
9458 	static const struct {
9459 		uint64_t inset;
9460 		uint32_t mask;
9461 		uint32_t offset;
9462 	} inset_mask_offset_map[] = {
9463 		{ I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK,
9464 		  offsetof(struct rte_ipv4_hdr, type_of_service) },
9465 
9466 		{ I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK,
9467 		  offsetof(struct rte_ipv4_hdr, next_proto_id) },
9468 
9469 		{ I40E_INSET_IPV4_TTL, I40E_INSET_IPV4_TTL_MASK,
9470 		  offsetof(struct rte_ipv4_hdr, time_to_live) },
9471 
9472 		{ I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK,
9473 		  offsetof(struct rte_ipv6_hdr, vtc_flow) },
9474 
9475 		{ I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK,
9476 		  offsetof(struct rte_ipv6_hdr, proto) },
9477 
9478 		{ I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK,
9479 		  offsetof(struct rte_ipv6_hdr, hop_limits) },
9480 	};
9481 
9482 	uint32_t i;
9483 	int idx = 0;
9484 
9485 	assert(mask);
9486 	if (!inset)
9487 		return 0;
9488 
9489 	for (i = 0; i < RTE_DIM(mask_inset); i++) {
9490 		/* Clear the inset bit, if no MASK is required,
9491 		 * for example proto + ttl
9492 		 */
9493 		if ((mask_inset[i] & inset) == mask_inset[i]) {
9494 			inset &= ~mask_inset[i];
9495 			if (!inset)
9496 				return 0;
9497 		}
9498 	}
9499 
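	/* For each remaining field, locate its byte offset through the
	 * GLQF_PIT registers and build an (offset, mask) entry in the
	 * caller's mask register array.
	 */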
9500 	for (i = 0; i < RTE_DIM(inset_mask_offset_map); i++) {
9501 		uint32_t pit_start, pit_count;
9502 		int offset;
9503 
9504 		if (!(inset_mask_offset_map[i].inset & inset))
9505 			continue;
9506 
9507 		if (inset_mask_offset_map[i].inset &
9508 		    (I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9509 		     I40E_INSET_IPV4_TTL)) {
9510 			pit_start = I40E_GLQF_PIT_IPV4_START;
9511 			pit_count = I40E_GLQF_PIT_IPV4_COUNT;
9512 		} else {
9513 			pit_start = I40E_GLQF_PIT_IPV6_START;
9514 			pit_count = I40E_GLQF_PIT_IPV6_COUNT;
9515 		}
9516 
9517 		offset = i40e_get_inset_field_offset(hw, pit_start, pit_count,
9518 				inset_mask_offset_map[i].offset);
9519 
9520 		if (offset < 0)
9521 			return -EINVAL;
9522 
9523 		if (idx >= nb_elem) {
9524 			PMD_DRV_LOG(ERR,
9525 				    "Configuration of inset mask out of range %u",
9526 				    nb_elem);
9527 			return -ERANGE;
9528 		}
9529 
9530 		mask[idx] = I40E_GLQF_PIT_BUILD((uint32_t)offset,
9531 						inset_mask_offset_map[i].mask);
9532 		idx++;
9533 	}
9534 
9535 	return idx;
9536 }
9537 
9538 void
9539 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9540 {
9541 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9542 
9543 	PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9544 	if (reg != val)
9545 		i40e_write_rx_ctl(hw, addr, val);
9546 	PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9547 		    (uint32_t)i40e_read_rx_ctl(hw, addr));
9548 }
9549 
9550 void
9551 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9552 {
9553 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9554 	struct rte_eth_dev_data *dev_data =
9555 		((struct i40e_adapter *)hw->back)->pf.dev_data;
9556 	struct rte_eth_dev *dev = &rte_eth_devices[dev_data->port_id];
9557 
9558 	if (reg != val) {
9559 		i40e_write_rx_ctl(hw, addr, val);
9560 		PMD_DRV_LOG(WARNING,
9561 			    "i40e device %s changed global register [0x%08x]."
9562 			    " original: 0x%08x, new: 0x%08x",
9563 			    dev->device->name, addr, reg,
9564 			    (uint32_t)i40e_read_rx_ctl(hw, addr));
9565 	}
9566 }
9567 
9568 static void
9569 i40e_filter_input_set_init(struct i40e_pf *pf)
9570 {
9571 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9572 	enum i40e_filter_pctype pctype;
9573 	uint64_t input_set, inset_reg;
9574 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9575 	int num, i;
9576 	uint16_t flow_type;
9577 
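	/* Program the default input set of every supported PCTYPE into the
	 * per-port flow director inset registers and, unless multi-driver
	 * support is enabled, into the global hash inset and mask registers.
	 */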
9578 	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9579 	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9580 		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9581 
9582 		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9583 			continue;
9584 
9585 		input_set = i40e_get_default_input_set(pctype);
9586 
9587 		num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9588 						   I40E_INSET_MASK_NUM_REG);
9589 		if (num < 0)
9590 			return;
9591 		if (pf->support_multi_driver && num > 0) {
9592 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9593 			return;
9594 		}
9595 		inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9596 					input_set);
9597 
9598 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9599 				      (uint32_t)(inset_reg & UINT32_MAX));
9600 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9601 				     (uint32_t)((inset_reg >>
9602 				     I40E_32_BIT_WIDTH) & UINT32_MAX));
9603 		if (!pf->support_multi_driver) {
9604 			i40e_check_write_global_reg(hw,
9605 					    I40E_GLQF_HASH_INSET(0, pctype),
9606 					    (uint32_t)(inset_reg & UINT32_MAX));
9607 			i40e_check_write_global_reg(hw,
9608 					     I40E_GLQF_HASH_INSET(1, pctype),
9609 					     (uint32_t)((inset_reg >>
9610 					      I40E_32_BIT_WIDTH) & UINT32_MAX));
9611 
9612 			for (i = 0; i < num; i++) {
9613 				i40e_check_write_global_reg(hw,
9614 						    I40E_GLQF_FD_MSK(i, pctype),
9615 						    mask_reg[i]);
9616 				i40e_check_write_global_reg(hw,
9617 						  I40E_GLQF_HASH_MSK(i, pctype),
9618 						  mask_reg[i]);
9619 			}
9620 			/* Clear unused mask registers of the pctype */
9621 			for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9622 				i40e_check_write_global_reg(hw,
9623 						    I40E_GLQF_FD_MSK(i, pctype),
9624 						    0);
9625 				i40e_check_write_global_reg(hw,
9626 						  I40E_GLQF_HASH_MSK(i, pctype),
9627 						  0);
9628 			}
9629 		} else {
9630 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9631 		}
9632 		I40E_WRITE_FLUSH(hw);
9633 
9634 		/* store the default input set */
9635 		if (!pf->support_multi_driver)
9636 			pf->hash_input_set[pctype] = input_set;
9637 		pf->fdir.input_set[pctype] = input_set;
9638 	}
9639 }
9640 
9641 int
9642 i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set,
9643 		    uint32_t pctype, bool add)
9644 {
9645 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9646 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9647 	uint64_t inset_reg = 0;
9648 	int num, i;
9649 
9650 	if (pf->support_multi_driver) {
9651 		PMD_DRV_LOG(ERR,
9652 			    "Modifying the input set is not permitted when multi-driver is enabled.");
9653 		return -EPERM;
9654 	}
9655 
9656 	/* For X722, get translated pctype in fd pctype register */
9657 	if (hw->mac.type == I40E_MAC_X722)
9658 		pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype));
9659 
9660 	if (add) {
9661 		/* get inset value in register */
9662 		inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9663 		inset_reg <<= I40E_32_BIT_WIDTH;
9664 		inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9665 		input_set |= pf->hash_input_set[pctype];
9666 	}
9667 	num = i40e_generate_inset_mask_reg(hw, input_set, mask_reg,
9668 					   I40E_INSET_MASK_NUM_REG);
9669 	if (num < 0)
9670 		return -EINVAL;
9671 
9672 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9673 
9674 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9675 				    (uint32_t)(inset_reg & UINT32_MAX));
9676 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9677 				    (uint32_t)((inset_reg >>
9678 				    I40E_32_BIT_WIDTH) & UINT32_MAX));
9679 
9680 	for (i = 0; i < num; i++)
9681 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9682 					    mask_reg[i]);
9683 	/* Clear unused mask registers of the pctype */
9684 	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9685 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9686 					    0);
9687 	I40E_WRITE_FLUSH(hw);
9688 
9689 	pf->hash_input_set[pctype] = input_set;
9690 	return 0;
9691 }
9692 
9693 /* Convert ethertype filter structure */
9694 static int
9695 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9696 			      struct i40e_ethertype_filter *filter)
9697 {
9698 	rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
9699 		RTE_ETHER_ADDR_LEN);
9700 	filter->input.ether_type = input->ether_type;
9701 	filter->flags = input->flags;
9702 	filter->queue = input->queue;
9703 
9704 	return 0;
9705 }
9706 
9707 /* Check if the ethertype filter already exists */
9708 struct i40e_ethertype_filter *
9709 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9710 				const struct i40e_ethertype_filter_input *input)
9711 {
9712 	int ret;
9713 
9714 	ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9715 	if (ret < 0)
9716 		return NULL;
9717 
9718 	return ethertype_rule->hash_map[ret];
9719 }
9720 
9721 /* Add ethertype filter in SW list */
9722 static int
9723 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9724 				struct i40e_ethertype_filter *filter)
9725 {
9726 	struct i40e_ethertype_rule *rule = &pf->ethertype;
9727 	int ret;
9728 
9729 	ret = rte_hash_add_key(rule->hash_table, &filter->input);
9730 	if (ret < 0) {
9731 		PMD_DRV_LOG(ERR,
9732 			    "Failed to insert ethertype filter"
9733 			    " to hash table %d!",
9734 			    ret);
9735 		return ret;
9736 	}
9737 	rule->hash_map[ret] = filter;
9738 
9739 	TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9740 
9741 	return 0;
9742 }
9743 
9744 /* Delete ethertype filter in SW list */
9745 int
9746 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9747 			     struct i40e_ethertype_filter_input *input)
9748 {
9749 	struct i40e_ethertype_rule *rule = &pf->ethertype;
9750 	struct i40e_ethertype_filter *filter;
9751 	int ret;
9752 
9753 	ret = rte_hash_del_key(rule->hash_table, input);
9754 	if (ret < 0) {
9755 		PMD_DRV_LOG(ERR,
9756 			    "Failed to delete ethertype filter"
9757 			    " from hash table %d!",
9758 			    ret);
9759 		return ret;
9760 	}
9761 	filter = rule->hash_map[ret];
9762 	rule->hash_map[ret] = NULL;
9763 
9764 	TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9765 	rte_free(filter);
9766 
9767 	return 0;
9768 }
9769 
9770 /*
9771  * Configure an ethertype filter, which can direct packets by filtering
9772  * on MAC address and ether_type, or on ether_type only.
9773  */
9774 int
9775 i40e_ethertype_filter_set(struct i40e_pf *pf,
9776 			struct rte_eth_ethertype_filter *filter,
9777 			bool add)
9778 {
9779 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9780 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9781 	struct i40e_ethertype_filter *ethertype_filter, *node;
9782 	struct i40e_ethertype_filter check_filter;
9783 	struct i40e_control_filter_stats stats;
9784 	uint16_t flags = 0;
9785 	int ret;
9786 
9787 	if (filter->queue >= pf->dev_data->nb_rx_queues) {
9788 		PMD_DRV_LOG(ERR, "Invalid queue ID");
9789 		return -EINVAL;
9790 	}
9791 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
9792 		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
9793 		PMD_DRV_LOG(ERR,
9794 			"unsupported ether_type(0x%04x) in control packet filter.",
9795 			filter->ether_type);
9796 		return -EINVAL;
9797 	}
9798 	if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
9799 		PMD_DRV_LOG(WARNING,
9800 			"filtering on the VLAN ether_type in the first tag is not supported.");
9801 
9802 	/* Check if there is the filter in SW list */
9803 	memset(&check_filter, 0, sizeof(check_filter));
9804 	i40e_ethertype_filter_convert(filter, &check_filter);
9805 	node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9806 					       &check_filter.input);
9807 	if (add && node) {
9808 		PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9809 		return -EINVAL;
9810 	}
9811 
9812 	if (!add && !node) {
9813 		PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9814 		return -EINVAL;
9815 	}
9816 
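	/* Build the admin queue filter flags: without RTE_ETHTYPE_FLAGS_MAC
	 * the MAC address is ignored and matching is done on ether_type
	 * only; DROP adds the drop action, and TO_QUEUE steers matched
	 * packets to the requested queue.
	 */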
9817 	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9818 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9819 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9820 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9821 	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9822 
9823 	memset(&stats, 0, sizeof(stats));
9824 	ret = i40e_aq_add_rem_control_packet_filter(hw,
9825 			filter->mac_addr.addr_bytes,
9826 			filter->ether_type, flags,
9827 			pf->main_vsi->seid,
9828 			filter->queue, add, &stats, NULL);
9829 
9830 	PMD_DRV_LOG(INFO,
9831 		"add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9832 		ret, stats.mac_etype_used, stats.etype_used,
9833 		stats.mac_etype_free, stats.etype_free);
9834 	if (ret < 0)
9835 		return -ENOSYS;
9836 
9837 	/* Add or delete a filter in SW list */
9838 	if (add) {
9839 		ethertype_filter = rte_zmalloc("ethertype_filter",
9840 				       sizeof(*ethertype_filter), 0);
9841 		if (ethertype_filter == NULL) {
9842 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9843 			return -ENOMEM;
9844 		}
9845 
9846 		rte_memcpy(ethertype_filter, &check_filter,
9847 			   sizeof(check_filter));
9848 		ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9849 		if (ret < 0)
9850 			rte_free(ethertype_filter);
9851 	} else {
9852 		ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9853 	}
9854 
9855 	return ret;
9856 }
9857 
9858 static int
9859 i40e_dev_flow_ops_get(struct rte_eth_dev *dev,
9860 		      const struct rte_flow_ops **ops)
9861 {
9862 	if (dev == NULL)
9863 		return -EINVAL;
9864 
9865 	*ops = &i40e_flow_ops;
9866 	return 0;
9867 }
9868 
9869 /*
9870  * Check and enable Extended Tag.
9871  * Enabling Extended Tag is important for 40G performance.
9872  */
9873 static void
9874 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9875 {
9876 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9877 	uint32_t buf = 0;
9878 	int ret;
9879 
9880 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9881 				      PCI_DEV_CAP_REG);
9882 	if (ret < 0) {
9883 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9884 			    PCI_DEV_CAP_REG);
9885 		return;
9886 	}
9887 	if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9888 		PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9889 		return;
9890 	}
9891 
9892 	buf = 0;
9893 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9894 				      PCI_DEV_CTRL_REG);
9895 	if (ret < 0) {
9896 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9897 			    PCI_DEV_CTRL_REG);
9898 		return;
9899 	}
9900 	if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9901 		PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9902 		return;
9903 	}
9904 	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9905 	ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9906 				       PCI_DEV_CTRL_REG);
9907 	if (ret < 0) {
9908 		PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9909 			    PCI_DEV_CTRL_REG);
9910 		return;
9911 	}
9912 }
9913 
9914 /*
9915  * Some registers are only reset by a global hardware reset, so explicit
9916  * hardware initialization is needed to put those registers into the
9917  * expected initial state.
9918  */
9919 static void
9920 i40e_hw_init(struct rte_eth_dev *dev)
9921 {
9922 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9923 
9924 	i40e_enable_extended_tag(dev);
9925 
9926 	/* clear the PF Queue Filter control register */
9927 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9928 
9929 	/* Disable symmetric hash per port */
9930 	i40e_set_symmetric_hash_enable_per_port(hw, 0);
9931 }
9932 
9933 /*
9934  * On X722 it is possible to have multiple pctypes mapped to the same flow
9935  * type; however, this function returns only the highest pctype index, which
9936  * is not quite correct. This is a known limitation of the i40e driver and
9937  * needs to be fixed later.
9938  */
9939 enum i40e_filter_pctype
9940 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9941 {
9942 	int i;
9943 	uint64_t pctype_mask;
9944 
9945 	if (flow_type < I40E_FLOW_TYPE_MAX) {
9946 		pctype_mask = adapter->pctypes_tbl[flow_type];
9947 		for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
9948 			if (pctype_mask & (1ULL << i))
9949 				return (enum i40e_filter_pctype)i;
9950 		}
9951 	}
9952 	return I40E_FILTER_PCTYPE_INVALID;
9953 }
9954 
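/*
 * Reverse mapping of i40e_flowtype_to_pctype(): return the flow type whose
 * pctype bitmap in the adapter table contains the given pctype, or
 * RTE_ETH_FLOW_UNKNOWN if none matches.
 */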
9955 uint16_t
9956 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
9957 			enum i40e_filter_pctype pctype)
9958 {
9959 	uint16_t flowtype;
9960 	uint64_t pctype_mask = 1ULL << pctype;
9961 
9962 	for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
9963 	     flowtype++) {
9964 		if (adapter->pctypes_tbl[flowtype] & pctype_mask)
9965 			return flowtype;
9966 	}
9967 
9968 	return RTE_ETH_FLOW_UNKNOWN;
9969 }
9970 
9971 /*
9972  * On X710, performance numbers fall far short of expectations on recent
9973  * firmware versions. The same is true on XL710 when promiscuous mode is
9974  * disabled, or when promiscuous mode is enabled and the port MAC address
9975  * equals the packet destination MAC address. A fix for this issue may not
9976  * be integrated in upcoming firmware versions, so a workaround in the
9977  * software driver is needed: it modifies the initial values of three
9978  * internal-only registers for both X710 and XL710. Note that the values
9979  * for X710 and XL710 could be different, and the workaround can be
9980  * removed once the issue is fixed in firmware.
9981  */
9982 
9983 /* For both X710 and XL710 */
9984 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1	0x10000200
9985 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2	0x203F0200
9986 #define I40E_GL_SWR_PRI_JOIN_MAP_0		0x26CE00
9987 
9988 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
9989 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
9990 
9991 /* For X722 */
9992 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
9993 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
9994 
9995 /* For X710 */
9996 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
9997 /* For XL710 */
9998 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
9999 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10000 
10001 /*
10002  * GL_SWR_PM_UP_THR:
10003  * The value is not affected by the link speed; it is set according to
10004  * the total number of ports for a better pipe-monitor configuration.
10005  */
10006 static bool
10007 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10008 {
10009 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10010 		.device_id = (dev),   \
10011 		.val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10012 
10013 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10014 		.device_id = (dev),   \
10015 		.val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10016 
10017 	static const struct {
10018 		uint16_t device_id;
10019 		uint32_t val;
10020 	} swr_pm_table[] = {
10021 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10022 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10023 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10024 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10025 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_X722) },
10026 
10027 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10028 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10029 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10030 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10031 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10032 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10033 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10034 	};
10035 	uint32_t i;
10036 
10037 	if (value == NULL) {
10038 		PMD_DRV_LOG(ERR, "value is NULL");
10039 		return false;
10040 	}
10041 
10042 	for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10043 		if (hw->device_id == swr_pm_table[i].device_id) {
10044 			*value = swr_pm_table[i].val;
10045 
10046 			PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10047 				    "value - 0x%08x",
10048 				    hw->device_id, *value);
10049 			return true;
10050 		}
10051 	}
10052 
10053 	return false;
10054 }
10055 
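/*
 * Synchronize the PHY type by querying the PHY capabilities through the
 * admin queue, making up to 5 attempts in total with a 100 ms delay
 * between retries.
 */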
10056 static int
10057 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10058 {
10059 	enum i40e_status_code status;
10060 	struct i40e_aq_get_phy_abilities_resp phy_ab;
10061 	int ret = -ENOTSUP;
10062 	int retries = 0;
10063 
10064 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10065 					      NULL);
10066 
10067 	while (status) {
10068 		PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10069 			status);
10070 		retries++;
10071 		rte_delay_us(100000);
10072 		if (retries < 5)
10073 			status = i40e_aq_get_phy_capabilities(hw, false,
10074 					true, &phy_ab, NULL);
10075 		else
10076 			return ret;
10077 	}
10078 	return 0;
10079 }
10080 
10081 static void
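/*
 * Apply the register workaround described above: read each register through
 * the admin queue and rewrite it only when its current value differs from
 * the expected value for this device and firmware version.
 */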
10082 i40e_configure_registers(struct i40e_hw *hw)
10083 {
10084 	static struct {
10085 		uint32_t addr;
10086 		uint64_t val;
10087 	} reg_table[] = {
10088 		{I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10089 		{I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10090 		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10091 	};
10092 	uint64_t reg;
10093 	uint32_t i;
10094 	int ret;
10095 
10096 	for (i = 0; i < RTE_DIM(reg_table); i++) {
10097 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10098 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10099 				reg_table[i].val =
10100 					I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10101 			else /* For X710/XL710/XXV710 */
10102 				if (hw->aq.fw_maj_ver < 6)
10103 					reg_table[i].val =
10104 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10105 				else
10106 					reg_table[i].val =
10107 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10108 		}
10109 
10110 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10111 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10112 				reg_table[i].val =
10113 					I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10114 			else /* For X710/XL710/XXV710 */
10115 				reg_table[i].val =
10116 					I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10117 		}
10118 
10119 		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10120 			uint32_t cfg_val;
10121 
10122 			if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10123 				PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10124 					    "GL_SWR_PM_UP_THR value fixup",
10125 					    hw->device_id);
10126 				continue;
10127 			}
10128 
10129 			reg_table[i].val = cfg_val;
10130 		}
10131 
10132 		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10133 							&reg, NULL);
10134 		if (ret < 0) {
10135 			PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10136 							reg_table[i].addr);
10137 			break;
10138 		}
10139 		PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10140 						reg_table[i].addr, reg);
10141 		if (reg == reg_table[i].val)
10142 			continue;
10143 
10144 		ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10145 						reg_table[i].val, NULL);
10146 		if (ret < 0) {
10147 			PMD_DRV_LOG(ERR,
10148 				"Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10149 				reg_table[i].val, reg_table[i].addr);
10150 			break;
10151 		}
10152 		PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10153 			"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10154 	}
10155 }
10156 
10157 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10158 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10159 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
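/*
 * Configure the VSI for QinQ: enable double VLAN stripping on RX through the
 * VSI_TSR register and double VLAN insertion on TX through VSI_L2TAGSTXVALID,
 * both updated via the admin queue debug register write interface.
 */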
10160 static int
10161 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10162 {
10163 	uint32_t reg;
10164 	int ret;
10165 
10166 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10167 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10168 		return -EINVAL;
10169 	}
10170 
10171 	/* Configure for double VLAN RX stripping */
10172 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10173 	if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10174 		reg |= I40E_VSI_TSR_QINQ_CONFIG;
10175 		ret = i40e_aq_debug_write_register(hw,
10176 						   I40E_VSI_TSR(vsi->vsi_id),
10177 						   reg, NULL);
10178 		if (ret < 0) {
10179 			PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10180 				    vsi->vsi_id);
10181 			return I40E_ERR_CONFIG;
10182 		}
10183 	}
10184 
10185 	/* Configure for double VLAN TX insertion */
10186 	reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10187 	if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10188 		reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10189 		ret = i40e_aq_debug_write_register(hw,
10190 						   I40E_VSI_L2TAGSTXVALID(
10191 						   vsi->vsi_id), reg, NULL);
10192 		if (ret < 0) {
10193 			PMD_DRV_LOG(ERR,
10194 				"Failed to update VSI_L2TAGSTXVALID[%d]",
10195 				vsi->vsi_id);
10196 			return I40E_ERR_CONFIG;
10197 		}
10198 	}
10199 
10200 	return 0;
10201 }
10202 
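/* Read the 64-bit PHC system time as a cycle counter value by combining the
 * low and high halves of the PRTTSYN_TIME registers.
 */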
10203 static uint64_t
10204 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10205 {
10206 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10207 	uint64_t systim_cycles;
10208 
10209 	systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10210 	systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10211 			<< 32;
10212 
10213 	return systim_cycles;
10214 }
10215 
10216 static uint64_t
10217 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10218 {
10219 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10220 	uint64_t rx_tstamp;
10221 
10222 	rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10223 	rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10224 			<< 32;
10225 
10226 	return rx_tstamp;
10227 }
10228 
10229 static uint64_t
10230 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10231 {
10232 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10233 	uint64_t tx_tstamp;
10234 
10235 	tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10236 	tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10237 			<< 32;
10238 
10239 	return tx_tstamp;
10240 }
10241 
10242 static void
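/*
 * Initialize the timesync cycle counters. The PRTTSYN increment value is
 * selected according to the current link speed; an unknown speed leaves the
 * increment at zero, which effectively stops the hardware clock.
 */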
10243 i40e_start_timecounters(struct rte_eth_dev *dev)
10244 {
10245 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10246 	struct i40e_adapter *adapter = dev->data->dev_private;
10247 	struct rte_eth_link link;
10248 	uint32_t tsync_inc_l;
10249 	uint32_t tsync_inc_h;
10250 
10251 	/* Get current link speed. */
10252 	i40e_dev_link_update(dev, 1);
10253 	rte_eth_linkstatus_get(dev, &link);
10254 
10255 	switch (link.link_speed) {
10256 	case ETH_SPEED_NUM_40G:
10257 	case ETH_SPEED_NUM_25G:
10258 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10259 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10260 		break;
10261 	case ETH_SPEED_NUM_10G:
10262 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10263 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10264 		break;
10265 	case ETH_SPEED_NUM_1G:
10266 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10267 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10268 		break;
10269 	default:
10270 		tsync_inc_l = 0x0;
10271 		tsync_inc_h = 0x0;
10272 	}
10273 
10274 	/* Set the timesync increment value. */
10275 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10276 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10277 
10278 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10279 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10280 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10281 
10282 	adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10283 	adapter->systime_tc.cc_shift = 0;
10284 	adapter->systime_tc.nsec_mask = 0;
10285 
10286 	adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10287 	adapter->rx_tstamp_tc.cc_shift = 0;
10288 	adapter->rx_tstamp_tc.nsec_mask = 0;
10289 
10290 	adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10291 	adapter->tx_tstamp_tc.cc_shift = 0;
10292 	adapter->tx_tstamp_tc.nsec_mask = 0;
10293 }
10294 
10295 static int
10296 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10297 {
10298 	struct i40e_adapter *adapter = dev->data->dev_private;
10299 
10300 	adapter->systime_tc.nsec += delta;
10301 	adapter->rx_tstamp_tc.nsec += delta;
10302 	adapter->tx_tstamp_tc.nsec += delta;
10303 
10304 	return 0;
10305 }
10306 
10307 static int
10308 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10309 {
10310 	uint64_t ns;
10311 	struct i40e_adapter *adapter = dev->data->dev_private;
10312 
10313 	ns = rte_timespec_to_ns(ts);
10314 
10315 	/* Set the timecounters to a new value. */
10316 	adapter->systime_tc.nsec = ns;
10317 	adapter->rx_tstamp_tc.nsec = ns;
10318 	adapter->tx_tstamp_tc.nsec = ns;
10319 
10320 	return 0;
10321 }
10322 
10323 static int
10324 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10325 {
10326 	uint64_t ns, systime_cycles;
10327 	struct i40e_adapter *adapter = dev->data->dev_private;
10328 
10329 	systime_cycles = i40e_read_systime_cyclecounter(dev);
10330 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10331 	*ts = rte_ns_to_timespec(ns);
10332 
10333 	return 0;
10334 }
10335 
10336 static int
10337 i40e_timesync_enable(struct rte_eth_dev *dev)
10338 {
10339 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10340 	uint32_t tsync_ctl_l;
10341 	uint32_t tsync_ctl_h;
10342 
10343 	/* Stop the timesync system time. */
10344 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10345 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10346 	/* Reset the timesync system time value. */
10347 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10348 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10349 
10350 	i40e_start_timecounters(dev);
10351 
10352 	/* Clear timesync registers. */
10353 	I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10354 	I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10355 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10356 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10357 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10358 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10359 
10360 	/* Enable timestamping of PTP packets. */
10361 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10362 	tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10363 
10364 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10365 	tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10366 	tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10367 
10368 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10369 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10370 
10371 	return 0;
10372 }
10373 
10374 static int
10375 i40e_timesync_disable(struct rte_eth_dev *dev)
10376 {
10377 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10378 	uint32_t tsync_ctl_l;
10379 	uint32_t tsync_ctl_h;
10380 
10381 	/* Disable timestamping of transmitted PTP packets. */
10382 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10383 	tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10384 
10385 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10386 	tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10387 
10388 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10389 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10390 
10391 	/* Reset the timesync increment value. */
10392 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10393 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10394 
10395 	return 0;
10396 }
10397 
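/*
 * Read an RX PTP timestamp. The two low bits of 'flags' select one of the
 * four RX timestamp registers; -EINVAL is returned if that register does not
 * hold a valid timestamp.
 */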
10398 static int
10399 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10400 				struct timespec *timestamp, uint32_t flags)
10401 {
10402 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10403 	struct i40e_adapter *adapter = dev->data->dev_private;
10404 	uint32_t sync_status;
10405 	uint32_t index = flags & 0x03;
10406 	uint64_t rx_tstamp_cycles;
10407 	uint64_t ns;
10408 
10409 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10410 	if ((sync_status & (1 << index)) == 0)
10411 		return -EINVAL;
10412 
10413 	rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10414 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10415 	*timestamp = rte_ns_to_timespec(ns);
10416 
10417 	return 0;
10418 }
10419 
10420 static int
10421 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10422 				struct timespec *timestamp)
10423 {
10424 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10425 	struct i40e_adapter *adapter = dev->data->dev_private;
10426 	uint32_t sync_status;
10427 	uint64_t tx_tstamp_cycles;
10428 	uint64_t ns;
10429 
10430 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10431 	if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10432 		return -EINVAL;
10433 
10434 	tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10435 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10436 	*timestamp = rte_ns_to_timespec(ns);
10437 
10438 	return 0;
10439 }
10440 
10441 /*
10442  * i40e_parse_dcb_configure - parse the DCB configuration from the user
10443  * @dev: the device being configured
10444  * @dcb_cfg: pointer to the parsed configuration
10445  * @tc_map: bit map of enabled traffic classes
10446  *
10447  * Returns 0 on success, negative value on failure
10448  */
10449 static int
10450 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10451 			 struct i40e_dcbx_config *dcb_cfg,
10452 			 uint8_t *tc_map)
10453 {
10454 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10455 	uint8_t i, tc_bw, bw_lf;
10456 
10457 	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10458 
10459 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10460 	if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10461 		PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10462 		return -EINVAL;
10463 	}
10464 
10465 	/* assume each tc has the same bw */
10466 	tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10467 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10468 		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10469 	/* distribute the remainder so the sum of tcbw equals 100 (e.g. 34/33/33 for 3 TCs) */
10470 	bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10471 	for (i = 0; i < bw_lf; i++)
10472 		dcb_cfg->etscfg.tcbwtable[i]++;
10473 
10474 	/* assume each tc has the same Transmission Selection Algorithm */
10475 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10476 		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10477 
10478 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10479 		dcb_cfg->etscfg.prioritytable[i] =
10480 				dcb_rx_conf->dcb_tc[i];
10481 
10482 	/* FW needs one App to configure HW */
10483 	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10484 	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10485 	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10486 	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10487 
10488 	if (dcb_rx_conf->nb_tcs == 0)
10489 		*tc_map = 1; /* tc0 only */
10490 	else
10491 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10492 
10493 	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10494 		dcb_cfg->pfc.willing = 0;
10495 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10496 		dcb_cfg->pfc.pfcenable = *tc_map;
10497 	}
10498 	return 0;
10499 }
10500 
10501 
10502 static enum i40e_status_code
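/*
 * Update the VSI TC-to-queue mapping for the given TC bitmap. The usable
 * queues are split evenly across the enabled TCs, with the per-TC queue
 * count rounded down to a power of two and capped at I40E_MAX_Q_PER_TC.
 */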
10503 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10504 			      struct i40e_aqc_vsi_properties_data *info,
10505 			      uint8_t enabled_tcmap)
10506 {
10507 	enum i40e_status_code ret;
10508 	int i, total_tc = 0;
10509 	uint16_t qpnum_per_tc, bsf, qp_idx;
10510 	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10511 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10512 	uint16_t used_queues;
10513 
10514 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10515 	if (ret != I40E_SUCCESS)
10516 		return ret;
10517 
10518 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10519 		if (enabled_tcmap & (1 << i))
10520 			total_tc++;
10521 	}
10522 	if (total_tc == 0)
10523 		total_tc = 1;
10524 	vsi->enabled_tc = enabled_tcmap;
10525 
10526 	/* Different VSI types have different numbers of queues assigned */
10527 	if (vsi->type == I40E_VSI_MAIN)
10528 		used_queues = dev_data->nb_rx_queues -
10529 			pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10530 	else if (vsi->type == I40E_VSI_VMDQ2)
10531 		used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10532 	else {
10533 		PMD_INIT_LOG(ERR, "unsupported VSI type.");
10534 		return I40E_ERR_NO_AVAILABLE_VSI;
10535 	}
10536 
10537 	qpnum_per_tc = used_queues / total_tc;
10538 	/* Number of queues per enabled TC */
10539 	if (qpnum_per_tc == 0) {
10540 		PMD_INIT_LOG(ERR, "number of queues is less than the number of TCs.");
10541 		return I40E_ERR_INVALID_QP_ID;
10542 	}
10543 	qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10544 				I40E_MAX_Q_PER_TC);
10545 	bsf = rte_bsf32(qpnum_per_tc);
10546 
10547 	/**
10548 	 * Configure TC and queue mapping parameters: for each enabled TC,
10549 	 * allocate qpnum_per_tc queues to it; disabled TCs are served by
10550 	 * the default queue.
10551 	 */
10552 	qp_idx = 0;
10553 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10554 		if (vsi->enabled_tc & (1 << i)) {
10555 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10556 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10557 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10558 			qp_idx += qpnum_per_tc;
10559 		} else
10560 			info->tc_mapping[i] = 0;
10561 	}
10562 
10563 	/* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
10564 	if (vsi->type == I40E_VSI_SRIOV) {
10565 		info->mapping_flags |=
10566 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10567 		for (i = 0; i < vsi->nb_qps; i++)
10568 			info->queue_mapping[i] =
10569 				rte_cpu_to_le_16(vsi->base_queue + i);
10570 	} else {
10571 		info->mapping_flags |=
10572 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10573 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10574 	}
10575 	info->valid_sections |=
10576 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10577 
10578 	return I40E_SUCCESS;
10579 }
10580 
10581 /*
10582  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10583  * @veb: VEB to be configured
10584  * @tc_map: enabled TC bitmap
10585  *
10586  * Returns 0 on success, negative value on failure
10587  */
10588 static enum i40e_status_code
10589 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10590 {
10591 	struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10592 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10593 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10594 	struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10595 	enum i40e_status_code ret = I40E_SUCCESS;
10596 	int i;
10597 	uint32_t bw_max;
10598 
10599 	/* Nothing to do if the requested TC map matches the already enabled TCs */
10600 	if (veb->enabled_tc == tc_map)
10601 		return ret;
10602 
10603 	/* configure tc bandwidth */
10604 	memset(&veb_bw, 0, sizeof(veb_bw));
10605 	veb_bw.tc_valid_bits = tc_map;
10606 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
10607 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10608 		if (tc_map & BIT_ULL(i))
10609 			veb_bw.tc_bw_share_credits[i] = 1;
10610 	}
10611 	ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10612 						   &veb_bw, NULL);
10613 	if (ret) {
10614 		PMD_INIT_LOG(ERR,
10615 			"AQ command Config switch_comp BW allocation per TC failed = %d",
10616 			hw->aq.asq_last_status);
10617 		return ret;
10618 	}
10619 
10620 	memset(&ets_query, 0, sizeof(ets_query));
10621 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
10622 						   &ets_query, NULL);
10623 	if (ret != I40E_SUCCESS) {
10624 		PMD_DRV_LOG(ERR,
10625 			"Failed to get switch_comp ETS configuration %u",
10626 			hw->aq.asq_last_status);
10627 		return ret;
10628 	}
10629 	memset(&bw_query, 0, sizeof(bw_query));
10630 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
10631 						  &bw_query, NULL);
10632 	if (ret != I40E_SUCCESS) {
10633 		PMD_DRV_LOG(ERR,
10634 			"Failed to get switch_comp bandwidth configuration %u",
10635 			hw->aq.asq_last_status);
10636 		return ret;
10637 	}
10638 
10639 	/* store and print out BW info */
10640 	veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
10641 	veb->bw_info.bw_max = ets_query.tc_bw_max;
10642 	PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
10643 	PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
10644 	bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
10645 		    (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
10646 		     I40E_16_BIT_WIDTH);
10647 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10648 		veb->bw_info.bw_ets_share_credits[i] =
10649 				bw_query.tc_bw_share_credits[i];
10650 		veb->bw_info.bw_ets_credits[i] =
10651 				rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
10652 		/* 4 bits per TC, 4th bit is reserved */
10653 		veb->bw_info.bw_ets_max[i] =
10654 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
10655 				  RTE_LEN2MASK(3, uint8_t));
10656 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
10657 			    veb->bw_info.bw_ets_share_credits[i]);
10658 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
10659 			    veb->bw_info.bw_ets_credits[i]);
10660 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
10661 			    veb->bw_info.bw_ets_max[i]);
10662 	}
10663 
10664 	veb->enabled_tc = tc_map;
10665 
10666 	return ret;
10667 }
10668 
10669 
10670 /*
10671  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
10672  * @vsi: VSI to be configured
10673  * @tc_map: enabled TC bitmap
10674  *
10675  * Returns 0 on success, negative value on failure
10676  */
10677 static enum i40e_status_code
10678 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
10679 {
10680 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
10681 	struct i40e_vsi_context ctxt;
10682 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
10683 	enum i40e_status_code ret = I40E_SUCCESS;
10684 	int i;
10685 
10686 	/* Nothing to do if the requested TC map matches the already enabled TCs */
10687 	if (vsi->enabled_tc == tc_map)
10688 		return ret;
10689 
10690 	/* configure tc bandwidth */
10691 	memset(&bw_data, 0, sizeof(bw_data));
10692 	bw_data.tc_valid_bits = tc_map;
10693 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
10694 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10695 		if (tc_map & BIT_ULL(i))
10696 			bw_data.tc_bw_credits[i] = 1;
10697 	}
10698 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
10699 	if (ret) {
10700 		PMD_INIT_LOG(ERR,
10701 			"AQ command Config VSI BW allocation per TC failed = %d",
10702 			hw->aq.asq_last_status);
10703 		goto out;
10704 	}
10705 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
10706 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
10707 
10708 	/* Update Queue Pairs Mapping for currently enabled UPs */
10709 	ctxt.seid = vsi->seid;
10710 	ctxt.pf_num = hw->pf_id;
10711 	ctxt.vf_num = 0;
10712 	ctxt.uplink_seid = vsi->uplink_seid;
10713 	ctxt.info = vsi->info;
10714 	i40e_get_cap(hw);
10715 	ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
10716 	if (ret)
10717 		goto out;
10718 
10719 	/* Update the VSI after updating the VSI queue-mapping information */
10720 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
10721 	if (ret) {
10722 		PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
10723 			hw->aq.asq_last_status);
10724 		goto out;
10725 	}
10726 	/* update the local VSI info with updated queue map */
10727 	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
10728 					sizeof(vsi->info.tc_mapping));
10729 	rte_memcpy(&vsi->info.queue_mapping,
10730 			&ctxt.info.queue_mapping,
10731 		sizeof(vsi->info.queue_mapping));
10732 	vsi->info.mapping_flags = ctxt.info.mapping_flags;
10733 	vsi->info.valid_sections = 0;
10734 
10735 	/* query and update current VSI BW information */
10736 	ret = i40e_vsi_get_bw_config(vsi);
10737 	if (ret) {
10738 		PMD_INIT_LOG(ERR,
10739 			 "Failed updating vsi bw info, err %s aq_err %s",
10740 			 i40e_stat_str(hw, ret),
10741 			 i40e_aq_str(hw, hw->aq.asq_last_status));
10742 		goto out;
10743 	}
10744 
10745 	vsi->enabled_tc = tc_map;
10746 
10747 out:
10748 	return ret;
10749 }
10750 
10751 /*
10752  * i40e_dcb_hw_configure - program the dcb setting to hw
10753  * @pf: pf the configuration is taken on
10754  * @new_cfg: new configuration
10755  * @tc_map: enabled TC bitmap
10756  *
10757  * Returns 0 on success, negative value on failure
10758  */
10759 static enum i40e_status_code
10760 i40e_dcb_hw_configure(struct i40e_pf *pf,
10761 		      struct i40e_dcbx_config *new_cfg,
10762 		      uint8_t tc_map)
10763 {
10764 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10765 	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
10766 	struct i40e_vsi *main_vsi = pf->main_vsi;
10767 	struct i40e_vsi_list *vsi_list;
10768 	enum i40e_status_code ret;
10769 	int i;
10770 	uint32_t val;
10771 
10772 	/* Use the FW API only if FW >= v4.4 */
10773 	if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
10774 	      (hw->aq.fw_maj_ver >= 5))) {
10775 		PMD_INIT_LOG(ERR,
10776 			"FW < v4.4, cannot use FW LLDP API to configure DCB");
10777 		return I40E_ERR_FIRMWARE_API_VERSION;
10778 	}
10779 
10780 	/* Check whether reconfiguration is needed */
10781 	if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
10782 		PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
10783 		return I40E_SUCCESS;
10784 	}
10785 
10786 	/* Copy the new config to the current config */
10787 	*old_cfg = *new_cfg;
10788 	old_cfg->etsrec = old_cfg->etscfg;
10789 	ret = i40e_set_dcb_config(hw);
10790 	if (ret) {
10791 		PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
10792 			 i40e_stat_str(hw, ret),
10793 			 i40e_aq_str(hw, hw->aq.asq_last_status));
10794 		return ret;
10795 	}
10796 	/* set receive Arbiter to RR mode and ETS scheme by default */
10797 	for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
10798 		val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
10799 		val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
10800 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
10801 			 I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
10802 		val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
10803 			I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
10804 			 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
10805 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
10806 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
10807 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
10808 			 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
10809 		I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
10810 	}
10811 	/* get local mib to check whether it is configured correctly */
10812 	/* IEEE mode */
10813 	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
10814 	/* Get Local DCB Config */
10815 	i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
10816 				     &hw->local_dcbx_config);
10817 
10818 	/* If a VEB has been created, its TC configuration needs to be updated first */
10819 	if (main_vsi->veb) {
10820 		ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
10821 		if (ret)
10822 			PMD_INIT_LOG(WARNING,
10823 				 "Failed configuring TC for VEB seid=%d",
10824 				 main_vsi->veb->seid);
10825 	}
10826 	/* Update each VSI */
10827 	i40e_vsi_config_tc(main_vsi, tc_map);
10828 	if (main_vsi->veb) {
10829 		TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
10830 			/* Besides the main VSI and VMDQ VSIs, only enable the
10831 			 * default TC for other VSIs
10832 			 */
10833 			if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
10834 				ret = i40e_vsi_config_tc(vsi_list->vsi,
10835 							 tc_map);
10836 			else
10837 				ret = i40e_vsi_config_tc(vsi_list->vsi,
10838 							 I40E_DEFAULT_TCMAP);
10839 			if (ret)
10840 				PMD_INIT_LOG(WARNING,
10841 					"Failed configuring TC for VSI seid=%d",
10842 					vsi_list->vsi->seid);
10843 			/* continue */
10844 		}
10845 	}
10846 	return I40E_SUCCESS;
10847 }
10848 
10849 /*
10850  * i40e_dcb_init_configure - initial dcb config
10851  * @dev: device being configured
10852  * @sw_dcb: indicate whether dcb is sw configured or hw offload
10853  *
10854  * Returns 0 on success, negative value on failure
10855  */
10856 int
10857 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
10858 {
10859 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10860 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10861 	int i, ret = 0;
10862 
10863 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
10864 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10865 		return -ENOTSUP;
10866 	}
10867 
10868 	/* DCB initialization:
10869 	 * Update DCB configuration from the Firmware and configure
10870 	 * LLDP MIB change event.
10871 	 */
10872 	if (sw_dcb == TRUE) {
10873 		/* Stopping LLDP is necessary for DPDK, but doing so causes
10874 		 * DCB init to fail. For i40e_init_dcb(), the prerequisite
10875 		 * for successful DCB initialization is that LLDP is enabled,
10876 		 * so LLDP needs to be started before DCB init and stopped
10877 		 * again after initialization.
10878 		 */
10879 		ret = i40e_aq_start_lldp(hw, true, NULL);
10880 		if (ret != I40E_SUCCESS)
10881 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
10882 
10883 		ret = i40e_init_dcb(hw, true);
10884 		/* If the LLDP agent is stopped, i40e_init_dcb() is expected
10885 		 * to fail with an I40E_AQ_RC_EPERM adminq status. Otherwise,
10886 		 * it should return success.
10887 		 */
10888 		if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
10889 		    hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
10890 			memset(&hw->local_dcbx_config, 0,
10891 				sizeof(struct i40e_dcbx_config));
10892 			/* set dcb default configuration */
10893 			hw->local_dcbx_config.etscfg.willing = 0;
10894 			hw->local_dcbx_config.etscfg.maxtcs = 0;
10895 			hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
10896 			hw->local_dcbx_config.etscfg.tsatable[0] =
10897 						I40E_IEEE_TSA_ETS;
10898 			/* all UPs mapping to TC0 */
10899 			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10900 				hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
10901 			hw->local_dcbx_config.etsrec =
10902 				hw->local_dcbx_config.etscfg;
10903 			hw->local_dcbx_config.pfc.willing = 0;
10904 			hw->local_dcbx_config.pfc.pfccap =
10905 						I40E_MAX_TRAFFIC_CLASS;
10906 			/* FW needs one App to configure HW */
10907 			hw->local_dcbx_config.numapps = 1;
10908 			hw->local_dcbx_config.app[0].selector =
10909 						I40E_APP_SEL_ETHTYPE;
10910 			hw->local_dcbx_config.app[0].priority = 3;
10911 			hw->local_dcbx_config.app[0].protocolid =
10912 						I40E_APP_PROTOID_FCOE;
10913 			ret = i40e_set_dcb_config(hw);
10914 			if (ret) {
10915 				PMD_INIT_LOG(ERR,
10916 					"default dcb config fails. err = %d, aq_err = %d.",
10917 					ret, hw->aq.asq_last_status);
10918 				return -ENOSYS;
10919 			}
10920 		} else {
10921 			PMD_INIT_LOG(ERR,
10922 				"DCB initialization in FW fails, err = %d, aq_err = %d.",
10923 				ret, hw->aq.asq_last_status);
10924 			return -ENOTSUP;
10925 		}
10926 
10927 		if (i40e_need_stop_lldp(dev)) {
10928 			ret = i40e_aq_stop_lldp(hw, true, true, NULL);
10929 			if (ret != I40E_SUCCESS)
10930 				PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
10931 		}
10932 	} else {
10933 		ret = i40e_aq_start_lldp(hw, true, NULL);
10934 		if (ret != I40E_SUCCESS)
10935 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
10936 
10937 		ret = i40e_init_dcb(hw, true);
10938 		if (!ret) {
10939 			if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
10940 				PMD_INIT_LOG(ERR,
10941 					"HW doesn't support DCBX offload.");
10942 				return -ENOTSUP;
10943 			}
10944 		} else {
10945 			PMD_INIT_LOG(ERR,
10946 				"DCBX configuration failed, err = %d, aq_err = %d.",
10947 				ret, hw->aq.asq_last_status);
10948 			return -ENOTSUP;
10949 		}
10950 	}
10951 	return 0;
10952 }
10953 
10954 /*
10955  * i40e_dcb_setup - setup dcb related config
10956  * @dev: device being configured
10957  *
10958  * Returns 0 on success, negative value on failure
10959  */
10960 static int
10961 i40e_dcb_setup(struct rte_eth_dev *dev)
10962 {
10963 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10964 	struct i40e_dcbx_config dcb_cfg;
10965 	uint8_t tc_map = 0;
10966 	int ret = 0;
10967 
10968 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
10969 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
10970 		return -ENOTSUP;
10971 	}
10972 
10973 	if (pf->vf_num != 0)
10974 		PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
10975 
10976 	ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
10977 	if (ret) {
10978 		PMD_INIT_LOG(ERR, "invalid dcb config");
10979 		return -EINVAL;
10980 	}
10981 	ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
10982 	if (ret) {
10983 		PMD_INIT_LOG(ERR, "dcb sw configure fails");
10984 		return -ENOSYS;
10985 	}
10986 
10987 	return 0;
10988 }
10989 
10990 static int
10991 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
10992 		      struct rte_eth_dcb_info *dcb_info)
10993 {
10994 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10995 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10996 	struct i40e_vsi *vsi = pf->main_vsi;
10997 	struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
10998 	uint16_t bsf, tc_mapping;
10999 	int i, j = 0;
11000 
11001 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11002 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11003 	else
11004 		dcb_info->nb_tcs = 1;
11005 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11006 		dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11007 	for (i = 0; i < dcb_info->nb_tcs; i++)
11008 		dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11009 
11010 	/* get queue mapping if vmdq is disabled */
11011 	if (!pf->nb_cfg_vmdq_vsi) {
11012 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11013 			if (!(vsi->enabled_tc & (1 << i)))
11014 				continue;
11015 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11016 			dcb_info->tc_queue.tc_rxq[j][i].base =
11017 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11018 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11019 			dcb_info->tc_queue.tc_txq[j][i].base =
11020 				dcb_info->tc_queue.tc_rxq[j][i].base;
11021 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11022 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11023 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11024 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11025 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11026 		}
11027 		return 0;
11028 	}
11029 
11030 	/* get queue mapping if vmdq is enabled */
11031 	do {
11032 		vsi = pf->vmdq[j].vsi;
11033 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11034 			if (!(vsi->enabled_tc & (1 << i)))
11035 				continue;
11036 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11037 			dcb_info->tc_queue.tc_rxq[j][i].base =
11038 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11039 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11040 			dcb_info->tc_queue.tc_txq[j][i].base =
11041 				dcb_info->tc_queue.tc_rxq[j][i].base;
11042 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11043 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11044 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11045 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11046 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11047 		}
11048 		j++;
11049 	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11050 	return 0;
11051 }
11052 
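/*
 * Enable the interrupt for an RX queue by writing the dynamic interrupt
 * control register of the MSI-X vector bound to that queue, then ack the
 * interrupt on the PCI interrupt handle.
 */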
11053 static int
11054 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11055 {
11056 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11057 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11058 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11059 	uint16_t msix_intr;
11060 
11061 	msix_intr = intr_handle->intr_vec[queue_id];
11062 	if (msix_intr == I40E_MISC_VEC_ID)
11063 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11064 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
11065 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11066 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11067 	else
11068 		I40E_WRITE_REG(hw,
11069 			       I40E_PFINT_DYN_CTLN(msix_intr -
11070 						   I40E_RX_VEC_START),
11071 			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
11072 			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11073 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11074 
11075 	I40E_WRITE_FLUSH(hw);
11076 	rte_intr_ack(&pci_dev->intr_handle);
11077 
11078 	return 0;
11079 }
11080 
11081 static int
11082 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11083 {
11084 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11085 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11086 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11087 	uint16_t msix_intr;
11088 
11089 	msix_intr = intr_handle->intr_vec[queue_id];
11090 	if (msix_intr == I40E_MISC_VEC_ID)
11091 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11092 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11093 	else
11094 		I40E_WRITE_REG(hw,
11095 			       I40E_PFINT_DYN_CTLN(msix_intr -
11096 						   I40E_RX_VEC_START),
11097 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11098 	I40E_WRITE_FLUSH(hw);
11099 
11100 	return 0;
11101 }
11102 
11103 /**
11104  * This function is used to check whether a register offset is valid.
11105  * Below is the list of register ranges that are valid for X722 only:
11106  * 0x2b800--0x2bb00
11107  * 0x38700--0x38a00
11108  * 0x3d800--0x3db00
11109  * 0x208e00--0x209000
11110  * 0x20be00--0x20c000
11111  * 0x263c00--0x264000
11112  * 0x265c00--0x266000
11113  */
11114 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11115 {
11116 	if ((type != I40E_MAC_X722) &&
11117 	    ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11118 	     (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11119 	     (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11120 	     (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11121 	     (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11122 	     (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11123 	     (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11124 		return 0;
11125 	else
11126 		return 1;
11127 }
11128 
11129 static int i40e_get_regs(struct rte_eth_dev *dev,
11130 			 struct rte_dev_reg_info *regs)
11131 {
11132 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11133 	uint32_t *ptr_data = regs->data;
11134 	uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11135 	const struct i40e_reg_info *reg_info;
11136 
11137 	if (ptr_data == NULL) {
11138 		regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11139 		regs->width = sizeof(uint32_t);
11140 		return 0;
11141 	}
11142 
11143 	/* The first few registers have to be read using AQ operations */
11144 	reg_idx = 0;
11145 	while (i40e_regs_adminq[reg_idx].name) {
11146 		reg_info = &i40e_regs_adminq[reg_idx++];
11147 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11148 			for (arr_idx2 = 0;
11149 					arr_idx2 <= reg_info->count2;
11150 					arr_idx2++) {
11151 				reg_offset = arr_idx * reg_info->stride1 +
11152 					arr_idx2 * reg_info->stride2;
11153 				reg_offset += reg_info->base_addr;
11154 				ptr_data[reg_offset >> 2] =
11155 					i40e_read_rx_ctl(hw, reg_offset);
11156 			}
11157 	}
11158 
11159 	/* The remaining registers can be read using primitives */
11160 	reg_idx = 0;
11161 	while (i40e_regs_others[reg_idx].name) {
11162 		reg_info = &i40e_regs_others[reg_idx++];
11163 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11164 			for (arr_idx2 = 0;
11165 					arr_idx2 <= reg_info->count2;
11166 					arr_idx2++) {
11167 				reg_offset = arr_idx * reg_info->stride1 +
11168 					arr_idx2 * reg_info->stride2;
11169 				reg_offset += reg_info->base_addr;
11170 				if (!i40e_valid_regs(hw->mac.type, reg_offset))
11171 					ptr_data[reg_offset >> 2] = 0;
11172 				else
11173 					ptr_data[reg_offset >> 2] =
11174 						I40E_READ_REG(hw, reg_offset);
11175 			}
11176 	}
11177 
11178 	return 0;
11179 }
11180 
11181 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11182 {
11183 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11184 
11185 	/* Convert word count to byte count */
11186 	return hw->nvm.sr_size << 1;
11187 }
11188 
11189 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11190 			   struct rte_dev_eeprom_info *eeprom)
11191 {
11192 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11193 	uint16_t *data = eeprom->data;
11194 	uint16_t offset, length, cnt_words;
11195 	int ret_code;
11196 
11197 	offset = eeprom->offset >> 1;
11198 	length = eeprom->length >> 1;
11199 	cnt_words = length;
11200 
11201 	if (offset > hw->nvm.sr_size ||
11202 		offset + length > hw->nvm.sr_size) {
11203 		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11204 		return -EINVAL;
11205 	}
11206 
11207 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11208 
11209 	ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11210 	if (ret_code != I40E_SUCCESS || cnt_words != length) {
11211 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
11212 		return -EIO;
11213 	}
11214 
11215 	return 0;
11216 }
11217 
11218 static int i40e_get_module_info(struct rte_eth_dev *dev,
11219 				struct rte_eth_dev_module_info *modinfo)
11220 {
11221 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11222 	uint32_t sff8472_comp = 0;
11223 	uint32_t sff8472_swap = 0;
11224 	uint32_t sff8636_rev = 0;
11225 	i40e_status status;
11226 	uint32_t type = 0;
11227 
11228 	/* Check if firmware supports reading module EEPROM. */
11229 	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11230 		PMD_DRV_LOG(ERR,
11231 			    "Module EEPROM memory read not supported. "
11232 			    "Please update the NVM image.\n");
11233 		return -EINVAL;
11234 	}
11235 
11236 	status = i40e_update_link_info(hw);
11237 	if (status)
11238 		return -EIO;
11239 
11240 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11241 		PMD_DRV_LOG(ERR,
11242 			    "Cannot read module EEPROM memory. "
11243 			    "No module connected.\n");
11244 		return -EINVAL;
11245 	}
11246 
11247 	type = hw->phy.link_info.module_type[0];
11248 
11249 	switch (type) {
11250 	case I40E_MODULE_TYPE_SFP:
11251 		status = i40e_aq_get_phy_register(hw,
11252 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11253 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11254 				I40E_MODULE_SFF_8472_COMP,
11255 				&sff8472_comp, NULL);
11256 		if (status)
11257 			return -EIO;
11258 
11259 		status = i40e_aq_get_phy_register(hw,
11260 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11261 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11262 				I40E_MODULE_SFF_8472_SWAP,
11263 				&sff8472_swap, NULL);
11264 		if (status)
11265 			return -EIO;
11266 
11267 		/* Check if the module requires address swap to access
11268 		 * the other EEPROM memory page.
11269 		 */
11270 		if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11271 			PMD_DRV_LOG(WARNING,
11272 				    "Module address swap to access "
11273 				    "page 0xA2 is not supported.\n");
11274 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11275 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11276 		} else if (sff8472_comp == 0x00) {
11277 			/* Module is not SFF-8472 compliant */
11278 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11279 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11280 		} else {
11281 			modinfo->type = RTE_ETH_MODULE_SFF_8472;
11282 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11283 		}
11284 		break;
11285 	case I40E_MODULE_TYPE_QSFP_PLUS:
11286 		/* Read from memory page 0. */
11287 		status = i40e_aq_get_phy_register(hw,
11288 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11289 				0, 1,
11290 				I40E_MODULE_REVISION_ADDR,
11291 				&sff8636_rev, NULL);
11292 		if (status)
11293 			return -EIO;
11294 		/* Determine revision compliance byte */
11295 		if (sff8636_rev > 0x02) {
11296 			/* Module is SFF-8636 compliant */
11297 			modinfo->type = RTE_ETH_MODULE_SFF_8636;
11298 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11299 		} else {
11300 			modinfo->type = RTE_ETH_MODULE_SFF_8436;
11301 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11302 		}
11303 		break;
11304 	case I40E_MODULE_TYPE_QSFP28:
11305 		modinfo->type = RTE_ETH_MODULE_SFF_8636;
11306 		modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11307 		break;
11308 	default:
11309 		PMD_DRV_LOG(ERR, "Module type unrecognized\n");
11310 		return -EINVAL;
11311 	}
11312 	return 0;
11313 }
11314 
11315 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11316 				  struct rte_dev_eeprom_info *info)
11317 {
11318 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11319 	bool is_sfp = false;
11320 	i40e_status status;
11321 	uint8_t *data;
11322 	uint32_t value = 0;
11323 	uint32_t i;
11324 
11325 	if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11326 		is_sfp = true;
11327 
11328 	data = info->data;
11329 	for (i = 0; i < info->length; i++) {
11330 		u32 offset = i + info->offset;
11331 		u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11332 
11333 		/* Check if we need to access the other memory page */
11334 		if (is_sfp) {
11335 			if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11336 				offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11337 				addr = I40E_I2C_EEPROM_DEV_ADDR2;
11338 			}
11339 		} else {
11340 			while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11341 				/* Compute memory page number and offset. */
11342 				offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11343 				addr++;
11344 			}
11345 		}
11346 		status = i40e_aq_get_phy_register(hw,
11347 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11348 				addr, 1, offset, &value, NULL);
11349 		if (status)
11350 			return -EIO;
11351 		data[i] = (uint8_t)value;
11352 	}
11353 	return 0;
11354 }
11355 
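/*
 * Change the default MAC address: replace the MAC filter for the old default
 * address on the main VSI with one for the new address, then write the new
 * address to the hardware with i40e_aq_mac_address_write().
 */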
11356 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11357 				     struct rte_ether_addr *mac_addr)
11358 {
11359 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11360 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11361 	struct i40e_vsi *vsi = pf->main_vsi;
11362 	struct i40e_mac_filter_info mac_filter;
11363 	struct i40e_mac_filter *f;
11364 	int ret;
11365 
11366 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
11367 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11368 		return -EINVAL;
11369 	}
11370 
11371 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
11372 		if (rte_is_same_ether_addr(&pf->dev_addr,
11373 						&f->mac_info.mac_addr))
11374 			break;
11375 	}
11376 
11377 	if (f == NULL) {
11378 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11379 		return -EIO;
11380 	}
11381 
11382 	mac_filter = f->mac_info;
11383 	ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11384 	if (ret != I40E_SUCCESS) {
11385 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11386 		return -EIO;
11387 	}
11388 	memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11389 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
11390 	if (ret != I40E_SUCCESS) {
11391 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
11392 		return -EIO;
11393 	}
11394 	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11395 
11396 	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11397 					mac_addr->addr_bytes, NULL);
11398 	if (ret != I40E_SUCCESS) {
11399 		PMD_DRV_LOG(ERR, "Failed to change mac");
11400 		return -EIO;
11401 	}
11402 
11403 	return 0;
11404 }
11405 
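/*
 * Set a new MTU. The port must be stopped; the jumbo frame RX offload flag
 * and the maximum RX packet length are updated to match the new frame size.
 */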
11406 static int
11407 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11408 {
11409 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11410 	struct rte_eth_dev_data *dev_data = pf->dev_data;
11411 	uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11412 	int ret = 0;
11413 
11414 	/* check if mtu is within the allowed range */
11415 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
11416 		return -EINVAL;
11417 
11418 	/* MTU setting is forbidden while the port is started */
11419 	if (dev_data->dev_started) {
11420 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11421 			    dev_data->port_id);
11422 		return -EBUSY;
11423 	}
11424 
11425 	if (frame_size > I40E_ETH_MAX_LEN)
11426 		dev_data->dev_conf.rxmode.offloads |=
11427 			DEV_RX_OFFLOAD_JUMBO_FRAME;
11428 	else
11429 		dev_data->dev_conf.rxmode.offloads &=
11430 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
11431 
11432 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11433 
11434 	return ret;
11435 }
11436 
11437 /* Restore ethertype filter */
11438 static void
11439 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11440 {
11441 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11442 	struct i40e_ethertype_filter_list
11443 		*ethertype_list = &pf->ethertype.ethertype_list;
11444 	struct i40e_ethertype_filter *f;
11445 	struct i40e_control_filter_stats stats;
11446 	uint16_t flags;
11447 
11448 	TAILQ_FOREACH(f, ethertype_list, rules) {
11449 		flags = 0;
11450 		if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11451 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11452 		if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11453 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11454 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11455 
11456 		memset(&stats, 0, sizeof(stats));
11457 		i40e_aq_add_rem_control_packet_filter(hw,
11458 					    f->input.mac_addr.addr_bytes,
11459 					    f->input.ether_type,
11460 					    flags, pf->main_vsi->seid,
11461 					    f->queue, 1, &stats, NULL);
11462 	}
11463 	PMD_DRV_LOG(INFO, "Ethertype filter:"
11464 		    " mac_etype_used = %u, etype_used = %u,"
11465 		    " mac_etype_free = %u, etype_free = %u",
11466 		    stats.mac_etype_used, stats.etype_used,
11467 		    stats.mac_etype_free, stats.etype_free);
11468 }
11469 
11470 /* Restore tunnel filter */
11471 static void
11472 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11473 {
11474 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11475 	struct i40e_vsi *vsi;
11476 	struct i40e_pf_vf *vf;
11477 	struct i40e_tunnel_filter_list
11478 		*tunnel_list = &pf->tunnel.tunnel_list;
11479 	struct i40e_tunnel_filter *f;
11480 	struct i40e_aqc_cloud_filters_element_bb cld_filter;
11481 	bool big_buffer = 0;
11482 
11483 	TAILQ_FOREACH(f, tunnel_list, rules) {
11484 		if (!f->is_to_vf)
11485 			vsi = pf->main_vsi;
11486 		else {
11487 			vf = &pf->vfs[f->vf_id];
11488 			vsi = vf->vsi;
11489 		}
11490 		memset(&cld_filter, 0, sizeof(cld_filter));
11491 		rte_ether_addr_copy((struct rte_ether_addr *)
11492 				&f->input.outer_mac,
11493 			(struct rte_ether_addr *)&cld_filter.element.outer_mac);
11494 		rte_ether_addr_copy((struct rte_ether_addr *)
11495 				&f->input.inner_mac,
11496 			(struct rte_ether_addr *)&cld_filter.element.inner_mac);
11497 		cld_filter.element.inner_vlan = f->input.inner_vlan;
11498 		cld_filter.element.flags = f->input.flags;
11499 		cld_filter.element.tenant_id = f->input.tenant_id;
11500 		cld_filter.element.queue_number = f->queue;
11501 		rte_memcpy(cld_filter.general_fields,
11502 			   f->input.general_fields,
11503 			   sizeof(f->input.general_fields));
11504 
11505 		if (((f->input.flags &
11506 		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11507 		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11508 		    ((f->input.flags &
11509 		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11510 		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11511 		    ((f->input.flags &
11512 		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11513 		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
11514 			big_buffer = 1;
11515 
11516 		if (big_buffer)
11517 			i40e_aq_add_cloud_filters_bb(hw,
11518 					vsi->seid, &cld_filter, 1);
11519 		else
11520 			i40e_aq_add_cloud_filters(hw, vsi->seid,
11521 						  &cld_filter.element, 1);
11522 	}
11523 }
11524 
11525 static void
11526 i40e_filter_restore(struct i40e_pf *pf)
11527 {
11528 	i40e_ethertype_filter_restore(pf);
11529 	i40e_tunnel_filter_restore(pf);
11530 	i40e_fdir_filter_restore(pf);
11531 	(void)i40e_hash_filter_restore(pf);
11532 }
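
/*
 * Note: this restore helper re-programs the rules tracked in the PF lists
 * (ethertype, tunnel/cloud, flow director and hash) back into hardware; it
 * is typically invoked from the device start path so that previously added
 * flows survive a stop/start cycle.
 */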
11533 
11534 bool
11535 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11536 {
11537 	if (strcmp(dev->device->driver->name, drv->driver.name))
11538 		return false;
11539 
11540 	return true;
11541 }
11542 
11543 bool
11544 is_i40e_supported(struct rte_eth_dev *dev)
11545 {
11546 	return is_device_supported(dev, &rte_i40e_pmd);
11547 }
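
/*
 * Illustrative caller pattern (a sketch, as used by the rte_pmd_i40e_*
 * private API family): validate that a port really is driven by this PMD
 * before dereferencing i40e private data.
 *
 *	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 *
 *	if (!is_i40e_supported(dev))
 *		return -ENOTSUP;
 *
 * port_id is a hypothetical, valid ethdev port number.
 */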
11548 
11549 struct i40e_customized_pctype*
11550 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11551 {
11552 	int i;
11553 
11554 	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11555 		if (pf->customized_pctype[i].index == index)
11556 			return &pf->customized_pctype[i];
11557 	}
11558 	return NULL;
11559 }
11560 
11561 static int
11562 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11563 			      uint32_t pkg_size, uint32_t proto_num,
11564 			      struct rte_pmd_i40e_proto_info *proto,
11565 			      enum rte_pmd_i40e_package_op op)
11566 {
11567 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11568 	uint32_t pctype_num;
11569 	struct rte_pmd_i40e_ptype_info *pctype;
11570 	uint32_t buff_size;
11571 	struct i40e_customized_pctype *new_pctype = NULL;
11572 	uint8_t proto_id;
11573 	uint8_t pctype_value;
11574 	char name[64];
11575 	uint32_t i, j, n;
11576 	int ret;
11577 
11578 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11579 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11580 		PMD_DRV_LOG(ERR, "Unsupported operation.");
11581 		return -1;
11582 	}
11583 
11584 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11585 				(uint8_t *)&pctype_num, sizeof(pctype_num),
11586 				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11587 	if (ret) {
11588 		PMD_DRV_LOG(ERR, "Failed to get pctype number");
11589 		return -1;
11590 	}
11591 	if (!pctype_num) {
11592 		PMD_DRV_LOG(INFO, "No new pctype added");
11593 		return -1;
11594 	}
11595 
11596 	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11597 	pctype = rte_zmalloc("new_pctype", buff_size, 0);
11598 	if (!pctype) {
11599 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11600 		return -1;
11601 	}
11602 	/* get information about new pctype list */
11603 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11604 					(uint8_t *)pctype, buff_size,
11605 					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11606 	if (ret) {
11607 		PMD_DRV_LOG(ERR, "Failed to get pctype list");
11608 		rte_free(pctype);
11609 		return -1;
11610 	}
11611 
11612 	/* Update customized pctype. */
11613 	for (i = 0; i < pctype_num; i++) {
		new_pctype = NULL; /* do not reuse the previous match */
11614 		pctype_value = pctype[i].ptype_id;
11615 		memset(name, 0, sizeof(name));
11616 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11617 			proto_id = pctype[i].protocols[j];
11618 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11619 				continue;
11620 			for (n = 0; n < proto_num; n++) {
11621 				if (proto[n].proto_id != proto_id)
11622 					continue;
11623 				strlcat(name, proto[n].name, sizeof(name));
11624 				strlcat(name, "_", sizeof(name));
11625 				break;
11626 			}
11627 		}
11628 		if (name[0] != '\0')
			name[strlen(name) - 1] = '\0';
11629 		PMD_DRV_LOG(INFO, "name = %s", name);
11630 		if (!strcmp(name, "GTPC"))
11631 			new_pctype =
11632 				i40e_find_customized_pctype(pf,
11633 						      I40E_CUSTOMIZED_GTPC);
11634 		else if (!strcmp(name, "GTPU_IPV4"))
11635 			new_pctype =
11636 				i40e_find_customized_pctype(pf,
11637 						   I40E_CUSTOMIZED_GTPU_IPV4);
11638 		else if (!strcmp(name, "GTPU_IPV6"))
11639 			new_pctype =
11640 				i40e_find_customized_pctype(pf,
11641 						   I40E_CUSTOMIZED_GTPU_IPV6);
11642 		else if (!strcmp(name, "GTPU"))
11643 			new_pctype =
11644 				i40e_find_customized_pctype(pf,
11645 						      I40E_CUSTOMIZED_GTPU);
11646 		else if (!strcmp(name, "IPV4_L2TPV3"))
11647 			new_pctype =
11648 				i40e_find_customized_pctype(pf,
11649 						I40E_CUSTOMIZED_IPV4_L2TPV3);
11650 		else if (!strcmp(name, "IPV6_L2TPV3"))
11651 			new_pctype =
11652 				i40e_find_customized_pctype(pf,
11653 						I40E_CUSTOMIZED_IPV6_L2TPV3);
11654 		else if (!strcmp(name, "IPV4_ESP"))
11655 			new_pctype =
11656 				i40e_find_customized_pctype(pf,
11657 						I40E_CUSTOMIZED_ESP_IPV4);
11658 		else if (!strcmp(name, "IPV6_ESP"))
11659 			new_pctype =
11660 				i40e_find_customized_pctype(pf,
11661 						I40E_CUSTOMIZED_ESP_IPV6);
11662 		else if (!strcmp(name, "IPV4_UDP_ESP"))
11663 			new_pctype =
11664 				i40e_find_customized_pctype(pf,
11665 						I40E_CUSTOMIZED_ESP_IPV4_UDP);
11666 		else if (!strcmp(name, "IPV6_UDP_ESP"))
11667 			new_pctype =
11668 				i40e_find_customized_pctype(pf,
11669 						I40E_CUSTOMIZED_ESP_IPV6_UDP);
11670 		else if (!strcmp(name, "IPV4_AH"))
11671 			new_pctype =
11672 				i40e_find_customized_pctype(pf,
11673 						I40E_CUSTOMIZED_AH_IPV4);
11674 		else if (!strcmp(name, "IPV6_AH"))
11675 			new_pctype =
11676 				i40e_find_customized_pctype(pf,
11677 						I40E_CUSTOMIZED_AH_IPV6);
11678 		if (new_pctype) {
11679 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
11680 				new_pctype->pctype = pctype_value;
11681 				new_pctype->valid = true;
11682 			} else {
11683 				new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
11684 				new_pctype->valid = false;
11685 			}
11686 		}
11687 	}
11688 
11689 	rte_free(pctype);
11690 	return 0;
11691 }
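
/*
 * Worked example (illustrative): for a DDP pctype whose protocol list is
 * IPV4, UDP, ESP (in that order), the name built above is "IPV4_UDP_ESP",
 * which selects the I40E_CUSTOMIZED_ESP_IPV4_UDP entry; on
 * RTE_PMD_I40E_PKG_OP_WR_ADD that entry records the hardware pctype value
 * and is marked valid, on RTE_PMD_I40E_PKG_OP_WR_DEL it is invalidated
 * again.
 */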
11692 
11693 static int
11694 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
11695 			     uint32_t pkg_size, uint32_t proto_num,
11696 			     struct rte_pmd_i40e_proto_info *proto,
11697 			     enum rte_pmd_i40e_package_op op)
11698 {
11699 	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
11700 	uint16_t port_id = dev->data->port_id;
11701 	uint32_t ptype_num;
11702 	struct rte_pmd_i40e_ptype_info *ptype;
11703 	uint32_t buff_size;
11704 	uint8_t proto_id;
11705 	char name[RTE_PMD_I40E_DDP_NAME_SIZE];
11706 	uint32_t i, j, n;
11707 	bool in_tunnel;
11708 	int ret;
11709 
11710 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11711 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11712 		PMD_DRV_LOG(ERR, "Unsupported operation.");
11713 		return -1;
11714 	}
11715 
11716 	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
11717 		rte_pmd_i40e_ptype_mapping_reset(port_id);
11718 		return 0;
11719 	}
11720 
11721 	/* get information about new ptype num */
11722 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11723 				(uint8_t *)&ptype_num, sizeof(ptype_num),
11724 				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
11725 	if (ret) {
11726 		PMD_DRV_LOG(ERR, "Failed to get ptype number");
11727 		return ret;
11728 	}
11729 	if (!ptype_num) {
11730 		PMD_DRV_LOG(INFO, "No new ptype added");
11731 		return -1;
11732 	}
11733 
11734 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
11735 	ptype = rte_zmalloc("new_ptype", buff_size, 0);
11736 	if (!ptype) {
11737 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11738 		return -1;
11739 	}
11740 
11741 	/* get information about new ptype list */
11742 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11743 					(uint8_t *)ptype, buff_size,
11744 					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
11745 	if (ret) {
11746 		PMD_DRV_LOG(ERR, "Failed to get ptype list");
11747 		rte_free(ptype);
11748 		return ret;
11749 	}
11750 
11751 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
11752 	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
11753 	if (!ptype_mapping) {
11754 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11755 		rte_free(ptype);
11756 		return -1;
11757 	}
11758 
11759 	/* Update ptype mapping table. */
11760 	for (i = 0; i < ptype_num; i++) {
11761 		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
11762 		ptype_mapping[i].sw_ptype = 0;
11763 		in_tunnel = false;
11764 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11765 			proto_id = ptype[i].protocols[j];
11766 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11767 				continue;
11768 			for (n = 0; n < proto_num; n++) {
11769 				if (proto[n].proto_id != proto_id)
11770 					continue;
11771 				memset(name, 0, sizeof(name));
11772 				strcpy(name, proto[n].name);
11773 				PMD_DRV_LOG(INFO, "name = %s", name);
11774 				if (!strncasecmp(name, "PPPOE", 5))
11775 					ptype_mapping[i].sw_ptype |=
11776 						RTE_PTYPE_L2_ETHER_PPPOE;
11777 				else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11778 					 !in_tunnel) {
11779 					ptype_mapping[i].sw_ptype |=
11780 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11781 					ptype_mapping[i].sw_ptype |=
11782 						RTE_PTYPE_L4_FRAG;
11783 				} else if (!strncasecmp(name, "IPV4FRAG", 8) &&
11784 					   in_tunnel) {
11785 					ptype_mapping[i].sw_ptype |=
11786 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11787 					ptype_mapping[i].sw_ptype |=
11788 						RTE_PTYPE_INNER_L4_FRAG;
11789 				} else if (!strncasecmp(name, "OIPV4", 5)) {
11790 					ptype_mapping[i].sw_ptype |=
11791 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11792 					in_tunnel = true;
11793 				} else if (!strncasecmp(name, "IPV4", 4) &&
11794 					   !in_tunnel)
11795 					ptype_mapping[i].sw_ptype |=
11796 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
11797 				else if (!strncasecmp(name, "IPV4", 4) &&
11798 					 in_tunnel)
11799 					ptype_mapping[i].sw_ptype |=
11800 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
11801 				else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11802 					 !in_tunnel) {
11803 					ptype_mapping[i].sw_ptype |=
11804 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11805 					ptype_mapping[i].sw_ptype |=
11806 						RTE_PTYPE_L4_FRAG;
11807 				} else if (!strncasecmp(name, "IPV6FRAG", 8) &&
11808 					   in_tunnel) {
11809 					ptype_mapping[i].sw_ptype |=
11810 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11811 					ptype_mapping[i].sw_ptype |=
11812 						RTE_PTYPE_INNER_L4_FRAG;
11813 				} else if (!strncasecmp(name, "OIPV6", 5)) {
11814 					ptype_mapping[i].sw_ptype |=
11815 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11816 					in_tunnel = true;
11817 				} else if (!strncasecmp(name, "IPV6", 4) &&
11818 					   !in_tunnel)
11819 					ptype_mapping[i].sw_ptype |=
11820 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
11821 				else if (!strncasecmp(name, "IPV6", 4) &&
11822 					 in_tunnel)
11823 					ptype_mapping[i].sw_ptype |=
11824 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
11825 				else if (!strncasecmp(name, "UDP", 3) &&
11826 					 !in_tunnel)
11827 					ptype_mapping[i].sw_ptype |=
11828 						RTE_PTYPE_L4_UDP;
11829 				else if (!strncasecmp(name, "UDP", 3) &&
11830 					 in_tunnel)
11831 					ptype_mapping[i].sw_ptype |=
11832 						RTE_PTYPE_INNER_L4_UDP;
11833 				else if (!strncasecmp(name, "TCP", 3) &&
11834 					 !in_tunnel)
11835 					ptype_mapping[i].sw_ptype |=
11836 						RTE_PTYPE_L4_TCP;
11837 				else if (!strncasecmp(name, "TCP", 3) &&
11838 					 in_tunnel)
11839 					ptype_mapping[i].sw_ptype |=
11840 						RTE_PTYPE_INNER_L4_TCP;
11841 				else if (!strncasecmp(name, "SCTP", 4) &&
11842 					 !in_tunnel)
11843 					ptype_mapping[i].sw_ptype |=
11844 						RTE_PTYPE_L4_SCTP;
11845 				else if (!strncasecmp(name, "SCTP", 4) &&
11846 					 in_tunnel)
11847 					ptype_mapping[i].sw_ptype |=
11848 						RTE_PTYPE_INNER_L4_SCTP;
11849 				else if ((!strncasecmp(name, "ICMP", 4) ||
11850 					  !strncasecmp(name, "ICMPV6", 6)) &&
11851 					 !in_tunnel)
11852 					ptype_mapping[i].sw_ptype |=
11853 						RTE_PTYPE_L4_ICMP;
11854 				else if ((!strncasecmp(name, "ICMP", 4) ||
11855 					  !strncasecmp(name, "ICMPV6", 6)) &&
11856 					 in_tunnel)
11857 					ptype_mapping[i].sw_ptype |=
11858 						RTE_PTYPE_INNER_L4_ICMP;
11859 				else if (!strncasecmp(name, "GTPC", 4)) {
11860 					ptype_mapping[i].sw_ptype |=
11861 						RTE_PTYPE_TUNNEL_GTPC;
11862 					in_tunnel = true;
11863 				} else if (!strncasecmp(name, "GTPU", 4)) {
11864 					ptype_mapping[i].sw_ptype |=
11865 						RTE_PTYPE_TUNNEL_GTPU;
11866 					in_tunnel = true;
11867 				} else if (!strncasecmp(name, "ESP", 3)) {
11868 					ptype_mapping[i].sw_ptype |=
11869 						RTE_PTYPE_TUNNEL_ESP;
11870 					in_tunnel = true;
11871 				} else if (!strncasecmp(name, "GRENAT", 6)) {
11872 					ptype_mapping[i].sw_ptype |=
11873 						RTE_PTYPE_TUNNEL_GRENAT;
11874 					in_tunnel = true;
11875 				} else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
11876 					   !strncasecmp(name, "L2TPV2", 6) ||
11877 					   !strncasecmp(name, "L2TPV3", 6)) {
11878 					ptype_mapping[i].sw_ptype |=
11879 						RTE_PTYPE_TUNNEL_L2TP;
11880 					in_tunnel = true;
11881 				}
11882 
11883 				break;
11884 			}
11885 		}
11886 	}
11887 
11888 	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
11889 						ptype_num, 0);
11890 	if (ret)
11891 		PMD_DRV_LOG(ERR, "Failed to update ptype mapping table.");
11892 
11893 	rte_free(ptype_mapping);
11894 	rte_free(ptype);
11895 	return ret;
11896 }
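
/*
 * Worked example (illustrative): assuming a DDP ptype lists its protocols
 * outer to inner as OIPV4, GTPU, IPV4, UDP, the loop above accumulates
 *
 *	RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GTPU |
 *	RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_UDP
 *
 * since OIPV4 marks the start of a tunnel and everything after it is
 * treated as inner headers.
 */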
11897 
11898 void
11899 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
11900 			    uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
11901 {
11902 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11903 	uint32_t proto_num;
11904 	struct rte_pmd_i40e_proto_info *proto;
11905 	uint32_t buff_size;
11906 	uint32_t i;
11907 	int ret;
11908 
11909 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11910 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11911 		PMD_DRV_LOG(ERR, "Unsupported operation.");
11912 		return;
11913 	}
11914 
11915 	/* get information about protocol number */
11916 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11917 				       (uint8_t *)&proto_num, sizeof(proto_num),
11918 				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
11919 	if (ret) {
11920 		PMD_DRV_LOG(ERR, "Failed to get protocol number");
11921 		return;
11922 	}
11923 	if (!proto_num) {
11924 		PMD_DRV_LOG(INFO, "No new protocol added");
11925 		return;
11926 	}
11927 
11928 	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
11929 	proto = rte_zmalloc("new_proto", buff_size, 0);
11930 	if (!proto) {
11931 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
11932 		return;
11933 	}
11934 
11935 	/* get information about protocol list */
11936 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11937 					(uint8_t *)proto, buff_size,
11938 					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
11939 	if (ret) {
11940 		PMD_DRV_LOG(ERR, "Failed to get protocol list");
11941 		rte_free(proto);
11942 		return;
11943 	}
11944 
11945 	/* Check if GTP is supported. */
11946 	for (i = 0; i < proto_num; i++) {
11947 		if (!strncmp(proto[i].name, "GTP", 3)) {
11948 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
11949 				pf->gtp_support = true;
11950 			else
11951 				pf->gtp_support = false;
11952 			break;
11953 		}
11954 	}
11955 
11956 	/* Check if ESP is supported. */
11957 	for (i = 0; i < proto_num; i++) {
11958 		if (!strncmp(proto[i].name, "ESP", 3)) {
11959 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
11960 				pf->esp_support = true;
11961 			else
11962 				pf->esp_support = false;
11963 			break;
11964 		}
11965 	}
11966 
11967 	/* Update customized pctype info */
11968 	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
11969 					    proto_num, proto, op);
11970 	if (ret)
11971 		PMD_DRV_LOG(INFO, "No pctype is updated.");
11972 
11973 	/* Update customized ptype info */
11974 	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
11975 					   proto_num, proto, op);
11976 	if (ret)
11977 		PMD_DRV_LOG(INFO, "No ptype is updated.");
11978 
11979 	rte_free(proto);
11980 }
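
/*
 * Note: this helper refreshes the GTP/ESP capability flags and the
 * customized pctype/ptype tables from a DDP profile; it is typically called
 * while such a profile is being added to or removed from the NIC through
 * the rte_pmd_i40e DDP API.
 */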
11981 
11982 /* Create a QinQ cloud filter
11983  *
11984  * The Fortville NIC has limited resources for tunnel filters,
11985  * so we can only reuse existing filters.
11986  *
11987  * In step 1 we define which Field Vector fields can be used for
11988  * filter types.
11989  * As we do not have the inner tag defined as a field,
11990  * we have to define it first, by reusing one of L1 entries.
11991  *
11992  * In step 2 we are replacing one of existing filter types with
11993  * a new one for QinQ.
11994  * As we are reusing L1 and replacing L2, some of the default filter
11995  * types will disappear, which depends on the L1 and L2 entries we reuse.
11996  *
11997  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
11998  *
11999  * 1.	Create L1 filter of outer vlan (12b) which will be in use
12000  *		later when we define the cloud filter.
12001  *	a.	Valid_flags.replace_cloud = 0
12002  *	b.	Old_filter = 10 (Stag_Inner_Vlan)
12003  *	c.	New_filter = 0x10
12004  *	d.	TR bit = 0xff (optional, not used here)
12005  *	e.	Buffer – 2 entries:
12006  *		i.	Byte 0 = 8 (outer vlan FV index).
12007  *			Byte 1 = 0 (rsv)
12008  *			Byte 2-3 = 0x0fff
12009  *		ii.	Byte 0 = 37 (inner vlan FV index).
12010  *			Byte 1 = 0 (rsv)
12011  *			Byte 2-3 = 0x0fff
12012  *
12013  * Step 2:
12014  * 2.	Create cloud filter using two L1 filters entries: stag and
12015  *		new filter(outer vlan+ inner vlan)
12016  *	a.	Valid_flags.replace_cloud = 1
12017  *	b.	Old_filter = 1 (instead of outer IP)
12018  *	c.	New_filter = 0x10
12019  *	d.	Buffer – 2 entries:
12020  *		i.	Byte 0 = 0x80 | 7 (valid | Stag).
12021  *			Byte 1-3 = 0 (rsv)
12022  *		ii.	Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12023  *			Byte 9-11 = 0 (rsv)
12024  */
12025 static int
12026 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12027 {
12028 	int ret = -ENOTSUP;
12029 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12030 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12031 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12032 	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
12033 
12034 	if (pf->support_multi_driver) {
12035 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12036 		return ret;
12037 	}
12038 
12039 	/* Init */
12040 	memset(&filter_replace, 0,
12041 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12042 	memset(&filter_replace_buf, 0,
12043 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12044 
12045 	/* create L1 filter */
12046 	filter_replace.old_filter_type =
12047 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12048 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12049 	filter_replace.tr_bit = 0;
12050 
12051 	/* Prepare the buffer, 2 entries */
12052 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12053 	filter_replace_buf.data[0] |=
12054 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12055 	/* Field Vector 12b mask */
12056 	filter_replace_buf.data[2] = 0xff;
12057 	filter_replace_buf.data[3] = 0x0f;
12058 	filter_replace_buf.data[4] =
12059 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12060 	filter_replace_buf.data[4] |=
12061 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12062 	/* Field Vector 12b mask */
12063 	filter_replace_buf.data[6] = 0xff;
12064 	filter_replace_buf.data[7] = 0x0f;
12065 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12066 			&filter_replace_buf);
12067 	if (ret != I40E_SUCCESS)
12068 		return ret;
12069 
12070 	if (filter_replace.old_filter_type !=
12071 	    filter_replace.new_filter_type)
12072 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12073 			    " original: 0x%x, new: 0x%x",
12074 			    dev->device->name,
12075 			    filter_replace.old_filter_type,
12076 			    filter_replace.new_filter_type);
12077 
12078 	/* Apply the second L2 cloud filter */
12079 	memset(&filter_replace, 0,
12080 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12081 	memset(&filter_replace_buf, 0,
12082 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12083 
12084 	/* Create the L2 filter; its input will be the L1 filter created above */
12085 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12086 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12087 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12088 
12089 	/* Prepare the buffer, 2 entries */
12090 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12091 	filter_replace_buf.data[0] |=
12092 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12093 	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12094 	filter_replace_buf.data[4] |=
12095 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12096 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12097 			&filter_replace_buf);
12098 	if (!ret && (filter_replace.old_filter_type !=
12099 		     filter_replace.new_filter_type))
12100 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12101 			    " original: 0x%x, new: 0x%x",
12102 			    dev->device->name,
12103 			    filter_replace.old_filter_type,
12104 			    filter_replace.new_filter_type);
12105 
12106 	return ret;
12107 }
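
/*
 * Note: replacing the cloud filter types is a one-time, port-wide
 * reconfiguration performed through the admin queue, so this helper only
 * needs to run once, before the first QinQ tunnel filter is programmed;
 * it is refused entirely when the "support-multi-driver" devarg is set.
 */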
12108 
12109 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
12110 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
12111 #ifdef RTE_ETHDEV_DEBUG_RX
12112 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_rx, rx, DEBUG);
12113 #endif
12114 #ifdef RTE_ETHDEV_DEBUG_TX
12115 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_tx, tx, DEBUG);
12116 #endif
12117 
12118 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12119 			      ETH_I40E_FLOATING_VEB_ARG "=1"
12120 			      ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12121 			      ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12122 			      ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
12123
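
/*
 * Illustrative devargs sketch (assumed command line, not from this file):
 * the parameters registered above are passed per device on the EAL command
 * line, e.g.
 *
 *	dpdk-testpmd -a 0000:02:00.0,queue-num-per-vf=4,support-multi-driver=1 -- -i
 *
 * where 0000:02:00.0 is a hypothetical i40e PCI address.
 */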