xref: /f-stack/dpdk/drivers/net/i40e/i40e_ethdev.c (revision ebf5cedb)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <stdarg.h>
11 #include <inttypes.h>
12 #include <assert.h>
13 
14 #include <rte_common.h>
15 #include <rte_eal.h>
16 #include <rte_string_fns.h>
17 #include <rte_pci.h>
18 #include <rte_bus_pci.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
21 #include <rte_ethdev_pci.h>
22 #include <rte_memzone.h>
23 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_alarm.h>
26 #include <rte_dev.h>
27 #include <rte_tailq.h>
28 #include <rte_hash_crc.h>
29 
30 #include "i40e_logs.h"
31 #include "base/i40e_prototype.h"
32 #include "base/i40e_adminq_cmd.h"
33 #include "base/i40e_type.h"
34 #include "base/i40e_register.h"
35 #include "base/i40e_dcb.h"
36 #include "i40e_ethdev.h"
37 #include "i40e_rxtx.h"
38 #include "i40e_pf.h"
39 #include "i40e_regs.h"
40 #include "rte_pmd_i40e.h"
41 
42 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
43 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
44 #define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
45 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG	"queue-num-per-vf"
46 #define ETH_I40E_USE_LATEST_VEC	"use-latest-supported-vec"
47 #define ETH_I40E_VF_MSG_CFG		"vf_msg_cfg"
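
/*
 * These keys are consumed from the device arguments through rte_kvargs by
 * the handlers further below. As an illustration only (not taken from this
 * file), they could be supplied on the EAL command line as, e.g.:
 *
 *   -w 0000:02:00.0,support-multi-driver=1,queue-num-per-vf=4,use-latest-supported-vec=1
 */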
48 
49 #define I40E_CLEAR_PXE_WAIT_MS     200
50 
51 /* Maximum number of capability elements */
52 #define I40E_MAX_CAP_ELE_NUM       128
53 
54 /* Wait count and interval */
55 #define I40E_CHK_Q_ENA_COUNT       1000
56 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
57 
58 /* Maximum number of VSIs */
59 #define I40E_MAX_NUM_VSIS          (384UL)
60 
61 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
62 
63 /* Flow control default timer */
64 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
65 
66 /* Flow control enable fwd bit */
67 #define I40E_PRTMAC_FWD_CTRL   0x00000001
68 
69 /* Receive Packet Buffer size */
70 #define I40E_RXPBSIZE (968 * 1024)
71 
72 /* Kilobytes shift */
73 #define I40E_KILOSHIFT 10
74 
75 /* Flow control default high water */
76 #define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
77 
78 /* Flow control default low water */
79 #define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
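
/*
 * Worked example for the two defaults above: 0xF2000 bytes is 991232 bytes,
 * i.e. 968 KiB, so after the I40E_KILOSHIFT (>> 10) both watermarks default
 * to 968, which is the full Rx packet buffer size (I40E_RXPBSIZE) expressed
 * in kilobytes.
 */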
80 
81 /* Receive Average Packet Size in Bytes */
82 #define I40E_PACKET_AVERAGE_SIZE 128
83 
84 /* Mask of PF interrupt causes */
85 #define I40E_PFINT_ICR0_ENA_MASK ( \
86 		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
87 		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
88 		I40E_PFINT_ICR0_ENA_GRST_MASK | \
89 		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
90 		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
91 		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
92 		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
93 		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
94 		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
95 
96 #define I40E_FLOW_TYPES ( \
97 	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
98 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
99 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
100 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
101 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
102 	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
103 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
104 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
105 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
106 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
107 	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
108 
109 /* Additional timesync values. */
110 #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
111 #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
112 #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
113 #define I40E_PRTTSYN_TSYNENA     0x80000000
114 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
115 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
116 
117 /**
118  * Below are values for writing un-exposed registers, as suggested
119  * by silicon experts.
120  */
121 /* Destination MAC address */
122 #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
123 /* Source MAC address */
124 #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
125 /* Outer (S-Tag) VLAN tag in the outer L2 header */
126 #define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
127 /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
128 #define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
129 /* Single VLAN tag in the inner L2 header */
130 #define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
131 /* Source IPv4 address */
132 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
133 /* Destination IPv4 address */
134 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
135 /* Source IPv4 address for X722 */
136 #define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
137 /* Destination IPv4 address for X722 */
138 #define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
139 /* IPv4 Protocol for X722 */
140 #define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
141 /* IPv4 Time to Live for X722 */
142 #define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
143 /* IPv4 Type of Service (TOS) */
144 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
145 /* IPv4 Protocol */
146 #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
147 /* IPv4 Time to Live */
148 #define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
149 /* Source IPv6 address */
150 #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
151 /* Destination IPv6 address */
152 #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
153 /* IPv6 Traffic Class (TC) */
154 #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
155 /* IPv6 Next Header */
156 #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
157 /* IPv6 Hop Limit */
158 #define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
159 /* Source L4 port */
160 #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
161 /* Destination L4 port */
162 #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
163 /* SCTP verification tag */
164 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
165 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
166 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
167 /* Source port of tunneling UDP */
168 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
169 /* Destination port of tunneling UDP */
170 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
171 /* UDP Tunneling ID, NVGRE/GRE key */
172 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
173 /* Last ether type */
174 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
175 /* Tunneling outer destination IPv4 address */
176 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
177 /* Tunneling outer destination IPv6 address */
178 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
179 /* 1st word of flex payload */
180 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
181 /* 2nd word of flex payload */
182 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
183 /* 3rd word of flex payload */
184 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
185 /* 4th word of flex payload */
186 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
187 /* 5th word of flex payload */
188 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
189 /* 6th word of flex payload */
190 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
191 /* 7th word of flex payload */
192 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
193 /* 8th word of flex payload */
194 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
195 /* all 8 words of flex payload */
196 #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
197 #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
198 
199 #define I40E_TRANSLATE_INSET 0
200 #define I40E_TRANSLATE_REG   1
201 
202 #define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
203 #define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
204 #define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
205 #define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
206 #define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
207 #define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
208 
209 /* PCI offset for querying capability */
210 #define PCI_DEV_CAP_REG            0xA4
211 /* PCI offset for enabling/disabling Extended Tag */
212 #define PCI_DEV_CTRL_REG           0xA8
213 /* Bit mask of Extended Tag capability */
214 #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
215 /* Bit shift of Extended Tag enable/disable */
216 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
217 /* Bit mask of Extended Tag enable/disable */
218 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
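
/*
 * A minimal read-modify-write sketch using the offsets above (an
 * illustration, not taken verbatim from this file; it assumes a valid
 * "struct rte_pci_device *pci_dev"):
 *
 *   uint32_t buf = 0;
 *
 *   rte_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CAP_REG);
 *   if (buf & PCI_DEV_CAP_EXT_TAG_MASK) {
 *           rte_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG);
 *           buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
 *           rte_pci_write_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CTRL_REG);
 *   }
 */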
219 
220 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
221 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
222 static int i40e_dev_configure(struct rte_eth_dev *dev);
223 static int i40e_dev_start(struct rte_eth_dev *dev);
224 static void i40e_dev_stop(struct rte_eth_dev *dev);
225 static void i40e_dev_close(struct rte_eth_dev *dev);
226 static int  i40e_dev_reset(struct rte_eth_dev *dev);
227 static int i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
228 static int i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
229 static int i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
230 static int i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
231 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
232 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
233 static int i40e_dev_stats_get(struct rte_eth_dev *dev,
234 			       struct rte_eth_stats *stats);
235 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
236 			       struct rte_eth_xstat *xstats, unsigned n);
237 static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
238 				     struct rte_eth_xstat_name *xstats_names,
239 				     unsigned limit);
240 static int i40e_dev_stats_reset(struct rte_eth_dev *dev);
241 static int i40e_fw_version_get(struct rte_eth_dev *dev,
242 				char *fw_version, size_t fw_size);
243 static int i40e_dev_info_get(struct rte_eth_dev *dev,
244 			     struct rte_eth_dev_info *dev_info);
245 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
246 				uint16_t vlan_id,
247 				int on);
248 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
249 			      enum rte_vlan_type vlan_type,
250 			      uint16_t tpid);
251 static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
252 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
253 				      uint16_t queue,
254 				      int on);
255 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
256 static int i40e_dev_led_on(struct rte_eth_dev *dev);
257 static int i40e_dev_led_off(struct rte_eth_dev *dev);
258 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
259 			      struct rte_eth_fc_conf *fc_conf);
260 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
261 			      struct rte_eth_fc_conf *fc_conf);
262 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
263 				       struct rte_eth_pfc_conf *pfc_conf);
264 static int i40e_macaddr_add(struct rte_eth_dev *dev,
265 			    struct rte_ether_addr *mac_addr,
266 			    uint32_t index,
267 			    uint32_t pool);
268 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
269 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
270 				    struct rte_eth_rss_reta_entry64 *reta_conf,
271 				    uint16_t reta_size);
272 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
273 				   struct rte_eth_rss_reta_entry64 *reta_conf,
274 				   uint16_t reta_size);
275 
276 static int i40e_get_cap(struct i40e_hw *hw);
277 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
278 static int i40e_pf_setup(struct i40e_pf *pf);
279 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
280 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
281 static int i40e_dcb_setup(struct rte_eth_dev *dev);
282 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
283 		bool offset_loaded, uint64_t *offset, uint64_t *stat);
284 static void i40e_stat_update_48(struct i40e_hw *hw,
285 			       uint32_t hireg,
286 			       uint32_t loreg,
287 			       bool offset_loaded,
288 			       uint64_t *offset,
289 			       uint64_t *stat);
290 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
291 static void i40e_dev_interrupt_handler(void *param);
292 static void i40e_dev_alarm_handler(void *param);
293 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
294 				uint32_t base, uint32_t num);
295 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
296 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
297 			uint32_t base);
298 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
299 			uint16_t num);
300 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
301 static int i40e_veb_release(struct i40e_veb *veb);
302 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
303 						struct i40e_vsi *vsi);
304 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
305 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
306 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
307 					     struct i40e_macvlan_filter *mv_f,
308 					     int num,
309 					     uint16_t vlan);
310 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
311 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
312 				    struct rte_eth_rss_conf *rss_conf);
313 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
314 				      struct rte_eth_rss_conf *rss_conf);
315 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
316 					struct rte_eth_udp_tunnel *udp_tunnel);
317 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
318 					struct rte_eth_udp_tunnel *udp_tunnel);
319 static void i40e_filter_input_set_init(struct i40e_pf *pf);
320 static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
321 				enum rte_filter_op filter_op,
322 				void *arg);
323 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
324 				enum rte_filter_type filter_type,
325 				enum rte_filter_op filter_op,
326 				void *arg);
327 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
328 				  struct rte_eth_dcb_info *dcb_info);
329 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
330 static void i40e_configure_registers(struct i40e_hw *hw);
331 static void i40e_hw_init(struct rte_eth_dev *dev);
332 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
333 static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
334 						     uint16_t seid,
335 						     uint16_t rule_type,
336 						     uint16_t *entries,
337 						     uint16_t count,
338 						     uint16_t rule_id);
339 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
340 			struct rte_eth_mirror_conf *mirror_conf,
341 			uint8_t sw_id, uint8_t on);
342 static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
343 
344 static int i40e_timesync_enable(struct rte_eth_dev *dev);
345 static int i40e_timesync_disable(struct rte_eth_dev *dev);
346 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
347 					   struct timespec *timestamp,
348 					   uint32_t flags);
349 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
350 					   struct timespec *timestamp);
351 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
352 
353 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
354 
355 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
356 				   struct timespec *timestamp);
357 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
358 				    const struct timespec *timestamp);
359 
360 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
361 					 uint16_t queue_id);
362 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
363 					  uint16_t queue_id);
364 
365 static int i40e_get_regs(struct rte_eth_dev *dev,
366 			 struct rte_dev_reg_info *regs);
367 
368 static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
369 
370 static int i40e_get_eeprom(struct rte_eth_dev *dev,
371 			   struct rte_dev_eeprom_info *eeprom);
372 
373 static int i40e_get_module_info(struct rte_eth_dev *dev,
374 				struct rte_eth_dev_module_info *modinfo);
375 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
376 				  struct rte_dev_eeprom_info *info);
377 
378 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
379 				      struct rte_ether_addr *mac_addr);
380 
381 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
382 
383 static int i40e_ethertype_filter_convert(
384 	const struct rte_eth_ethertype_filter *input,
385 	struct i40e_ethertype_filter *filter);
386 static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
387 				   struct i40e_ethertype_filter *filter);
388 
389 static int i40e_tunnel_filter_convert(
390 	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
391 	struct i40e_tunnel_filter *tunnel_filter);
392 static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
393 				struct i40e_tunnel_filter *tunnel_filter);
394 static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
395 
396 static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
397 static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
398 static void i40e_filter_restore(struct i40e_pf *pf);
399 static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
400 
401 int i40e_logtype_init;
402 int i40e_logtype_driver;
403 #ifdef RTE_LIBRTE_I40E_DEBUG_RX
404 int i40e_logtype_rx;
405 #endif
406 #ifdef RTE_LIBRTE_I40E_DEBUG_TX
407 int i40e_logtype_tx;
408 #endif
409 #ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
410 int i40e_logtype_tx_free;
411 #endif
412 
413 static const char *const valid_keys[] = {
414 	ETH_I40E_FLOATING_VEB_ARG,
415 	ETH_I40E_FLOATING_VEB_LIST_ARG,
416 	ETH_I40E_SUPPORT_MULTI_DRIVER,
417 	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
418 	ETH_I40E_USE_LATEST_VEC,
419 	ETH_I40E_VF_MSG_CFG,
420 	NULL};
421 
422 static const struct rte_pci_id pci_id_i40e_map[] = {
423 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
424 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
425 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
426 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
427 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
428 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
429 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
430 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
431 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
432 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
433 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
434 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
435 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
436 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
437 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
438 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
439 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
440 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
441 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
442 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
443 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X710_N3000) },
444 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_XXV710_N3000) },
445 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC) },
446 	{ .vendor_id = 0, /* sentinel */ },
447 };
448 
449 static const struct eth_dev_ops i40e_eth_dev_ops = {
450 	.dev_configure                = i40e_dev_configure,
451 	.dev_start                    = i40e_dev_start,
452 	.dev_stop                     = i40e_dev_stop,
453 	.dev_close                    = i40e_dev_close,
454 	.dev_reset		      = i40e_dev_reset,
455 	.promiscuous_enable           = i40e_dev_promiscuous_enable,
456 	.promiscuous_disable          = i40e_dev_promiscuous_disable,
457 	.allmulticast_enable          = i40e_dev_allmulticast_enable,
458 	.allmulticast_disable         = i40e_dev_allmulticast_disable,
459 	.dev_set_link_up              = i40e_dev_set_link_up,
460 	.dev_set_link_down            = i40e_dev_set_link_down,
461 	.link_update                  = i40e_dev_link_update,
462 	.stats_get                    = i40e_dev_stats_get,
463 	.xstats_get                   = i40e_dev_xstats_get,
464 	.xstats_get_names             = i40e_dev_xstats_get_names,
465 	.stats_reset                  = i40e_dev_stats_reset,
466 	.xstats_reset                 = i40e_dev_stats_reset,
467 	.fw_version_get               = i40e_fw_version_get,
468 	.dev_infos_get                = i40e_dev_info_get,
469 	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
470 	.vlan_filter_set              = i40e_vlan_filter_set,
471 	.vlan_tpid_set                = i40e_vlan_tpid_set,
472 	.vlan_offload_set             = i40e_vlan_offload_set,
473 	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
474 	.vlan_pvid_set                = i40e_vlan_pvid_set,
475 	.rx_queue_start               = i40e_dev_rx_queue_start,
476 	.rx_queue_stop                = i40e_dev_rx_queue_stop,
477 	.tx_queue_start               = i40e_dev_tx_queue_start,
478 	.tx_queue_stop                = i40e_dev_tx_queue_stop,
479 	.rx_queue_setup               = i40e_dev_rx_queue_setup,
480 	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
481 	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
482 	.rx_queue_release             = i40e_dev_rx_queue_release,
483 	.rx_queue_count               = i40e_dev_rx_queue_count,
484 	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
485 	.rx_descriptor_status         = i40e_dev_rx_descriptor_status,
486 	.tx_descriptor_status         = i40e_dev_tx_descriptor_status,
487 	.tx_queue_setup               = i40e_dev_tx_queue_setup,
488 	.tx_queue_release             = i40e_dev_tx_queue_release,
489 	.dev_led_on                   = i40e_dev_led_on,
490 	.dev_led_off                  = i40e_dev_led_off,
491 	.flow_ctrl_get                = i40e_flow_ctrl_get,
492 	.flow_ctrl_set                = i40e_flow_ctrl_set,
493 	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
494 	.mac_addr_add                 = i40e_macaddr_add,
495 	.mac_addr_remove              = i40e_macaddr_remove,
496 	.reta_update                  = i40e_dev_rss_reta_update,
497 	.reta_query                   = i40e_dev_rss_reta_query,
498 	.rss_hash_update              = i40e_dev_rss_hash_update,
499 	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
500 	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
501 	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
502 	.filter_ctrl                  = i40e_dev_filter_ctrl,
503 	.rxq_info_get                 = i40e_rxq_info_get,
504 	.txq_info_get                 = i40e_txq_info_get,
505 	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
506 	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
507 	.mirror_rule_set              = i40e_mirror_rule_set,
508 	.mirror_rule_reset            = i40e_mirror_rule_reset,
509 	.timesync_enable              = i40e_timesync_enable,
510 	.timesync_disable             = i40e_timesync_disable,
511 	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
512 	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
513 	.get_dcb_info                 = i40e_dev_get_dcb_info,
514 	.timesync_adjust_time         = i40e_timesync_adjust_time,
515 	.timesync_read_time           = i40e_timesync_read_time,
516 	.timesync_write_time          = i40e_timesync_write_time,
517 	.get_reg                      = i40e_get_regs,
518 	.get_eeprom_length            = i40e_get_eeprom_length,
519 	.get_eeprom                   = i40e_get_eeprom,
520 	.get_module_info              = i40e_get_module_info,
521 	.get_module_eeprom            = i40e_get_module_eeprom,
522 	.mac_addr_set                 = i40e_set_default_mac_addr,
523 	.mtu_set                      = i40e_dev_mtu_set,
524 	.tm_ops_get                   = i40e_tm_ops_get,
525 };
526 
527 /* Store statistics names and their offsets in the stats structure */
528 struct rte_i40e_xstats_name_off {
529 	char name[RTE_ETH_XSTATS_NAME_SIZE];
530 	unsigned offset;
531 };
532 
533 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
534 	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
535 	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
536 	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
537 	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
538 	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
539 		rx_unknown_protocol)},
540 	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
541 	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
542 	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
543 	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
544 };
545 
546 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
547 		sizeof(rte_i40e_stats_strings[0]))
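
/*
 * Sketch of how a name/offset table like this is typically consumed when
 * filling xstats (illustrative only; "stats" stands for an already populated
 * struct i40e_eth_stats and "xstats" for the caller-provided array):
 *
 *   for (i = 0; i < I40E_NB_ETH_XSTATS; i++)
 *           xstats[i].value = *(uint64_t *)(((char *)&stats) +
 *                   rte_i40e_stats_strings[i].offset);
 */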
548 
549 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
550 	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
551 		tx_dropped_link_down)},
552 	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
553 	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
554 		illegal_bytes)},
555 	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
556 	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
557 		mac_local_faults)},
558 	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
559 		mac_remote_faults)},
560 	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
561 		rx_length_errors)},
562 	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
563 	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
564 	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
565 	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
566 	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
567 	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
568 		rx_size_127)},
569 	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
570 		rx_size_255)},
571 	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
572 		rx_size_511)},
573 	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
574 		rx_size_1023)},
575 	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
576 		rx_size_1522)},
577 	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
578 		rx_size_big)},
579 	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
580 		rx_undersize)},
581 	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
582 		rx_oversize)},
583 	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
584 		mac_short_packet_dropped)},
585 	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
586 		rx_fragments)},
587 	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
588 	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
589 	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
590 		tx_size_127)},
591 	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
592 		tx_size_255)},
593 	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
594 		tx_size_511)},
595 	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
596 		tx_size_1023)},
597 	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
598 		tx_size_1522)},
599 	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
600 		tx_size_big)},
601 	{"rx_flow_director_atr_match_packets",
602 		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
603 	{"rx_flow_director_sb_match_packets",
604 		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
605 	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
606 		tx_lpi_status)},
607 	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
608 		rx_lpi_status)},
609 	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
610 		tx_lpi_count)},
611 	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
612 		rx_lpi_count)},
613 };
614 
615 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
616 		sizeof(rte_i40e_hw_port_strings[0]))
617 
618 static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
619 	{"xon_packets", offsetof(struct i40e_hw_port_stats,
620 		priority_xon_rx)},
621 	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
622 		priority_xoff_rx)},
623 };
624 
625 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
626 		sizeof(rte_i40e_rxq_prio_strings[0]))
627 
628 static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
629 	{"xon_packets", offsetof(struct i40e_hw_port_stats,
630 		priority_xon_tx)},
631 	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
632 		priority_xoff_tx)},
633 	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
634 		priority_xon_2_xoff)},
635 };
636 
637 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
638 		sizeof(rte_i40e_txq_prio_strings[0]))
639 
640 static int
641 eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
642 	struct rte_pci_device *pci_dev)
643 {
644 	char name[RTE_ETH_NAME_MAX_LEN];
645 	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
646 	int i, retval;
647 
648 	if (pci_dev->device.devargs) {
649 		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
650 				&eth_da);
651 		if (retval)
652 			return retval;
653 	}
654 
655 	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
656 		sizeof(struct i40e_adapter),
657 		eth_dev_pci_specific_init, pci_dev,
658 		eth_i40e_dev_init, NULL);
659 
660 	if (retval || eth_da.nb_representor_ports < 1)
661 		return retval;
662 
663 	/* probe VF representor ports */
664 	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
665 		pci_dev->device.name);
666 
667 	if (pf_ethdev == NULL)
668 		return -ENODEV;
669 
670 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
671 		struct i40e_vf_representor representor = {
672 			.vf_id = eth_da.representor_ports[i],
673 			.switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
674 				pf_ethdev->data->dev_private)->switch_domain_id,
675 			.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
676 				pf_ethdev->data->dev_private)
677 		};
678 
679 		/* representor port name: net_<bdf>_representor_<vf id> */
680 		snprintf(name, sizeof(name), "net_%s_representor_%d",
681 			pci_dev->device.name, eth_da.representor_ports[i]);
682 
683 		retval = rte_eth_dev_create(&pci_dev->device, name,
684 			sizeof(struct i40e_vf_representor), NULL, NULL,
685 			i40e_vf_representor_init, &representor);
686 
687 		if (retval)
688 			PMD_DRV_LOG(ERR, "failed to create i40e vf "
689 				"representor %s.", name);
690 	}
691 
692 	return 0;
693 }
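
/*
 * Illustrative devargs for the representor handling above (not taken from
 * this file): probing with, e.g.,
 *
 *   -w 0000:02:00.0,representor=[0-2]
 *
 * lets rte_eth_devargs_parse() fill eth_da.representor_ports with VF ids
 * 0, 1 and 2, and one "net_<bdf>_representor_<vf id>" port is created per
 * entry.
 */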
694 
695 static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
696 {
697 	struct rte_eth_dev *ethdev;
698 
699 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
700 	if (!ethdev)
701 		return 0;
702 
703 	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
704 		return rte_eth_dev_pci_generic_remove(pci_dev,
705 					i40e_vf_representor_uninit);
706 	else
707 		return rte_eth_dev_pci_generic_remove(pci_dev,
708 						eth_i40e_dev_uninit);
709 }
710 
711 static struct rte_pci_driver rte_i40e_pmd = {
712 	.id_table = pci_id_i40e_map,
713 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
714 	.probe = eth_i40e_pci_probe,
715 	.remove = eth_i40e_pci_remove,
716 };
717 
718 static inline void
719 i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
720 			 uint32_t reg_val)
721 {
722 	uint32_t ori_reg_val;
723 	struct rte_eth_dev *dev;
724 
725 	ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
726 	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
727 	i40e_write_rx_ctl(hw, reg_addr, reg_val);
728 	if (ori_reg_val != reg_val)
729 		PMD_DRV_LOG(WARNING,
730 			    "i40e device %s changed global register [0x%08x]."
731 			    " original: 0x%08x, new: 0x%08x",
732 			    dev->device->name, reg_addr, ori_reg_val, reg_val);
733 }
734 
735 RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
736 RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
737 RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
738 
739 #ifndef I40E_GLQF_ORT
740 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
741 #endif
742 #ifndef I40E_GLQF_PIT
743 #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
744 #endif
745 #ifndef I40E_GLQF_L3_MAP
746 #define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
747 #endif
748 
749 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
750 {
751 	/*
752 	 * Initialize registers for parsing the packet type of QinQ.
753 	 * This should be removed from the code once a proper
754 	 * configuration API is added, to avoid configuration conflicts
755 	 * between ports of the same device.
756 	 */
757 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
758 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
759 }
760 
761 static inline void i40e_config_automask(struct i40e_pf *pf)
762 {
763 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
764 	uint32_t val;
765 
766 	/* INTENA flag is not auto-cleared for interrupt */
767 	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
768 	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
769 		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
770 
771 	/* If multi-driver support is enabled, the PF will use INT0. */
772 	if (!pf->support_multi_driver)
773 		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
774 
775 	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
776 }
777 
778 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
779 
780 /*
781  * Add an ethertype filter to drop all flow control frames transmitted
782  * from VSIs.
783  */
784 static void
785 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
786 {
787 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
788 	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
789 			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
790 			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
791 	int ret;
792 
793 	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
794 				I40E_FLOW_CONTROL_ETHERTYPE, flags,
795 				pf->main_vsi_seid, 0,
796 				TRUE, NULL, NULL);
797 	if (ret)
798 		PMD_INIT_LOG(ERR,
799 			"Failed to add filter to drop flow control frames from VSIs.");
800 }
801 
802 static int
803 floating_veb_list_handler(__rte_unused const char *key,
804 			  const char *floating_veb_value,
805 			  void *opaque)
806 {
807 	int idx = 0;
808 	unsigned int count = 0;
809 	char *end = NULL;
810 	int min, max;
811 	bool *vf_floating_veb = opaque;
812 
813 	while (isblank(*floating_veb_value))
814 		floating_veb_value++;
815 
816 	/* Reset floating VEB configuration for VFs */
817 	for (idx = 0; idx < I40E_MAX_VF; idx++)
818 		vf_floating_veb[idx] = false;
819 
820 	min = I40E_MAX_VF;
821 	do {
822 		while (isblank(*floating_veb_value))
823 			floating_veb_value++;
824 		if (*floating_veb_value == '\0')
825 			return -1;
826 		errno = 0;
827 		idx = strtoul(floating_veb_value, &end, 10);
828 		if (errno || end == NULL)
829 			return -1;
830 		while (isblank(*end))
831 			end++;
832 		if (*end == '-') {
833 			min = idx;
834 		} else if ((*end == ';') || (*end == '\0')) {
835 			max = idx;
836 			if (min == I40E_MAX_VF)
837 				min = idx;
838 			if (max >= I40E_MAX_VF)
839 				max = I40E_MAX_VF - 1;
840 			for (idx = min; idx <= max; idx++) {
841 				vf_floating_veb[idx] = true;
842 				count++;
843 			}
844 			min = I40E_MAX_VF;
845 		} else {
846 			return -1;
847 		}
848 		floating_veb_value = end + 1;
849 	} while (*end != '\0');
850 
851 	if (count == 0)
852 		return -1;
853 
854 	return 0;
855 }
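
/*
 * Accepted format for the list parsed above: single VF ids and inclusive
 * ranges separated by ';'. As an illustration,
 *
 *   floating_veb_list=1;3-5;7
 *
 * marks VFs 1, 3, 4, 5 and 7 for attachment to the floating VEB (the ';'
 * characters may need quoting or escaping on the shell command line).
 */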
856 
857 static void
858 config_vf_floating_veb(struct rte_devargs *devargs,
859 		       uint16_t floating_veb,
860 		       bool *vf_floating_veb)
861 {
862 	struct rte_kvargs *kvlist;
863 	int i;
864 	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
865 
866 	if (!floating_veb)
867 		return;
868 	/* All the VFs attach to the floating VEB by default
869 	 * when the floating VEB is enabled.
870 	 */
871 	for (i = 0; i < I40E_MAX_VF; i++)
872 		vf_floating_veb[i] = true;
873 
874 	if (devargs == NULL)
875 		return;
876 
877 	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
878 	if (kvlist == NULL)
879 		return;
880 
881 	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
882 		rte_kvargs_free(kvlist);
883 		return;
884 	}
885 	/* When the floating_veb_list parameter exists, all the VFs
886 	 * will first attach to the legacy VEB, and are then moved to
887 	 * the floating VEB according to the floating_veb_list.
888 	 */
889 	if (rte_kvargs_process(kvlist, floating_veb_list,
890 			       floating_veb_list_handler,
891 			       vf_floating_veb) < 0) {
892 		rte_kvargs_free(kvlist);
893 		return;
894 	}
895 	rte_kvargs_free(kvlist);
896 }
897 
898 static int
899 i40e_check_floating_handler(__rte_unused const char *key,
900 			    const char *value,
901 			    __rte_unused void *opaque)
902 {
903 	if (strcmp(value, "1"))
904 		return -1;
905 
906 	return 0;
907 }
908 
909 static int
910 is_floating_veb_supported(struct rte_devargs *devargs)
911 {
912 	struct rte_kvargs *kvlist;
913 	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
914 
915 	if (devargs == NULL)
916 		return 0;
917 
918 	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
919 	if (kvlist == NULL)
920 		return 0;
921 
922 	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
923 		rte_kvargs_free(kvlist);
924 		return 0;
925 	}
926 	/* Floating VEB is enabled when there's key-value:
927 	 * enable_floating_veb=1
928 	 */
929 	if (rte_kvargs_process(kvlist, floating_veb_key,
930 			       i40e_check_floating_handler, NULL) < 0) {
931 		rte_kvargs_free(kvlist);
932 		return 0;
933 	}
934 	rte_kvargs_free(kvlist);
935 
936 	return 1;
937 }
938 
939 static void
940 config_floating_veb(struct rte_eth_dev *dev)
941 {
942 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
943 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
944 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
945 
946 	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
947 
948 	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
949 		pf->floating_veb =
950 			is_floating_veb_supported(pci_dev->device.devargs);
951 		config_vf_floating_veb(pci_dev->device.devargs,
952 				       pf->floating_veb,
953 				       pf->floating_veb_list);
954 	} else {
955 		pf->floating_veb = false;
956 	}
957 }
958 
959 #define I40E_L2_TAGS_S_TAG_SHIFT 1
960 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
961 
962 static int
963 i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
964 {
965 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
966 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
967 	char ethertype_hash_name[RTE_HASH_NAMESIZE];
968 	int ret;
969 
970 	struct rte_hash_parameters ethertype_hash_params = {
971 		.name = ethertype_hash_name,
972 		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
973 		.key_len = sizeof(struct i40e_ethertype_filter_input),
974 		.hash_func = rte_hash_crc,
975 		.hash_func_init_val = 0,
976 		.socket_id = rte_socket_id(),
977 	};
978 
979 	/* Initialize ethertype filter rule list and hash */
980 	TAILQ_INIT(&ethertype_rule->ethertype_list);
981 	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
982 		 "ethertype_%s", dev->device->name);
983 	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
984 	if (!ethertype_rule->hash_table) {
985 		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
986 		return -EINVAL;
987 	}
988 	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
989 				       sizeof(struct i40e_ethertype_filter *) *
990 				       I40E_MAX_ETHERTYPE_FILTER_NUM,
991 				       0);
992 	if (!ethertype_rule->hash_map) {
993 		PMD_INIT_LOG(ERR,
994 			     "Failed to allocate memory for ethertype hash map!");
995 		ret = -ENOMEM;
996 		goto err_ethertype_hash_map_alloc;
997 	}
998 
999 	return 0;
1000 
1001 err_ethertype_hash_map_alloc:
1002 	rte_hash_free(ethertype_rule->hash_table);
1003 
1004 	return ret;
1005 }
1006 
1007 static int
1008 i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
1009 {
1010 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1011 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1012 	char tunnel_hash_name[RTE_HASH_NAMESIZE];
1013 	int ret;
1014 
1015 	struct rte_hash_parameters tunnel_hash_params = {
1016 		.name = tunnel_hash_name,
1017 		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
1018 		.key_len = sizeof(struct i40e_tunnel_filter_input),
1019 		.hash_func = rte_hash_crc,
1020 		.hash_func_init_val = 0,
1021 		.socket_id = rte_socket_id(),
1022 	};
1023 
1024 	/* Initialize tunnel filter rule list and hash */
1025 	TAILQ_INIT(&tunnel_rule->tunnel_list);
1026 	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
1027 		 "tunnel_%s", dev->device->name);
1028 	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
1029 	if (!tunnel_rule->hash_table) {
1030 		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
1031 		return -EINVAL;
1032 	}
1033 	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
1034 				    sizeof(struct i40e_tunnel_filter *) *
1035 				    I40E_MAX_TUNNEL_FILTER_NUM,
1036 				    0);
1037 	if (!tunnel_rule->hash_map) {
1038 		PMD_INIT_LOG(ERR,
1039 			     "Failed to allocate memory for tunnel hash map!");
1040 		ret = -ENOMEM;
1041 		goto err_tunnel_hash_map_alloc;
1042 	}
1043 
1044 	return 0;
1045 
1046 err_tunnel_hash_map_alloc:
1047 	rte_hash_free(tunnel_rule->hash_table);
1048 
1049 	return ret;
1050 }
1051 
1052 static int
1053 i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
1054 {
1055 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1056 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1057 	char fdir_hash_name[RTE_HASH_NAMESIZE];
1058 	int ret;
1059 
1060 	struct rte_hash_parameters fdir_hash_params = {
1061 		.name = fdir_hash_name,
1062 		.entries = I40E_MAX_FDIR_FILTER_NUM,
1063 		.key_len = sizeof(struct i40e_fdir_input),
1064 		.hash_func = rte_hash_crc,
1065 		.hash_func_init_val = 0,
1066 		.socket_id = rte_socket_id(),
1067 	};
1068 
1069 	/* Initialize flow director filter rule list and hash */
1070 	TAILQ_INIT(&fdir_info->fdir_list);
1071 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1072 		 "fdir_%s", dev->device->name);
1073 	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
1074 	if (!fdir_info->hash_table) {
1075 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1076 		return -EINVAL;
1077 	}
1078 	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
1079 					  sizeof(struct i40e_fdir_filter *) *
1080 					  I40E_MAX_FDIR_FILTER_NUM,
1081 					  0);
1082 	if (!fdir_info->hash_map) {
1083 		PMD_INIT_LOG(ERR,
1084 			     "Failed to allocate memory for fdir hash map!");
1085 		ret = -ENOMEM;
1086 		goto err_fdir_hash_map_alloc;
1087 	}
1088 	return 0;
1089 
1090 err_fdir_hash_map_alloc:
1091 	rte_hash_free(fdir_info->hash_table);
1092 
1093 	return ret;
1094 }
1095 
1096 static void
1097 i40e_init_customized_info(struct i40e_pf *pf)
1098 {
1099 	int i;
1100 
1101 	/* Initialize customized pctype */
1102 	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1103 		pf->customized_pctype[i].index = i;
1104 		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1105 		pf->customized_pctype[i].valid = false;
1106 	}
1107 
1108 	pf->gtp_support = false;
1109 }
1110 
1111 void
1112 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1113 {
1114 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1115 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1116 	struct i40e_queue_regions *info = &pf->queue_region;
1117 	uint16_t i;
1118 
1119 	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1120 		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1121 
1122 	memset(info, 0, sizeof(struct i40e_queue_regions));
1123 }
1124 
1125 static int
1126 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1127 			       const char *value,
1128 			       void *opaque)
1129 {
1130 	struct i40e_pf *pf;
1131 	unsigned long support_multi_driver;
1132 	char *end;
1133 
1134 	pf = (struct i40e_pf *)opaque;
1135 
1136 	errno = 0;
1137 	support_multi_driver = strtoul(value, &end, 10);
1138 	if (errno != 0 || end == value || *end != 0) {
1139 		PMD_DRV_LOG(WARNING, "Wrong global configuration");
1140 		return -(EINVAL);
1141 	}
1142 
1143 	if (support_multi_driver == 1 || support_multi_driver == 0)
1144 		pf->support_multi_driver = (bool)support_multi_driver;
1145 	else
1146 		PMD_DRV_LOG(WARNING,
1147 			    "%s must be 1 or 0; enabling global configuration by default",
1148 			    ETH_I40E_SUPPORT_MULTI_DRIVER);
1149 	return 0;
1150 }
1151 
1152 static int
1153 i40e_support_multi_driver(struct rte_eth_dev *dev)
1154 {
1155 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1156 	struct rte_kvargs *kvlist;
1157 	int kvargs_count;
1158 
1159 	/* Enable global configuration by default */
1160 	pf->support_multi_driver = false;
1161 
1162 	if (!dev->device->devargs)
1163 		return 0;
1164 
1165 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1166 	if (!kvlist)
1167 		return -EINVAL;
1168 
1169 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1170 	if (!kvargs_count) {
1171 		rte_kvargs_free(kvlist);
1172 		return 0;
1173 	}
1174 
1175 	if (kvargs_count > 1)
1176 		PMD_DRV_LOG(WARNING, "Argument \"%s\" was supplied more than once; "
1177 			    "only the first invalid or last valid value is used!",
1178 			    ETH_I40E_SUPPORT_MULTI_DRIVER);
1179 
1180 	if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1181 			       i40e_parse_multi_drv_handler, pf) < 0) {
1182 		rte_kvargs_free(kvlist);
1183 		return -EINVAL;
1184 	}
1185 
1186 	rte_kvargs_free(kvlist);
1187 	return 0;
1188 }
1189 
1190 static int
1191 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1192 				    uint32_t reg_addr, uint64_t reg_val,
1193 				    struct i40e_asq_cmd_details *cmd_details)
1194 {
1195 	uint64_t ori_reg_val;
1196 	struct rte_eth_dev *dev;
1197 	int ret;
1198 
1199 	ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1200 	if (ret != I40E_SUCCESS) {
1201 		PMD_DRV_LOG(ERR,
1202 			    "Fail to debug read from 0x%08x",
1203 			    reg_addr);
1204 		return -EIO;
1205 	}
1206 	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
1207 
1208 	if (ori_reg_val != reg_val)
1209 		PMD_DRV_LOG(WARNING,
1210 			    "i40e device %s changed global register [0x%08x]."
1211 			    " original: 0x%"PRIx64", after: 0x%"PRIx64,
1212 			    dev->device->name, reg_addr, ori_reg_val, reg_val);
1213 
1214 	return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1215 }
1216 
1217 static int
1218 i40e_parse_latest_vec_handler(__rte_unused const char *key,
1219 				const char *value,
1220 				void *opaque)
1221 {
1222 	struct i40e_adapter *ad = opaque;
1223 	int use_latest_vec;
1224 
1225 	use_latest_vec = atoi(value);
1226 
1227 	if (use_latest_vec != 0 && use_latest_vec != 1)
1228 		PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
1229 
1230 	ad->use_latest_vec = (uint8_t)use_latest_vec;
1231 
1232 	return 0;
1233 }
1234 
1235 static int
1236 i40e_use_latest_vec(struct rte_eth_dev *dev)
1237 {
1238 	struct i40e_adapter *ad =
1239 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1240 	struct rte_kvargs *kvlist;
1241 	int kvargs_count;
1242 
1243 	ad->use_latest_vec = false;
1244 
1245 	if (!dev->device->devargs)
1246 		return 0;
1247 
1248 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1249 	if (!kvlist)
1250 		return -EINVAL;
1251 
1252 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
1253 	if (!kvargs_count) {
1254 		rte_kvargs_free(kvlist);
1255 		return 0;
1256 	}
1257 
1258 	if (kvargs_count > 1)
1259 		PMD_DRV_LOG(WARNING, "Argument \"%s\" was supplied more than once; "
1260 			    "only the first invalid or last valid value is used!",
1261 			    ETH_I40E_USE_LATEST_VEC);
1262 
1263 	if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
1264 				i40e_parse_latest_vec_handler, ad) < 0) {
1265 		rte_kvargs_free(kvlist);
1266 		return -EINVAL;
1267 	}
1268 
1269 	rte_kvargs_free(kvlist);
1270 	return 0;
1271 }
1272 
1273 static int
1274 read_vf_msg_config(__rte_unused const char *key,
1275 			       const char *value,
1276 			       void *opaque)
1277 {
1278 	struct i40e_vf_msg_cfg *cfg = opaque;
1279 
1280 	if (sscanf(value, "%u@%u:%u", &cfg->max_msg, &cfg->period,
1281 			&cfg->ignore_second) != 3) {
1282 		memset(cfg, 0, sizeof(*cfg));
1283 		PMD_DRV_LOG(ERR, "format error! example: "
1284 				"%s=60@120:180", ETH_I40E_VF_MSG_CFG);
1285 		return -EINVAL;
1286 	}
1287 
1288 	/*
1289 	 * If the message validation function has been enabled, 'period'
1290 	 * and 'ignore_second' must be greater than 0.
1291 	 */
1292 	if (cfg->max_msg && (!cfg->period || !cfg->ignore_second)) {
1293 		memset(cfg, 0, sizeof(*cfg));
1294 		PMD_DRV_LOG(ERR, "%s error! The second and third"
1295 				" numbers must be greater than 0!",
1296 				ETH_I40E_VF_MSG_CFG);
1297 		return -EINVAL;
1298 	}
1299 
1300 	return 0;
1301 }
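
/*
 * Illustrative reading of the format parsed above: with vf_msg_cfg=60@120:180
 * the fields become max_msg=60, period=120 and ignore_second=180, i.e., as
 * the field names suggest, a VF sending more than 60 messages within a
 * 120 second period has its messages ignored for 180 seconds.
 */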
1302 
1303 static int
1304 i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
1305 		struct i40e_vf_msg_cfg *msg_cfg)
1306 {
1307 	struct rte_kvargs *kvlist;
1308 	int kvargs_count;
1309 	int ret = 0;
1310 
1311 	memset(msg_cfg, 0, sizeof(*msg_cfg));
1312 
1313 	if (!dev->device->devargs)
1314 		return ret;
1315 
1316 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1317 	if (!kvlist)
1318 		return -EINVAL;
1319 
1320 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_VF_MSG_CFG);
1321 	if (!kvargs_count)
1322 		goto free_end;
1323 
1324 	if (kvargs_count > 1) {
1325 		PMD_DRV_LOG(ERR, "More than one argument \"%s\"!",
1326 				ETH_I40E_VF_MSG_CFG);
1327 		ret = -EINVAL;
1328 		goto free_end;
1329 	}
1330 
1331 	if (rte_kvargs_process(kvlist, ETH_I40E_VF_MSG_CFG,
1332 			read_vf_msg_config, msg_cfg) < 0)
1333 		ret = -EINVAL;
1334 
1335 free_end:
1336 	rte_kvargs_free(kvlist);
1337 	return ret;
1338 }
1339 
1340 #define I40E_ALARM_INTERVAL 50000 /* us */
1341 
1342 static int
1343 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1344 {
1345 	struct rte_pci_device *pci_dev;
1346 	struct rte_intr_handle *intr_handle;
1347 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1348 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1349 	struct i40e_vsi *vsi;
1350 	int ret;
1351 	uint32_t len, val;
1352 	uint8_t aq_fail = 0;
1353 
1354 	PMD_INIT_FUNC_TRACE();
1355 
1356 	dev->dev_ops = &i40e_eth_dev_ops;
1357 	dev->rx_pkt_burst = i40e_recv_pkts;
1358 	dev->tx_pkt_burst = i40e_xmit_pkts;
1359 	dev->tx_pkt_prepare = i40e_prep_pkts;
1360 
1361 	/* For secondary processes, we don't initialise any further, as the
1362 	 * primary has already done this work. Only check whether we need
1363 	 * different Rx/Tx functions. */
1364 	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1365 		i40e_set_rx_function(dev);
1366 		i40e_set_tx_function(dev);
1367 		return 0;
1368 	}
1369 	i40e_set_default_ptype_table(dev);
1370 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1371 	intr_handle = &pci_dev->intr_handle;
1372 
1373 	rte_eth_copy_pci_info(dev, pci_dev);
1374 
1375 	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1376 	pf->adapter->eth_dev = dev;
1377 	pf->dev_data = dev->data;
1378 
1379 	hw->back = I40E_PF_TO_ADAPTER(pf);
1380 	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1381 	if (!hw->hw_addr) {
1382 		PMD_INIT_LOG(ERR,
1383 			"Hardware is not available, as address is NULL");
1384 		return -ENODEV;
1385 	}
1386 
1387 	hw->vendor_id = pci_dev->id.vendor_id;
1388 	hw->device_id = pci_dev->id.device_id;
1389 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1390 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1391 	hw->bus.device = pci_dev->addr.devid;
1392 	hw->bus.func = pci_dev->addr.function;
1393 	hw->adapter_stopped = 0;
1394 	hw->adapter_closed = 0;
1395 
1396 	/* Init switch device pointer */
1397 	hw->switch_dev = NULL;
1398 
1399 	/*
1400 	 * Switch Tag value should not be identical to either the First Tag
1401 	 * or Second Tag values. So set something other than common Ethertype
1402 	 * for internal switching.
1403 	 */
1404 	hw->switch_tag = 0xffff;
1405 
1406 	val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1407 	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1408 		PMD_INIT_LOG(ERR, "\nERROR: "
1409 			"Firmware recovery mode detected. Limiting functionality.\n"
1410 			"Refer to the Intel(R) Ethernet Adapters and Devices "
1411 			"User Guide for details on firmware recovery mode.");
1412 		return -EIO;
1413 	}
1414 
1415 	i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
1416 	/* Check if need to support multi-driver */
1417 	i40e_support_multi_driver(dev);
1418 	/* Check if users want the latest supported vec path */
1419 	i40e_use_latest_vec(dev);
1420 
1421 	/* Make sure all is clean before doing PF reset */
1422 	i40e_clear_hw(hw);
1423 
1424 	/* Reset here to make sure all is clean for each PF */
1425 	ret = i40e_pf_reset(hw);
1426 	if (ret) {
1427 		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1428 		return ret;
1429 	}
1430 
1431 	/* Initialize the shared code (base driver) */
1432 	ret = i40e_init_shared_code(hw);
1433 	if (ret) {
1434 		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1435 		return ret;
1436 	}
1437 
1438 	/* Initialize the parameters for adminq */
1439 	i40e_init_adminq_parameter(hw);
1440 	ret = i40e_init_adminq(hw);
1441 	if (ret != I40E_SUCCESS) {
1442 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1443 		return -EIO;
1444 	}
1445 	/* Firmware of SFP x722 does not support adminq option */
1446 	if (hw->device_id == I40E_DEV_ID_SFP_X722)
1447 		hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE;
1448 
1449 	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1450 		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1451 		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
1452 		     ((hw->nvm.version >> 12) & 0xf),
1453 		     ((hw->nvm.version >> 4) & 0xff),
1454 		     (hw->nvm.version & 0xf), hw->nvm.eetrack);
1455 
1456 	/* Initialize the hardware */
1457 	i40e_hw_init(dev);
1458 
1459 	i40e_config_automask(pf);
1460 
1461 	i40e_set_default_pctype_table(dev);
1462 
1463 	/*
1464 	 * To work around the NVM issue, initialize the registers
1465 	 * for the QinQ packet type in software.
1466 	 * This should be removed once the issue is fixed in the NVM.
1467 	 */
1468 	if (!pf->support_multi_driver)
1469 		i40e_GLQF_reg_init(hw);
1470 
1471 	/* Initialize the input set for filters (hash and fd) to default value */
1472 	i40e_filter_input_set_init(pf);
1473 
1474 	/* initialise the L3_MAP register */
1475 	if (!pf->support_multi_driver) {
1476 		ret = i40e_aq_debug_write_global_register(hw,
1477 						   I40E_GLQF_L3_MAP(40),
1478 						   0x00000028,	NULL);
1479 		if (ret)
1480 			PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1481 				     ret);
1482 		PMD_INIT_LOG(DEBUG,
1483 			     "Global register 0x%08x is changed with 0x28",
1484 			     I40E_GLQF_L3_MAP(40));
1485 	}
1486 
1487 	/* Need the special FW version to support floating VEB */
1488 	config_floating_veb(dev);
1489 	/* Clear PXE mode */
1490 	i40e_clear_pxe_mode(hw);
1491 	i40e_dev_sync_phy_type(hw);
1492 
1493 	/*
1494 	 * On X710, performance numbers are far below expectations on recent
1495 	 * firmware versions. The fix for this issue may not be integrated in
1496 	 * the next firmware version, so a workaround in the software driver
1497 	 * is needed. It modifies the initial values of 3 internal-only
1498 	 * registers. Note that the workaround can be removed once it is fixed
1499 	 * in firmware in the future.
1500 	 */
1501 	i40e_configure_registers(hw);
1502 
1503 	/* Get hw capabilities */
1504 	ret = i40e_get_cap(hw);
1505 	if (ret != I40E_SUCCESS) {
1506 		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1507 		goto err_get_capabilities;
1508 	}
1509 
1510 	/* Initialize parameters for PF */
1511 	ret = i40e_pf_parameter_init(dev);
1512 	if (ret != 0) {
1513 		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1514 		goto err_parameter_init;
1515 	}
1516 
1517 	/* Initialize the queue management */
1518 	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1519 	if (ret < 0) {
1520 		PMD_INIT_LOG(ERR, "Failed to init queue pool");
1521 		goto err_qp_pool_init;
1522 	}
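	/* MSI-X vector 0 is reserved for the misc/adminq interrupt, so the
	 * vector pool starts at 1 and holds num_msix_vectors - 1 entries.
	 */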
1523 	ret = i40e_res_pool_init(&pf->msix_pool, 1,
1524 				hw->func_caps.num_msix_vectors - 1);
1525 	if (ret < 0) {
1526 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1527 		goto err_msix_pool_init;
1528 	}
1529 
1530 	/* Initialize lan hmc */
1531 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1532 				hw->func_caps.num_rx_qp, 0, 0);
1533 	if (ret != I40E_SUCCESS) {
1534 		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1535 		goto err_init_lan_hmc;
1536 	}
1537 
1538 	/* Configure lan hmc */
1539 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1540 	if (ret != I40E_SUCCESS) {
1541 		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1542 		goto err_configure_lan_hmc;
1543 	}
1544 
1545 	/* Get and check the mac address */
1546 	i40e_get_mac_addr(hw, hw->mac.addr);
1547 	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1548 		PMD_INIT_LOG(ERR, "mac address is not valid");
1549 		ret = -EIO;
1550 		goto err_get_mac_addr;
1551 	}
1552 	/* Copy the permanent MAC address */
1553 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1554 			(struct rte_ether_addr *)hw->mac.perm_addr);
1555 
1556 	/* Disable flow control */
1557 	hw->fc.requested_mode = I40E_FC_NONE;
1558 	i40e_set_fc(hw, &aq_fail, TRUE);
1559 
1560 	/* Set the global registers with default ether type value */
1561 	if (!pf->support_multi_driver) {
1562 		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1563 					 RTE_ETHER_TYPE_VLAN);
1564 		if (ret != I40E_SUCCESS) {
1565 			PMD_INIT_LOG(ERR,
1566 				     "Failed to set the default outer "
1567 				     "VLAN ether type");
1568 			goto err_setup_pf_switch;
1569 		}
1570 	}
1571 
1572 	/* PF setup, which includes VSI setup */
1573 	ret = i40e_pf_setup(pf);
1574 	if (ret) {
1575 		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1576 		goto err_setup_pf_switch;
1577 	}
1578 
1579 	vsi = pf->main_vsi;
1580 
1581 	/* Disable double vlan by default */
1582 	i40e_vsi_config_double_vlan(vsi, FALSE);
1583 
1584 	/* Disable S-TAG identification when floating_veb is disabled */
1585 	if (!pf->floating_veb) {
1586 		ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1587 		if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1588 			ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1589 			I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1590 		}
1591 	}
1592 
1593 	if (!vsi->max_macaddrs)
1594 		len = RTE_ETHER_ADDR_LEN;
1595 	else
1596 		len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;
1597 
1598 	/* Should be after VSI initialized */
1599 	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1600 	if (!dev->data->mac_addrs) {
1601 		PMD_INIT_LOG(ERR,
1602 			"Failed to allocate memory for storing mac addresses");
1603 		goto err_mac_alloc;
1604 	}
1605 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1606 					&dev->data->mac_addrs[0]);
1607 
1608 	/* Pass the information to the rte_eth_dev_close() that it should also
1609 	 * release the private port resources.
1610 	 */
1611 	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1612 
1613 	/* Init dcb to sw mode by default */
1614 	ret = i40e_dcb_init_configure(dev, TRUE);
1615 	if (ret != I40E_SUCCESS) {
1616 		PMD_INIT_LOG(INFO, "Failed to init dcb.");
1617 		pf->flags &= ~I40E_FLAG_DCB;
1618 	}
1619 	/* Update HW struct after DCB configuration */
1620 	i40e_get_cap(hw);
1621 
1622 	/* initialize the pf host driver to set up SR-IOV resources if applicable */
1623 	i40e_pf_host_init(dev);
1624 
1625 	/* register callback func to eal lib */
1626 	rte_intr_callback_register(intr_handle,
1627 				   i40e_dev_interrupt_handler, dev);
1628 
1629 	/* configure and enable device interrupt */
1630 	i40e_pf_config_irq0(hw, TRUE);
1631 	i40e_pf_enable_irq0(hw);
1632 
1633 	/* enable uio intr after callback register */
1634 	rte_intr_enable(intr_handle);
1635 
1636 	/* By default disable flexible payload in global configuration */
1637 	if (!pf->support_multi_driver)
1638 		i40e_flex_payload_reg_set_default(hw);
1639 
1640 	/*
1641 	 * Add an ethertype filter to drop all flow control frames transmitted
1642 	 * from VSIs. By doing so, we stop VFs from sending out PAUSE or PFC
1643 	 * frames to the wire.
1644 	 */
1645 	i40e_add_tx_flow_control_drop_filter(pf);
1646 
1647 	/* Set the max frame size to 0x2600 by default,
1648 	 * in case other drivers changed the default value.
1649 	 */
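	/* I40E_FRAME_SIZE_MAX is 0x2600 (9728) bytes, the maximum frame size
	 * supported by the device.
	 */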
1650 	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1651 
1652 	/* initialize mirror rule list */
1653 	TAILQ_INIT(&pf->mirror_list);
1654 
1655 	/* initialize Traffic Manager configuration */
1656 	i40e_tm_conf_init(dev);
1657 
1658 	/* Initialize customized information */
1659 	i40e_init_customized_info(pf);
1660 
1661 	ret = i40e_init_ethtype_filter_list(dev);
1662 	if (ret < 0)
1663 		goto err_init_ethtype_filter_list;
1664 	ret = i40e_init_tunnel_filter_list(dev);
1665 	if (ret < 0)
1666 		goto err_init_tunnel_filter_list;
1667 	ret = i40e_init_fdir_filter_list(dev);
1668 	if (ret < 0)
1669 		goto err_init_fdir_filter_list;
1670 
1671 	/* initialize queue region configuration */
1672 	i40e_init_queue_region_conf(dev);
1673 
1674 	/* initialize rss configuration from rte_flow */
1675 	memset(&pf->rss_info, 0,
1676 		sizeof(struct i40e_rte_flow_rss_conf));
1677 
1678 	/* reset all stats of the device, including pf and main vsi */
1679 	i40e_dev_stats_reset(dev);
1680 
1681 	return 0;
1682 
1683 err_init_fdir_filter_list:
1684 	rte_free(pf->tunnel.hash_table);
1685 	rte_free(pf->tunnel.hash_map);
1686 err_init_tunnel_filter_list:
1687 	rte_free(pf->ethertype.hash_table);
1688 	rte_free(pf->ethertype.hash_map);
1689 err_init_ethtype_filter_list:
1690 	rte_free(dev->data->mac_addrs);
1691 	dev->data->mac_addrs = NULL;
1692 err_mac_alloc:
1693 	i40e_vsi_release(pf->main_vsi);
1694 err_setup_pf_switch:
1695 err_get_mac_addr:
1696 err_configure_lan_hmc:
1697 	(void)i40e_shutdown_lan_hmc(hw);
1698 err_init_lan_hmc:
1699 	i40e_res_pool_destroy(&pf->msix_pool);
1700 err_msix_pool_init:
1701 	i40e_res_pool_destroy(&pf->qp_pool);
1702 err_qp_pool_init:
1703 err_parameter_init:
1704 err_get_capabilities:
1705 	(void)i40e_shutdown_adminq(hw);
1706 
1707 	return ret;
1708 }
1709 
1710 static void
1711 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1712 {
1713 	struct i40e_ethertype_filter *p_ethertype;
1714 	struct i40e_ethertype_rule *ethertype_rule;
1715 
1716 	ethertype_rule = &pf->ethertype;
1717 	/* Remove all ethertype filter rules and hash */
1718 	if (ethertype_rule->hash_map)
1719 		rte_free(ethertype_rule->hash_map);
1720 	if (ethertype_rule->hash_table)
1721 		rte_hash_free(ethertype_rule->hash_table);
1722 
1723 	while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1724 		TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1725 			     p_ethertype, rules);
1726 		rte_free(p_ethertype);
1727 	}
1728 }
1729 
1730 static void
1731 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1732 {
1733 	struct i40e_tunnel_filter *p_tunnel;
1734 	struct i40e_tunnel_rule *tunnel_rule;
1735 
1736 	tunnel_rule = &pf->tunnel;
1737 	/* Remove all tunnel filter rules and hash */
1738 	if (tunnel_rule->hash_map)
1739 		rte_free(tunnel_rule->hash_map);
1740 	if (tunnel_rule->hash_table)
1741 		rte_hash_free(tunnel_rule->hash_table);
1742 
1743 	while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1744 		TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1745 		rte_free(p_tunnel);
1746 	}
1747 }
1748 
1749 static void
1750 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1751 {
1752 	struct i40e_fdir_filter *p_fdir;
1753 	struct i40e_fdir_info *fdir_info;
1754 
1755 	fdir_info = &pf->fdir;
1756 	/* Remove all flow director rules and hash */
1757 	if (fdir_info->hash_map)
1758 		rte_free(fdir_info->hash_map);
1759 	if (fdir_info->hash_table)
1760 		rte_hash_free(fdir_info->hash_table);
1761 
1762 	while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1763 		TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1764 		rte_free(p_fdir);
1765 	}
1766 }
1767 
1768 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1769 {
1770 	/*
1771 	 * By default, disable flexible payload
1772 	 * for the corresponding L2/L3/L4 layers.
1773 	 */
1774 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1775 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1776 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1777 }
1778 
1779 static int
1780 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1781 {
1782 	struct i40e_hw *hw;
1783 
1784 	PMD_INIT_FUNC_TRACE();
1785 
1786 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1787 		return 0;
1788 
1789 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1790 
1791 	if (hw->adapter_closed == 0)
1792 		i40e_dev_close(dev);
1793 
1794 	return 0;
1795 }
1796 
1797 static int
1798 i40e_dev_configure(struct rte_eth_dev *dev)
1799 {
1800 	struct i40e_adapter *ad =
1801 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1802 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1803 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1804 	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1805 	int i, ret;
1806 
1807 	ret = i40e_dev_sync_phy_type(hw);
1808 	if (ret)
1809 		return ret;
1810 
1811 	/* Initialize to TRUE. If any Rx queue doesn't meet the
1812 	 * bulk allocation or vector Rx preconditions, we will reset it.
1813 	 */
1814 	ad->rx_bulk_alloc_allowed = true;
1815 	ad->rx_vec_allowed = true;
1816 	ad->tx_simple_allowed = true;
1817 	ad->tx_vec_allowed = true;
1818 
1819 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1820 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1821 
1822 	/* Only legacy filter API needs the following fdir config. So when the
1823 	 * legacy filter API is deprecated, the following code should also be
1824 	 * removed.
1825 	 */
1826 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1827 		ret = i40e_fdir_setup(pf);
1828 		if (ret != I40E_SUCCESS) {
1829 			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1830 			return -ENOTSUP;
1831 		}
1832 		ret = i40e_fdir_configure(dev);
1833 		if (ret < 0) {
1834 			PMD_DRV_LOG(ERR, "failed to configure fdir.");
1835 			goto err;
1836 		}
1837 	} else
1838 		i40e_fdir_teardown(pf);
1839 
1840 	ret = i40e_dev_init_vlan(dev);
1841 	if (ret < 0)
1842 		goto err;
1843 
1844 	/* VMDQ setup.
1845 	 *  VMDQ setup needs to move out of i40e_pf_config_mq_rx() because VMDQ
1846 	 *  and RSS settings have different requirements.
1847 	 *  The general PMD call sequence is NIC init, configure,
1848 	 *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup(), the driver
1849 	 *  looks up the VSI that a specific queue belongs to when VMDQ is
1850 	 *  applicable, so the VMDQ setting has to be done before
1851 	 *  rx/tx_queue_setup(). This function is a good place for vmdq_setup.
1852 	 *  For RSS, the driver calculates the actual number of configured RX
1853 	 *  queues, which is only available after rx_queue_setup(), so
1854 	 *  dev_start() is a good place for the RSS setup.
1855 	 */
1856 	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1857 		ret = i40e_vmdq_setup(dev);
1858 		if (ret)
1859 			goto err;
1860 	}
1861 
1862 	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1863 		ret = i40e_dcb_setup(dev);
1864 		if (ret) {
1865 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
1866 			goto err_dcb;
1867 		}
1868 	}
1869 
1870 	TAILQ_INIT(&pf->flow_list);
1871 
1872 	return 0;
1873 
1874 err_dcb:
1875 	/* release VMDq resources if they exist */
1876 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1877 		i40e_vsi_release(pf->vmdq[i].vsi);
1878 		pf->vmdq[i].vsi = NULL;
1879 	}
1880 	rte_free(pf->vmdq);
1881 	pf->vmdq = NULL;
1882 err:
1883 	/* Release the fdir resource if it exists.
1884 	 * Only legacy filter API needs the following fdir config. So when the
1885 	 * legacy filter API is deprecated, the following code should also be
1886 	 * removed.
1887 	 */
1888 	i40e_fdir_teardown(pf);
1889 	return ret;
1890 }
1891 
1892 void
1893 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1894 {
1895 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1896 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1897 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1898 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1899 	uint16_t msix_vect = vsi->msix_intr;
1900 	uint16_t i;
1901 
1902 	for (i = 0; i < vsi->nb_qps; i++) {
1903 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1904 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1905 		rte_wmb();
1906 	}
1907 
1908 	if (vsi->type != I40E_VSI_SRIOV) {
1909 		if (!rte_intr_allow_others(intr_handle)) {
1910 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1911 				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1912 			I40E_WRITE_REG(hw,
1913 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1914 				       0);
1915 		} else {
1916 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1917 				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1918 			I40E_WRITE_REG(hw,
1919 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1920 						       msix_vect - 1), 0);
1921 		}
1922 	} else {
1923 		uint32_t reg;
1924 		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1925 			vsi->user_param + (msix_vect - 1);
1926 
1927 		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1928 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1929 	}
1930 	I40E_WRITE_FLUSH(hw);
1931 }
1932 
1933 static void
1934 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1935 		       int base_queue, int nb_queue,
1936 		       uint16_t itr_idx)
1937 {
1938 	int i;
1939 	uint32_t val;
1940 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1941 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1942 
1943 	/* Bind all RX queues to allocated MSIX interrupt */
1944 	for (i = 0; i < nb_queue; i++) {
1945 		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1946 			itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1947 			((base_queue + i + 1) <<
1948 			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1949 			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1950 			I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1951 
1952 		if (i == nb_queue - 1)
1953 			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1954 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1955 	}
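	/* The writes above chain the RX queues into a linked list via the
	 * NEXTQ_INDX field; the last queue terminates the list by setting
	 * the NEXTQ_INDX mask.
	 */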
1956 
1957 	/* Write first RX queue to Link list register as the head element */
1958 	if (vsi->type != I40E_VSI_SRIOV) {
1959 		uint16_t interval =
1960 			i40e_calc_itr_interval(1, pf->support_multi_driver);
1961 
1962 		if (msix_vect == I40E_MISC_VEC_ID) {
1963 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1964 				       (base_queue <<
1965 					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1966 				       (0x0 <<
1967 					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1968 			I40E_WRITE_REG(hw,
1969 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1970 				       interval);
1971 		} else {
1972 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1973 				       (base_queue <<
1974 					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1975 				       (0x0 <<
1976 					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1977 			I40E_WRITE_REG(hw,
1978 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1979 						       msix_vect - 1),
1980 				       interval);
1981 		}
1982 	} else {
1983 		uint32_t reg;
1984 
1985 		if (msix_vect == I40E_MISC_VEC_ID) {
1986 			I40E_WRITE_REG(hw,
1987 				       I40E_VPINT_LNKLST0(vsi->user_param),
1988 				       (base_queue <<
1989 					I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1990 				       (0x0 <<
1991 					I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1992 		} else {
1993 			/* num_msix_vectors_vf counts irq0, which must be subtracted */
1994 			reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1995 				vsi->user_param + (msix_vect - 1);
1996 
1997 			I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1998 				       (base_queue <<
1999 					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2000 				       (0x0 <<
2001 					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2002 		}
2003 	}
2004 
2005 	I40E_WRITE_FLUSH(hw);
2006 }
2007 
2008 void
2009 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
2010 {
2011 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2012 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2013 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2014 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2015 	uint16_t msix_vect = vsi->msix_intr;
2016 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2017 	uint16_t queue_idx = 0;
2018 	int record = 0;
2019 	int i;
2020 
2021 	for (i = 0; i < vsi->nb_qps; i++) {
2022 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
2023 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
2024 	}
2025 
2026 	/* VF bind interrupt */
2027 	if (vsi->type == I40E_VSI_SRIOV) {
2028 		__vsi_queues_bind_intr(vsi, msix_vect,
2029 				       vsi->base_queue, vsi->nb_qps,
2030 				       itr_idx);
2031 		return;
2032 	}
2033 
2034 	/* PF & VMDq bind interrupt */
2035 	if (rte_intr_dp_is_en(intr_handle)) {
2036 		if (vsi->type == I40E_VSI_MAIN) {
2037 			queue_idx = 0;
2038 			record = 1;
2039 		} else if (vsi->type == I40E_VSI_VMDQ2) {
2040 			struct i40e_vsi *main_vsi =
2041 				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2042 			queue_idx = vsi->base_queue - main_vsi->nb_qps;
2043 			record = 1;
2044 		}
2045 	}
2046 
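	/* Map queues to MSI-X vectors 1:1 while more than one vector remains;
	 * once only one vector is left, all remaining queues share it
	 * (falling back to the misc vector when others are not allowed).
	 */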
2047 	for (i = 0; i < vsi->nb_used_qps; i++) {
2048 		if (nb_msix <= 1) {
2049 			if (!rte_intr_allow_others(intr_handle))
2050 				/* allow sharing MISC_VEC_ID */
2051 				msix_vect = I40E_MISC_VEC_ID;
2052 
2053 			/* not enough msix_vect, map all to one */
2054 			__vsi_queues_bind_intr(vsi, msix_vect,
2055 					       vsi->base_queue + i,
2056 					       vsi->nb_used_qps - i,
2057 					       itr_idx);
2058 			for (; !!record && i < vsi->nb_used_qps; i++)
2059 				intr_handle->intr_vec[queue_idx + i] =
2060 					msix_vect;
2061 			break;
2062 		}
2063 		/* 1:1 queue/msix_vect mapping */
2064 		__vsi_queues_bind_intr(vsi, msix_vect,
2065 				       vsi->base_queue + i, 1,
2066 				       itr_idx);
2067 		if (!!record)
2068 			intr_handle->intr_vec[queue_idx + i] = msix_vect;
2069 
2070 		msix_vect++;
2071 		nb_msix--;
2072 	}
2073 }
2074 
2075 static void
2076 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2077 {
2078 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2079 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2080 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2081 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2082 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2083 	uint16_t msix_intr, i;
2084 
2085 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2086 		for (i = 0; i < vsi->nb_msix; i++) {
2087 			msix_intr = vsi->msix_intr + i;
2088 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2089 				I40E_PFINT_DYN_CTLN_INTENA_MASK |
2090 				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2091 				I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2092 		}
2093 	else
2094 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2095 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
2096 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2097 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2098 
2099 	I40E_WRITE_FLUSH(hw);
2100 }
2101 
2102 static void
2103 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2104 {
2105 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2106 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2107 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2108 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2109 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2110 	uint16_t msix_intr, i;
2111 
2112 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2113 		for (i = 0; i < vsi->nb_msix; i++) {
2114 			msix_intr = vsi->msix_intr + i;
2115 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2116 				       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2117 		}
2118 	else
2119 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2120 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2121 
2122 	I40E_WRITE_FLUSH(hw);
2123 }
2124 
2125 static inline uint8_t
2126 i40e_parse_link_speeds(uint16_t link_speeds)
2127 {
2128 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2129 
2130 	if (link_speeds & ETH_LINK_SPEED_40G)
2131 		link_speed |= I40E_LINK_SPEED_40GB;
2132 	if (link_speeds & ETH_LINK_SPEED_25G)
2133 		link_speed |= I40E_LINK_SPEED_25GB;
2134 	if (link_speeds & ETH_LINK_SPEED_20G)
2135 		link_speed |= I40E_LINK_SPEED_20GB;
2136 	if (link_speeds & ETH_LINK_SPEED_10G)
2137 		link_speed |= I40E_LINK_SPEED_10GB;
2138 	if (link_speeds & ETH_LINK_SPEED_1G)
2139 		link_speed |= I40E_LINK_SPEED_1GB;
2140 	if (link_speeds & ETH_LINK_SPEED_100M)
2141 		link_speed |= I40E_LINK_SPEED_100MB;
2142 
2143 	return link_speed;
2144 }
2145 
2146 static int
2147 i40e_phy_conf_link(struct i40e_hw *hw,
2148 		   uint8_t abilities,
2149 		   uint8_t force_speed,
2150 		   bool is_up)
2151 {
2152 	enum i40e_status_code status;
2153 	struct i40e_aq_get_phy_abilities_resp phy_ab;
2154 	struct i40e_aq_set_phy_config phy_conf;
2155 	enum i40e_aq_phy_type cnt;
2156 	uint8_t avail_speed;
2157 	uint32_t phy_type_mask = 0;
2158 
2159 	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2160 			I40E_AQ_PHY_FLAG_PAUSE_RX |
2162 			I40E_AQ_PHY_FLAG_LOW_POWER;
2163 	int ret = -ENOTSUP;
2164 
2165 	/* To get phy capabilities of available speeds. */
2166 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2167 					      NULL);
2168 	if (status) {
2169 		PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2170 				status);
2171 		return ret;
2172 	}
2173 	avail_speed = phy_ab.link_speed;
2174 
2175 	/* To get the current phy config. */
2176 	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2177 					      NULL);
2178 	if (status) {
2179 		PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2180 				status);
2181 		return ret;
2182 	}
2183 
2184 	/* If the link needs to go up, autoneg is enabled, and the speed is OK,
2185 	 * there is no need to set it up again.
2186 	 */
2187 	if (is_up && phy_ab.phy_type != 0 &&
2188 		     abilities & I40E_AQ_PHY_AN_ENABLED &&
2189 		     phy_ab.link_speed != 0)
2190 		return I40E_SUCCESS;
2191 
2192 	memset(&phy_conf, 0, sizeof(phy_conf));
2193 
2194 	/* bits 0-2 use the values from get_phy_abilities_resp */
2195 	abilities &= ~mask;
2196 	abilities |= phy_ab.abilities & mask;
2197 
2198 	phy_conf.abilities = abilities;
2199 
2200 	/* If the link needs to go up but the forced speed is not supported,
2201 	 * warn the user and configure the default available speeds.
2202 	 */
2203 	if (is_up && !(force_speed & avail_speed)) {
2204 		PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2205 		phy_conf.link_speed = avail_speed;
2206 	} else {
2207 		phy_conf.link_speed = is_up ? force_speed : avail_speed;
2208 	}
2209 
2210 	/* PHY type mask needs to include each type except PHY type extension */
2211 	for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2212 		phy_type_mask |= 1 << cnt;
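	/* The mask stops before 25GBASE_KR; the 25G PHY types are requested
	 * through the phy_type_ext field below instead.
	 */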
2213 
2214 	/* use get_phy_abilities_resp value for the rest */
2215 	phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2216 	phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2217 		I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2218 		I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2219 	phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2220 	phy_conf.eee_capability = phy_ab.eee_capability;
2221 	phy_conf.eeer = phy_ab.eeer_val;
2222 	phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2223 
2224 	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2225 		    phy_ab.abilities, phy_ab.link_speed);
2226 	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2227 		    phy_conf.abilities, phy_conf.link_speed);
2228 
2229 	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2230 	if (status)
2231 		return ret;
2232 
2233 	return I40E_SUCCESS;
2234 }
2235 
2236 static int
2237 i40e_apply_link_speed(struct rte_eth_dev *dev)
2238 {
2239 	uint8_t speed;
2240 	uint8_t abilities = 0;
2241 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2242 	struct rte_eth_conf *conf = &dev->data->dev_conf;
2243 
2244 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2245 		     I40E_AQ_PHY_LINK_ENABLED;
2246 
2247 	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2248 		conf->link_speeds = ETH_LINK_SPEED_40G |
2249 				    ETH_LINK_SPEED_25G |
2250 				    ETH_LINK_SPEED_20G |
2251 				    ETH_LINK_SPEED_10G |
2252 				    ETH_LINK_SPEED_1G |
2253 				    ETH_LINK_SPEED_100M;
2254 
2255 		abilities |= I40E_AQ_PHY_AN_ENABLED;
2256 	} else {
2257 		abilities &= ~I40E_AQ_PHY_AN_ENABLED;
2258 	}
2259 	speed = i40e_parse_link_speeds(conf->link_speeds);
2260 
2261 	return i40e_phy_conf_link(hw, abilities, speed, true);
2262 }
2263 
2264 static int
2265 i40e_dev_start(struct rte_eth_dev *dev)
2266 {
2267 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2268 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2269 	struct i40e_vsi *main_vsi = pf->main_vsi;
2270 	int ret, i;
2271 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2272 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2273 	uint32_t intr_vector = 0;
2274 	struct i40e_vsi *vsi;
2275 
2276 	hw->adapter_stopped = 0;
2277 
2278 	rte_intr_disable(intr_handle);
2279 
2280 	if ((rte_intr_cap_multiple(intr_handle) ||
2281 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
2282 	    dev->data->dev_conf.intr_conf.rxq != 0) {
2283 		intr_vector = dev->data->nb_rx_queues;
2284 		ret = rte_intr_efd_enable(intr_handle, intr_vector);
2285 		if (ret)
2286 			return ret;
2287 	}
2288 
2289 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2290 		intr_handle->intr_vec =
2291 			rte_zmalloc("intr_vec",
2292 				    dev->data->nb_rx_queues * sizeof(int),
2293 				    0);
2294 		if (!intr_handle->intr_vec) {
2295 			PMD_INIT_LOG(ERR,
2296 				"Failed to allocate %d rx_queues intr_vec",
2297 				dev->data->nb_rx_queues);
2298 			return -ENOMEM;
2299 		}
2300 	}
2301 
2302 	/* Initialize VSI */
2303 	ret = i40e_dev_rxtx_init(pf);
2304 	if (ret != I40E_SUCCESS) {
2305 		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2306 		goto err_up;
2307 	}
2308 
2309 	/* Map queues with MSIX interrupt */
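	/* The main VSI uses the Rx queues that remain after each configured
	 * VMDq VSI takes RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM of them.
	 */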
2310 	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2311 		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2312 	i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2313 	i40e_vsi_enable_queues_intr(main_vsi);
2314 
2315 	/* Map VMDQ VSI queues with MSIX interrupt */
2316 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2317 		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2318 		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2319 					  I40E_ITR_INDEX_DEFAULT);
2320 		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2321 	}
2322 
2323 	/* enable FDIR MSIX interrupt */
2324 	if (pf->fdir.fdir_vsi) {
2325 		i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
2326 					  I40E_ITR_INDEX_NONE);
2327 		i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2328 	}
2329 
2330 	/* Enable all queues which have been configured */
2331 	ret = i40e_dev_switch_queues(pf, TRUE);
2332 	if (ret != I40E_SUCCESS) {
2333 		PMD_DRV_LOG(ERR, "Failed to enable VSI");
2334 		goto err_up;
2335 	}
2336 
2337 	/* Enable receiving broadcast packets */
2338 	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2339 	if (ret != I40E_SUCCESS)
2340 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2341 
2342 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2343 		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2344 						true, NULL);
2345 		if (ret != I40E_SUCCESS)
2346 			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2347 	}
2348 
2349 	/* Enable the VLAN promiscuous mode. */
2350 	if (pf->vfs) {
2351 		for (i = 0; i < pf->vf_num; i++) {
2352 			vsi = pf->vfs[i].vsi;
2353 			i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2354 						     true, NULL);
2355 		}
2356 	}
2357 
2358 	/* Enable mac loopback mode */
2359 	if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2360 	    dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2361 		ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2362 		if (ret != I40E_SUCCESS) {
2363 			PMD_DRV_LOG(ERR, "fail to set loopback link");
2364 			goto err_up;
2365 		}
2366 	}
2367 
2368 	/* Apply link configure */
2369 	ret = i40e_apply_link_speed(dev);
2370 	if (I40E_SUCCESS != ret) {
2371 		PMD_DRV_LOG(ERR, "Fail to apply link setting");
2372 		goto err_up;
2373 	}
2374 
2375 	if (!rte_intr_allow_others(intr_handle)) {
2376 		rte_intr_callback_unregister(intr_handle,
2377 					     i40e_dev_interrupt_handler,
2378 					     (void *)dev);
2379 		/* configure and enable device interrupt */
2380 		i40e_pf_config_irq0(hw, FALSE);
2381 		i40e_pf_enable_irq0(hw);
2382 
2383 		if (dev->data->dev_conf.intr_conf.lsc != 0)
2384 			PMD_INIT_LOG(INFO,
2385 				"lsc won't be enabled because interrupt multiplexing is not available");
2386 	} else {
2387 		ret = i40e_aq_set_phy_int_mask(hw,
2388 					       ~(I40E_AQ_EVENT_LINK_UPDOWN |
2389 					       I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2390 					       I40E_AQ_EVENT_MEDIA_NA), NULL);
2391 		if (ret != I40E_SUCCESS)
2392 			PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2393 
2394 		/* Call the get_link_info aq command to enable/disable LSE */
2395 		i40e_dev_link_update(dev, 0);
2396 	}
2397 
2398 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2399 		rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2400 				  i40e_dev_alarm_handler, dev);
2401 	} else {
2402 		/* enable uio intr after callback register */
2403 		rte_intr_enable(intr_handle);
2404 	}
2405 
2406 	i40e_filter_restore(pf);
2407 
2408 	if (pf->tm_conf.root && !pf->tm_conf.committed)
2409 		PMD_DRV_LOG(WARNING,
2410 			    "please call hierarchy_commit() "
2411 			    "before starting the port");
2412 
2413 	return I40E_SUCCESS;
2414 
2415 err_up:
2416 	i40e_dev_switch_queues(pf, FALSE);
2417 	i40e_dev_clear_queues(dev);
2418 
2419 	return ret;
2420 }
2421 
2422 static void
2423 i40e_dev_stop(struct rte_eth_dev *dev)
2424 {
2425 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2426 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2427 	struct i40e_vsi *main_vsi = pf->main_vsi;
2428 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2429 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2430 	int i;
2431 
2432 	if (hw->adapter_stopped == 1)
2433 		return;
2434 
2435 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2436 		rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2437 		rte_intr_enable(intr_handle);
2438 	}
2439 
2440 	/* Disable all queues */
2441 	i40e_dev_switch_queues(pf, FALSE);
2442 
2443 	/* unmap queues from interrupt registers */
2444 	i40e_vsi_disable_queues_intr(main_vsi);
2445 	i40e_vsi_queues_unbind_intr(main_vsi);
2446 
2447 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2448 		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2449 		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2450 	}
2451 
2452 	if (pf->fdir.fdir_vsi) {
2453 		i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2454 		i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2455 	}
2456 	/* Clear all queues and release memory */
2457 	i40e_dev_clear_queues(dev);
2458 
2459 	/* Set link down */
2460 	i40e_dev_set_link_down(dev);
2461 
2462 	if (!rte_intr_allow_others(intr_handle))
2463 		/* resume to the default handler */
2464 		rte_intr_callback_register(intr_handle,
2465 					   i40e_dev_interrupt_handler,
2466 					   (void *)dev);
2467 
2468 	/* Clean datapath event and queue/vec mapping */
2469 	rte_intr_efd_disable(intr_handle);
2470 	if (intr_handle->intr_vec) {
2471 		rte_free(intr_handle->intr_vec);
2472 		intr_handle->intr_vec = NULL;
2473 	}
2474 
2475 	/* reset hierarchy commit */
2476 	pf->tm_conf.committed = false;
2477 
2478 	hw->adapter_stopped = 1;
2479 
2480 	pf->adapter->rss_reta_updated = 0;
2481 }
2482 
2483 static void
2484 i40e_dev_close(struct rte_eth_dev *dev)
2485 {
2486 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2487 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2488 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2489 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2490 	struct i40e_mirror_rule *p_mirror;
2491 	struct i40e_filter_control_settings settings;
2492 	struct rte_flow *p_flow;
2493 	uint32_t reg;
2494 	int i;
2495 	int ret;
2496 	uint8_t aq_fail = 0;
2497 	int retries = 0;
2498 
2499 	PMD_INIT_FUNC_TRACE();
2500 
2501 	ret = rte_eth_switch_domain_free(pf->switch_domain_id);
2502 	if (ret)
2503 		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
2504 
2505 
2506 	i40e_dev_stop(dev);
2507 
2508 	/* Remove all mirror rules */
2509 	while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2510 		ret = i40e_aq_del_mirror_rule(hw,
2511 					      pf->main_vsi->veb->seid,
2512 					      p_mirror->rule_type,
2513 					      p_mirror->entries,
2514 					      p_mirror->num_entries,
2515 					      p_mirror->id);
2516 		if (ret < 0)
2517 			PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2518 				    "status = %d, aq_err = %d.", ret,
2519 				    hw->aq.asq_last_status);
2520 
2521 		/* remove mirror software resource anyway */
2522 		TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2523 		rte_free(p_mirror);
2524 		pf->nb_mirror_rule--;
2525 	}
2526 
2527 	i40e_dev_free_queues(dev);
2528 
2529 	/* Disable interrupt */
2530 	i40e_pf_disable_irq0(hw);
2531 	rte_intr_disable(intr_handle);
2532 
2533 	/*
2534 	 * Only legacy filter API needs the following fdir config. So when the
2535 	 * legacy filter API is deprecated, the following code should also be
2536 	 * removed.
2537 	 */
2538 	i40e_fdir_teardown(pf);
2539 
2540 	/* shutdown and destroy the HMC */
2541 	i40e_shutdown_lan_hmc(hw);
2542 
2543 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2544 		i40e_vsi_release(pf->vmdq[i].vsi);
2545 		pf->vmdq[i].vsi = NULL;
2546 	}
2547 	rte_free(pf->vmdq);
2548 	pf->vmdq = NULL;
2549 
2550 	/* release all the existing VSIs and VEBs */
2551 	i40e_vsi_release(pf->main_vsi);
2552 
2553 	/* shutdown the adminq */
2554 	i40e_aq_queue_shutdown(hw, true);
2555 	i40e_shutdown_adminq(hw);
2556 
2557 	i40e_res_pool_destroy(&pf->qp_pool);
2558 	i40e_res_pool_destroy(&pf->msix_pool);
2559 
2560 	/* Disable flexible payload in global configuration */
2561 	if (!pf->support_multi_driver)
2562 		i40e_flex_payload_reg_set_default(hw);
2563 
2564 	/* force a PF reset to clean anything leftover */
2565 	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2566 	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2567 			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2568 	I40E_WRITE_FLUSH(hw);
2569 
2570 	dev->dev_ops = NULL;
2571 	dev->rx_pkt_burst = NULL;
2572 	dev->tx_pkt_burst = NULL;
2573 
2574 	/* Clear PXE mode */
2575 	i40e_clear_pxe_mode(hw);
2576 
2577 	/* Unconfigure filter control */
2578 	memset(&settings, 0, sizeof(settings));
2579 	ret = i40e_set_filter_control(hw, &settings);
2580 	if (ret)
2581 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2582 					ret);
2583 
2584 	/* Disable flow control */
2585 	hw->fc.requested_mode = I40E_FC_NONE;
2586 	i40e_set_fc(hw, &aq_fail, TRUE);
2587 
2588 	/* uninitialize pf host driver */
2589 	i40e_pf_host_uninit(dev);
2590 
2591 	do {
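	/* Unregistering the interrupt callback can return -EAGAIN while the
	 * handler is still executing, so retry up to 5 times with a 500 ms
	 * delay between attempts.
	 */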
2592 		ret = rte_intr_callback_unregister(intr_handle,
2593 				i40e_dev_interrupt_handler, dev);
2594 		if (ret >= 0 || ret == -ENOENT) {
2595 			break;
2596 		} else if (ret != -EAGAIN) {
2597 			PMD_INIT_LOG(ERR,
2598 				 "intr callback unregister failed: %d",
2599 				 ret);
2600 		}
2601 		i40e_msec_delay(500);
2602 	} while (retries++ < 5);
2603 
2604 	i40e_rm_ethtype_filter_list(pf);
2605 	i40e_rm_tunnel_filter_list(pf);
2606 	i40e_rm_fdir_filter_list(pf);
2607 
2608 	/* Remove all flows */
2609 	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
2610 		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
2611 		rte_free(p_flow);
2612 	}
2613 
2614 	/* Remove all Traffic Manager configuration */
2615 	i40e_tm_conf_uninit(dev);
2616 
2617 	hw->adapter_closed = 1;
2618 }
2619 
2620 /*
2621  * Reset PF device only to re-initialize resources in PMD layer
2622  */
2623 static int
2624 i40e_dev_reset(struct rte_eth_dev *dev)
2625 {
2626 	int ret;
2627 
2628 	/* When a DPDK PMD PF begins to reset the PF port, it should notify all
2629 	 * of its VFs to keep them aligned with it. The detailed notification
2630 	 * mechanism is PMD specific. For the i40e PF, it is rather complex.
2631 	 * To avoid unexpected behavior in the VFs, resetting the PF while SR-IOV
2632 	 * is active is currently not supported. It might be supported later.
2633 	 */
2634 	if (dev->data->sriov.active)
2635 		return -ENOTSUP;
2636 
2637 	ret = eth_i40e_dev_uninit(dev);
2638 	if (ret)
2639 		return ret;
2640 
2641 	ret = eth_i40e_dev_init(dev, NULL);
2642 
2643 	return ret;
2644 }
2645 
2646 static int
2647 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2648 {
2649 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2650 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2651 	struct i40e_vsi *vsi = pf->main_vsi;
2652 	int status;
2653 
2654 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2655 						     true, NULL, true);
2656 	if (status != I40E_SUCCESS) {
2657 		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2658 		return -EAGAIN;
2659 	}
2660 
2661 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2662 							TRUE, NULL);
2663 	if (status != I40E_SUCCESS) {
2664 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2665 		/* Rollback unicast promiscuous mode */
2666 		i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2667 						    false, NULL, true);
2668 		return -EAGAIN;
2669 	}
2670 
2671 	return 0;
2672 }
2673 
2674 static int
2675 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2676 {
2677 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2678 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2679 	struct i40e_vsi *vsi = pf->main_vsi;
2680 	int status;
2681 
2682 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2683 						     false, NULL, true);
2684 	if (status != I40E_SUCCESS) {
2685 		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2686 		return -EAGAIN;
2687 	}
2688 
2689 	/* must remain in all_multicast mode */
2690 	if (dev->data->all_multicast == 1)
2691 		return 0;
2692 
2693 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2694 							false, NULL);
2695 	if (status != I40E_SUCCESS) {
2696 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2697 		/* Rollback unicast promiscuous mode */
2698 		i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2699 						    true, NULL, true);
2700 		return -EAGAIN;
2701 	}
2702 
2703 	return 0;
2704 }
2705 
2706 static int
2707 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2708 {
2709 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2710 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2711 	struct i40e_vsi *vsi = pf->main_vsi;
2712 	int ret;
2713 
2714 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2715 	if (ret != I40E_SUCCESS) {
2716 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2717 		return -EAGAIN;
2718 	}
2719 
2720 	return 0;
2721 }
2722 
2723 static int
2724 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2725 {
2726 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2727 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2728 	struct i40e_vsi *vsi = pf->main_vsi;
2729 	int ret;
2730 
2731 	if (dev->data->promiscuous == 1)
2732 		return 0; /* must remain in all_multicast mode */
2733 
2734 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2735 				vsi->seid, FALSE, NULL);
2736 	if (ret != I40E_SUCCESS) {
2737 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2738 		return -EAGAIN;
2739 	}
2740 
2741 	return 0;
2742 }
2743 
2744 /*
2745  * Set device link up.
2746  */
2747 static int
2748 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2749 {
2750 	/* re-apply link speed setting */
2751 	return i40e_apply_link_speed(dev);
2752 }
2753 
2754 /*
2755  * Set device link down.
2756  */
2757 static int
2758 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2759 {
2760 	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2761 	uint8_t abilities = 0;
2762 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2763 
2764 	abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2765 	return i40e_phy_conf_link(hw, abilities, speed, false);
2766 }
2767 
2768 static __rte_always_inline void
2769 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2770 {
2771 /* Link status registers and values */
2772 #define I40E_PRTMAC_LINKSTA		0x001E2420
2773 #define I40E_REG_LINK_UP		0x40000080
2774 #define I40E_PRTMAC_MACC		0x001E24E0
2775 #define I40E_REG_MACC_25GB		0x00020000
2776 #define I40E_REG_SPEED_MASK		0x38000000
2777 #define I40E_REG_SPEED_0		0x00000000
2778 #define I40E_REG_SPEED_1		0x08000000
2779 #define I40E_REG_SPEED_2		0x10000000
2780 #define I40E_REG_SPEED_3		0x18000000
2781 #define I40E_REG_SPEED_4		0x20000000
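	/* The SPEED field encodings above map to different rates on X722
	 * (2.5G/5G capable) than on other MACs, as handled in the switch below.
	 */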
2782 	uint32_t link_speed;
2783 	uint32_t reg_val;
2784 
2785 	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2786 	link_speed = reg_val & I40E_REG_SPEED_MASK;
2787 	reg_val &= I40E_REG_LINK_UP;
2788 	link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2789 
2790 	if (unlikely(link->link_status == 0))
2791 		return;
2792 
2793 	/* Parse the link status */
2794 	switch (link_speed) {
2795 	case I40E_REG_SPEED_0:
2796 		link->link_speed = ETH_SPEED_NUM_100M;
2797 		break;
2798 	case I40E_REG_SPEED_1:
2799 		link->link_speed = ETH_SPEED_NUM_1G;
2800 		break;
2801 	case I40E_REG_SPEED_2:
2802 		if (hw->mac.type == I40E_MAC_X722)
2803 			link->link_speed = ETH_SPEED_NUM_2_5G;
2804 		else
2805 			link->link_speed = ETH_SPEED_NUM_10G;
2806 		break;
2807 	case I40E_REG_SPEED_3:
2808 		if (hw->mac.type == I40E_MAC_X722) {
2809 			link->link_speed = ETH_SPEED_NUM_5G;
2810 		} else {
2811 			reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2812 
2813 			if (reg_val & I40E_REG_MACC_25GB)
2814 				link->link_speed = ETH_SPEED_NUM_25G;
2815 			else
2816 				link->link_speed = ETH_SPEED_NUM_40G;
2817 		}
2818 		break;
2819 	case I40E_REG_SPEED_4:
2820 		if (hw->mac.type == I40E_MAC_X722)
2821 			link->link_speed = ETH_SPEED_NUM_10G;
2822 		else
2823 			link->link_speed = ETH_SPEED_NUM_20G;
2824 		break;
2825 	default:
2826 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2827 		break;
2828 	}
2829 }
2830 
2831 static __rte_always_inline void
2832 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2833 	bool enable_lse, int wait_to_complete)
2834 {
2835 #define CHECK_INTERVAL             100  /* 100ms */
2836 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2837 	uint32_t rep_cnt = MAX_REPEAT_TIME;
2838 	struct i40e_link_status link_status;
2839 	int status;
2840 
2841 	memset(&link_status, 0, sizeof(link_status));
2842 
2843 	do {
2844 		memset(&link_status, 0, sizeof(link_status));
2845 
2846 		/* Get link status information from hardware */
2847 		status = i40e_aq_get_link_info(hw, enable_lse,
2848 						&link_status, NULL);
2849 		if (unlikely(status != I40E_SUCCESS)) {
2850 			link->link_speed = ETH_SPEED_NUM_NONE;
2851 			link->link_duplex = ETH_LINK_FULL_DUPLEX;
2852 			PMD_DRV_LOG(ERR, "Failed to get link info");
2853 			return;
2854 		}
2855 
2856 		link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2857 		if (!wait_to_complete || link->link_status)
2858 			break;
2859 
2860 		rte_delay_ms(CHECK_INTERVAL);
2861 	} while (--rep_cnt);
2862 
2863 	/* Parse the link status */
2864 	switch (link_status.link_speed) {
2865 	case I40E_LINK_SPEED_100MB:
2866 		link->link_speed = ETH_SPEED_NUM_100M;
2867 		break;
2868 	case I40E_LINK_SPEED_1GB:
2869 		link->link_speed = ETH_SPEED_NUM_1G;
2870 		break;
2871 	case I40E_LINK_SPEED_10GB:
2872 		link->link_speed = ETH_SPEED_NUM_10G;
2873 		break;
2874 	case I40E_LINK_SPEED_20GB:
2875 		link->link_speed = ETH_SPEED_NUM_20G;
2876 		break;
2877 	case I40E_LINK_SPEED_25GB:
2878 		link->link_speed = ETH_SPEED_NUM_25G;
2879 		break;
2880 	case I40E_LINK_SPEED_40GB:
2881 		link->link_speed = ETH_SPEED_NUM_40G;
2882 		break;
2883 	default:
2884 		link->link_speed = ETH_SPEED_NUM_NONE;
2885 		break;
2886 	}
2887 }
2888 
2889 int
2890 i40e_dev_link_update(struct rte_eth_dev *dev,
2891 		     int wait_to_complete)
2892 {
2893 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2894 	struct rte_eth_link link;
2895 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2896 	int ret;
2897 
2898 	memset(&link, 0, sizeof(link));
2899 
2900 	/* i40e uses full duplex only */
2901 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
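	/* Report autoneg unless a fixed link speed was requested */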
2902 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2903 			ETH_LINK_SPEED_FIXED);
2904 
2905 	if (!wait_to_complete && !enable_lse)
2906 		update_link_reg(hw, &link);
2907 	else
2908 		update_link_aq(hw, &link, enable_lse, wait_to_complete);
2909 
2910 	if (hw->switch_dev)
2911 		rte_eth_linkstatus_get(hw->switch_dev, &link);
2912 
2913 	ret = rte_eth_linkstatus_set(dev, &link);
2914 	i40e_notify_all_vfs_link_status(dev);
2915 
2916 	return ret;
2917 }
2918 
2919 /* Get all the statistics of a VSI */
2920 void
2921 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2922 {
2923 	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2924 	struct i40e_eth_stats *nes = &vsi->eth_stats;
2925 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2926 	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2927 
2928 	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2929 			    vsi->offset_loaded, &oes->rx_bytes,
2930 			    &nes->rx_bytes);
2931 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2932 			    vsi->offset_loaded, &oes->rx_unicast,
2933 			    &nes->rx_unicast);
2934 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2935 			    vsi->offset_loaded, &oes->rx_multicast,
2936 			    &nes->rx_multicast);
2937 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2938 			    vsi->offset_loaded, &oes->rx_broadcast,
2939 			    &nes->rx_broadcast);
2940 	/* exclude CRC bytes */
2941 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2942 		nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
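	/* The hardware byte counters include the 4-byte FCS of every received
	 * frame, hence the per-packet RTE_ETHER_CRC_LEN adjustment above.
	 */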
2943 
2944 	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2945 			    &oes->rx_discards, &nes->rx_discards);
2946 	/* GLV_REPC not supported */
2947 	/* GLV_RMPC not supported */
2948 	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2949 			    &oes->rx_unknown_protocol,
2950 			    &nes->rx_unknown_protocol);
2951 	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2952 			    vsi->offset_loaded, &oes->tx_bytes,
2953 			    &nes->tx_bytes);
2954 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2955 			    vsi->offset_loaded, &oes->tx_unicast,
2956 			    &nes->tx_unicast);
2957 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2958 			    vsi->offset_loaded, &oes->tx_multicast,
2959 			    &nes->tx_multicast);
2960 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2961 			    vsi->offset_loaded,  &oes->tx_broadcast,
2962 			    &nes->tx_broadcast);
2963 	/* GLV_TDPC not supported */
2964 	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2965 			    &oes->tx_errors, &nes->tx_errors);
2966 	vsi->offset_loaded = true;
2967 
2968 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2969 		    vsi->vsi_id);
2970 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2971 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2972 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2973 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2974 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2975 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2976 		    nes->rx_unknown_protocol);
2977 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2978 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2979 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2980 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2981 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2982 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2983 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2984 		    vsi->vsi_id);
2985 }
2986 
2987 static void
2988 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2989 {
2990 	unsigned int i;
2991 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2992 	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2993 
2994 	/* Get rx/tx bytes of internal transfer packets */
2995 	i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2996 			I40E_GLV_GORCL(hw->port),
2997 			pf->offset_loaded,
2998 			&pf->internal_stats_offset.rx_bytes,
2999 			&pf->internal_stats.rx_bytes);
3000 
3001 	i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
3002 			I40E_GLV_GOTCL(hw->port),
3003 			pf->offset_loaded,
3004 			&pf->internal_stats_offset.tx_bytes,
3005 			&pf->internal_stats.tx_bytes);
3006 	/* Get total internal rx packet count */
3007 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
3008 			    I40E_GLV_UPRCL(hw->port),
3009 			    pf->offset_loaded,
3010 			    &pf->internal_stats_offset.rx_unicast,
3011 			    &pf->internal_stats.rx_unicast);
3012 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
3013 			    I40E_GLV_MPRCL(hw->port),
3014 			    pf->offset_loaded,
3015 			    &pf->internal_stats_offset.rx_multicast,
3016 			    &pf->internal_stats.rx_multicast);
3017 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
3018 			    I40E_GLV_BPRCL(hw->port),
3019 			    pf->offset_loaded,
3020 			    &pf->internal_stats_offset.rx_broadcast,
3021 			    &pf->internal_stats.rx_broadcast);
3022 	/* Get total internal tx packet count */
3023 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
3024 			    I40E_GLV_UPTCL(hw->port),
3025 			    pf->offset_loaded,
3026 			    &pf->internal_stats_offset.tx_unicast,
3027 			    &pf->internal_stats.tx_unicast);
3028 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
3029 			    I40E_GLV_MPTCL(hw->port),
3030 			    pf->offset_loaded,
3031 			    &pf->internal_stats_offset.tx_multicast,
3032 			    &pf->internal_stats.tx_multicast);
3033 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
3034 			    I40E_GLV_BPTCL(hw->port),
3035 			    pf->offset_loaded,
3036 			    &pf->internal_stats_offset.tx_broadcast,
3037 			    &pf->internal_stats.tx_broadcast);
3038 
3039 	/* exclude CRC size */
3040 	pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
3041 		pf->internal_stats.rx_multicast +
3042 		pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;
3043 
3044 	/* Get statistics of struct i40e_eth_stats */
3045 	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
3046 			    I40E_GLPRT_GORCL(hw->port),
3047 			    pf->offset_loaded, &os->eth.rx_bytes,
3048 			    &ns->eth.rx_bytes);
3049 	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
3050 			    I40E_GLPRT_UPRCL(hw->port),
3051 			    pf->offset_loaded, &os->eth.rx_unicast,
3052 			    &ns->eth.rx_unicast);
3053 	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
3054 			    I40E_GLPRT_MPRCL(hw->port),
3055 			    pf->offset_loaded, &os->eth.rx_multicast,
3056 			    &ns->eth.rx_multicast);
3057 	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
3058 			    I40E_GLPRT_BPRCL(hw->port),
3059 			    pf->offset_loaded, &os->eth.rx_broadcast,
3060 			    &ns->eth.rx_broadcast);
3061 	/* Workaround: CRC size should not be included in byte statistics,
3062 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3063 	 * packet.
3064 	 */
3065 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3066 		ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3067 
3068 	/* exclude internal rx bytes
3069 	 * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
3070 	 * I40E_GLPRT_GORCH[H/L], so there is a small window that can cause a
3071 	 * negative value.
3072 	 * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L].
3073 	 */
3074 	if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
3075 		ns->eth.rx_bytes = 0;
3076 	else
3077 		ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
3078 
3079 	if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
3080 		ns->eth.rx_unicast = 0;
3081 	else
3082 		ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
3083 
3084 	if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
3085 		ns->eth.rx_multicast = 0;
3086 	else
3087 		ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
3088 
3089 	if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
3090 		ns->eth.rx_broadcast = 0;
3091 	else
3092 		ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
3093 
3094 	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
3095 			    pf->offset_loaded, &os->eth.rx_discards,
3096 			    &ns->eth.rx_discards);
3097 	/* GLPRT_REPC not supported */
3098 	/* GLPRT_RMPC not supported */
3099 	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
3100 			    pf->offset_loaded,
3101 			    &os->eth.rx_unknown_protocol,
3102 			    &ns->eth.rx_unknown_protocol);
3103 	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
3104 			    I40E_GLPRT_GOTCL(hw->port),
3105 			    pf->offset_loaded, &os->eth.tx_bytes,
3106 			    &ns->eth.tx_bytes);
3107 	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
3108 			    I40E_GLPRT_UPTCL(hw->port),
3109 			    pf->offset_loaded, &os->eth.tx_unicast,
3110 			    &ns->eth.tx_unicast);
3111 	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
3112 			    I40E_GLPRT_MPTCL(hw->port),
3113 			    pf->offset_loaded, &os->eth.tx_multicast,
3114 			    &ns->eth.tx_multicast);
3115 	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3116 			    I40E_GLPRT_BPTCL(hw->port),
3117 			    pf->offset_loaded, &os->eth.tx_broadcast,
3118 			    &ns->eth.tx_broadcast);
3119 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3120 		ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3121 
3122 	/* exclude internal tx bytes
3123 	 * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before
3124 	 * I40E_GLPRT_GOTCH[H/L], so there is a small window that can cause a
3125 	 * negative value.
3126 	 * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L].
3127 	 */
3128 	if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3129 		ns->eth.tx_bytes = 0;
3130 	else
3131 		ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3132 
3133 	if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3134 		ns->eth.tx_unicast = 0;
3135 	else
3136 		ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3137 
3138 	if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3139 		ns->eth.tx_multicast = 0;
3140 	else
3141 		ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3142 
3143 	if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3144 		ns->eth.tx_broadcast = 0;
3145 	else
3146 		ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3147 
3148 	/* GLPRT_TEPC not supported */
3149 
3150 	/* additional port specific stats */
3151 	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3152 			    pf->offset_loaded, &os->tx_dropped_link_down,
3153 			    &ns->tx_dropped_link_down);
3154 	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3155 			    pf->offset_loaded, &os->crc_errors,
3156 			    &ns->crc_errors);
3157 	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3158 			    pf->offset_loaded, &os->illegal_bytes,
3159 			    &ns->illegal_bytes);
3160 	/* GLPRT_ERRBC not supported */
3161 	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3162 			    pf->offset_loaded, &os->mac_local_faults,
3163 			    &ns->mac_local_faults);
3164 	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3165 			    pf->offset_loaded, &os->mac_remote_faults,
3166 			    &ns->mac_remote_faults);
3167 	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3168 			    pf->offset_loaded, &os->rx_length_errors,
3169 			    &ns->rx_length_errors);
3170 	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3171 			    pf->offset_loaded, &os->link_xon_rx,
3172 			    &ns->link_xon_rx);
3173 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3174 			    pf->offset_loaded, &os->link_xoff_rx,
3175 			    &ns->link_xoff_rx);
3176 	for (i = 0; i < 8; i++) {
3177 		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3178 				    pf->offset_loaded,
3179 				    &os->priority_xon_rx[i],
3180 				    &ns->priority_xon_rx[i]);
3181 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3182 				    pf->offset_loaded,
3183 				    &os->priority_xoff_rx[i],
3184 				    &ns->priority_xoff_rx[i]);
3185 	}
3186 	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3187 			    pf->offset_loaded, &os->link_xon_tx,
3188 			    &ns->link_xon_tx);
3189 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3190 			    pf->offset_loaded, &os->link_xoff_tx,
3191 			    &ns->link_xoff_tx);
3192 	for (i = 0; i < 8; i++) {
3193 		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3194 				    pf->offset_loaded,
3195 				    &os->priority_xon_tx[i],
3196 				    &ns->priority_xon_tx[i]);
3197 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3198 				    pf->offset_loaded,
3199 				    &os->priority_xoff_tx[i],
3200 				    &ns->priority_xoff_tx[i]);
3201 		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3202 				    pf->offset_loaded,
3203 				    &os->priority_xon_2_xoff[i],
3204 				    &ns->priority_xon_2_xoff[i]);
3205 	}
3206 	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3207 			    I40E_GLPRT_PRC64L(hw->port),
3208 			    pf->offset_loaded, &os->rx_size_64,
3209 			    &ns->rx_size_64);
3210 	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3211 			    I40E_GLPRT_PRC127L(hw->port),
3212 			    pf->offset_loaded, &os->rx_size_127,
3213 			    &ns->rx_size_127);
3214 	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3215 			    I40E_GLPRT_PRC255L(hw->port),
3216 			    pf->offset_loaded, &os->rx_size_255,
3217 			    &ns->rx_size_255);
3218 	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3219 			    I40E_GLPRT_PRC511L(hw->port),
3220 			    pf->offset_loaded, &os->rx_size_511,
3221 			    &ns->rx_size_511);
3222 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3223 			    I40E_GLPRT_PRC1023L(hw->port),
3224 			    pf->offset_loaded, &os->rx_size_1023,
3225 			    &ns->rx_size_1023);
3226 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3227 			    I40E_GLPRT_PRC1522L(hw->port),
3228 			    pf->offset_loaded, &os->rx_size_1522,
3229 			    &ns->rx_size_1522);
3230 	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3231 			    I40E_GLPRT_PRC9522L(hw->port),
3232 			    pf->offset_loaded, &os->rx_size_big,
3233 			    &ns->rx_size_big);
3234 	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3235 			    pf->offset_loaded, &os->rx_undersize,
3236 			    &ns->rx_undersize);
3237 	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3238 			    pf->offset_loaded, &os->rx_fragments,
3239 			    &ns->rx_fragments);
3240 	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3241 			    pf->offset_loaded, &os->rx_oversize,
3242 			    &ns->rx_oversize);
3243 	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3244 			    pf->offset_loaded, &os->rx_jabber,
3245 			    &ns->rx_jabber);
3246 	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3247 			    I40E_GLPRT_PTC64L(hw->port),
3248 			    pf->offset_loaded, &os->tx_size_64,
3249 			    &ns->tx_size_64);
3250 	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3251 			    I40E_GLPRT_PTC127L(hw->port),
3252 			    pf->offset_loaded, &os->tx_size_127,
3253 			    &ns->tx_size_127);
3254 	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3255 			    I40E_GLPRT_PTC255L(hw->port),
3256 			    pf->offset_loaded, &os->tx_size_255,
3257 			    &ns->tx_size_255);
3258 	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3259 			    I40E_GLPRT_PTC511L(hw->port),
3260 			    pf->offset_loaded, &os->tx_size_511,
3261 			    &ns->tx_size_511);
3262 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3263 			    I40E_GLPRT_PTC1023L(hw->port),
3264 			    pf->offset_loaded, &os->tx_size_1023,
3265 			    &ns->tx_size_1023);
3266 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3267 			    I40E_GLPRT_PTC1522L(hw->port),
3268 			    pf->offset_loaded, &os->tx_size_1522,
3269 			    &ns->tx_size_1522);
3270 	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3271 			    I40E_GLPRT_PTC9522L(hw->port),
3272 			    pf->offset_loaded, &os->tx_size_big,
3273 			    &ns->tx_size_big);
3274 	i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3275 			   pf->offset_loaded,
3276 			   &os->fd_sb_match, &ns->fd_sb_match);
3277 	/* GLPRT_MSPDC not supported */
3278 	/* GLPRT_XEC not supported */
3279 
3280 	pf->offset_loaded = true;
3281 
3282 	if (pf->main_vsi)
3283 		i40e_update_vsi_stats(pf->main_vsi);
3284 }
3285 
3286 /* Get all statistics of a port */
3287 static int
3288 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3289 {
3290 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3291 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3292 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3293 	struct i40e_vsi *vsi;
3294 	unsigned i;
3295 
3296 	/* Read the stats registers to refresh pf->stats, then fill the rte_eth_stats struct */
3297 	i40e_read_stats_registers(pf, hw);
3298 
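	/* Rx packet and byte counts are taken from the main VSI statistics,
	 * while Tx counts and the error counters below come from the
	 * port-level counters in pf->stats.
	 */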
3299 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3300 			pf->main_vsi->eth_stats.rx_multicast +
3301 			pf->main_vsi->eth_stats.rx_broadcast -
3302 			pf->main_vsi->eth_stats.rx_discards;
3303 	stats->opackets = ns->eth.tx_unicast +
3304 			ns->eth.tx_multicast +
3305 			ns->eth.tx_broadcast;
3306 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3307 	stats->obytes   = ns->eth.tx_bytes;
3308 	stats->oerrors  = ns->eth.tx_errors +
3309 			pf->main_vsi->eth_stats.tx_errors;
3310 
3311 	/* Rx Errors */
3312 	stats->imissed  = ns->eth.rx_discards +
3313 			pf->main_vsi->eth_stats.rx_discards;
3314 	stats->ierrors  = ns->crc_errors +
3315 			ns->rx_length_errors + ns->rx_undersize +
3316 			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3317 
3318 	if (pf->vfs) {
3319 		for (i = 0; i < pf->vf_num; i++) {
3320 			vsi = pf->vfs[i].vsi;
3321 			i40e_update_vsi_stats(vsi);
3322 
3323 			stats->ipackets += (vsi->eth_stats.rx_unicast +
3324 					vsi->eth_stats.rx_multicast +
3325 					vsi->eth_stats.rx_broadcast -
3326 					vsi->eth_stats.rx_discards);
3327 			stats->ibytes   += vsi->eth_stats.rx_bytes;
3328 			stats->oerrors  += vsi->eth_stats.tx_errors;
3329 			stats->imissed  += vsi->eth_stats.rx_discards;
3330 		}
3331 	}
3332 
3333 	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3334 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3335 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3336 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3337 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3338 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3339 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3340 		    ns->eth.rx_unknown_protocol);
3341 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3342 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3343 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3344 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3345 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3346 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3347 
3348 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3349 		    ns->tx_dropped_link_down);
3350 	PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3351 	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3352 		    ns->illegal_bytes);
3353 	PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3354 	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3355 		    ns->mac_local_faults);
3356 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3357 		    ns->mac_remote_faults);
3358 	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3359 		    ns->rx_length_errors);
3360 	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3361 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3362 	for (i = 0; i < 8; i++) {
3363 		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3364 				i, ns->priority_xon_rx[i]);
3365 		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3366 				i, ns->priority_xoff_rx[i]);
3367 	}
3368 	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3369 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3370 	for (i = 0; i < 8; i++) {
3371 		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3372 				i, ns->priority_xon_tx[i]);
3373 		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3374 				i, ns->priority_xoff_tx[i]);
3375 		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3376 				i, ns->priority_xon_2_xoff[i]);
3377 	}
3378 	PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3379 	PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3380 	PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3381 	PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3382 	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3383 	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3384 	PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3385 	PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3386 	PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3387 	PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3388 	PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3389 	PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3390 	PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3391 	PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3392 	PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3393 	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3394 	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3395 	PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3396 	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3397 			ns->mac_short_packet_dropped);
3398 	PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3399 		    ns->checksum_error);
3400 	PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3401 	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3402 	return 0;
3403 }
3404 
3405 /* Reset the statistics */
3406 static int
3407 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3408 {
3409 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3410 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3411 
3412 	/* Mark PF and VSI stats to update the offset, aka "reset" */
3413 	pf->offset_loaded = false;
3414 	if (pf->main_vsi)
3415 		pf->main_vsi->offset_loaded = false;
3416 
3417 	/* read the stats, reading current register values into offset */
3418 	i40e_read_stats_registers(pf, hw);
3419 
3420 	return 0;
3421 }
3422 
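/* Total number of extended stats: the basic ethernet stats, the HW port
 * stats, and the per-priority (8 traffic classes) Rx and Tx queue stats.
 */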
3423 static uint32_t
3424 i40e_xstats_calc_num(void)
3425 {
3426 	return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3427 		(I40E_NB_RXQ_PRIO_XSTATS * 8) +
3428 		(I40E_NB_TXQ_PRIO_XSTATS * 8);
3429 }
3430 
3431 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3432 				     struct rte_eth_xstat_name *xstats_names,
3433 				     __rte_unused unsigned limit)
3434 {
3435 	unsigned count = 0;
3436 	unsigned i, prio;
3437 
3438 	if (xstats_names == NULL)
3439 		return i40e_xstats_calc_num();
3440 
3441 	/* Note: limit is checked in rte_eth_xstats_get_names() */
3442 
3443 	/* Get stats from i40e_eth_stats struct */
3444 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3445 		strlcpy(xstats_names[count].name,
3446 			rte_i40e_stats_strings[i].name,
3447 			sizeof(xstats_names[count].name));
3448 		count++;
3449 	}
3450 
3451 	/* Get individual stats from the i40e_hw_port struct */
3452 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3453 		strlcpy(xstats_names[count].name,
3454 			rte_i40e_hw_port_strings[i].name,
3455 			sizeof(xstats_names[count].name));
3456 		count++;
3457 	}
3458 
3459 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3460 		for (prio = 0; prio < 8; prio++) {
3461 			snprintf(xstats_names[count].name,
3462 				 sizeof(xstats_names[count].name),
3463 				 "rx_priority%u_%s", prio,
3464 				 rte_i40e_rxq_prio_strings[i].name);
3465 			count++;
3466 		}
3467 	}
3468 
3469 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3470 		for (prio = 0; prio < 8; prio++) {
3471 			snprintf(xstats_names[count].name,
3472 				 sizeof(xstats_names[count].name),
3473 				 "tx_priority%u_%s", prio,
3474 				 rte_i40e_txq_prio_strings[i].name);
3475 			count++;
3476 		}
3477 	}
3478 	return count;
3479 }
3480 
3481 static int
3482 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3483 		    unsigned n)
3484 {
3485 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3486 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3487 	unsigned i, count, prio;
3488 	struct i40e_hw_port_stats *hw_stats = &pf->stats;
3489 
3490 	count = i40e_xstats_calc_num();
3491 	if (n < count)
3492 		return count;
3493 
3494 	i40e_read_stats_registers(pf, hw);
3495 
3496 	if (xstats == NULL)
3497 		return 0;
3498 
3499 	count = 0;
3500 
3501 	/* Get stats from i40e_eth_stats struct */
3502 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3503 		xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3504 			rte_i40e_stats_strings[i].offset);
3505 		xstats[count].id = count;
3506 		count++;
3507 	}
3508 
3509 	/* Get individual stats from the i40e_hw_port struct */
3510 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3511 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3512 			rte_i40e_hw_port_strings[i].offset);
3513 		xstats[count].id = count;
3514 		count++;
3515 	}
3516 
3517 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3518 		for (prio = 0; prio < 8; prio++) {
3519 			xstats[count].value =
3520 				*(uint64_t *)(((char *)hw_stats) +
3521 				rte_i40e_rxq_prio_strings[i].offset +
3522 				(sizeof(uint64_t) * prio));
3523 			xstats[count].id = count;
3524 			count++;
3525 		}
3526 	}
3527 
3528 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3529 		for (prio = 0; prio < 8; prio++) {
3530 			xstats[count].value =
3531 				*(uint64_t *)(((char *)hw_stats) +
3532 				rte_i40e_txq_prio_strings[i].offset +
3533 				(sizeof(uint64_t) * prio));
3534 			xstats[count].id = count;
3535 			count++;
3536 		}
3537 	}
3538 
3539 	return count;
3540 }
3541 
3542 static int
3543 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3544 {
3545 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3546 	u32 full_ver;
3547 	u8 ver, patch;
3548 	u16 build;
3549 	int ret;
3550 
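	/* hw->nvm.oem_ver layout: OEM major version in bits 31:24, build
	 * number in bits 23:8, patch level in bits 7:0.
	 */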
3551 	full_ver = hw->nvm.oem_ver;
3552 	ver = (u8)(full_ver >> 24);
3553 	build = (u16)((full_ver >> 8) & 0xffff);
3554 	patch = (u8)(full_ver & 0xff);
3555 
3556 	ret = snprintf(fw_version, fw_size,
3557 		 "%d.%d%d 0x%08x %d.%d.%d",
3558 		 ((hw->nvm.version >> 12) & 0xf),
3559 		 ((hw->nvm.version >> 4) & 0xff),
3560 		 (hw->nvm.version & 0xf), hw->nvm.eetrack,
3561 		 ver, build, patch);
3562 
3563 	ret += 1; /* add the size of '\0' */
3564 	if (fw_size < (u32)ret)
3565 		return ret;
3566 	else
3567 		return 0;
3568 }
3569 
3570 /*
3571  * When using NVM 6.01 (for X710/XL710/XXV710) or 3.33 (for X722) or later,
3572  * the Rx data path does not hang if the FW LLDP agent is stopped.
3573  * Return true if LLDP needs to be stopped;
3574  * return false if LLDP cannot be disabled without blocking the Rx data path.
3575  */
3576 static bool
3577 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3578 {
3579 	double nvm_ver;
3580 	char ver_str[64] = {0};
3581 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3582 
3583 	i40e_fw_version_get(dev, ver_str, 64);
3584 	nvm_ver = atof(ver_str);
3585 	if ((hw->mac.type == I40E_MAC_X722 ||
3586 	     hw->mac.type == I40E_MAC_X722_VF) &&
3587 	     ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3588 		return true;
3589 	else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3590 		return true;
3591 
3592 	return false;
3593 }
3594 
3595 static int
3596 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3597 {
3598 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3599 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3600 	struct i40e_vsi *vsi = pf->main_vsi;
3601 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3602 
3603 	dev_info->max_rx_queues = vsi->nb_qps;
3604 	dev_info->max_tx_queues = vsi->nb_qps;
3605 	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3606 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3607 	dev_info->max_mac_addrs = vsi->max_macaddrs;
3608 	dev_info->max_vfs = pci_dev->max_vfs;
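	/* The maximum MTU is derived from the maximum frame length minus the
	 * L2 overhead accounted for by I40E_ETH_OVERHEAD.
	 */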
3609 	dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
3610 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3611 	dev_info->rx_queue_offload_capa = 0;
3612 	dev_info->rx_offload_capa =
3613 		DEV_RX_OFFLOAD_VLAN_STRIP |
3614 		DEV_RX_OFFLOAD_QINQ_STRIP |
3615 		DEV_RX_OFFLOAD_IPV4_CKSUM |
3616 		DEV_RX_OFFLOAD_UDP_CKSUM |
3617 		DEV_RX_OFFLOAD_TCP_CKSUM |
3618 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3619 		DEV_RX_OFFLOAD_KEEP_CRC |
3620 		DEV_RX_OFFLOAD_SCATTER |
3621 		DEV_RX_OFFLOAD_VLAN_EXTEND |
3622 		DEV_RX_OFFLOAD_VLAN_FILTER |
3623 		DEV_RX_OFFLOAD_JUMBO_FRAME |
3624 		DEV_RX_OFFLOAD_RSS_HASH;
3625 
3626 	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3627 	dev_info->tx_offload_capa =
3628 		DEV_TX_OFFLOAD_VLAN_INSERT |
3629 		DEV_TX_OFFLOAD_QINQ_INSERT |
3630 		DEV_TX_OFFLOAD_IPV4_CKSUM |
3631 		DEV_TX_OFFLOAD_UDP_CKSUM |
3632 		DEV_TX_OFFLOAD_TCP_CKSUM |
3633 		DEV_TX_OFFLOAD_SCTP_CKSUM |
3634 		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3635 		DEV_TX_OFFLOAD_TCP_TSO |
3636 		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3637 		DEV_TX_OFFLOAD_GRE_TNL_TSO |
3638 		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3639 		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3640 		DEV_TX_OFFLOAD_MULTI_SEGS |
3641 		dev_info->tx_queue_offload_capa;
3642 	dev_info->dev_capa =
3643 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3644 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3645 
3646 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3647 						sizeof(uint32_t);
3648 	dev_info->reta_size = pf->hash_lut_size;
3649 	dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3650 
3651 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3652 		.rx_thresh = {
3653 			.pthresh = I40E_DEFAULT_RX_PTHRESH,
3654 			.hthresh = I40E_DEFAULT_RX_HTHRESH,
3655 			.wthresh = I40E_DEFAULT_RX_WTHRESH,
3656 		},
3657 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3658 		.rx_drop_en = 0,
3659 		.offloads = 0,
3660 	};
3661 
3662 	dev_info->default_txconf = (struct rte_eth_txconf) {
3663 		.tx_thresh = {
3664 			.pthresh = I40E_DEFAULT_TX_PTHRESH,
3665 			.hthresh = I40E_DEFAULT_TX_HTHRESH,
3666 			.wthresh = I40E_DEFAULT_TX_WTHRESH,
3667 		},
3668 		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3669 		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3670 		.offloads = 0,
3671 	};
3672 
3673 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3674 		.nb_max = I40E_MAX_RING_DESC,
3675 		.nb_min = I40E_MIN_RING_DESC,
3676 		.nb_align = I40E_ALIGN_RING_DESC,
3677 	};
3678 
3679 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3680 		.nb_max = I40E_MAX_RING_DESC,
3681 		.nb_min = I40E_MIN_RING_DESC,
3682 		.nb_align = I40E_ALIGN_RING_DESC,
3683 		.nb_seg_max = I40E_TX_MAX_SEG,
3684 		.nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3685 	};
3686 
3687 	if (pf->flags & I40E_FLAG_VMDQ) {
3688 		dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3689 		dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3690 		dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3691 						pf->max_nb_vmdq_vsi;
3692 		dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3693 		dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3694 		dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3695 	}
3696 
3697 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3698 		/* For XL710 */
3699 		dev_info->speed_capa = ETH_LINK_SPEED_40G;
3700 		dev_info->default_rxportconf.nb_queues = 2;
3701 		dev_info->default_txportconf.nb_queues = 2;
3702 		if (dev->data->nb_rx_queues == 1)
3703 			dev_info->default_rxportconf.ring_size = 2048;
3704 		else
3705 			dev_info->default_rxportconf.ring_size = 1024;
3706 		if (dev->data->nb_tx_queues == 1)
3707 			dev_info->default_txportconf.ring_size = 1024;
3708 		else
3709 			dev_info->default_txportconf.ring_size = 512;
3710 
3711 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3712 		/* For XXV710 */
3713 		dev_info->speed_capa = ETH_LINK_SPEED_25G;
3714 		dev_info->default_rxportconf.nb_queues = 1;
3715 		dev_info->default_txportconf.nb_queues = 1;
3716 		dev_info->default_rxportconf.ring_size = 256;
3717 		dev_info->default_txportconf.ring_size = 256;
3718 	} else {
3719 		/* For X710 */
3720 		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3721 		dev_info->default_rxportconf.nb_queues = 1;
3722 		dev_info->default_txportconf.nb_queues = 1;
3723 		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3724 			dev_info->default_rxportconf.ring_size = 512;
3725 			dev_info->default_txportconf.ring_size = 256;
3726 		} else {
3727 			dev_info->default_rxportconf.ring_size = 256;
3728 			dev_info->default_txportconf.ring_size = 256;
3729 		}
3730 	}
3731 	dev_info->default_rxportconf.burst_size = 32;
3732 	dev_info->default_txportconf.burst_size = 32;
3733 
3734 	return 0;
3735 }
3736 
3737 static int
3738 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3739 {
3740 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3741 	struct i40e_vsi *vsi = pf->main_vsi;
3742 	PMD_INIT_FUNC_TRACE();
3743 
3744 	if (on)
3745 		return i40e_vsi_add_vlan(vsi, vlan_id);
3746 	else
3747 		return i40e_vsi_delete_vlan(vsi, vlan_id);
3748 }
3749 
3750 static int
3751 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3752 				enum rte_vlan_type vlan_type,
3753 				uint16_t tpid, int qinq)
3754 {
3755 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3756 	uint64_t reg_r = 0;
3757 	uint64_t reg_w = 0;
3758 	uint16_t reg_id = 3;
3759 	int ret;
3760 
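	/* Select which L2TAGCTRL register to program: index 2 holds the outer
	 * tag ethertype when QinQ is enabled, index 3 holds the single/inner
	 * tag ethertype otherwise.
	 */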
3761 	if (qinq) {
3762 		if (vlan_type == ETH_VLAN_TYPE_OUTER)
3763 			reg_id = 2;
3764 	}
3765 
3766 	ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3767 					  &reg_r, NULL);
3768 	if (ret != I40E_SUCCESS) {
3769 		PMD_DRV_LOG(ERR,
3770 			   "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3771 			   reg_id);
3772 		return -EIO;
3773 	}
3774 	PMD_DRV_LOG(DEBUG,
3775 		    "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3776 		    reg_id, reg_r);
3777 
3778 	reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3779 	reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3780 	if (reg_r == reg_w) {
3781 		PMD_DRV_LOG(DEBUG, "No need to write");
3782 		return 0;
3783 	}
3784 
3785 	ret = i40e_aq_debug_write_global_register(hw,
3786 					   I40E_GL_SWT_L2TAGCTRL(reg_id),
3787 					   reg_w, NULL);
3788 	if (ret != I40E_SUCCESS) {
3789 		PMD_DRV_LOG(ERR,
3790 			    "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3791 			    reg_id);
3792 		return -EIO;
3793 	}
3794 	PMD_DRV_LOG(DEBUG,
3795 		    "Global register 0x%08x is changed with value 0x%08x",
3796 		    I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3797 
3798 	return 0;
3799 }
3800 
3801 static int
3802 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3803 		   enum rte_vlan_type vlan_type,
3804 		   uint16_t tpid)
3805 {
3806 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3807 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3808 	int qinq = dev->data->dev_conf.rxmode.offloads &
3809 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
3810 	int ret = 0;
3811 
3812 	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3813 	     vlan_type != ETH_VLAN_TYPE_OUTER) ||
3814 	    (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3815 		PMD_DRV_LOG(ERR,
3816 			    "Unsupported vlan type.");
3817 		return -EINVAL;
3818 	}
3819 
3820 	if (pf->support_multi_driver) {
3821 		PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3822 		return -ENOTSUP;
3823 	}
3824 
3825 	/* The ability to handle 802.1ad frames was added in NVM API 1.7 */
3826 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3827 		if (qinq) {
3828 			if (vlan_type == ETH_VLAN_TYPE_OUTER)
3829 				hw->first_tag = rte_cpu_to_le_16(tpid);
3830 			else if (vlan_type == ETH_VLAN_TYPE_INNER)
3831 				hw->second_tag = rte_cpu_to_le_16(tpid);
3832 		} else {
3833 			if (vlan_type == ETH_VLAN_TYPE_OUTER)
3834 				hw->second_tag = rte_cpu_to_le_16(tpid);
3835 		}
3836 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3837 		if (ret != I40E_SUCCESS) {
3838 			PMD_DRV_LOG(ERR,
3839 				    "Set switch config failed aq_err: %d",
3840 				    hw->aq.asq_last_status);
3841 			ret = -EIO;
3842 		}
3843 	} else
3844 		/* If NVM API < 1.7, keep the register setting */
3845 		ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3846 						      tpid, qinq);
3847 
3848 	return ret;
3849 }
3850 
3851 static int
3852 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3853 {
3854 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3855 	struct i40e_vsi *vsi = pf->main_vsi;
3856 	struct rte_eth_rxmode *rxmode;
3857 
3858 	if (mask & ETH_QINQ_STRIP_MASK) {
3859 		PMD_DRV_LOG(ERR, "Strip qinq is not supported.");
3860 		return -ENOTSUP;
3861 	}
3862 
3863 	rxmode = &dev->data->dev_conf.rxmode;
3864 	if (mask & ETH_VLAN_FILTER_MASK) {
3865 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3866 			i40e_vsi_config_vlan_filter(vsi, TRUE);
3867 		else
3868 			i40e_vsi_config_vlan_filter(vsi, FALSE);
3869 	}
3870 
3871 	if (mask & ETH_VLAN_STRIP_MASK) {
3872 		/* Enable or disable VLAN stripping */
3873 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3874 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
3875 		else
3876 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
3877 	}
3878 
3879 	if (mask & ETH_VLAN_EXTEND_MASK) {
3880 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
3881 			i40e_vsi_config_double_vlan(vsi, TRUE);
3882 			/* Set global registers with default ethertype. */
3883 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3884 					   RTE_ETHER_TYPE_VLAN);
3885 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3886 					   RTE_ETHER_TYPE_VLAN);
3887 		} else {
3888 			i40e_vsi_config_double_vlan(vsi, FALSE);
3889 		}
3890 	}
3891 
3892 	return 0;
3893 }
3894 
3895 static void
3896 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3897 			  __rte_unused uint16_t queue,
3898 			  __rte_unused int on)
3899 {
3900 	PMD_INIT_FUNC_TRACE();
3901 }
3902 
3903 static int
3904 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3905 {
3906 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3907 	struct i40e_vsi *vsi = pf->main_vsi;
3908 	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3909 	struct i40e_vsi_vlan_pvid_info info;
3910 
3911 	memset(&info, 0, sizeof(info));
3912 	info.on = on;
3913 	if (info.on)
3914 		info.config.pvid = pvid;
3915 	else {
3916 		info.config.reject.tagged =
3917 				data->dev_conf.txmode.hw_vlan_reject_tagged;
3918 		info.config.reject.untagged =
3919 				data->dev_conf.txmode.hw_vlan_reject_untagged;
3920 	}
3921 
3922 	return i40e_vsi_vlan_pvid_set(vsi, &info);
3923 }
3924 
3925 static int
3926 i40e_dev_led_on(struct rte_eth_dev *dev)
3927 {
3928 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3929 	uint32_t mode = i40e_led_get(hw);
3930 
3931 	if (mode == 0)
3932 		i40e_led_set(hw, 0xf, true); /* 0xf means the LED is always on */
3933 
3934 	return 0;
3935 }
3936 
3937 static int
3938 i40e_dev_led_off(struct rte_eth_dev *dev)
3939 {
3940 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3941 	uint32_t mode = i40e_led_get(hw);
3942 
3943 	if (mode != 0)
3944 		i40e_led_set(hw, 0, false);
3945 
3946 	return 0;
3947 }
3948 
3949 static int
3950 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3951 {
3952 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3953 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3954 
3955 	fc_conf->pause_time = pf->fc_conf.pause_time;
3956 
3957 	/* Read out from the registers, in case they were modified by another port */
3958 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3959 		I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3960 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3961 		I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3962 
3963 	fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3964 	fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3965 
3966 	/* Return the current mode according to the actual setting */
3967 	switch (hw->fc.current_mode) {
3968 	case I40E_FC_FULL:
3969 		fc_conf->mode = RTE_FC_FULL;
3970 		break;
3971 	case I40E_FC_TX_PAUSE:
3972 		fc_conf->mode = RTE_FC_TX_PAUSE;
3973 		break;
3974 	case I40E_FC_RX_PAUSE:
3975 		fc_conf->mode = RTE_FC_RX_PAUSE;
3976 		break;
3977 	case I40E_FC_NONE:
3978 	default:
3979 		fc_conf->mode = RTE_FC_NONE;
3980 	};
3981 
3982 	return 0;
3983 }
3984 
3985 static int
3986 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3987 {
3988 	uint32_t mflcn_reg, fctrl_reg, reg;
3989 	uint32_t max_high_water;
3990 	uint8_t i, aq_failure;
3991 	int err;
3992 	struct i40e_hw *hw;
3993 	struct i40e_pf *pf;
3994 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3995 		[RTE_FC_NONE] = I40E_FC_NONE,
3996 		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3997 		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3998 		[RTE_FC_FULL] = I40E_FC_FULL
3999 	};
4000 
4001 	/* The high_water field in rte_eth_fc_conf is expressed in kilobytes */
4002 
4003 	max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
4004 	if ((fc_conf->high_water > max_high_water) ||
4005 			(fc_conf->high_water < fc_conf->low_water)) {
4006 		PMD_INIT_LOG(ERR,
4007 			"Invalid high/low water setup value in KB, High_water must be <= %d.",
4008 			max_high_water);
4009 		return -EINVAL;
4010 	}
4011 
4012 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4013 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4014 	hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
4015 
4016 	pf->fc_conf.pause_time = fc_conf->pause_time;
4017 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
4018 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
4019 
4020 	PMD_INIT_FUNC_TRACE();
4021 
4022 	/* All the link flow control related enable/disable register
4023 	 * configuration is handled by the F/W
4024 	 */
4025 	err = i40e_set_fc(hw, &aq_failure, true);
4026 	if (err < 0)
4027 		return -ENOSYS;
4028 
4029 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
4030 		/* Configure flow control refresh threshold,
4031 		 * the value for stat_tx_pause_refresh_timer[8]
4032 		 * is used for global pause operation.
4033 		 */
4034 
4035 		I40E_WRITE_REG(hw,
4036 			       I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
4037 			       pf->fc_conf.pause_time);
4038 
4039 		/* configure the timer value included in transmitted pause
4040 		 * frame,
4041 		 * the value for stat_tx_pause_quanta[8] is used for global
4042 		 * pause operation
4043 		 */
4044 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
4045 			       pf->fc_conf.pause_time);
4046 
4047 		fctrl_reg = I40E_READ_REG(hw,
4048 					  I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
4049 
4050 		if (fc_conf->mac_ctrl_frame_fwd != 0)
4051 			fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
4052 		else
4053 			fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
4054 
4055 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
4056 			       fctrl_reg);
4057 	} else {
4058 		/* Configure pause time (2 TCs per register) */
4059 		reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
4060 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
4061 			I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
4062 
4063 		/* Configure flow control refresh threshold value */
4064 		I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
4065 			       pf->fc_conf.pause_time / 2);
4066 
4067 		mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4068 
4069 		/* Set or clear MFLCN.PMCF & MFLCN.DPF bits
4070 		 * depending on the configuration
4071 		 */
4072 		if (fc_conf->mac_ctrl_frame_fwd != 0) {
4073 			mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
4074 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
4075 		} else {
4076 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
4077 			mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
4078 		}
4079 
4080 		I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
4081 	}
4082 
4083 	if (!pf->support_multi_driver) {
4084 		/* Configure the watermarks both in packets (PHW/PLW) and in bytes (GHW/GLW) */
4085 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
4086 				 (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4087 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4088 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
4089 				  (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4090 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
4091 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
4092 				  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
4093 				  << I40E_KILOSHIFT);
4094 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
4095 				   pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
4096 				   << I40E_KILOSHIFT);
4097 	} else {
4098 		PMD_DRV_LOG(ERR,
4099 			    "Water marker configuration is not supported.");
4100 	}
4101 
4102 	I40E_WRITE_FLUSH(hw);
4103 
4104 	return 0;
4105 }
4106 
4107 static int
4108 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
4109 			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
4110 {
4111 	PMD_INIT_FUNC_TRACE();
4112 
4113 	return -ENOSYS;
4114 }
4115 
4116 /* Add a MAC address, and update filters */
4117 static int
4118 i40e_macaddr_add(struct rte_eth_dev *dev,
4119 		 struct rte_ether_addr *mac_addr,
4120 		 __rte_unused uint32_t index,
4121 		 uint32_t pool)
4122 {
4123 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4124 	struct i40e_mac_filter_info mac_filter;
4125 	struct i40e_vsi *vsi;
4126 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4127 	int ret;
4128 
4129 	/* If VMDQ not enabled or configured, return */
4130 	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4131 			  !pf->nb_cfg_vmdq_vsi)) {
4132 		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4133 			pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4134 			pool);
4135 		return -ENOTSUP;
4136 	}
4137 
4138 	if (pool > pf->nb_cfg_vmdq_vsi) {
4139 		PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4140 				pool, pf->nb_cfg_vmdq_vsi);
4141 		return -EINVAL;
4142 	}
4143 
4144 	rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
4145 	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4146 		mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4147 	else
4148 		mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
4149 
4150 	if (pool == 0)
4151 		vsi = pf->main_vsi;
4152 	else
4153 		vsi = pf->vmdq[pool - 1].vsi;
4154 
4155 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
4156 	if (ret != I40E_SUCCESS) {
4157 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4158 		return -ENODEV;
4159 	}
4160 	return 0;
4161 }
4162 
4163 /* Remove a MAC address, and update filters */
4164 static void
4165 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4166 {
4167 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4168 	struct i40e_vsi *vsi;
4169 	struct rte_eth_dev_data *data = dev->data;
4170 	struct rte_ether_addr *macaddr;
4171 	int ret;
4172 	uint32_t i;
4173 	uint64_t pool_sel;
4174 
4175 	macaddr = &(data->mac_addrs[index]);
4176 
4177 	pool_sel = dev->data->mac_pool_sel[index];
4178 
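	/* mac_pool_sel is a bitmap of the pools this address was added to:
	 * bit 0 selects the main VSI, bit i (i > 0) selects VMDQ pool i.
	 */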
4179 	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4180 		if (pool_sel & (1ULL << i)) {
4181 			if (i == 0)
4182 				vsi = pf->main_vsi;
4183 			else {
4184 				/* No VMDQ pool enabled or configured */
4185 				if (!(pf->flags & I40E_FLAG_VMDQ) ||
4186 					(i > pf->nb_cfg_vmdq_vsi)) {
4187 					PMD_DRV_LOG(ERR,
4188 						"No VMDQ pool enabled/configured");
4189 					return;
4190 				}
4191 				vsi = pf->vmdq[i - 1].vsi;
4192 			}
4193 			ret = i40e_vsi_delete_mac(vsi, macaddr);
4194 
4195 			if (ret) {
4196 				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4197 				return;
4198 			}
4199 		}
4200 	}
4201 }
4202 
4203 /* Set perfect match or hash match of MAC and VLAN for a VF */
4204 static int
4205 i40e_vf_mac_filter_set(struct i40e_pf *pf,
4206 		 struct rte_eth_mac_filter *filter,
4207 		 bool add)
4208 {
4209 	struct i40e_hw *hw;
4210 	struct i40e_mac_filter_info mac_filter;
4211 	struct rte_ether_addr old_mac;
4212 	struct rte_ether_addr *new_mac;
4213 	struct i40e_pf_vf *vf = NULL;
4214 	uint16_t vf_id;
4215 	int ret;
4216 
4217 	if (pf == NULL) {
4218 		PMD_DRV_LOG(ERR, "Invalid PF argument.");
4219 		return -EINVAL;
4220 	}
4221 	hw = I40E_PF_TO_HW(pf);
4222 
4223 	if (filter == NULL) {
4224 		PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
4225 		return -EINVAL;
4226 	}
4227 
4228 	new_mac = &filter->mac_addr;
4229 
4230 	if (rte_is_zero_ether_addr(new_mac)) {
4231 		PMD_DRV_LOG(ERR, "Invalid ethernet address.");
4232 		return -EINVAL;
4233 	}
4234 
4235 	vf_id = filter->dst_id;
4236 
4237 	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
4238 		PMD_DRV_LOG(ERR, "Invalid argument.");
4239 		return -EINVAL;
4240 	}
4241 	vf = &pf->vfs[vf_id];
4242 
4243 	if (add && rte_is_same_ether_addr(new_mac, &pf->dev_addr)) {
4244 		PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
4245 		return -EINVAL;
4246 	}
4247 
4248 	if (add) {
4249 		rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN);
4250 		rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
4251 				RTE_ETHER_ADDR_LEN);
4252 		rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
4253 				 RTE_ETHER_ADDR_LEN);
4254 
4255 		mac_filter.filter_type = filter->filter_type;
4256 		ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
4257 		if (ret != I40E_SUCCESS) {
4258 			PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
4259 			return -1;
4260 		}
4261 		rte_ether_addr_copy(new_mac, &pf->dev_addr);
4262 	} else {
4263 		rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
4264 				RTE_ETHER_ADDR_LEN);
4265 		ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
4266 		if (ret != I40E_SUCCESS) {
4267 			PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
4268 			return -1;
4269 		}
4270 
4271 		/* Clear device address as it has been removed */
4272 		if (rte_is_same_ether_addr(&pf->dev_addr, new_mac))
4273 			memset(&pf->dev_addr, 0, sizeof(struct rte_ether_addr));
4274 	}
4275 
4276 	return 0;
4277 }
4278 
4279 /* MAC filter handle */
4280 static int
4281 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
4282 		void *arg)
4283 {
4284 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4285 	struct rte_eth_mac_filter *filter;
4286 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4287 	int ret = I40E_NOT_SUPPORTED;
4288 
4289 	filter = (struct rte_eth_mac_filter *)(arg);
4290 
4291 	switch (filter_op) {
4292 	case RTE_ETH_FILTER_NOP:
4293 		ret = I40E_SUCCESS;
4294 		break;
4295 	case RTE_ETH_FILTER_ADD:
4296 		i40e_pf_disable_irq0(hw);
4297 		if (filter->is_vf)
4298 			ret = i40e_vf_mac_filter_set(pf, filter, 1);
4299 		i40e_pf_enable_irq0(hw);
4300 		break;
4301 	case RTE_ETH_FILTER_DELETE:
4302 		i40e_pf_disable_irq0(hw);
4303 		if (filter->is_vf)
4304 			ret = i40e_vf_mac_filter_set(pf, filter, 0);
4305 		i40e_pf_enable_irq0(hw);
4306 		break;
4307 	default:
4308 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
4309 		ret = I40E_ERR_PARAM;
4310 		break;
4311 	}
4312 
4313 	return ret;
4314 }
4315 
4316 static int
4317 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4318 {
4319 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4320 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4321 	uint32_t reg;
4322 	int ret;
4323 
4324 	if (!lut)
4325 		return -EINVAL;
4326 
4327 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4328 		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4329 					  vsi->type != I40E_VSI_SRIOV,
4330 					  lut, lut_size);
4331 		if (ret) {
4332 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4333 			return ret;
4334 		}
4335 	} else {
4336 		uint32_t *lut_dw = (uint32_t *)lut;
4337 		uint16_t i, lut_size_dw = lut_size / 4;
4338 
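		/* Without AQ support, read the LUT directly from the HLUT
		 * registers; each 32-bit register packs four one-byte entries.
		 */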
4339 		if (vsi->type == I40E_VSI_SRIOV) {
4340 			for (i = 0; i < lut_size_dw; i++) {
4341 				reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4342 				lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4343 			}
4344 		} else {
4345 			for (i = 0; i < lut_size_dw; i++)
4346 				lut_dw[i] = I40E_READ_REG(hw,
4347 							  I40E_PFQF_HLUT(i));
4348 		}
4349 	}
4350 
4351 	return 0;
4352 }
4353 
4354 int
4355 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4356 {
4357 	struct i40e_pf *pf;
4358 	struct i40e_hw *hw;
4359 	int ret;
4360 
4361 	if (!vsi || !lut)
4362 		return -EINVAL;
4363 
4364 	pf = I40E_VSI_TO_PF(vsi);
4365 	hw = I40E_VSI_TO_HW(vsi);
4366 
4367 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4368 		ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4369 					  vsi->type != I40E_VSI_SRIOV,
4370 					  lut, lut_size);
4371 		if (ret) {
4372 			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4373 			return ret;
4374 		}
4375 	} else {
4376 		uint32_t *lut_dw = (uint32_t *)lut;
4377 		uint16_t i, lut_size_dw = lut_size / 4;
4378 
4379 		if (vsi->type == I40E_VSI_SRIOV) {
4380 			for (i = 0; i < lut_size_dw; i++)
4381 				I40E_WRITE_REG(
4382 					hw,
4383 					I40E_VFQF_HLUT1(i, vsi->user_param),
4384 					lut_dw[i]);
4385 		} else {
4386 			for (i = 0; i < lut_size_dw; i++)
4387 				I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4388 					       lut_dw[i]);
4389 		}
4390 		I40E_WRITE_FLUSH(hw);
4391 	}
4392 
4393 	return 0;
4394 }
4395 
4396 static int
4397 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4398 			 struct rte_eth_rss_reta_entry64 *reta_conf,
4399 			 uint16_t reta_size)
4400 {
4401 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4402 	uint16_t i, lut_size = pf->hash_lut_size;
4403 	uint16_t idx, shift;
4404 	uint8_t *lut;
4405 	int ret;
4406 
4407 	if (reta_size != lut_size ||
4408 		reta_size > ETH_RSS_RETA_SIZE_512) {
4409 		PMD_DRV_LOG(ERR,
4410 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4411 			reta_size, lut_size);
4412 		return -EINVAL;
4413 	}
4414 
4415 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4416 	if (!lut) {
4417 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4418 		return -ENOMEM;
4419 	}
4420 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4421 	if (ret)
4422 		goto out;
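	/* Each rte_eth_rss_reta_entry64 group covers RTE_RETA_GROUP_SIZE
	 * entries: idx selects the group and shift selects the entry (and its
	 * mask bit) within it.
	 */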
4423 	for (i = 0; i < reta_size; i++) {
4424 		idx = i / RTE_RETA_GROUP_SIZE;
4425 		shift = i % RTE_RETA_GROUP_SIZE;
4426 		if (reta_conf[idx].mask & (1ULL << shift))
4427 			lut[i] = reta_conf[idx].reta[shift];
4428 	}
4429 	ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4430 
4431 	pf->adapter->rss_reta_updated = 1;
4432 
4433 out:
4434 	rte_free(lut);
4435 
4436 	return ret;
4437 }
4438 
4439 static int
4440 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4441 			struct rte_eth_rss_reta_entry64 *reta_conf,
4442 			uint16_t reta_size)
4443 {
4444 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4445 	uint16_t i, lut_size = pf->hash_lut_size;
4446 	uint16_t idx, shift;
4447 	uint8_t *lut;
4448 	int ret;
4449 
4450 	if (reta_size != lut_size ||
4451 		reta_size > ETH_RSS_RETA_SIZE_512) {
4452 		PMD_DRV_LOG(ERR,
4453 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4454 			reta_size, lut_size);
4455 		return -EINVAL;
4456 	}
4457 
4458 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4459 	if (!lut) {
4460 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4461 		return -ENOMEM;
4462 	}
4463 
4464 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4465 	if (ret)
4466 		goto out;
4467 	for (i = 0; i < reta_size; i++) {
4468 		idx = i / RTE_RETA_GROUP_SIZE;
4469 		shift = i % RTE_RETA_GROUP_SIZE;
4470 		if (reta_conf[idx].mask & (1ULL << shift))
4471 			reta_conf[idx].reta[shift] = lut[i];
4472 	}
4473 
4474 out:
4475 	rte_free(lut);
4476 
4477 	return ret;
4478 }
4479 
4480 /**
4481  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4482  * @hw:   pointer to the HW structure
4483  * @mem:  pointer to mem struct to fill out
4484  * @size: size of memory requested
4485  * @alignment: what to align the allocation to
4486  **/
4487 enum i40e_status_code
4488 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4489 			struct i40e_dma_mem *mem,
4490 			u64 size,
4491 			u32 alignment)
4492 {
4493 	const struct rte_memzone *mz = NULL;
4494 	char z_name[RTE_MEMZONE_NAMESIZE];
4495 
4496 	if (!mem)
4497 		return I40E_ERR_PARAM;
4498 
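	/* Reserve an IOVA-contiguous memzone with the requested alignment,
	 * bounded within a 2 MB page, on any NUMA socket.
	 */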
4499 	snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4500 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4501 			RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4502 	if (!mz)
4503 		return I40E_ERR_NO_MEMORY;
4504 
4505 	mem->size = size;
4506 	mem->va = mz->addr;
4507 	mem->pa = mz->iova;
4508 	mem->zone = (const void *)mz;
4509 	PMD_DRV_LOG(DEBUG,
4510 		"memzone %s allocated with physical address: %"PRIu64,
4511 		mz->name, mem->pa);
4512 
4513 	return I40E_SUCCESS;
4514 }
4515 
4516 /**
4517  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4518  * @hw:   pointer to the HW structure
4519  * @mem:  ptr to mem struct to free
4520  **/
4521 enum i40e_status_code
4522 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4523 		    struct i40e_dma_mem *mem)
4524 {
4525 	if (!mem)
4526 		return I40E_ERR_PARAM;
4527 
4528 	PMD_DRV_LOG(DEBUG,
4529 		"memzone %s to be freed with physical address: %"PRIu64,
4530 		((const struct rte_memzone *)mem->zone)->name, mem->pa);
4531 	rte_memzone_free((const struct rte_memzone *)mem->zone);
4532 	mem->zone = NULL;
4533 	mem->va = NULL;
4534 	mem->pa = (u64)0;
4535 
4536 	return I40E_SUCCESS;
4537 }
4538 
4539 /**
4540  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4541  * @hw:   pointer to the HW structure
4542  * @mem:  pointer to mem struct to fill out
4543  * @size: size of memory requested
4544  **/
4545 enum i40e_status_code
4546 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4547 			 struct i40e_virt_mem *mem,
4548 			 u32 size)
4549 {
4550 	if (!mem)
4551 		return I40E_ERR_PARAM;
4552 
4553 	mem->size = size;
4554 	mem->va = rte_zmalloc("i40e", size, 0);
4555 
4556 	if (mem->va)
4557 		return I40E_SUCCESS;
4558 	else
4559 		return I40E_ERR_NO_MEMORY;
4560 }
4561 
4562 /**
4563  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4564  * @hw:   pointer to the HW structure
4565  * @mem:  pointer to mem struct to free
4566  **/
4567 enum i40e_status_code
4568 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4569 		     struct i40e_virt_mem *mem)
4570 {
4571 	if (!mem)
4572 		return I40E_ERR_PARAM;
4573 
4574 	rte_free(mem->va);
4575 	mem->va = NULL;
4576 
4577 	return I40E_SUCCESS;
4578 }
4579 
4580 void
4581 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4582 {
4583 	rte_spinlock_init(&sp->spinlock);
4584 }
4585 
4586 void
4587 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4588 {
4589 	rte_spinlock_lock(&sp->spinlock);
4590 }
4591 
4592 void
4593 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4594 {
4595 	rte_spinlock_unlock(&sp->spinlock);
4596 }
4597 
4598 void
4599 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4600 {
4601 	return;
4602 }
4603 
4604 /**
4605  * Get the hardware capabilities, which will be parsed
4606  * and saved into struct i40e_hw.
4607  */
4608 static int
4609 i40e_get_cap(struct i40e_hw *hw)
4610 {
4611 	struct i40e_aqc_list_capabilities_element_resp *buf;
4612 	uint16_t len, size = 0;
4613 	int ret;
4614 
4615 	/* Calculate a buffer large enough to temporarily hold the response data */
4616 	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4617 						I40E_MAX_CAP_ELE_NUM;
4618 	buf = rte_zmalloc("i40e", len, 0);
4619 	if (!buf) {
4620 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4621 		return I40E_ERR_NO_MEMORY;
4622 	}
4623 
4624 	/* Get and parse the capabilities, then save them to hw */
4625 	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4626 			i40e_aqc_opc_list_func_capabilities, NULL);
4627 	if (ret != I40E_SUCCESS)
4628 		PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4629 
4630 	/* Free the temporary buffer after being used */
4631 	rte_free(buf);
4632 
4633 	return ret;
4634 }
4635 
4636 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF	4
4637 
4638 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4639 		const char *value,
4640 		void *opaque)
4641 {
4642 	struct i40e_pf *pf;
4643 	unsigned long num;
4644 	char *end;
4645 
4646 	pf = (struct i40e_pf *)opaque;
4647 	RTE_SET_USED(key);
4648 
4649 	errno = 0;
4650 	num = strtoul(value, &end, 0);
4651 	if (errno != 0 || end == value || *end != 0) {
4652 		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
4653 			    "kept the value = %hu", value, pf->vf_nb_qp_max);
4654 		return -(EINVAL);
4655 	}
4656 
4657 	if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4658 		pf->vf_nb_qp_max = (uint16_t)num;
4659 	else
4660 		/* Return 0 here so that a later valid instance of the same argument still takes effect */
4661 		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be "
4662 			    "a power of 2 and no greater than 16! Keeping "
4663 			    "the previous value = %hu", num, pf->vf_nb_qp_max);
4664 
4665 	return 0;
4666 }
4667 
4668 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4669 {
4670 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4671 	struct rte_kvargs *kvlist;
4672 	int kvargs_count;
4673 
4674 	/* Set the default number of queues per VF to 4 */
4675 	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4676 
4677 	if (dev->device->devargs == NULL)
4678 		return 0;
4679 
4680 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4681 	if (kvlist == NULL)
4682 		return -(EINVAL);
4683 
4684 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4685 	if (!kvargs_count) {
4686 		rte_kvargs_free(kvlist);
4687 		return 0;
4688 	}
4689 
4690 	if (kvargs_count > 1)
4691 		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
4692 			    "the first invalid or last valid one is used !",
4693 			    ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4694 
4695 	rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4696 			   i40e_pf_parse_vf_queue_number_handler, pf);
4697 
4698 	rte_kvargs_free(kvlist);
4699 
4700 	return 0;
4701 }
4702 
4703 static int
4704 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4705 {
4706 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4707 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4708 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4709 	uint16_t qp_count = 0, vsi_count = 0;
4710 
4711 	if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4712 		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4713 		return -EINVAL;
4714 	}
4715 
4716 	i40e_pf_config_vf_rxq_number(dev);
4717 
4718 	/* Initialize the link flow control (LFC) parameters */
4719 	pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4720 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4721 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4722 
4723 	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4724 	pf->max_num_vsi = hw->func_caps.num_vsis;
4725 	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4726 	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4727 
4728 	/* FDir queue/VSI allocation */
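	/* Queue pairs and VSIs are carved out in the order FDIR, LAN, VF and
	 * VMDq; qp_count and vsi_count accumulate the totals, which are
	 * checked against the hardware limits at the end.
	 */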
4729 	pf->fdir_qp_offset = 0;
4730 	if (hw->func_caps.fd) {
4731 		pf->flags |= I40E_FLAG_FDIR;
4732 		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4733 	} else {
4734 		pf->fdir_nb_qps = 0;
4735 	}
4736 	qp_count += pf->fdir_nb_qps;
4737 	vsi_count += 1;
4738 
4739 	/* LAN queue/VSI allocation */
4740 	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4741 	if (!hw->func_caps.rss) {
4742 		pf->lan_nb_qps = 1;
4743 	} else {
4744 		pf->flags |= I40E_FLAG_RSS;
4745 		if (hw->mac.type == I40E_MAC_X722)
4746 			pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4747 		pf->lan_nb_qps = pf->lan_nb_qp_max;
4748 	}
4749 	qp_count += pf->lan_nb_qps;
4750 	vsi_count += 1;
4751 
4752 	/* VF queue/VSI allocation */
4753 	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4754 	if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4755 		pf->flags |= I40E_FLAG_SRIOV;
4756 		pf->vf_nb_qps = pf->vf_nb_qp_max;
4757 		pf->vf_num = pci_dev->max_vfs;
4758 		PMD_DRV_LOG(DEBUG,
4759 			"%u VF VSIs, %u queues per VF VSI, in total %u queues",
4760 			pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4761 	} else {
4762 		pf->vf_nb_qps = 0;
4763 		pf->vf_num = 0;
4764 	}
4765 	qp_count += pf->vf_nb_qps * pf->vf_num;
4766 	vsi_count += pf->vf_num;
4767 
4768 	/* VMDq queue/VSI allocation */
4769 	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4770 	pf->vmdq_nb_qps = 0;
4771 	pf->max_nb_vmdq_vsi = 0;
4772 	if (hw->func_caps.vmdq) {
4773 		if (qp_count < hw->func_caps.num_tx_qp &&
4774 			vsi_count < hw->func_caps.num_vsis) {
4775 			pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4776 				qp_count) / pf->vmdq_nb_qp_max;
4777 
4778 			/* Limit the maximum number of VMDq vsi to the maximum
4779 			 * ethdev can support
4780 			 */
4781 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4782 				hw->func_caps.num_vsis - vsi_count);
4783 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4784 				ETH_64_POOLS);
4785 			if (pf->max_nb_vmdq_vsi) {
4786 				pf->flags |= I40E_FLAG_VMDQ;
4787 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4788 				PMD_DRV_LOG(DEBUG,
4789 					"%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4790 					pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4791 					pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4792 			} else {
4793 				PMD_DRV_LOG(INFO,
4794 					"No enough queues left for VMDq");
4795 			}
4796 		} else {
4797 			PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4798 		}
4799 	}
4800 	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4801 	vsi_count += pf->max_nb_vmdq_vsi;
4802 
4803 	if (hw->func_caps.dcb)
4804 		pf->flags |= I40E_FLAG_DCB;
4805 
4806 	if (qp_count > hw->func_caps.num_tx_qp) {
4807 		PMD_DRV_LOG(ERR,
4808 			"Failed to allocate %u queues, which exceeds the hardware maximum %u",
4809 			qp_count, hw->func_caps.num_tx_qp);
4810 		return -EINVAL;
4811 	}
4812 	if (vsi_count > hw->func_caps.num_vsis) {
4813 		PMD_DRV_LOG(ERR,
4814 			"Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4815 			vsi_count, hw->func_caps.num_vsis);
4816 		return -EINVAL;
4817 	}
4818 
4819 	return 0;
4820 }
4821 
4822 static int
4823 i40e_pf_get_switch_config(struct i40e_pf *pf)
4824 {
4825 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4826 	struct i40e_aqc_get_switch_config_resp *switch_config;
4827 	struct i40e_aqc_switch_config_element_resp *element;
4828 	uint16_t start_seid = 0, num_reported;
4829 	int ret;
4830 
4831 	switch_config = (struct i40e_aqc_get_switch_config_resp *)
4832 			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4833 	if (!switch_config) {
4834 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4835 		return -ENOMEM;
4836 	}
4837 
4838 	/* Get the switch configurations */
4839 	ret = i40e_aq_get_switch_config(hw, switch_config,
4840 		I40E_AQ_LARGE_BUF, &start_seid, NULL);
4841 	if (ret != I40E_SUCCESS) {
4842 		PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4843 		goto fail;
4844 	}
4845 	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4846 	if (num_reported != 1) { /* The number should be 1 */
4847 		PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4848 		goto fail;
4849 	}
4850 
4851 	/* Parse the switch configuration elements */
4852 	element = &(switch_config->element[0]);
4853 	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4854 		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4855 		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4856 	} else
4857 		PMD_DRV_LOG(INFO, "Unknown element type");
4858 
4859 fail:
4860 	rte_free(switch_config);
4861 
4862 	return ret;
4863 }
4864 
4865 static int
4866 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
4867 			uint32_t num)
4868 {
4869 	struct pool_entry *entry;
4870 
4871 	if (pool == NULL || num == 0)
4872 		return -EINVAL;
4873 
4874 	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4875 	if (entry == NULL) {
4876 		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4877 		return -ENOMEM;
4878 	}
4879 
4880 	/* Initialize the queue heap */
4881 	pool->num_free = num;
4882 	pool->num_alloc = 0;
4883 	pool->base = base;
4884 	LIST_INIT(&pool->alloc_list);
4885 	LIST_INIT(&pool->free_list);
4886 
4887 	/* Initialize the element to cover the whole range */
4888 	entry->base = 0;
4889 	entry->len = num;
4890 
4891 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
4892 	return 0;
4893 }
4894 
4895 static void
4896 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4897 {
4898 	struct pool_entry *entry, *next_entry;
4899 
4900 	if (pool == NULL)
4901 		return;
4902 
4903 	for (entry = LIST_FIRST(&pool->alloc_list);
4904 			entry && (next_entry = LIST_NEXT(entry, next), 1);
4905 			entry = next_entry) {
4906 		LIST_REMOVE(entry, next);
4907 		rte_free(entry);
4908 	}
4909 
4910 	for (entry = LIST_FIRST(&pool->free_list);
4911 			entry && (next_entry = LIST_NEXT(entry, next), 1);
4912 			entry = next_entry) {
4913 		LIST_REMOVE(entry, next);
4914 		rte_free(entry);
4915 	}
4916 
4917 	pool->num_free = 0;
4918 	pool->num_alloc = 0;
4919 	pool->base = 0;
4920 	LIST_INIT(&pool->alloc_list);
4921 	LIST_INIT(&pool->free_list);
4922 }
4923 
4924 static int
4925 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4926 		       uint32_t base)
4927 {
4928 	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4929 	uint32_t pool_offset;
4930 	int insert;
4931 
4932 	if (pool == NULL) {
4933 		PMD_DRV_LOG(ERR, "Invalid parameter");
4934 		return -EINVAL;
4935 	}
4936 
4937 	pool_offset = base - pool->base;
4938 	/* Lookup in alloc list */
4939 	LIST_FOREACH(entry, &pool->alloc_list, next) {
4940 		if (entry->base == pool_offset) {
4941 			valid_entry = entry;
4942 			LIST_REMOVE(entry, next);
4943 			break;
4944 		}
4945 	}
4946 
4947 	/* Not found, return */
4948 	if (valid_entry == NULL) {
4949 		PMD_DRV_LOG(ERR, "Failed to find entry");
4950 		return -EINVAL;
4951 	}
4952 
4953 	/**
4954 	 * Found it; move it to the free list and try to merge.
4955 	 * To make merging easier, the free list is kept sorted by base.
4956 	 * Find the adjacent previous and next entries.
4957 	 */
4958 	prev = next = NULL;
4959 	LIST_FOREACH(entry, &pool->free_list, next) {
4960 		if (entry->base > valid_entry->base) {
4961 			next = entry;
4962 			break;
4963 		}
4964 		prev = entry;
4965 	}
4966 
4967 	insert = 0;
4968 	/* Try to merge with the next one */
4969 	if (next != NULL) {
4970 		/* Merge with next one */
4971 		if (valid_entry->base + valid_entry->len == next->base) {
4972 			next->base = valid_entry->base;
4973 			next->len += valid_entry->len;
4974 			rte_free(valid_entry);
4975 			valid_entry = next;
4976 			insert = 1;
4977 		}
4978 	}
4979 
4980 	if (prev != NULL) {
4981 		/* Merge with previous one */
4982 		if (prev->base + prev->len == valid_entry->base) {
4983 			prev->len += valid_entry->len;
4984 			/* If it already merged with the next one, remove that node */
4985 			if (insert == 1) {
4986 				LIST_REMOVE(valid_entry, next);
4987 				rte_free(valid_entry);
4988 			} else {
4989 				rte_free(valid_entry);
4990 				insert = 1;
4991 			}
4992 		}
4993 	}
4994 
4995 	/* No adjacent entry to merge with, insert it */
4996 	if (insert == 0) {
4997 		if (prev != NULL)
4998 			LIST_INSERT_AFTER(prev, valid_entry, next);
4999 		else if (next != NULL)
5000 			LIST_INSERT_BEFORE(next, valid_entry, next);
5001 		else /* It's empty list, insert to head */
5002 			LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
5003 	}
5004 
5005 	pool->num_free += valid_entry->len;
5006 	pool->num_alloc -= valid_entry->len;
5007 
5008 	return 0;
5009 }
5010 
5011 static int
5012 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
5013 		       uint16_t num)
5014 {
5015 	struct pool_entry *entry, *valid_entry;
5016 
5017 	if (pool == NULL || num == 0) {
5018 		PMD_DRV_LOG(ERR, "Invalid parameter");
5019 		return -EINVAL;
5020 	}
5021 
5022 	if (pool->num_free < num) {
5023 		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
5024 			    num, pool->num_free);
5025 		return -ENOMEM;
5026 	}
5027 
5028 	valid_entry = NULL;
5029 	/* Look up in the free list and find the best-fit entry */
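	/* Best fit: take an exact-size match immediately; otherwise remember
	 * the smallest entry that is still large enough.
	 */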
5030 	LIST_FOREACH(entry, &pool->free_list, next) {
5031 		if (entry->len >= num) {
5032 			/* Find best one */
5033 			if (entry->len == num) {
5034 				valid_entry = entry;
5035 				break;
5036 			}
5037 			if (valid_entry == NULL || valid_entry->len > entry->len)
5038 				valid_entry = entry;
5039 		}
5040 	}
5041 
5042 	/* No entry found to satisfy the request, return */
5043 	if (valid_entry == NULL) {
5044 		PMD_DRV_LOG(ERR, "No valid entry found");
5045 		return -ENOMEM;
5046 	}
5047 	/**
5048 	 * The entry has exactly the requested number of queues;
5049 	 * remove it from the free_list.
5050 	 */
5051 	if (valid_entry->len == num) {
5052 		LIST_REMOVE(valid_entry, next);
5053 	} else {
5054 		/**
5055 		 * The entry has more queues than requested;
5056 		 * create a new entry for the alloc_list, then advance the
5057 		 * base and shrink the length of the entry in the free_list.
5058 		 */
5059 		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
5060 		if (entry == NULL) {
5061 			PMD_DRV_LOG(ERR,
5062 				"Failed to allocate memory for resource pool");
5063 			return -ENOMEM;
5064 		}
5065 		entry->base = valid_entry->base;
5066 		entry->len = num;
5067 		valid_entry->base += num;
5068 		valid_entry->len -= num;
5069 		valid_entry = entry;
5070 	}
5071 
5072 	/* Insert it into alloc list, not sorted */
5073 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
5074 
5075 	pool->num_free -= valid_entry->len;
5076 	pool->num_alloc += valid_entry->len;
5077 
5078 	return valid_entry->base + pool->base;
5079 }
5080 
5081 /**
5082  * bitmap_is_subset - Check whether src2 is subset of src1
5083  **/
5084 static inline int
5085 bitmap_is_subset(uint8_t src1, uint8_t src2)
5086 {
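	/* (src1 ^ src2) & src2 isolates the bits set in src2 but not in src1;
	 * the result is zero only when every bit of src2 is also set in src1.
	 */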
5087 	return !((src1 ^ src2) & src2);
5088 }
5089 
5090 static enum i40e_status_code
5091 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5092 {
5093 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5094 
5095 	/* If DCB is not supported, only default TC is supported */
5096 	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
5097 		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
5098 		return I40E_NOT_SUPPORTED;
5099 	}
5100 
5101 	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
5102 		PMD_DRV_LOG(ERR,
5103 			"Enabled TC map 0x%x not applicable to HW support 0x%x",
5104 			hw->func_caps.enabled_tcmap, enabled_tcmap);
5105 		return I40E_NOT_SUPPORTED;
5106 	}
5107 	return I40E_SUCCESS;
5108 }
5109 
5110 int
5111 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
5112 				struct i40e_vsi_vlan_pvid_info *info)
5113 {
5114 	struct i40e_hw *hw;
5115 	struct i40e_vsi_context ctxt;
5116 	uint8_t vlan_flags = 0;
5117 	int ret;
5118 
5119 	if (vsi == NULL || info == NULL) {
5120 		PMD_DRV_LOG(ERR, "invalid parameters");
5121 		return I40E_ERR_PARAM;
5122 	}
5123 
5124 	if (info->on) {
5125 		vsi->info.pvid = info->config.pvid;
5126 		/**
5127 		 * If PVID insertion is enabled, only tagged packets are
5128 		 * allowed to be sent out.
5129 		 */
5130 		vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5131 				I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5132 	} else {
5133 		vsi->info.pvid = 0;
5134 		if (info->config.reject.tagged == 0)
5135 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5136 
5137 		if (info->config.reject.untagged == 0)
5138 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5139 	}
5140 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5141 					I40E_AQ_VSI_PVLAN_MODE_MASK);
5142 	vsi->info.port_vlan_flags |= vlan_flags;
5143 	vsi->info.valid_sections =
5144 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5145 	memset(&ctxt, 0, sizeof(ctxt));
5146 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5147 	ctxt.seid = vsi->seid;
5148 
5149 	hw = I40E_VSI_TO_HW(vsi);
5150 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5151 	if (ret != I40E_SUCCESS)
5152 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
5153 
5154 	return ret;
5155 }
5156 
5157 static int
5158 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5159 {
5160 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5161 	int i, ret;
5162 	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5163 
5164 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5165 	if (ret != I40E_SUCCESS)
5166 		return ret;
5167 
5168 	if (!vsi->seid) {
5169 		PMD_DRV_LOG(ERR, "seid not valid");
5170 		return -EINVAL;
5171 	}
5172 
5173 	memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5174 	tc_bw_data.tc_valid_bits = enabled_tcmap;
5175 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5176 		tc_bw_data.tc_bw_credits[i] =
5177 			(enabled_tcmap & (1 << i)) ? 1 : 0;
5178 
5179 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5180 	if (ret != I40E_SUCCESS) {
5181 		PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5182 		return ret;
5183 	}
5184 
5185 	rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5186 					sizeof(vsi->info.qs_handle));
5187 	return I40E_SUCCESS;
5188 }
5189 
5190 static enum i40e_status_code
5191 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5192 				 struct i40e_aqc_vsi_properties_data *info,
5193 				 uint8_t enabled_tcmap)
5194 {
5195 	enum i40e_status_code ret;
5196 	int i, total_tc = 0;
5197 	uint16_t qpnum_per_tc, bsf, qp_idx;
5198 
5199 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5200 	if (ret != I40E_SUCCESS)
5201 		return ret;
5202 
5203 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5204 		if (enabled_tcmap & (1 << i))
5205 			total_tc++;
5206 	if (total_tc == 0)
5207 		total_tc = 1;
5208 	vsi->enabled_tc = enabled_tcmap;
5209 
5210 	/* Number of queues per enabled TC */
5211 	qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5212 	qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5213 	bsf = rte_bsf32(qpnum_per_tc);
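	/* qpnum_per_tc was floored to a power of two above (assuming
	 * i40e_align_floor rounds down to a power of two), so bsf holds
	 * log2 of the per-TC queue count used in tc_mapping below.
	 */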
5214 
5215 	/* Adjust the queue number to actual queues that can be applied */
5216 	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5217 		vsi->nb_qps = qpnum_per_tc * total_tc;
5218 
5219 	/**
5220 	 * Configure TC and queue mapping parameters: for each enabled TC,
5221 	 * allocate qpnum_per_tc queues to that traffic class; a disabled
5222 	 * TC is served by the default queue.
5223 	 */
5224 	qp_idx = 0;
5225 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5226 		if (vsi->enabled_tc & (1 << i)) {
5227 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5228 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5229 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5230 			qp_idx += qpnum_per_tc;
5231 		} else
5232 			info->tc_mapping[i] = 0;
5233 	}
5234 
5235 	/* Associate queue number with VSI */
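	/* SRIOV VSIs get an explicit per-queue (non-contiguous) mapping;
	 * other VSI types only record the base queue of a contiguous range.
	 */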
5236 	if (vsi->type == I40E_VSI_SRIOV) {
5237 		info->mapping_flags |=
5238 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5239 		for (i = 0; i < vsi->nb_qps; i++)
5240 			info->queue_mapping[i] =
5241 				rte_cpu_to_le_16(vsi->base_queue + i);
5242 	} else {
5243 		info->mapping_flags |=
5244 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5245 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5246 	}
5247 	info->valid_sections |=
5248 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5249 
5250 	return I40E_SUCCESS;
5251 }
5252 
5253 static int
5254 i40e_veb_release(struct i40e_veb *veb)
5255 {
5256 	struct i40e_vsi *vsi;
5257 	struct i40e_hw *hw;
5258 
5259 	if (veb == NULL)
5260 		return -EINVAL;
5261 
5262 	if (!TAILQ_EMPTY(&veb->head)) {
5263 		PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5264 		return -EACCES;
5265 	}
5266 	/* associate_vsi field is NULL for floating VEB */
5267 	if (veb->associate_vsi != NULL) {
5268 		vsi = veb->associate_vsi;
5269 		hw = I40E_VSI_TO_HW(vsi);
5270 
5271 		vsi->uplink_seid = veb->uplink_seid;
5272 		vsi->veb = NULL;
5273 	} else {
5274 		veb->associate_pf->main_vsi->floating_veb = NULL;
5275 		hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5276 	}
5277 
5278 	i40e_aq_delete_element(hw, veb->seid, NULL);
5279 	rte_free(veb);
5280 	return I40E_SUCCESS;
5281 }
5282 
5283 /* Setup a veb */
5284 static struct i40e_veb *
5285 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5286 {
5287 	struct i40e_veb *veb;
5288 	int ret;
5289 	struct i40e_hw *hw;
5290 
5291 	if (pf == NULL) {
5292 		PMD_DRV_LOG(ERR,
5293 			    "veb setup failed, associated PF shouldn't be NULL");
5294 		return NULL;
5295 	}
5296 	hw = I40E_PF_TO_HW(pf);
5297 
5298 	veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5299 	if (!veb) {
5300 		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5301 		goto fail;
5302 	}
5303 
5304 	veb->associate_vsi = vsi;
5305 	veb->associate_pf = pf;
5306 	TAILQ_INIT(&veb->head);
5307 	veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5308 
5309 	/* create floating veb if vsi is NULL */
5310 	if (vsi != NULL) {
5311 		ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5312 				      I40E_DEFAULT_TCMAP, false,
5313 				      &veb->seid, false, NULL);
5314 	} else {
5315 		ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5316 				      true, &veb->seid, false, NULL);
5317 	}
5318 
5319 	if (ret != I40E_SUCCESS) {
5320 		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5321 			    hw->aq.asq_last_status);
5322 		goto fail;
5323 	}
5324 	veb->enabled_tc = I40E_DEFAULT_TCMAP;
5325 
5326 	/* get statistics index */
5327 	ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5328 				&veb->stats_idx, NULL, NULL, NULL);
5329 	if (ret != I40E_SUCCESS) {
5330 		PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5331 			    hw->aq.asq_last_status);
5332 		goto fail;
5333 	}
5334 	/* Get VEB bandwidth, to be implemented */
5335 	/* The associated VSI now binds to the VEB; set its uplink to this VEB */
5336 	if (vsi)
5337 		vsi->uplink_seid = veb->seid;
5338 
5339 	return veb;
5340 fail:
5341 	rte_free(veb);
5342 	return NULL;
5343 }
5344 
5345 int
5346 i40e_vsi_release(struct i40e_vsi *vsi)
5347 {
5348 	struct i40e_pf *pf;
5349 	struct i40e_hw *hw;
5350 	struct i40e_vsi_list *vsi_list;
5351 	void *temp;
5352 	int ret;
5353 	struct i40e_mac_filter *f;
5354 	uint16_t user_param;
5355 
5356 	if (!vsi)
5357 		return I40E_SUCCESS;
5358 
5359 	if (!vsi->adapter)
5360 		return -EFAULT;
5361 
5362 	user_param = vsi->user_param;
5363 
5364 	pf = I40E_VSI_TO_PF(vsi);
5365 	hw = I40E_VSI_TO_HW(vsi);
5366 
5367 	/* VSI has child to attach, release child first */
5368 	if (vsi->veb) {
5369 		TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5370 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5371 				return -1;
5372 		}
5373 		i40e_veb_release(vsi->veb);
5374 	}
5375 
5376 	if (vsi->floating_veb) {
5377 		TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5378 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5379 				return -1;
5380 		}
5381 	}
5382 
5383 	/* Remove all macvlan filters of the VSI */
5384 	i40e_vsi_remove_all_macvlan_filter(vsi);
5385 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5386 		rte_free(f);
5387 
5388 	if (vsi->type != I40E_VSI_MAIN &&
5389 	    ((vsi->type != I40E_VSI_SRIOV) ||
5390 	    !pf->floating_veb_list[user_param])) {
5391 		/* Remove vsi from parent's sibling list */
5392 		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5393 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5394 			return I40E_ERR_PARAM;
5395 		}
5396 		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5397 				&vsi->sib_vsi_list, list);
5398 
5399 		/* Remove all switch elements of the VSI */
5400 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5401 		if (ret != I40E_SUCCESS)
5402 			PMD_DRV_LOG(ERR, "Failed to delete element");
5403 	}
5404 
5405 	if ((vsi->type == I40E_VSI_SRIOV) &&
5406 	    pf->floating_veb_list[user_param]) {
5407 		/* Remove vsi from parent's sibling list */
5408 		if (vsi->parent_vsi == NULL ||
5409 		    vsi->parent_vsi->floating_veb == NULL) {
5410 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5411 			return I40E_ERR_PARAM;
5412 		}
5413 		TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5414 			     &vsi->sib_vsi_list, list);
5415 
5416 		/* Remove all switch elements of the VSI */
5417 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5418 		if (ret != I40E_SUCCESS)
5419 			PMD_DRV_LOG(ERR, "Failed to delete element");
5420 	}
5421 
5422 	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5423 
5424 	if (vsi->type != I40E_VSI_SRIOV)
5425 		i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5426 	rte_free(vsi);
5427 
5428 	return I40E_SUCCESS;
5429 }
5430 
5431 static int
5432 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5433 {
5434 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5435 	struct i40e_aqc_remove_macvlan_element_data def_filter;
5436 	struct i40e_mac_filter_info filter;
5437 	int ret;
5438 
5439 	if (vsi->type != I40E_VSI_MAIN)
5440 		return I40E_ERR_CONFIG;
5441 	memset(&def_filter, 0, sizeof(def_filter));
5442 	rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5443 					ETH_ADDR_LEN);
5444 	def_filter.vlan_tag = 0;
5445 	def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5446 				I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5447 	ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5448 	if (ret != I40E_SUCCESS) {
5449 		struct i40e_mac_filter *f;
5450 		struct rte_ether_addr *mac;
5451 
5452 		PMD_DRV_LOG(DEBUG,
5453 			    "Cannot remove the default macvlan filter");
5454 		/* The permanent MAC needs to be added to the MAC list */
5455 		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5456 		if (f == NULL) {
5457 			PMD_DRV_LOG(ERR, "failed to allocate memory");
5458 			return I40E_ERR_NO_MEMORY;
5459 		}
5460 		mac = &f->mac_info.mac_addr;
5461 		rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5462 				ETH_ADDR_LEN);
5463 		f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5464 		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5465 		vsi->mac_num++;
5466 
5467 		return ret;
5468 	}
5469 	rte_memcpy(&filter.mac_addr,
5470 		(struct rte_ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5471 	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5472 	return i40e_vsi_add_mac(vsi, &filter);
5473 }
5474 
5475 /*
5476  * i40e_vsi_get_bw_config - Query VSI BW Information
5477  * @vsi: the VSI to be queried
5478  *
5479  * Returns 0 on success, negative value on failure
5480  */
5481 static enum i40e_status_code
5482 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5483 {
5484 	struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5485 	struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5486 	struct i40e_hw *hw = &vsi->adapter->hw;
5487 	i40e_status ret;
5488 	int i;
5489 	uint32_t bw_max;
5490 
5491 	memset(&bw_config, 0, sizeof(bw_config));
5492 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5493 	if (ret != I40E_SUCCESS) {
5494 		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5495 			    hw->aq.asq_last_status);
5496 		return ret;
5497 	}
5498 
5499 	memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5500 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5501 					&ets_sla_config, NULL);
5502 	if (ret != I40E_SUCCESS) {
5503 		PMD_DRV_LOG(ERR,
5504 			"VSI failed to get TC bandwidth configuration %u",
5505 			hw->aq.asq_last_status);
5506 		return ret;
5507 	}
5508 
5509 	/* store and print out BW info */
5510 	vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5511 	vsi->bw_info.bw_max = bw_config.max_bw;
5512 	PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5513 	PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5514 	bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5515 		    (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5516 		     I40E_16_BIT_WIDTH);
5517 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5518 		vsi->bw_info.bw_ets_share_credits[i] =
5519 				ets_sla_config.share_credits[i];
5520 		vsi->bw_info.bw_ets_credits[i] =
5521 				rte_le_to_cpu_16(ets_sla_config.credits[i]);
5522 		/* 4 bits per TC, 4th bit is reserved */
5523 		vsi->bw_info.bw_ets_max[i] =
5524 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5525 				  RTE_LEN2MASK(3, uint8_t));
5526 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5527 			    vsi->bw_info.bw_ets_share_credits[i]);
5528 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5529 			    vsi->bw_info.bw_ets_credits[i]);
5530 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5531 			    vsi->bw_info.bw_ets_max[i]);
5532 	}
5533 
5534 	return I40E_SUCCESS;
5535 }
5536 
5537 /* i40e_enable_pf_lb
5538  * @pf: pointer to the pf structure
5539  *
5540  * allow loopback on pf
5541  */
5542 static inline void
5543 i40e_enable_pf_lb(struct i40e_pf *pf)
5544 {
5545 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5546 	struct i40e_vsi_context ctxt;
5547 	int ret;
5548 
5549 	/* Use the FW API if FW >= v5.0 */
5550 	if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5551 		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5552 		return;
5553 	}
5554 
5555 	memset(&ctxt, 0, sizeof(ctxt));
5556 	ctxt.seid = pf->main_vsi_seid;
5557 	ctxt.pf_num = hw->pf_id;
5558 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5559 	if (ret) {
5560 		PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5561 			    ret, hw->aq.asq_last_status);
5562 		return;
5563 	}
5564 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5565 	ctxt.info.valid_sections =
5566 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5567 	ctxt.info.switch_id |=
5568 		rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5569 
5570 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5571 	if (ret)
5572 		PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5573 			    hw->aq.asq_last_status);
5574 }
5575 
5576 /* Setup a VSI */
5577 struct i40e_vsi *
5578 i40e_vsi_setup(struct i40e_pf *pf,
5579 	       enum i40e_vsi_type type,
5580 	       struct i40e_vsi *uplink_vsi,
5581 	       uint16_t user_param)
5582 {
5583 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5584 	struct i40e_vsi *vsi;
5585 	struct i40e_mac_filter_info filter;
5586 	int ret;
5587 	struct i40e_vsi_context ctxt;
5588 	struct rte_ether_addr broadcast =
5589 		{.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5590 
5591 	if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5592 	    uplink_vsi == NULL) {
5593 		PMD_DRV_LOG(ERR,
5594 			"VSI setup failed, VSI link shouldn't be NULL");
5595 		return NULL;
5596 	}
5597 
5598 	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5599 		PMD_DRV_LOG(ERR,
5600 			"VSI setup failed, MAIN VSI uplink VSI should be NULL");
5601 		return NULL;
5602 	}
5603 
5604 	/* Two situations:
5605 	 * 1. type is not MAIN and the uplink VSI is not NULL:
5606 	 *    if the uplink VSI hasn't set up a VEB, create one first (veb field).
5607 	 * 2. type is SRIOV and the uplink is NULL:
5608 	 *    if the floating VEB is NULL, create one (floating_veb field).
5609 	 */
5610 
5611 	if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5612 	    uplink_vsi->veb == NULL) {
5613 		uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5614 
5615 		if (uplink_vsi->veb == NULL) {
5616 			PMD_DRV_LOG(ERR, "VEB setup failed");
5617 			return NULL;
5618 		}
5619 		/* Set allow-loopback on the PF when the VEB is created */
5620 		i40e_enable_pf_lb(pf);
5621 	}
5622 
5623 	if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5624 	    pf->main_vsi->floating_veb == NULL) {
5625 		pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5626 
5627 		if (pf->main_vsi->floating_veb == NULL) {
5628 			PMD_DRV_LOG(ERR, "VEB setup failed");
5629 			return NULL;
5630 		}
5631 	}
5632 
5633 	vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5634 	if (!vsi) {
5635 		PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5636 		return NULL;
5637 	}
5638 	TAILQ_INIT(&vsi->mac_list);
5639 	vsi->type = type;
5640 	vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5641 	vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5642 	vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5643 	vsi->user_param = user_param;
5644 	vsi->vlan_anti_spoof_on = 0;
5645 	vsi->vlan_filter_on = 0;
5646 	/* Allocate queues */
5647 	switch (vsi->type) {
5648 	case I40E_VSI_MAIN  :
5649 		vsi->nb_qps = pf->lan_nb_qps;
5650 		break;
5651 	case I40E_VSI_SRIOV :
5652 		vsi->nb_qps = pf->vf_nb_qps;
5653 		break;
5654 	case I40E_VSI_VMDQ2:
5655 		vsi->nb_qps = pf->vmdq_nb_qps;
5656 		break;
5657 	case I40E_VSI_FDIR:
5658 		vsi->nb_qps = pf->fdir_nb_qps;
5659 		break;
5660 	default:
5661 		goto fail_mem;
5662 	}
5663 	/*
5664 	 * The filter status descriptor is reported on rx queue 0,
5665 	 * while the tx queue used for FDIR filter programming has no
5666 	 * such constraint and can be any queue.
5667 	 * To simplify things, the FDIR VSI uses queue pair 0.
5668 	 * To guarantee it gets queue pair 0, the queue allocation must
5669 	 * be done before this function is called.
5670 	 */
5671 	if (type != I40E_VSI_FDIR) {
5672 		ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5673 		if (ret < 0) {
5674 			PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5675 					vsi->seid, ret);
5676 			goto fail_mem;
5677 		}
5678 		vsi->base_queue = ret;
5679 	} else
5680 		vsi->base_queue = I40E_FDIR_QUEUE_ID;
5681 
5682 	/* VF has MSIX interrupt in VF range, don't allocate here */
5683 	if (type == I40E_VSI_MAIN) {
5684 		if (pf->support_multi_driver) {
5685 			/* With multi-driver support, INT0 must be used instead
5686 			 * of allocating from the MSIX pool. The MSIX pool is
5687 			 * initialized from INT1, so it's OK to set msix_intr to 0
5688 			 * and nb_msix to 1 without calling i40e_res_pool_alloc.
5689 			 */
5690 			vsi->msix_intr = 0;
5691 			vsi->nb_msix = 1;
5692 		} else {
5693 			ret = i40e_res_pool_alloc(&pf->msix_pool,
5694 						  RTE_MIN(vsi->nb_qps,
5695 						     RTE_MAX_RXTX_INTR_VEC_ID));
5696 			if (ret < 0) {
5697 				PMD_DRV_LOG(ERR,
5698 					    "VSI MAIN %d get heap failed %d",
5699 					    vsi->seid, ret);
5700 				goto fail_queue_alloc;
5701 			}
5702 			vsi->msix_intr = ret;
5703 			vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5704 					       RTE_MAX_RXTX_INTR_VEC_ID);
5705 		}
5706 	} else if (type != I40E_VSI_SRIOV) {
5707 		ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5708 		if (ret < 0) {
5709 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5710 			goto fail_queue_alloc;
5711 		}
5712 		vsi->msix_intr = ret;
5713 		vsi->nb_msix = 1;
5714 	} else {
5715 		vsi->msix_intr = 0;
5716 		vsi->nb_msix = 0;
5717 	}
5718 
5719 	/* Add VSI */
5720 	if (type == I40E_VSI_MAIN) {
5721 		/* For main VSI, no need to add since it's default one */
5722 		vsi->uplink_seid = pf->mac_seid;
5723 		vsi->seid = pf->main_vsi_seid;
5724 		/* Bind queues with specific MSIX interrupt */
5725 		/**
5726 		 * At least 2 interrupts are needed: one for miscellaneous
5727 		 * causes, enabled from the OS side, and another for the queues,
5728 		 * bound to the interrupt from the device side only.
5729 		 */
5730 
5731 		/* Get default VSI parameters from hardware */
5732 		memset(&ctxt, 0, sizeof(ctxt));
5733 		ctxt.seid = vsi->seid;
5734 		ctxt.pf_num = hw->pf_id;
5735 		ctxt.uplink_seid = vsi->uplink_seid;
5736 		ctxt.vf_num = 0;
5737 		ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5738 		if (ret != I40E_SUCCESS) {
5739 			PMD_DRV_LOG(ERR, "Failed to get VSI params");
5740 			goto fail_msix_alloc;
5741 		}
5742 		rte_memcpy(&vsi->info, &ctxt.info,
5743 			sizeof(struct i40e_aqc_vsi_properties_data));
5744 		vsi->vsi_id = ctxt.vsi_number;
5745 		vsi->info.valid_sections = 0;
5746 
5747 		/* Configure TC; only TC0 is enabled */
5748 		if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5749 			I40E_SUCCESS) {
5750 			PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5751 			goto fail_msix_alloc;
5752 		}
5753 
5754 		/* TC, queue mapping */
5755 		memset(&ctxt, 0, sizeof(ctxt));
5756 		vsi->info.valid_sections |=
5757 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5758 		vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5759 					I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5760 		rte_memcpy(&ctxt.info, &vsi->info,
5761 			sizeof(struct i40e_aqc_vsi_properties_data));
5762 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5763 						I40E_DEFAULT_TCMAP);
5764 		if (ret != I40E_SUCCESS) {
5765 			PMD_DRV_LOG(ERR,
5766 				"Failed to configure TC queue mapping");
5767 			goto fail_msix_alloc;
5768 		}
5769 		ctxt.seid = vsi->seid;
5770 		ctxt.pf_num = hw->pf_id;
5771 		ctxt.uplink_seid = vsi->uplink_seid;
5772 		ctxt.vf_num = 0;
5773 
5774 		/* Update VSI parameters */
5775 		ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5776 		if (ret != I40E_SUCCESS) {
5777 			PMD_DRV_LOG(ERR, "Failed to update VSI params");
5778 			goto fail_msix_alloc;
5779 		}
5780 
5781 		rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5782 						sizeof(vsi->info.tc_mapping));
5783 		rte_memcpy(&vsi->info.queue_mapping,
5784 				&ctxt.info.queue_mapping,
5785 			sizeof(vsi->info.queue_mapping));
5786 		vsi->info.mapping_flags = ctxt.info.mapping_flags;
5787 		vsi->info.valid_sections = 0;
5788 
5789 		rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5790 				ETH_ADDR_LEN);
5791 
5792 		/**
5793 		 * Updating the default filter settings is necessary to prevent
5794 		 * reception of tagged packets.
5795 		 * Some old firmware configurations load a default macvlan
5796 		 * filter which accepts both tagged and untagged packets.
5797 		 * The update replaces it with a normal filter if needed.
5798 		 * For NVM 4.2.2 or later, the update is no longer needed:
5799 		 * firmware with a correct configuration loads the expected
5800 		 * default macvlan filter, which cannot be removed.
5801 		 */
5802 		i40e_update_default_filter_setting(vsi);
5803 		i40e_config_qinq(hw, vsi);
5804 	} else if (type == I40E_VSI_SRIOV) {
5805 		memset(&ctxt, 0, sizeof(ctxt));
5806 		/**
5807 		 * For other VSIs, the uplink_seid equals the uplink VSI's
5808 		 * uplink_seid since they share the same VEB.
5809 		 */
5810 		if (uplink_vsi == NULL)
5811 			vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5812 		else
5813 			vsi->uplink_seid = uplink_vsi->uplink_seid;
5814 		ctxt.pf_num = hw->pf_id;
5815 		ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5816 		ctxt.uplink_seid = vsi->uplink_seid;
5817 		ctxt.connection_type = 0x1;
5818 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5819 
5820 		/* Use the VEB configuration if FW >= v5.0 */
5821 		if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5822 			/* Configure switch ID */
5823 			ctxt.info.valid_sections |=
5824 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5825 			ctxt.info.switch_id =
5826 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5827 		}
5828 
5829 		/* Configure port/vlan */
5830 		ctxt.info.valid_sections |=
5831 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5832 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5833 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5834 						hw->func_caps.enabled_tcmap);
5835 		if (ret != I40E_SUCCESS) {
5836 			PMD_DRV_LOG(ERR,
5837 				"Failed to configure TC queue mapping");
5838 			goto fail_msix_alloc;
5839 		}
5840 
5841 		ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5842 		ctxt.info.valid_sections |=
5843 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5844 		/**
5845 		 * Since the VSI is not created yet, only configure the
5846 		 * parameters; the VSI will be added below.
5847 		 */
5848 
5849 		i40e_config_qinq(hw, vsi);
5850 	} else if (type == I40E_VSI_VMDQ2) {
5851 		memset(&ctxt, 0, sizeof(ctxt));
5852 		/*
5853 		 * For other VSIs, the uplink_seid equals the uplink VSI's
5854 		 * uplink_seid since they share the same VEB.
5855 		 */
5856 		vsi->uplink_seid = uplink_vsi->uplink_seid;
5857 		ctxt.pf_num = hw->pf_id;
5858 		ctxt.vf_num = 0;
5859 		ctxt.uplink_seid = vsi->uplink_seid;
5860 		ctxt.connection_type = 0x1;
5861 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5862 
5863 		ctxt.info.valid_sections |=
5864 				rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5865 		/* user_param carries the flag to enable loopback */
5866 		if (user_param) {
5867 			ctxt.info.switch_id =
5868 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5869 			ctxt.info.switch_id |=
5870 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5871 		}
5872 
5873 		/* Configure port/vlan */
5874 		ctxt.info.valid_sections |=
5875 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5876 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5877 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5878 						I40E_DEFAULT_TCMAP);
5879 		if (ret != I40E_SUCCESS) {
5880 			PMD_DRV_LOG(ERR,
5881 				"Failed to configure TC queue mapping");
5882 			goto fail_msix_alloc;
5883 		}
5884 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5885 		ctxt.info.valid_sections |=
5886 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5887 	} else if (type == I40E_VSI_FDIR) {
5888 		memset(&ctxt, 0, sizeof(ctxt));
5889 		vsi->uplink_seid = uplink_vsi->uplink_seid;
5890 		ctxt.pf_num = hw->pf_id;
5891 		ctxt.vf_num = 0;
5892 		ctxt.uplink_seid = vsi->uplink_seid;
5893 		ctxt.connection_type = 0x1;     /* regular data port */
5894 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5895 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5896 						I40E_DEFAULT_TCMAP);
5897 		if (ret != I40E_SUCCESS) {
5898 			PMD_DRV_LOG(ERR,
5899 				"Failed to configure TC queue mapping.");
5900 			goto fail_msix_alloc;
5901 		}
5902 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5903 		ctxt.info.valid_sections |=
5904 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5905 	} else {
5906 		PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
5907 		goto fail_msix_alloc;
5908 	}
5909 
5910 	if (vsi->type != I40E_VSI_MAIN) {
5911 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5912 		if (ret != I40E_SUCCESS) {
5913 			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5914 				    hw->aq.asq_last_status);
5915 			goto fail_msix_alloc;
5916 		}
5917 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5918 		vsi->info.valid_sections = 0;
5919 		vsi->seid = ctxt.seid;
5920 		vsi->vsi_id = ctxt.vsi_number;
5921 		vsi->sib_vsi_list.vsi = vsi;
5922 		if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5923 			TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5924 					  &vsi->sib_vsi_list, list);
5925 		} else {
5926 			TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5927 					  &vsi->sib_vsi_list, list);
5928 		}
5929 	}
5930 
5931 	/* MAC/VLAN configuration */
5932 	rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
5933 	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5934 
5935 	ret = i40e_vsi_add_mac(vsi, &filter);
5936 	if (ret != I40E_SUCCESS) {
5937 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5938 		goto fail_msix_alloc;
5939 	}
5940 
5941 	/* Get VSI BW information */
5942 	i40e_vsi_get_bw_config(vsi);
5943 	return vsi;
5944 fail_msix_alloc:
5945 	i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5946 fail_queue_alloc:
5947 	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5948 fail_mem:
5949 	rte_free(vsi);
5950 	return NULL;
5951 }
5952 
5953 /* Configure vlan filter on or off */
5954 int
5955 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5956 {
5957 	int i, num;
5958 	struct i40e_mac_filter *f;
5959 	void *temp;
5960 	struct i40e_mac_filter_info *mac_filter;
5961 	enum rte_mac_filter_type desired_filter;
5962 	int ret = I40E_SUCCESS;
5963 
5964 	if (on) {
5965 		/* Filter to match MAC and VLAN */
5966 		desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5967 	} else {
5968 		/* Filter to match only MAC */
5969 		desired_filter = RTE_MAC_PERFECT_MATCH;
5970 	}
5971 
5972 	num = vsi->mac_num;
5973 
5974 	mac_filter = rte_zmalloc("mac_filter_info_data",
5975 				 num * sizeof(*mac_filter), 0);
5976 	if (mac_filter == NULL) {
5977 		PMD_DRV_LOG(ERR, "failed to allocate memory");
5978 		return I40E_ERR_NO_MEMORY;
5979 	}
5980 
5981 	i = 0;
5982 
5983 	/* Remove all existing mac */
5984 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5985 		mac_filter[i] = f->mac_info;
5986 		ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5987 		if (ret) {
5988 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5989 				    on ? "enable" : "disable");
5990 			goto DONE;
5991 		}
5992 		i++;
5993 	}
5994 
5995 	/* Override with new filter */
5996 	for (i = 0; i < num; i++) {
5997 		mac_filter[i].filter_type = desired_filter;
5998 		ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5999 		if (ret) {
6000 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
6001 				    on ? "enable" : "disable");
6002 			goto DONE;
6003 		}
6004 	}
6005 
6006 DONE:
6007 	rte_free(mac_filter);
6008 	return ret;
6009 }
6010 
6011 /* Configure vlan stripping on or off */
6012 int
6013 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
6014 {
6015 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6016 	struct i40e_vsi_context ctxt;
6017 	uint8_t vlan_flags;
6018 	int ret = I40E_SUCCESS;
6019 
6020 	/* Check if it has been already on or off */
6021 	if (vsi->info.valid_sections &
6022 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
6023 		if (on) {
6024 			if ((vsi->info.port_vlan_flags &
6025 				I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
6026 				return 0; /* already on */
6027 		} else {
6028 			if ((vsi->info.port_vlan_flags &
6029 				I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
6030 				I40E_AQ_VSI_PVLAN_EMOD_MASK)
6031 				return 0; /* already off */
6032 		}
6033 	}
6034 
6035 	if (on)
6036 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
6037 	else
6038 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
6039 	vsi->info.valid_sections =
6040 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
6041 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
6042 	vsi->info.port_vlan_flags |= vlan_flags;
6043 	ctxt.seid = vsi->seid;
6044 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
6045 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6046 	if (ret)
6047 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
6048 			    on ? "enable" : "disable");
6049 
6050 	return ret;
6051 }
6052 
6053 static int
6054 i40e_dev_init_vlan(struct rte_eth_dev *dev)
6055 {
6056 	struct rte_eth_dev_data *data = dev->data;
6057 	int ret;
6058 	int mask = 0;
6059 
6060 	/* Apply vlan offload setting */
6061 	mask = ETH_VLAN_STRIP_MASK |
6062 	       ETH_VLAN_FILTER_MASK |
6063 	       ETH_VLAN_EXTEND_MASK;
6064 	ret = i40e_vlan_offload_set(dev, mask);
6065 	if (ret) {
6066 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
6067 		return ret;
6068 	}
6069 
6070 	/* Apply pvid setting */
6071 	ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
6072 				data->dev_conf.txmode.hw_vlan_insert_pvid);
6073 	if (ret)
6074 		PMD_DRV_LOG(INFO, "Failed to update VSI params");
6075 
6076 	return ret;
6077 }
6078 
6079 static int
6080 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
6081 {
6082 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6083 
6084 	return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
6085 }
6086 
6087 static int
6088 i40e_update_flow_control(struct i40e_hw *hw)
6089 {
6090 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
6091 	struct i40e_link_status link_status;
6092 	uint32_t rxfc = 0, txfc = 0, reg;
6093 	uint8_t an_info;
6094 	int ret;
6095 
6096 	memset(&link_status, 0, sizeof(link_status));
6097 	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
6098 	if (ret != I40E_SUCCESS) {
6099 		PMD_DRV_LOG(ERR, "Failed to get link status information");
6100 		goto write_reg; /* Disable flow control */
6101 	}
6102 
6103 	an_info = hw->phy.link_info.an_info;
6104 	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
6105 		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
6106 		ret = I40E_ERR_NOT_READY;
6107 		goto write_reg; /* Disable flow control */
6108 	}
6109 	/**
6110 	 * If link auto negotiation is enabled, flow control needs to
6111 	 * be configured according to it
6112 	 */
6113 	switch (an_info & I40E_LINK_PAUSE_RXTX) {
6114 	case I40E_LINK_PAUSE_RXTX:
6115 		rxfc = 1;
6116 		txfc = 1;
6117 		hw->fc.current_mode = I40E_FC_FULL;
6118 		break;
6119 	case I40E_AQ_LINK_PAUSE_RX:
6120 		rxfc = 1;
6121 		hw->fc.current_mode = I40E_FC_RX_PAUSE;
6122 		break;
6123 	case I40E_AQ_LINK_PAUSE_TX:
6124 		txfc = 1;
6125 		hw->fc.current_mode = I40E_FC_TX_PAUSE;
6126 		break;
6127 	default:
6128 		hw->fc.current_mode = I40E_FC_NONE;
6129 		break;
6130 	}
6131 
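	/* rxfc and txfc are still 0 on the error paths above, so jumping
	 * here after a failure disables flow control in both directions.
	 */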
6132 write_reg:
6133 	I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6134 		txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6135 	reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6136 	reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6137 	reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6138 	I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6139 
6140 	return ret;
6141 }
6142 
6143 /* PF setup */
6144 static int
6145 i40e_pf_setup(struct i40e_pf *pf)
6146 {
6147 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6148 	struct i40e_filter_control_settings settings;
6149 	struct i40e_vsi *vsi;
6150 	int ret;
6151 
6152 	/* Clear all stats counters */
6153 	pf->offset_loaded = FALSE;
6154 	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6155 	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6156 	memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6157 	memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6158 
6159 	ret = i40e_pf_get_switch_config(pf);
6160 	if (ret != I40E_SUCCESS) {
6161 		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6162 		return ret;
6163 	}
6164 
6165 	ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6166 	if (ret)
6167 		PMD_INIT_LOG(WARNING,
6168 			"failed to allocate switch domain for device %d", ret);
6169 
6170 	if (pf->flags & I40E_FLAG_FDIR) {
6171 		/* Allocate the queue first so that FDIR uses queue pair 0 */
6172 		ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6173 		if (ret != I40E_FDIR_QUEUE_ID) {
6174 			PMD_DRV_LOG(ERR,
6175 				"queue allocation failed for FDIR: ret = %d",
6176 				ret);
6177 			pf->flags &= ~I40E_FLAG_FDIR;
6178 		}
6179 	}
6180 	/*  main VSI setup */
6181 	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6182 	if (!vsi) {
6183 		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6184 		return I40E_ERR_NOT_READY;
6185 	}
6186 	pf->main_vsi = vsi;
6187 
6188 	/* Configure filter control */
6189 	memset(&settings, 0, sizeof(settings));
6190 	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6191 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6192 	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6193 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6194 	else {
6195 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6196 			hw->func_caps.rss_table_size);
6197 		return I40E_ERR_PARAM;
6198 	}
6199 	PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6200 		hw->func_caps.rss_table_size);
6201 	pf->hash_lut_size = hw->func_caps.rss_table_size;
6202 
6203 	/* Enable ethtype and macvlan filters */
6204 	settings.enable_ethtype = TRUE;
6205 	settings.enable_macvlan = TRUE;
6206 	ret = i40e_set_filter_control(hw, &settings);
6207 	if (ret)
6208 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6209 								ret);
6210 
6211 	/* Update flow control according to the auto negotiation */
6212 	i40e_update_flow_control(hw);
6213 
6214 	return I40E_SUCCESS;
6215 }
6216 
6217 int
6218 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6219 {
6220 	uint32_t reg;
6221 	uint16_t j;
6222 
6223 	/**
6224 	 * Set or clear TX Queue Disable flags,
6225 	 * which is required by hardware.
6226 	 */
6227 	i40e_pre_tx_queue_cfg(hw, q_idx, on);
6228 	rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6229 
6230 	/* Wait until the request is finished */
6231 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
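	/* The previous request has completed once the QENA_REQ and
	 * QENA_STAT bits agree.
	 */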
6232 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6233 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6234 		if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6235 			((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6236 							& 0x1))) {
6237 			break;
6238 		}
6239 	}
6240 	if (on) {
6241 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6242 			return I40E_SUCCESS; /* already on, skip next steps */
6243 
6244 		I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6245 		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6246 	} else {
6247 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6248 			return I40E_SUCCESS; /* already off, skip next steps */
6249 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6250 	}
6251 	/* Write the register */
6252 	I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6253 	/* Check the result */
6254 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6255 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6256 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6257 		if (on) {
6258 			if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6259 				(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6260 				break;
6261 		} else {
6262 			if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6263 				!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6264 				break;
6265 		}
6266 	}
6267 	/* Check if it is timeout */
6268 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6269 		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6270 			    (on ? "enable" : "disable"), q_idx);
6271 		return I40E_ERR_TIMEOUT;
6272 	}
6273 
6274 	return I40E_SUCCESS;
6275 }
6276 
6277 /* Switch on or off the tx queues */
6278 static int
6279 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
6280 {
6281 	struct rte_eth_dev_data *dev_data = pf->dev_data;
6282 	struct i40e_tx_queue *txq;
6283 	struct rte_eth_dev *dev = pf->adapter->eth_dev;
6284 	uint16_t i;
6285 	int ret;
6286 
6287 	for (i = 0; i < dev_data->nb_tx_queues; i++) {
6288 		txq = dev_data->tx_queues[i];
6289 		/* Skip the queue if it is not configured, or if it is
6290 		 * marked for deferred (per-queue) start when starting all */
6291 		if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
6292 			continue;
6293 		if (on)
6294 			ret = i40e_dev_tx_queue_start(dev, i);
6295 		else
6296 			ret = i40e_dev_tx_queue_stop(dev, i);
6297 		if (ret != I40E_SUCCESS)
6298 			return ret;
6299 	}
6300 
6301 	return I40E_SUCCESS;
6302 }
6303 
6304 int
6305 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6306 {
6307 	uint32_t reg;
6308 	uint16_t j;
6309 
6310 	/* Wait until the request is finished */
6311 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6312 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6313 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6314 		if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6315 			((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6316 			break;
6317 	}
6318 
6319 	if (on) {
6320 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6321 			return I40E_SUCCESS; /* Already on, skip next steps */
6322 		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6323 	} else {
6324 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6325 			return I40E_SUCCESS; /* Already off, skip next steps */
6326 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6327 	}
6328 
6329 	/* Write the register */
6330 	I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6331 	/* Check the result */
6332 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6333 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6334 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6335 		if (on) {
6336 			if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6337 				(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6338 				break;
6339 		} else {
6340 			if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6341 				!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6342 				break;
6343 		}
6344 	}
6345 
6346 	/* Check if it is timeout */
6347 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6348 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6349 			    (on ? "enable" : "disable"), q_idx);
6350 		return I40E_ERR_TIMEOUT;
6351 	}
6352 
6353 	return I40E_SUCCESS;
6354 }
6355 /* Switch on or off the rx queues */
6356 static int
6357 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
6358 {
6359 	struct rte_eth_dev_data *dev_data = pf->dev_data;
6360 	struct i40e_rx_queue *rxq;
6361 	struct rte_eth_dev *dev = pf->adapter->eth_dev;
6362 	uint16_t i;
6363 	int ret;
6364 
6365 	for (i = 0; i < dev_data->nb_rx_queues; i++) {
6366 		rxq = dev_data->rx_queues[i];
6367 		/* Skip the queue if it is not configured, or if it is
6368 		 * marked for deferred (per-queue) start when starting all */
6369 		if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
6370 			continue;
6371 		if (on)
6372 			ret = i40e_dev_rx_queue_start(dev, i);
6373 		else
6374 			ret = i40e_dev_rx_queue_stop(dev, i);
6375 		if (ret != I40E_SUCCESS)
6376 			return ret;
6377 	}
6378 
6379 	return I40E_SUCCESS;
6380 }
6381 
6382 /* Switch on or off all the rx/tx queues */
6383 int
6384 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
6385 {
6386 	int ret;
6387 
6388 	if (on) {
6389 		/* enable rx queues before enabling tx queues */
6390 		ret = i40e_dev_switch_rx_queues(pf, on);
6391 		if (ret) {
6392 			PMD_DRV_LOG(ERR, "Failed to switch rx queues");
6393 			return ret;
6394 		}
6395 		ret = i40e_dev_switch_tx_queues(pf, on);
6396 	} else {
6397 		/* Stop tx queues before stopping rx queues */
6398 		ret = i40e_dev_switch_tx_queues(pf, on);
6399 		if (ret) {
6400 			PMD_DRV_LOG(ERR, "Failed to switch tx queues");
6401 			return ret;
6402 		}
6403 		ret = i40e_dev_switch_rx_queues(pf, on);
6404 	}
6405 
6406 	return ret;
6407 }
6408 
6409 /* Initialize VSI for TX */
6410 static int
6411 i40e_dev_tx_init(struct i40e_pf *pf)
6412 {
6413 	struct rte_eth_dev_data *data = pf->dev_data;
6414 	uint16_t i;
6415 	uint32_t ret = I40E_SUCCESS;
6416 	struct i40e_tx_queue *txq;
6417 
6418 	for (i = 0; i < data->nb_tx_queues; i++) {
6419 		txq = data->tx_queues[i];
6420 		if (!txq || !txq->q_set)
6421 			continue;
6422 		ret = i40e_tx_queue_init(txq);
6423 		if (ret != I40E_SUCCESS)
6424 			break;
6425 	}
6426 	if (ret == I40E_SUCCESS)
6427 		i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6428 				     ->eth_dev);
6429 
6430 	return ret;
6431 }
6432 
6433 /* Initialize VSI for RX */
6434 static int
6435 i40e_dev_rx_init(struct i40e_pf *pf)
6436 {
6437 	struct rte_eth_dev_data *data = pf->dev_data;
6438 	int ret = I40E_SUCCESS;
6439 	uint16_t i;
6440 	struct i40e_rx_queue *rxq;
6441 
6442 	i40e_pf_config_mq_rx(pf);
6443 	for (i = 0; i < data->nb_rx_queues; i++) {
6444 		rxq = data->rx_queues[i];
6445 		if (!rxq || !rxq->q_set)
6446 			continue;
6447 
6448 		ret = i40e_rx_queue_init(rxq);
6449 		if (ret != I40E_SUCCESS) {
6450 			PMD_DRV_LOG(ERR,
6451 				"Failed to do RX queue initialization");
6452 			break;
6453 		}
6454 	}
6455 	if (ret == I40E_SUCCESS)
6456 		i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6457 				     ->eth_dev);
6458 
6459 	return ret;
6460 }
6461 
6462 static int
6463 i40e_dev_rxtx_init(struct i40e_pf *pf)
6464 {
6465 	int err;
6466 
6467 	err = i40e_dev_tx_init(pf);
6468 	if (err) {
6469 		PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6470 		return err;
6471 	}
6472 	err = i40e_dev_rx_init(pf);
6473 	if (err) {
6474 		PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6475 		return err;
6476 	}
6477 
6478 	return err;
6479 }
6480 
6481 static int
6482 i40e_vmdq_setup(struct rte_eth_dev *dev)
6483 {
6484 	struct rte_eth_conf *conf = &dev->data->dev_conf;
6485 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6486 	int i, err, conf_vsis, j, loop;
6487 	struct i40e_vsi *vsi;
6488 	struct i40e_vmdq_info *vmdq_info;
6489 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
6490 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6491 
6492 	/*
6493 	 * Disable interrupts to avoid messages from VFs. This also avoids
6494 	 * race conditions during VSI creation/destruction.
6495 	 */
6496 	i40e_pf_disable_irq0(hw);
6497 
6498 	if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6499 		PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6500 		return -ENOTSUP;
6501 	}
6502 
6503 	conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6504 	if (conf_vsis > pf->max_nb_vmdq_vsi) {
6505 		PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6506 			conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6507 			pf->max_nb_vmdq_vsi);
6508 		return -ENOTSUP;
6509 	}
6510 
6511 	if (pf->vmdq != NULL) {
6512 		PMD_INIT_LOG(INFO, "VMDQ already configured");
6513 		return 0;
6514 	}
6515 
6516 	pf->vmdq = rte_zmalloc("vmdq_info_struct",
6517 				sizeof(*vmdq_info) * conf_vsis, 0);
6518 
6519 	if (pf->vmdq == NULL) {
6520 		PMD_INIT_LOG(ERR, "Failed to allocate memory");
6521 		return -ENOMEM;
6522 	}
6523 
6524 	vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6525 
6526 	/* Create VMDQ VSI */
6527 	for (i = 0; i < conf_vsis; i++) {
6528 		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6529 				vmdq_conf->enable_loop_back);
6530 		if (vsi == NULL) {
6531 			PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6532 			err = -1;
6533 			goto err_vsi_setup;
6534 		}
6535 		vmdq_info = &pf->vmdq[i];
6536 		vmdq_info->pf = pf;
6537 		vmdq_info->vsi = vsi;
6538 	}
6539 	pf->nb_cfg_vmdq_vsi = conf_vsis;
6540 
6541 	/* Configure Vlan */
6542 	loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
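	/* Each pool_map entry pairs a VLAN id with a bitmask of pools;
	 * 'loop' is the number of bits in that mask. The VLAN is added to
	 * every configured VMDq VSI whose bit is set.
	 */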
6543 	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6544 		for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6545 			if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6546 				PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6547 					vmdq_conf->pool_map[i].vlan_id, j);
6548 
6549 				err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6550 						vmdq_conf->pool_map[i].vlan_id);
6551 				if (err) {
6552 					PMD_INIT_LOG(ERR, "Failed to add vlan");
6553 					err = -1;
6554 					goto err_vsi_setup;
6555 				}
6556 			}
6557 		}
6558 	}
6559 
6560 	i40e_pf_enable_irq0(hw);
6561 
6562 	return 0;
6563 
6564 err_vsi_setup:
6565 	for (i = 0; i < conf_vsis; i++)
6566 		if (pf->vmdq[i].vsi == NULL)
6567 			break;
6568 		else
6569 			i40e_vsi_release(pf->vmdq[i].vsi);
6570 
6571 	rte_free(pf->vmdq);
6572 	pf->vmdq = NULL;
6573 	i40e_pf_enable_irq0(hw);
6574 	return err;
6575 }
6576 
6577 static void
6578 i40e_stat_update_32(struct i40e_hw *hw,
6579 		   uint32_t reg,
6580 		   bool offset_loaded,
6581 		   uint64_t *offset,
6582 		   uint64_t *stat)
6583 {
6584 	uint64_t new_data;
6585 
6586 	new_data = (uint64_t)I40E_READ_REG(hw, reg);
6587 	if (!offset_loaded)
6588 		*offset = new_data;
6589 
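	/* The register is a 32-bit counter that wraps around; if the new
	 * reading is below the stored offset, the counter rolled over, so
	 * add 2^32 before subtracting.
	 */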
6590 	if (new_data >= *offset)
6591 		*stat = (uint64_t)(new_data - *offset);
6592 	else
6593 		*stat = (uint64_t)((new_data +
6594 			((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6595 }
6596 
6597 static void
6598 i40e_stat_update_48(struct i40e_hw *hw,
6599 		   uint32_t hireg,
6600 		   uint32_t loreg,
6601 		   bool offset_loaded,
6602 		   uint64_t *offset,
6603 		   uint64_t *stat)
6604 {
6605 	uint64_t new_data;
6606 
6607 	new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6608 	new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6609 			I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6610 
6611 	if (!offset_loaded)
6612 		*offset = new_data;
6613 
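	/* The 48-bit counter is split across two registers and wraps at
	 * 2^48; handle rollover the same way and mask the result back to
	 * 48 bits below.
	 */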
6614 	if (new_data >= *offset)
6615 		*stat = new_data - *offset;
6616 	else
6617 		*stat = (uint64_t)((new_data +
6618 			((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6619 
6620 	*stat &= I40E_48_BIT_MASK;
6621 }
6622 
6623 /* Disable IRQ0 */
6624 void
6625 i40e_pf_disable_irq0(struct i40e_hw *hw)
6626 {
6627 	/* Disable all interrupt types */
6628 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6629 		       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6630 	I40E_WRITE_FLUSH(hw);
6631 }
6632 
6633 /* Enable IRQ0 */
6634 void
6635 i40e_pf_enable_irq0(struct i40e_hw *hw)
6636 {
6637 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6638 		I40E_PFINT_DYN_CTL0_INTENA_MASK |
6639 		I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6640 		I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6641 	I40E_WRITE_FLUSH(hw);
6642 }
6643 
6644 static void
6645 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6646 {
6647 	/* read pending request and disable first */
6648 	i40e_pf_disable_irq0(hw);
6649 	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6650 	I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6651 		I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6652 
6653 	if (no_queue)
6654 		/* Link no queues with irq0 */
6655 		I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6656 			       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6657 }
6658 
6659 static void
6660 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6661 {
6662 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6663 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6664 	int i;
6665 	uint16_t abs_vf_id;
6666 	uint32_t index, offset, val;
6667 
6668 	if (!pf->vfs)
6669 		return;
6670 	/**
6671 	 * Try to find which VF trigger a reset, use absolute VF id to access
6672 	 * Try to find which VF triggered a reset. Use the absolute VF id to
6673 	 * access it, since the register is a global register.
6674 	for (i = 0; i < pf->vf_num; i++) {
6675 		abs_vf_id = hw->func_caps.vf_base_id + i;
6676 		index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6677 		offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6678 		val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6679 		/* VFR event occurred */
6680 		if (val & (0x1 << offset)) {
6681 			int ret;
6682 
6683 			/* Clear the event first */
6684 			I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6685 							(0x1 << offset));
6686 			PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6687 			/**
6688 			 * Only notify that a VF reset event occurred;
6689 			 * don't trigger another SW reset.
6690 			 */
6691 			ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6692 			if (ret != I40E_SUCCESS)
6693 				PMD_DRV_LOG(ERR, "Failed to do VF reset");
6694 		}
6695 	}
6696 }
6697 
6698 static void
6699 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6700 {
6701 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6702 	int i;
6703 
6704 	for (i = 0; i < pf->vf_num; i++)
6705 		i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6706 }
6707 
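/*
 * Admin Receive Queue processing: i40e_clean_arq_element() is called in a
 * loop until 'pending' drops to zero, and each event is dispatched on its
 * opcode. VF mailbox messages are forwarded to the PF host handler, and a
 * link status event triggers a link update plus an RTE_ETH_EVENT_INTR_LSC
 * callback to the application.
 */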
6708 static void
6709 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6710 {
6711 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6712 	struct i40e_arq_event_info info;
6713 	uint16_t pending, opcode;
6714 	int ret;
6715 
6716 	info.buf_len = I40E_AQ_BUF_SZ;
6717 	info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6718 	if (!info.msg_buf) {
6719 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
6720 		return;
6721 	}
6722 
6723 	pending = 1;
6724 	while (pending) {
6725 		ret = i40e_clean_arq_element(hw, &info, &pending);
6726 
6727 		if (ret != I40E_SUCCESS) {
6728 			PMD_DRV_LOG(INFO,
6729 				"Failed to read msg from AdminQ, aq_err: %u",
6730 				hw->aq.asq_last_status);
6731 			break;
6732 		}
6733 		opcode = rte_le_to_cpu_16(info.desc.opcode);
6734 
6735 		switch (opcode) {
6736 		case i40e_aqc_opc_send_msg_to_pf:
6737 			/* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6738 			i40e_pf_host_handle_vf_msg(dev,
6739 					rte_le_to_cpu_16(info.desc.retval),
6740 					rte_le_to_cpu_32(info.desc.cookie_high),
6741 					rte_le_to_cpu_32(info.desc.cookie_low),
6742 					info.msg_buf,
6743 					info.msg_len);
6744 			break;
6745 		case i40e_aqc_opc_get_link_status:
6746 			ret = i40e_dev_link_update(dev, 0);
6747 			if (!ret)
6748 				_rte_eth_dev_callback_process(dev,
6749 					RTE_ETH_EVENT_INTR_LSC, NULL);
6750 			break;
6751 		default:
6752 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6753 				    opcode);
6754 			break;
6755 		}
6756 	}
6757 	rte_free(info.msg_buf);
6758 }
6759 
6760 /**
6761  * Interrupt handler triggered by the NIC for handling a
6762  * specific interrupt.
6763  *
6764  * @param handle
6765  *  Pointer to interrupt handle.
6766  * @param param
6767  *  The address of the parameter (struct rte_eth_dev *) registered before.
6768  *
6769  * @return
6770  *  void
6771  */
6772 static void
6773 i40e_dev_interrupt_handler(void *param)
6774 {
6775 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6776 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6777 	uint32_t icr0;
6778 
6779 	/* Disable interrupt */
6780 	i40e_pf_disable_irq0(hw);
6781 
6782 	/* read out interrupt causes */
6783 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6784 
6785 	/* No interrupt event indicated */
6786 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6787 		PMD_DRV_LOG(INFO, "No interrupt event");
6788 		goto done;
6789 	}
6790 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6791 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6792 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6793 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6794 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6795 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6796 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6797 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6798 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6799 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6800 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6801 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
6802 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6803 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6804 
6805 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6806 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6807 		i40e_dev_handle_vfr_event(dev);
6808 	}
6809 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6810 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
6811 		i40e_dev_handle_aq_msg(dev);
6812 	}
6813 
6814 done:
6815 	/* Enable interrupt */
6816 	i40e_pf_enable_irq0(hw);
6817 }
6818 
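/*
 * The alarm handler below mirrors i40e_dev_interrupt_handler, but it is
 * driven by a periodic EAL alarm rather than the device interrupt: it
 * polls ICR0, dispatches VFLR and AdminQ events, and re-arms itself with
 * rte_eal_alarm_set() every I40E_ALARM_INTERVAL (defined elsewhere in
 * this driver). It is used when these events cannot be serviced from the
 * interrupt handler, for instance when the vector is shared with Rx queues.
 */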
6819 static void
6820 i40e_dev_alarm_handler(void *param)
6821 {
6822 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6823 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6824 	uint32_t icr0;
6825 
6826 	/* Disable interrupt */
6827 	i40e_pf_disable_irq0(hw);
6828 
6829 	/* read out interrupt causes */
6830 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6831 
6832 	/* No interrupt event indicated */
6833 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6834 		goto done;
6835 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6836 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6837 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6838 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6839 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6840 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6841 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6842 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6843 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6844 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6845 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6846 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
6847 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6848 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6849 
6850 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6851 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6852 		i40e_dev_handle_vfr_event(dev);
6853 	}
6854 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6855 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
6856 		i40e_dev_handle_aq_msg(dev);
6857 	}
6858 
6859 done:
6860 	/* Enable interrupt */
6861 	i40e_pf_enable_irq0(hw);
6862 	rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6863 			  i40e_dev_alarm_handler, dev);
6864 }
6865 
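/*
 * The two helpers below program MAC/VLAN filters in batches: the AdminQ
 * send buffer holds at most asq_buf_size / sizeof(element) entries, so
 * 'total' filters are split into chunks of at most 'ele_num' entries and
 * one add/remove AQ command is issued per chunk until all filters have
 * been submitted.
 */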
6866 int
6867 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6868 			 struct i40e_macvlan_filter *filter,
6869 			 int total)
6870 {
6871 	int ele_num, ele_buff_size;
6872 	int num, actual_num, i;
6873 	uint16_t flags;
6874 	int ret = I40E_SUCCESS;
6875 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6876 	struct i40e_aqc_add_macvlan_element_data *req_list;
6877 
6878 	if (filter == NULL || total == 0)
6879 		return I40E_ERR_PARAM;
6880 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6881 	ele_buff_size = hw->aq.asq_buf_size;
6882 
6883 	req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6884 	if (req_list == NULL) {
6885 		PMD_DRV_LOG(ERR, "Fail to allocate memory");
6886 		return I40E_ERR_NO_MEMORY;
6887 	}
6888 
6889 	num = 0;
6890 	do {
6891 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6892 		memset(req_list, 0, ele_buff_size);
6893 
6894 		for (i = 0; i < actual_num; i++) {
6895 			rte_memcpy(req_list[i].mac_addr,
6896 				&filter[num + i].macaddr, ETH_ADDR_LEN);
6897 			req_list[i].vlan_tag =
6898 				rte_cpu_to_le_16(filter[num + i].vlan_id);
6899 
6900 			switch (filter[num + i].filter_type) {
6901 			case RTE_MAC_PERFECT_MATCH:
6902 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6903 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6904 				break;
6905 			case RTE_MACVLAN_PERFECT_MATCH:
6906 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6907 				break;
6908 			case RTE_MAC_HASH_MATCH:
6909 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6910 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6911 				break;
6912 			case RTE_MACVLAN_HASH_MATCH:
6913 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6914 				break;
6915 			default:
6916 				PMD_DRV_LOG(ERR, "Invalid MAC match type");
6917 				ret = I40E_ERR_PARAM;
6918 				goto DONE;
6919 			}
6920 
6921 			req_list[i].queue_number = 0;
6922 
6923 			req_list[i].flags = rte_cpu_to_le_16(flags);
6924 		}
6925 
6926 		ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6927 						actual_num, NULL);
6928 		if (ret != I40E_SUCCESS) {
6929 			PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6930 			goto DONE;
6931 		}
6932 		num += actual_num;
6933 	} while (num < total);
6934 
6935 DONE:
6936 	rte_free(req_list);
6937 	return ret;
6938 }
6939 
6940 int
6941 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6942 			    struct i40e_macvlan_filter *filter,
6943 			    int total)
6944 {
6945 	int ele_num, ele_buff_size;
6946 	int num, actual_num, i;
6947 	uint16_t flags;
6948 	int ret = I40E_SUCCESS;
6949 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6950 	struct i40e_aqc_remove_macvlan_element_data *req_list;
6951 
6952 	if (filter == NULL || total == 0)
6953 		return I40E_ERR_PARAM;
6954 
6955 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6956 	ele_buff_size = hw->aq.asq_buf_size;
6957 
6958 	req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6959 	if (req_list == NULL) {
6960 		PMD_DRV_LOG(ERR, "Fail to allocate memory");
6961 		return I40E_ERR_NO_MEMORY;
6962 	}
6963 
6964 	num = 0;
6965 	do {
6966 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6967 		memset(req_list, 0, ele_buff_size);
6968 
6969 		for (i = 0; i < actual_num; i++) {
6970 			rte_memcpy(req_list[i].mac_addr,
6971 				&filter[num + i].macaddr, ETH_ADDR_LEN);
6972 			req_list[i].vlan_tag =
6973 				rte_cpu_to_le_16(filter[num + i].vlan_id);
6974 
6975 			switch (filter[num + i].filter_type) {
6976 			case RTE_MAC_PERFECT_MATCH:
6977 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6978 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6979 				break;
6980 			case RTE_MACVLAN_PERFECT_MATCH:
6981 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6982 				break;
6983 			case RTE_MAC_HASH_MATCH:
6984 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6985 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6986 				break;
6987 			case RTE_MACVLAN_HASH_MATCH:
6988 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6989 				break;
6990 			default:
6991 				PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6992 				ret = I40E_ERR_PARAM;
6993 				goto DONE;
6994 			}
6995 			req_list[i].flags = rte_cpu_to_le_16(flags);
6996 		}
6997 
6998 		ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6999 						actual_num, NULL);
7000 		if (ret != I40E_SUCCESS) {
7001 			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
7002 			goto DONE;
7003 		}
7004 		num += actual_num;
7005 	} while (num < total);
7006 
7007 DONE:
7008 	rte_free(req_list);
7009 	return ret;
7010 }
7011 
7012 /* Find out specific MAC filter */
7013 static struct i40e_mac_filter *
7014 i40e_find_mac_filter(struct i40e_vsi *vsi,
7015 			 struct rte_ether_addr *macaddr)
7016 {
7017 	struct i40e_mac_filter *f;
7018 
7019 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
7020 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
7021 			return f;
7022 	}
7023 
7024 	return NULL;
7025 }
7026 
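/*
 * VLAN membership is tracked in the software bitmap vsi->vfta: a VLAN id
 * is mapped to a 32-bit word index and a bit inside that word with
 * I40E_VFTA_IDX() and I40E_VFTA_BIT(). Assuming the usual 32-bit word
 * layout, vlan_id 100 lands in word 3 (100 / 32) at bit 4 (100 % 32).
 */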
7027 static bool
7028 i40e_find_vlan_filter(struct i40e_vsi *vsi,
7029 			 uint16_t vlan_id)
7030 {
7031 	uint32_t vid_idx, vid_bit;
7032 
7033 	if (vlan_id > ETH_VLAN_ID_MAX)
7034 		return 0;
7035 
7036 	vid_idx = I40E_VFTA_IDX(vlan_id);
7037 	vid_bit = I40E_VFTA_BIT(vlan_id);
7038 
7039 	if (vsi->vfta[vid_idx] & vid_bit)
7040 		return 1;
7041 	else
7042 		return 0;
7043 }
7044 
7045 static void
7046 i40e_store_vlan_filter(struct i40e_vsi *vsi,
7047 		       uint16_t vlan_id, bool on)
7048 {
7049 	uint32_t vid_idx, vid_bit;
7050 
7051 	vid_idx = I40E_VFTA_IDX(vlan_id);
7052 	vid_bit = I40E_VFTA_BIT(vlan_id);
7053 
7054 	if (on)
7055 		vsi->vfta[vid_idx] |= vid_bit;
7056 	else
7057 		vsi->vfta[vid_idx] &= ~vid_bit;
7058 }
7059 
7060 void
7061 i40e_set_vlan_filter(struct i40e_vsi *vsi,
7062 		     uint16_t vlan_id, bool on)
7063 {
7064 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7065 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
7066 	int ret;
7067 
7068 	if (vlan_id > ETH_VLAN_ID_MAX)
7069 		return;
7070 
7071 	i40e_store_vlan_filter(vsi, vlan_id, on);
7072 
7073 	if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
7074 		return;
7075 
7076 	vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
7077 
7078 	if (on) {
7079 		ret = i40e_aq_add_vlan(hw, vsi->seid,
7080 				       &vlan_data, 1, NULL);
7081 		if (ret != I40E_SUCCESS)
7082 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
7083 	} else {
7084 		ret = i40e_aq_remove_vlan(hw, vsi->seid,
7085 					  &vlan_data, 1, NULL);
7086 		if (ret != I40E_SUCCESS)
7087 			PMD_DRV_LOG(ERR,
7088 				    "Failed to remove vlan filter");
7089 	}
7090 }
7091 
7092 /**
7093  * Find all vlan options for a specific mac addr,
7094  * filling in the vlans actually found.
7095  */
7096 int
7097 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
7098 			   struct i40e_macvlan_filter *mv_f,
7099 			   int num, struct rte_ether_addr *addr)
7100 {
7101 	int i;
7102 	uint32_t j, k;
7103 
7104 	/**
7105 	 * Do not use i40e_find_vlan_filter here, to decrease the loop time,
7106 	 * although the code looks more complex.
7107 	 */
7108 	if (num < vsi->vlan_num)
7109 		return I40E_ERR_PARAM;
7110 
7111 	i = 0;
7112 	for (j = 0; j < I40E_VFTA_SIZE; j++) {
7113 		if (vsi->vfta[j]) {
7114 			for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
7115 				if (vsi->vfta[j] & (1 << k)) {
7116 					if (i > num - 1) {
7117 						PMD_DRV_LOG(ERR,
7118 							"vlan number doesn't match");
7119 						return I40E_ERR_PARAM;
7120 					}
7121 					rte_memcpy(&mv_f[i].macaddr,
7122 							addr, ETH_ADDR_LEN);
7123 					mv_f[i].vlan_id =
7124 						j * I40E_UINT32_BIT_SIZE + k;
7125 					i++;
7126 				}
7127 			}
7128 		}
7129 	}
7130 	return I40E_SUCCESS;
7131 }
7132 
7133 static inline int
7134 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7135 			   struct i40e_macvlan_filter *mv_f,
7136 			   int num,
7137 			   uint16_t vlan)
7138 {
7139 	int i = 0;
7140 	struct i40e_mac_filter *f;
7141 
7142 	if (num < vsi->mac_num)
7143 		return I40E_ERR_PARAM;
7144 
7145 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
7146 		if (i > num - 1) {
7147 			PMD_DRV_LOG(ERR, "buffer number doesn't match");
7148 			return I40E_ERR_PARAM;
7149 		}
7150 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7151 				ETH_ADDR_LEN);
7152 		mv_f[i].vlan_id = vlan;
7153 		mv_f[i].filter_type = f->mac_info.filter_type;
7154 		i++;
7155 	}
7156 
7157 	return I40E_SUCCESS;
7158 }
7159 
7160 static int
7161 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7162 {
7163 	int i, j, num;
7164 	struct i40e_mac_filter *f;
7165 	struct i40e_macvlan_filter *mv_f;
7166 	int ret = I40E_SUCCESS;
7167 
7168 	if (vsi == NULL || vsi->mac_num == 0)
7169 		return I40E_ERR_PARAM;
7170 
7171 	/* Case that no vlan is set */
7172 	if (vsi->vlan_num == 0)
7173 		num = vsi->mac_num;
7174 	else
7175 		num = vsi->mac_num * vsi->vlan_num;
7176 
7177 	mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7178 	if (mv_f == NULL) {
7179 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7180 		return I40E_ERR_NO_MEMORY;
7181 	}
7182 
7183 	i = 0;
7184 	if (vsi->vlan_num == 0) {
7185 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7186 			rte_memcpy(&mv_f[i].macaddr,
7187 				&f->mac_info.mac_addr, ETH_ADDR_LEN);
7188 			mv_f[i].filter_type = f->mac_info.filter_type;
7189 			mv_f[i].vlan_id = 0;
7190 			i++;
7191 		}
7192 	} else {
7193 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7194 			ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
7195 					vsi->vlan_num, &f->mac_info.mac_addr);
7196 			if (ret != I40E_SUCCESS)
7197 				goto DONE;
7198 			for (j = i; j < i + vsi->vlan_num; j++)
7199 				mv_f[j].filter_type = f->mac_info.filter_type;
7200 			i += vsi->vlan_num;
7201 		}
7202 	}
7203 
7204 	ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7205 DONE:
7206 	rte_free(mv_f);
7207 
7208 	return ret;
7209 }
7210 
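/*
 * Adding a VLAN to a VSI is a two-step operation: a (MAC, VLAN) filter is
 * first programmed for every MAC address already attached to the VSI
 * (i40e_find_all_mac_for_vlan + i40e_add_macvlan_filters), and the VLAN is
 * then recorded in the software bitmap and, when VLAN filtering or
 * anti-spoofing is enabled, in the hardware VLAN table through
 * i40e_set_vlan_filter().
 */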
7211 int
7212 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7213 {
7214 	struct i40e_macvlan_filter *mv_f;
7215 	int mac_num;
7216 	int ret = I40E_SUCCESS;
7217 
7218 	if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
7219 		return I40E_ERR_PARAM;
7220 
7221 	/* If it's already set, just return */
7222 	if (i40e_find_vlan_filter(vsi, vlan))
7223 		return I40E_SUCCESS;
7224 
7225 	mac_num = vsi->mac_num;
7226 
7227 	if (mac_num == 0) {
7228 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7229 		return I40E_ERR_PARAM;
7230 	}
7231 
7232 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7233 
7234 	if (mv_f == NULL) {
7235 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7236 		return I40E_ERR_NO_MEMORY;
7237 	}
7238 
7239 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7240 
7241 	if (ret != I40E_SUCCESS)
7242 		goto DONE;
7243 
7244 	ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7245 
7246 	if (ret != I40E_SUCCESS)
7247 		goto DONE;
7248 
7249 	i40e_set_vlan_filter(vsi, vlan, 1);
7250 
7251 	vsi->vlan_num++;
7252 	ret = I40E_SUCCESS;
7253 DONE:
7254 	rte_free(mv_f);
7255 	return ret;
7256 }
7257 
7258 int
7259 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7260 {
7261 	struct i40e_macvlan_filter *mv_f;
7262 	int mac_num;
7263 	int ret = I40E_SUCCESS;
7264 
7265 	/**
7266 	 * Vlan 0 is the generic filter for untagged packets
7267 	 * and can't be removed.
7268 	 */
7269 	if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
7270 		return I40E_ERR_PARAM;
7271 
7272 	/* If it can't be found, just return */
7273 	if (!i40e_find_vlan_filter(vsi, vlan))
7274 		return I40E_ERR_PARAM;
7275 
7276 	mac_num = vsi->mac_num;
7277 
7278 	if (mac_num == 0) {
7279 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7280 		return I40E_ERR_PARAM;
7281 	}
7282 
7283 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7284 
7285 	if (mv_f == NULL) {
7286 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7287 		return I40E_ERR_NO_MEMORY;
7288 	}
7289 
7290 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7291 
7292 	if (ret != I40E_SUCCESS)
7293 		goto DONE;
7294 
7295 	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7296 
7297 	if (ret != I40E_SUCCESS)
7298 		goto DONE;
7299 
7300 	/* This is the last vlan to remove; replace all mac filters with vlan 0 */
7301 	if (vsi->vlan_num == 1) {
7302 		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7303 		if (ret != I40E_SUCCESS)
7304 			goto DONE;
7305 
7306 		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7307 		if (ret != I40E_SUCCESS)
7308 			goto DONE;
7309 	}
7310 
7311 	i40e_set_vlan_filter(vsi, vlan, 0);
7312 
7313 	vsi->vlan_num--;
7314 	ret = I40E_SUCCESS;
7315 DONE:
7316 	rte_free(mv_f);
7317 	return ret;
7318 }
7319 
7320 int
7321 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7322 {
7323 	struct i40e_mac_filter *f;
7324 	struct i40e_macvlan_filter *mv_f;
7325 	int i, vlan_num = 0;
7326 	int ret = I40E_SUCCESS;
7327 
7328 	/* If it's an add and we've already configured it, return */
7329 	f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7330 	if (f != NULL)
7331 		return I40E_SUCCESS;
7332 	if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
7333 		(mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
7334 
7335 		/**
7336 		 * If vlan_num is 0, this is the first time to add a mac;
7337 		 * set the mask for vlan_id 0.
7338 		 */
7339 		if (vsi->vlan_num == 0) {
7340 			i40e_set_vlan_filter(vsi, 0, 1);
7341 			vsi->vlan_num = 1;
7342 		}
7343 		vlan_num = vsi->vlan_num;
7344 	} else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
7345 			(mac_filter->filter_type == RTE_MAC_HASH_MATCH))
7346 		vlan_num = 1;
7347 
7348 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7349 	if (mv_f == NULL) {
7350 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7351 		return I40E_ERR_NO_MEMORY;
7352 	}
7353 
7354 	for (i = 0; i < vlan_num; i++) {
7355 		mv_f[i].filter_type = mac_filter->filter_type;
7356 		rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7357 				ETH_ADDR_LEN);
7358 	}
7359 
7360 	if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7361 		mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
7362 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7363 					&mac_filter->mac_addr);
7364 		if (ret != I40E_SUCCESS)
7365 			goto DONE;
7366 	}
7367 
7368 	ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7369 	if (ret != I40E_SUCCESS)
7370 		goto DONE;
7371 
7372 	/* Add the mac addr into the mac list */
7373 	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7374 	if (f == NULL) {
7375 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7376 		ret = I40E_ERR_NO_MEMORY;
7377 		goto DONE;
7378 	}
7379 	rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7380 			ETH_ADDR_LEN);
7381 	f->mac_info.filter_type = mac_filter->filter_type;
7382 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7383 	vsi->mac_num++;
7384 
7385 	ret = I40E_SUCCESS;
7386 DONE:
7387 	rte_free(mv_f);
7388 
7389 	return ret;
7390 }
7391 
7392 int
7393 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr)
7394 {
7395 	struct i40e_mac_filter *f;
7396 	struct i40e_macvlan_filter *mv_f;
7397 	int i, vlan_num;
7398 	enum rte_mac_filter_type filter_type;
7399 	int ret = I40E_SUCCESS;
7400 
7401 	/* Can't find it, return an error */
7402 	f = i40e_find_mac_filter(vsi, addr);
7403 	if (f == NULL)
7404 		return I40E_ERR_PARAM;
7405 
7406 	vlan_num = vsi->vlan_num;
7407 	filter_type = f->mac_info.filter_type;
7408 	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7409 		filter_type == RTE_MACVLAN_HASH_MATCH) {
7410 		if (vlan_num == 0) {
7411 			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7412 			return I40E_ERR_PARAM;
7413 		}
7414 	} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
7415 			filter_type == RTE_MAC_HASH_MATCH)
7416 		vlan_num = 1;
7417 
7418 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7419 	if (mv_f == NULL) {
7420 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7421 		return I40E_ERR_NO_MEMORY;
7422 	}
7423 
7424 	for (i = 0; i < vlan_num; i++) {
7425 		mv_f[i].filter_type = filter_type;
7426 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7427 				ETH_ADDR_LEN);
7428 	}
7429 	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7430 			filter_type == RTE_MACVLAN_HASH_MATCH) {
7431 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7432 		if (ret != I40E_SUCCESS)
7433 			goto DONE;
7434 	}
7435 
7436 	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7437 	if (ret != I40E_SUCCESS)
7438 		goto DONE;
7439 
7440 	/* Remove the mac addr from the mac list */
7441 	TAILQ_REMOVE(&vsi->mac_list, f, next);
7442 	rte_free(f);
7443 	vsi->mac_num--;
7444 
7445 	ret = I40E_SUCCESS;
7446 DONE:
7447 	rte_free(mv_f);
7448 	return ret;
7449 }
7450 
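/*
 * RSS hash enable bits (HENA) are expressed in hardware packet classifier
 * types (pctypes), while the ethdev API uses RTE_ETH_FLOW_* flow types.
 * adapter->pctypes_tbl[] maps every flow type to its pctype mask, so the
 * helper below translates flow-type flags into a HENA value and
 * i40e_parse_hena() performs the reverse translation.
 */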
7451 /* Configure hash enable flags for RSS */
7452 uint64_t
7453 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7454 {
7455 	uint64_t hena = 0;
7456 	int i;
7457 
7458 	if (!flags)
7459 		return hena;
7460 
7461 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7462 		if (flags & (1ULL << i))
7463 			hena |= adapter->pctypes_tbl[i];
7464 	}
7465 
7466 	return hena;
7467 }
7468 
7469 /* Parse the hash enable flags */
7470 uint64_t
7471 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7472 {
7473 	uint64_t rss_hf = 0;
7474 
7475 	if (!flags)
7476 		return rss_hf;
7477 	int i;
7478 
7479 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7480 		if (flags & adapter->pctypes_tbl[i])
7481 			rss_hf |= (1ULL << i);
7482 	}
7483 	return rss_hf;
7484 }
7485 
7486 /* Disable RSS */
7487 static void
7488 i40e_pf_disable_rss(struct i40e_pf *pf)
7489 {
7490 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7491 
7492 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7493 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7494 	I40E_WRITE_FLUSH(hw);
7495 }
7496 
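/*
 * The RSS hash key must be exactly (HKEY_MAX_INDEX + 1) * 4 bytes long,
 * one 32-bit word per key register. Depending on the firmware capability
 * flag I40E_FLAG_RSS_AQ_CAPABLE, the key is written either through the
 * AdminQ or directly into the PF/VF HKEY registers.
 */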
7497 int
7498 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7499 {
7500 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7501 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7502 	uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7503 			   I40E_VFQF_HKEY_MAX_INDEX :
7504 			   I40E_PFQF_HKEY_MAX_INDEX;
7505 	int ret = 0;
7506 
7507 	if (!key || key_len == 0) {
7508 		PMD_DRV_LOG(DEBUG, "No key to be configured");
7509 		return 0;
7510 	} else if (key_len != (key_idx + 1) *
7511 		sizeof(uint32_t)) {
7512 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7513 		return -EINVAL;
7514 	}
7515 
7516 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7517 		struct i40e_aqc_get_set_rss_key_data *key_dw =
7518 			(struct i40e_aqc_get_set_rss_key_data *)key;
7519 
7520 		ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7521 		if (ret)
7522 			PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7523 	} else {
7524 		uint32_t *hash_key = (uint32_t *)key;
7525 		uint16_t i;
7526 
7527 		if (vsi->type == I40E_VSI_SRIOV) {
7528 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7529 				I40E_WRITE_REG(
7530 					hw,
7531 					I40E_VFQF_HKEY1(i, vsi->user_param),
7532 					hash_key[i]);
7533 
7534 		} else {
7535 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7536 				I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7537 					       hash_key[i]);
7538 		}
7539 		I40E_WRITE_FLUSH(hw);
7540 	}
7541 
7542 	return ret;
7543 }
7544 
7545 static int
7546 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7547 {
7548 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7549 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7550 	uint32_t reg;
7551 	int ret;
7552 
7553 	if (!key || !key_len)
7554 		return 0;
7555 
7556 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7557 		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7558 			(struct i40e_aqc_get_set_rss_key_data *)key);
7559 		if (ret) {
7560 			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7561 			return ret;
7562 		}
7563 	} else {
7564 		uint32_t *key_dw = (uint32_t *)key;
7565 		uint16_t i;
7566 
7567 		if (vsi->type == I40E_VSI_SRIOV) {
7568 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7569 				reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7570 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7571 			}
7572 			*key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7573 				   sizeof(uint32_t);
7574 		} else {
7575 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7576 				reg = I40E_PFQF_HKEY(i);
7577 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7578 			}
7579 			*key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7580 				   sizeof(uint32_t);
7581 		}
7582 	}
7583 	return 0;
7584 }
7585 
7586 static int
7587 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7588 {
7589 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7590 	uint64_t hena;
7591 	int ret;
7592 
7593 	ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7594 			       rss_conf->rss_key_len);
7595 	if (ret)
7596 		return ret;
7597 
7598 	hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7599 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7600 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7601 	I40E_WRITE_FLUSH(hw);
7602 
7603 	return 0;
7604 }
7605 
7606 static int
7607 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7608 			 struct rte_eth_rss_conf *rss_conf)
7609 {
7610 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7611 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7612 	uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7613 	uint64_t hena;
7614 
7615 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7616 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7617 
7618 	if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7619 		if (rss_hf != 0) /* Enable RSS */
7620 			return -EINVAL;
7621 		return 0; /* Nothing to do */
7622 	}
7623 	/* RSS enabled */
7624 	if (rss_hf == 0) /* Disable RSS */
7625 		return -EINVAL;
7626 
7627 	return i40e_hw_rss_hash_set(pf, rss_conf);
7628 }
7629 
7630 static int
7631 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7632 			   struct rte_eth_rss_conf *rss_conf)
7633 {
7634 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7635 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7636 	uint64_t hena;
7637 	int ret;
7638 
7639 	if (!rss_conf)
7640 		return -EINVAL;
7641 
7642 	ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7643 			 &rss_conf->rss_key_len);
7644 	if (ret)
7645 		return ret;
7646 
7647 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7648 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7649 	rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7650 
7651 	return 0;
7652 }
7653 
7654 static int
7655 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7656 {
7657 	switch (filter_type) {
7658 	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7659 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7660 		break;
7661 	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7662 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7663 		break;
7664 	case RTE_TUNNEL_FILTER_IMAC_TENID:
7665 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7666 		break;
7667 	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7668 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7669 		break;
7670 	case ETH_TUNNEL_FILTER_IMAC:
7671 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7672 		break;
7673 	case ETH_TUNNEL_FILTER_OIP:
7674 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7675 		break;
7676 	case ETH_TUNNEL_FILTER_IIP:
7677 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7678 		break;
7679 	default:
7680 		PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7681 		return -EINVAL;
7682 	}
7683 
7684 	return 0;
7685 }
7686 
7687 /* Convert tunnel filter structure */
7688 static int
7689 i40e_tunnel_filter_convert(
7690 	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7691 	struct i40e_tunnel_filter *tunnel_filter)
7692 {
7693 	rte_ether_addr_copy((struct rte_ether_addr *)
7694 			&cld_filter->element.outer_mac,
7695 		(struct rte_ether_addr *)&tunnel_filter->input.outer_mac);
7696 	rte_ether_addr_copy((struct rte_ether_addr *)
7697 			&cld_filter->element.inner_mac,
7698 		(struct rte_ether_addr *)&tunnel_filter->input.inner_mac);
7699 	tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7700 	if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7701 	     I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7702 	    I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7703 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7704 	else
7705 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7706 	tunnel_filter->input.flags = cld_filter->element.flags;
7707 	tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7708 	tunnel_filter->queue = cld_filter->element.queue_number;
7709 	rte_memcpy(tunnel_filter->input.general_fields,
7710 		   cld_filter->general_fields,
7711 		   sizeof(cld_filter->general_fields));
7712 
7713 	return 0;
7714 }
7715 
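/*
 * Tunnel filters programmed into the hardware are shadowed in a software
 * rule list: the filter input fields serve as the key of an rte_hash
 * table, and the slot index returned by the hash is used to address
 * hash_map[], which stores the pointer to the i40e_tunnel_filter entry.
 */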
7716 /* Check if the tunnel filter already exists */
7717 struct i40e_tunnel_filter *
7718 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7719 			     const struct i40e_tunnel_filter_input *input)
7720 {
7721 	int ret;
7722 
7723 	ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7724 	if (ret < 0)
7725 		return NULL;
7726 
7727 	return tunnel_rule->hash_map[ret];
7728 }
7729 
7730 /* Add a tunnel filter into the SW list */
7731 static int
7732 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7733 			     struct i40e_tunnel_filter *tunnel_filter)
7734 {
7735 	struct i40e_tunnel_rule *rule = &pf->tunnel;
7736 	int ret;
7737 
7738 	ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7739 	if (ret < 0) {
7740 		PMD_DRV_LOG(ERR,
7741 			    "Failed to insert tunnel filter to hash table %d!",
7742 			    ret);
7743 		return ret;
7744 	}
7745 	rule->hash_map[ret] = tunnel_filter;
7746 
7747 	TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7748 
7749 	return 0;
7750 }
7751 
7752 /* Delete a tunnel filter from the SW list */
7753 int
7754 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7755 			  struct i40e_tunnel_filter_input *input)
7756 {
7757 	struct i40e_tunnel_rule *rule = &pf->tunnel;
7758 	struct i40e_tunnel_filter *tunnel_filter;
7759 	int ret;
7760 
7761 	ret = rte_hash_del_key(rule->hash_table, input);
7762 	if (ret < 0) {
7763 		PMD_DRV_LOG(ERR,
7764 			    "Failed to delete tunnel filter from hash table %d!",
7765 			    ret);
7766 		return ret;
7767 	}
7768 	tunnel_filter = rule->hash_map[ret];
7769 	rule->hash_map[ret] = NULL;
7770 
7771 	TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7772 	rte_free(tunnel_filter);
7773 
7774 	return 0;
7775 }
7776 
7777 int
7778 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7779 			struct rte_eth_tunnel_filter_conf *tunnel_filter,
7780 			uint8_t add)
7781 {
7782 	uint16_t ip_type;
7783 	uint32_t ipv4_addr, ipv4_addr_le;
7784 	uint8_t i, tun_type = 0;
7785 	/* internal variable to convert ipv6 byte order */
7786 	uint32_t convert_ipv6[4];
7787 	int val, ret = 0;
7788 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7789 	struct i40e_vsi *vsi = pf->main_vsi;
7790 	struct i40e_aqc_cloud_filters_element_bb *cld_filter;
7791 	struct i40e_aqc_cloud_filters_element_bb *pfilter;
7792 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7793 	struct i40e_tunnel_filter *tunnel, *node;
7794 	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7795 
7796 	cld_filter = rte_zmalloc("tunnel_filter",
7797 			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7798 			 0);
7799 
7800 	if (NULL == cld_filter) {
7801 		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7802 		return -ENOMEM;
7803 	}
7804 	pfilter = cld_filter;
7805 
7806 	rte_ether_addr_copy(&tunnel_filter->outer_mac,
7807 			(struct rte_ether_addr *)&pfilter->element.outer_mac);
7808 	rte_ether_addr_copy(&tunnel_filter->inner_mac,
7809 			(struct rte_ether_addr *)&pfilter->element.inner_mac);
7810 
7811 	pfilter->element.inner_vlan =
7812 		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7813 	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7814 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7815 		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7816 		ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7817 		rte_memcpy(&pfilter->element.ipaddr.v4.data,
7818 				&ipv4_addr_le,
7819 				sizeof(pfilter->element.ipaddr.v4.data));
7820 	} else {
7821 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7822 		for (i = 0; i < 4; i++) {
7823 			convert_ipv6[i] =
7824 			rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7825 		}
7826 		rte_memcpy(&pfilter->element.ipaddr.v6.data,
7827 			   &convert_ipv6,
7828 			   sizeof(pfilter->element.ipaddr.v6.data));
7829 	}
7830 
7831 	/* check tunneled type */
7832 	switch (tunnel_filter->tunnel_type) {
7833 	case RTE_TUNNEL_TYPE_VXLAN:
7834 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7835 		break;
7836 	case RTE_TUNNEL_TYPE_NVGRE:
7837 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7838 		break;
7839 	case RTE_TUNNEL_TYPE_IP_IN_GRE:
7840 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7841 		break;
7842 	case RTE_TUNNEL_TYPE_VXLAN_GPE:
7843 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE;
7844 		break;
7845 	default:
7846 		/* Other tunnel types are not supported. */
7847 		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7848 		rte_free(cld_filter);
7849 		return -EINVAL;
7850 	}
7851 
7852 	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7853 				       &pfilter->element.flags);
7854 	if (val < 0) {
7855 		rte_free(cld_filter);
7856 		return -EINVAL;
7857 	}
7858 
7859 	pfilter->element.flags |= rte_cpu_to_le_16(
7860 		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7861 		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7862 	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7863 	pfilter->element.queue_number =
7864 		rte_cpu_to_le_16(tunnel_filter->queue_id);
7865 
7866 	/* Check if the filter is in the SW list */
7867 	memset(&check_filter, 0, sizeof(check_filter));
7868 	i40e_tunnel_filter_convert(cld_filter, &check_filter);
7869 	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7870 	if (add && node) {
7871 		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7872 		rte_free(cld_filter);
7873 		return -EINVAL;
7874 	}
7875 
7876 	if (!add && !node) {
7877 		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7878 		rte_free(cld_filter);
7879 		return -EINVAL;
7880 	}
7881 
7882 	if (add) {
7883 		ret = i40e_aq_add_cloud_filters(hw,
7884 					vsi->seid, &cld_filter->element, 1);
7885 		if (ret < 0) {
7886 			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7887 			rte_free(cld_filter);
7888 			return -ENOTSUP;
7889 		}
7890 		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7891 		if (tunnel == NULL) {
7892 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7893 			rte_free(cld_filter);
7894 			return -ENOMEM;
7895 		}
7896 
7897 		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7898 		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7899 		if (ret < 0)
7900 			rte_free(tunnel);
7901 	} else {
7902 		ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
7903 						   &cld_filter->element, 1);
7904 		if (ret < 0) {
7905 			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7906 			rte_free(cld_filter);
7907 			return -ENOTSUP;
7908 		}
7909 		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7910 	}
7911 
7912 	rte_free(cld_filter);
7913 	return ret;
7914 }
7915 
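/*
 * The replace-filter helpers below use the AdminQ "replace cloud filters"
 * command to redefine spare L1 and cloud filter types so that MPLSoUDP,
 * MPLSoGRE and (further down) GTP tunnels can be matched. They bail out
 * with I40E_NOT_SUPPORTED when pf->support_multi_driver is set, since the
 * redefinition touches device-global filter state that other drivers may
 * rely on.
 */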
7916 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7917 #define I40E_TR_VXLAN_GRE_KEY_MASK		0x4
7918 #define I40E_TR_GENEVE_KEY_MASK			0x8
7919 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK		0x40
7920 #define I40E_TR_GRE_KEY_MASK			0x400
7921 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK		0x800
7922 #define I40E_TR_GRE_NO_KEY_MASK			0x8000
7923 
7924 static enum
7925 i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7926 {
7927 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7928 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7929 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7930 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7931 	enum i40e_status_code status = I40E_SUCCESS;
7932 
7933 	if (pf->support_multi_driver) {
7934 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7935 		return I40E_NOT_SUPPORTED;
7936 	}
7937 
7938 	memset(&filter_replace, 0,
7939 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7940 	memset(&filter_replace_buf, 0,
7941 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7942 
7943 	/* create L1 filter */
7944 	filter_replace.old_filter_type =
7945 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7946 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7947 	filter_replace.tr_bit = 0;
7948 
7949 	/* Prepare the buffer, 3 entries */
7950 	filter_replace_buf.data[0] =
7951 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7952 	filter_replace_buf.data[0] |=
7953 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7954 	filter_replace_buf.data[2] = 0xFF;
7955 	filter_replace_buf.data[3] = 0xFF;
7956 	filter_replace_buf.data[4] =
7957 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7958 	filter_replace_buf.data[4] |=
7959 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7960 	filter_replace_buf.data[7] = 0xF0;
7961 	filter_replace_buf.data[8]
7962 		= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7963 	filter_replace_buf.data[8] |=
7964 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7965 	filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7966 		I40E_TR_GENEVE_KEY_MASK |
7967 		I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7968 	filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7969 		I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7970 		I40E_TR_GRE_NO_KEY_MASK) >> 8;
7971 
7972 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7973 					       &filter_replace_buf);
7974 	if (!status && (filter_replace.old_filter_type !=
7975 			filter_replace.new_filter_type))
7976 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7977 			    " original: 0x%x, new: 0x%x",
7978 			    dev->device->name,
7979 			    filter_replace.old_filter_type,
7980 			    filter_replace.new_filter_type);
7981 
7982 	return status;
7983 }
7984 
7985 static enum
7986 i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7987 {
7988 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7989 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7990 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7991 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7992 	enum i40e_status_code status = I40E_SUCCESS;
7993 
7994 	if (pf->support_multi_driver) {
7995 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7996 		return I40E_NOT_SUPPORTED;
7997 	}
7998 
7999 	/* For MPLSoUDP */
8000 	memset(&filter_replace, 0,
8001 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8002 	memset(&filter_replace_buf, 0,
8003 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8004 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
8005 		I40E_AQC_MIRROR_CLOUD_FILTER;
8006 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
8007 	filter_replace.new_filter_type =
8008 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
8009 	/* Prepare the buffer, 2 entries */
8010 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8011 	filter_replace_buf.data[0] |=
8012 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8013 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8014 	filter_replace_buf.data[4] |=
8015 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8016 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8017 					       &filter_replace_buf);
8018 	if (status < 0)
8019 		return status;
8020 	if (filter_replace.old_filter_type !=
8021 	    filter_replace.new_filter_type)
8022 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8023 			    " original: 0x%x, new: 0x%x",
8024 			    dev->device->name,
8025 			    filter_replace.old_filter_type,
8026 			    filter_replace.new_filter_type);
8027 
8028 	/* For MPLSoGRE */
8029 	memset(&filter_replace, 0,
8030 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8031 	memset(&filter_replace_buf, 0,
8032 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8033 
8034 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
8035 		I40E_AQC_MIRROR_CLOUD_FILTER;
8036 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
8037 	filter_replace.new_filter_type =
8038 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
8039 	/* Prepare the buffer, 2 entries */
8040 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8041 	filter_replace_buf.data[0] |=
8042 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8043 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
8044 	filter_replace_buf.data[4] |=
8045 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8046 
8047 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8048 					       &filter_replace_buf);
8049 	if (!status && (filter_replace.old_filter_type !=
8050 			filter_replace.new_filter_type))
8051 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8052 			    " original: 0x%x, new: 0x%x",
8053 			    dev->device->name,
8054 			    filter_replace.old_filter_type,
8055 			    filter_replace.new_filter_type);
8056 
8057 	return status;
8058 }
8059 
8060 static enum i40e_status_code
8061 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
8062 {
8063 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8064 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8065 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8066 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8067 	enum i40e_status_code status = I40E_SUCCESS;
8068 
8069 	if (pf->support_multi_driver) {
8070 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
8071 		return I40E_NOT_SUPPORTED;
8072 	}
8073 
8074 	/* For GTP-C */
8075 	memset(&filter_replace, 0,
8076 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8077 	memset(&filter_replace_buf, 0,
8078 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8079 	/* create L1 filter */
8080 	filter_replace.old_filter_type =
8081 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
8082 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
8083 	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
8084 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8085 	/* Prepare the buffer, 2 entries */
8086 	filter_replace_buf.data[0] =
8087 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8088 	filter_replace_buf.data[0] |=
8089 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8090 	filter_replace_buf.data[2] = 0xFF;
8091 	filter_replace_buf.data[3] = 0xFF;
8092 	filter_replace_buf.data[4] =
8093 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8094 	filter_replace_buf.data[4] |=
8095 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8096 	filter_replace_buf.data[6] = 0xFF;
8097 	filter_replace_buf.data[7] = 0xFF;
8098 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8099 					       &filter_replace_buf);
8100 	if (status < 0)
8101 		return status;
8102 	if (filter_replace.old_filter_type !=
8103 	    filter_replace.new_filter_type)
8104 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8105 			    " original: 0x%x, new: 0x%x",
8106 			    dev->device->name,
8107 			    filter_replace.old_filter_type,
8108 			    filter_replace.new_filter_type);
8109 
8110 	/* for GTP-U */
8111 	memset(&filter_replace, 0,
8112 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8113 	memset(&filter_replace_buf, 0,
8114 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8115 	/* create L1 filter */
8116 	filter_replace.old_filter_type =
8117 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
8118 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
8119 	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
8120 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8121 	/* Prepare the buffer, 2 entries */
8122 	filter_replace_buf.data[0] =
8123 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8124 	filter_replace_buf.data[0] |=
8125 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8126 	filter_replace_buf.data[2] = 0xFF;
8127 	filter_replace_buf.data[3] = 0xFF;
8128 	filter_replace_buf.data[4] =
8129 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8130 	filter_replace_buf.data[4] |=
8131 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8132 	filter_replace_buf.data[6] = 0xFF;
8133 	filter_replace_buf.data[7] = 0xFF;
8134 
8135 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8136 					       &filter_replace_buf);
8137 	if (!status && (filter_replace.old_filter_type !=
8138 			filter_replace.new_filter_type))
8139 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8140 			    " original: 0x%x, new: 0x%x",
8141 			    dev->device->name,
8142 			    filter_replace.old_filter_type,
8143 			    filter_replace.new_filter_type);
8144 
8145 	return status;
8146 }
8147 
8148 static enum
8149 i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8150 {
8151 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8152 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8153 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8154 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8155 	enum i40e_status_code status = I40E_SUCCESS;
8156 
8157 	if (pf->support_multi_driver) {
8158 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8159 		return I40E_NOT_SUPPORTED;
8160 	}
8161 
8162 	/* for GTP-C */
8163 	memset(&filter_replace, 0,
8164 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8165 	memset(&filter_replace_buf, 0,
8166 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8167 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8168 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8169 	filter_replace.new_filter_type =
8170 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
8171 	/* Prepare the buffer, 2 entries */
8172 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8173 	filter_replace_buf.data[0] |=
8174 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8175 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8176 	filter_replace_buf.data[4] |=
8177 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8178 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8179 					       &filter_replace_buf);
8180 	if (status < 0)
8181 		return status;
8182 	if (filter_replace.old_filter_type !=
8183 	    filter_replace.new_filter_type)
8184 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8185 			    " original: 0x%x, new: 0x%x",
8186 			    dev->device->name,
8187 			    filter_replace.old_filter_type,
8188 			    filter_replace.new_filter_type);
8189 
8190 	/* for GTP-U */
8191 	memset(&filter_replace, 0,
8192 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8193 	memset(&filter_replace_buf, 0,
8194 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8195 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8196 	filter_replace.old_filter_type =
8197 		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8198 	filter_replace.new_filter_type =
8199 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
8200 	/* Prepare the buffer, 2 entries */
8201 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8202 	filter_replace_buf.data[0] |=
8203 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8204 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8205 	filter_replace_buf.data[4] |=
8206 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8207 
8208 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8209 					       &filter_replace_buf);
8210 	if (!status && (filter_replace.old_filter_type !=
8211 			filter_replace.new_filter_type))
8212 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8213 			    " original: 0x%x, new: 0x%x",
8214 			    dev->device->name,
8215 			    filter_replace.old_filter_type,
8216 			    filter_replace.new_filter_type);
8217 
8218 	return status;
8219 }
8220 
8221 int
8222 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8223 		      struct i40e_tunnel_filter_conf *tunnel_filter,
8224 		      uint8_t add)
8225 {
8226 	uint16_t ip_type;
8227 	uint32_t ipv4_addr, ipv4_addr_le;
8228 	uint8_t i, tun_type = 0;
8229 	/* internal variable to convert ipv6 byte order */
8230 	uint32_t convert_ipv6[4];
8231 	int val, ret = 0;
8232 	struct i40e_pf_vf *vf = NULL;
8233 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8234 	struct i40e_vsi *vsi;
8235 	struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8236 	struct i40e_aqc_cloud_filters_element_bb *pfilter;
8237 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8238 	struct i40e_tunnel_filter *tunnel, *node;
8239 	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8240 	uint32_t teid_le;
8241 	bool big_buffer = 0;
8242 
8243 	cld_filter = rte_zmalloc("tunnel_filter",
8244 			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8245 			 0);
8246 
8247 	if (cld_filter == NULL) {
8248 		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8249 		return -ENOMEM;
8250 	}
8251 	pfilter = cld_filter;
8252 
8253 	rte_ether_addr_copy(&tunnel_filter->outer_mac,
8254 			(struct rte_ether_addr *)&pfilter->element.outer_mac);
8255 	rte_ether_addr_copy(&tunnel_filter->inner_mac,
8256 			(struct rte_ether_addr *)&pfilter->element.inner_mac);
8257 
8258 	pfilter->element.inner_vlan =
8259 		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8260 	if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8261 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8262 		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8263 		ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8264 		rte_memcpy(&pfilter->element.ipaddr.v4.data,
8265 				&ipv4_addr_le,
8266 				sizeof(pfilter->element.ipaddr.v4.data));
8267 	} else {
8268 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8269 		for (i = 0; i < 4; i++) {
8270 			convert_ipv6[i] =
8271 			rte_cpu_to_le_32(rte_be_to_cpu_32(
8272 					 tunnel_filter->ip_addr.ipv6_addr[i]));
8273 		}
8274 		rte_memcpy(&pfilter->element.ipaddr.v6.data,
8275 			   &convert_ipv6,
8276 			   sizeof(pfilter->element.ipaddr.v6.data));
8277 	}
8278 
8279 	/* check tunneled type */
8280 	switch (tunnel_filter->tunnel_type) {
8281 	case I40E_TUNNEL_TYPE_VXLAN:
8282 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8283 		break;
8284 	case I40E_TUNNEL_TYPE_NVGRE:
8285 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8286 		break;
8287 	case I40E_TUNNEL_TYPE_IP_IN_GRE:
8288 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8289 		break;
8290 	case I40E_TUNNEL_TYPE_MPLSoUDP:
8291 		if (!pf->mpls_replace_flag) {
8292 			i40e_replace_mpls_l1_filter(pf);
8293 			i40e_replace_mpls_cloud_filter(pf);
8294 			pf->mpls_replace_flag = 1;
8295 		}
8296 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8297 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8298 			teid_le >> 4;
8299 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8300 			(teid_le & 0xF) << 12;
8301 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8302 			0x40;
8303 		big_buffer = 1;
8304 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8305 		break;
8306 	case I40E_TUNNEL_TYPE_MPLSoGRE:
8307 		if (!pf->mpls_replace_flag) {
8308 			i40e_replace_mpls_l1_filter(pf);
8309 			i40e_replace_mpls_cloud_filter(pf);
8310 			pf->mpls_replace_flag = 1;
8311 		}
8312 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8313 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8314 			teid_le >> 4;
8315 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8316 			(teid_le & 0xF) << 12;
8317 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8318 			0x0;
8319 		big_buffer = 1;
8320 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8321 		break;
8322 	case I40E_TUNNEL_TYPE_GTPC:
8323 		if (!pf->gtp_replace_flag) {
8324 			i40e_replace_gtp_l1_filter(pf);
8325 			i40e_replace_gtp_cloud_filter(pf);
8326 			pf->gtp_replace_flag = 1;
8327 		}
8328 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8329 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8330 			(teid_le >> 16) & 0xFFFF;
8331 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8332 			teid_le & 0xFFFF;
8333 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8334 			0x0;
8335 		big_buffer = 1;
8336 		break;
8337 	case I40E_TUNNEL_TYPE_GTPU:
8338 		if (!pf->gtp_replace_flag) {
8339 			i40e_replace_gtp_l1_filter(pf);
8340 			i40e_replace_gtp_cloud_filter(pf);
8341 			pf->gtp_replace_flag = 1;
8342 		}
8343 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8344 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8345 			(teid_le >> 16) & 0xFFFF;
8346 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8347 			teid_le & 0xFFFF;
8348 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8349 			0x0;
8350 		big_buffer = 1;
8351 		break;
8352 	case I40E_TUNNEL_TYPE_QINQ:
8353 		if (!pf->qinq_replace_flag) {
8354 			ret = i40e_cloud_filter_qinq_create(pf);
8355 			if (ret < 0)
8356 				PMD_DRV_LOG(DEBUG,
8357 					    "QinQ tunnel filter already created.");
8358 			pf->qinq_replace_flag = 1;
8359 		}
8360 		/* Add the outer and inner VLAN values to the
8361 		 * general fields. Big Buffer should be set;
8362 		 * see the handling in
8363 		 * i40e_aq_add_cloud_filters.
8364 		 */
8365 		pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8366 		pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8367 		big_buffer = 1;
8368 		break;
8369 	default:
8370 		/* Other tunnel types are not supported. */
8371 		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
8372 		rte_free(cld_filter);
8373 		return -EINVAL;
8374 	}
8375 
8376 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8377 		pfilter->element.flags =
8378 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8379 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8380 		pfilter->element.flags =
8381 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
8382 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8383 		pfilter->element.flags =
8384 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8385 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8386 		pfilter->element.flags =
8387 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
8388 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8389 		pfilter->element.flags |=
8390 			I40E_AQC_ADD_CLOUD_FILTER_0X10;
8391 	else {
8392 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8393 						&pfilter->element.flags);
8394 		if (val < 0) {
8395 			rte_free(cld_filter);
8396 			return -EINVAL;
8397 		}
8398 	}
8399 
8400 	pfilter->element.flags |= rte_cpu_to_le_16(
8401 		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8402 		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8403 	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8404 	pfilter->element.queue_number =
8405 		rte_cpu_to_le_16(tunnel_filter->queue_id);
8406 
8407 	if (!tunnel_filter->is_to_vf)
8408 		vsi = pf->main_vsi;
8409 	else {
8410 		if (tunnel_filter->vf_id >= pf->vf_num) {
8411 			PMD_DRV_LOG(ERR, "Invalid argument.");
8412 			rte_free(cld_filter);
8413 			return -EINVAL;
8414 		}
8415 		vf = &pf->vfs[tunnel_filter->vf_id];
8416 		vsi = vf->vsi;
8417 	}
8418 
8419 	/* Check if the filter already exists in the SW list */
8420 	memset(&check_filter, 0, sizeof(check_filter));
8421 	i40e_tunnel_filter_convert(cld_filter, &check_filter);
8422 	check_filter.is_to_vf = tunnel_filter->is_to_vf;
8423 	check_filter.vf_id = tunnel_filter->vf_id;
8424 	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8425 	if (add && node) {
8426 		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8427 		rte_free(cld_filter);
8428 		return -EINVAL;
8429 	}
8430 
8431 	if (!add && !node) {
8432 		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8433 		rte_free(cld_filter);
8434 		return -EINVAL;
8435 	}
8436 
8437 	if (add) {
8438 		if (big_buffer)
8439 			ret = i40e_aq_add_cloud_filters_bb(hw,
8440 						   vsi->seid, cld_filter, 1);
8441 		else
8442 			ret = i40e_aq_add_cloud_filters(hw,
8443 					vsi->seid, &cld_filter->element, 1);
8444 		if (ret < 0) {
8445 			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8446 			rte_free(cld_filter);
8447 			return -ENOTSUP;
8448 		}
8449 		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8450 		if (tunnel == NULL) {
8451 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8452 			rte_free(cld_filter);
8453 			return -ENOMEM;
8454 		}
8455 
8456 		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8457 		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8458 		if (ret < 0)
8459 			rte_free(tunnel);
8460 	} else {
8461 		if (big_buffer)
8462 			ret = i40e_aq_rem_cloud_filters_bb(
8463 				hw, vsi->seid, cld_filter, 1);
8464 		else
8465 			ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8466 						&cld_filter->element, 1);
8467 		if (ret < 0) {
8468 			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8469 			rte_free(cld_filter);
8470 			return -ENOTSUP;
8471 		}
8472 		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8473 	}
8474 
8475 	rte_free(cld_filter);
8476 	return ret;
8477 }
8478 
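/* Look up @port in the PF's VXLAN UDP port table and return its index,
 * or -1 if the port is not present.
 */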
8479 static int
8480 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8481 {
8482 	uint8_t i;
8483 
8484 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8485 		if (pf->vxlan_ports[i] == port)
8486 			return i;
8487 	}
8488 
8489 	return -1;
8490 }
8491 
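/* Add a VXLAN/VXLAN-GPE UDP port to the hardware UDP tunnel table via
 * the admin queue and record it in the PF's port table and bitmap.
 */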
8492 static int
8493 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port, int udp_type)
8494 {
8495 	int  idx, ret;
8496 	uint8_t filter_idx = 0;
8497 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8498 
8499 	idx = i40e_get_vxlan_port_idx(pf, port);
8500 
8501 	/* Check if port already exists */
8502 	if (idx >= 0) {
8503 		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8504 		return -EINVAL;
8505 	}
8506 
8507 	/* Now check if there is space to add the new port */
8508 	idx = i40e_get_vxlan_port_idx(pf, 0);
8509 	if (idx < 0) {
8510 		PMD_DRV_LOG(ERR,
8511 			"Maximum number of UDP ports reached, not adding port %d",
8512 			port);
8513 		return -ENOSPC;
8514 	}
8515 
8516 	ret = i40e_aq_add_udp_tunnel(hw, port, udp_type,
8517 					&filter_idx, NULL);
8518 	if (ret < 0) {
8519 		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8520 		return -1;
8521 	}
8522 
8523 	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
8524 			 port,  filter_idx);
8525 
8526 	/* New port: add it and mark its index in the bitmap */
8527 	pf->vxlan_ports[idx] = port;
8528 	pf->vxlan_bitmap |= (1 << idx);
8529 
8530 	if (!(pf->flags & I40E_FLAG_VXLAN))
8531 		pf->flags |= I40E_FLAG_VXLAN;
8532 
8533 	return 0;
8534 }
8535 
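/* Remove a previously offloaded VXLAN UDP port from the hardware UDP
 * tunnel table and clear its entry in the PF's port table and bitmap.
 */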
8536 static int
8537 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8538 {
8539 	int idx;
8540 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8541 
8542 	if (!(pf->flags & I40E_FLAG_VXLAN)) {
8543 		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8544 		return -EINVAL;
8545 	}
8546 
8547 	idx = i40e_get_vxlan_port_idx(pf, port);
8548 
8549 	if (idx < 0) {
8550 		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8551 		return -EINVAL;
8552 	}
8553 
8554 	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8555 		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8556 		return -1;
8557 	}
8558 
8559 	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
8560 			port, idx);
8561 
8562 	pf->vxlan_ports[idx] = 0;
8563 	pf->vxlan_bitmap &= ~(1 << idx);
8564 
8565 	if (!pf->vxlan_bitmap)
8566 		pf->flags &= ~I40E_FLAG_VXLAN;
8567 
8568 	return 0;
8569 }
8570 
8571 /* Add UDP tunneling port */
8572 static int
8573 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8574 			     struct rte_eth_udp_tunnel *udp_tunnel)
8575 {
8576 	int ret = 0;
8577 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8578 
8579 	if (udp_tunnel == NULL)
8580 		return -EINVAL;
8581 
8582 	switch (udp_tunnel->prot_type) {
8583 	case RTE_TUNNEL_TYPE_VXLAN:
8584 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8585 					  I40E_AQC_TUNNEL_TYPE_VXLAN);
8586 		break;
8587 	case RTE_TUNNEL_TYPE_VXLAN_GPE:
8588 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port,
8589 					  I40E_AQC_TUNNEL_TYPE_VXLAN_GPE);
8590 		break;
8591 	case RTE_TUNNEL_TYPE_GENEVE:
8592 	case RTE_TUNNEL_TYPE_TEREDO:
8593 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8594 		ret = -1;
8595 		break;
8596 
8597 	default:
8598 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8599 		ret = -1;
8600 		break;
8601 	}
8602 
8603 	return ret;
8604 }
8605 
8606 /* Remove UDP tunneling port */
8607 static int
8608 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8609 			     struct rte_eth_udp_tunnel *udp_tunnel)
8610 {
8611 	int ret = 0;
8612 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8613 
8614 	if (udp_tunnel == NULL)
8615 		return -EINVAL;
8616 
8617 	switch (udp_tunnel->prot_type) {
8618 	case RTE_TUNNEL_TYPE_VXLAN:
8619 	case RTE_TUNNEL_TYPE_VXLAN_GPE:
8620 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8621 		break;
8622 	case RTE_TUNNEL_TYPE_GENEVE:
8623 	case RTE_TUNNEL_TYPE_TEREDO:
8624 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8625 		ret = -1;
8626 		break;
8627 	default:
8628 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8629 		ret = -1;
8630 		break;
8631 	}
8632 
8633 	return ret;
8634 }
8635 
8636 /* Calculate the maximum number of contiguous PF queues that are configured */
8637 static int
8638 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8639 {
8640 	struct rte_eth_dev_data *data = pf->dev_data;
8641 	int i, num;
8642 	struct i40e_rx_queue *rxq;
8643 
8644 	num = 0;
8645 	for (i = 0; i < pf->lan_nb_qps; i++) {
8646 		rxq = data->rx_queues[i];
8647 		if (rxq && rxq->q_set)
8648 			num++;
8649 		else
8650 			break;
8651 	}
8652 
8653 	return num;
8654 }
8655 
8656 /* Configure RSS */
8657 static int
8658 i40e_pf_config_rss(struct i40e_pf *pf)
8659 {
8660 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8661 	struct rte_eth_rss_conf rss_conf;
8662 	uint32_t i, lut = 0;
8663 	uint16_t j, num;
8664 
8665 	/*
8666 	 * If both VMDQ and RSS are enabled, not all PF queues are configured.
8667 	 * Calculate the number of PF queues that are actually configured.
8668 	 */
8669 	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8670 		num = i40e_pf_calc_configured_queues_num(pf);
8671 	else
8672 		num = pf->dev_data->nb_rx_queues;
8673 
8674 	num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8675 	PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
8676 			num);
8677 
8678 	if (num == 0) {
8679 		PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
8680 		return -ENOTSUP;
8681 	}
8682 
8683 	if (pf->adapter->rss_reta_updated == 0) {
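		/* Fill the RSS lookup table with a round-robin queue
		 * pattern: four 8-bit LUT entries are packed into each
		 * 32-bit I40E_PFQF_HLUT register, which is written once
		 * every fourth entry.
		 */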
8684 		for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8685 			if (j == num)
8686 				j = 0;
8687 			lut = (lut << 8) | (j & ((0x1 <<
8688 				hw->func_caps.rss_table_entry_width) - 1));
8689 			if ((i & 3) == 3)
8690 				I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
8691 					       rte_bswap32(lut));
8692 		}
8693 	}
8694 
8695 	rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8696 	if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
8697 		i40e_pf_disable_rss(pf);
8698 		return 0;
8699 	}
8700 	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8701 		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8702 		/* Random default keys */
8703 		static uint32_t rss_key_default[] = {0x6b793944,
8704 			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8705 			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8706 			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8707 
8708 		rss_conf.rss_key = (uint8_t *)rss_key_default;
8709 		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8710 							sizeof(uint32_t);
8711 	}
8712 
8713 	return i40e_hw_rss_hash_set(pf, &rss_conf);
8714 }
8715 
8716 static int
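/* Basic sanity checks for a tunnel filter request: valid queue ID,
 * inner VLAN in range, and non-zero MAC addresses whenever an outer or
 * inner MAC match is requested.
 */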
8717 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
8718 			       struct rte_eth_tunnel_filter_conf *filter)
8719 {
8720 	if (pf == NULL || filter == NULL) {
8721 		PMD_DRV_LOG(ERR, "Invalid parameter");
8722 		return -EINVAL;
8723 	}
8724 
8725 	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
8726 		PMD_DRV_LOG(ERR, "Invalid queue ID");
8727 		return -EINVAL;
8728 	}
8729 
8730 	if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) {
8731 		PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
8732 		return -EINVAL;
8733 	}
8734 
8735 	if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
8736 		(rte_is_zero_ether_addr(&filter->outer_mac))) {
8737 		PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
8738 		return -EINVAL;
8739 	}
8740 
8741 	if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
8742 		(rte_is_zero_ether_addr(&filter->inner_mac))) {
8743 		PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
8744 		return -EINVAL;
8745 	}
8746 
8747 	return 0;
8748 }
8749 
8750 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8751 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
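/* Select the GRE key length (3 or 4 bytes) used by the packet parser by
 * toggling the mask-enable bit in the global GL_PRS_FVBM register. This
 * is a device-global setting and is refused in multi-driver mode.
 */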
8752 static int
8753 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8754 {
8755 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8756 	uint32_t val, reg;
8757 	int ret = -EINVAL;
8758 
8759 	if (pf->support_multi_driver) {
8760 		PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8761 		return -ENOTSUP;
8762 	}
8763 
8764 	val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8765 	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8766 
8767 	if (len == 3) {
8768 		reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8769 	} else if (len == 4) {
8770 		reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8771 	} else {
8772 		PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8773 		return ret;
8774 	}
8775 
8776 	if (reg != val) {
8777 		ret = i40e_aq_debug_write_global_register(hw,
8778 						   I40E_GL_PRS_FVBM(2),
8779 						   reg, NULL);
8780 		if (ret != 0)
8781 			return ret;
8782 		PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8783 			    "with value 0x%08x",
8784 			    I40E_GL_PRS_FVBM(2), reg);
8785 	} else {
8786 		ret = 0;
8787 	}
8788 	PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8789 		    I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8790 
8791 	return ret;
8792 }
8793 
8794 static int
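/* Apply a device-global configuration item; only the GRE key length is
 * currently supported.
 */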
8795 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8796 {
8797 	int ret = -EINVAL;
8798 
8799 	if (!hw || !cfg)
8800 		return -EINVAL;
8801 
8802 	switch (cfg->cfg_type) {
8803 	case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8804 		ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8805 		break;
8806 	default:
8807 		PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8808 		break;
8809 	}
8810 
8811 	return ret;
8812 }
8813 
8814 static int
8815 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8816 			       enum rte_filter_op filter_op,
8817 			       void *arg)
8818 {
8819 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8820 	int ret = I40E_ERR_PARAM;
8821 
8822 	switch (filter_op) {
8823 	case RTE_ETH_FILTER_SET:
8824 		ret = i40e_dev_global_config_set(hw,
8825 			(struct rte_eth_global_cfg *)arg);
8826 		break;
8827 	default:
8828 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8829 		break;
8830 	}
8831 
8832 	return ret;
8833 }
8834 
8835 static int
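/* Handle tunnel filter operations (NOP/ADD/DELETE) after validating the
 * request parameters.
 */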
8836 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8837 			  enum rte_filter_op filter_op,
8838 			  void *arg)
8839 {
8840 	struct rte_eth_tunnel_filter_conf *filter;
8841 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8842 	int ret = I40E_SUCCESS;
8843 
8844 	filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8845 
8846 	if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8847 		return I40E_ERR_PARAM;
8848 
8849 	switch (filter_op) {
8850 	case RTE_ETH_FILTER_NOP:
8851 		if (!(pf->flags & I40E_FLAG_VXLAN))
8852 			ret = I40E_NOT_SUPPORTED;
8853 		break;
8854 	case RTE_ETH_FILTER_ADD:
8855 		ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8856 		break;
8857 	case RTE_ETH_FILTER_DELETE:
8858 		ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8859 		break;
8860 	default:
8861 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8862 		ret = I40E_ERR_PARAM;
8863 		break;
8864 	}
8865 
8866 	return ret;
8867 }
8868 
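/* Configure multi-queue RX for the PF: enable RSS when the RSS flag is
 * set in the configured mq_mode, otherwise make sure RSS is disabled.
 */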
8869 static int
8870 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8871 {
8872 	int ret = 0;
8873 	enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8874 
8875 	/* RSS setup */
8876 	if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8877 		ret = i40e_pf_config_rss(pf);
8878 	else
8879 		i40e_pf_disable_rss(pf);
8880 
8881 	return ret;
8882 }
8883 
8884 /* Get the symmetric hash enable configuration per port */
8885 static void
8886 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8887 {
8888 	uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8889 
8890 	*enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8891 }
8892 
8893 /* Set the symmetric hash enable configuration per port */
8894 static void
8895 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8896 {
8897 	uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8898 
8899 	if (enable > 0) {
8900 		if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8901 			PMD_DRV_LOG(INFO,
8902 				"Symmetric hash has already been enabled");
8903 			return;
8904 		}
8905 		reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8906 	} else {
8907 		if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8908 			PMD_DRV_LOG(INFO,
8909 				"Symmetric hash has already been disabled");
8910 			return;
8911 		}
8912 		reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8913 	}
8914 	i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8915 	I40E_WRITE_FLUSH(hw);
8916 }
8917 
8918 /*
8919  * Get global configurations of hash function type and symmetric hash enable
8920  * per flow type (pctype). Note that global configuration means it affects all
8921  * the ports on the same NIC.
8922  */
8923 static int
8924 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8925 				   struct rte_eth_hash_global_conf *g_cfg)
8926 {
8927 	struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8928 	uint32_t reg;
8929 	uint16_t i, j;
8930 
8931 	memset(g_cfg, 0, sizeof(*g_cfg));
8932 	reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8933 	if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8934 		g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8935 	else
8936 		g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8937 	PMD_DRV_LOG(DEBUG, "Hash function is %s",
8938 		(reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8939 
8940 	/*
8941 	 * As i40e supports fewer than 64 flow types, only the first 64 bits need to
8942 	 * be checked.
8943 	 */
8944 	for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8945 		g_cfg->valid_bit_mask[i] = 0ULL;
8946 		g_cfg->sym_hash_enable_mask[i] = 0ULL;
8947 	}
8948 
8949 	g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8950 
8951 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8952 		if (!adapter->pctypes_tbl[i])
8953 			continue;
8954 		for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8955 		     j < I40E_FILTER_PCTYPE_MAX; j++) {
8956 			if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8957 				reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8958 				if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8959 					g_cfg->sym_hash_enable_mask[0] |=
8960 								(1ULL << i);
8961 				}
8962 			}
8963 		}
8964 	}
8965 
8966 	return 0;
8967 }
8968 
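/* Validate a hash global configuration request: the hash function must
 * be Toeplitz, Simple XOR or default, and only flow types supported by
 * the adapter may be set in the valid-bit mask.
 */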
8969 static int
8970 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8971 			      const struct rte_eth_hash_global_conf *g_cfg)
8972 {
8973 	uint32_t i;
8974 	uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8975 
8976 	if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8977 		g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8978 		g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8979 		PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8980 						g_cfg->hash_func);
8981 		return -EINVAL;
8982 	}
8983 
8984 	/*
8985 	 * As i40e supports fewer than 64 flow types, only the first 64 bits need to
8986 	 * be checked.
8987 	 */
8988 	mask0 = g_cfg->valid_bit_mask[0];
8989 	for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8990 		if (i == 0) {
8991 			/* Check if any unsupported flow type configured */
8992 			if ((mask0 | i40e_mask) ^ i40e_mask)
8993 				goto mask_err;
8994 		} else {
8995 			if (g_cfg->valid_bit_mask[i])
8996 				goto mask_err;
8997 		}
8998 	}
8999 
9000 	return 0;
9001 
9002 mask_err:
9003 	PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
9004 
9005 	return -EINVAL;
9006 }
9007 
9008 /*
9009  * Set global configurations of hash function type and symmetric hash enable
9010  * per flow type (pctype). Note that modifying the global configuration will affect
9011  * all the ports on the same NIC.
9012  */
9013 static int
9014 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
9015 				   struct rte_eth_hash_global_conf *g_cfg)
9016 {
9017 	struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
9018 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9019 	int ret;
9020 	uint16_t i, j;
9021 	uint32_t reg;
9022 	uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
9023 
9024 	if (pf->support_multi_driver) {
9025 		PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
9026 		return -ENOTSUP;
9027 	}
9028 
9029 	/* Check the input parameters */
9030 	ret = i40e_hash_global_config_check(adapter, g_cfg);
9031 	if (ret < 0)
9032 		return ret;
9033 
9034 	/*
9035 	 * As i40e supports fewer than 64 flow types, only the first 64 bits need to
9036 	 * be configured.
9037 	 */
9038 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
9039 		if (mask0 & (1UL << i)) {
9040 			reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
9041 					I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
9042 
9043 			for (j = I40E_FILTER_PCTYPE_INVALID + 1;
9044 			     j < I40E_FILTER_PCTYPE_MAX; j++) {
9045 				if (adapter->pctypes_tbl[i] & (1ULL << j))
9046 					i40e_write_global_rx_ctl(hw,
9047 							  I40E_GLQF_HSYM(j),
9048 							  reg);
9049 			}
9050 		}
9051 	}
9052 
9053 	reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
9054 	if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
9055 		/* Toeplitz */
9056 		if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
9057 			PMD_DRV_LOG(DEBUG,
9058 				"Hash function already set to Toeplitz");
9059 			goto out;
9060 		}
9061 		reg |= I40E_GLQF_CTL_HTOEP_MASK;
9062 	} else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
9063 		/* Simple XOR */
9064 		if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
9065 			PMD_DRV_LOG(DEBUG,
9066 				"Hash function already set to Simple XOR");
9067 			goto out;
9068 		}
9069 		reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
9070 	} else
9071 		/* Use the default, and keep it as it is */
9072 		goto out;
9073 
9074 	i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
9075 
9076 out:
9077 	I40E_WRITE_FLUSH(hw);
9078 
9079 	return 0;
9080 }
9081 
9082 /**
9083  * Valid input sets for hash and flow director filters per PCTYPE
9084  */
9085 static uint64_t
9086 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
9087 		enum rte_filter_type filter)
9088 {
9089 	uint64_t valid;
9090 
9091 	static const uint64_t valid_hash_inset_table[] = {
9092 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9093 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9094 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9095 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
9096 			I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
9097 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9098 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9099 			I40E_INSET_FLEX_PAYLOAD,
9100 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9101 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9102 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9103 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9104 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9105 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9106 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9107 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9108 			I40E_INSET_FLEX_PAYLOAD,
9109 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9110 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9111 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9112 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9113 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9114 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9115 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9116 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9117 			I40E_INSET_FLEX_PAYLOAD,
9118 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9119 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9120 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9121 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9122 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9123 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9124 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9125 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9126 			I40E_INSET_FLEX_PAYLOAD,
9127 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9128 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9129 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9130 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9131 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9132 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9133 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9134 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9135 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9136 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9137 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9138 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9139 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9140 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9141 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9142 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9143 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9144 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9145 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9146 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9147 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9148 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9149 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9150 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9151 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9152 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9153 			I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9154 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9155 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9156 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9157 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9158 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9159 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9160 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9161 			I40E_INSET_FLEX_PAYLOAD,
9162 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9163 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9164 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9165 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9166 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9167 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9168 			I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9169 			I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9170 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9171 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9172 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9173 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9174 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9175 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9176 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9177 			I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9178 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9179 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9180 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9181 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9182 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9183 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9184 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9185 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9186 			I40E_INSET_FLEX_PAYLOAD,
9187 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9188 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9189 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9190 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9191 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9192 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9193 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9194 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9195 			I40E_INSET_FLEX_PAYLOAD,
9196 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9197 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9198 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9199 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9200 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9201 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9202 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9203 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9204 			I40E_INSET_FLEX_PAYLOAD,
9205 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9206 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9207 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9208 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9209 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9210 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9211 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9212 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9213 			I40E_INSET_FLEX_PAYLOAD,
9214 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9215 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9216 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9217 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9218 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9219 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9220 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9221 			I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9222 			I40E_INSET_FLEX_PAYLOAD,
9223 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9224 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9225 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9226 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9227 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9228 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9229 			I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9230 			I40E_INSET_FLEX_PAYLOAD,
9231 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9232 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9233 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9234 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9235 			I40E_INSET_FLEX_PAYLOAD,
9236 	};
9237 
9238 	/**
9239 	 * Flow director supports only fields defined in
9240 	 * union rte_eth_fdir_flow.
9241 	 */
9242 	static const uint64_t valid_fdir_inset_table[] = {
9243 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9244 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9245 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9246 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9247 		I40E_INSET_IPV4_TTL,
9248 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9249 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9250 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9251 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9252 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9253 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9254 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9255 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9256 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9257 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9258 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9259 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9260 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9261 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9262 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9263 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9264 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9265 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9266 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9267 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9268 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9269 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9270 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9271 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9272 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9273 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9274 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9275 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9276 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9277 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9278 		I40E_INSET_SCTP_VT,
9279 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9280 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9281 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9282 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9283 		I40E_INSET_IPV4_TTL,
9284 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9285 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9286 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9287 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9288 		I40E_INSET_IPV6_HOP_LIMIT,
9289 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9290 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9291 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9292 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9293 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9294 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9295 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9296 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9297 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9298 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9299 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9300 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9301 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9302 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9303 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9304 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9305 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9306 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9307 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9308 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9309 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9310 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9311 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9312 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9313 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9314 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9315 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9316 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9317 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9318 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9319 		I40E_INSET_SCTP_VT,
9320 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9321 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9322 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9323 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9324 		I40E_INSET_IPV6_HOP_LIMIT,
9325 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9326 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9327 		I40E_INSET_LAST_ETHER_TYPE,
9328 	};
9329 
9330 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9331 		return 0;
9332 	if (filter == RTE_ETH_FILTER_HASH)
9333 		valid = valid_hash_inset_table[pctype];
9334 	else
9335 		valid = valid_fdir_inset_table[pctype];
9336 
9337 	return valid;
9338 }
9339 
9340 /**
9341  * Validate if the input set is allowed for a specific PCTYPE
9342  */
9343 int
9344 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9345 		enum rte_filter_type filter, uint64_t inset)
9346 {
9347 	uint64_t valid;
9348 
9349 	valid = i40e_get_valid_input_set(pctype, filter);
9350 	if (inset & (~valid))
9351 		return -EINVAL;
9352 
9353 	return 0;
9354 }
9355 
9356 /* default input set fields combination per pctype */
9357 uint64_t
9358 i40e_get_default_input_set(uint16_t pctype)
9359 {
9360 	static const uint64_t default_inset_table[] = {
9361 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9362 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9363 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9364 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9365 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9366 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9367 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9368 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9369 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9370 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9371 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9372 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9373 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9374 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9375 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9376 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9377 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9378 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9379 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9380 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9381 			I40E_INSET_SCTP_VT,
9382 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9383 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9384 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9385 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9386 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9387 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9388 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9389 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9390 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9391 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9392 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9393 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9394 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9395 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9396 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9397 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9398 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9399 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9400 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9401 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9402 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9403 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9404 			I40E_INSET_SCTP_VT,
9405 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9406 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9407 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9408 			I40E_INSET_LAST_ETHER_TYPE,
9409 	};
9410 
9411 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9412 		return 0;
9413 
9414 	return default_inset_table[pctype];
9415 }
9416 
9417 /**
9418  * Parse the input set from index to logical bit masks
9419  */
9420 static int
9421 i40e_parse_input_set(uint64_t *inset,
9422 		     enum i40e_filter_pctype pctype,
9423 		     enum rte_eth_input_set_field *field,
9424 		     uint16_t size)
9425 {
9426 	uint16_t i, j;
9427 	int ret = -EINVAL;
9428 
9429 	static const struct {
9430 		enum rte_eth_input_set_field field;
9431 		uint64_t inset;
9432 	} inset_convert_table[] = {
9433 		{RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9434 		{RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9435 		{RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9436 		{RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9437 		{RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9438 		{RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9439 		{RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9440 		{RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9441 		{RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9442 		{RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9443 		{RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9444 		{RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9445 		{RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9446 		{RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9447 		{RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9448 			I40E_INSET_IPV6_NEXT_HDR},
9449 		{RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9450 			I40E_INSET_IPV6_HOP_LIMIT},
9451 		{RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9452 		{RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9453 		{RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9454 		{RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9455 		{RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9456 		{RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9457 		{RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9458 			I40E_INSET_SCTP_VT},
9459 		{RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9460 			I40E_INSET_TUNNEL_DMAC},
9461 		{RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9462 			I40E_INSET_VLAN_TUNNEL},
9463 		{RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9464 			I40E_INSET_TUNNEL_ID},
9465 		{RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9466 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9467 			I40E_INSET_FLEX_PAYLOAD_W1},
9468 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9469 			I40E_INSET_FLEX_PAYLOAD_W2},
9470 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9471 			I40E_INSET_FLEX_PAYLOAD_W3},
9472 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9473 			I40E_INSET_FLEX_PAYLOAD_W4},
9474 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9475 			I40E_INSET_FLEX_PAYLOAD_W5},
9476 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9477 			I40E_INSET_FLEX_PAYLOAD_W6},
9478 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9479 			I40E_INSET_FLEX_PAYLOAD_W7},
9480 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9481 			I40E_INSET_FLEX_PAYLOAD_W8},
9482 	};
9483 
9484 	if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9485 		return ret;
9486 
9487 	/* Only one item allowed for default or all */
9488 	if (size == 1) {
9489 		if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9490 			*inset = i40e_get_default_input_set(pctype);
9491 			return 0;
9492 		} else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9493 			*inset = I40E_INSET_NONE;
9494 			return 0;
9495 		}
9496 	}
9497 
9498 	for (i = 0, *inset = 0; i < size; i++) {
9499 		for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9500 			if (field[i] == inset_convert_table[j].field) {
9501 				*inset |= inset_convert_table[j].inset;
9502 				break;
9503 			}
9504 		}
9505 
9506 		/* It contains an unsupported input set field, return immediately */
9507 		if (j == RTE_DIM(inset_convert_table))
9508 			return ret;
9509 	}
9510 
9511 	return 0;
9512 }
9513 
9514 /**
9515  * Translate the input set from bit masks to register aware bit masks
9516  * and vice versa
9517  */
9518 uint64_t
9519 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9520 {
9521 	uint64_t val = 0;
9522 	uint16_t i;
9523 
9524 	struct inset_map {
9525 		uint64_t inset;
9526 		uint64_t inset_reg;
9527 	};
9528 
9529 	static const struct inset_map inset_map_common[] = {
9530 		{I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9531 		{I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9532 		{I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9533 		{I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9534 		{I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9535 		{I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9536 		{I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9537 		{I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9538 		{I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9539 		{I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9540 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9541 		{I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9542 		{I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9543 		{I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9544 		{I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9545 		{I40E_INSET_TUNNEL_DMAC,
9546 			I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9547 		{I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9548 		{I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9549 		{I40E_INSET_TUNNEL_SRC_PORT,
9550 			I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9551 		{I40E_INSET_TUNNEL_DST_PORT,
9552 			I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9553 		{I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9554 		{I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9555 		{I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9556 		{I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9557 		{I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9558 		{I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9559 		{I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9560 		{I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9561 		{I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9562 	};
9563 
9564 	/* Some registers map differently on the X722 */
9565 	static const struct inset_map inset_map_diff_x722[] = {
9566 		{I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9567 		{I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9568 		{I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9569 		{I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9570 	};
9571 
9572 	static const struct inset_map inset_map_diff_not_x722[] = {
9573 		{I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9574 		{I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9575 		{I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9576 		{I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9577 	};
9578 
9579 	if (input == 0)
9580 		return val;
9581 
9582 	/* Translate input set to register aware inset */
9583 	if (type == I40E_MAC_X722) {
9584 		for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9585 			if (input & inset_map_diff_x722[i].inset)
9586 				val |= inset_map_diff_x722[i].inset_reg;
9587 		}
9588 	} else {
9589 		for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9590 			if (input & inset_map_diff_not_x722[i].inset)
9591 				val |= inset_map_diff_not_x722[i].inset_reg;
9592 		}
9593 	}
9594 
9595 	for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9596 		if (input & inset_map_common[i].inset)
9597 			val |= inset_map_common[i].inset_reg;
9598 	}
9599 
9600 	return val;
9601 }
9602 
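/* Build the bit-mask register values needed for the fields in @inset.
 * Field combinations that need no mask (e.g. IPv4 proto together with
 * TTL) are skipped. Returns the number of entries written to @mask, or
 * -EINVAL if more than @nb_elem entries would be required.
 */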
9603 int
9604 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9605 {
9606 	uint8_t i, idx = 0;
9607 	uint64_t inset_need_mask = inset;
9608 
9609 	static const struct {
9610 		uint64_t inset;
9611 		uint32_t mask;
9612 	} inset_mask_map[] = {
9613 		{I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9614 		{I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9615 		{I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9616 		{I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9617 		{I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9618 		{I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9619 		{I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9620 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9621 	};
9622 
9623 	if (!inset || !mask || !nb_elem)
9624 		return 0;
9625 
9626 	for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9627 		/* Clear the inset bit, if no MASK is required,
9628 		 * for example proto + ttl
9629 		 */
9630 		if ((inset & inset_mask_map[i].inset) ==
9631 		     inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9632 			inset_need_mask &= ~inset_mask_map[i].inset;
9633 		if (!inset_need_mask)
9634 			return 0;
9635 	}
9636 	for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9637 		if ((inset_need_mask & inset_mask_map[i].inset) ==
9638 		    inset_mask_map[i].inset) {
9639 			if (idx >= nb_elem) {
9640 				PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
9641 				return -EINVAL;
9642 			}
9643 			mask[idx] = inset_mask_map[i].mask;
9644 			idx++;
9645 		}
9646 	}
9647 
9648 	return idx;
9649 }
9650 
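/* Write an RX control register only if its current value differs from
 * the requested one, logging the value before and after.
 */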
9651 void
9652 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9653 {
9654 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9655 
9656 	PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9657 	if (reg != val)
9658 		i40e_write_rx_ctl(hw, addr, val);
9659 	PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9660 		    (uint32_t)i40e_read_rx_ctl(hw, addr));
9661 }
9662 
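/* Write a global RX control register only when the value changes, and
 * warn that a register shared by all ports on the device was modified.
 */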
9663 void
9664 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9665 {
9666 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9667 	struct rte_eth_dev *dev;
9668 
9669 	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
9670 	if (reg != val) {
9671 		i40e_write_rx_ctl(hw, addr, val);
9672 		PMD_DRV_LOG(WARNING,
9673 			    "i40e device %s changed global register [0x%08x]."
9674 			    " original: 0x%08x, new: 0x%08x",
9675 			    dev->device->name, addr, reg,
9676 			    (uint32_t)i40e_read_rx_ctl(hw, addr));
9677 	}
9678 }
9679 
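/* Program the default input set and the corresponding mask registers
 * for every supported flow type at initialization time, covering flow
 * director and, when not in multi-driver mode, RSS hashing as well.
 */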
9680 static void
9681 i40e_filter_input_set_init(struct i40e_pf *pf)
9682 {
9683 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9684 	enum i40e_filter_pctype pctype;
9685 	uint64_t input_set, inset_reg;
9686 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9687 	int num, i;
9688 	uint16_t flow_type;
9689 
9690 	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9691 	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9692 		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9693 
9694 		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9695 			continue;
9696 
9697 		input_set = i40e_get_default_input_set(pctype);
9698 
9699 		num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9700 						   I40E_INSET_MASK_NUM_REG);
9701 		if (num < 0)
9702 			return;
9703 		if (pf->support_multi_driver && num > 0) {
9704 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9705 			return;
9706 		}
9707 		inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9708 					input_set);
9709 
9710 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9711 				      (uint32_t)(inset_reg & UINT32_MAX));
9712 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9713 				     (uint32_t)((inset_reg >>
9714 				     I40E_32_BIT_WIDTH) & UINT32_MAX));
9715 		if (!pf->support_multi_driver) {
9716 			i40e_check_write_global_reg(hw,
9717 					    I40E_GLQF_HASH_INSET(0, pctype),
9718 					    (uint32_t)(inset_reg & UINT32_MAX));
9719 			i40e_check_write_global_reg(hw,
9720 					     I40E_GLQF_HASH_INSET(1, pctype),
9721 					     (uint32_t)((inset_reg >>
9722 					      I40E_32_BIT_WIDTH) & UINT32_MAX));
9723 
9724 			for (i = 0; i < num; i++) {
9725 				i40e_check_write_global_reg(hw,
9726 						    I40E_GLQF_FD_MSK(i, pctype),
9727 						    mask_reg[i]);
9728 				i40e_check_write_global_reg(hw,
9729 						  I40E_GLQF_HASH_MSK(i, pctype),
9730 						  mask_reg[i]);
9731 			}
9732 			/* Clear unused mask registers of the pctype */
9733 			for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9734 				i40e_check_write_global_reg(hw,
9735 						    I40E_GLQF_FD_MSK(i, pctype),
9736 						    0);
9737 				i40e_check_write_global_reg(hw,
9738 						  I40E_GLQF_HASH_MSK(i, pctype),
9739 						  0);
9740 			}
9741 		} else {
9742 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9743 		}
9744 		I40E_WRITE_FLUSH(hw);
9745 
9746 		/* store the default input set */
9747 		if (!pf->support_multi_driver)
9748 			pf->hash_input_set[pctype] = input_set;
9749 		pf->fdir.input_set[pctype] = input_set;
9750 	}
9751 }
9752 
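/* Select or extend the RSS hash input set for one flow type and program
 * the matching global inset and mask registers.
 */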
9753 int
9754 i40e_hash_filter_inset_select(struct i40e_hw *hw,
9755 			 struct rte_eth_input_set_conf *conf)
9756 {
9757 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9758 	enum i40e_filter_pctype pctype;
9759 	uint64_t input_set, inset_reg = 0;
9760 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9761 	int ret, i, num;
9762 
9763 	if (!conf) {
9764 		PMD_DRV_LOG(ERR, "Invalid pointer");
9765 		return -EFAULT;
9766 	}
9767 	if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9768 	    conf->op != RTE_ETH_INPUT_SET_ADD) {
9769 		PMD_DRV_LOG(ERR, "Unsupported input set operation");
9770 		return -EINVAL;
9771 	}
9772 
9773 	if (pf->support_multi_driver) {
9774 		PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
9775 		return -ENOTSUP;
9776 	}
9777 
9778 	pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9779 	if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9780 		PMD_DRV_LOG(ERR, "invalid flow_type input.");
9781 		return -EINVAL;
9782 	}
9783 
9784 	if (hw->mac.type == I40E_MAC_X722) {
9785 		/* get translated pctype value in fd pctype register */
9786 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
9787 			I40E_GLQF_FD_PCTYPES((int)pctype));
9788 	}
9789 
9790 	ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9791 				   conf->inset_size);
9792 	if (ret) {
9793 		PMD_DRV_LOG(ERR, "Failed to parse input set");
9794 		return -EINVAL;
9795 	}
9796 
9797 	if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9798 		/* get inset value in register */
9799 		inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9800 		inset_reg <<= I40E_32_BIT_WIDTH;
9801 		inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9802 		input_set |= pf->hash_input_set[pctype];
9803 	}
9804 	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9805 					   I40E_INSET_MASK_NUM_REG);
9806 	if (num < 0)
9807 		return -EINVAL;
9808 
9809 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9810 
9811 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9812 				    (uint32_t)(inset_reg & UINT32_MAX));
9813 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9814 				    (uint32_t)((inset_reg >>
9815 				    I40E_32_BIT_WIDTH) & UINT32_MAX));
9816 
9817 	for (i = 0; i < num; i++)
9818 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9819 					    mask_reg[i]);
9820 	/* Clear unused mask registers of the pctype */
9821 	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9822 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9823 					    0);
9824 	I40E_WRITE_FLUSH(hw);
9825 
9826 	pf->hash_input_set[pctype] = input_set;
9827 	return 0;
9828 }
9829 
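/* Select or extend the flow director input set for one flow type and
 * program the per-port inset registers plus, when supported, the global
 * mask registers.
 */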
9830 int
9831 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9832 			 struct rte_eth_input_set_conf *conf)
9833 {
9834 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9835 	enum i40e_filter_pctype pctype;
9836 	uint64_t input_set, inset_reg = 0;
9837 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9838 	int ret, i, num;
9839 
9840 	if (!hw || !conf) {
9841 		PMD_DRV_LOG(ERR, "Invalid pointer");
9842 		return -EFAULT;
9843 	}
9844 	if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9845 	    conf->op != RTE_ETH_INPUT_SET_ADD) {
9846 		PMD_DRV_LOG(ERR, "Unsupported input set operation");
9847 		return -EINVAL;
9848 	}
9849 
9850 	pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9851 
9852 	if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9853 		PMD_DRV_LOG(ERR, "invalid flow_type input.");
9854 		return -EINVAL;
9855 	}
9856 
9857 	ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9858 				   conf->inset_size);
9859 	if (ret) {
9860 		PMD_DRV_LOG(ERR, "Failed to parse input set");
9861 		return -EINVAL;
9862 	}
9863 
9864 	/* get inset value in register */
9865 	inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9866 	inset_reg <<= I40E_32_BIT_WIDTH;
9867 	inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9868 
9869 	/* Cannot change the inset reg for flex payload for FDIR;
9870 	 * it is done by writing I40E_PRTQF_FD_FLXINSET
9871 	 * in i40e_set_flex_mask_on_pctype.
9872 	 */
9873 	if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9874 		inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9875 	else
9876 		input_set |= pf->fdir.input_set[pctype];
9877 	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9878 					   I40E_INSET_MASK_NUM_REG);
9879 	if (num < 0)
9880 		return -EINVAL;
9881 	if (pf->support_multi_driver && num > 0) {
9882 		PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9883 		return -ENOTSUP;
9884 	}
9885 
9886 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9887 
9888 	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9889 			      (uint32_t)(inset_reg & UINT32_MAX));
9890 	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9891 			     (uint32_t)((inset_reg >>
9892 			     I40E_32_BIT_WIDTH) & UINT32_MAX));
9893 
9894 	if (!pf->support_multi_driver) {
9895 		for (i = 0; i < num; i++)
9896 			i40e_check_write_global_reg(hw,
9897 						    I40E_GLQF_FD_MSK(i, pctype),
9898 						    mask_reg[i]);
9899 		/* Clear unused mask registers of the pctype */
9900 		for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9901 			i40e_check_write_global_reg(hw,
9902 						    I40E_GLQF_FD_MSK(i, pctype),
9903 						    0);
9904 	} else {
9905 		PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9906 	}
9907 	I40E_WRITE_FLUSH(hw);
9908 
9909 	pf->fdir.input_set[pctype] = input_set;
9910 	return 0;
9911 }
9912 
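/* Read back hash filter information: either the per-port symmetric hash
 * state or the global hash configuration, depending on info_type.
 */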
9913 static int
9914 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9915 {
9916 	int ret = 0;
9917 
9918 	if (!hw || !info) {
9919 		PMD_DRV_LOG(ERR, "Invalid pointer");
9920 		return -EFAULT;
9921 	}
9922 
9923 	switch (info->info_type) {
9924 	case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9925 		i40e_get_symmetric_hash_enable_per_port(hw,
9926 					&(info->info.enable));
9927 		break;
9928 	case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9929 		ret = i40e_get_hash_filter_global_config(hw,
9930 				&(info->info.global_conf));
9931 		break;
9932 	default:
9933 		PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9934 							info->info_type);
9935 		ret = -EINVAL;
9936 		break;
9937 	}
9938 
9939 	return ret;
9940 }
9941 
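/* Apply hash filter settings: per-port symmetric hash, global hash
 * configuration, or hash input set selection, depending on info_type.
 */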
9942 static int
9943 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9944 {
9945 	int ret = 0;
9946 
9947 	if (!hw || !info) {
9948 		PMD_DRV_LOG(ERR, "Invalid pointer");
9949 		return -EFAULT;
9950 	}
9951 
9952 	switch (info->info_type) {
9953 	case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9954 		i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9955 		break;
9956 	case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9957 		ret = i40e_set_hash_filter_global_config(hw,
9958 				&(info->info.global_conf));
9959 		break;
9960 	case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9961 		ret = i40e_hash_filter_inset_select(hw,
9962 					       &(info->info.input_set_conf));
9963 		break;
9964 
9965 	default:
9966 		PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9967 							info->info_type);
9968 		ret = -EINVAL;
9969 		break;
9970 	}
9971 
9972 	return ret;
9973 }
9974 
9975 /* Operations for hash function */
9976 static int
9977 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9978 		      enum rte_filter_op filter_op,
9979 		      void *arg)
9980 {
9981 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9982 	int ret = 0;
9983 
9984 	switch (filter_op) {
9985 	case RTE_ETH_FILTER_NOP:
9986 		break;
9987 	case RTE_ETH_FILTER_GET:
9988 		ret = i40e_hash_filter_get(hw,
9989 			(struct rte_eth_hash_filter_info *)arg);
9990 		break;
9991 	case RTE_ETH_FILTER_SET:
9992 		ret = i40e_hash_filter_set(hw,
9993 			(struct rte_eth_hash_filter_info *)arg);
9994 		break;
9995 	default:
9996 		PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9997 								filter_op);
9998 		ret = -ENOTSUP;
9999 		break;
10000 	}
10001 
10002 	return ret;
10003 }
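
/*
 * Hypothetical application-level usage of the hash-filter path above (a
 * minimal sketch, assuming the legacy rte_eth_dev_filter_ctrl() API and
 * port 0).  Enable symmetric RSS hashing per port:
 *
 *	struct rte_eth_hash_filter_info info = {
 *		.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT,
 *		.info.enable = 1,
 *	};
 *	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_HASH,
 *				RTE_ETH_FILTER_SET, &info);
 */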
10004 
10005 /* Convert ethertype filter structure */
10006 static int
10007 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
10008 			      struct i40e_ethertype_filter *filter)
10009 {
10010 	rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
10011 		RTE_ETHER_ADDR_LEN);
10012 	filter->input.ether_type = input->ether_type;
10013 	filter->flags = input->flags;
10014 	filter->queue = input->queue;
10015 
10016 	return 0;
10017 }
10018 
10019 /* Check if the ethertype filter already exists */
10020 struct i40e_ethertype_filter *
10021 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
10022 				const struct i40e_ethertype_filter_input *input)
10023 {
10024 	int ret;
10025 
10026 	ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
10027 	if (ret < 0)
10028 		return NULL;
10029 
10030 	return ethertype_rule->hash_map[ret];
10031 }
10032 
10033 /* Add ethertype filter in SW list */
10034 static int
10035 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
10036 				struct i40e_ethertype_filter *filter)
10037 {
10038 	struct i40e_ethertype_rule *rule = &pf->ethertype;
10039 	int ret;
10040 
10041 	ret = rte_hash_add_key(rule->hash_table, &filter->input);
10042 	if (ret < 0) {
10043 		PMD_DRV_LOG(ERR,
10044 			    "Failed to insert ethertype filter"
10045 			    " to hash table %d!",
10046 			    ret);
10047 		return ret;
10048 	}
10049 	rule->hash_map[ret] = filter;
10050 
10051 	TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
10052 
10053 	return 0;
10054 }
10055 
10056 /* Delete ethertype filter in SW list */
10057 int
10058 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
10059 			     struct i40e_ethertype_filter_input *input)
10060 {
10061 	struct i40e_ethertype_rule *rule = &pf->ethertype;
10062 	struct i40e_ethertype_filter *filter;
10063 	int ret;
10064 
10065 	ret = rte_hash_del_key(rule->hash_table, input);
10066 	if (ret < 0) {
10067 		PMD_DRV_LOG(ERR,
10068 			    "Failed to delete ethertype filter"
10069 			    " from hash table %d!",
10070 			    ret);
10071 		return ret;
10072 	}
10073 	filter = rule->hash_map[ret];
10074 	rule->hash_map[ret] = NULL;
10075 
10076 	TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
10077 	rte_free(filter);
10078 
10079 	return 0;
10080 }
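
/*
 * Note on the software-list pattern above (descriptive only):
 * rte_hash_add_key() and rte_hash_del_key() return the key's position in the
 * table on success, and that position is reused as the index into
 * rule->hash_map, so the hash table and the TAILQ always refer to the same
 * filter object.
 */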
10081 
10082 /*
10083  * Configure an ethertype filter, which can direct packets by filtering
10084  * on MAC address and ether_type, or on ether_type only
10085  */
10086 int
10087 i40e_ethertype_filter_set(struct i40e_pf *pf,
10088 			struct rte_eth_ethertype_filter *filter,
10089 			bool add)
10090 {
10091 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
10092 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
10093 	struct i40e_ethertype_filter *ethertype_filter, *node;
10094 	struct i40e_ethertype_filter check_filter;
10095 	struct i40e_control_filter_stats stats;
10096 	uint16_t flags = 0;
10097 	int ret;
10098 
10099 	if (filter->queue >= pf->dev_data->nb_rx_queues) {
10100 		PMD_DRV_LOG(ERR, "Invalid queue ID");
10101 		return -EINVAL;
10102 	}
10103 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
10104 		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
10105 		PMD_DRV_LOG(ERR,
10106 			"unsupported ether_type(0x%04x) in control packet filter.",
10107 			filter->ether_type);
10108 		return -EINVAL;
10109 	}
10110 	if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
10111 		PMD_DRV_LOG(WARNING,
10112 			"filter vlan ether_type in first tag is not supported.");
10113 
10114 	/* Check if there is the filter in SW list */
10115 	memset(&check_filter, 0, sizeof(check_filter));
10116 	i40e_ethertype_filter_convert(filter, &check_filter);
10117 	node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
10118 					       &check_filter.input);
10119 	if (add && node) {
10120 		PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
10121 		return -EINVAL;
10122 	}
10123 
10124 	if (!add && !node) {
10125 		PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
10126 		return -EINVAL;
10127 	}
10128 
10129 	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
10130 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
10131 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
10132 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
10133 	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
10134 
10135 	memset(&stats, 0, sizeof(stats));
10136 	ret = i40e_aq_add_rem_control_packet_filter(hw,
10137 			filter->mac_addr.addr_bytes,
10138 			filter->ether_type, flags,
10139 			pf->main_vsi->seid,
10140 			filter->queue, add, &stats, NULL);
10141 
10142 	PMD_DRV_LOG(INFO,
10143 		"add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
10144 		ret, stats.mac_etype_used, stats.etype_used,
10145 		stats.mac_etype_free, stats.etype_free);
10146 	if (ret < 0)
10147 		return -ENOSYS;
10148 
10149 	/* Add or delete a filter in SW list */
10150 	if (add) {
10151 		ethertype_filter = rte_zmalloc("ethertype_filter",
10152 				       sizeof(*ethertype_filter), 0);
10153 		if (ethertype_filter == NULL) {
10154 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
10155 			return -ENOMEM;
10156 		}
10157 
10158 		rte_memcpy(ethertype_filter, &check_filter,
10159 			   sizeof(check_filter));
10160 		ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
10161 		if (ret < 0)
10162 			rte_free(ethertype_filter);
10163 	} else {
10164 		ret = i40e_sw_ethertype_filter_del(pf, &node->input);
10165 	}
10166 
10167 	return ret;
10168 }
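
/*
 * Hypothetical application-level usage of the path above (a minimal sketch,
 * assuming the legacy rte_eth_dev_filter_ctrl() API, port 0 and an example
 * ether_type of 0x88F7).  Drop all packets with that ether_type, ignoring
 * the MAC address:
 *
 *	struct rte_eth_ethertype_filter ef = {
 *		.ether_type = 0x88F7,
 *		.flags = RTE_ETHTYPE_FLAGS_DROP,
 *		.queue = 0,
 *	};
 *	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_ETHERTYPE,
 *				RTE_ETH_FILTER_ADD, &ef);
 */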
10169 
10170 /*
10171  * Handle operations for ethertype filter.
10172  */
10173 static int
10174 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
10175 				enum rte_filter_op filter_op,
10176 				void *arg)
10177 {
10178 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10179 	int ret = 0;
10180 
10181 	if (filter_op == RTE_ETH_FILTER_NOP)
10182 		return ret;
10183 
10184 	if (arg == NULL) {
10185 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
10186 			    filter_op);
10187 		return -EINVAL;
10188 	}
10189 
10190 	switch (filter_op) {
10191 	case RTE_ETH_FILTER_ADD:
10192 		ret = i40e_ethertype_filter_set(pf,
10193 			(struct rte_eth_ethertype_filter *)arg,
10194 			TRUE);
10195 		break;
10196 	case RTE_ETH_FILTER_DELETE:
10197 		ret = i40e_ethertype_filter_set(pf,
10198 			(struct rte_eth_ethertype_filter *)arg,
10199 			FALSE);
10200 		break;
10201 	default:
10202 		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
10203 		ret = -ENOSYS;
10204 		break;
10205 	}
10206 	return ret;
10207 }
10208 
10209 static int
10210 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
10211 		     enum rte_filter_type filter_type,
10212 		     enum rte_filter_op filter_op,
10213 		     void *arg)
10214 {
10215 	int ret = 0;
10216 
10217 	if (dev == NULL)
10218 		return -EINVAL;
10219 
10220 	switch (filter_type) {
10221 	case RTE_ETH_FILTER_NONE:
10222 		/* For global configuration */
10223 		ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
10224 		break;
10225 	case RTE_ETH_FILTER_HASH:
10226 		ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
10227 		break;
10228 	case RTE_ETH_FILTER_MACVLAN:
10229 		ret = i40e_mac_filter_handle(dev, filter_op, arg);
10230 		break;
10231 	case RTE_ETH_FILTER_ETHERTYPE:
10232 		ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
10233 		break;
10234 	case RTE_ETH_FILTER_TUNNEL:
10235 		ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
10236 		break;
10237 	case RTE_ETH_FILTER_FDIR:
10238 		ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
10239 		break;
10240 	case RTE_ETH_FILTER_GENERIC:
10241 		if (filter_op != RTE_ETH_FILTER_GET)
10242 			return -EINVAL;
10243 		*(const void **)arg = &i40e_flow_ops;
10244 		break;
10245 	default:
10246 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
10247 							filter_type);
10248 		ret = -EINVAL;
10249 		break;
10250 	}
10251 
10252 	return ret;
10253 }
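
/*
 * Sketch of how the generic flow (rte_flow) API reaches this driver through
 * the RTE_ETH_FILTER_GENERIC case above; the ethdev layer performs roughly
 * the equivalent of:
 *
 *	const struct rte_flow_ops *ops;
 *	i40e_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *			     RTE_ETH_FILTER_GET, &ops);
 *	// ops now points to i40e_flow_ops
 */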
10254 
10255 /*
10256  * Check and enable Extended Tag.
10257  * Enabling Extended Tag is important for 40G performance.
10258  */
10259 static void
10260 i40e_enable_extended_tag(struct rte_eth_dev *dev)
10261 {
10262 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10263 	uint32_t buf = 0;
10264 	int ret;
10265 
10266 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10267 				      PCI_DEV_CAP_REG);
10268 	if (ret < 0) {
10269 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10270 			    PCI_DEV_CAP_REG);
10271 		return;
10272 	}
10273 	if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
10274 		PMD_DRV_LOG(ERR, "Does not support Extended Tag");
10275 		return;
10276 	}
10277 
10278 	buf = 0;
10279 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10280 				      PCI_DEV_CTRL_REG);
10281 	if (ret < 0) {
10282 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10283 			    PCI_DEV_CTRL_REG);
10284 		return;
10285 	}
10286 	if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
10287 		PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
10288 		return;
10289 	}
10290 	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
10291 	ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
10292 				       PCI_DEV_CTRL_REG);
10293 	if (ret < 0) {
10294 		PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
10295 			    PCI_DEV_CTRL_REG);
10296 		return;
10297 	}
10298 }
10299 
10300 /*
10301  * As some registers are not reset unless a global hardware reset is
10302  * performed, hardware initialization is needed to put those registers
10303  * into an expected initial state.
10304  */
10305 static void
10306 i40e_hw_init(struct rte_eth_dev *dev)
10307 {
10308 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10309 
10310 	i40e_enable_extended_tag(dev);
10311 
10312 	/* clear the PF Queue Filter control register */
10313 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
10314 
10315 	/* Disable symmetric hash per port */
10316 	i40e_set_symmetric_hash_enable_per_port(hw, 0);
10317 }
10318 
10319 /*
10320  * For X722 it is possible to have multiple pctypes mapped to the same flowtype;
10321  * however, this function will return only the highest pctype index,
10322  * which is not quite correct. This is a known problem of the i40e driver
10323  * and needs to be fixed later.
10324  */
10325 enum i40e_filter_pctype
10326 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
10327 {
10328 	int i;
10329 	uint64_t pctype_mask;
10330 
10331 	if (flow_type < I40E_FLOW_TYPE_MAX) {
10332 		pctype_mask = adapter->pctypes_tbl[flow_type];
10333 		for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
10334 			if (pctype_mask & (1ULL << i))
10335 				return (enum i40e_filter_pctype)i;
10336 		}
10337 	}
10338 	return I40E_FILTER_PCTYPE_INVALID;
10339 }
10340 
10341 uint16_t
10342 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
10343 			enum i40e_filter_pctype pctype)
10344 {
10345 	uint16_t flowtype;
10346 	uint64_t pctype_mask = 1ULL << pctype;
10347 
10348 	for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
10349 	     flowtype++) {
10350 		if (adapter->pctypes_tbl[flowtype] & pctype_mask)
10351 			return flowtype;
10352 	}
10353 
10354 	return RTE_ETH_FLOW_UNKNOWN;
10355 }
10356 
10357 /*
10358  * On X710, performance numbers are far from expectations on recent firmware
10359  * versions; on XL710, performance numbers are also far from expectations on
10360  * recent firmware versions, if promiscuous mode is disabled, or if promiscuous
10361  * mode is enabled and the port MAC address is equal to the packet destination
10362  * MAC address. The fix for this issue may not be integrated in the following
10363  * firmware version, so a workaround in the software driver is needed. It
10364  * modifies the initial values of 3 internal-only registers for both X710 and
10365  * XL710. Note that the values for X710 and XL710 may differ, and the
10366  * workaround can be removed once it is fixed in firmware in the future.
10367  */
10368 
10369 /* For both X710 and XL710 */
10370 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1	0x10000200
10371 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2	0x203F0200
10372 #define I40E_GL_SWR_PRI_JOIN_MAP_0		0x26CE00
10373 
10374 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10375 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10376 
10377 /* For X722 */
10378 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10379 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10380 
10381 /* For X710 */
10382 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10383 /* For XL710 */
10384 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10385 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10386 
10387 /*
10388  * GL_SWR_PM_UP_THR:
10389  * The value is not affected by the link speed; it is set according
10390  * to the total number of ports for a better pipe-monitor configuration.
10391  */
10392 static bool
10393 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10394 {
10395 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10396 		.device_id = (dev),   \
10397 		.val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10398 
10399 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10400 		.device_id = (dev),   \
10401 		.val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10402 
10403 	static const struct {
10404 		uint16_t device_id;
10405 		uint32_t val;
10406 	} swr_pm_table[] = {
10407 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10408 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10409 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10410 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10411 
10412 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10413 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10414 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10415 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10416 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10417 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10418 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10419 	};
10420 	uint32_t i;
10421 
10422 	if (value == NULL) {
10423 		PMD_DRV_LOG(ERR, "value is NULL");
10424 		return false;
10425 	}
10426 
10427 	for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10428 		if (hw->device_id == swr_pm_table[i].device_id) {
10429 			*value = swr_pm_table[i].val;
10430 
10431 			PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10432 				    "value - 0x%08x",
10433 				    hw->device_id, *value);
10434 			return true;
10435 		}
10436 	}
10437 
10438 	return false;
10439 }
10440 
10441 static int
10442 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10443 {
10444 	enum i40e_status_code status;
10445 	struct i40e_aq_get_phy_abilities_resp phy_ab;
10446 	int ret = -ENOTSUP;
10447 	int retries = 0;
10448 
10449 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10450 					      NULL);
10451 
10452 	while (status) {
10453 		PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10454 			status);
10455 		retries++;
10456 		rte_delay_us(100000);
10457 		if  (retries < 5)
10458 			status = i40e_aq_get_phy_capabilities(hw, false,
10459 					true, &phy_ab, NULL);
10460 		else
10461 			return ret;
10462 	}
10463 	return 0;
10464 }
10465 
10466 static void
10467 i40e_configure_registers(struct i40e_hw *hw)
10468 {
10469 	static struct {
10470 		uint32_t addr;
10471 		uint64_t val;
10472 	} reg_table[] = {
10473 		{I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10474 		{I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10475 		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10476 	};
10477 	uint64_t reg;
10478 	uint32_t i;
10479 	int ret;
10480 
10481 	for (i = 0; i < RTE_DIM(reg_table); i++) {
10482 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10483 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10484 				reg_table[i].val =
10485 					I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10486 			else /* For X710/XL710/XXV710 */
10487 				if (hw->aq.fw_maj_ver < 6)
10488 					reg_table[i].val =
10489 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10490 				else
10491 					reg_table[i].val =
10492 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10493 		}
10494 
10495 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10496 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10497 				reg_table[i].val =
10498 					I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10499 			else /* For X710/XL710/XXV710 */
10500 				reg_table[i].val =
10501 					I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10502 		}
10503 
10504 		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10505 			uint32_t cfg_val;
10506 
10507 			if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10508 				PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10509 					    "GL_SWR_PM_UP_THR value fixup",
10510 					    hw->device_id);
10511 				continue;
10512 			}
10513 
10514 			reg_table[i].val = cfg_val;
10515 		}
10516 
10517 		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10518 							&reg, NULL);
10519 		if (ret < 0) {
10520 			PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10521 							reg_table[i].addr);
10522 			break;
10523 		}
10524 		PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10525 						reg_table[i].addr, reg);
10526 		if (reg == reg_table[i].val)
10527 			continue;
10528 
10529 		ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10530 						reg_table[i].val, NULL);
10531 		if (ret < 0) {
10532 			PMD_DRV_LOG(ERR,
10533 				"Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10534 				reg_table[i].val, reg_table[i].addr);
10535 			break;
10536 		}
10537 		PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10538 			"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10539 	}
10540 }
10541 
10542 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
10543 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10544 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10545 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10546 static int
10547 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10548 {
10549 	uint32_t reg;
10550 	int ret;
10551 
10552 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10553 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10554 		return -EINVAL;
10555 	}
10556 
10557 	/* Configure for double VLAN RX stripping */
10558 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10559 	if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10560 		reg |= I40E_VSI_TSR_QINQ_CONFIG;
10561 		ret = i40e_aq_debug_write_register(hw,
10562 						   I40E_VSI_TSR(vsi->vsi_id),
10563 						   reg, NULL);
10564 		if (ret < 0) {
10565 			PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10566 				    vsi->vsi_id);
10567 			return I40E_ERR_CONFIG;
10568 		}
10569 	}
10570 
10571 	/* Configure for double VLAN TX insertion */
10572 	reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10573 	if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10574 		reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10575 		ret = i40e_aq_debug_write_register(hw,
10576 						   I40E_VSI_L2TAGSTXVALID(
10577 						   vsi->vsi_id), reg, NULL);
10578 		if (ret < 0) {
10579 			PMD_DRV_LOG(ERR,
10580 				"Failed to update VSI_L2TAGSTXVALID[%d]",
10581 				vsi->vsi_id);
10582 			return I40E_ERR_CONFIG;
10583 		}
10584 	}
10585 
10586 	return 0;
10587 }
10588 
10589 /**
10590  * i40e_aq_add_mirror_rule
10591  * @hw: pointer to the hardware structure
10592  * @seid: VEB seid to add mirror rule to
10593  * @dst_id: destination vsi seid
10594  * @entries: Buffer which contains the entities to be mirrored
10595  * @count: number of entities contained in the buffer
10596  * @rule_id: the rule_id of the rule to be added
10597  *
10598  * Add a mirror rule for a given veb.
10599  *
10600  **/
10601 static enum i40e_status_code
10602 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10603 			uint16_t seid, uint16_t dst_id,
10604 			uint16_t rule_type, uint16_t *entries,
10605 			uint16_t count, uint16_t *rule_id)
10606 {
10607 	struct i40e_aq_desc desc;
10608 	struct i40e_aqc_add_delete_mirror_rule cmd;
10609 	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10610 		(struct i40e_aqc_add_delete_mirror_rule_completion *)
10611 		&desc.params.raw;
10612 	uint16_t buff_len;
10613 	enum i40e_status_code status;
10614 
10615 	i40e_fill_default_direct_cmd_desc(&desc,
10616 					  i40e_aqc_opc_add_mirror_rule);
10617 	memset(&cmd, 0, sizeof(cmd));
10618 
10619 	buff_len = sizeof(uint16_t) * count;
10620 	desc.datalen = rte_cpu_to_le_16(buff_len);
10621 	if (buff_len > 0)
10622 		desc.flags |= rte_cpu_to_le_16(
10623 			(uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10624 	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10625 				I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10626 	cmd.num_entries = rte_cpu_to_le_16(count);
10627 	cmd.seid = rte_cpu_to_le_16(seid);
10628 	cmd.destination = rte_cpu_to_le_16(dst_id);
10629 
10630 	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10631 	status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10632 	PMD_DRV_LOG(INFO,
10633 		"i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
10634 		hw->aq.asq_last_status, resp->rule_id,
10635 		resp->mirror_rules_used, resp->mirror_rules_free);
10636 	*rule_id = rte_le_to_cpu_16(resp->rule_id);
10637 
10638 	return status;
10639 }
10640 
10641 /**
10642  * i40e_aq_del_mirror_rule
10643  * @hw: pointer to the hardware structure
10644  * @seid: VEB seid to delete the mirror rule from
10645  * @entries: Buffer which contains the entities to be mirrored
10646  * @count: number of entities contained in the buffer
10647  * @rule_id: the rule_id of the rule to be deleted
10648  *
10649  * Delete a mirror rule for a given veb.
10650  *
10651  **/
10652 static enum i40e_status_code
10653 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10654 		uint16_t seid, uint16_t rule_type, uint16_t *entries,
10655 		uint16_t count, uint16_t rule_id)
10656 {
10657 	struct i40e_aq_desc desc;
10658 	struct i40e_aqc_add_delete_mirror_rule cmd;
10659 	uint16_t buff_len = 0;
10660 	enum i40e_status_code status;
10661 	void *buff = NULL;
10662 
10663 	i40e_fill_default_direct_cmd_desc(&desc,
10664 					  i40e_aqc_opc_delete_mirror_rule);
10665 	memset(&cmd, 0, sizeof(cmd));
10666 	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10667 		desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10668 							  I40E_AQ_FLAG_RD));
10669 		cmd.num_entries = count;
10670 		buff_len = sizeof(uint16_t) * count;
10671 		desc.datalen = rte_cpu_to_le_16(buff_len);
10672 		buff = (void *)entries;
10673 	} else
10674 		/* rule id is filled in destination field for deleting mirror rule */
10675 		cmd.destination = rte_cpu_to_le_16(rule_id);
10676 
10677 	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10678 				I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10679 	cmd.seid = rte_cpu_to_le_16(seid);
10680 
10681 	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10682 	status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10683 
10684 	return status;
10685 }
10686 
10687 /**
10688  * i40e_mirror_rule_set
10689  * @dev: pointer to the device structure
10690  * @mirror_conf: mirror rule info
10691  * @sw_id: mirror rule's sw_id
10692  * @on: enable/disable
10693  *
10694  * set a mirror rule.
10695  *
10696  **/
10697 static int
10698 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10699 			struct rte_eth_mirror_conf *mirror_conf,
10700 			uint8_t sw_id, uint8_t on)
10701 {
10702 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10703 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10704 	struct i40e_mirror_rule *it, *mirr_rule = NULL;
10705 	struct i40e_mirror_rule *parent = NULL;
10706 	uint16_t seid, dst_seid, rule_id;
10707 	uint16_t i, j = 0;
10708 	int ret;
10709 
10710 	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10711 
10712 	if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10713 		PMD_DRV_LOG(ERR,
10714 			"mirror rule can not be configured without veb or vfs.");
10715 		return -ENOSYS;
10716 	}
10717 	if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10718 		PMD_DRV_LOG(ERR, "mirror table is full.");
10719 		return -ENOSPC;
10720 	}
10721 	if (mirror_conf->dst_pool > pf->vf_num) {
10722 		PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10723 				 mirror_conf->dst_pool);
10724 		return -EINVAL;
10725 	}
10726 
10727 	seid = pf->main_vsi->veb->seid;
10728 
10729 	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10730 		if (sw_id <= it->index) {
10731 			mirr_rule = it;
10732 			break;
10733 		}
10734 		parent = it;
10735 	}
10736 	if (mirr_rule && sw_id == mirr_rule->index) {
10737 		if (on) {
10738 			PMD_DRV_LOG(ERR, "mirror rule exists.");
10739 			return -EEXIST;
10740 		} else {
10741 			ret = i40e_aq_del_mirror_rule(hw, seid,
10742 					mirr_rule->rule_type,
10743 					mirr_rule->entries,
10744 					mirr_rule->num_entries, mirr_rule->id);
10745 			if (ret < 0) {
10746 				PMD_DRV_LOG(ERR,
10747 					"failed to remove mirror rule: ret = %d, aq_err = %d.",
10748 					ret, hw->aq.asq_last_status);
10749 				return -ENOSYS;
10750 			}
10751 			TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10752 			rte_free(mirr_rule);
10753 			pf->nb_mirror_rule--;
10754 			return 0;
10755 		}
10756 	} else if (!on) {
10757 		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10758 		return -ENOENT;
10759 	}
10760 
10761 	mirr_rule = rte_zmalloc("i40e_mirror_rule",
10762 				sizeof(struct i40e_mirror_rule) , 0);
10763 	if (!mirr_rule) {
10764 		PMD_DRV_LOG(ERR, "failed to allocate memory");
10765 		return I40E_ERR_NO_MEMORY;
10766 	}
10767 	switch (mirror_conf->rule_type) {
10768 	case ETH_MIRROR_VLAN:
10769 		for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10770 			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10771 				mirr_rule->entries[j] =
10772 					mirror_conf->vlan.vlan_id[i];
10773 				j++;
10774 			}
10775 		}
10776 		if (j == 0) {
10777 			PMD_DRV_LOG(ERR, "vlan is not specified.");
10778 			rte_free(mirr_rule);
10779 			return -EINVAL;
10780 		}
10781 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10782 		break;
10783 	case ETH_MIRROR_VIRTUAL_POOL_UP:
10784 	case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10785 		/* check if the specified pool bit is out of range */
10786 		if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10787 			PMD_DRV_LOG(ERR, "pool mask is out of range.");
10788 			rte_free(mirr_rule);
10789 			return -EINVAL;
10790 		}
10791 		for (i = 0, j = 0; i < pf->vf_num; i++) {
10792 			if (mirror_conf->pool_mask & (1ULL << i)) {
10793 				mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10794 				j++;
10795 			}
10796 		}
10797 		if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10798 			/* add pf vsi to entries */
10799 			mirr_rule->entries[j] = pf->main_vsi_seid;
10800 			j++;
10801 		}
10802 		if (j == 0) {
10803 			PMD_DRV_LOG(ERR, "pool is not specified.");
10804 			rte_free(mirr_rule);
10805 			return -EINVAL;
10806 		}
10807 		/* Egress and ingress in AQ commands mean from the switch, not the port */
10808 		mirr_rule->rule_type =
10809 			(mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10810 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10811 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10812 		break;
10813 	case ETH_MIRROR_UPLINK_PORT:
10814 		/* Egress and ingress in AQ commands mean from the switch, not the port */
10815 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10816 		break;
10817 	case ETH_MIRROR_DOWNLINK_PORT:
10818 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10819 		break;
10820 	default:
10821 		PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10822 			mirror_conf->rule_type);
10823 		rte_free(mirr_rule);
10824 		return -EINVAL;
10825 	}
10826 
10827 	/* If the dst_pool is equal to vf_num, consider it as PF */
10828 	if (mirror_conf->dst_pool == pf->vf_num)
10829 		dst_seid = pf->main_vsi_seid;
10830 	else
10831 		dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10832 
10833 	ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10834 				      mirr_rule->rule_type, mirr_rule->entries,
10835 				      j, &rule_id);
10836 	if (ret < 0) {
10837 		PMD_DRV_LOG(ERR,
10838 			"failed to add mirror rule: ret = %d, aq_err = %d.",
10839 			ret, hw->aq.asq_last_status);
10840 		rte_free(mirr_rule);
10841 		return -ENOSYS;
10842 	}
10843 
10844 	mirr_rule->index = sw_id;
10845 	mirr_rule->num_entries = j;
10846 	mirr_rule->id = rule_id;
10847 	mirr_rule->dst_vsi_seid = dst_seid;
10848 
10849 	if (parent)
10850 		TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10851 	else
10852 		TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10853 
10854 	pf->nb_mirror_rule++;
10855 	return 0;
10856 }
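
/*
 * Hypothetical application-level usage of the mirroring path above (a minimal
 * sketch; port 0, VF pool 1 and VLAN 100 are assumptions, and VFs must be
 * configured).  Mirror VLAN 100 traffic to pool 1 as rule 0, then remove it:
 *
 *	struct rte_eth_mirror_conf mc = {
 *		.rule_type = ETH_MIRROR_VLAN,
 *		.dst_pool = 1,
 *		.vlan = { .vlan_mask = 1ULL << 0, .vlan_id = { 100 } },
 *	};
 *	rte_eth_mirror_rule_set(0, &mc, 0, 1);	// on
 *	rte_eth_mirror_rule_set(0, &mc, 0, 0);	// off again
 */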
10857 
10858 /**
10859  * i40e_mirror_rule_reset
10860  * @dev: pointer to the device
10861  * @sw_id: mirror rule's sw_id
10862  *
10863  * reset a mirror rule.
10864  *
10865  **/
10866 static int
10867 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10868 {
10869 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10870 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10871 	struct i40e_mirror_rule *it, *mirr_rule = NULL;
10872 	uint16_t seid;
10873 	int ret;
10874 
10875 	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10876 
10877 	seid = pf->main_vsi->veb->seid;
10878 
10879 	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10880 		if (sw_id == it->index) {
10881 			mirr_rule = it;
10882 			break;
10883 		}
10884 	}
10885 	if (mirr_rule) {
10886 		ret = i40e_aq_del_mirror_rule(hw, seid,
10887 				mirr_rule->rule_type,
10888 				mirr_rule->entries,
10889 				mirr_rule->num_entries, mirr_rule->id);
10890 		if (ret < 0) {
10891 			PMD_DRV_LOG(ERR,
10892 				"failed to remove mirror rule: status = %d, aq_err = %d.",
10893 				ret, hw->aq.asq_last_status);
10894 			return -ENOSYS;
10895 		}
10896 		TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10897 		rte_free(mirr_rule);
10898 		pf->nb_mirror_rule--;
10899 	} else {
10900 		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10901 		return -ENOENT;
10902 	}
10903 	return 0;
10904 }
10905 
10906 static uint64_t
10907 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10908 {
10909 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10910 	uint64_t systim_cycles;
10911 
10912 	systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10913 	systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10914 			<< 32;
10915 
10916 	return systim_cycles;
10917 }
10918 
10919 static uint64_t
10920 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10921 {
10922 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10923 	uint64_t rx_tstamp;
10924 
10925 	rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10926 	rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10927 			<< 32;
10928 
10929 	return rx_tstamp;
10930 }
10931 
10932 static uint64_t
10933 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10934 {
10935 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10936 	uint64_t tx_tstamp;
10937 
10938 	tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10939 	tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10940 			<< 32;
10941 
10942 	return tx_tstamp;
10943 }
10944 
10945 static void
10946 i40e_start_timecounters(struct rte_eth_dev *dev)
10947 {
10948 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10949 	struct i40e_adapter *adapter = dev->data->dev_private;
10950 	struct rte_eth_link link;
10951 	uint32_t tsync_inc_l;
10952 	uint32_t tsync_inc_h;
10953 
10954 	/* Get current link speed. */
10955 	i40e_dev_link_update(dev, 1);
10956 	rte_eth_linkstatus_get(dev, &link);
10957 
10958 	switch (link.link_speed) {
10959 	case ETH_SPEED_NUM_40G:
10960 	case ETH_SPEED_NUM_25G:
10961 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10962 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10963 		break;
10964 	case ETH_SPEED_NUM_10G:
10965 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10966 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10967 		break;
10968 	case ETH_SPEED_NUM_1G:
10969 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10970 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10971 		break;
10972 	default:
10973 		tsync_inc_l = 0x0;
10974 		tsync_inc_h = 0x0;
10975 	}
10976 
10977 	/* Set the timesync increment value. */
10978 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10979 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10980 
10981 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10982 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10983 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10984 
10985 	adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10986 	adapter->systime_tc.cc_shift = 0;
10987 	adapter->systime_tc.nsec_mask = 0;
10988 
10989 	adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10990 	adapter->rx_tstamp_tc.cc_shift = 0;
10991 	adapter->rx_tstamp_tc.nsec_mask = 0;
10992 
10993 	adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10994 	adapter->tx_tstamp_tc.cc_shift = 0;
10995 	adapter->tx_tstamp_tc.nsec_mask = 0;
10996 }
10997 
10998 static int
10999 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
11000 {
11001 	struct i40e_adapter *adapter = dev->data->dev_private;
11002 
11003 	adapter->systime_tc.nsec += delta;
11004 	adapter->rx_tstamp_tc.nsec += delta;
11005 	adapter->tx_tstamp_tc.nsec += delta;
11006 
11007 	return 0;
11008 }
11009 
11010 static int
11011 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
11012 {
11013 	uint64_t ns;
11014 	struct i40e_adapter *adapter = dev->data->dev_private;
11015 
11016 	ns = rte_timespec_to_ns(ts);
11017 
11018 	/* Set the timecounters to a new value. */
11019 	adapter->systime_tc.nsec = ns;
11020 	adapter->rx_tstamp_tc.nsec = ns;
11021 	adapter->tx_tstamp_tc.nsec = ns;
11022 
11023 	return 0;
11024 }
11025 
11026 static int
11027 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
11028 {
11029 	uint64_t ns, systime_cycles;
11030 	struct i40e_adapter *adapter = dev->data->dev_private;
11031 
11032 	systime_cycles = i40e_read_systime_cyclecounter(dev);
11033 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
11034 	*ts = rte_ns_to_timespec(ns);
11035 
11036 	return 0;
11037 }
11038 
11039 static int
11040 i40e_timesync_enable(struct rte_eth_dev *dev)
11041 {
11042 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11043 	uint32_t tsync_ctl_l;
11044 	uint32_t tsync_ctl_h;
11045 
11046 	/* Stop the timesync system time. */
11047 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
11048 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
11049 	/* Reset the timesync system time value. */
11050 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
11051 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
11052 
11053 	i40e_start_timecounters(dev);
11054 
11055 	/* Clear timesync registers. */
11056 	I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11057 	I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
11058 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
11059 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
11060 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
11061 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
11062 
11063 	/* Enable timestamping of PTP packets. */
11064 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
11065 	tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
11066 
11067 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
11068 	tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
11069 	tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
11070 
11071 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
11072 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
11073 
11074 	return 0;
11075 }
11076 
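
/*
 * Hypothetical usage of the IEEE 1588 path above from an application (a
 * minimal sketch; port 0 and timestamp register index 0 are assumptions):
 *
 *	struct timespec ts;
 *	rte_eth_timesync_enable(0);
 *	// ... receive a PTP packet whose mbuf has PKT_RX_IEEE1588_TMST set ...
 *	if (rte_eth_timesync_read_rx_timestamp(0, &ts, 0) == 0)
 *		printf("rx tstamp %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 */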
11077 static int
11078 i40e_timesync_disable(struct rte_eth_dev *dev)
11079 {
11080 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11081 	uint32_t tsync_ctl_l;
11082 	uint32_t tsync_ctl_h;
11083 
11084 	/* Disable timestamping of transmitted PTP packets. */
11085 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
11086 	tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
11087 
11088 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
11089 	tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
11090 
11091 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
11092 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
11093 
11094 	/* Reset the timesync increment value. */
11095 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
11096 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
11097 
11098 	return 0;
11099 }
11100 
11101 static int
11102 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
11103 				struct timespec *timestamp, uint32_t flags)
11104 {
11105 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11106 	struct i40e_adapter *adapter = dev->data->dev_private;
11107 	uint32_t sync_status;
11108 	uint32_t index = flags & 0x03;
11109 	uint64_t rx_tstamp_cycles;
11110 	uint64_t ns;
11111 
11112 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
11113 	if ((sync_status & (1 << index)) == 0)
11114 		return -EINVAL;
11115 
11116 	rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
11117 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
11118 	*timestamp = rte_ns_to_timespec(ns);
11119 
11120 	return 0;
11121 }
11122 
11123 static int
11124 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
11125 				struct timespec *timestamp)
11126 {
11127 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11128 	struct i40e_adapter *adapter = dev->data->dev_private;
11129 	uint32_t sync_status;
11130 	uint64_t tx_tstamp_cycles;
11131 	uint64_t ns;
11132 
11133 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11134 	if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
11135 		return -EINVAL;
11136 
11137 	tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
11138 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
11139 	*timestamp = rte_ns_to_timespec(ns);
11140 
11141 	return 0;
11142 }
11143 
11144 /*
11145  * i40e_parse_dcb_configure - parse the DCB configuration from the user
11146  * @dev: the device being configured
11147  * @dcb_cfg: pointer to the parsed result
11148  * @*tc_map: bit map of enabled traffic classes
11149  *
11150  * Returns 0 on success, negative value on failure
11151  */
11152 static int
11153 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
11154 			 struct i40e_dcbx_config *dcb_cfg,
11155 			 uint8_t *tc_map)
11156 {
11157 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
11158 	uint8_t i, tc_bw, bw_lf;
11159 
11160 	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
11161 
11162 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
11163 	if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
11164 		PMD_INIT_LOG(ERR, "number of tc exceeds max.");
11165 		return -EINVAL;
11166 	}
11167 
11168 	/* assume each tc has the same bw */
11169 	tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
11170 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11171 		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
11172 	/* to ensure the sum of tcbw is equal to 100 */
11173 	bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
11174 	for (i = 0; i < bw_lf; i++)
11175 		dcb_cfg->etscfg.tcbwtable[i]++;
11176 
11177 	/* assume each tc has the same Transmission Selection Algorithm */
11178 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11179 		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
11180 
11181 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11182 		dcb_cfg->etscfg.prioritytable[i] =
11183 				dcb_rx_conf->dcb_tc[i];
11184 
11185 	/* FW needs one App to configure HW */
11186 	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
11187 	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
11188 	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
11189 	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
11190 
11191 	if (dcb_rx_conf->nb_tcs == 0)
11192 		*tc_map = 1; /* tc0 only */
11193 	else
11194 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
11195 
11196 	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
11197 		dcb_cfg->pfc.willing = 0;
11198 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
11199 		dcb_cfg->pfc.pfcenable = *tc_map;
11200 	}
11201 	return 0;
11202 }
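
/*
 * Worked example of the bandwidth split above (illustrative only): with
 * nb_tcs = 8, tc_bw = 100 / 8 = 12 and bw_lf = 100 % 8 = 4, so the first
 * four TCs get 13% and the rest 12%, i.e. {13, 13, 13, 13, 12, 12, 12, 12},
 * which still sums to 100.
 */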
11203 
11204 
11205 static enum i40e_status_code
11206 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
11207 			      struct i40e_aqc_vsi_properties_data *info,
11208 			      uint8_t enabled_tcmap)
11209 {
11210 	enum i40e_status_code ret;
11211 	int i, total_tc = 0;
11212 	uint16_t qpnum_per_tc, bsf, qp_idx;
11213 	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
11214 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
11215 	uint16_t used_queues;
11216 
11217 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
11218 	if (ret != I40E_SUCCESS)
11219 		return ret;
11220 
11221 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11222 		if (enabled_tcmap & (1 << i))
11223 			total_tc++;
11224 	}
11225 	if (total_tc == 0)
11226 		total_tc = 1;
11227 	vsi->enabled_tc = enabled_tcmap;
11228 
11229 	/* Different VSI types have different numbers of queues assigned */
11230 	if (vsi->type == I40E_VSI_MAIN)
11231 		used_queues = dev_data->nb_rx_queues -
11232 			pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11233 	else if (vsi->type == I40E_VSI_VMDQ2)
11234 		used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11235 	else {
11236 		PMD_INIT_LOG(ERR, "unsupported VSI type.");
11237 		return I40E_ERR_NO_AVAILABLE_VSI;
11238 	}
11239 
11240 	qpnum_per_tc = used_queues / total_tc;
11241 	/* Number of queues per enabled TC */
11242 	if (qpnum_per_tc == 0) {
11243 		PMD_INIT_LOG(ERR, "number of queues is less than the number of TCs.");
11244 		return I40E_ERR_INVALID_QP_ID;
11245 	}
11246 	qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
11247 				I40E_MAX_Q_PER_TC);
11248 	bsf = rte_bsf32(qpnum_per_tc);
11249 
11250 	/**
11251 	 * Configure TC and queue mapping parameters. For each enabled TC,
11252 	 * allocate qpnum_per_tc queues to it; for a disabled TC, the
11253 	 * default queue will serve it.
11254 	 */
11255 	qp_idx = 0;
11256 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11257 		if (vsi->enabled_tc & (1 << i)) {
11258 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
11259 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
11260 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
11261 			qp_idx += qpnum_per_tc;
11262 		} else
11263 			info->tc_mapping[i] = 0;
11264 	}
11265 
11266 	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
11267 	if (vsi->type == I40E_VSI_SRIOV) {
11268 		info->mapping_flags |=
11269 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
11270 		for (i = 0; i < vsi->nb_qps; i++)
11271 			info->queue_mapping[i] =
11272 				rte_cpu_to_le_16(vsi->base_queue + i);
11273 	} else {
11274 		info->mapping_flags |=
11275 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
11276 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
11277 	}
11278 	info->valid_sections |=
11279 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
11280 
11281 	return I40E_SUCCESS;
11282 }
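
/*
 * Worked example for the queue mapping above (numbers are assumptions): with
 * 16 usable queues and 4 enabled TCs, qpnum_per_tc = 4 and
 * bsf = rte_bsf32(4) = 2, so the n-th enabled TC is programmed with
 * tc_mapping = ((4 * n) << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 * (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT), i.e. it owns queues
 * [4 * n, 4 * n + 3].
 */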
11283 
11284 /*
11285  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
11286  * @veb: VEB to be configured
11287  * @tc_map: enabled TC bitmap
11288  *
11289  * Returns 0 on success, negative value on failure
11290  */
11291 static enum i40e_status_code
11292 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
11293 {
11294 	struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
11295 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
11296 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
11297 	struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
11298 	enum i40e_status_code ret = I40E_SUCCESS;
11299 	int i;
11300 	uint32_t bw_max;
11301 
11302 	/* Check if enabled_tc is same as existing or new TCs */
11303 	if (veb->enabled_tc == tc_map)
11304 		return ret;
11305 
11306 	/* configure tc bandwidth */
11307 	memset(&veb_bw, 0, sizeof(veb_bw));
11308 	veb_bw.tc_valid_bits = tc_map;
11309 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
11310 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11311 		if (tc_map & BIT_ULL(i))
11312 			veb_bw.tc_bw_share_credits[i] = 1;
11313 	}
11314 	ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
11315 						   &veb_bw, NULL);
11316 	if (ret) {
11317 		PMD_INIT_LOG(ERR,
11318 			"AQ command Config switch_comp BW allocation per TC failed = %d",
11319 			hw->aq.asq_last_status);
11320 		return ret;
11321 	}
11322 
11323 	memset(&ets_query, 0, sizeof(ets_query));
11324 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
11325 						   &ets_query, NULL);
11326 	if (ret != I40E_SUCCESS) {
11327 		PMD_DRV_LOG(ERR,
11328 			"Failed to get switch_comp ETS configuration %u",
11329 			hw->aq.asq_last_status);
11330 		return ret;
11331 	}
11332 	memset(&bw_query, 0, sizeof(bw_query));
11333 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
11334 						  &bw_query, NULL);
11335 	if (ret != I40E_SUCCESS) {
11336 		PMD_DRV_LOG(ERR,
11337 			"Failed to get switch_comp bandwidth configuration %u",
11338 			hw->aq.asq_last_status);
11339 		return ret;
11340 	}
11341 
11342 	/* store and print out BW info */
11343 	veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
11344 	veb->bw_info.bw_max = ets_query.tc_bw_max;
11345 	PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
11346 	PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
11347 	bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
11348 		    (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
11349 		     I40E_16_BIT_WIDTH);
11350 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11351 		veb->bw_info.bw_ets_share_credits[i] =
11352 				bw_query.tc_bw_share_credits[i];
11353 		veb->bw_info.bw_ets_credits[i] =
11354 				rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
11355 		/* 4 bits per TC, 4th bit is reserved */
11356 		veb->bw_info.bw_ets_max[i] =
11357 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
11358 				  RTE_LEN2MASK(3, uint8_t));
11359 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
11360 			    veb->bw_info.bw_ets_share_credits[i]);
11361 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
11362 			    veb->bw_info.bw_ets_credits[i]);
11363 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
11364 			    veb->bw_info.bw_ets_max[i]);
11365 	}
11366 
11367 	veb->enabled_tc = tc_map;
11368 
11369 	return ret;
11370 }
11371 
11372 
11373 /*
11374  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11375  * @vsi: VSI to be configured
11376  * @tc_map: enabled TC bitmap
11377  *
11378  * Returns 0 on success, negative value on failure
11379  */
11380 static enum i40e_status_code
11381 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11382 {
11383 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11384 	struct i40e_vsi_context ctxt;
11385 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11386 	enum i40e_status_code ret = I40E_SUCCESS;
11387 	int i;
11388 
11389 	/* Check if enabled_tc is same as existing or new TCs */
11390 	if (vsi->enabled_tc == tc_map)
11391 		return ret;
11392 
11393 	/* configure tc bandwidth */
11394 	memset(&bw_data, 0, sizeof(bw_data));
11395 	bw_data.tc_valid_bits = tc_map;
11396 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
11397 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11398 		if (tc_map & BIT_ULL(i))
11399 			bw_data.tc_bw_credits[i] = 1;
11400 	}
11401 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11402 	if (ret) {
11403 		PMD_INIT_LOG(ERR,
11404 			"AQ command Config VSI BW allocation per TC failed = %d",
11405 			hw->aq.asq_last_status);
11406 		goto out;
11407 	}
11408 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11409 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11410 
11411 	/* Update Queue Pairs Mapping for currently enabled UPs */
11412 	ctxt.seid = vsi->seid;
11413 	ctxt.pf_num = hw->pf_id;
11414 	ctxt.vf_num = 0;
11415 	ctxt.uplink_seid = vsi->uplink_seid;
11416 	ctxt.info = vsi->info;
11417 	i40e_get_cap(hw);
11418 	ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11419 	if (ret)
11420 		goto out;
11421 
11422 	/* Update the VSI after updating the VSI queue-mapping information */
11423 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11424 	if (ret) {
11425 		PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11426 			hw->aq.asq_last_status);
11427 		goto out;
11428 	}
11429 	/* update the local VSI info with updated queue map */
11430 	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11431 					sizeof(vsi->info.tc_mapping));
11432 	rte_memcpy(&vsi->info.queue_mapping,
11433 			&ctxt.info.queue_mapping,
11434 		sizeof(vsi->info.queue_mapping));
11435 	vsi->info.mapping_flags = ctxt.info.mapping_flags;
11436 	vsi->info.valid_sections = 0;
11437 
11438 	/* query and update current VSI BW information */
11439 	ret = i40e_vsi_get_bw_config(vsi);
11440 	if (ret) {
11441 		PMD_INIT_LOG(ERR,
11442 			 "Failed updating vsi bw info, err %s aq_err %s",
11443 			 i40e_stat_str(hw, ret),
11444 			 i40e_aq_str(hw, hw->aq.asq_last_status));
11445 		goto out;
11446 	}
11447 
11448 	vsi->enabled_tc = tc_map;
11449 
11450 out:
11451 	return ret;
11452 }
11453 
11454 /*
11455  * i40e_dcb_hw_configure - program the dcb setting to hw
11456  * @pf: pf the configuration is taken on
11457  * @new_cfg: new configuration
11458  * @tc_map: enabled TC bitmap
11459  *
11460  * Returns 0 on success, negative value on failure
11461  */
11462 static enum i40e_status_code
11463 i40e_dcb_hw_configure(struct i40e_pf *pf,
11464 		      struct i40e_dcbx_config *new_cfg,
11465 		      uint8_t tc_map)
11466 {
11467 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11468 	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11469 	struct i40e_vsi *main_vsi = pf->main_vsi;
11470 	struct i40e_vsi_list *vsi_list;
11471 	enum i40e_status_code ret;
11472 	int i;
11473 	uint32_t val;
11474 
11475 	/* Use the FW API if FW >= v4.4 */
11476 	if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11477 	      (hw->aq.fw_maj_ver >= 5))) {
11478 		PMD_INIT_LOG(ERR,
11479 			"FW < v4.4, can not use FW LLDP API to configure DCB");
11480 		return I40E_ERR_FIRMWARE_API_VERSION;
11481 	}
11482 
11483 	/* Check whether reconfiguration is needed */
11484 	if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11485 		PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11486 		return I40E_SUCCESS;
11487 	}
11488 
11489 	/* Copy the new config to the current config */
11490 	*old_cfg = *new_cfg;
11491 	old_cfg->etsrec = old_cfg->etscfg;
11492 	ret = i40e_set_dcb_config(hw);
11493 	if (ret) {
11494 		PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11495 			 i40e_stat_str(hw, ret),
11496 			 i40e_aq_str(hw, hw->aq.asq_last_status));
11497 		return ret;
11498 	}
11499 	/* set receive Arbiter to RR mode and ETS scheme by default */
11500 	for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11501 		val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11502 		val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11503 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11504 			 I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
11505 		val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11506 			I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11507 			 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11508 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11509 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11510 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11511 			 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11512 		I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11513 	}
11514 	/* get local mib to check whether it is configured correctly */
11515 	/* IEEE mode */
11516 	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11517 	/* Get Local DCB Config */
11518 	i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11519 				     &hw->local_dcbx_config);
11520 
11521 	/* If a VEB has been created, its TC mapping needs to be updated first */
11522 	if (main_vsi->veb) {
11523 		ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11524 		if (ret)
11525 			PMD_INIT_LOG(WARNING,
11526 				 "Failed configuring TC for VEB seid=%d",
11527 				 main_vsi->veb->seid);
11528 	}
11529 	/* Update each VSI */
11530 	i40e_vsi_config_tc(main_vsi, tc_map);
11531 	if (main_vsi->veb) {
11532 		TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11533 			/* Besides the main VSI and VMDQ VSIs, enable only the
11534 			 * default TC for other VSIs
11535 			 */
11536 			if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11537 				ret = i40e_vsi_config_tc(vsi_list->vsi,
11538 							 tc_map);
11539 			else
11540 				ret = i40e_vsi_config_tc(vsi_list->vsi,
11541 							 I40E_DEFAULT_TCMAP);
11542 			if (ret)
11543 				PMD_INIT_LOG(WARNING,
11544 					"Failed configuring TC for VSI seid=%d",
11545 					vsi_list->vsi->seid);
11546 			/* continue */
11547 		}
11548 	}
11549 	return I40E_SUCCESS;
11550 }
11551 
11552 /*
11553  * i40e_dcb_init_configure - initial dcb config
11554  * @dev: device being configured
11555  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11556  *
11557  * Returns 0 on success, negative value on failure
11558  */
11559 int
11560 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11561 {
11562 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11563 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11564 	int i, ret = 0;
11565 
11566 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
11567 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11568 		return -ENOTSUP;
11569 	}
11570 
11571 	/* DCB initialization:
11572 	 * Update DCB configuration from the Firmware and configure
11573 	 * LLDP MIB change event.
11574 	 */
11575 	if (sw_dcb == TRUE) {
11576 		if (i40e_need_stop_lldp(dev)) {
11577 			ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
11578 			if (ret != I40E_SUCCESS)
11579 				PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
11580 		}
11581 
11582 		ret = i40e_init_dcb(hw);
11583 		/* If the LLDP agent is stopped, i40e_init_dcb is expected to
11584 		 * fail with adminq status I40E_AQ_RC_EPERM.
11585 		 * Otherwise, it should return success.
11586 		 */
11587 		if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11588 		    hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11589 			memset(&hw->local_dcbx_config, 0,
11590 				sizeof(struct i40e_dcbx_config));
11591 			/* set dcb default configuration */
11592 			hw->local_dcbx_config.etscfg.willing = 0;
11593 			hw->local_dcbx_config.etscfg.maxtcs = 0;
11594 			hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11595 			hw->local_dcbx_config.etscfg.tsatable[0] =
11596 						I40E_IEEE_TSA_ETS;
11597 			/* all UPs mapping to TC0 */
11598 			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11599 				hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11600 			hw->local_dcbx_config.etsrec =
11601 				hw->local_dcbx_config.etscfg;
11602 			hw->local_dcbx_config.pfc.willing = 0;
11603 			hw->local_dcbx_config.pfc.pfccap =
11604 						I40E_MAX_TRAFFIC_CLASS;
11605 			/* FW needs one App to configure HW */
11606 			hw->local_dcbx_config.numapps = 1;
11607 			hw->local_dcbx_config.app[0].selector =
11608 						I40E_APP_SEL_ETHTYPE;
11609 			hw->local_dcbx_config.app[0].priority = 3;
11610 			hw->local_dcbx_config.app[0].protocolid =
11611 						I40E_APP_PROTOID_FCOE;
11612 			ret = i40e_set_dcb_config(hw);
11613 			if (ret) {
11614 				PMD_INIT_LOG(ERR,
11615 					"default dcb config fails. err = %d, aq_err = %d.",
11616 					ret, hw->aq.asq_last_status);
11617 				return -ENOSYS;
11618 			}
11619 		} else {
11620 			PMD_INIT_LOG(ERR,
11621 				"DCB initialization in FW fails, err = %d, aq_err = %d.",
11622 				ret, hw->aq.asq_last_status);
11623 			return -ENOTSUP;
11624 		}
11625 	} else {
11626 		ret = i40e_aq_start_lldp(hw, NULL);
11627 		if (ret != I40E_SUCCESS)
11628 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11629 
11630 		ret = i40e_init_dcb(hw);
11631 		if (!ret) {
11632 			if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11633 				PMD_INIT_LOG(ERR,
11634 					"HW doesn't support DCBX offload.");
11635 				return -ENOTSUP;
11636 			}
11637 		} else {
11638 			PMD_INIT_LOG(ERR,
11639 				"DCBX configuration failed, err = %d, aq_err = %d.",
11640 				ret, hw->aq.asq_last_status);
11641 			return -ENOTSUP;
11642 		}
11643 	}
11644 	return 0;
11645 }
11646 
11647 /*
11648  * i40e_dcb_setup - set up DCB-related configuration
11649  * @dev: device being configured
11650  *
11651  * Returns 0 on success, negative value on failure
11652  */
11653 static int
11654 i40e_dcb_setup(struct rte_eth_dev *dev)
11655 {
11656 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11657 	struct i40e_dcbx_config dcb_cfg;
11658 	uint8_t tc_map = 0;
11659 	int ret = 0;
11660 
11661 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
11662 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11663 		return -ENOTSUP;
11664 	}
11665 
11666 	if (pf->vf_num != 0)
11667 		PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
11668 
11669 	ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11670 	if (ret) {
11671 		PMD_INIT_LOG(ERR, "invalid dcb config");
11672 		return -EINVAL;
11673 	}
11674 	ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11675 	if (ret) {
11676 		PMD_INIT_LOG(ERR, "dcb sw configure fails");
11677 		return -ENOSYS;
11678 	}
11679 
11680 	return 0;
11681 }
11682 
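/* Report DCB information to the application: the number of enabled TCs, the
 * user-priority-to-TC mapping and TC bandwidth shares taken from the local
 * DCBX configuration, plus the per-TC Rx/Tx queue base and count decoded
 * from each VSI's tc_mapping.
 */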
11683 static int
11684 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11685 		      struct rte_eth_dcb_info *dcb_info)
11686 {
11687 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11688 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11689 	struct i40e_vsi *vsi = pf->main_vsi;
11690 	struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11691 	uint16_t bsf, tc_mapping;
11692 	int i, j = 0;
11693 
11694 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11695 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11696 	else
11697 		dcb_info->nb_tcs = 1;
11698 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11699 		dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11700 	for (i = 0; i < dcb_info->nb_tcs; i++)
11701 		dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11702 
11703 	/* get queue mapping if vmdq is disabled */
11704 	if (!pf->nb_cfg_vmdq_vsi) {
11705 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11706 			if (!(vsi->enabled_tc & (1 << i)))
11707 				continue;
11708 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11709 			dcb_info->tc_queue.tc_rxq[j][i].base =
11710 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11711 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11712 			dcb_info->tc_queue.tc_txq[j][i].base =
11713 				dcb_info->tc_queue.tc_rxq[j][i].base;
11714 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11715 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11716 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11717 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11718 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11719 		}
11720 		return 0;
11721 	}
11722 
11723 	/* get queue mapping if vmdq is enabled */
11724 	do {
11725 		vsi = pf->vmdq[j].vsi;
11726 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11727 			if (!(vsi->enabled_tc & (1 << i)))
11728 				continue;
11729 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11730 			dcb_info->tc_queue.tc_rxq[j][i].base =
11731 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11732 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11733 			dcb_info->tc_queue.tc_txq[j][i].base =
11734 				dcb_info->tc_queue.tc_rxq[j][i].base;
11735 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11736 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11737 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11738 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11739 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11740 		}
11741 		j++;
11742 	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11743 	return 0;
11744 }
11745 
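/* Re-enable the interrupt of an Rx queue: set INTENA and CLEARPBA in the
 * PFINT_DYN_CTL register matching the queue's MSI-X vector, flush, then ack
 * the interrupt on the PCI handle.
 */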
11746 static int
11747 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11748 {
11749 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11750 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11751 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11752 	uint16_t msix_intr;
11753 
11754 	msix_intr = intr_handle->intr_vec[queue_id];
11755 	if (msix_intr == I40E_MISC_VEC_ID)
11756 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11757 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
11758 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11759 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11760 	else
11761 		I40E_WRITE_REG(hw,
11762 			       I40E_PFINT_DYN_CTLN(msix_intr -
11763 						   I40E_RX_VEC_START),
11764 			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
11765 			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11766 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11767 
11768 	I40E_WRITE_FLUSH(hw);
11769 	rte_intr_ack(&pci_dev->intr_handle);
11770 
11771 	return 0;
11772 }
11773 
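/* Mask the interrupt of an Rx queue: write only the ITR index bits (INTENA
 * cleared) to the matching PFINT_DYN_CTL register and flush.
 */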
11774 static int
11775 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11776 {
11777 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11778 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11779 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11780 	uint16_t msix_intr;
11781 
11782 	msix_intr = intr_handle->intr_vec[queue_id];
11783 	if (msix_intr == I40E_MISC_VEC_ID)
11784 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11785 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11786 	else
11787 		I40E_WRITE_REG(hw,
11788 			       I40E_PFINT_DYN_CTLN(msix_intr -
11789 						   I40E_RX_VEC_START),
11790 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11791 	I40E_WRITE_FLUSH(hw);
11792 
11793 	return 0;
11794 }
11795 
11796 /**
11797  * Check whether a register offset may be read directly on this MAC type.
11798  * The register ranges below are valid on X722 only:
11799  * 0x2b800--0x2bb00
11800  * 0x38700--0x38a00
11801  * 0x3d800--0x3db00
11802  * 0x208e00--0x209000
11803  * 0x20be00--0x20c000
11804  * 0x263c00--0x264000
11805  * 0x265c00--0x266000
11806  */
11807 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11808 {
11809 	if ((type != I40E_MAC_X722) &&
11810 	    ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11811 	     (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11812 	     (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11813 	     (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11814 	     (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11815 	     (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11816 	     (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11817 		return 0;
11818 	else
11819 		return 1;
11820 }
11821 
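/* Register dump callback. With a NULL data pointer only the required buffer
 * length and register width are reported. Otherwise, registers listed in
 * i40e_regs_adminq[] are read via i40e_read_rx_ctl(), the remaining ones in
 * i40e_regs_others[] via direct reads, with offsets that are invalid for the
 * current MAC type reported as 0.
 */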
11822 static int i40e_get_regs(struct rte_eth_dev *dev,
11823 			 struct rte_dev_reg_info *regs)
11824 {
11825 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11826 	uint32_t *ptr_data = regs->data;
11827 	uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11828 	const struct i40e_reg_info *reg_info;
11829 
11830 	if (ptr_data == NULL) {
11831 		regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11832 		regs->width = sizeof(uint32_t);
11833 		return 0;
11834 	}
11835 
11836 	/* The first few registers have to be read using AQ operations */
11837 	reg_idx = 0;
11838 	while (i40e_regs_adminq[reg_idx].name) {
11839 		reg_info = &i40e_regs_adminq[reg_idx++];
11840 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11841 			for (arr_idx2 = 0;
11842 					arr_idx2 <= reg_info->count2;
11843 					arr_idx2++) {
11844 				reg_offset = arr_idx * reg_info->stride1 +
11845 					arr_idx2 * reg_info->stride2;
11846 				reg_offset += reg_info->base_addr;
11847 				ptr_data[reg_offset >> 2] =
11848 					i40e_read_rx_ctl(hw, reg_offset);
11849 			}
11850 	}
11851 
11852 	/* The remaining registers can be read using primitives */
11853 	reg_idx = 0;
11854 	while (i40e_regs_others[reg_idx].name) {
11855 		reg_info = &i40e_regs_others[reg_idx++];
11856 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11857 			for (arr_idx2 = 0;
11858 					arr_idx2 <= reg_info->count2;
11859 					arr_idx2++) {
11860 				reg_offset = arr_idx * reg_info->stride1 +
11861 					arr_idx2 * reg_info->stride2;
11862 				reg_offset += reg_info->base_addr;
11863 				if (!i40e_valid_regs(hw->mac.type, reg_offset))
11864 					ptr_data[reg_offset >> 2] = 0;
11865 				else
11866 					ptr_data[reg_offset >> 2] =
11867 						I40E_READ_REG(hw, reg_offset);
11868 			}
11869 	}
11870 
11871 	return 0;
11872 }
11873 
11874 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11875 {
11876 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11877 
11878 	/* Convert word count to byte count */
11879 	return hw->nvm.sr_size << 1;
11880 }
11881 
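/* Read 'eeprom->length' bytes of NVM content starting at 'eeprom->offset'.
 * Offset and length are converted to 16-bit words, bounds-checked against
 * the shadow RAM size and fetched with i40e_read_nvm_buffer().
 */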
11882 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11883 			   struct rte_dev_eeprom_info *eeprom)
11884 {
11885 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11886 	uint16_t *data = eeprom->data;
11887 	uint16_t offset, length, cnt_words;
11888 	int ret_code;
11889 
11890 	offset = eeprom->offset >> 1;
11891 	length = eeprom->length >> 1;
11892 	cnt_words = length;
11893 
11894 	if (offset > hw->nvm.sr_size ||
11895 		offset + length > hw->nvm.sr_size) {
11896 		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11897 		return -EINVAL;
11898 	}
11899 
11900 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11901 
11902 	ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11903 	if (ret_code != I40E_SUCCESS || cnt_words != length) {
11904 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
11905 		return -EIO;
11906 	}
11907 
11908 	return 0;
11909 }
11910 
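/* Identify the plugged module (SFP / QSFP+ / QSFP28) through the AQ PHY
 * register access and report the matching SFF EEPROM layout
 * (SFF-8079/8472/8436/8636) and its length.
 */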
11911 static int i40e_get_module_info(struct rte_eth_dev *dev,
11912 				struct rte_eth_dev_module_info *modinfo)
11913 {
11914 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11915 	uint32_t sff8472_comp = 0;
11916 	uint32_t sff8472_swap = 0;
11917 	uint32_t sff8636_rev = 0;
11918 	i40e_status status;
11919 	uint32_t type = 0;
11920 
11921 	/* Check if firmware supports reading module EEPROM. */
11922 	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11923 		PMD_DRV_LOG(ERR,
11924 			    "Module EEPROM memory read not supported. "
11925 			    "Please update the NVM image.\n");
11926 		return -EINVAL;
11927 	}
11928 
11929 	status = i40e_update_link_info(hw);
11930 	if (status)
11931 		return -EIO;
11932 
11933 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11934 		PMD_DRV_LOG(ERR,
11935 			    "Cannot read module EEPROM memory. "
11936 			    "No module connected.\n");
11937 		return -EINVAL;
11938 	}
11939 
11940 	type = hw->phy.link_info.module_type[0];
11941 
11942 	switch (type) {
11943 	case I40E_MODULE_TYPE_SFP:
11944 		status = i40e_aq_get_phy_register(hw,
11945 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11946 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11947 				I40E_MODULE_SFF_8472_COMP,
11948 				&sff8472_comp, NULL);
11949 		if (status)
11950 			return -EIO;
11951 
11952 		status = i40e_aq_get_phy_register(hw,
11953 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11954 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11955 				I40E_MODULE_SFF_8472_SWAP,
11956 				&sff8472_swap, NULL);
11957 		if (status)
11958 			return -EIO;
11959 
11960 		/* Check if the module requires address swap to access
11961 		 * the other EEPROM memory page.
11962 		 */
11963 		if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11964 			PMD_DRV_LOG(WARNING,
11965 				    "Module address swap to access "
11966 				    "page 0xA2 is not supported.\n");
11967 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11968 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11969 		} else if (sff8472_comp == 0x00) {
11970 			/* Module is not SFF-8472 compliant */
11971 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11972 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11973 		} else {
11974 			modinfo->type = RTE_ETH_MODULE_SFF_8472;
11975 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11976 		}
11977 		break;
11978 	case I40E_MODULE_TYPE_QSFP_PLUS:
11979 		/* Read from memory page 0. */
11980 		status = i40e_aq_get_phy_register(hw,
11981 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11982 				0, 1,
11983 				I40E_MODULE_REVISION_ADDR,
11984 				&sff8636_rev, NULL);
11985 		if (status)
11986 			return -EIO;
11987 		/* Determine revision compliance byte */
11988 		if (sff8636_rev > 0x02) {
11989 			/* Module is SFF-8636 compliant */
11990 			modinfo->type = RTE_ETH_MODULE_SFF_8636;
11991 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11992 		} else {
11993 			modinfo->type = RTE_ETH_MODULE_SFF_8436;
11994 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11995 		}
11996 		break;
11997 	case I40E_MODULE_TYPE_QSFP28:
11998 		modinfo->type = RTE_ETH_MODULE_SFF_8636;
11999 		modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
12000 		break;
12001 	default:
12002 		PMD_DRV_LOG(ERR, "Module type unrecognized\n");
12003 		return -EINVAL;
12004 	}
12005 	return 0;
12006 }
12007 
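/* Read the module EEPROM byte by byte through the AQ PHY register access,
 * selecting the proper I2C device address or memory page for SFP vs. QSFP.
 */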
12008 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
12009 				  struct rte_dev_eeprom_info *info)
12010 {
12011 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12012 	bool is_sfp = false;
12013 	i40e_status status;
12014 	uint8_t *data;
12015 	uint32_t value = 0;
12016 	uint32_t i;
12017 
12018 	if (!info || !info->length || !info->data)
12019 		return -EINVAL;
12020 
12021 	if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
12022 		is_sfp = true;
12023 
12024 	data = info->data;
12025 	for (i = 0; i < info->length; i++) {
12026 		u32 offset = i + info->offset;
12027 		u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
12028 
12029 		/* Check if we need to access the other memory page */
12030 		if (is_sfp) {
12031 			if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
12032 				offset -= RTE_ETH_MODULE_SFF_8079_LEN;
12033 				addr = I40E_I2C_EEPROM_DEV_ADDR2;
12034 			}
12035 		} else {
12036 			while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
12037 				/* Compute memory page number and offset. */
12038 				offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
12039 				addr++;
12040 			}
12041 		}
12042 		status = i40e_aq_get_phy_register(hw,
12043 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
12044 				addr, offset, 1, &value, NULL);
12045 		if (status)
12046 			return -EIO;
12047 		data[i] = (uint8_t)value;
12048 	}
12049 	return 0;
12050 }
12051 
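/* Replace the default MAC address: swap the MAC filter on the main VSI and
 * program the new locally administered address into the firmware.
 */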
12052 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
12053 				     struct rte_ether_addr *mac_addr)
12054 {
12055 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
12056 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12057 	struct i40e_vsi *vsi = pf->main_vsi;
12058 	struct i40e_mac_filter_info mac_filter;
12059 	struct i40e_mac_filter *f;
12060 	int ret;
12061 
12062 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
12063 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
12064 		return -EINVAL;
12065 	}
12066 
12067 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
12068 		if (rte_is_same_ether_addr(&pf->dev_addr,
12069 						&f->mac_info.mac_addr))
12070 			break;
12071 	}
12072 
12073 	if (f == NULL) {
12074 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
12075 		return -EIO;
12076 	}
12077 
12078 	mac_filter = f->mac_info;
12079 	ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
12080 	if (ret != I40E_SUCCESS) {
12081 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
12082 		return -EIO;
12083 	}
12084 	memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
12085 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
12086 	if (ret != I40E_SUCCESS) {
12087 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
12088 		return -EIO;
12089 	}
12090 	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
12091 
12092 	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
12093 					mac_addr->addr_bytes, NULL);
12094 	if (ret != I40E_SUCCESS) {
12095 		PMD_DRV_LOG(ERR, "Failed to change mac");
12096 		return -EIO;
12097 	}
12098 
12099 	return 0;
12100 }
12101 
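/* Update the port MTU. The port must be stopped; the resulting frame size
 * (MTU plus I40E_ETH_OVERHEAD for the Ethernet header, CRC and VLAN tags)
 * must not exceed I40E_FRAME_SIZE_MAX, and the jumbo-frame Rx offload flag
 * is toggled when the frame size crosses RTE_ETHER_MAX_LEN.
 */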
12102 static int
12103 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
12104 {
12105 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12106 	struct rte_eth_dev_data *dev_data = pf->dev_data;
12107 	uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
12108 	int ret = 0;
12109 
12110 	/* check if mtu is within the allowed range */
12111 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
12112 		return -EINVAL;
12113 
12114 	/* MTU changes are forbidden while the port is started */
12115 	if (dev_data->dev_started) {
12116 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
12117 			    dev_data->port_id);
12118 		return -EBUSY;
12119 	}
12120 
12121 	if (frame_size > RTE_ETHER_MAX_LEN)
12122 		dev_data->dev_conf.rxmode.offloads |=
12123 			DEV_RX_OFFLOAD_JUMBO_FRAME;
12124 	else
12125 		dev_data->dev_conf.rxmode.offloads &=
12126 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
12127 
12128 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
12129 
12130 	return ret;
12131 }
12132 
12133 /* Restore ethertype filter */
12134 static void
12135 i40e_ethertype_filter_restore(struct i40e_pf *pf)
12136 {
12137 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12138 	struct i40e_ethertype_filter_list
12139 		*ethertype_list = &pf->ethertype.ethertype_list;
12140 	struct i40e_ethertype_filter *f;
12141 	struct i40e_control_filter_stats stats = {0};
12142 	uint16_t flags;
12143 
12144 	TAILQ_FOREACH(f, ethertype_list, rules) {
12145 		flags = 0;
12146 		if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
12147 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
12148 		if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
12149 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
12150 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
12151 
12152 		memset(&stats, 0, sizeof(stats));
12153 		i40e_aq_add_rem_control_packet_filter(hw,
12154 					    f->input.mac_addr.addr_bytes,
12155 					    f->input.ether_type,
12156 					    flags, pf->main_vsi->seid,
12157 					    f->queue, 1, &stats, NULL);
12158 	}
12159 	PMD_DRV_LOG(INFO, "Ethertype filter:"
12160 		    " mac_etype_used = %u, etype_used = %u,"
12161 		    " mac_etype_free = %u, etype_free = %u",
12162 		    stats.mac_etype_used, stats.etype_used,
12163 		    stats.mac_etype_free, stats.etype_free);
12164 }
12165 
12166 /* Restore tunnel filter */
12167 static void
12168 i40e_tunnel_filter_restore(struct i40e_pf *pf)
12169 {
12170 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12171 	struct i40e_vsi *vsi;
12172 	struct i40e_pf_vf *vf;
12173 	struct i40e_tunnel_filter_list
12174 		*tunnel_list = &pf->tunnel.tunnel_list;
12175 	struct i40e_tunnel_filter *f;
12176 	struct i40e_aqc_cloud_filters_element_bb cld_filter;
12177 	bool big_buffer = false;
12178 
12179 	TAILQ_FOREACH(f, tunnel_list, rules) {
12180 		if (!f->is_to_vf)
12181 			vsi = pf->main_vsi;
12182 		else {
12183 			vf = &pf->vfs[f->vf_id];
12184 			vsi = vf->vsi;
12185 		}
12186 		memset(&cld_filter, 0, sizeof(cld_filter));
12187 		rte_ether_addr_copy((struct rte_ether_addr *)
12188 				&f->input.outer_mac,
12189 			(struct rte_ether_addr *)&cld_filter.element.outer_mac);
12190 		rte_ether_addr_copy((struct rte_ether_addr *)
12191 				&f->input.inner_mac,
12192 			(struct rte_ether_addr *)&cld_filter.element.inner_mac);
12193 		cld_filter.element.inner_vlan = f->input.inner_vlan;
12194 		cld_filter.element.flags = f->input.flags;
12195 		cld_filter.element.tenant_id = f->input.tenant_id;
12196 		cld_filter.element.queue_number = f->queue;
12197 		rte_memcpy(cld_filter.general_fields,
12198 			   f->input.general_fields,
12199 			   sizeof(f->input.general_fields));
12200 
12201 		if (((f->input.flags &
12202 		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
12203 		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
12204 		    ((f->input.flags &
12205 		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
12206 		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
12207 		    ((f->input.flags &
12208 		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
12209 		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
12210 			big_buffer = 1;
12211 
12212 		if (big_buffer)
12213 			i40e_aq_add_cloud_filters_bb(hw,
12214 					vsi->seid, &cld_filter, 1);
12215 		else
12216 			i40e_aq_add_cloud_filters(hw, vsi->seid,
12217 						  &cld_filter.element, 1);
12218 	}
12219 }
12220 
12221 /* Restore rss filter */
12222 static inline void
12223 i40e_rss_filter_restore(struct i40e_pf *pf)
12224 {
12225 	struct i40e_rte_flow_rss_conf *conf = &pf->rss_info;
12226 
12227 	if (conf->conf.queue_num)
12228 		i40e_config_rss_filter(pf, conf, TRUE);
12229 }
12230 
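/* Re-program all software-tracked filters (ethertype, tunnel, flow director
 * and RSS) into the hardware, e.g. after the device has been restarted.
 */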
12231 static void
12232 i40e_filter_restore(struct i40e_pf *pf)
12233 {
12234 	i40e_ethertype_filter_restore(pf);
12235 	i40e_tunnel_filter_restore(pf);
12236 	i40e_fdir_filter_restore(pf);
12237 	i40e_rss_filter_restore(pf);
12238 }
12239 
12240 bool
12241 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
12242 {
12243 	if (strcmp(dev->device->driver->name, drv->driver.name))
12244 		return false;
12245 
12246 	return true;
12247 }
12248 
12249 bool
12250 is_i40e_supported(struct rte_eth_dev *dev)
12251 {
12252 	return is_device_supported(dev, &rte_i40e_pmd);
12253 }
12254 
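/* Look up a customized (DDP-defined) pctype entry by its index, e.g.
 * I40E_CUSTOMIZED_GTPU; returns NULL if the index is not known.
 */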
12255 struct i40e_customized_pctype*
12256 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
12257 {
12258 	int i;
12259 
12260 	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
12261 		if (pf->customized_pctype[i].index == index)
12262 			return &pf->customized_pctype[i];
12263 	}
12264 	return NULL;
12265 }
12266 
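/* Walk the pctypes described in a DDP package and mark the matching
 * customized pctype entries (GTP-C/GTP-U) as valid or invalid, depending on
 * whether the package is being added or removed.
 */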
12267 static int
12268 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
12269 			      uint32_t pkg_size, uint32_t proto_num,
12270 			      struct rte_pmd_i40e_proto_info *proto,
12271 			      enum rte_pmd_i40e_package_op op)
12272 {
12273 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12274 	uint32_t pctype_num;
12275 	struct rte_pmd_i40e_ptype_info *pctype;
12276 	uint32_t buff_size;
12277 	struct i40e_customized_pctype *new_pctype = NULL;
12278 	uint8_t proto_id;
12279 	uint8_t pctype_value;
12280 	char name[64];
12281 	uint32_t i, j, n;
12282 	int ret;
12283 
12284 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12285 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12286 		PMD_DRV_LOG(ERR, "Unsupported operation.");
12287 		return -1;
12288 	}
12289 
12290 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12291 				(uint8_t *)&pctype_num, sizeof(pctype_num),
12292 				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
12293 	if (ret) {
12294 		PMD_DRV_LOG(ERR, "Failed to get pctype number");
12295 		return -1;
12296 	}
12297 	if (!pctype_num) {
12298 		PMD_DRV_LOG(INFO, "No new pctype added");
12299 		return -1;
12300 	}
12301 
12302 	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
12303 	pctype = rte_zmalloc("new_pctype", buff_size, 0);
12304 	if (!pctype) {
12305 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
12306 		return -1;
12307 	}
12308 	/* get information about new pctype list */
12309 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12310 					(uint8_t *)pctype, buff_size,
12311 					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
12312 	if (ret) {
12313 		PMD_DRV_LOG(ERR, "Failed to get pctype list");
12314 		rte_free(pctype);
12315 		return -1;
12316 	}
12317 
12318 	/* Update customized pctype. */
12319 	for (i = 0; i < pctype_num; i++) {
12320 		pctype_value = pctype[i].ptype_id;
12321 		memset(name, 0, sizeof(name));
12322 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12323 			proto_id = pctype[i].protocols[j];
12324 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12325 				continue;
12326 			for (n = 0; n < proto_num; n++) {
12327 				if (proto[n].proto_id != proto_id)
12328 					continue;
12329 				strlcat(name, proto[n].name, sizeof(name));
12330 				strlcat(name, "_", sizeof(name));
12331 				break;
12332 			}
12333 		}
12334 		if (name[0]) name[strlen(name) - 1] = '\0';
12335 		if (!strcmp(name, "GTPC"))
12336 			new_pctype =
12337 				i40e_find_customized_pctype(pf,
12338 						      I40E_CUSTOMIZED_GTPC);
12339 		else if (!strcmp(name, "GTPU_IPV4"))
12340 			new_pctype =
12341 				i40e_find_customized_pctype(pf,
12342 						   I40E_CUSTOMIZED_GTPU_IPV4);
12343 		else if (!strcmp(name, "GTPU_IPV6"))
12344 			new_pctype =
12345 				i40e_find_customized_pctype(pf,
12346 						   I40E_CUSTOMIZED_GTPU_IPV6);
12347 		else if (!strcmp(name, "GTPU"))
12348 			new_pctype =
12349 				i40e_find_customized_pctype(pf,
12350 						      I40E_CUSTOMIZED_GTPU);
12351 		if (new_pctype) {
12352 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12353 				new_pctype->pctype = pctype_value;
12354 				new_pctype->valid = true;
12355 			} else {
12356 				new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12357 				new_pctype->valid = false;
12358 			}
12359 		}
12360 	}
12361 
12362 	rte_free(pctype);
12363 	return 0;
12364 }
12365 
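/* Build a hardware-to-software ptype mapping from the protocols described in
 * a DDP package and install it with rte_pmd_i40e_ptype_mapping_update();
 * removing a package simply resets the mapping for this port.
 */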
12366 static int
12367 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12368 			     uint32_t pkg_size, uint32_t proto_num,
12369 			     struct rte_pmd_i40e_proto_info *proto,
12370 			     enum rte_pmd_i40e_package_op op)
12371 {
12372 	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12373 	uint16_t port_id = dev->data->port_id;
12374 	uint32_t ptype_num;
12375 	struct rte_pmd_i40e_ptype_info *ptype;
12376 	uint32_t buff_size;
12377 	uint8_t proto_id;
12378 	char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12379 	uint32_t i, j, n;
12380 	bool in_tunnel;
12381 	int ret;
12382 
12383 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12384 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12385 		PMD_DRV_LOG(ERR, "Unsupported operation.");
12386 		return -1;
12387 	}
12388 
12389 	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12390 		rte_pmd_i40e_ptype_mapping_reset(port_id);
12391 		return 0;
12392 	}
12393 
12394 	/* get information about new ptype num */
12395 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12396 				(uint8_t *)&ptype_num, sizeof(ptype_num),
12397 				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12398 	if (ret) {
12399 		PMD_DRV_LOG(ERR, "Failed to get ptype number");
12400 		return ret;
12401 	}
12402 	if (!ptype_num) {
12403 		PMD_DRV_LOG(INFO, "No new ptype added");
12404 		return -1;
12405 	}
12406 
12407 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12408 	ptype = rte_zmalloc("new_ptype", buff_size, 0);
12409 	if (!ptype) {
12410 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
12411 		return -1;
12412 	}
12413 
12414 	/* get information about new ptype list */
12415 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12416 					(uint8_t *)ptype, buff_size,
12417 					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12418 	if (ret) {
12419 		PMD_DRV_LOG(ERR, "Failed to get ptype list");
12420 		rte_free(ptype);
12421 		return ret;
12422 	}
12423 
12424 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12425 	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12426 	if (!ptype_mapping) {
12427 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
12428 		rte_free(ptype);
12429 		return -1;
12430 	}
12431 
12432 	/* Update ptype mapping table. */
12433 	for (i = 0; i < ptype_num; i++) {
12434 		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12435 		ptype_mapping[i].sw_ptype = 0;
12436 		in_tunnel = false;
12437 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12438 			proto_id = ptype[i].protocols[j];
12439 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12440 				continue;
12441 			for (n = 0; n < proto_num; n++) {
12442 				if (proto[n].proto_id != proto_id)
12443 					continue;
12444 				memset(name, 0, sizeof(name));
12445 				strlcpy(name, proto[n].name, sizeof(name));
12446 				if (!strncasecmp(name, "PPPOE", 5))
12447 					ptype_mapping[i].sw_ptype |=
12448 						RTE_PTYPE_L2_ETHER_PPPOE;
12449 				else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12450 					 !in_tunnel) {
12451 					ptype_mapping[i].sw_ptype |=
12452 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12453 					ptype_mapping[i].sw_ptype |=
12454 						RTE_PTYPE_L4_FRAG;
12455 				} else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12456 					   in_tunnel) {
12457 					ptype_mapping[i].sw_ptype |=
12458 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12459 					ptype_mapping[i].sw_ptype |=
12460 						RTE_PTYPE_INNER_L4_FRAG;
12461 				} else if (!strncasecmp(name, "OIPV4", 5)) {
12462 					ptype_mapping[i].sw_ptype |=
12463 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12464 					in_tunnel = true;
12465 				} else if (!strncasecmp(name, "IPV4", 4) &&
12466 					   !in_tunnel)
12467 					ptype_mapping[i].sw_ptype |=
12468 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12469 				else if (!strncasecmp(name, "IPV4", 4) &&
12470 					 in_tunnel)
12471 					ptype_mapping[i].sw_ptype |=
12472 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12473 				else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12474 					 !in_tunnel) {
12475 					ptype_mapping[i].sw_ptype |=
12476 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12477 					ptype_mapping[i].sw_ptype |=
12478 						RTE_PTYPE_L4_FRAG;
12479 				} else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12480 					   in_tunnel) {
12481 					ptype_mapping[i].sw_ptype |=
12482 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12483 					ptype_mapping[i].sw_ptype |=
12484 						RTE_PTYPE_INNER_L4_FRAG;
12485 				} else if (!strncasecmp(name, "OIPV6", 5)) {
12486 					ptype_mapping[i].sw_ptype |=
12487 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12488 					in_tunnel = true;
12489 				} else if (!strncasecmp(name, "IPV6", 4) &&
12490 					   !in_tunnel)
12491 					ptype_mapping[i].sw_ptype |=
12492 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12493 				else if (!strncasecmp(name, "IPV6", 4) &&
12494 					 in_tunnel)
12495 					ptype_mapping[i].sw_ptype |=
12496 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12497 				else if (!strncasecmp(name, "UDP", 3) &&
12498 					 !in_tunnel)
12499 					ptype_mapping[i].sw_ptype |=
12500 						RTE_PTYPE_L4_UDP;
12501 				else if (!strncasecmp(name, "UDP", 3) &&
12502 					 in_tunnel)
12503 					ptype_mapping[i].sw_ptype |=
12504 						RTE_PTYPE_INNER_L4_UDP;
12505 				else if (!strncasecmp(name, "TCP", 3) &&
12506 					 !in_tunnel)
12507 					ptype_mapping[i].sw_ptype |=
12508 						RTE_PTYPE_L4_TCP;
12509 				else if (!strncasecmp(name, "TCP", 3) &&
12510 					 in_tunnel)
12511 					ptype_mapping[i].sw_ptype |=
12512 						RTE_PTYPE_INNER_L4_TCP;
12513 				else if (!strncasecmp(name, "SCTP", 4) &&
12514 					 !in_tunnel)
12515 					ptype_mapping[i].sw_ptype |=
12516 						RTE_PTYPE_L4_SCTP;
12517 				else if (!strncasecmp(name, "SCTP", 4) &&
12518 					 in_tunnel)
12519 					ptype_mapping[i].sw_ptype |=
12520 						RTE_PTYPE_INNER_L4_SCTP;
12521 				else if ((!strncasecmp(name, "ICMP", 4) ||
12522 					  !strncasecmp(name, "ICMPV6", 6)) &&
12523 					 !in_tunnel)
12524 					ptype_mapping[i].sw_ptype |=
12525 						RTE_PTYPE_L4_ICMP;
12526 				else if ((!strncasecmp(name, "ICMP", 4) ||
12527 					  !strncasecmp(name, "ICMPV6", 6)) &&
12528 					 in_tunnel)
12529 					ptype_mapping[i].sw_ptype |=
12530 						RTE_PTYPE_INNER_L4_ICMP;
12531 				else if (!strncasecmp(name, "GTPC", 4)) {
12532 					ptype_mapping[i].sw_ptype |=
12533 						RTE_PTYPE_TUNNEL_GTPC;
12534 					in_tunnel = true;
12535 				} else if (!strncasecmp(name, "GTPU", 4)) {
12536 					ptype_mapping[i].sw_ptype |=
12537 						RTE_PTYPE_TUNNEL_GTPU;
12538 					in_tunnel = true;
12539 				} else if (!strncasecmp(name, "GRENAT", 6)) {
12540 					ptype_mapping[i].sw_ptype |=
12541 						RTE_PTYPE_TUNNEL_GRENAT;
12542 					in_tunnel = true;
12543 				} else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12544 					   !strncasecmp(name, "L2TPV2", 6)) {
12545 					ptype_mapping[i].sw_ptype |=
12546 						RTE_PTYPE_TUNNEL_L2TP;
12547 					in_tunnel = true;
12548 				}
12549 
12550 				break;
12551 			}
12552 		}
12553 	}
12554 
12555 	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12556 						ptype_num, 0);
12557 	if (ret)
12558 		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
12559 
12560 	rte_free(ptype_mapping);
12561 	rte_free(ptype);
12562 	return ret;
12563 }
12564 
12565 void
12566 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12567 			    uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12568 {
12569 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12570 	uint32_t proto_num;
12571 	struct rte_pmd_i40e_proto_info *proto;
12572 	uint32_t buff_size;
12573 	uint32_t i;
12574 	int ret;
12575 
12576 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12577 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12578 		PMD_DRV_LOG(ERR, "Unsupported operation.");
12579 		return;
12580 	}
12581 
12582 	/* get information about protocol number */
12583 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12584 				       (uint8_t *)&proto_num, sizeof(proto_num),
12585 				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12586 	if (ret) {
12587 		PMD_DRV_LOG(ERR, "Failed to get protocol number");
12588 		return;
12589 	}
12590 	if (!proto_num) {
12591 		PMD_DRV_LOG(INFO, "No new protocol added");
12592 		return;
12593 	}
12594 
12595 	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12596 	proto = rte_zmalloc("new_proto", buff_size, 0);
12597 	if (!proto) {
12598 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
12599 		return;
12600 	}
12601 
12602 	/* get information about protocol list */
12603 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12604 					(uint8_t *)proto, buff_size,
12605 					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12606 	if (ret) {
12607 		PMD_DRV_LOG(ERR, "Failed to get protocol list");
12608 		rte_free(proto);
12609 		return;
12610 	}
12611 
12612 	/* Check if GTP is supported. */
12613 	for (i = 0; i < proto_num; i++) {
12614 		if (!strncmp(proto[i].name, "GTP", 3)) {
12615 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12616 				pf->gtp_support = true;
12617 			else
12618 				pf->gtp_support = false;
12619 			break;
12620 		}
12621 	}
12622 
12623 	/* Update customized pctype info */
12624 	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12625 					    proto_num, proto, op);
12626 	if (ret)
12627 		PMD_DRV_LOG(INFO, "No pctype is updated.");
12628 
12629 	/* Update customized ptype info */
12630 	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12631 					   proto_num, proto, op);
12632 	if (ret)
12633 		PMD_DRV_LOG(INFO, "No ptype is updated.");
12634 
12635 	rte_free(proto);
12636 }
12637 
12638 /* Create a QinQ cloud filter
12639  *
12640  * The Fortville NIC has limited resources for tunnel filters,
12641  * so we can only reuse existing filters.
12642  *
12643  * In step 1 we define which Field Vector fields can be used for
12644  * filter types.
12645  * As we do not have the inner tag defined as a field,
12646  * we have to define it first by reusing one of the L1 entries.
12647  *
12648  * In step 2 we are replacing one of existing filter types with
12649  * a new one for QinQ.
12650  * As we are reusing an L1 entry and replacing an L2 entry, some of the
12651  * default filter types will disappear, depending on which entries we reuse.
12652  *
12653  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12654  *
12655  * 1.	Create L1 filter of outer vlan (12b) which will be in use
12656  *		later when we define the cloud filter.
12657  *	a.	Valid_flags.replace_cloud = 0
12658  *	b.	Old_filter = 10 (Stag_Inner_Vlan)
12659  *	c.	New_filter = 0x10
12660  *	d.	TR bit = 0xff (optional, not used here)
12661  *	e.	Buffer – 2 entries:
12662  *		i.	Byte 0 = 8 (outer vlan FV index).
12663  *			Byte 1 = 0 (rsv)
12664  *			Byte 2-3 = 0x0fff
12665  *		ii.	Byte 0 = 37 (inner vlan FV index).
12666  *			Byte 1 = 0 (rsv)
12667  *			Byte 2-3 = 0x0fff
12668  *
12669  * Step 2:
12670  * 2.	Create cloud filter using two L1 filters entries: stag and
12671  *		new filter(outer vlan+ inner vlan)
12672  *	a.	Valid_flags.replace_cloud = 1
12673  *	b.	Old_filter = 1 (instead of outer IP)
12674  *	c.	New_filter = 0x10
12675  *	d.	Buffer – 2 entries:
12676  *		i.	Byte 0 = 0x80 | 7 (valid | Stag).
12677  *			Byte 1-3 = 0 (rsv)
12678  *		ii.	Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12679  *			Byte 9-11 = 0 (rsv)
12680  */
12681 static int
12682 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12683 {
12684 	int ret = -ENOTSUP;
12685 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12686 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12687 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12688 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
12689 
12690 	if (pf->support_multi_driver) {
12691 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12692 		return ret;
12693 	}
12694 
12695 	/* Init */
12696 	memset(&filter_replace, 0,
12697 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12698 	memset(&filter_replace_buf, 0,
12699 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12700 
12701 	/* create L1 filter */
12702 	filter_replace.old_filter_type =
12703 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12704 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12705 	filter_replace.tr_bit = 0;
12706 
12707 	/* Prepare the buffer, 2 entries */
12708 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12709 	filter_replace_buf.data[0] |=
12710 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12711 	/* Field Vector 12b mask */
12712 	filter_replace_buf.data[2] = 0xff;
12713 	filter_replace_buf.data[3] = 0x0f;
12714 	filter_replace_buf.data[4] =
12715 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12716 	filter_replace_buf.data[4] |=
12717 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12718 	/* Field Vector 12b mask */
12719 	filter_replace_buf.data[6] = 0xff;
12720 	filter_replace_buf.data[7] = 0x0f;
12721 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12722 			&filter_replace_buf);
12723 	if (ret != I40E_SUCCESS)
12724 		return ret;
12725 
12726 	if (filter_replace.old_filter_type !=
12727 	    filter_replace.new_filter_type)
12728 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12729 			    " original: 0x%x, new: 0x%x",
12730 			    dev->device->name,
12731 			    filter_replace.old_filter_type,
12732 			    filter_replace.new_filter_type);
12733 
12734 	/* Apply the second L2 cloud filter */
12735 	memset(&filter_replace, 0,
12736 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12737 	memset(&filter_replace_buf, 0,
12738 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12739 
12740 	/* create L2 filter; the L1 filter created above is used as its input */
12741 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12742 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12743 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12744 
12745 	/* Prepare the buffer, 2 entries */
12746 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12747 	filter_replace_buf.data[0] |=
12748 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12749 	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12750 	filter_replace_buf.data[4] |=
12751 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12752 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12753 			&filter_replace_buf);
12754 	if (!ret && (filter_replace.old_filter_type !=
12755 		     filter_replace.new_filter_type))
12756 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12757 			    " original: 0x%x, new: 0x%x",
12758 			    dev->device->name,
12759 			    filter_replace.old_filter_type,
12760 			    filter_replace.new_filter_type);
12761 
12762 	return ret;
12763 }
12764 
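/* Copy an rte_flow RSS action into the driver-private RSS configuration,
 * duplicating the key and queue arrays so they stay valid after the flow
 * has been created.
 *
 * Illustrative (hypothetical) caller sketch, variable names are made up:
 *
 *	struct i40e_rte_flow_rss_conf rss_conf;
 *
 *	if (i40e_rss_conf_init(&rss_conf, rss_action) != 0)
 *		return -EINVAL;
 *	return i40e_config_rss_filter(pf, &rss_conf, TRUE);
 */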
12765 int
12766 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
12767 		   const struct rte_flow_action_rss *in)
12768 {
12769 	if (in->key_len > RTE_DIM(out->key) ||
12770 	    in->queue_num > RTE_DIM(out->queue))
12771 		return -EINVAL;
12772 	if (!in->key && in->key_len)
12773 		return -EINVAL;
12774 	out->conf = (struct rte_flow_action_rss){
12775 		.func = in->func,
12776 		.level = in->level,
12777 		.types = in->types,
12778 		.key_len = in->key_len,
12779 		.queue_num = in->queue_num,
12780 		.queue = memcpy(out->queue, in->queue,
12781 				sizeof(*in->queue) * in->queue_num),
12782 	};
12783 	if (in->key)
12784 		out->conf.key = memcpy(out->key, in->key, in->key_len);
12785 	return 0;
12786 }
12787 
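/* Return non-zero when two RSS action configurations match field by field,
 * including the key bytes and the queue list.
 */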
12788 int
12789 i40e_action_rss_same(const struct rte_flow_action_rss *comp,
12790 		     const struct rte_flow_action_rss *with)
12791 {
12792 	return (comp->func == with->func &&
12793 		comp->level == with->level &&
12794 		comp->types == with->types &&
12795 		comp->key_len == with->key_len &&
12796 		comp->queue_num == with->queue_num &&
12797 		!memcmp(comp->key, with->key, with->key_len) &&
12798 		!memcmp(comp->queue, with->queue,
12799 			sizeof(*with->queue) * with->queue_num));
12800 }
12801 
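/* Program (add == true) or tear down (add == false) an rte_flow RSS rule:
 * fill the PF RSS lookup table with the requested queues, apply the hash key
 * and enabled flow types, and remember the configuration in pf->rss_info so
 * it can be restored later.
 */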
12802 int
12803 i40e_config_rss_filter(struct i40e_pf *pf,
12804 		struct i40e_rte_flow_rss_conf *conf, bool add)
12805 {
12806 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12807 	uint32_t i, lut = 0;
12808 	uint16_t j, num;
12809 	struct rte_eth_rss_conf rss_conf = {
12810 		.rss_key = conf->conf.key_len ?
12811 			(void *)(uintptr_t)conf->conf.key : NULL,
12812 		.rss_key_len = conf->conf.key_len,
12813 		.rss_hf = conf->conf.types,
12814 	};
12815 	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
12816 
12817 	if (!add) {
12818 		if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
12819 			i40e_pf_disable_rss(pf);
12820 			memset(rss_info, 0,
12821 				sizeof(struct i40e_rte_flow_rss_conf));
12822 			return 0;
12823 		}
12824 		return -EINVAL;
12825 	}
12826 
12827 	/* If both VMDQ and RSS are enabled, not all PF queues are configured.
12828 	 * The number of queues actually configured for the PF must be calculated.
12829 	 */
12830 	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
12831 		num = i40e_pf_calc_configured_queues_num(pf);
12832 	else
12833 		num = pf->dev_data->nb_rx_queues;
12834 
12835 	num = RTE_MIN(num, conf->conf.queue_num);
12836 	PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
12837 			num);
12838 
12839 	if (num == 0) {
12840 		PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
12841 		return -ENOTSUP;
12842 	}
12843 
12844 	/* Fill in redirection table */
12845 	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
12846 		if (j == num)
12847 			j = 0;
12848 		lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
12849 			hw->func_caps.rss_table_entry_width) - 1));
12850 		if ((i & 3) == 3)
12851 			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
12852 	}
12853 
12854 	if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
12855 		i40e_pf_disable_rss(pf);
12856 		return 0;
12857 	}
12858 	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
12859 		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
12860 		/* Random default keys */
12861 		static uint32_t rss_key_default[] = {0x6b793944,
12862 			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
12863 			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
12864 			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
12865 
12866 		rss_conf.rss_key = (uint8_t *)rss_key_default;
12867 		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
12868 							sizeof(uint32_t);
12869 		PMD_DRV_LOG(INFO,
12870 			"No valid RSS key config for i40e, using default\n");
12871 	}
12872 
12873 	i40e_hw_rss_hash_set(pf, &rss_conf);
12874 
12875 	if (i40e_rss_conf_init(rss_info, &conf->conf))
12876 		return -EINVAL;
12877 
12878 	return 0;
12879 }
12880 
12881 RTE_INIT(i40e_init_log)
12882 {
12883 	i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
12884 	if (i40e_logtype_init >= 0)
12885 		rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
12886 	i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
12887 	if (i40e_logtype_driver >= 0)
12888 		rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
12889 
12890 #ifdef RTE_LIBRTE_I40E_DEBUG_RX
12891 	i40e_logtype_rx = rte_log_register("pmd.net.i40e.rx");
12892 	if (i40e_logtype_rx >= 0)
12893 		rte_log_set_level(i40e_logtype_rx, RTE_LOG_DEBUG);
12894 #endif
12895 
12896 #ifdef RTE_LIBRTE_I40E_DEBUG_TX
12897 	i40e_logtype_tx = rte_log_register("pmd.net.i40e.tx");
12898 	if (i40e_logtype_tx >= 0)
12899 		rte_log_set_level(i40e_logtype_tx, RTE_LOG_DEBUG);
12900 #endif
12901 
12902 #ifdef RTE_LIBRTE_I40E_DEBUG_TX_FREE
12903 	i40e_logtype_tx_free = rte_log_register("pmd.net.i40e.tx_free");
12904 	if (i40e_logtype_tx_free >= 0)
12905 		rte_log_set_level(i40e_logtype_tx_free, RTE_LOG_DEBUG);
12906 #endif
12907 }
12908 
12909 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12910 			      ETH_I40E_FLOATING_VEB_ARG "=1"
12911 			      ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12912 			      ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12913 			      ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
12914 			      ETH_I40E_USE_LATEST_VEC "=0|1");
12915