xref: /dpdk/drivers/net/i40e/i40e_ethdev.c (revision e00a5eaa)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <stdarg.h>
11 #include <inttypes.h>
12 #include <assert.h>
13 
14 #include <rte_common.h>
15 #include <rte_eal.h>
16 #include <rte_string_fns.h>
17 #include <rte_pci.h>
18 #include <rte_bus_pci.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
21 #include <rte_ethdev_pci.h>
22 #include <rte_memzone.h>
23 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_alarm.h>
26 #include <rte_dev.h>
27 #include <rte_eth_ctrl.h>
28 #include <rte_tailq.h>
29 #include <rte_hash_crc.h>
30 
31 #include "i40e_logs.h"
32 #include "base/i40e_prototype.h"
33 #include "base/i40e_adminq_cmd.h"
34 #include "base/i40e_type.h"
35 #include "base/i40e_register.h"
36 #include "base/i40e_dcb.h"
37 #include "i40e_ethdev.h"
38 #include "i40e_rxtx.h"
39 #include "i40e_pf.h"
40 #include "i40e_regs.h"
41 #include "rte_pmd_i40e.h"
42 
43 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
44 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
45 #define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
46 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG	"queue-num-per-vf"
47 #define ETH_I40E_USE_LATEST_VEC	"use-latest-supported-vec"
48 
49 #define I40E_CLEAR_PXE_WAIT_MS     200
50 
51 /* Maximun number of capability elements */
52 #define I40E_MAX_CAP_ELE_NUM       128
53 
54 /* Wait count and interval */
55 #define I40E_CHK_Q_ENA_COUNT       1000
56 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
57 
58 /* Maximun number of VSI */
59 #define I40E_MAX_NUM_VSIS          (384UL)
60 
61 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
62 
63 /* Flow control default timer */
64 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
65 
66 /* Flow control enable fwd bit */
67 #define I40E_PRTMAC_FWD_CTRL   0x00000001
68 
69 /* Receive Packet Buffer size */
70 #define I40E_RXPBSIZE (968 * 1024)
71 
72 /* Kilobytes shift */
73 #define I40E_KILOSHIFT 10
74 
75 /* Flow control default high water */
76 #define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
77 
78 /* Flow control default low water */
79 #define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
80 
81 /* Receive Average Packet Size in Byte*/
82 #define I40E_PACKET_AVERAGE_SIZE 128
83 
84 /* Mask of PF interrupt causes */
85 #define I40E_PFINT_ICR0_ENA_MASK ( \
86 		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
87 		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
88 		I40E_PFINT_ICR0_ENA_GRST_MASK | \
89 		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
90 		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
91 		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
92 		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
93 		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
94 		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
95 
96 #define I40E_FLOW_TYPES ( \
97 	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
98 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
99 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
100 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
101 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
102 	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
103 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
104 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
105 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
106 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
107 	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
108 
109 /* Additional timesync values. */
110 #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
111 #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
112 #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
113 #define I40E_PRTTSYN_TSYNENA     0x80000000
114 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
115 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
116 
117 /**
118  * Below are values for writing un-exposed registers suggested
119  * by silicon experts
120  */
121 /* Destination MAC address */
122 #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
123 /* Source MAC address */
124 #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
125 /* Outer (S-Tag) VLAN tag in the outer L2 header */
126 #define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
127 /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
128 #define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
129 /* Single VLAN tag in the inner L2 header */
130 #define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
131 /* Source IPv4 address */
132 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
133 /* Destination IPv4 address */
134 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
135 /* Source IPv4 address for X722 */
136 #define I40E_X722_REG_INSET_L3_SRC_IP4           0x0006000000000000ULL
137 /* Destination IPv4 address for X722 */
138 #define I40E_X722_REG_INSET_L3_DST_IP4           0x0000060000000000ULL
139 /* IPv4 Protocol for X722 */
140 #define I40E_X722_REG_INSET_L3_IP4_PROTO         0x0010000000000000ULL
141 /* IPv4 Time to Live for X722 */
142 #define I40E_X722_REG_INSET_L3_IP4_TTL           0x0010000000000000ULL
143 /* IPv4 Type of Service (TOS) */
144 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
145 /* IPv4 Protocol */
146 #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
147 /* IPv4 Time to Live */
148 #define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
149 /* Source IPv6 address */
150 #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
151 /* Destination IPv6 address */
152 #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
153 /* IPv6 Traffic Class (TC) */
154 #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
155 /* IPv6 Next Header */
156 #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
157 /* IPv6 Hop Limit */
158 #define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
159 /* Source L4 port */
160 #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
161 /* Destination L4 port */
162 #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
163 /* SCTP verification tag */
164 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
165 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE)*/
166 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
167 /* Source port of tunneling UDP */
168 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
169 /* Destination port of tunneling UDP */
170 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
171 /* UDP Tunneling ID, NVGRE/GRE key */
172 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
173 /* Last ether type */
174 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
175 /* Tunneling outer destination IPv4 address */
176 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
177 /* Tunneling outer destination IPv6 address */
178 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
179 /* 1st word of flex payload */
180 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
181 /* 2nd word of flex payload */
182 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
183 /* 3rd word of flex payload */
184 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
185 /* 4th word of flex payload */
186 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
187 /* 5th word of flex payload */
188 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
189 /* 6th word of flex payload */
190 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
191 /* 7th word of flex payload */
192 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
193 /* 8th word of flex payload */
194 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
195 /* all 8 words flex payload */
196 #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
197 #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
198 
199 #define I40E_TRANSLATE_INSET 0
200 #define I40E_TRANSLATE_REG   1
201 
202 #define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
203 #define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
204 #define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
205 #define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
206 #define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
207 #define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
208 
209 /* PCI offset for querying capability */
210 #define PCI_DEV_CAP_REG            0xA4
211 /* PCI offset for enabling/disabling Extended Tag */
212 #define PCI_DEV_CTRL_REG           0xA8
213 /* Bit mask of Extended Tag capability */
214 #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
215 /* Bit shift of Extended Tag enable/disable */
216 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
217 /* Bit mask of Extended Tag enable/disable */
218 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
219 
220 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
221 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
222 static int i40e_dev_configure(struct rte_eth_dev *dev);
223 static int i40e_dev_start(struct rte_eth_dev *dev);
224 static void i40e_dev_stop(struct rte_eth_dev *dev);
225 static void i40e_dev_close(struct rte_eth_dev *dev);
226 static int  i40e_dev_reset(struct rte_eth_dev *dev);
227 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
228 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
229 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
230 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
231 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
232 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
233 static int i40e_dev_stats_get(struct rte_eth_dev *dev,
234 			       struct rte_eth_stats *stats);
235 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
236 			       struct rte_eth_xstat *xstats, unsigned n);
237 static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
238 				     struct rte_eth_xstat_name *xstats_names,
239 				     unsigned limit);
240 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
241 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
242 					    uint16_t queue_id,
243 					    uint8_t stat_idx,
244 					    uint8_t is_rx);
245 static int i40e_fw_version_get(struct rte_eth_dev *dev,
246 				char *fw_version, size_t fw_size);
247 static void i40e_dev_info_get(struct rte_eth_dev *dev,
248 			      struct rte_eth_dev_info *dev_info);
249 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
250 				uint16_t vlan_id,
251 				int on);
252 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
253 			      enum rte_vlan_type vlan_type,
254 			      uint16_t tpid);
255 static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
256 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
257 				      uint16_t queue,
258 				      int on);
259 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
260 static int i40e_dev_led_on(struct rte_eth_dev *dev);
261 static int i40e_dev_led_off(struct rte_eth_dev *dev);
262 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
263 			      struct rte_eth_fc_conf *fc_conf);
264 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
265 			      struct rte_eth_fc_conf *fc_conf);
266 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
267 				       struct rte_eth_pfc_conf *pfc_conf);
268 static int i40e_macaddr_add(struct rte_eth_dev *dev,
269 			    struct ether_addr *mac_addr,
270 			    uint32_t index,
271 			    uint32_t pool);
272 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
273 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
274 				    struct rte_eth_rss_reta_entry64 *reta_conf,
275 				    uint16_t reta_size);
276 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
277 				   struct rte_eth_rss_reta_entry64 *reta_conf,
278 				   uint16_t reta_size);
279 
280 static int i40e_get_cap(struct i40e_hw *hw);
281 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
282 static int i40e_pf_setup(struct i40e_pf *pf);
283 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
284 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
285 static int i40e_dcb_setup(struct rte_eth_dev *dev);
286 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
287 		bool offset_loaded, uint64_t *offset, uint64_t *stat);
288 static void i40e_stat_update_48(struct i40e_hw *hw,
289 			       uint32_t hireg,
290 			       uint32_t loreg,
291 			       bool offset_loaded,
292 			       uint64_t *offset,
293 			       uint64_t *stat);
294 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
295 static void i40e_dev_interrupt_handler(void *param);
296 static void i40e_dev_alarm_handler(void *param);
297 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
298 				uint32_t base, uint32_t num);
299 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
300 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
301 			uint32_t base);
302 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
303 			uint16_t num);
304 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
305 static int i40e_veb_release(struct i40e_veb *veb);
306 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
307 						struct i40e_vsi *vsi);
308 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
309 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
310 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
311 					     struct i40e_macvlan_filter *mv_f,
312 					     int num,
313 					     uint16_t vlan);
314 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
315 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
316 				    struct rte_eth_rss_conf *rss_conf);
317 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
318 				      struct rte_eth_rss_conf *rss_conf);
319 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
320 					struct rte_eth_udp_tunnel *udp_tunnel);
321 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
322 					struct rte_eth_udp_tunnel *udp_tunnel);
323 static void i40e_filter_input_set_init(struct i40e_pf *pf);
324 static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
325 				enum rte_filter_op filter_op,
326 				void *arg);
327 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
328 				enum rte_filter_type filter_type,
329 				enum rte_filter_op filter_op,
330 				void *arg);
331 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
332 				  struct rte_eth_dcb_info *dcb_info);
333 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
334 static void i40e_configure_registers(struct i40e_hw *hw);
335 static void i40e_hw_init(struct rte_eth_dev *dev);
336 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
337 static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
338 						     uint16_t seid,
339 						     uint16_t rule_type,
340 						     uint16_t *entries,
341 						     uint16_t count,
342 						     uint16_t rule_id);
343 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
344 			struct rte_eth_mirror_conf *mirror_conf,
345 			uint8_t sw_id, uint8_t on);
346 static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
347 
348 static int i40e_timesync_enable(struct rte_eth_dev *dev);
349 static int i40e_timesync_disable(struct rte_eth_dev *dev);
350 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
351 					   struct timespec *timestamp,
352 					   uint32_t flags);
353 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
354 					   struct timespec *timestamp);
355 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
356 
357 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
358 
359 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
360 				   struct timespec *timestamp);
361 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
362 				    const struct timespec *timestamp);
363 
364 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
365 					 uint16_t queue_id);
366 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
367 					  uint16_t queue_id);
368 
369 static int i40e_get_regs(struct rte_eth_dev *dev,
370 			 struct rte_dev_reg_info *regs);
371 
372 static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
373 
374 static int i40e_get_eeprom(struct rte_eth_dev *dev,
375 			   struct rte_dev_eeprom_info *eeprom);
376 
377 static int i40e_get_module_info(struct rte_eth_dev *dev,
378 				struct rte_eth_dev_module_info *modinfo);
379 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
380 				  struct rte_dev_eeprom_info *info);
381 
382 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
383 				      struct ether_addr *mac_addr);
384 
385 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
386 
387 static int i40e_ethertype_filter_convert(
388 	const struct rte_eth_ethertype_filter *input,
389 	struct i40e_ethertype_filter *filter);
390 static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
391 				   struct i40e_ethertype_filter *filter);
392 
393 static int i40e_tunnel_filter_convert(
394 	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
395 	struct i40e_tunnel_filter *tunnel_filter);
396 static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
397 				struct i40e_tunnel_filter *tunnel_filter);
398 static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
399 
400 static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
401 static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
402 static void i40e_filter_restore(struct i40e_pf *pf);
403 static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
404 
405 int i40e_logtype_init;
406 int i40e_logtype_driver;
407 
408 static const char *const valid_keys[] = {
409 	ETH_I40E_FLOATING_VEB_ARG,
410 	ETH_I40E_FLOATING_VEB_LIST_ARG,
411 	ETH_I40E_SUPPORT_MULTI_DRIVER,
412 	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
413 	ETH_I40E_USE_LATEST_VEC,
414 	NULL};
415 
416 static const struct rte_pci_id pci_id_i40e_map[] = {
417 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
418 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
419 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
420 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
421 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
422 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
423 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
424 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
425 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
426 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
427 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
428 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
429 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
430 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
431 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
432 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
433 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
434 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
435 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
436 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
437 	{ .vendor_id = 0, /* sentinel */ },
438 };
439 
440 static const struct eth_dev_ops i40e_eth_dev_ops = {
441 	.dev_configure                = i40e_dev_configure,
442 	.dev_start                    = i40e_dev_start,
443 	.dev_stop                     = i40e_dev_stop,
444 	.dev_close                    = i40e_dev_close,
445 	.dev_reset		      = i40e_dev_reset,
446 	.promiscuous_enable           = i40e_dev_promiscuous_enable,
447 	.promiscuous_disable          = i40e_dev_promiscuous_disable,
448 	.allmulticast_enable          = i40e_dev_allmulticast_enable,
449 	.allmulticast_disable         = i40e_dev_allmulticast_disable,
450 	.dev_set_link_up              = i40e_dev_set_link_up,
451 	.dev_set_link_down            = i40e_dev_set_link_down,
452 	.link_update                  = i40e_dev_link_update,
453 	.stats_get                    = i40e_dev_stats_get,
454 	.xstats_get                   = i40e_dev_xstats_get,
455 	.xstats_get_names             = i40e_dev_xstats_get_names,
456 	.stats_reset                  = i40e_dev_stats_reset,
457 	.xstats_reset                 = i40e_dev_stats_reset,
458 	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
459 	.fw_version_get               = i40e_fw_version_get,
460 	.dev_infos_get                = i40e_dev_info_get,
461 	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
462 	.vlan_filter_set              = i40e_vlan_filter_set,
463 	.vlan_tpid_set                = i40e_vlan_tpid_set,
464 	.vlan_offload_set             = i40e_vlan_offload_set,
465 	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
466 	.vlan_pvid_set                = i40e_vlan_pvid_set,
467 	.rx_queue_start               = i40e_dev_rx_queue_start,
468 	.rx_queue_stop                = i40e_dev_rx_queue_stop,
469 	.tx_queue_start               = i40e_dev_tx_queue_start,
470 	.tx_queue_stop                = i40e_dev_tx_queue_stop,
471 	.rx_queue_setup               = i40e_dev_rx_queue_setup,
472 	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
473 	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
474 	.rx_queue_release             = i40e_dev_rx_queue_release,
475 	.rx_queue_count               = i40e_dev_rx_queue_count,
476 	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
477 	.rx_descriptor_status         = i40e_dev_rx_descriptor_status,
478 	.tx_descriptor_status         = i40e_dev_tx_descriptor_status,
479 	.tx_queue_setup               = i40e_dev_tx_queue_setup,
480 	.tx_queue_release             = i40e_dev_tx_queue_release,
481 	.dev_led_on                   = i40e_dev_led_on,
482 	.dev_led_off                  = i40e_dev_led_off,
483 	.flow_ctrl_get                = i40e_flow_ctrl_get,
484 	.flow_ctrl_set                = i40e_flow_ctrl_set,
485 	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
486 	.mac_addr_add                 = i40e_macaddr_add,
487 	.mac_addr_remove              = i40e_macaddr_remove,
488 	.reta_update                  = i40e_dev_rss_reta_update,
489 	.reta_query                   = i40e_dev_rss_reta_query,
490 	.rss_hash_update              = i40e_dev_rss_hash_update,
491 	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
492 	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
493 	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
494 	.filter_ctrl                  = i40e_dev_filter_ctrl,
495 	.rxq_info_get                 = i40e_rxq_info_get,
496 	.txq_info_get                 = i40e_txq_info_get,
497 	.mirror_rule_set              = i40e_mirror_rule_set,
498 	.mirror_rule_reset            = i40e_mirror_rule_reset,
499 	.timesync_enable              = i40e_timesync_enable,
500 	.timesync_disable             = i40e_timesync_disable,
501 	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
502 	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
503 	.get_dcb_info                 = i40e_dev_get_dcb_info,
504 	.timesync_adjust_time         = i40e_timesync_adjust_time,
505 	.timesync_read_time           = i40e_timesync_read_time,
506 	.timesync_write_time          = i40e_timesync_write_time,
507 	.get_reg                      = i40e_get_regs,
508 	.get_eeprom_length            = i40e_get_eeprom_length,
509 	.get_eeprom                   = i40e_get_eeprom,
510 	.get_module_info              = i40e_get_module_info,
511 	.get_module_eeprom            = i40e_get_module_eeprom,
512 	.mac_addr_set                 = i40e_set_default_mac_addr,
513 	.mtu_set                      = i40e_dev_mtu_set,
514 	.tm_ops_get                   = i40e_tm_ops_get,
515 };
516 
517 /* store statistics names and its offset in stats structure */
518 struct rte_i40e_xstats_name_off {
519 	char name[RTE_ETH_XSTATS_NAME_SIZE];
520 	unsigned offset;
521 };
522 
523 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
524 	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
525 	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
526 	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
527 	{"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
528 	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
529 		rx_unknown_protocol)},
530 	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
531 	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
532 	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
533 	{"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
534 };
535 
536 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
537 		sizeof(rte_i40e_stats_strings[0]))
538 
539 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
540 	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
541 		tx_dropped_link_down)},
542 	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
543 	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
544 		illegal_bytes)},
545 	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
546 	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
547 		mac_local_faults)},
548 	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
549 		mac_remote_faults)},
550 	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
551 		rx_length_errors)},
552 	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
553 	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
554 	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
555 	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
556 	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
557 	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
558 		rx_size_127)},
559 	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
560 		rx_size_255)},
561 	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
562 		rx_size_511)},
563 	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
564 		rx_size_1023)},
565 	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
566 		rx_size_1522)},
567 	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
568 		rx_size_big)},
569 	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
570 		rx_undersize)},
571 	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
572 		rx_oversize)},
573 	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
574 		mac_short_packet_dropped)},
575 	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
576 		rx_fragments)},
577 	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
578 	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
579 	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
580 		tx_size_127)},
581 	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
582 		tx_size_255)},
583 	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
584 		tx_size_511)},
585 	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
586 		tx_size_1023)},
587 	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
588 		tx_size_1522)},
589 	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
590 		tx_size_big)},
591 	{"rx_flow_director_atr_match_packets",
592 		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
593 	{"rx_flow_director_sb_match_packets",
594 		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
595 	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
596 		tx_lpi_status)},
597 	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
598 		rx_lpi_status)},
599 	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
600 		tx_lpi_count)},
601 	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
602 		rx_lpi_count)},
603 };
604 
605 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
606 		sizeof(rte_i40e_hw_port_strings[0]))
607 
608 static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
609 	{"xon_packets", offsetof(struct i40e_hw_port_stats,
610 		priority_xon_rx)},
611 	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
612 		priority_xoff_rx)},
613 };
614 
615 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
616 		sizeof(rte_i40e_rxq_prio_strings[0]))
617 
618 static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
619 	{"xon_packets", offsetof(struct i40e_hw_port_stats,
620 		priority_xon_tx)},
621 	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
622 		priority_xoff_tx)},
623 	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
624 		priority_xon_2_xoff)},
625 };
626 
627 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
628 		sizeof(rte_i40e_txq_prio_strings[0]))
629 
630 static int
631 eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
632 	struct rte_pci_device *pci_dev)
633 {
634 	char name[RTE_ETH_NAME_MAX_LEN];
635 	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
636 	int i, retval;
637 
638 	if (pci_dev->device.devargs) {
639 		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
640 				&eth_da);
641 		if (retval)
642 			return retval;
643 	}
644 
645 	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
646 		sizeof(struct i40e_adapter),
647 		eth_dev_pci_specific_init, pci_dev,
648 		eth_i40e_dev_init, NULL);
649 
650 	if (retval || eth_da.nb_representor_ports < 1)
651 		return retval;
652 
653 	/* probe VF representor ports */
654 	struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
655 		pci_dev->device.name);
656 
657 	if (pf_ethdev == NULL)
658 		return -ENODEV;
659 
660 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
661 		struct i40e_vf_representor representor = {
662 			.vf_id = eth_da.representor_ports[i],
663 			.switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
664 				pf_ethdev->data->dev_private)->switch_domain_id,
665 			.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
666 				pf_ethdev->data->dev_private)
667 		};
668 
669 		/* representor port net_bdf_port */
670 		snprintf(name, sizeof(name), "net_%s_representor_%d",
671 			pci_dev->device.name, eth_da.representor_ports[i]);
672 
673 		retval = rte_eth_dev_create(&pci_dev->device, name,
674 			sizeof(struct i40e_vf_representor), NULL, NULL,
675 			i40e_vf_representor_init, &representor);
676 
677 		if (retval)
678 			PMD_DRV_LOG(ERR, "failed to create i40e vf "
679 				"representor %s.", name);
680 	}
681 
682 	return 0;
683 }
684 
685 static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
686 {
687 	struct rte_eth_dev *ethdev;
688 
689 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
690 	if (!ethdev)
691 		return -ENODEV;
692 
693 
694 	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
695 		return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit);
696 	else
697 		return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit);
698 }
699 
700 static struct rte_pci_driver rte_i40e_pmd = {
701 	.id_table = pci_id_i40e_map,
702 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
703 		     RTE_PCI_DRV_IOVA_AS_VA,
704 	.probe = eth_i40e_pci_probe,
705 	.remove = eth_i40e_pci_remove,
706 };
707 
708 static inline void
709 i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
710 			 uint32_t reg_val)
711 {
712 	uint32_t ori_reg_val;
713 	struct rte_eth_dev *dev;
714 
715 	ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
716 	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
717 	i40e_write_rx_ctl(hw, reg_addr, reg_val);
718 	if (ori_reg_val != reg_val)
719 		PMD_DRV_LOG(WARNING,
720 			    "i40e device %s changed global register [0x%08x]."
721 			    " original: 0x%08x, new: 0x%08x",
722 			    dev->device->name, reg_addr, ori_reg_val, reg_val);
723 }
724 
725 RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
726 RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
727 RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
728 
729 #ifndef I40E_GLQF_ORT
730 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
731 #endif
732 #ifndef I40E_GLQF_PIT
733 #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
734 #endif
735 #ifndef I40E_GLQF_L3_MAP
736 #define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
737 #endif
738 
739 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
740 {
741 	/*
742 	 * Initialize registers for parsing packet type of QinQ
743 	 * This should be removed from code once proper
744 	 * configuration API is added to avoid configuration conflicts
745 	 * between ports of the same device.
746 	 */
747 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
748 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
749 }
750 
751 static inline void i40e_config_automask(struct i40e_pf *pf)
752 {
753 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
754 	uint32_t val;
755 
756 	/* INTENA flag is not auto-cleared for interrupt */
757 	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
758 	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
759 		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
760 
761 	/* If support multi-driver, PF will use INT0. */
762 	if (!pf->support_multi_driver)
763 		val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
764 
765 	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
766 }
767 
768 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
769 
770 /*
771  * Add a ethertype filter to drop all flow control frames transmitted
772  * from VSIs.
773 */
774 static void
775 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
776 {
777 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
778 	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
779 			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
780 			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
781 	int ret;
782 
783 	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
784 				I40E_FLOW_CONTROL_ETHERTYPE, flags,
785 				pf->main_vsi_seid, 0,
786 				TRUE, NULL, NULL);
787 	if (ret)
788 		PMD_INIT_LOG(ERR,
789 			"Failed to add filter to drop flow control frames from VSIs.");
790 }
791 
792 static int
793 floating_veb_list_handler(__rte_unused const char *key,
794 			  const char *floating_veb_value,
795 			  void *opaque)
796 {
797 	int idx = 0;
798 	unsigned int count = 0;
799 	char *end = NULL;
800 	int min, max;
801 	bool *vf_floating_veb = opaque;
802 
803 	while (isblank(*floating_veb_value))
804 		floating_veb_value++;
805 
806 	/* Reset floating VEB configuration for VFs */
807 	for (idx = 0; idx < I40E_MAX_VF; idx++)
808 		vf_floating_veb[idx] = false;
809 
810 	min = I40E_MAX_VF;
811 	do {
812 		while (isblank(*floating_veb_value))
813 			floating_veb_value++;
814 		if (*floating_veb_value == '\0')
815 			return -1;
816 		errno = 0;
817 		idx = strtoul(floating_veb_value, &end, 10);
818 		if (errno || end == NULL)
819 			return -1;
820 		while (isblank(*end))
821 			end++;
822 		if (*end == '-') {
823 			min = idx;
824 		} else if ((*end == ';') || (*end == '\0')) {
825 			max = idx;
826 			if (min == I40E_MAX_VF)
827 				min = idx;
828 			if (max >= I40E_MAX_VF)
829 				max = I40E_MAX_VF - 1;
830 			for (idx = min; idx <= max; idx++) {
831 				vf_floating_veb[idx] = true;
832 				count++;
833 			}
834 			min = I40E_MAX_VF;
835 		} else {
836 			return -1;
837 		}
838 		floating_veb_value = end + 1;
839 	} while (*end != '\0');
840 
841 	if (count == 0)
842 		return -1;
843 
844 	return 0;
845 }
846 
847 static void
848 config_vf_floating_veb(struct rte_devargs *devargs,
849 		       uint16_t floating_veb,
850 		       bool *vf_floating_veb)
851 {
852 	struct rte_kvargs *kvlist;
853 	int i;
854 	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
855 
856 	if (!floating_veb)
857 		return;
858 	/* All the VFs attach to the floating VEB by default
859 	 * when the floating VEB is enabled.
860 	 */
861 	for (i = 0; i < I40E_MAX_VF; i++)
862 		vf_floating_veb[i] = true;
863 
864 	if (devargs == NULL)
865 		return;
866 
867 	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
868 	if (kvlist == NULL)
869 		return;
870 
871 	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
872 		rte_kvargs_free(kvlist);
873 		return;
874 	}
875 	/* When the floating_veb_list parameter exists, all the VFs
876 	 * will attach to the legacy VEB firstly, then configure VFs
877 	 * to the floating VEB according to the floating_veb_list.
878 	 */
879 	if (rte_kvargs_process(kvlist, floating_veb_list,
880 			       floating_veb_list_handler,
881 			       vf_floating_veb) < 0) {
882 		rte_kvargs_free(kvlist);
883 		return;
884 	}
885 	rte_kvargs_free(kvlist);
886 }
887 
888 static int
889 i40e_check_floating_handler(__rte_unused const char *key,
890 			    const char *value,
891 			    __rte_unused void *opaque)
892 {
893 	if (strcmp(value, "1"))
894 		return -1;
895 
896 	return 0;
897 }
898 
899 static int
900 is_floating_veb_supported(struct rte_devargs *devargs)
901 {
902 	struct rte_kvargs *kvlist;
903 	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
904 
905 	if (devargs == NULL)
906 		return 0;
907 
908 	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
909 	if (kvlist == NULL)
910 		return 0;
911 
912 	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
913 		rte_kvargs_free(kvlist);
914 		return 0;
915 	}
916 	/* Floating VEB is enabled when there's key-value:
917 	 * enable_floating_veb=1
918 	 */
919 	if (rte_kvargs_process(kvlist, floating_veb_key,
920 			       i40e_check_floating_handler, NULL) < 0) {
921 		rte_kvargs_free(kvlist);
922 		return 0;
923 	}
924 	rte_kvargs_free(kvlist);
925 
926 	return 1;
927 }
928 
929 static void
930 config_floating_veb(struct rte_eth_dev *dev)
931 {
932 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
933 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
934 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
935 
936 	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
937 
938 	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
939 		pf->floating_veb =
940 			is_floating_veb_supported(pci_dev->device.devargs);
941 		config_vf_floating_veb(pci_dev->device.devargs,
942 				       pf->floating_veb,
943 				       pf->floating_veb_list);
944 	} else {
945 		pf->floating_veb = false;
946 	}
947 }
948 
949 #define I40E_L2_TAGS_S_TAG_SHIFT 1
950 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
951 
952 static int
953 i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
954 {
955 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
956 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
957 	char ethertype_hash_name[RTE_HASH_NAMESIZE];
958 	int ret;
959 
960 	struct rte_hash_parameters ethertype_hash_params = {
961 		.name = ethertype_hash_name,
962 		.entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
963 		.key_len = sizeof(struct i40e_ethertype_filter_input),
964 		.hash_func = rte_hash_crc,
965 		.hash_func_init_val = 0,
966 		.socket_id = rte_socket_id(),
967 	};
968 
969 	/* Initialize ethertype filter rule list and hash */
970 	TAILQ_INIT(&ethertype_rule->ethertype_list);
971 	snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
972 		 "ethertype_%s", dev->device->name);
973 	ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
974 	if (!ethertype_rule->hash_table) {
975 		PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
976 		return -EINVAL;
977 	}
978 	ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
979 				       sizeof(struct i40e_ethertype_filter *) *
980 				       I40E_MAX_ETHERTYPE_FILTER_NUM,
981 				       0);
982 	if (!ethertype_rule->hash_map) {
983 		PMD_INIT_LOG(ERR,
984 			     "Failed to allocate memory for ethertype hash map!");
985 		ret = -ENOMEM;
986 		goto err_ethertype_hash_map_alloc;
987 	}
988 
989 	return 0;
990 
991 err_ethertype_hash_map_alloc:
992 	rte_hash_free(ethertype_rule->hash_table);
993 
994 	return ret;
995 }
996 
997 static int
998 i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
999 {
1000 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1001 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1002 	char tunnel_hash_name[RTE_HASH_NAMESIZE];
1003 	int ret;
1004 
1005 	struct rte_hash_parameters tunnel_hash_params = {
1006 		.name = tunnel_hash_name,
1007 		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
1008 		.key_len = sizeof(struct i40e_tunnel_filter_input),
1009 		.hash_func = rte_hash_crc,
1010 		.hash_func_init_val = 0,
1011 		.socket_id = rte_socket_id(),
1012 	};
1013 
1014 	/* Initialize tunnel filter rule list and hash */
1015 	TAILQ_INIT(&tunnel_rule->tunnel_list);
1016 	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
1017 		 "tunnel_%s", dev->device->name);
1018 	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
1019 	if (!tunnel_rule->hash_table) {
1020 		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
1021 		return -EINVAL;
1022 	}
1023 	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
1024 				    sizeof(struct i40e_tunnel_filter *) *
1025 				    I40E_MAX_TUNNEL_FILTER_NUM,
1026 				    0);
1027 	if (!tunnel_rule->hash_map) {
1028 		PMD_INIT_LOG(ERR,
1029 			     "Failed to allocate memory for tunnel hash map!");
1030 		ret = -ENOMEM;
1031 		goto err_tunnel_hash_map_alloc;
1032 	}
1033 
1034 	return 0;
1035 
1036 err_tunnel_hash_map_alloc:
1037 	rte_hash_free(tunnel_rule->hash_table);
1038 
1039 	return ret;
1040 }
1041 
1042 static int
1043 i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
1044 {
1045 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1046 	struct i40e_fdir_info *fdir_info = &pf->fdir;
1047 	char fdir_hash_name[RTE_HASH_NAMESIZE];
1048 	int ret;
1049 
1050 	struct rte_hash_parameters fdir_hash_params = {
1051 		.name = fdir_hash_name,
1052 		.entries = I40E_MAX_FDIR_FILTER_NUM,
1053 		.key_len = sizeof(struct i40e_fdir_input),
1054 		.hash_func = rte_hash_crc,
1055 		.hash_func_init_val = 0,
1056 		.socket_id = rte_socket_id(),
1057 	};
1058 
1059 	/* Initialize flow director filter rule list and hash */
1060 	TAILQ_INIT(&fdir_info->fdir_list);
1061 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1062 		 "fdir_%s", dev->device->name);
1063 	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
1064 	if (!fdir_info->hash_table) {
1065 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1066 		return -EINVAL;
1067 	}
1068 	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
1069 					  sizeof(struct i40e_fdir_filter *) *
1070 					  I40E_MAX_FDIR_FILTER_NUM,
1071 					  0);
1072 	if (!fdir_info->hash_map) {
1073 		PMD_INIT_LOG(ERR,
1074 			     "Failed to allocate memory for fdir hash map!");
1075 		ret = -ENOMEM;
1076 		goto err_fdir_hash_map_alloc;
1077 	}
1078 	return 0;
1079 
1080 err_fdir_hash_map_alloc:
1081 	rte_hash_free(fdir_info->hash_table);
1082 
1083 	return ret;
1084 }
1085 
1086 static void
1087 i40e_init_customized_info(struct i40e_pf *pf)
1088 {
1089 	int i;
1090 
1091 	/* Initialize customized pctype */
1092 	for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1093 		pf->customized_pctype[i].index = i;
1094 		pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1095 		pf->customized_pctype[i].valid = false;
1096 	}
1097 
1098 	pf->gtp_support = false;
1099 }
1100 
1101 void
1102 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1103 {
1104 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1105 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1106 	struct i40e_queue_regions *info = &pf->queue_region;
1107 	uint16_t i;
1108 
1109 	for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1110 		i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1111 
1112 	memset(info, 0, sizeof(struct i40e_queue_regions));
1113 }
1114 
1115 static int
1116 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1117 			       const char *value,
1118 			       void *opaque)
1119 {
1120 	struct i40e_pf *pf;
1121 	unsigned long support_multi_driver;
1122 	char *end;
1123 
1124 	pf = (struct i40e_pf *)opaque;
1125 
1126 	errno = 0;
1127 	support_multi_driver = strtoul(value, &end, 10);
1128 	if (errno != 0 || end == value || *end != 0) {
1129 		PMD_DRV_LOG(WARNING, "Wrong global configuration");
1130 		return -(EINVAL);
1131 	}
1132 
1133 	if (support_multi_driver == 1 || support_multi_driver == 0)
1134 		pf->support_multi_driver = (bool)support_multi_driver;
1135 	else
1136 		PMD_DRV_LOG(WARNING, "%s must be 1 or 0,",
1137 			    "enable global configuration by default."
1138 			    ETH_I40E_SUPPORT_MULTI_DRIVER);
1139 	return 0;
1140 }
1141 
1142 static int
1143 i40e_support_multi_driver(struct rte_eth_dev *dev)
1144 {
1145 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1146 	struct rte_kvargs *kvlist;
1147 	int kvargs_count;
1148 
1149 	/* Enable global configuration by default */
1150 	pf->support_multi_driver = false;
1151 
1152 	if (!dev->device->devargs)
1153 		return 0;
1154 
1155 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1156 	if (!kvlist)
1157 		return -EINVAL;
1158 
1159 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1160 	if (!kvargs_count) {
1161 		rte_kvargs_free(kvlist);
1162 		return 0;
1163 	}
1164 
1165 	if (kvargs_count > 1)
1166 		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
1167 			    "the first invalid or last valid one is used !",
1168 			    ETH_I40E_SUPPORT_MULTI_DRIVER);
1169 
1170 	if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1171 			       i40e_parse_multi_drv_handler, pf) < 0) {
1172 		rte_kvargs_free(kvlist);
1173 		return -EINVAL;
1174 	}
1175 
1176 	rte_kvargs_free(kvlist);
1177 	return 0;
1178 }
1179 
1180 static int
1181 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1182 				    uint32_t reg_addr, uint64_t reg_val,
1183 				    struct i40e_asq_cmd_details *cmd_details)
1184 {
1185 	uint64_t ori_reg_val;
1186 	struct rte_eth_dev *dev;
1187 	int ret;
1188 
1189 	ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1190 	if (ret != I40E_SUCCESS) {
1191 		PMD_DRV_LOG(ERR,
1192 			    "Fail to debug read from 0x%08x",
1193 			    reg_addr);
1194 		return -EIO;
1195 	}
1196 	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
1197 
1198 	if (ori_reg_val != reg_val)
1199 		PMD_DRV_LOG(WARNING,
1200 			    "i40e device %s changed global register [0x%08x]."
1201 			    " original: 0x%"PRIx64", after: 0x%"PRIx64,
1202 			    dev->device->name, reg_addr, ori_reg_val, reg_val);
1203 
1204 	return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1205 }
1206 
1207 static int
1208 i40e_parse_latest_vec_handler(__rte_unused const char *key,
1209 				const char *value,
1210 				void *opaque)
1211 {
1212 	struct i40e_adapter *ad;
1213 	int use_latest_vec;
1214 
1215 	ad = (struct i40e_adapter *)opaque;
1216 
1217 	use_latest_vec = atoi(value);
1218 
1219 	if (use_latest_vec != 0 && use_latest_vec != 1)
1220 		PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
1221 
1222 	ad->use_latest_vec = (uint8_t)use_latest_vec;
1223 
1224 	return 0;
1225 }
1226 
1227 static int
1228 i40e_use_latest_vec(struct rte_eth_dev *dev)
1229 {
1230 	struct i40e_adapter *ad =
1231 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1232 	struct rte_kvargs *kvlist;
1233 	int kvargs_count;
1234 
1235 	ad->use_latest_vec = false;
1236 
1237 	if (!dev->device->devargs)
1238 		return 0;
1239 
1240 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1241 	if (!kvlist)
1242 		return -EINVAL;
1243 
1244 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
1245 	if (!kvargs_count) {
1246 		rte_kvargs_free(kvlist);
1247 		return 0;
1248 	}
1249 
1250 	if (kvargs_count > 1)
1251 		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
1252 			    "the first invalid or last valid one is used !",
1253 			    ETH_I40E_USE_LATEST_VEC);
1254 
1255 	if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
1256 				i40e_parse_latest_vec_handler, ad) < 0) {
1257 		rte_kvargs_free(kvlist);
1258 		return -EINVAL;
1259 	}
1260 
1261 	rte_kvargs_free(kvlist);
1262 	return 0;
1263 }
1264 
1265 #define I40E_ALARM_INTERVAL 50000 /* us */
1266 
1267 static int
1268 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1269 {
1270 	struct rte_pci_device *pci_dev;
1271 	struct rte_intr_handle *intr_handle;
1272 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1273 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1274 	struct i40e_vsi *vsi;
1275 	int ret;
1276 	uint32_t len, val;
1277 	uint8_t aq_fail = 0;
1278 
1279 	PMD_INIT_FUNC_TRACE();
1280 
1281 	dev->dev_ops = &i40e_eth_dev_ops;
1282 	dev->rx_pkt_burst = i40e_recv_pkts;
1283 	dev->tx_pkt_burst = i40e_xmit_pkts;
1284 	dev->tx_pkt_prepare = i40e_prep_pkts;
1285 
1286 	/* for secondary processes, we don't initialise any further as primary
1287 	 * has already done this work. Only check we don't need a different
1288 	 * RX function */
1289 	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1290 		i40e_set_rx_function(dev);
1291 		i40e_set_tx_function(dev);
1292 		return 0;
1293 	}
1294 	i40e_set_default_ptype_table(dev);
1295 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1296 	intr_handle = &pci_dev->intr_handle;
1297 
1298 	rte_eth_copy_pci_info(dev, pci_dev);
1299 
1300 	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1301 	pf->adapter->eth_dev = dev;
1302 	pf->dev_data = dev->data;
1303 
1304 	hw->back = I40E_PF_TO_ADAPTER(pf);
1305 	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1306 	if (!hw->hw_addr) {
1307 		PMD_INIT_LOG(ERR,
1308 			"Hardware is not available, as address is NULL");
1309 		return -ENODEV;
1310 	}
1311 
1312 	hw->vendor_id = pci_dev->id.vendor_id;
1313 	hw->device_id = pci_dev->id.device_id;
1314 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1315 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1316 	hw->bus.device = pci_dev->addr.devid;
1317 	hw->bus.func = pci_dev->addr.function;
1318 	hw->adapter_stopped = 0;
1319 	hw->adapter_closed = 0;
1320 
1321 	/*
1322 	 * Switch Tag value should not be identical to either the First Tag
1323 	 * or Second Tag values. So set something other than common Ethertype
1324 	 * for internal switching.
1325 	 */
1326 	hw->switch_tag = 0xffff;
1327 
1328 	val = I40E_READ_REG(hw, I40E_GL_FWSTS);
1329 	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
1330 		PMD_INIT_LOG(ERR, "\nERROR: "
1331 			"Firmware recovery mode detected. Limiting functionality.\n"
1332 			"Refer to the Intel(R) Ethernet Adapters and Devices "
1333 			"User Guide for details on firmware recovery mode.");
1334 		return -EIO;
1335 	}
1336 
1337 	/* Check if need to support multi-driver */
1338 	i40e_support_multi_driver(dev);
1339 	/* Check if users want the latest supported vec path */
1340 	i40e_use_latest_vec(dev);
1341 
1342 	/* Make sure all is clean before doing PF reset */
1343 	i40e_clear_hw(hw);
1344 
1345 	/* Reset here to make sure all is clean for each PF */
1346 	ret = i40e_pf_reset(hw);
1347 	if (ret) {
1348 		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1349 		return ret;
1350 	}
1351 
1352 	/* Initialize the shared code (base driver) */
1353 	ret = i40e_init_shared_code(hw);
1354 	if (ret) {
1355 		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1356 		return ret;
1357 	}
1358 
1359 	/* Initialize the parameters for adminq */
1360 	i40e_init_adminq_parameter(hw);
1361 	ret = i40e_init_adminq(hw);
1362 	if (ret != I40E_SUCCESS) {
1363 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1364 		return -EIO;
1365 	}
1366 	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1367 		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1368 		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
1369 		     ((hw->nvm.version >> 12) & 0xf),
1370 		     ((hw->nvm.version >> 4) & 0xff),
1371 		     (hw->nvm.version & 0xf), hw->nvm.eetrack);
1372 
1373 	/* Initialize the hardware */
1374 	i40e_hw_init(dev);
1375 
1376 	i40e_config_automask(pf);
1377 
1378 	i40e_set_default_pctype_table(dev);
1379 
1380 	/*
1381 	 * To work around the NVM issue, initialize registers
1382 	 * for packet type of QinQ by software.
1383 	 * It should be removed once issues are fixed in NVM.
1384 	 */
1385 	if (!pf->support_multi_driver)
1386 		i40e_GLQF_reg_init(hw);
1387 
1388 	/* Initialize the input set for filters (hash and fd) to default value */
1389 	i40e_filter_input_set_init(pf);
1390 
1391 	/* initialise the L3_MAP register */
1392 	if (!pf->support_multi_driver) {
1393 		ret = i40e_aq_debug_write_global_register(hw,
1394 						   I40E_GLQF_L3_MAP(40),
1395 						   0x00000028,	NULL);
1396 		if (ret)
1397 			PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1398 				     ret);
1399 		PMD_INIT_LOG(DEBUG,
1400 			     "Global register 0x%08x is changed with 0x28",
1401 			     I40E_GLQF_L3_MAP(40));
1402 	}
1403 
1404 	/* Need the special FW version to support floating VEB */
1405 	config_floating_veb(dev);
1406 	/* Clear PXE mode */
1407 	i40e_clear_pxe_mode(hw);
1408 	i40e_dev_sync_phy_type(hw);
1409 
1410 	/*
1411 	 * On X710, performance number is far from the expectation on recent
1412 	 * firmware versions. The fix for this issue may not be integrated in
1413 	 * the following firmware version. So the workaround in software driver
1414 	 * is needed. It needs to modify the initial values of 3 internal only
1415 	 * registers. Note that the workaround can be removed when it is fixed
1416 	 * in firmware in the future.
1417 	 */
1418 	i40e_configure_registers(hw);
1419 
1420 	/* Get hw capabilities */
1421 	ret = i40e_get_cap(hw);
1422 	if (ret != I40E_SUCCESS) {
1423 		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1424 		goto err_get_capabilities;
1425 	}
1426 
1427 	/* Initialize parameters for PF */
1428 	ret = i40e_pf_parameter_init(dev);
1429 	if (ret != 0) {
1430 		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1431 		goto err_parameter_init;
1432 	}
1433 
1434 	/* Initialize the queue management */
1435 	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1436 	if (ret < 0) {
1437 		PMD_INIT_LOG(ERR, "Failed to init queue pool");
1438 		goto err_qp_pool_init;
1439 	}
1440 	ret = i40e_res_pool_init(&pf->msix_pool, 1,
1441 				hw->func_caps.num_msix_vectors - 1);
1442 	if (ret < 0) {
1443 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1444 		goto err_msix_pool_init;
1445 	}
1446 
1447 	/* Initialize lan hmc */
1448 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1449 				hw->func_caps.num_rx_qp, 0, 0);
1450 	if (ret != I40E_SUCCESS) {
1451 		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1452 		goto err_init_lan_hmc;
1453 	}
1454 
1455 	/* Configure lan hmc */
1456 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1457 	if (ret != I40E_SUCCESS) {
1458 		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1459 		goto err_configure_lan_hmc;
1460 	}
1461 
1462 	/* Get and check the mac address */
1463 	i40e_get_mac_addr(hw, hw->mac.addr);
1464 	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1465 		PMD_INIT_LOG(ERR, "mac address is not valid");
1466 		ret = -EIO;
1467 		goto err_get_mac_addr;
1468 	}
1469 	/* Copy the permanent MAC address */
1470 	ether_addr_copy((struct ether_addr *) hw->mac.addr,
1471 			(struct ether_addr *) hw->mac.perm_addr);
1472 
1473 	/* Disable flow control */
1474 	hw->fc.requested_mode = I40E_FC_NONE;
1475 	i40e_set_fc(hw, &aq_fail, TRUE);
1476 
1477 	/* Set the global registers with default ether type value */
1478 	if (!pf->support_multi_driver) {
1479 		ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1480 					 ETHER_TYPE_VLAN);
1481 		if (ret != I40E_SUCCESS) {
1482 			PMD_INIT_LOG(ERR,
1483 				     "Failed to set the default outer "
1484 				     "VLAN ether type");
1485 			goto err_setup_pf_switch;
1486 		}
1487 	}
1488 
1489 	/* PF setup, which includes VSI setup */
1490 	ret = i40e_pf_setup(pf);
1491 	if (ret) {
1492 		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1493 		goto err_setup_pf_switch;
1494 	}
1495 
1496 	vsi = pf->main_vsi;
1497 
1498 	/* Disable double vlan by default */
1499 	i40e_vsi_config_double_vlan(vsi, FALSE);
1500 
1501 	/* Disable S-TAG identification when floating_veb is disabled */
1502 	if (!pf->floating_veb) {
1503 		ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1504 		if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1505 			ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1506 			I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1507 		}
1508 	}
1509 
1510 	if (!vsi->max_macaddrs)
1511 		len = ETHER_ADDR_LEN;
1512 	else
1513 		len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1514 
1515 	/* Should be after VSI initialized */
1516 	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1517 	if (!dev->data->mac_addrs) {
1518 		PMD_INIT_LOG(ERR,
1519 			"Failed to allocated memory for storing mac address");
1520 		goto err_mac_alloc;
1521 	}
1522 	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1523 					&dev->data->mac_addrs[0]);
1524 
1525 	/* Init dcb to sw mode by default */
1526 	ret = i40e_dcb_init_configure(dev, TRUE);
1527 	if (ret != I40E_SUCCESS) {
1528 		PMD_INIT_LOG(INFO, "Failed to init dcb.");
1529 		pf->flags &= ~I40E_FLAG_DCB;
1530 	}
1531 	/* Update HW struct after DCB configuration */
1532 	i40e_get_cap(hw);
1533 
1534 	/* initialize pf host driver to setup SRIOV resource if applicable */
1535 	i40e_pf_host_init(dev);
1536 
1537 	/* register callback func to eal lib */
1538 	rte_intr_callback_register(intr_handle,
1539 				   i40e_dev_interrupt_handler, dev);
1540 
1541 	/* configure and enable device interrupt */
1542 	i40e_pf_config_irq0(hw, TRUE);
1543 	i40e_pf_enable_irq0(hw);
1544 
1545 	/* enable uio intr after callback register */
1546 	rte_intr_enable(intr_handle);
1547 
1548 	/* By default disable flexible payload in global configuration */
1549 	if (!pf->support_multi_driver)
1550 		i40e_flex_payload_reg_set_default(hw);
1551 
1552 	/*
1553 	 * Add an ethertype filter to drop all flow control frames transmitted
1554 	 * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1555 	 * frames to wire.
1556 	 */
1557 	i40e_add_tx_flow_control_drop_filter(pf);
1558 
1559 	/* Set the max frame size to 0x2600 by default,
1560 	 * in case other drivers changed the default value.
1561 	 */
1562 	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1563 
1564 	/* initialize mirror rule list */
1565 	TAILQ_INIT(&pf->mirror_list);
1566 
1567 	/* initialize Traffic Manager configuration */
1568 	i40e_tm_conf_init(dev);
1569 
1570 	/* Initialize customized information */
1571 	i40e_init_customized_info(pf);
1572 
1573 	ret = i40e_init_ethtype_filter_list(dev);
1574 	if (ret < 0)
1575 		goto err_init_ethtype_filter_list;
1576 	ret = i40e_init_tunnel_filter_list(dev);
1577 	if (ret < 0)
1578 		goto err_init_tunnel_filter_list;
1579 	ret = i40e_init_fdir_filter_list(dev);
1580 	if (ret < 0)
1581 		goto err_init_fdir_filter_list;
1582 
1583 	/* initialize queue region configuration */
1584 	i40e_init_queue_region_conf(dev);
1585 
1586 	/* initialize rss configuration from rte_flow */
1587 	memset(&pf->rss_info, 0,
1588 		sizeof(struct i40e_rte_flow_rss_conf));
1589 
1590 	/* reset all stats of the device, including pf and main vsi */
1591 	i40e_dev_stats_reset(dev);
1592 
1593 	return 0;
1594 
1595 err_init_fdir_filter_list:
1596 	rte_free(pf->tunnel.hash_table);
1597 	rte_free(pf->tunnel.hash_map);
1598 err_init_tunnel_filter_list:
1599 	rte_free(pf->ethertype.hash_table);
1600 	rte_free(pf->ethertype.hash_map);
1601 err_init_ethtype_filter_list:
1602 	rte_free(dev->data->mac_addrs);
1603 err_mac_alloc:
1604 	i40e_vsi_release(pf->main_vsi);
1605 err_setup_pf_switch:
1606 err_get_mac_addr:
1607 err_configure_lan_hmc:
1608 	(void)i40e_shutdown_lan_hmc(hw);
1609 err_init_lan_hmc:
1610 	i40e_res_pool_destroy(&pf->msix_pool);
1611 err_msix_pool_init:
1612 	i40e_res_pool_destroy(&pf->qp_pool);
1613 err_qp_pool_init:
1614 err_parameter_init:
1615 err_get_capabilities:
1616 	(void)i40e_shutdown_adminq(hw);
1617 
1618 	return ret;
1619 }
1620 
1621 static void
1622 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1623 {
1624 	struct i40e_ethertype_filter *p_ethertype;
1625 	struct i40e_ethertype_rule *ethertype_rule;
1626 
1627 	ethertype_rule = &pf->ethertype;
1628 	/* Remove all ethertype filter rules and hash */
1629 	if (ethertype_rule->hash_map)
1630 		rte_free(ethertype_rule->hash_map);
1631 	if (ethertype_rule->hash_table)
1632 		rte_hash_free(ethertype_rule->hash_table);
1633 
1634 	while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1635 		TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1636 			     p_ethertype, rules);
1637 		rte_free(p_ethertype);
1638 	}
1639 }
1640 
1641 static void
1642 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1643 {
1644 	struct i40e_tunnel_filter *p_tunnel;
1645 	struct i40e_tunnel_rule *tunnel_rule;
1646 
1647 	tunnel_rule = &pf->tunnel;
1648 	/* Remove all tunnel director rules and hash */
1649 	if (tunnel_rule->hash_map)
1650 		rte_free(tunnel_rule->hash_map);
1651 	if (tunnel_rule->hash_table)
1652 		rte_hash_free(tunnel_rule->hash_table);
1653 
1654 	while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1655 		TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1656 		rte_free(p_tunnel);
1657 	}
1658 }
1659 
1660 static void
1661 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1662 {
1663 	struct i40e_fdir_filter *p_fdir;
1664 	struct i40e_fdir_info *fdir_info;
1665 
1666 	fdir_info = &pf->fdir;
1667 	/* Remove all flow director rules and hash */
1668 	if (fdir_info->hash_map)
1669 		rte_free(fdir_info->hash_map);
1670 	if (fdir_info->hash_table)
1671 		rte_hash_free(fdir_info->hash_table);
1672 
1673 	while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1674 		TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1675 		rte_free(p_fdir);
1676 	}
1677 }
1678 
1679 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1680 {
1681 	/*
1682 	 * Disable by default flexible payload
1683 	 * for corresponding L2/L3/L4 layers.
1684 	 */
1685 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1686 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1687 	I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1688 }
1689 
1690 static int
1691 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1692 {
1693 	struct i40e_pf *pf;
1694 	struct rte_pci_device *pci_dev;
1695 	struct rte_intr_handle *intr_handle;
1696 	struct i40e_hw *hw;
1697 	struct i40e_filter_control_settings settings;
1698 	struct rte_flow *p_flow;
1699 	int ret;
1700 	uint8_t aq_fail = 0;
1701 	int retries = 0;
1702 
1703 	PMD_INIT_FUNC_TRACE();
1704 
1705 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1706 		return 0;
1707 
1708 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1709 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1710 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1711 	intr_handle = &pci_dev->intr_handle;
1712 
1713 	ret = rte_eth_switch_domain_free(pf->switch_domain_id);
1714 	if (ret)
1715 		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
1716 
1717 	if (hw->adapter_closed == 0)
1718 		i40e_dev_close(dev);
1719 
1720 	dev->dev_ops = NULL;
1721 	dev->rx_pkt_burst = NULL;
1722 	dev->tx_pkt_burst = NULL;
1723 
1724 	/* Clear PXE mode */
1725 	i40e_clear_pxe_mode(hw);
1726 
1727 	/* Unconfigure filter control */
1728 	memset(&settings, 0, sizeof(settings));
1729 	ret = i40e_set_filter_control(hw, &settings);
1730 	if (ret)
1731 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1732 					ret);
1733 
1734 	/* Disable flow control */
1735 	hw->fc.requested_mode = I40E_FC_NONE;
1736 	i40e_set_fc(hw, &aq_fail, TRUE);
1737 
1738 	/* uninitialize pf host driver */
1739 	i40e_pf_host_uninit(dev);
1740 
1741 	/* disable uio intr before callback unregister */
1742 	rte_intr_disable(intr_handle);
1743 
1744 	/* unregister callback func to eal lib */
1745 	do {
1746 		ret = rte_intr_callback_unregister(intr_handle,
1747 				i40e_dev_interrupt_handler, dev);
1748 		if (ret >= 0) {
1749 			break;
1750 		} else if (ret != -EAGAIN) {
1751 			PMD_INIT_LOG(ERR,
1752 				 "intr callback unregister failed: %d",
1753 				 ret);
1754 			return ret;
1755 		}
1756 		i40e_msec_delay(500);
1757 	} while (retries++ < 5);
1758 
1759 	i40e_rm_ethtype_filter_list(pf);
1760 	i40e_rm_tunnel_filter_list(pf);
1761 	i40e_rm_fdir_filter_list(pf);
1762 
1763 	/* Remove all flows */
1764 	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1765 		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1766 		rte_free(p_flow);
1767 	}
1768 
1769 	/* Remove all Traffic Manager configuration */
1770 	i40e_tm_conf_uninit(dev);
1771 
1772 	return 0;
1773 }
1774 
1775 static int
1776 i40e_dev_configure(struct rte_eth_dev *dev)
1777 {
1778 	struct i40e_adapter *ad =
1779 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1780 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1781 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1782 	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1783 	int i, ret;
1784 
1785 	ret = i40e_dev_sync_phy_type(hw);
1786 	if (ret)
1787 		return ret;
1788 
1789 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
1790 	 * bulk allocation or vector Rx preconditions we will reset it.
1791 	 */
1792 	ad->rx_bulk_alloc_allowed = true;
1793 	ad->rx_vec_allowed = true;
1794 	ad->tx_simple_allowed = true;
1795 	ad->tx_vec_allowed = true;
1796 
1797 	/* Only legacy filter API needs the following fdir config. So when the
1798 	 * legacy filter API is deprecated, the following codes should also be
1799 	 * removed.
1800 	 */
1801 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1802 		ret = i40e_fdir_setup(pf);
1803 		if (ret != I40E_SUCCESS) {
1804 			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1805 			return -ENOTSUP;
1806 		}
1807 		ret = i40e_fdir_configure(dev);
1808 		if (ret < 0) {
1809 			PMD_DRV_LOG(ERR, "failed to configure fdir.");
1810 			goto err;
1811 		}
1812 	} else
1813 		i40e_fdir_teardown(pf);
1814 
1815 	ret = i40e_dev_init_vlan(dev);
1816 	if (ret < 0)
1817 		goto err;
1818 
1819 	/* VMDQ setup.
1820 	 *  Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
1821 	 *  RSS setting have different requirements.
1822 	 *  General PMD driver call sequence are NIC init, configure,
1823 	 *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
1824 	 *  will try to lookup the VSI that specific queue belongs to if VMDQ
1825 	 *  applicable. So, VMDQ setting has to be done before
1826 	 *  rx/tx_queue_setup(). This function is good  to place vmdq_setup.
1827 	 *  For RSS setting, it will try to calculate actual configured RX queue
1828 	 *  number, which will be available after rx_queue_setup(). dev_start()
1829 	 *  function is good to place RSS setup.
1830 	 */
1831 	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1832 		ret = i40e_vmdq_setup(dev);
1833 		if (ret)
1834 			goto err;
1835 	}
1836 
1837 	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1838 		ret = i40e_dcb_setup(dev);
1839 		if (ret) {
1840 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
1841 			goto err_dcb;
1842 		}
1843 	}
1844 
1845 	TAILQ_INIT(&pf->flow_list);
1846 
1847 	return 0;
1848 
1849 err_dcb:
1850 	/* need to release vmdq resource if exists */
1851 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1852 		i40e_vsi_release(pf->vmdq[i].vsi);
1853 		pf->vmdq[i].vsi = NULL;
1854 	}
1855 	rte_free(pf->vmdq);
1856 	pf->vmdq = NULL;
1857 err:
1858 	/* Need to release fdir resource if exists.
1859 	 * Only legacy filter API needs the following fdir config. So when the
1860 	 * legacy filter API is deprecated, the following code should also be
1861 	 * removed.
1862 	 */
1863 	i40e_fdir_teardown(pf);
1864 	return ret;
1865 }
1866 
1867 void
1868 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1869 {
1870 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1871 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1872 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1873 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1874 	uint16_t msix_vect = vsi->msix_intr;
1875 	uint16_t i;
1876 
1877 	for (i = 0; i < vsi->nb_qps; i++) {
1878 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1879 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1880 		rte_wmb();
1881 	}
1882 
1883 	if (vsi->type != I40E_VSI_SRIOV) {
1884 		if (!rte_intr_allow_others(intr_handle)) {
1885 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1886 				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1887 			I40E_WRITE_REG(hw,
1888 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1889 				       0);
1890 		} else {
1891 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1892 				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1893 			I40E_WRITE_REG(hw,
1894 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1895 						       msix_vect - 1), 0);
1896 		}
1897 	} else {
1898 		uint32_t reg;
1899 		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1900 			vsi->user_param + (msix_vect - 1);
1901 
1902 		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1903 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1904 	}
1905 	I40E_WRITE_FLUSH(hw);
1906 }
1907 
1908 static void
1909 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1910 		       int base_queue, int nb_queue,
1911 		       uint16_t itr_idx)
1912 {
1913 	int i;
1914 	uint32_t val;
1915 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1916 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1917 
1918 	/* Bind all RX queues to allocated MSIX interrupt */
1919 	for (i = 0; i < nb_queue; i++) {
1920 		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1921 			itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1922 			((base_queue + i + 1) <<
1923 			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1924 			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1925 			I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1926 
1927 		if (i == nb_queue - 1)
1928 			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1929 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1930 	}
1931 
1932 	/* Write first RX queue to Link list register as the head element */
1933 	if (vsi->type != I40E_VSI_SRIOV) {
1934 		uint16_t interval =
1935 			i40e_calc_itr_interval(1, pf->support_multi_driver);
1936 
1937 		if (msix_vect == I40E_MISC_VEC_ID) {
1938 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1939 				       (base_queue <<
1940 					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1941 				       (0x0 <<
1942 					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1943 			I40E_WRITE_REG(hw,
1944 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1945 				       interval);
1946 		} else {
1947 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1948 				       (base_queue <<
1949 					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1950 				       (0x0 <<
1951 					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1952 			I40E_WRITE_REG(hw,
1953 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1954 						       msix_vect - 1),
1955 				       interval);
1956 		}
1957 	} else {
1958 		uint32_t reg;
1959 
1960 		if (msix_vect == I40E_MISC_VEC_ID) {
1961 			I40E_WRITE_REG(hw,
1962 				       I40E_VPINT_LNKLST0(vsi->user_param),
1963 				       (base_queue <<
1964 					I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1965 				       (0x0 <<
1966 					I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1967 		} else {
1968 			/* num_msix_vectors_vf needs to minus irq0 */
1969 			reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1970 				vsi->user_param + (msix_vect - 1);
1971 
1972 			I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1973 				       (base_queue <<
1974 					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1975 				       (0x0 <<
1976 					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1977 		}
1978 	}
1979 
1980 	I40E_WRITE_FLUSH(hw);
1981 }
1982 
1983 void
1984 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1985 {
1986 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1987 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1988 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1989 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1990 	uint16_t msix_vect = vsi->msix_intr;
1991 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1992 	uint16_t queue_idx = 0;
1993 	int record = 0;
1994 	int i;
1995 
1996 	for (i = 0; i < vsi->nb_qps; i++) {
1997 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1998 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1999 	}
2000 
2001 	/* VF bind interrupt */
2002 	if (vsi->type == I40E_VSI_SRIOV) {
2003 		__vsi_queues_bind_intr(vsi, msix_vect,
2004 				       vsi->base_queue, vsi->nb_qps,
2005 				       itr_idx);
2006 		return;
2007 	}
2008 
2009 	/* PF & VMDq bind interrupt */
2010 	if (rte_intr_dp_is_en(intr_handle)) {
2011 		if (vsi->type == I40E_VSI_MAIN) {
2012 			queue_idx = 0;
2013 			record = 1;
2014 		} else if (vsi->type == I40E_VSI_VMDQ2) {
2015 			struct i40e_vsi *main_vsi =
2016 				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
2017 			queue_idx = vsi->base_queue - main_vsi->nb_qps;
2018 			record = 1;
2019 		}
2020 	}
2021 
2022 	for (i = 0; i < vsi->nb_used_qps; i++) {
2023 		if (nb_msix <= 1) {
2024 			if (!rte_intr_allow_others(intr_handle))
2025 				/* allow to share MISC_VEC_ID */
2026 				msix_vect = I40E_MISC_VEC_ID;
2027 
2028 			/* no enough msix_vect, map all to one */
2029 			__vsi_queues_bind_intr(vsi, msix_vect,
2030 					       vsi->base_queue + i,
2031 					       vsi->nb_used_qps - i,
2032 					       itr_idx);
2033 			for (; !!record && i < vsi->nb_used_qps; i++)
2034 				intr_handle->intr_vec[queue_idx + i] =
2035 					msix_vect;
2036 			break;
2037 		}
2038 		/* 1:1 queue/msix_vect mapping */
2039 		__vsi_queues_bind_intr(vsi, msix_vect,
2040 				       vsi->base_queue + i, 1,
2041 				       itr_idx);
2042 		if (!!record)
2043 			intr_handle->intr_vec[queue_idx + i] = msix_vect;
2044 
2045 		msix_vect++;
2046 		nb_msix--;
2047 	}
2048 }
2049 
2050 static void
2051 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
2052 {
2053 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2054 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2055 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2056 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2057 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2058 	uint16_t msix_intr, i;
2059 
2060 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2061 		for (i = 0; i < vsi->nb_msix; i++) {
2062 			msix_intr = vsi->msix_intr + i;
2063 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2064 				I40E_PFINT_DYN_CTLN_INTENA_MASK |
2065 				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2066 				I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2067 		}
2068 	else
2069 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2070 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
2071 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2072 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2073 
2074 	I40E_WRITE_FLUSH(hw);
2075 }
2076 
2077 static void
2078 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
2079 {
2080 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2081 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2082 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2083 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2084 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2085 	uint16_t msix_intr, i;
2086 
2087 	if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2088 		for (i = 0; i < vsi->nb_msix; i++) {
2089 			msix_intr = vsi->msix_intr + i;
2090 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2091 				       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2092 		}
2093 	else
2094 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2095 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2096 
2097 	I40E_WRITE_FLUSH(hw);
2098 }
2099 
2100 static inline uint8_t
2101 i40e_parse_link_speeds(uint16_t link_speeds)
2102 {
2103 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2104 
2105 	if (link_speeds & ETH_LINK_SPEED_40G)
2106 		link_speed |= I40E_LINK_SPEED_40GB;
2107 	if (link_speeds & ETH_LINK_SPEED_25G)
2108 		link_speed |= I40E_LINK_SPEED_25GB;
2109 	if (link_speeds & ETH_LINK_SPEED_20G)
2110 		link_speed |= I40E_LINK_SPEED_20GB;
2111 	if (link_speeds & ETH_LINK_SPEED_10G)
2112 		link_speed |= I40E_LINK_SPEED_10GB;
2113 	if (link_speeds & ETH_LINK_SPEED_1G)
2114 		link_speed |= I40E_LINK_SPEED_1GB;
2115 	if (link_speeds & ETH_LINK_SPEED_100M)
2116 		link_speed |= I40E_LINK_SPEED_100MB;
2117 
2118 	return link_speed;
2119 }
2120 
2121 static int
2122 i40e_phy_conf_link(struct i40e_hw *hw,
2123 		   uint8_t abilities,
2124 		   uint8_t force_speed,
2125 		   bool is_up)
2126 {
2127 	enum i40e_status_code status;
2128 	struct i40e_aq_get_phy_abilities_resp phy_ab;
2129 	struct i40e_aq_set_phy_config phy_conf;
2130 	enum i40e_aq_phy_type cnt;
2131 	uint8_t avail_speed;
2132 	uint32_t phy_type_mask = 0;
2133 
2134 	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2135 			I40E_AQ_PHY_FLAG_PAUSE_RX |
2136 			I40E_AQ_PHY_FLAG_PAUSE_RX |
2137 			I40E_AQ_PHY_FLAG_LOW_POWER;
2138 	int ret = -ENOTSUP;
2139 
2140 	/* To get phy capabilities of available speeds. */
2141 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2142 					      NULL);
2143 	if (status) {
2144 		PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2145 				status);
2146 		return ret;
2147 	}
2148 	avail_speed = phy_ab.link_speed;
2149 
2150 	/* To get the current phy config. */
2151 	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2152 					      NULL);
2153 	if (status) {
2154 		PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2155 				status);
2156 		return ret;
2157 	}
2158 
2159 	/* If link needs to go up and it is in autoneg mode the speed is OK,
2160 	 * no need to set up again.
2161 	 */
2162 	if (is_up && phy_ab.phy_type != 0 &&
2163 		     abilities & I40E_AQ_PHY_AN_ENABLED &&
2164 		     phy_ab.link_speed != 0)
2165 		return I40E_SUCCESS;
2166 
2167 	memset(&phy_conf, 0, sizeof(phy_conf));
2168 
2169 	/* bits 0-2 use the values from get_phy_abilities_resp */
2170 	abilities &= ~mask;
2171 	abilities |= phy_ab.abilities & mask;
2172 
2173 	phy_conf.abilities = abilities;
2174 
2175 	/* If link needs to go up, but the force speed is not supported,
2176 	 * Warn users and config the default available speeds.
2177 	 */
2178 	if (is_up && !(force_speed & avail_speed)) {
2179 		PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2180 		phy_conf.link_speed = avail_speed;
2181 	} else {
2182 		phy_conf.link_speed = is_up ? force_speed : avail_speed;
2183 	}
2184 
2185 	/* PHY type mask needs to include each type except PHY type extension */
2186 	for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2187 		phy_type_mask |= 1 << cnt;
2188 
2189 	/* use get_phy_abilities_resp value for the rest */
2190 	phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2191 	phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2192 		I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2193 		I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2194 	phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2195 	phy_conf.eee_capability = phy_ab.eee_capability;
2196 	phy_conf.eeer = phy_ab.eeer_val;
2197 	phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2198 
2199 	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2200 		    phy_ab.abilities, phy_ab.link_speed);
2201 	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
2202 		    phy_conf.abilities, phy_conf.link_speed);
2203 
2204 	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2205 	if (status)
2206 		return ret;
2207 
2208 	return I40E_SUCCESS;
2209 }
2210 
2211 static int
2212 i40e_apply_link_speed(struct rte_eth_dev *dev)
2213 {
2214 	uint8_t speed;
2215 	uint8_t abilities = 0;
2216 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2217 	struct rte_eth_conf *conf = &dev->data->dev_conf;
2218 
2219 	if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2220 		conf->link_speeds = ETH_LINK_SPEED_40G |
2221 				    ETH_LINK_SPEED_25G |
2222 				    ETH_LINK_SPEED_20G |
2223 				    ETH_LINK_SPEED_10G |
2224 				    ETH_LINK_SPEED_1G |
2225 				    ETH_LINK_SPEED_100M;
2226 	}
2227 	speed = i40e_parse_link_speeds(conf->link_speeds);
2228 	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2229 		     I40E_AQ_PHY_AN_ENABLED |
2230 		     I40E_AQ_PHY_LINK_ENABLED;
2231 
2232 	return i40e_phy_conf_link(hw, abilities, speed, true);
2233 }
2234 
2235 static int
2236 i40e_dev_start(struct rte_eth_dev *dev)
2237 {
2238 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2239 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2240 	struct i40e_vsi *main_vsi = pf->main_vsi;
2241 	int ret, i;
2242 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2243 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2244 	uint32_t intr_vector = 0;
2245 	struct i40e_vsi *vsi;
2246 
2247 	hw->adapter_stopped = 0;
2248 
2249 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2250 		PMD_INIT_LOG(ERR,
2251 		"Invalid link_speeds for port %u, autonegotiation disabled",
2252 			      dev->data->port_id);
2253 		return -EINVAL;
2254 	}
2255 
2256 	rte_intr_disable(intr_handle);
2257 
2258 	if ((rte_intr_cap_multiple(intr_handle) ||
2259 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
2260 	    dev->data->dev_conf.intr_conf.rxq != 0) {
2261 		intr_vector = dev->data->nb_rx_queues;
2262 		ret = rte_intr_efd_enable(intr_handle, intr_vector);
2263 		if (ret)
2264 			return ret;
2265 	}
2266 
2267 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2268 		intr_handle->intr_vec =
2269 			rte_zmalloc("intr_vec",
2270 				    dev->data->nb_rx_queues * sizeof(int),
2271 				    0);
2272 		if (!intr_handle->intr_vec) {
2273 			PMD_INIT_LOG(ERR,
2274 				"Failed to allocate %d rx_queues intr_vec",
2275 				dev->data->nb_rx_queues);
2276 			return -ENOMEM;
2277 		}
2278 	}
2279 
2280 	/* Initialize VSI */
2281 	ret = i40e_dev_rxtx_init(pf);
2282 	if (ret != I40E_SUCCESS) {
2283 		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2284 		goto err_up;
2285 	}
2286 
2287 	/* Map queues with MSIX interrupt */
2288 	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2289 		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2290 	i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2291 	i40e_vsi_enable_queues_intr(main_vsi);
2292 
2293 	/* Map VMDQ VSI queues with MSIX interrupt */
2294 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2295 		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2296 		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2297 					  I40E_ITR_INDEX_DEFAULT);
2298 		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2299 	}
2300 
2301 	/* enable FDIR MSIX interrupt */
2302 	if (pf->fdir.fdir_vsi) {
2303 		i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
2304 					  I40E_ITR_INDEX_NONE);
2305 		i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2306 	}
2307 
2308 	/* Enable all queues which have been configured */
2309 	ret = i40e_dev_switch_queues(pf, TRUE);
2310 	if (ret != I40E_SUCCESS) {
2311 		PMD_DRV_LOG(ERR, "Failed to enable VSI");
2312 		goto err_up;
2313 	}
2314 
2315 	/* Enable receiving broadcast packets */
2316 	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2317 	if (ret != I40E_SUCCESS)
2318 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2319 
2320 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2321 		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2322 						true, NULL);
2323 		if (ret != I40E_SUCCESS)
2324 			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2325 	}
2326 
2327 	/* Enable the VLAN promiscuous mode. */
2328 	if (pf->vfs) {
2329 		for (i = 0; i < pf->vf_num; i++) {
2330 			vsi = pf->vfs[i].vsi;
2331 			i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2332 						     true, NULL);
2333 		}
2334 	}
2335 
2336 	/* Enable mac loopback mode */
2337 	if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2338 	    dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2339 		ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2340 		if (ret != I40E_SUCCESS) {
2341 			PMD_DRV_LOG(ERR, "fail to set loopback link");
2342 			goto err_up;
2343 		}
2344 	}
2345 
2346 	/* Apply link configure */
2347 	ret = i40e_apply_link_speed(dev);
2348 	if (I40E_SUCCESS != ret) {
2349 		PMD_DRV_LOG(ERR, "Fail to apply link setting");
2350 		goto err_up;
2351 	}
2352 
2353 	if (!rte_intr_allow_others(intr_handle)) {
2354 		rte_intr_callback_unregister(intr_handle,
2355 					     i40e_dev_interrupt_handler,
2356 					     (void *)dev);
2357 		/* configure and enable device interrupt */
2358 		i40e_pf_config_irq0(hw, FALSE);
2359 		i40e_pf_enable_irq0(hw);
2360 
2361 		if (dev->data->dev_conf.intr_conf.lsc != 0)
2362 			PMD_INIT_LOG(INFO,
2363 				"lsc won't enable because of no intr multiplex");
2364 	} else {
2365 		ret = i40e_aq_set_phy_int_mask(hw,
2366 					       ~(I40E_AQ_EVENT_LINK_UPDOWN |
2367 					       I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2368 					       I40E_AQ_EVENT_MEDIA_NA), NULL);
2369 		if (ret != I40E_SUCCESS)
2370 			PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2371 
2372 		/* Call get_link_info aq commond to enable/disable LSE */
2373 		i40e_dev_link_update(dev, 0);
2374 	}
2375 
2376 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2377 		rte_eal_alarm_set(I40E_ALARM_INTERVAL,
2378 				  i40e_dev_alarm_handler, dev);
2379 	} else {
2380 		/* enable uio intr after callback register */
2381 		rte_intr_enable(intr_handle);
2382 	}
2383 
2384 	i40e_filter_restore(pf);
2385 
2386 	if (pf->tm_conf.root && !pf->tm_conf.committed)
2387 		PMD_DRV_LOG(WARNING,
2388 			    "please call hierarchy_commit() "
2389 			    "before starting the port");
2390 
2391 	return I40E_SUCCESS;
2392 
2393 err_up:
2394 	i40e_dev_switch_queues(pf, FALSE);
2395 	i40e_dev_clear_queues(dev);
2396 
2397 	return ret;
2398 }
2399 
2400 static void
2401 i40e_dev_stop(struct rte_eth_dev *dev)
2402 {
2403 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2404 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2405 	struct i40e_vsi *main_vsi = pf->main_vsi;
2406 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2407 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2408 	int i;
2409 
2410 	if (hw->adapter_stopped == 1)
2411 		return;
2412 
2413 	if (dev->data->dev_conf.intr_conf.rxq == 0) {
2414 		rte_eal_alarm_cancel(i40e_dev_alarm_handler, dev);
2415 		rte_intr_enable(intr_handle);
2416 	}
2417 
2418 	/* Disable all queues */
2419 	i40e_dev_switch_queues(pf, FALSE);
2420 
2421 	/* un-map queues with interrupt registers */
2422 	i40e_vsi_disable_queues_intr(main_vsi);
2423 	i40e_vsi_queues_unbind_intr(main_vsi);
2424 
2425 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2426 		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2427 		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2428 	}
2429 
2430 	if (pf->fdir.fdir_vsi) {
2431 		i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2432 		i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2433 	}
2434 	/* Clear all queues and release memory */
2435 	i40e_dev_clear_queues(dev);
2436 
2437 	/* Set link down */
2438 	i40e_dev_set_link_down(dev);
2439 
2440 	if (!rte_intr_allow_others(intr_handle))
2441 		/* resume to the default handler */
2442 		rte_intr_callback_register(intr_handle,
2443 					   i40e_dev_interrupt_handler,
2444 					   (void *)dev);
2445 
2446 	/* Clean datapath event and queue/vec mapping */
2447 	rte_intr_efd_disable(intr_handle);
2448 	if (intr_handle->intr_vec) {
2449 		rte_free(intr_handle->intr_vec);
2450 		intr_handle->intr_vec = NULL;
2451 	}
2452 
2453 	/* reset hierarchy commit */
2454 	pf->tm_conf.committed = false;
2455 
2456 	hw->adapter_stopped = 1;
2457 
2458 	pf->adapter->rss_reta_updated = 0;
2459 }
2460 
2461 static void
2462 i40e_dev_close(struct rte_eth_dev *dev)
2463 {
2464 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2465 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2466 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2467 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2468 	struct i40e_mirror_rule *p_mirror;
2469 	uint32_t reg;
2470 	int i;
2471 	int ret;
2472 
2473 	PMD_INIT_FUNC_TRACE();
2474 
2475 	i40e_dev_stop(dev);
2476 
2477 	/* Remove all mirror rules */
2478 	while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2479 		ret = i40e_aq_del_mirror_rule(hw,
2480 					      pf->main_vsi->veb->seid,
2481 					      p_mirror->rule_type,
2482 					      p_mirror->entries,
2483 					      p_mirror->num_entries,
2484 					      p_mirror->id);
2485 		if (ret < 0)
2486 			PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2487 				    "status = %d, aq_err = %d.", ret,
2488 				    hw->aq.asq_last_status);
2489 
2490 		/* remove mirror software resource anyway */
2491 		TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2492 		rte_free(p_mirror);
2493 		pf->nb_mirror_rule--;
2494 	}
2495 
2496 	i40e_dev_free_queues(dev);
2497 
2498 	/* Disable interrupt */
2499 	i40e_pf_disable_irq0(hw);
2500 	rte_intr_disable(intr_handle);
2501 
2502 	/*
2503 	 * Only legacy filter API needs the following fdir config. So when the
2504 	 * legacy filter API is deprecated, the following code should also be
2505 	 * removed.
2506 	 */
2507 	i40e_fdir_teardown(pf);
2508 
2509 	/* shutdown and destroy the HMC */
2510 	i40e_shutdown_lan_hmc(hw);
2511 
2512 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2513 		i40e_vsi_release(pf->vmdq[i].vsi);
2514 		pf->vmdq[i].vsi = NULL;
2515 	}
2516 	rte_free(pf->vmdq);
2517 	pf->vmdq = NULL;
2518 
2519 	/* release all the existing VSIs and VEBs */
2520 	i40e_vsi_release(pf->main_vsi);
2521 
2522 	/* shutdown the adminq */
2523 	i40e_aq_queue_shutdown(hw, true);
2524 	i40e_shutdown_adminq(hw);
2525 
2526 	i40e_res_pool_destroy(&pf->qp_pool);
2527 	i40e_res_pool_destroy(&pf->msix_pool);
2528 
2529 	/* Disable flexible payload in global configuration */
2530 	if (!pf->support_multi_driver)
2531 		i40e_flex_payload_reg_set_default(hw);
2532 
2533 	/* force a PF reset to clean anything leftover */
2534 	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2535 	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2536 			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2537 	I40E_WRITE_FLUSH(hw);
2538 
2539 	hw->adapter_closed = 1;
2540 }
2541 
2542 /*
2543  * Reset PF device only to re-initialize resources in PMD layer
2544  */
2545 static int
2546 i40e_dev_reset(struct rte_eth_dev *dev)
2547 {
2548 	int ret;
2549 
2550 	/* When a DPDK PMD PF begin to reset PF port, it should notify all
2551 	 * its VF to make them align with it. The detailed notification
2552 	 * mechanism is PMD specific. As to i40e PF, it is rather complex.
2553 	 * To avoid unexpected behavior in VF, currently reset of PF with
2554 	 * SR-IOV activation is not supported. It might be supported later.
2555 	 */
2556 	if (dev->data->sriov.active)
2557 		return -ENOTSUP;
2558 
2559 	ret = eth_i40e_dev_uninit(dev);
2560 	if (ret)
2561 		return ret;
2562 
2563 	ret = eth_i40e_dev_init(dev, NULL);
2564 
2565 	return ret;
2566 }
2567 
2568 static void
2569 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2570 {
2571 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2572 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2573 	struct i40e_vsi *vsi = pf->main_vsi;
2574 	int status;
2575 
2576 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2577 						     true, NULL, true);
2578 	if (status != I40E_SUCCESS)
2579 		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2580 
2581 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2582 							TRUE, NULL);
2583 	if (status != I40E_SUCCESS)
2584 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2585 
2586 }
2587 
2588 static void
2589 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2590 {
2591 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2592 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2593 	struct i40e_vsi *vsi = pf->main_vsi;
2594 	int status;
2595 
2596 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2597 						     false, NULL, true);
2598 	if (status != I40E_SUCCESS)
2599 		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2600 
2601 	/* must remain in all_multicast mode */
2602 	if (dev->data->all_multicast == 1)
2603 		return;
2604 
2605 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2606 							false, NULL);
2607 	if (status != I40E_SUCCESS)
2608 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2609 }
2610 
2611 static void
2612 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2613 {
2614 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2615 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2616 	struct i40e_vsi *vsi = pf->main_vsi;
2617 	int ret;
2618 
2619 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2620 	if (ret != I40E_SUCCESS)
2621 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2622 }
2623 
2624 static void
2625 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2626 {
2627 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2628 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2629 	struct i40e_vsi *vsi = pf->main_vsi;
2630 	int ret;
2631 
2632 	if (dev->data->promiscuous == 1)
2633 		return; /* must remain in all_multicast mode */
2634 
2635 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2636 				vsi->seid, FALSE, NULL);
2637 	if (ret != I40E_SUCCESS)
2638 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2639 }
2640 
2641 /*
2642  * Set device link up.
2643  */
2644 static int
2645 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2646 {
2647 	/* re-apply link speed setting */
2648 	return i40e_apply_link_speed(dev);
2649 }
2650 
2651 /*
2652  * Set device link down.
2653  */
2654 static int
2655 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2656 {
2657 	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2658 	uint8_t abilities = 0;
2659 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2660 
2661 	abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2662 	return i40e_phy_conf_link(hw, abilities, speed, false);
2663 }
2664 
2665 static __rte_always_inline void
2666 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2667 {
2668 /* Link status registers and values*/
2669 #define I40E_PRTMAC_LINKSTA		0x001E2420
2670 #define I40E_REG_LINK_UP		0x40000080
2671 #define I40E_PRTMAC_MACC		0x001E24E0
2672 #define I40E_REG_MACC_25GB		0x00020000
2673 #define I40E_REG_SPEED_MASK		0x38000000
2674 #define I40E_REG_SPEED_100MB		0x00000000
2675 #define I40E_REG_SPEED_1GB		0x08000000
2676 #define I40E_REG_SPEED_10GB		0x10000000
2677 #define I40E_REG_SPEED_20GB		0x20000000
2678 #define I40E_REG_SPEED_25_40GB		0x18000000
2679 	uint32_t link_speed;
2680 	uint32_t reg_val;
2681 
2682 	reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2683 	link_speed = reg_val & I40E_REG_SPEED_MASK;
2684 	reg_val &= I40E_REG_LINK_UP;
2685 	link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2686 
2687 	if (unlikely(link->link_status == 0))
2688 		return;
2689 
2690 	/* Parse the link status */
2691 	switch (link_speed) {
2692 	case I40E_REG_SPEED_100MB:
2693 		link->link_speed = ETH_SPEED_NUM_100M;
2694 		break;
2695 	case I40E_REG_SPEED_1GB:
2696 		link->link_speed = ETH_SPEED_NUM_1G;
2697 		break;
2698 	case I40E_REG_SPEED_10GB:
2699 		link->link_speed = ETH_SPEED_NUM_10G;
2700 		break;
2701 	case I40E_REG_SPEED_20GB:
2702 		link->link_speed = ETH_SPEED_NUM_20G;
2703 		break;
2704 	case I40E_REG_SPEED_25_40GB:
2705 		reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2706 
2707 		if (reg_val & I40E_REG_MACC_25GB)
2708 			link->link_speed = ETH_SPEED_NUM_25G;
2709 		else
2710 			link->link_speed = ETH_SPEED_NUM_40G;
2711 
2712 		break;
2713 	default:
2714 		PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2715 		break;
2716 	}
2717 }
2718 
2719 static __rte_always_inline void
2720 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2721 	bool enable_lse, int wait_to_complete)
2722 {
2723 #define CHECK_INTERVAL             100  /* 100ms */
2724 #define MAX_REPEAT_TIME            10  /* 1s (10 * 100ms) in total */
2725 	uint32_t rep_cnt = MAX_REPEAT_TIME;
2726 	struct i40e_link_status link_status;
2727 	int status;
2728 
2729 	memset(&link_status, 0, sizeof(link_status));
2730 
2731 	do {
2732 		memset(&link_status, 0, sizeof(link_status));
2733 
2734 		/* Get link status information from hardware */
2735 		status = i40e_aq_get_link_info(hw, enable_lse,
2736 						&link_status, NULL);
2737 		if (unlikely(status != I40E_SUCCESS)) {
2738 			link->link_speed = ETH_SPEED_NUM_100M;
2739 			link->link_duplex = ETH_LINK_FULL_DUPLEX;
2740 			PMD_DRV_LOG(ERR, "Failed to get link info");
2741 			return;
2742 		}
2743 
2744 		link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2745 		if (!wait_to_complete || link->link_status)
2746 			break;
2747 
2748 		rte_delay_ms(CHECK_INTERVAL);
2749 	} while (--rep_cnt);
2750 
2751 	/* Parse the link status */
2752 	switch (link_status.link_speed) {
2753 	case I40E_LINK_SPEED_100MB:
2754 		link->link_speed = ETH_SPEED_NUM_100M;
2755 		break;
2756 	case I40E_LINK_SPEED_1GB:
2757 		link->link_speed = ETH_SPEED_NUM_1G;
2758 		break;
2759 	case I40E_LINK_SPEED_10GB:
2760 		link->link_speed = ETH_SPEED_NUM_10G;
2761 		break;
2762 	case I40E_LINK_SPEED_20GB:
2763 		link->link_speed = ETH_SPEED_NUM_20G;
2764 		break;
2765 	case I40E_LINK_SPEED_25GB:
2766 		link->link_speed = ETH_SPEED_NUM_25G;
2767 		break;
2768 	case I40E_LINK_SPEED_40GB:
2769 		link->link_speed = ETH_SPEED_NUM_40G;
2770 		break;
2771 	default:
2772 		link->link_speed = ETH_SPEED_NUM_100M;
2773 		break;
2774 	}
2775 }
2776 
2777 int
2778 i40e_dev_link_update(struct rte_eth_dev *dev,
2779 		     int wait_to_complete)
2780 {
2781 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2782 	struct rte_eth_link link;
2783 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2784 	int ret;
2785 
2786 	memset(&link, 0, sizeof(link));
2787 
2788 	/* i40e uses full duplex only */
2789 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
2790 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2791 			ETH_LINK_SPEED_FIXED);
2792 
2793 	if (!wait_to_complete && !enable_lse)
2794 		update_link_reg(hw, &link);
2795 	else
2796 		update_link_aq(hw, &link, enable_lse, wait_to_complete);
2797 
2798 	ret = rte_eth_linkstatus_set(dev, &link);
2799 	i40e_notify_all_vfs_link_status(dev);
2800 
2801 	return ret;
2802 }
2803 
2804 /* Get all the statistics of a VSI */
2805 void
2806 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2807 {
2808 	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2809 	struct i40e_eth_stats *nes = &vsi->eth_stats;
2810 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2811 	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2812 
2813 	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2814 			    vsi->offset_loaded, &oes->rx_bytes,
2815 			    &nes->rx_bytes);
2816 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2817 			    vsi->offset_loaded, &oes->rx_unicast,
2818 			    &nes->rx_unicast);
2819 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2820 			    vsi->offset_loaded, &oes->rx_multicast,
2821 			    &nes->rx_multicast);
2822 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2823 			    vsi->offset_loaded, &oes->rx_broadcast,
2824 			    &nes->rx_broadcast);
2825 	/* exclude CRC bytes */
2826 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2827 		nes->rx_broadcast) * ETHER_CRC_LEN;
2828 
2829 	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2830 			    &oes->rx_discards, &nes->rx_discards);
2831 	/* GLV_REPC not supported */
2832 	/* GLV_RMPC not supported */
2833 	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2834 			    &oes->rx_unknown_protocol,
2835 			    &nes->rx_unknown_protocol);
2836 	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2837 			    vsi->offset_loaded, &oes->tx_bytes,
2838 			    &nes->tx_bytes);
2839 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2840 			    vsi->offset_loaded, &oes->tx_unicast,
2841 			    &nes->tx_unicast);
2842 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2843 			    vsi->offset_loaded, &oes->tx_multicast,
2844 			    &nes->tx_multicast);
2845 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2846 			    vsi->offset_loaded,  &oes->tx_broadcast,
2847 			    &nes->tx_broadcast);
2848 	/* GLV_TDPC not supported */
2849 	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2850 			    &oes->tx_errors, &nes->tx_errors);
2851 	vsi->offset_loaded = true;
2852 
2853 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2854 		    vsi->vsi_id);
2855 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2856 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2857 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2858 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2859 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2860 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2861 		    nes->rx_unknown_protocol);
2862 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2863 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2864 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2865 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2866 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2867 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2868 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2869 		    vsi->vsi_id);
2870 }
2871 
2872 static void
2873 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2874 {
2875 	unsigned int i;
2876 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2877 	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2878 
2879 	/* Get rx/tx bytes of internal transfer packets */
2880 	i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2881 			I40E_GLV_GORCL(hw->port),
2882 			pf->offset_loaded,
2883 			&pf->internal_stats_offset.rx_bytes,
2884 			&pf->internal_stats.rx_bytes);
2885 
2886 	i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2887 			I40E_GLV_GOTCL(hw->port),
2888 			pf->offset_loaded,
2889 			&pf->internal_stats_offset.tx_bytes,
2890 			&pf->internal_stats.tx_bytes);
2891 	/* Get total internal rx packet count */
2892 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2893 			    I40E_GLV_UPRCL(hw->port),
2894 			    pf->offset_loaded,
2895 			    &pf->internal_stats_offset.rx_unicast,
2896 			    &pf->internal_stats.rx_unicast);
2897 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2898 			    I40E_GLV_MPRCL(hw->port),
2899 			    pf->offset_loaded,
2900 			    &pf->internal_stats_offset.rx_multicast,
2901 			    &pf->internal_stats.rx_multicast);
2902 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2903 			    I40E_GLV_BPRCL(hw->port),
2904 			    pf->offset_loaded,
2905 			    &pf->internal_stats_offset.rx_broadcast,
2906 			    &pf->internal_stats.rx_broadcast);
2907 	/* Get total internal tx packet count */
2908 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2909 			    I40E_GLV_UPTCL(hw->port),
2910 			    pf->offset_loaded,
2911 			    &pf->internal_stats_offset.tx_unicast,
2912 			    &pf->internal_stats.tx_unicast);
2913 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2914 			    I40E_GLV_MPTCL(hw->port),
2915 			    pf->offset_loaded,
2916 			    &pf->internal_stats_offset.tx_multicast,
2917 			    &pf->internal_stats.tx_multicast);
2918 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2919 			    I40E_GLV_BPTCL(hw->port),
2920 			    pf->offset_loaded,
2921 			    &pf->internal_stats_offset.tx_broadcast,
2922 			    &pf->internal_stats.tx_broadcast);
2923 
2924 	/* exclude CRC size */
2925 	pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2926 		pf->internal_stats.rx_multicast +
2927 		pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2928 
2929 	/* Get statistics of struct i40e_eth_stats */
2930 	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2931 			    I40E_GLPRT_GORCL(hw->port),
2932 			    pf->offset_loaded, &os->eth.rx_bytes,
2933 			    &ns->eth.rx_bytes);
2934 	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2935 			    I40E_GLPRT_UPRCL(hw->port),
2936 			    pf->offset_loaded, &os->eth.rx_unicast,
2937 			    &ns->eth.rx_unicast);
2938 	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2939 			    I40E_GLPRT_MPRCL(hw->port),
2940 			    pf->offset_loaded, &os->eth.rx_multicast,
2941 			    &ns->eth.rx_multicast);
2942 	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2943 			    I40E_GLPRT_BPRCL(hw->port),
2944 			    pf->offset_loaded, &os->eth.rx_broadcast,
2945 			    &ns->eth.rx_broadcast);
2946 	/* Workaround: CRC size should not be included in byte statistics,
2947 	 * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2948 	 */
2949 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2950 		ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2951 
2952 	/* exclude internal rx bytes
2953 	 * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
2954 	 * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negative
2955 	 * value.
2956 	 * same to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L].
2957 	 */
2958 	if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2959 		ns->eth.rx_bytes = 0;
2960 	else
2961 		ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2962 
2963 	if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2964 		ns->eth.rx_unicast = 0;
2965 	else
2966 		ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2967 
2968 	if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2969 		ns->eth.rx_multicast = 0;
2970 	else
2971 		ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2972 
2973 	if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2974 		ns->eth.rx_broadcast = 0;
2975 	else
2976 		ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
2977 
2978 	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2979 			    pf->offset_loaded, &os->eth.rx_discards,
2980 			    &ns->eth.rx_discards);
2981 	/* GLPRT_REPC not supported */
2982 	/* GLPRT_RMPC not supported */
2983 	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2984 			    pf->offset_loaded,
2985 			    &os->eth.rx_unknown_protocol,
2986 			    &ns->eth.rx_unknown_protocol);
2987 	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2988 			    I40E_GLPRT_GOTCL(hw->port),
2989 			    pf->offset_loaded, &os->eth.tx_bytes,
2990 			    &ns->eth.tx_bytes);
2991 	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2992 			    I40E_GLPRT_UPTCL(hw->port),
2993 			    pf->offset_loaded, &os->eth.tx_unicast,
2994 			    &ns->eth.tx_unicast);
2995 	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2996 			    I40E_GLPRT_MPTCL(hw->port),
2997 			    pf->offset_loaded, &os->eth.tx_multicast,
2998 			    &ns->eth.tx_multicast);
2999 	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
3000 			    I40E_GLPRT_BPTCL(hw->port),
3001 			    pf->offset_loaded, &os->eth.tx_broadcast,
3002 			    &ns->eth.tx_broadcast);
3003 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3004 		ns->eth.tx_broadcast) * ETHER_CRC_LEN;
3005 
3006 	/* exclude internal tx bytes
3007 	 * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before
3008 	 * I40E_GLPRT_GOTCH[H/L], so there is a small window that cause negative
3009 	 * value.
3010 	 * same to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L].
3011 	 */
3012 	if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
3013 		ns->eth.tx_bytes = 0;
3014 	else
3015 		ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
3016 
3017 	if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
3018 		ns->eth.tx_unicast = 0;
3019 	else
3020 		ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
3021 
3022 	if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
3023 		ns->eth.tx_multicast = 0;
3024 	else
3025 		ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
3026 
3027 	if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
3028 		ns->eth.tx_broadcast = 0;
3029 	else
3030 		ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
3031 
3032 	/* GLPRT_TEPC not supported */
3033 
3034 	/* additional port specific stats */
3035 	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
3036 			    pf->offset_loaded, &os->tx_dropped_link_down,
3037 			    &ns->tx_dropped_link_down);
3038 	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
3039 			    pf->offset_loaded, &os->crc_errors,
3040 			    &ns->crc_errors);
3041 	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
3042 			    pf->offset_loaded, &os->illegal_bytes,
3043 			    &ns->illegal_bytes);
3044 	/* GLPRT_ERRBC not supported */
3045 	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
3046 			    pf->offset_loaded, &os->mac_local_faults,
3047 			    &ns->mac_local_faults);
3048 	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
3049 			    pf->offset_loaded, &os->mac_remote_faults,
3050 			    &ns->mac_remote_faults);
3051 	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
3052 			    pf->offset_loaded, &os->rx_length_errors,
3053 			    &ns->rx_length_errors);
3054 	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
3055 			    pf->offset_loaded, &os->link_xon_rx,
3056 			    &ns->link_xon_rx);
3057 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
3058 			    pf->offset_loaded, &os->link_xoff_rx,
3059 			    &ns->link_xoff_rx);
3060 	for (i = 0; i < 8; i++) {
3061 		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
3062 				    pf->offset_loaded,
3063 				    &os->priority_xon_rx[i],
3064 				    &ns->priority_xon_rx[i]);
3065 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
3066 				    pf->offset_loaded,
3067 				    &os->priority_xoff_rx[i],
3068 				    &ns->priority_xoff_rx[i]);
3069 	}
3070 	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
3071 			    pf->offset_loaded, &os->link_xon_tx,
3072 			    &ns->link_xon_tx);
3073 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
3074 			    pf->offset_loaded, &os->link_xoff_tx,
3075 			    &ns->link_xoff_tx);
3076 	for (i = 0; i < 8; i++) {
3077 		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
3078 				    pf->offset_loaded,
3079 				    &os->priority_xon_tx[i],
3080 				    &ns->priority_xon_tx[i]);
3081 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
3082 				    pf->offset_loaded,
3083 				    &os->priority_xoff_tx[i],
3084 				    &ns->priority_xoff_tx[i]);
3085 		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
3086 				    pf->offset_loaded,
3087 				    &os->priority_xon_2_xoff[i],
3088 				    &ns->priority_xon_2_xoff[i]);
3089 	}
3090 	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
3091 			    I40E_GLPRT_PRC64L(hw->port),
3092 			    pf->offset_loaded, &os->rx_size_64,
3093 			    &ns->rx_size_64);
3094 	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
3095 			    I40E_GLPRT_PRC127L(hw->port),
3096 			    pf->offset_loaded, &os->rx_size_127,
3097 			    &ns->rx_size_127);
3098 	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
3099 			    I40E_GLPRT_PRC255L(hw->port),
3100 			    pf->offset_loaded, &os->rx_size_255,
3101 			    &ns->rx_size_255);
3102 	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
3103 			    I40E_GLPRT_PRC511L(hw->port),
3104 			    pf->offset_loaded, &os->rx_size_511,
3105 			    &ns->rx_size_511);
3106 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3107 			    I40E_GLPRT_PRC1023L(hw->port),
3108 			    pf->offset_loaded, &os->rx_size_1023,
3109 			    &ns->rx_size_1023);
3110 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3111 			    I40E_GLPRT_PRC1522L(hw->port),
3112 			    pf->offset_loaded, &os->rx_size_1522,
3113 			    &ns->rx_size_1522);
3114 	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3115 			    I40E_GLPRT_PRC9522L(hw->port),
3116 			    pf->offset_loaded, &os->rx_size_big,
3117 			    &ns->rx_size_big);
3118 	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3119 			    pf->offset_loaded, &os->rx_undersize,
3120 			    &ns->rx_undersize);
3121 	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3122 			    pf->offset_loaded, &os->rx_fragments,
3123 			    &ns->rx_fragments);
3124 	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3125 			    pf->offset_loaded, &os->rx_oversize,
3126 			    &ns->rx_oversize);
3127 	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3128 			    pf->offset_loaded, &os->rx_jabber,
3129 			    &ns->rx_jabber);
3130 	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3131 			    I40E_GLPRT_PTC64L(hw->port),
3132 			    pf->offset_loaded, &os->tx_size_64,
3133 			    &ns->tx_size_64);
3134 	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3135 			    I40E_GLPRT_PTC127L(hw->port),
3136 			    pf->offset_loaded, &os->tx_size_127,
3137 			    &ns->tx_size_127);
3138 	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3139 			    I40E_GLPRT_PTC255L(hw->port),
3140 			    pf->offset_loaded, &os->tx_size_255,
3141 			    &ns->tx_size_255);
3142 	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3143 			    I40E_GLPRT_PTC511L(hw->port),
3144 			    pf->offset_loaded, &os->tx_size_511,
3145 			    &ns->tx_size_511);
3146 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3147 			    I40E_GLPRT_PTC1023L(hw->port),
3148 			    pf->offset_loaded, &os->tx_size_1023,
3149 			    &ns->tx_size_1023);
3150 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3151 			    I40E_GLPRT_PTC1522L(hw->port),
3152 			    pf->offset_loaded, &os->tx_size_1522,
3153 			    &ns->tx_size_1522);
3154 	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3155 			    I40E_GLPRT_PTC9522L(hw->port),
3156 			    pf->offset_loaded, &os->tx_size_big,
3157 			    &ns->tx_size_big);
3158 	i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3159 			   pf->offset_loaded,
3160 			   &os->fd_sb_match, &ns->fd_sb_match);
3161 	/* GLPRT_MSPDC not supported */
3162 	/* GLPRT_XEC not supported */
3163 
3164 	pf->offset_loaded = true;
3165 
3166 	if (pf->main_vsi)
3167 		i40e_update_vsi_stats(pf->main_vsi);
3168 }
3169 
3170 /* Get all statistics of a port */
3171 static int
3172 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3173 {
3174 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3175 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3176 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3177 	struct i40e_vsi *vsi;
3178 	unsigned i;
3179 
3180 	/* call read registers - updates values, now write them to struct */
3181 	i40e_read_stats_registers(pf, hw);
3182 
3183 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3184 			pf->main_vsi->eth_stats.rx_multicast +
3185 			pf->main_vsi->eth_stats.rx_broadcast -
3186 			pf->main_vsi->eth_stats.rx_discards;
3187 	stats->opackets = ns->eth.tx_unicast +
3188 			ns->eth.tx_multicast +
3189 			ns->eth.tx_broadcast;
3190 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
3191 	stats->obytes   = ns->eth.tx_bytes;
3192 	stats->oerrors  = ns->eth.tx_errors +
3193 			pf->main_vsi->eth_stats.tx_errors;
3194 
3195 	/* Rx Errors */
3196 	stats->imissed  = ns->eth.rx_discards +
3197 			pf->main_vsi->eth_stats.rx_discards;
3198 	stats->ierrors  = ns->crc_errors +
3199 			ns->rx_length_errors + ns->rx_undersize +
3200 			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3201 
3202 	if (pf->vfs) {
3203 		for (i = 0; i < pf->vf_num; i++) {
3204 			vsi = pf->vfs[i].vsi;
3205 			i40e_update_vsi_stats(vsi);
3206 
3207 			stats->ipackets += (vsi->eth_stats.rx_unicast +
3208 					vsi->eth_stats.rx_multicast +
3209 					vsi->eth_stats.rx_broadcast -
3210 					vsi->eth_stats.rx_discards);
3211 			stats->ibytes   += vsi->eth_stats.rx_bytes;
3212 			stats->oerrors  += vsi->eth_stats.tx_errors;
3213 			stats->imissed  += vsi->eth_stats.rx_discards;
3214 		}
3215 	}
3216 
3217 	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3218 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
3219 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
3220 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
3221 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
3222 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
3223 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3224 		    ns->eth.rx_unknown_protocol);
3225 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
3226 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
3227 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
3228 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
3229 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
3230 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
3231 
3232 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
3233 		    ns->tx_dropped_link_down);
3234 	PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
3235 	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
3236 		    ns->illegal_bytes);
3237 	PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
3238 	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
3239 		    ns->mac_local_faults);
3240 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
3241 		    ns->mac_remote_faults);
3242 	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
3243 		    ns->rx_length_errors);
3244 	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
3245 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
3246 	for (i = 0; i < 8; i++) {
3247 		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
3248 				i, ns->priority_xon_rx[i]);
3249 		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
3250 				i, ns->priority_xoff_rx[i]);
3251 	}
3252 	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
3253 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
3254 	for (i = 0; i < 8; i++) {
3255 		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
3256 				i, ns->priority_xon_tx[i]);
3257 		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
3258 				i, ns->priority_xoff_tx[i]);
3259 		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
3260 				i, ns->priority_xon_2_xoff[i]);
3261 	}
3262 	PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
3263 	PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
3264 	PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
3265 	PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
3266 	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
3267 	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
3268 	PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
3269 	PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
3270 	PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
3271 	PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
3272 	PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
3273 	PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
3274 	PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
3275 	PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
3276 	PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
3277 	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
3278 	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
3279 	PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
3280 	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3281 			ns->mac_short_packet_dropped);
3282 	PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
3283 		    ns->checksum_error);
3284 	PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
3285 	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3286 	return 0;
3287 }
3288 
3289 /* Reset the statistics */
3290 static void
3291 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3292 {
3293 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3294 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3295 
3296 	/* Mark PF and VSI stats to update the offset, aka "reset" */
3297 	pf->offset_loaded = false;
3298 	if (pf->main_vsi)
3299 		pf->main_vsi->offset_loaded = false;
3300 
3301 	/* read the stats, reading current register values into offset */
3302 	i40e_read_stats_registers(pf, hw);
3303 }
3304 
3305 static uint32_t
3306 i40e_xstats_calc_num(void)
3307 {
3308 	return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3309 		(I40E_NB_RXQ_PRIO_XSTATS * 8) +
3310 		(I40E_NB_TXQ_PRIO_XSTATS * 8);
3311 }
3312 
3313 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3314 				     struct rte_eth_xstat_name *xstats_names,
3315 				     __rte_unused unsigned limit)
3316 {
3317 	unsigned count = 0;
3318 	unsigned i, prio;
3319 
3320 	if (xstats_names == NULL)
3321 		return i40e_xstats_calc_num();
3322 
3323 	/* Note: limit checked in rte_eth_xstats_names() */
3324 
3325 	/* Get stats from i40e_eth_stats struct */
3326 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3327 		snprintf(xstats_names[count].name,
3328 			 sizeof(xstats_names[count].name),
3329 			 "%s", rte_i40e_stats_strings[i].name);
3330 		count++;
3331 	}
3332 
3333 	/* Get individiual stats from i40e_hw_port struct */
3334 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3335 		snprintf(xstats_names[count].name,
3336 			sizeof(xstats_names[count].name),
3337 			 "%s", rte_i40e_hw_port_strings[i].name);
3338 		count++;
3339 	}
3340 
3341 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3342 		for (prio = 0; prio < 8; prio++) {
3343 			snprintf(xstats_names[count].name,
3344 				 sizeof(xstats_names[count].name),
3345 				 "rx_priority%u_%s", prio,
3346 				 rte_i40e_rxq_prio_strings[i].name);
3347 			count++;
3348 		}
3349 	}
3350 
3351 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3352 		for (prio = 0; prio < 8; prio++) {
3353 			snprintf(xstats_names[count].name,
3354 				 sizeof(xstats_names[count].name),
3355 				 "tx_priority%u_%s", prio,
3356 				 rte_i40e_txq_prio_strings[i].name);
3357 			count++;
3358 		}
3359 	}
3360 	return count;
3361 }
3362 
3363 static int
3364 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3365 		    unsigned n)
3366 {
3367 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3368 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3369 	unsigned i, count, prio;
3370 	struct i40e_hw_port_stats *hw_stats = &pf->stats;
3371 
3372 	count = i40e_xstats_calc_num();
3373 	if (n < count)
3374 		return count;
3375 
3376 	i40e_read_stats_registers(pf, hw);
3377 
3378 	if (xstats == NULL)
3379 		return 0;
3380 
3381 	count = 0;
3382 
3383 	/* Get stats from i40e_eth_stats struct */
3384 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3385 		xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3386 			rte_i40e_stats_strings[i].offset);
3387 		xstats[count].id = count;
3388 		count++;
3389 	}
3390 
3391 	/* Get individiual stats from i40e_hw_port struct */
3392 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3393 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3394 			rte_i40e_hw_port_strings[i].offset);
3395 		xstats[count].id = count;
3396 		count++;
3397 	}
3398 
3399 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3400 		for (prio = 0; prio < 8; prio++) {
3401 			xstats[count].value =
3402 				*(uint64_t *)(((char *)hw_stats) +
3403 				rte_i40e_rxq_prio_strings[i].offset +
3404 				(sizeof(uint64_t) * prio));
3405 			xstats[count].id = count;
3406 			count++;
3407 		}
3408 	}
3409 
3410 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3411 		for (prio = 0; prio < 8; prio++) {
3412 			xstats[count].value =
3413 				*(uint64_t *)(((char *)hw_stats) +
3414 				rte_i40e_txq_prio_strings[i].offset +
3415 				(sizeof(uint64_t) * prio));
3416 			xstats[count].id = count;
3417 			count++;
3418 		}
3419 	}
3420 
3421 	return count;
3422 }
3423 
3424 static int
3425 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3426 				 __rte_unused uint16_t queue_id,
3427 				 __rte_unused uint8_t stat_idx,
3428 				 __rte_unused uint8_t is_rx)
3429 {
3430 	PMD_INIT_FUNC_TRACE();
3431 
3432 	return -ENOSYS;
3433 }
3434 
3435 static int
3436 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3437 {
3438 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3439 	u32 full_ver;
3440 	u8 ver, patch;
3441 	u16 build;
3442 	int ret;
3443 
3444 	full_ver = hw->nvm.oem_ver;
3445 	ver = (u8)(full_ver >> 24);
3446 	build = (u16)((full_ver >> 8) & 0xffff);
3447 	patch = (u8)(full_ver & 0xff);
3448 
3449 	ret = snprintf(fw_version, fw_size,
3450 		 "%d.%d%d 0x%08x %d.%d.%d",
3451 		 ((hw->nvm.version >> 12) & 0xf),
3452 		 ((hw->nvm.version >> 4) & 0xff),
3453 		 (hw->nvm.version & 0xf), hw->nvm.eetrack,
3454 		 ver, build, patch);
3455 
3456 	ret += 1; /* add the size of '\0' */
3457 	if (fw_size < (u32)ret)
3458 		return ret;
3459 	else
3460 		return 0;
3461 }
3462 
3463 /*
3464  * When using NVM 6.01(for X710 XL710 XXV710)/3.33(for X722) or later,
3465  * the Rx data path does not hang if the FW LLDP is stopped.
3466  * return true if lldp need to stop
3467  * return false if we cannot disable the LLDP to avoid Rx data path blocking.
3468  */
3469 static bool
3470 i40e_need_stop_lldp(struct rte_eth_dev *dev)
3471 {
3472 	double nvm_ver;
3473 	char ver_str[64] = {0};
3474 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3475 
3476 	i40e_fw_version_get(dev, ver_str, 64);
3477 	nvm_ver = atof(ver_str);
3478 	if ((hw->mac.type == I40E_MAC_X722 ||
3479 	     hw->mac.type == I40E_MAC_X722_VF) &&
3480 	     ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(3.33 * 1000)))
3481 		return true;
3482 	else if ((uint32_t)(nvm_ver * 1000) >= (uint32_t)(6.01 * 1000))
3483 		return true;
3484 
3485 	return false;
3486 }
3487 
3488 static void
3489 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3490 {
3491 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3492 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3493 	struct i40e_vsi *vsi = pf->main_vsi;
3494 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3495 
3496 	dev_info->max_rx_queues = vsi->nb_qps;
3497 	dev_info->max_tx_queues = vsi->nb_qps;
3498 	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3499 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3500 	dev_info->max_mac_addrs = vsi->max_macaddrs;
3501 	dev_info->max_vfs = pci_dev->max_vfs;
3502 	dev_info->rx_queue_offload_capa = 0;
3503 	dev_info->rx_offload_capa =
3504 		DEV_RX_OFFLOAD_VLAN_STRIP |
3505 		DEV_RX_OFFLOAD_QINQ_STRIP |
3506 		DEV_RX_OFFLOAD_IPV4_CKSUM |
3507 		DEV_RX_OFFLOAD_UDP_CKSUM |
3508 		DEV_RX_OFFLOAD_TCP_CKSUM |
3509 		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3510 		DEV_RX_OFFLOAD_KEEP_CRC |
3511 		DEV_RX_OFFLOAD_SCATTER |
3512 		DEV_RX_OFFLOAD_VLAN_EXTEND |
3513 		DEV_RX_OFFLOAD_VLAN_FILTER |
3514 		DEV_RX_OFFLOAD_JUMBO_FRAME;
3515 
3516 	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3517 	dev_info->tx_offload_capa =
3518 		DEV_TX_OFFLOAD_VLAN_INSERT |
3519 		DEV_TX_OFFLOAD_QINQ_INSERT |
3520 		DEV_TX_OFFLOAD_IPV4_CKSUM |
3521 		DEV_TX_OFFLOAD_UDP_CKSUM |
3522 		DEV_TX_OFFLOAD_TCP_CKSUM |
3523 		DEV_TX_OFFLOAD_SCTP_CKSUM |
3524 		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3525 		DEV_TX_OFFLOAD_TCP_TSO |
3526 		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3527 		DEV_TX_OFFLOAD_GRE_TNL_TSO |
3528 		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3529 		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3530 		DEV_TX_OFFLOAD_MULTI_SEGS |
3531 		dev_info->tx_queue_offload_capa;
3532 	dev_info->dev_capa =
3533 		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3534 		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3535 
3536 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3537 						sizeof(uint32_t);
3538 	dev_info->reta_size = pf->hash_lut_size;
3539 	dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3540 
3541 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3542 		.rx_thresh = {
3543 			.pthresh = I40E_DEFAULT_RX_PTHRESH,
3544 			.hthresh = I40E_DEFAULT_RX_HTHRESH,
3545 			.wthresh = I40E_DEFAULT_RX_WTHRESH,
3546 		},
3547 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3548 		.rx_drop_en = 0,
3549 		.offloads = 0,
3550 	};
3551 
3552 	dev_info->default_txconf = (struct rte_eth_txconf) {
3553 		.tx_thresh = {
3554 			.pthresh = I40E_DEFAULT_TX_PTHRESH,
3555 			.hthresh = I40E_DEFAULT_TX_HTHRESH,
3556 			.wthresh = I40E_DEFAULT_TX_WTHRESH,
3557 		},
3558 		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3559 		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3560 		.offloads = 0,
3561 	};
3562 
3563 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3564 		.nb_max = I40E_MAX_RING_DESC,
3565 		.nb_min = I40E_MIN_RING_DESC,
3566 		.nb_align = I40E_ALIGN_RING_DESC,
3567 	};
3568 
3569 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3570 		.nb_max = I40E_MAX_RING_DESC,
3571 		.nb_min = I40E_MIN_RING_DESC,
3572 		.nb_align = I40E_ALIGN_RING_DESC,
3573 		.nb_seg_max = I40E_TX_MAX_SEG,
3574 		.nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3575 	};
3576 
3577 	if (pf->flags & I40E_FLAG_VMDQ) {
3578 		dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3579 		dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3580 		dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3581 						pf->max_nb_vmdq_vsi;
3582 		dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3583 		dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3584 		dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3585 	}
3586 
3587 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3588 		/* For XL710 */
3589 		dev_info->speed_capa = ETH_LINK_SPEED_40G;
3590 		dev_info->default_rxportconf.nb_queues = 2;
3591 		dev_info->default_txportconf.nb_queues = 2;
3592 		if (dev->data->nb_rx_queues == 1)
3593 			dev_info->default_rxportconf.ring_size = 2048;
3594 		else
3595 			dev_info->default_rxportconf.ring_size = 1024;
3596 		if (dev->data->nb_tx_queues == 1)
3597 			dev_info->default_txportconf.ring_size = 1024;
3598 		else
3599 			dev_info->default_txportconf.ring_size = 512;
3600 
3601 	} else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3602 		/* For XXV710 */
3603 		dev_info->speed_capa = ETH_LINK_SPEED_25G;
3604 		dev_info->default_rxportconf.nb_queues = 1;
3605 		dev_info->default_txportconf.nb_queues = 1;
3606 		dev_info->default_rxportconf.ring_size = 256;
3607 		dev_info->default_txportconf.ring_size = 256;
3608 	} else {
3609 		/* For X710 */
3610 		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3611 		dev_info->default_rxportconf.nb_queues = 1;
3612 		dev_info->default_txportconf.nb_queues = 1;
3613 		if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3614 			dev_info->default_rxportconf.ring_size = 512;
3615 			dev_info->default_txportconf.ring_size = 256;
3616 		} else {
3617 			dev_info->default_rxportconf.ring_size = 256;
3618 			dev_info->default_txportconf.ring_size = 256;
3619 		}
3620 	}
3621 	dev_info->default_rxportconf.burst_size = 32;
3622 	dev_info->default_txportconf.burst_size = 32;
3623 }
3624 
3625 static int
3626 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3627 {
3628 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3629 	struct i40e_vsi *vsi = pf->main_vsi;
3630 	PMD_INIT_FUNC_TRACE();
3631 
3632 	if (on)
3633 		return i40e_vsi_add_vlan(vsi, vlan_id);
3634 	else
3635 		return i40e_vsi_delete_vlan(vsi, vlan_id);
3636 }
3637 
3638 static int
3639 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3640 				enum rte_vlan_type vlan_type,
3641 				uint16_t tpid, int qinq)
3642 {
3643 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3644 	uint64_t reg_r = 0;
3645 	uint64_t reg_w = 0;
3646 	uint16_t reg_id = 3;
3647 	int ret;
3648 
3649 	if (qinq) {
3650 		if (vlan_type == ETH_VLAN_TYPE_OUTER)
3651 			reg_id = 2;
3652 	}
3653 
3654 	ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3655 					  &reg_r, NULL);
3656 	if (ret != I40E_SUCCESS) {
3657 		PMD_DRV_LOG(ERR,
3658 			   "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3659 			   reg_id);
3660 		return -EIO;
3661 	}
3662 	PMD_DRV_LOG(DEBUG,
3663 		    "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3664 		    reg_id, reg_r);
3665 
3666 	reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3667 	reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3668 	if (reg_r == reg_w) {
3669 		PMD_DRV_LOG(DEBUG, "No need to write");
3670 		return 0;
3671 	}
3672 
3673 	ret = i40e_aq_debug_write_global_register(hw,
3674 					   I40E_GL_SWT_L2TAGCTRL(reg_id),
3675 					   reg_w, NULL);
3676 	if (ret != I40E_SUCCESS) {
3677 		PMD_DRV_LOG(ERR,
3678 			    "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3679 			    reg_id);
3680 		return -EIO;
3681 	}
3682 	PMD_DRV_LOG(DEBUG,
3683 		    "Global register 0x%08x is changed with value 0x%08x",
3684 		    I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3685 
3686 	return 0;
3687 }
3688 
3689 static int
3690 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3691 		   enum rte_vlan_type vlan_type,
3692 		   uint16_t tpid)
3693 {
3694 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3695 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3696 	int qinq = dev->data->dev_conf.rxmode.offloads &
3697 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
3698 	int ret = 0;
3699 
3700 	if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3701 	     vlan_type != ETH_VLAN_TYPE_OUTER) ||
3702 	    (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3703 		PMD_DRV_LOG(ERR,
3704 			    "Unsupported vlan type.");
3705 		return -EINVAL;
3706 	}
3707 
3708 	if (pf->support_multi_driver) {
3709 		PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3710 		return -ENOTSUP;
3711 	}
3712 
3713 	/* 802.1ad frames ability is added in NVM API 1.7*/
3714 	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3715 		if (qinq) {
3716 			if (vlan_type == ETH_VLAN_TYPE_OUTER)
3717 				hw->first_tag = rte_cpu_to_le_16(tpid);
3718 			else if (vlan_type == ETH_VLAN_TYPE_INNER)
3719 				hw->second_tag = rte_cpu_to_le_16(tpid);
3720 		} else {
3721 			if (vlan_type == ETH_VLAN_TYPE_OUTER)
3722 				hw->second_tag = rte_cpu_to_le_16(tpid);
3723 		}
3724 		ret = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3725 		if (ret != I40E_SUCCESS) {
3726 			PMD_DRV_LOG(ERR,
3727 				    "Set switch config failed aq_err: %d",
3728 				    hw->aq.asq_last_status);
3729 			ret = -EIO;
3730 		}
3731 	} else
3732 		/* If NVM API < 1.7, keep the register setting */
3733 		ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3734 						      tpid, qinq);
3735 
3736 	return ret;
3737 }
3738 
3739 static int
3740 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3741 {
3742 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3743 	struct i40e_vsi *vsi = pf->main_vsi;
3744 	struct rte_eth_rxmode *rxmode;
3745 
3746 	rxmode = &dev->data->dev_conf.rxmode;
3747 	if (mask & ETH_VLAN_FILTER_MASK) {
3748 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3749 			i40e_vsi_config_vlan_filter(vsi, TRUE);
3750 		else
3751 			i40e_vsi_config_vlan_filter(vsi, FALSE);
3752 	}
3753 
3754 	if (mask & ETH_VLAN_STRIP_MASK) {
3755 		/* Enable or disable VLAN stripping */
3756 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3757 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
3758 		else
3759 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
3760 	}
3761 
3762 	if (mask & ETH_VLAN_EXTEND_MASK) {
3763 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
3764 			i40e_vsi_config_double_vlan(vsi, TRUE);
3765 			/* Set global registers with default ethertype. */
3766 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3767 					   ETHER_TYPE_VLAN);
3768 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3769 					   ETHER_TYPE_VLAN);
3770 		}
3771 		else
3772 			i40e_vsi_config_double_vlan(vsi, FALSE);
3773 	}
3774 
3775 	return 0;
3776 }
3777 
3778 static void
3779 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3780 			  __rte_unused uint16_t queue,
3781 			  __rte_unused int on)
3782 {
3783 	PMD_INIT_FUNC_TRACE();
3784 }
3785 
3786 static int
3787 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3788 {
3789 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3790 	struct i40e_vsi *vsi = pf->main_vsi;
3791 	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3792 	struct i40e_vsi_vlan_pvid_info info;
3793 
3794 	memset(&info, 0, sizeof(info));
3795 	info.on = on;
3796 	if (info.on)
3797 		info.config.pvid = pvid;
3798 	else {
3799 		info.config.reject.tagged =
3800 				data->dev_conf.txmode.hw_vlan_reject_tagged;
3801 		info.config.reject.untagged =
3802 				data->dev_conf.txmode.hw_vlan_reject_untagged;
3803 	}
3804 
3805 	return i40e_vsi_vlan_pvid_set(vsi, &info);
3806 }
3807 
3808 static int
3809 i40e_dev_led_on(struct rte_eth_dev *dev)
3810 {
3811 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3812 	uint32_t mode = i40e_led_get(hw);
3813 
3814 	if (mode == 0)
3815 		i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
3816 
3817 	return 0;
3818 }
3819 
3820 static int
3821 i40e_dev_led_off(struct rte_eth_dev *dev)
3822 {
3823 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3824 	uint32_t mode = i40e_led_get(hw);
3825 
3826 	if (mode != 0)
3827 		i40e_led_set(hw, 0, false);
3828 
3829 	return 0;
3830 }
3831 
3832 static int
3833 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3834 {
3835 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3836 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3837 
3838 	fc_conf->pause_time = pf->fc_conf.pause_time;
3839 
3840 	/* read out from register, in case they are modified by other port */
3841 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3842 		I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3843 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3844 		I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3845 
3846 	fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3847 	fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3848 
3849 	 /* Return current mode according to actual setting*/
3850 	switch (hw->fc.current_mode) {
3851 	case I40E_FC_FULL:
3852 		fc_conf->mode = RTE_FC_FULL;
3853 		break;
3854 	case I40E_FC_TX_PAUSE:
3855 		fc_conf->mode = RTE_FC_TX_PAUSE;
3856 		break;
3857 	case I40E_FC_RX_PAUSE:
3858 		fc_conf->mode = RTE_FC_RX_PAUSE;
3859 		break;
3860 	case I40E_FC_NONE:
3861 	default:
3862 		fc_conf->mode = RTE_FC_NONE;
3863 	};
3864 
3865 	return 0;
3866 }
3867 
3868 static int
3869 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3870 {
3871 	uint32_t mflcn_reg, fctrl_reg, reg;
3872 	uint32_t max_high_water;
3873 	uint8_t i, aq_failure;
3874 	int err;
3875 	struct i40e_hw *hw;
3876 	struct i40e_pf *pf;
3877 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3878 		[RTE_FC_NONE] = I40E_FC_NONE,
3879 		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3880 		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3881 		[RTE_FC_FULL] = I40E_FC_FULL
3882 	};
3883 
3884 	/* high_water field in the rte_eth_fc_conf using the kilobytes unit */
3885 
3886 	max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3887 	if ((fc_conf->high_water > max_high_water) ||
3888 			(fc_conf->high_water < fc_conf->low_water)) {
3889 		PMD_INIT_LOG(ERR,
3890 			"Invalid high/low water setup value in KB, High_water must be <= %d.",
3891 			max_high_water);
3892 		return -EINVAL;
3893 	}
3894 
3895 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3896 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3897 	hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3898 
3899 	pf->fc_conf.pause_time = fc_conf->pause_time;
3900 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3901 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3902 
3903 	PMD_INIT_FUNC_TRACE();
3904 
3905 	/* All the link flow control related enable/disable register
3906 	 * configuration is handle by the F/W
3907 	 */
3908 	err = i40e_set_fc(hw, &aq_failure, true);
3909 	if (err < 0)
3910 		return -ENOSYS;
3911 
3912 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3913 		/* Configure flow control refresh threshold,
3914 		 * the value for stat_tx_pause_refresh_timer[8]
3915 		 * is used for global pause operation.
3916 		 */
3917 
3918 		I40E_WRITE_REG(hw,
3919 			       I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3920 			       pf->fc_conf.pause_time);
3921 
3922 		/* configure the timer value included in transmitted pause
3923 		 * frame,
3924 		 * the value for stat_tx_pause_quanta[8] is used for global
3925 		 * pause operation
3926 		 */
3927 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3928 			       pf->fc_conf.pause_time);
3929 
3930 		fctrl_reg = I40E_READ_REG(hw,
3931 					  I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3932 
3933 		if (fc_conf->mac_ctrl_frame_fwd != 0)
3934 			fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3935 		else
3936 			fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3937 
3938 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3939 			       fctrl_reg);
3940 	} else {
3941 		/* Configure pause time (2 TCs per register) */
3942 		reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3943 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3944 			I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3945 
3946 		/* Configure flow control refresh threshold value */
3947 		I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3948 			       pf->fc_conf.pause_time / 2);
3949 
3950 		mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3951 
3952 		/* set or clear MFLCN.PMCF & MFLCN.DPF bits
3953 		 *depending on configuration
3954 		 */
3955 		if (fc_conf->mac_ctrl_frame_fwd != 0) {
3956 			mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3957 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3958 		} else {
3959 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3960 			mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3961 		}
3962 
3963 		I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3964 	}
3965 
3966 	if (!pf->support_multi_driver) {
3967 		/* config water marker both based on the packets and bytes */
3968 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
3969 				 (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3970 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3971 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
3972 				  (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3973 				 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3974 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
3975 				  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3976 				  << I40E_KILOSHIFT);
3977 		I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
3978 				   pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3979 				   << I40E_KILOSHIFT);
3980 	} else {
3981 		PMD_DRV_LOG(ERR,
3982 			    "Water marker configuration is not supported.");
3983 	}
3984 
3985 	I40E_WRITE_FLUSH(hw);
3986 
3987 	return 0;
3988 }
3989 
3990 static int
3991 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3992 			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3993 {
3994 	PMD_INIT_FUNC_TRACE();
3995 
3996 	return -ENOSYS;
3997 }
3998 
3999 /* Add a MAC address, and update filters */
4000 static int
4001 i40e_macaddr_add(struct rte_eth_dev *dev,
4002 		 struct ether_addr *mac_addr,
4003 		 __rte_unused uint32_t index,
4004 		 uint32_t pool)
4005 {
4006 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4007 	struct i40e_mac_filter_info mac_filter;
4008 	struct i40e_vsi *vsi;
4009 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
4010 	int ret;
4011 
4012 	/* If VMDQ not enabled or configured, return */
4013 	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
4014 			  !pf->nb_cfg_vmdq_vsi)) {
4015 		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
4016 			pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
4017 			pool);
4018 		return -ENOTSUP;
4019 	}
4020 
4021 	if (pool > pf->nb_cfg_vmdq_vsi) {
4022 		PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
4023 				pool, pf->nb_cfg_vmdq_vsi);
4024 		return -EINVAL;
4025 	}
4026 
4027 	rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
4028 	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4029 		mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4030 	else
4031 		mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
4032 
4033 	if (pool == 0)
4034 		vsi = pf->main_vsi;
4035 	else
4036 		vsi = pf->vmdq[pool - 1].vsi;
4037 
4038 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
4039 	if (ret != I40E_SUCCESS) {
4040 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4041 		return -ENODEV;
4042 	}
4043 	return 0;
4044 }
4045 
4046 /* Remove a MAC address, and update filters */
4047 static void
4048 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4049 {
4050 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4051 	struct i40e_vsi *vsi;
4052 	struct rte_eth_dev_data *data = dev->data;
4053 	struct ether_addr *macaddr;
4054 	int ret;
4055 	uint32_t i;
4056 	uint64_t pool_sel;
4057 
4058 	macaddr = &(data->mac_addrs[index]);
4059 
4060 	pool_sel = dev->data->mac_pool_sel[index];
4061 
4062 	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
4063 		if (pool_sel & (1ULL << i)) {
4064 			if (i == 0)
4065 				vsi = pf->main_vsi;
4066 			else {
4067 				/* No VMDQ pool enabled or configured */
4068 				if (!(pf->flags & I40E_FLAG_VMDQ) ||
4069 					(i > pf->nb_cfg_vmdq_vsi)) {
4070 					PMD_DRV_LOG(ERR,
4071 						"No VMDQ pool enabled/configured");
4072 					return;
4073 				}
4074 				vsi = pf->vmdq[i - 1].vsi;
4075 			}
4076 			ret = i40e_vsi_delete_mac(vsi, macaddr);
4077 
4078 			if (ret) {
4079 				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
4080 				return;
4081 			}
4082 		}
4083 	}
4084 }
4085 
4086 /* Set perfect match or hash match of MAC and VLAN for a VF */
4087 static int
4088 i40e_vf_mac_filter_set(struct i40e_pf *pf,
4089 		 struct rte_eth_mac_filter *filter,
4090 		 bool add)
4091 {
4092 	struct i40e_hw *hw;
4093 	struct i40e_mac_filter_info mac_filter;
4094 	struct ether_addr old_mac;
4095 	struct ether_addr *new_mac;
4096 	struct i40e_pf_vf *vf = NULL;
4097 	uint16_t vf_id;
4098 	int ret;
4099 
4100 	if (pf == NULL) {
4101 		PMD_DRV_LOG(ERR, "Invalid PF argument.");
4102 		return -EINVAL;
4103 	}
4104 	hw = I40E_PF_TO_HW(pf);
4105 
4106 	if (filter == NULL) {
4107 		PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
4108 		return -EINVAL;
4109 	}
4110 
4111 	new_mac = &filter->mac_addr;
4112 
4113 	if (is_zero_ether_addr(new_mac)) {
4114 		PMD_DRV_LOG(ERR, "Invalid ethernet address.");
4115 		return -EINVAL;
4116 	}
4117 
4118 	vf_id = filter->dst_id;
4119 
4120 	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
4121 		PMD_DRV_LOG(ERR, "Invalid argument.");
4122 		return -EINVAL;
4123 	}
4124 	vf = &pf->vfs[vf_id];
4125 
4126 	if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
4127 		PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
4128 		return -EINVAL;
4129 	}
4130 
4131 	if (add) {
4132 		rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
4133 		rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
4134 				ETHER_ADDR_LEN);
4135 		rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
4136 				 ETHER_ADDR_LEN);
4137 
4138 		mac_filter.filter_type = filter->filter_type;
4139 		ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
4140 		if (ret != I40E_SUCCESS) {
4141 			PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
4142 			return -1;
4143 		}
4144 		ether_addr_copy(new_mac, &pf->dev_addr);
4145 	} else {
4146 		rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
4147 				ETHER_ADDR_LEN);
4148 		ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
4149 		if (ret != I40E_SUCCESS) {
4150 			PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
4151 			return -1;
4152 		}
4153 
4154 		/* Clear device address as it has been removed */
4155 		if (is_same_ether_addr(&(pf->dev_addr), new_mac))
4156 			memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
4157 	}
4158 
4159 	return 0;
4160 }
4161 
4162 /* MAC filter handle */
4163 static int
4164 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
4165 		void *arg)
4166 {
4167 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4168 	struct rte_eth_mac_filter *filter;
4169 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4170 	int ret = I40E_NOT_SUPPORTED;
4171 
4172 	filter = (struct rte_eth_mac_filter *)(arg);
4173 
4174 	switch (filter_op) {
4175 	case RTE_ETH_FILTER_NOP:
4176 		ret = I40E_SUCCESS;
4177 		break;
4178 	case RTE_ETH_FILTER_ADD:
4179 		i40e_pf_disable_irq0(hw);
4180 		if (filter->is_vf)
4181 			ret = i40e_vf_mac_filter_set(pf, filter, 1);
4182 		i40e_pf_enable_irq0(hw);
4183 		break;
4184 	case RTE_ETH_FILTER_DELETE:
4185 		i40e_pf_disable_irq0(hw);
4186 		if (filter->is_vf)
4187 			ret = i40e_vf_mac_filter_set(pf, filter, 0);
4188 		i40e_pf_enable_irq0(hw);
4189 		break;
4190 	default:
4191 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
4192 		ret = I40E_ERR_PARAM;
4193 		break;
4194 	}
4195 
4196 	return ret;
4197 }
4198 
4199 static int
4200 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4201 {
4202 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4203 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4204 	uint32_t reg;
4205 	int ret;
4206 
4207 	if (!lut)
4208 		return -EINVAL;
4209 
4210 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4211 		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id,
4212 					  vsi->type != I40E_VSI_SRIOV,
4213 					  lut, lut_size);
4214 		if (ret) {
4215 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4216 			return ret;
4217 		}
4218 	} else {
4219 		uint32_t *lut_dw = (uint32_t *)lut;
4220 		uint16_t i, lut_size_dw = lut_size / 4;
4221 
4222 		if (vsi->type == I40E_VSI_SRIOV) {
4223 			for (i = 0; i <= lut_size_dw; i++) {
4224 				reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4225 				lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4226 			}
4227 		} else {
4228 			for (i = 0; i < lut_size_dw; i++)
4229 				lut_dw[i] = I40E_READ_REG(hw,
4230 							  I40E_PFQF_HLUT(i));
4231 		}
4232 	}
4233 
4234 	return 0;
4235 }
4236 
4237 int
4238 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4239 {
4240 	struct i40e_pf *pf;
4241 	struct i40e_hw *hw;
4242 	int ret;
4243 
4244 	if (!vsi || !lut)
4245 		return -EINVAL;
4246 
4247 	pf = I40E_VSI_TO_PF(vsi);
4248 	hw = I40E_VSI_TO_HW(vsi);
4249 
4250 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4251 		ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id,
4252 					  vsi->type != I40E_VSI_SRIOV,
4253 					  lut, lut_size);
4254 		if (ret) {
4255 			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4256 			return ret;
4257 		}
4258 	} else {
4259 		uint32_t *lut_dw = (uint32_t *)lut;
4260 		uint16_t i, lut_size_dw = lut_size / 4;
4261 
4262 		if (vsi->type == I40E_VSI_SRIOV) {
4263 			for (i = 0; i < lut_size_dw; i++)
4264 				I40E_WRITE_REG(
4265 					hw,
4266 					I40E_VFQF_HLUT1(i, vsi->user_param),
4267 					lut_dw[i]);
4268 		} else {
4269 			for (i = 0; i < lut_size_dw; i++)
4270 				I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4271 					       lut_dw[i]);
4272 		}
4273 		I40E_WRITE_FLUSH(hw);
4274 	}
4275 
4276 	return 0;
4277 }
4278 
4279 static int
4280 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4281 			 struct rte_eth_rss_reta_entry64 *reta_conf,
4282 			 uint16_t reta_size)
4283 {
4284 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4285 	uint16_t i, lut_size = pf->hash_lut_size;
4286 	uint16_t idx, shift;
4287 	uint8_t *lut;
4288 	int ret;
4289 
4290 	if (reta_size != lut_size ||
4291 		reta_size > ETH_RSS_RETA_SIZE_512) {
4292 		PMD_DRV_LOG(ERR,
4293 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4294 			reta_size, lut_size);
4295 		return -EINVAL;
4296 	}
4297 
4298 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4299 	if (!lut) {
4300 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4301 		return -ENOMEM;
4302 	}
4303 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4304 	if (ret)
4305 		goto out;
4306 	for (i = 0; i < reta_size; i++) {
4307 		idx = i / RTE_RETA_GROUP_SIZE;
4308 		shift = i % RTE_RETA_GROUP_SIZE;
4309 		if (reta_conf[idx].mask & (1ULL << shift))
4310 			lut[i] = reta_conf[idx].reta[shift];
4311 	}
4312 	ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4313 
4314 	pf->adapter->rss_reta_updated = 1;
4315 
4316 out:
4317 	rte_free(lut);
4318 
4319 	return ret;
4320 }
4321 
4322 static int
4323 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4324 			struct rte_eth_rss_reta_entry64 *reta_conf,
4325 			uint16_t reta_size)
4326 {
4327 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4328 	uint16_t i, lut_size = pf->hash_lut_size;
4329 	uint16_t idx, shift;
4330 	uint8_t *lut;
4331 	int ret;
4332 
4333 	if (reta_size != lut_size ||
4334 		reta_size > ETH_RSS_RETA_SIZE_512) {
4335 		PMD_DRV_LOG(ERR,
4336 			"The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4337 			reta_size, lut_size);
4338 		return -EINVAL;
4339 	}
4340 
4341 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4342 	if (!lut) {
4343 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4344 		return -ENOMEM;
4345 	}
4346 
4347 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4348 	if (ret)
4349 		goto out;
4350 	for (i = 0; i < reta_size; i++) {
4351 		idx = i / RTE_RETA_GROUP_SIZE;
4352 		shift = i % RTE_RETA_GROUP_SIZE;
4353 		if (reta_conf[idx].mask & (1ULL << shift))
4354 			reta_conf[idx].reta[shift] = lut[i];
4355 	}
4356 
4357 out:
4358 	rte_free(lut);
4359 
4360 	return ret;
4361 }
4362 
4363 /**
4364  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4365  * @hw:   pointer to the HW structure
4366  * @mem:  pointer to mem struct to fill out
4367  * @size: size of memory requested
4368  * @alignment: what to align the allocation to
4369  **/
4370 enum i40e_status_code
4371 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4372 			struct i40e_dma_mem *mem,
4373 			u64 size,
4374 			u32 alignment)
4375 {
4376 	const struct rte_memzone *mz = NULL;
4377 	char z_name[RTE_MEMZONE_NAMESIZE];
4378 
4379 	if (!mem)
4380 		return I40E_ERR_PARAM;
4381 
4382 	snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4383 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4384 			RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4385 	if (!mz)
4386 		return I40E_ERR_NO_MEMORY;
4387 
4388 	mem->size = size;
4389 	mem->va = mz->addr;
4390 	mem->pa = mz->iova;
4391 	mem->zone = (const void *)mz;
4392 	PMD_DRV_LOG(DEBUG,
4393 		"memzone %s allocated with physical address: %"PRIu64,
4394 		mz->name, mem->pa);
4395 
4396 	return I40E_SUCCESS;
4397 }
4398 
4399 /**
4400  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4401  * @hw:   pointer to the HW structure
4402  * @mem:  ptr to mem struct to free
4403  **/
4404 enum i40e_status_code
4405 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4406 		    struct i40e_dma_mem *mem)
4407 {
4408 	if (!mem)
4409 		return I40E_ERR_PARAM;
4410 
4411 	PMD_DRV_LOG(DEBUG,
4412 		"memzone %s to be freed with physical address: %"PRIu64,
4413 		((const struct rte_memzone *)mem->zone)->name, mem->pa);
4414 	rte_memzone_free((const struct rte_memzone *)mem->zone);
4415 	mem->zone = NULL;
4416 	mem->va = NULL;
4417 	mem->pa = (u64)0;
4418 
4419 	return I40E_SUCCESS;
4420 }
4421 
4422 /**
4423  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4424  * @hw:   pointer to the HW structure
4425  * @mem:  pointer to mem struct to fill out
4426  * @size: size of memory requested
4427  **/
4428 enum i40e_status_code
4429 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4430 			 struct i40e_virt_mem *mem,
4431 			 u32 size)
4432 {
4433 	if (!mem)
4434 		return I40E_ERR_PARAM;
4435 
4436 	mem->size = size;
4437 	mem->va = rte_zmalloc("i40e", size, 0);
4438 
4439 	if (mem->va)
4440 		return I40E_SUCCESS;
4441 	else
4442 		return I40E_ERR_NO_MEMORY;
4443 }
4444 
4445 /**
4446  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4447  * @hw:   pointer to the HW structure
4448  * @mem:  pointer to mem struct to free
4449  **/
4450 enum i40e_status_code
4451 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4452 		     struct i40e_virt_mem *mem)
4453 {
4454 	if (!mem)
4455 		return I40E_ERR_PARAM;
4456 
4457 	rte_free(mem->va);
4458 	mem->va = NULL;
4459 
4460 	return I40E_SUCCESS;
4461 }
4462 
4463 void
4464 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4465 {
4466 	rte_spinlock_init(&sp->spinlock);
4467 }
4468 
4469 void
4470 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4471 {
4472 	rte_spinlock_lock(&sp->spinlock);
4473 }
4474 
4475 void
4476 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4477 {
4478 	rte_spinlock_unlock(&sp->spinlock);
4479 }
4480 
4481 void
4482 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4483 {
4484 	return;
4485 }
4486 
4487 /**
4488  * Get the hardware capabilities, which will be parsed
4489  * and saved into struct i40e_hw.
4490  */
4491 static int
4492 i40e_get_cap(struct i40e_hw *hw)
4493 {
4494 	struct i40e_aqc_list_capabilities_element_resp *buf;
4495 	uint16_t len, size = 0;
4496 	int ret;
4497 
4498 	/* Calculate a huge enough buff for saving response data temporarily */
4499 	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4500 						I40E_MAX_CAP_ELE_NUM;
4501 	buf = rte_zmalloc("i40e", len, 0);
4502 	if (!buf) {
4503 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4504 		return I40E_ERR_NO_MEMORY;
4505 	}
4506 
4507 	/* Get, parse the capabilities and save it to hw */
4508 	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4509 			i40e_aqc_opc_list_func_capabilities, NULL);
4510 	if (ret != I40E_SUCCESS)
4511 		PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4512 
4513 	/* Free the temporary buffer after being used */
4514 	rte_free(buf);
4515 
4516 	return ret;
4517 }
4518 
4519 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF	4
4520 
4521 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4522 		const char *value,
4523 		void *opaque)
4524 {
4525 	struct i40e_pf *pf;
4526 	unsigned long num;
4527 	char *end;
4528 
4529 	pf = (struct i40e_pf *)opaque;
4530 	RTE_SET_USED(key);
4531 
4532 	errno = 0;
4533 	num = strtoul(value, &end, 0);
4534 	if (errno != 0 || end == value || *end != 0) {
4535 		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
4536 			    "kept the value = %hu", value, pf->vf_nb_qp_max);
4537 		return -(EINVAL);
4538 	}
4539 
4540 	if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4541 		pf->vf_nb_qp_max = (uint16_t)num;
4542 	else
4543 		/* here return 0 to make next valid same argument work */
4544 		PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be "
4545 			    "power of 2 and equal or less than 16 !, Now it is "
4546 			    "kept the value = %hu", num, pf->vf_nb_qp_max);
4547 
4548 	return 0;
4549 }
4550 
4551 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4552 {
4553 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4554 	struct rte_kvargs *kvlist;
4555 	int kvargs_count;
4556 
4557 	/* set default queue number per VF as 4 */
4558 	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4559 
4560 	if (dev->device->devargs == NULL)
4561 		return 0;
4562 
4563 	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4564 	if (kvlist == NULL)
4565 		return -(EINVAL);
4566 
4567 	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4568 	if (!kvargs_count) {
4569 		rte_kvargs_free(kvlist);
4570 		return 0;
4571 	}
4572 
4573 	if (kvargs_count > 1)
4574 		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
4575 			    "the first invalid or last valid one is used !",
4576 			    ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4577 
4578 	rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4579 			   i40e_pf_parse_vf_queue_number_handler, pf);
4580 
4581 	rte_kvargs_free(kvlist);
4582 
4583 	return 0;
4584 }
4585 
4586 static int
4587 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4588 {
4589 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4590 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4591 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4592 	uint16_t qp_count = 0, vsi_count = 0;
4593 
4594 	if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4595 		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4596 		return -EINVAL;
4597 	}
4598 
4599 	i40e_pf_config_vf_rxq_number(dev);
4600 
4601 	/* Add the parameter init for LFC */
4602 	pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4603 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4604 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4605 
4606 	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4607 	pf->max_num_vsi = hw->func_caps.num_vsis;
4608 	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4609 	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4610 
4611 	/* FDir queue/VSI allocation */
4612 	pf->fdir_qp_offset = 0;
4613 	if (hw->func_caps.fd) {
4614 		pf->flags |= I40E_FLAG_FDIR;
4615 		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4616 	} else {
4617 		pf->fdir_nb_qps = 0;
4618 	}
4619 	qp_count += pf->fdir_nb_qps;
4620 	vsi_count += 1;
4621 
4622 	/* LAN queue/VSI allocation */
4623 	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4624 	if (!hw->func_caps.rss) {
4625 		pf->lan_nb_qps = 1;
4626 	} else {
4627 		pf->flags |= I40E_FLAG_RSS;
4628 		if (hw->mac.type == I40E_MAC_X722)
4629 			pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4630 		pf->lan_nb_qps = pf->lan_nb_qp_max;
4631 	}
4632 	qp_count += pf->lan_nb_qps;
4633 	vsi_count += 1;
4634 
4635 	/* VF queue/VSI allocation */
4636 	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4637 	if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4638 		pf->flags |= I40E_FLAG_SRIOV;
4639 		pf->vf_nb_qps = pf->vf_nb_qp_max;
4640 		pf->vf_num = pci_dev->max_vfs;
4641 		PMD_DRV_LOG(DEBUG,
4642 			"%u VF VSIs, %u queues per VF VSI, in total %u queues",
4643 			pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4644 	} else {
4645 		pf->vf_nb_qps = 0;
4646 		pf->vf_num = 0;
4647 	}
4648 	qp_count += pf->vf_nb_qps * pf->vf_num;
4649 	vsi_count += pf->vf_num;
4650 
4651 	/* VMDq queue/VSI allocation */
4652 	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4653 	pf->vmdq_nb_qps = 0;
4654 	pf->max_nb_vmdq_vsi = 0;
4655 	if (hw->func_caps.vmdq) {
4656 		if (qp_count < hw->func_caps.num_tx_qp &&
4657 			vsi_count < hw->func_caps.num_vsis) {
4658 			pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4659 				qp_count) / pf->vmdq_nb_qp_max;
4660 
4661 			/* Limit the maximum number of VMDq vsi to the maximum
4662 			 * ethdev can support
4663 			 */
4664 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4665 				hw->func_caps.num_vsis - vsi_count);
4666 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4667 				ETH_64_POOLS);
4668 			if (pf->max_nb_vmdq_vsi) {
4669 				pf->flags |= I40E_FLAG_VMDQ;
4670 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4671 				PMD_DRV_LOG(DEBUG,
4672 					"%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4673 					pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4674 					pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4675 			} else {
4676 				PMD_DRV_LOG(INFO,
4677 					"No enough queues left for VMDq");
4678 			}
4679 		} else {
4680 			PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4681 		}
4682 	}
4683 	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4684 	vsi_count += pf->max_nb_vmdq_vsi;
4685 
4686 	if (hw->func_caps.dcb)
4687 		pf->flags |= I40E_FLAG_DCB;
4688 
4689 	if (qp_count > hw->func_caps.num_tx_qp) {
4690 		PMD_DRV_LOG(ERR,
4691 			"Failed to allocate %u queues, which exceeds the hardware maximum %u",
4692 			qp_count, hw->func_caps.num_tx_qp);
4693 		return -EINVAL;
4694 	}
4695 	if (vsi_count > hw->func_caps.num_vsis) {
4696 		PMD_DRV_LOG(ERR,
4697 			"Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4698 			vsi_count, hw->func_caps.num_vsis);
4699 		return -EINVAL;
4700 	}
4701 
4702 	return 0;
4703 }
4704 
4705 static int
4706 i40e_pf_get_switch_config(struct i40e_pf *pf)
4707 {
4708 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4709 	struct i40e_aqc_get_switch_config_resp *switch_config;
4710 	struct i40e_aqc_switch_config_element_resp *element;
4711 	uint16_t start_seid = 0, num_reported;
4712 	int ret;
4713 
4714 	switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4715 			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4716 	if (!switch_config) {
4717 		PMD_DRV_LOG(ERR, "Failed to allocated memory");
4718 		return -ENOMEM;
4719 	}
4720 
4721 	/* Get the switch configurations */
4722 	ret = i40e_aq_get_switch_config(hw, switch_config,
4723 		I40E_AQ_LARGE_BUF, &start_seid, NULL);
4724 	if (ret != I40E_SUCCESS) {
4725 		PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4726 		goto fail;
4727 	}
4728 	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4729 	if (num_reported != 1) { /* The number should be 1 */
4730 		PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4731 		goto fail;
4732 	}
4733 
4734 	/* Parse the switch configuration elements */
4735 	element = &(switch_config->element[0]);
4736 	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4737 		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4738 		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4739 	} else
4740 		PMD_DRV_LOG(INFO, "Unknown element type");
4741 
4742 fail:
4743 	rte_free(switch_config);
4744 
4745 	return ret;
4746 }
4747 
4748 static int
4749 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4750 			uint32_t num)
4751 {
4752 	struct pool_entry *entry;
4753 
4754 	if (pool == NULL || num == 0)
4755 		return -EINVAL;
4756 
4757 	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4758 	if (entry == NULL) {
4759 		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4760 		return -ENOMEM;
4761 	}
4762 
4763 	/* queue heap initialize */
4764 	pool->num_free = num;
4765 	pool->num_alloc = 0;
4766 	pool->base = base;
4767 	LIST_INIT(&pool->alloc_list);
4768 	LIST_INIT(&pool->free_list);
4769 
4770 	/* Initialize element  */
4771 	entry->base = 0;
4772 	entry->len = num;
4773 
4774 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
4775 	return 0;
4776 }
4777 
4778 static void
4779 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4780 {
4781 	struct pool_entry *entry, *next_entry;
4782 
4783 	if (pool == NULL)
4784 		return;
4785 
4786 	for (entry = LIST_FIRST(&pool->alloc_list);
4787 			entry && (next_entry = LIST_NEXT(entry, next), 1);
4788 			entry = next_entry) {
4789 		LIST_REMOVE(entry, next);
4790 		rte_free(entry);
4791 	}
4792 
4793 	for (entry = LIST_FIRST(&pool->free_list);
4794 			entry && (next_entry = LIST_NEXT(entry, next), 1);
4795 			entry = next_entry) {
4796 		LIST_REMOVE(entry, next);
4797 		rte_free(entry);
4798 	}
4799 
4800 	pool->num_free = 0;
4801 	pool->num_alloc = 0;
4802 	pool->base = 0;
4803 	LIST_INIT(&pool->alloc_list);
4804 	LIST_INIT(&pool->free_list);
4805 }
4806 
4807 static int
4808 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4809 		       uint32_t base)
4810 {
4811 	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4812 	uint32_t pool_offset;
4813 	int insert;
4814 
4815 	if (pool == NULL) {
4816 		PMD_DRV_LOG(ERR, "Invalid parameter");
4817 		return -EINVAL;
4818 	}
4819 
4820 	pool_offset = base - pool->base;
4821 	/* Lookup in alloc list */
4822 	LIST_FOREACH(entry, &pool->alloc_list, next) {
4823 		if (entry->base == pool_offset) {
4824 			valid_entry = entry;
4825 			LIST_REMOVE(entry, next);
4826 			break;
4827 		}
4828 	}
4829 
4830 	/* Not find, return */
4831 	if (valid_entry == NULL) {
4832 		PMD_DRV_LOG(ERR, "Failed to find entry");
4833 		return -EINVAL;
4834 	}
4835 
4836 	/**
4837 	 * Found it, move it to free list  and try to merge.
4838 	 * In order to make merge easier, always sort it by qbase.
4839 	 * Find adjacent prev and last entries.
4840 	 */
4841 	prev = next = NULL;
4842 	LIST_FOREACH(entry, &pool->free_list, next) {
4843 		if (entry->base > valid_entry->base) {
4844 			next = entry;
4845 			break;
4846 		}
4847 		prev = entry;
4848 	}
4849 
4850 	insert = 0;
4851 	/* Try to merge with next one*/
4852 	if (next != NULL) {
4853 		/* Merge with next one */
4854 		if (valid_entry->base + valid_entry->len == next->base) {
4855 			next->base = valid_entry->base;
4856 			next->len += valid_entry->len;
4857 			rte_free(valid_entry);
4858 			valid_entry = next;
4859 			insert = 1;
4860 		}
4861 	}
4862 
4863 	if (prev != NULL) {
4864 		/* Merge with previous one */
4865 		if (prev->base + prev->len == valid_entry->base) {
4866 			prev->len += valid_entry->len;
4867 			/* If it merge with next one, remove next node */
4868 			if (insert == 1) {
4869 				LIST_REMOVE(valid_entry, next);
4870 				rte_free(valid_entry);
4871 			} else {
4872 				rte_free(valid_entry);
4873 				insert = 1;
4874 			}
4875 		}
4876 	}
4877 
4878 	/* Not find any entry to merge, insert */
4879 	if (insert == 0) {
4880 		if (prev != NULL)
4881 			LIST_INSERT_AFTER(prev, valid_entry, next);
4882 		else if (next != NULL)
4883 			LIST_INSERT_BEFORE(next, valid_entry, next);
4884 		else /* It's empty list, insert to head */
4885 			LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4886 	}
4887 
4888 	pool->num_free += valid_entry->len;
4889 	pool->num_alloc -= valid_entry->len;
4890 
4891 	return 0;
4892 }
4893 
4894 static int
4895 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4896 		       uint16_t num)
4897 {
4898 	struct pool_entry *entry, *valid_entry;
4899 
4900 	if (pool == NULL || num == 0) {
4901 		PMD_DRV_LOG(ERR, "Invalid parameter");
4902 		return -EINVAL;
4903 	}
4904 
4905 	if (pool->num_free < num) {
4906 		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4907 			    num, pool->num_free);
4908 		return -ENOMEM;
4909 	}
4910 
4911 	valid_entry = NULL;
4912 	/* Lookup  in free list and find most fit one */
4913 	LIST_FOREACH(entry, &pool->free_list, next) {
4914 		if (entry->len >= num) {
4915 			/* Find best one */
4916 			if (entry->len == num) {
4917 				valid_entry = entry;
4918 				break;
4919 			}
4920 			if (valid_entry == NULL || valid_entry->len > entry->len)
4921 				valid_entry = entry;
4922 		}
4923 	}
4924 
4925 	/* Not find one to satisfy the request, return */
4926 	if (valid_entry == NULL) {
4927 		PMD_DRV_LOG(ERR, "No valid entry found");
4928 		return -ENOMEM;
4929 	}
4930 	/**
4931 	 * The entry have equal queue number as requested,
4932 	 * remove it from alloc_list.
4933 	 */
4934 	if (valid_entry->len == num) {
4935 		LIST_REMOVE(valid_entry, next);
4936 	} else {
4937 		/**
4938 		 * The entry have more numbers than requested,
4939 		 * create a new entry for alloc_list and minus its
4940 		 * queue base and number in free_list.
4941 		 */
4942 		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4943 		if (entry == NULL) {
4944 			PMD_DRV_LOG(ERR,
4945 				"Failed to allocate memory for resource pool");
4946 			return -ENOMEM;
4947 		}
4948 		entry->base = valid_entry->base;
4949 		entry->len = num;
4950 		valid_entry->base += num;
4951 		valid_entry->len -= num;
4952 		valid_entry = entry;
4953 	}
4954 
4955 	/* Insert it into alloc list, not sorted */
4956 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4957 
4958 	pool->num_free -= valid_entry->len;
4959 	pool->num_alloc += valid_entry->len;
4960 
4961 	return valid_entry->base + pool->base;
4962 }
4963 
4964 /**
4965  * bitmap_is_subset - Check whether src2 is subset of src1
4966  **/
4967 static inline int
4968 bitmap_is_subset(uint8_t src1, uint8_t src2)
4969 {
4970 	return !((src1 ^ src2) & src2);
4971 }
4972 
4973 static enum i40e_status_code
4974 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4975 {
4976 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4977 
4978 	/* If DCB is not supported, only default TC is supported */
4979 	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4980 		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4981 		return I40E_NOT_SUPPORTED;
4982 	}
4983 
4984 	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4985 		PMD_DRV_LOG(ERR,
4986 			"Enabled TC map 0x%x not applicable to HW support 0x%x",
4987 			hw->func_caps.enabled_tcmap, enabled_tcmap);
4988 		return I40E_NOT_SUPPORTED;
4989 	}
4990 	return I40E_SUCCESS;
4991 }
4992 
4993 int
4994 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4995 				struct i40e_vsi_vlan_pvid_info *info)
4996 {
4997 	struct i40e_hw *hw;
4998 	struct i40e_vsi_context ctxt;
4999 	uint8_t vlan_flags = 0;
5000 	int ret;
5001 
5002 	if (vsi == NULL || info == NULL) {
5003 		PMD_DRV_LOG(ERR, "invalid parameters");
5004 		return I40E_ERR_PARAM;
5005 	}
5006 
5007 	if (info->on) {
5008 		vsi->info.pvid = info->config.pvid;
5009 		/**
5010 		 * If insert pvid is enabled, only tagged pkts are
5011 		 * allowed to be sent out.
5012 		 */
5013 		vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
5014 				I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5015 	} else {
5016 		vsi->info.pvid = 0;
5017 		if (info->config.reject.tagged == 0)
5018 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
5019 
5020 		if (info->config.reject.untagged == 0)
5021 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
5022 	}
5023 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
5024 					I40E_AQ_VSI_PVLAN_MODE_MASK);
5025 	vsi->info.port_vlan_flags |= vlan_flags;
5026 	vsi->info.valid_sections =
5027 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5028 	memset(&ctxt, 0, sizeof(ctxt));
5029 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5030 	ctxt.seid = vsi->seid;
5031 
5032 	hw = I40E_VSI_TO_HW(vsi);
5033 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5034 	if (ret != I40E_SUCCESS)
5035 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
5036 
5037 	return ret;
5038 }
5039 
5040 static int
5041 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
5042 {
5043 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5044 	int i, ret;
5045 	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
5046 
5047 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5048 	if (ret != I40E_SUCCESS)
5049 		return ret;
5050 
5051 	if (!vsi->seid) {
5052 		PMD_DRV_LOG(ERR, "seid not valid");
5053 		return -EINVAL;
5054 	}
5055 
5056 	memset(&tc_bw_data, 0, sizeof(tc_bw_data));
5057 	tc_bw_data.tc_valid_bits = enabled_tcmap;
5058 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5059 		tc_bw_data.tc_bw_credits[i] =
5060 			(enabled_tcmap & (1 << i)) ? 1 : 0;
5061 
5062 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
5063 	if (ret != I40E_SUCCESS) {
5064 		PMD_DRV_LOG(ERR, "Failed to configure TC BW");
5065 		return ret;
5066 	}
5067 
5068 	rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
5069 					sizeof(vsi->info.qs_handle));
5070 	return I40E_SUCCESS;
5071 }
5072 
5073 static enum i40e_status_code
5074 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
5075 				 struct i40e_aqc_vsi_properties_data *info,
5076 				 uint8_t enabled_tcmap)
5077 {
5078 	enum i40e_status_code ret;
5079 	int i, total_tc = 0;
5080 	uint16_t qpnum_per_tc, bsf, qp_idx;
5081 
5082 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
5083 	if (ret != I40E_SUCCESS)
5084 		return ret;
5085 
5086 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5087 		if (enabled_tcmap & (1 << i))
5088 			total_tc++;
5089 	if (total_tc == 0)
5090 		total_tc = 1;
5091 	vsi->enabled_tc = enabled_tcmap;
5092 
5093 	/* Number of queues per enabled TC */
5094 	qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
5095 	qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
5096 	bsf = rte_bsf32(qpnum_per_tc);
5097 
5098 	/* Adjust the queue number to actual queues that can be applied */
5099 	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
5100 		vsi->nb_qps = qpnum_per_tc * total_tc;
5101 
5102 	/**
5103 	 * Configure TC and queue mapping parameters, for enabled TC,
5104 	 * allocate qpnum_per_tc queues to this traffic. For disabled TC,
5105 	 * default queue will serve it.
5106 	 */
5107 	qp_idx = 0;
5108 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5109 		if (vsi->enabled_tc & (1 << i)) {
5110 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
5111 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5112 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
5113 			qp_idx += qpnum_per_tc;
5114 		} else
5115 			info->tc_mapping[i] = 0;
5116 	}
5117 
5118 	/* Associate queue number with VSI */
5119 	if (vsi->type == I40E_VSI_SRIOV) {
5120 		info->mapping_flags |=
5121 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
5122 		for (i = 0; i < vsi->nb_qps; i++)
5123 			info->queue_mapping[i] =
5124 				rte_cpu_to_le_16(vsi->base_queue + i);
5125 	} else {
5126 		info->mapping_flags |=
5127 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5128 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
5129 	}
5130 	info->valid_sections |=
5131 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
5132 
5133 	return I40E_SUCCESS;
5134 }
5135 
5136 static int
5137 i40e_veb_release(struct i40e_veb *veb)
5138 {
5139 	struct i40e_vsi *vsi;
5140 	struct i40e_hw *hw;
5141 
5142 	if (veb == NULL)
5143 		return -EINVAL;
5144 
5145 	if (!TAILQ_EMPTY(&veb->head)) {
5146 		PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
5147 		return -EACCES;
5148 	}
5149 	/* associate_vsi field is NULL for floating VEB */
5150 	if (veb->associate_vsi != NULL) {
5151 		vsi = veb->associate_vsi;
5152 		hw = I40E_VSI_TO_HW(vsi);
5153 
5154 		vsi->uplink_seid = veb->uplink_seid;
5155 		vsi->veb = NULL;
5156 	} else {
5157 		veb->associate_pf->main_vsi->floating_veb = NULL;
5158 		hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5159 	}
5160 
5161 	i40e_aq_delete_element(hw, veb->seid, NULL);
5162 	rte_free(veb);
5163 	return I40E_SUCCESS;
5164 }
5165 
5166 /* Setup a veb */
5167 static struct i40e_veb *
5168 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5169 {
5170 	struct i40e_veb *veb;
5171 	int ret;
5172 	struct i40e_hw *hw;
5173 
5174 	if (pf == NULL) {
5175 		PMD_DRV_LOG(ERR,
5176 			    "veb setup failed, associated PF shouldn't null");
5177 		return NULL;
5178 	}
5179 	hw = I40E_PF_TO_HW(pf);
5180 
5181 	veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5182 	if (!veb) {
5183 		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5184 		goto fail;
5185 	}
5186 
5187 	veb->associate_vsi = vsi;
5188 	veb->associate_pf = pf;
5189 	TAILQ_INIT(&veb->head);
5190 	veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5191 
5192 	/* create floating veb if vsi is NULL */
5193 	if (vsi != NULL) {
5194 		ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5195 				      I40E_DEFAULT_TCMAP, false,
5196 				      &veb->seid, false, NULL);
5197 	} else {
5198 		ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5199 				      true, &veb->seid, false, NULL);
5200 	}
5201 
5202 	if (ret != I40E_SUCCESS) {
5203 		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5204 			    hw->aq.asq_last_status);
5205 		goto fail;
5206 	}
5207 	veb->enabled_tc = I40E_DEFAULT_TCMAP;
5208 
5209 	/* get statistics index */
5210 	ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5211 				&veb->stats_idx, NULL, NULL, NULL);
5212 	if (ret != I40E_SUCCESS) {
5213 		PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5214 			    hw->aq.asq_last_status);
5215 		goto fail;
5216 	}
5217 	/* Get VEB bandwidth, to be implemented */
5218 	/* Now associated vsi binding to the VEB, set uplink to this VEB */
5219 	if (vsi)
5220 		vsi->uplink_seid = veb->seid;
5221 
5222 	return veb;
5223 fail:
5224 	rte_free(veb);
5225 	return NULL;
5226 }
5227 
5228 int
5229 i40e_vsi_release(struct i40e_vsi *vsi)
5230 {
5231 	struct i40e_pf *pf;
5232 	struct i40e_hw *hw;
5233 	struct i40e_vsi_list *vsi_list;
5234 	void *temp;
5235 	int ret;
5236 	struct i40e_mac_filter *f;
5237 	uint16_t user_param;
5238 
5239 	if (!vsi)
5240 		return I40E_SUCCESS;
5241 
5242 	if (!vsi->adapter)
5243 		return -EFAULT;
5244 
5245 	user_param = vsi->user_param;
5246 
5247 	pf = I40E_VSI_TO_PF(vsi);
5248 	hw = I40E_VSI_TO_HW(vsi);
5249 
5250 	/* VSI has child to attach, release child first */
5251 	if (vsi->veb) {
5252 		TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5253 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5254 				return -1;
5255 		}
5256 		i40e_veb_release(vsi->veb);
5257 	}
5258 
5259 	if (vsi->floating_veb) {
5260 		TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5261 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5262 				return -1;
5263 		}
5264 	}
5265 
5266 	/* Remove all macvlan filters of the VSI */
5267 	i40e_vsi_remove_all_macvlan_filter(vsi);
5268 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5269 		rte_free(f);
5270 
5271 	if (vsi->type != I40E_VSI_MAIN &&
5272 	    ((vsi->type != I40E_VSI_SRIOV) ||
5273 	    !pf->floating_veb_list[user_param])) {
5274 		/* Remove vsi from parent's sibling list */
5275 		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5276 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5277 			return I40E_ERR_PARAM;
5278 		}
5279 		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5280 				&vsi->sib_vsi_list, list);
5281 
5282 		/* Remove all switch element of the VSI */
5283 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5284 		if (ret != I40E_SUCCESS)
5285 			PMD_DRV_LOG(ERR, "Failed to delete element");
5286 	}
5287 
5288 	if ((vsi->type == I40E_VSI_SRIOV) &&
5289 	    pf->floating_veb_list[user_param]) {
5290 		/* Remove vsi from parent's sibling list */
5291 		if (vsi->parent_vsi == NULL ||
5292 		    vsi->parent_vsi->floating_veb == NULL) {
5293 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5294 			return I40E_ERR_PARAM;
5295 		}
5296 		TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5297 			     &vsi->sib_vsi_list, list);
5298 
5299 		/* Remove all switch element of the VSI */
5300 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5301 		if (ret != I40E_SUCCESS)
5302 			PMD_DRV_LOG(ERR, "Failed to delete element");
5303 	}
5304 
5305 	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5306 
5307 	if (vsi->type != I40E_VSI_SRIOV)
5308 		i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5309 	rte_free(vsi);
5310 
5311 	return I40E_SUCCESS;
5312 }
5313 
5314 static int
5315 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5316 {
5317 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5318 	struct i40e_aqc_remove_macvlan_element_data def_filter;
5319 	struct i40e_mac_filter_info filter;
5320 	int ret;
5321 
5322 	if (vsi->type != I40E_VSI_MAIN)
5323 		return I40E_ERR_CONFIG;
5324 	memset(&def_filter, 0, sizeof(def_filter));
5325 	rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5326 					ETH_ADDR_LEN);
5327 	def_filter.vlan_tag = 0;
5328 	def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5329 				I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5330 	ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5331 	if (ret != I40E_SUCCESS) {
5332 		struct i40e_mac_filter *f;
5333 		struct ether_addr *mac;
5334 
5335 		PMD_DRV_LOG(DEBUG,
5336 			    "Cannot remove the default macvlan filter");
5337 		/* It needs to add the permanent mac into mac list */
5338 		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5339 		if (f == NULL) {
5340 			PMD_DRV_LOG(ERR, "failed to allocate memory");
5341 			return I40E_ERR_NO_MEMORY;
5342 		}
5343 		mac = &f->mac_info.mac_addr;
5344 		rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5345 				ETH_ADDR_LEN);
5346 		f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5347 		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5348 		vsi->mac_num++;
5349 
5350 		return ret;
5351 	}
5352 	rte_memcpy(&filter.mac_addr,
5353 		(struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5354 	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5355 	return i40e_vsi_add_mac(vsi, &filter);
5356 }
5357 
5358 /*
5359  * i40e_vsi_get_bw_config - Query VSI BW Information
5360  * @vsi: the VSI to be queried
5361  *
5362  * Returns 0 on success, negative value on failure
5363  */
5364 static enum i40e_status_code
5365 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5366 {
5367 	struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5368 	struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5369 	struct i40e_hw *hw = &vsi->adapter->hw;
5370 	i40e_status ret;
5371 	int i;
5372 	uint32_t bw_max;
5373 
5374 	memset(&bw_config, 0, sizeof(bw_config));
5375 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5376 	if (ret != I40E_SUCCESS) {
5377 		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5378 			    hw->aq.asq_last_status);
5379 		return ret;
5380 	}
5381 
5382 	memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5383 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5384 					&ets_sla_config, NULL);
5385 	if (ret != I40E_SUCCESS) {
5386 		PMD_DRV_LOG(ERR,
5387 			"VSI failed to get TC bandwdith configuration %u",
5388 			hw->aq.asq_last_status);
5389 		return ret;
5390 	}
5391 
5392 	/* store and print out BW info */
5393 	vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5394 	vsi->bw_info.bw_max = bw_config.max_bw;
5395 	PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5396 	PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5397 	bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5398 		    (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5399 		     I40E_16_BIT_WIDTH);
5400 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5401 		vsi->bw_info.bw_ets_share_credits[i] =
5402 				ets_sla_config.share_credits[i];
5403 		vsi->bw_info.bw_ets_credits[i] =
5404 				rte_le_to_cpu_16(ets_sla_config.credits[i]);
5405 		/* 4 bits per TC, 4th bit is reserved */
5406 		vsi->bw_info.bw_ets_max[i] =
5407 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5408 				  RTE_LEN2MASK(3, uint8_t));
5409 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5410 			    vsi->bw_info.bw_ets_share_credits[i]);
5411 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5412 			    vsi->bw_info.bw_ets_credits[i]);
5413 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5414 			    vsi->bw_info.bw_ets_max[i]);
5415 	}
5416 
5417 	return I40E_SUCCESS;
5418 }
5419 
5420 /* i40e_enable_pf_lb
5421  * @pf: pointer to the pf structure
5422  *
5423  * allow loopback on pf
5424  */
5425 static inline void
5426 i40e_enable_pf_lb(struct i40e_pf *pf)
5427 {
5428 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5429 	struct i40e_vsi_context ctxt;
5430 	int ret;
5431 
5432 	/* Use the FW API if FW >= v5.0 */
5433 	if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
5434 		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5435 		return;
5436 	}
5437 
5438 	memset(&ctxt, 0, sizeof(ctxt));
5439 	ctxt.seid = pf->main_vsi_seid;
5440 	ctxt.pf_num = hw->pf_id;
5441 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5442 	if (ret) {
5443 		PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5444 			    ret, hw->aq.asq_last_status);
5445 		return;
5446 	}
5447 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5448 	ctxt.info.valid_sections =
5449 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5450 	ctxt.info.switch_id |=
5451 		rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5452 
5453 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5454 	if (ret)
5455 		PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5456 			    hw->aq.asq_last_status);
5457 }
5458 
5459 /* Setup a VSI */
5460 struct i40e_vsi *
5461 i40e_vsi_setup(struct i40e_pf *pf,
5462 	       enum i40e_vsi_type type,
5463 	       struct i40e_vsi *uplink_vsi,
5464 	       uint16_t user_param)
5465 {
5466 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5467 	struct i40e_vsi *vsi;
5468 	struct i40e_mac_filter_info filter;
5469 	int ret;
5470 	struct i40e_vsi_context ctxt;
5471 	struct ether_addr broadcast =
5472 		{.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5473 
5474 	if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5475 	    uplink_vsi == NULL) {
5476 		PMD_DRV_LOG(ERR,
5477 			"VSI setup failed, VSI link shouldn't be NULL");
5478 		return NULL;
5479 	}
5480 
5481 	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5482 		PMD_DRV_LOG(ERR,
5483 			"VSI setup failed, MAIN VSI uplink VSI should be NULL");
5484 		return NULL;
5485 	}
5486 
5487 	/* two situations
5488 	 * 1.type is not MAIN and uplink vsi is not NULL
5489 	 * If uplink vsi didn't setup VEB, create one first under veb field
5490 	 * 2.type is SRIOV and the uplink is NULL
5491 	 * If floating VEB is NULL, create one veb under floating veb field
5492 	 */
5493 
5494 	if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5495 	    uplink_vsi->veb == NULL) {
5496 		uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5497 
5498 		if (uplink_vsi->veb == NULL) {
5499 			PMD_DRV_LOG(ERR, "VEB setup failed");
5500 			return NULL;
5501 		}
5502 		/* set ALLOWLOOPBACk on pf, when veb is created */
5503 		i40e_enable_pf_lb(pf);
5504 	}
5505 
5506 	if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5507 	    pf->main_vsi->floating_veb == NULL) {
5508 		pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5509 
5510 		if (pf->main_vsi->floating_veb == NULL) {
5511 			PMD_DRV_LOG(ERR, "VEB setup failed");
5512 			return NULL;
5513 		}
5514 	}
5515 
5516 	vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5517 	if (!vsi) {
5518 		PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5519 		return NULL;
5520 	}
5521 	TAILQ_INIT(&vsi->mac_list);
5522 	vsi->type = type;
5523 	vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5524 	vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5525 	vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5526 	vsi->user_param = user_param;
5527 	vsi->vlan_anti_spoof_on = 0;
5528 	vsi->vlan_filter_on = 0;
5529 	/* Allocate queues */
5530 	switch (vsi->type) {
5531 	case I40E_VSI_MAIN  :
5532 		vsi->nb_qps = pf->lan_nb_qps;
5533 		break;
5534 	case I40E_VSI_SRIOV :
5535 		vsi->nb_qps = pf->vf_nb_qps;
5536 		break;
5537 	case I40E_VSI_VMDQ2:
5538 		vsi->nb_qps = pf->vmdq_nb_qps;
5539 		break;
5540 	case I40E_VSI_FDIR:
5541 		vsi->nb_qps = pf->fdir_nb_qps;
5542 		break;
5543 	default:
5544 		goto fail_mem;
5545 	}
5546 	/*
5547 	 * The filter status descriptor is reported in rx queue 0,
5548 	 * while the tx queue for fdir filter programming has no
5549 	 * such constraints, can be non-zero queues.
5550 	 * To simplify it, choose FDIR vsi use queue 0 pair.
5551 	 * To make sure it will use queue 0 pair, queue allocation
5552 	 * need be done before this function is called
5553 	 */
5554 	if (type != I40E_VSI_FDIR) {
5555 		ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5556 			if (ret < 0) {
5557 				PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5558 						vsi->seid, ret);
5559 				goto fail_mem;
5560 			}
5561 			vsi->base_queue = ret;
5562 	} else
5563 		vsi->base_queue = I40E_FDIR_QUEUE_ID;
5564 
5565 	/* VF has MSIX interrupt in VF range, don't allocate here */
5566 	if (type == I40E_VSI_MAIN) {
5567 		if (pf->support_multi_driver) {
5568 			/* If support multi-driver, need to use INT0 instead of
5569 			 * allocating from msix pool. The Msix pool is init from
5570 			 * INT1, so it's OK just set msix_intr to 0 and nb_msix
5571 			 * to 1 without calling i40e_res_pool_alloc.
5572 			 */
5573 			vsi->msix_intr = 0;
5574 			vsi->nb_msix = 1;
5575 		} else {
5576 			ret = i40e_res_pool_alloc(&pf->msix_pool,
5577 						  RTE_MIN(vsi->nb_qps,
5578 						     RTE_MAX_RXTX_INTR_VEC_ID));
5579 			if (ret < 0) {
5580 				PMD_DRV_LOG(ERR,
5581 					    "VSI MAIN %d get heap failed %d",
5582 					    vsi->seid, ret);
5583 				goto fail_queue_alloc;
5584 			}
5585 			vsi->msix_intr = ret;
5586 			vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5587 					       RTE_MAX_RXTX_INTR_VEC_ID);
5588 		}
5589 	} else if (type != I40E_VSI_SRIOV) {
5590 		ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5591 		if (ret < 0) {
5592 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5593 			goto fail_queue_alloc;
5594 		}
5595 		vsi->msix_intr = ret;
5596 		vsi->nb_msix = 1;
5597 	} else {
5598 		vsi->msix_intr = 0;
5599 		vsi->nb_msix = 0;
5600 	}
5601 
5602 	/* Add VSI */
5603 	if (type == I40E_VSI_MAIN) {
5604 		/* For main VSI, no need to add since it's default one */
5605 		vsi->uplink_seid = pf->mac_seid;
5606 		vsi->seid = pf->main_vsi_seid;
5607 		/* Bind queues with specific MSIX interrupt */
5608 		/**
5609 		 * Needs 2 interrupt at least, one for misc cause which will
5610 		 * enabled from OS side, Another for queues binding the
5611 		 * interrupt from device side only.
5612 		 */
5613 
5614 		/* Get default VSI parameters from hardware */
5615 		memset(&ctxt, 0, sizeof(ctxt));
5616 		ctxt.seid = vsi->seid;
5617 		ctxt.pf_num = hw->pf_id;
5618 		ctxt.uplink_seid = vsi->uplink_seid;
5619 		ctxt.vf_num = 0;
5620 		ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5621 		if (ret != I40E_SUCCESS) {
5622 			PMD_DRV_LOG(ERR, "Failed to get VSI params");
5623 			goto fail_msix_alloc;
5624 		}
5625 		rte_memcpy(&vsi->info, &ctxt.info,
5626 			sizeof(struct i40e_aqc_vsi_properties_data));
5627 		vsi->vsi_id = ctxt.vsi_number;
5628 		vsi->info.valid_sections = 0;
5629 
5630 		/* Configure tc, enabled TC0 only */
5631 		if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5632 			I40E_SUCCESS) {
5633 			PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5634 			goto fail_msix_alloc;
5635 		}
5636 
5637 		/* TC, queue mapping */
5638 		memset(&ctxt, 0, sizeof(ctxt));
5639 		vsi->info.valid_sections |=
5640 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5641 		vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5642 					I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5643 		rte_memcpy(&ctxt.info, &vsi->info,
5644 			sizeof(struct i40e_aqc_vsi_properties_data));
5645 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5646 						I40E_DEFAULT_TCMAP);
5647 		if (ret != I40E_SUCCESS) {
5648 			PMD_DRV_LOG(ERR,
5649 				"Failed to configure TC queue mapping");
5650 			goto fail_msix_alloc;
5651 		}
5652 		ctxt.seid = vsi->seid;
5653 		ctxt.pf_num = hw->pf_id;
5654 		ctxt.uplink_seid = vsi->uplink_seid;
5655 		ctxt.vf_num = 0;
5656 
5657 		/* Update VSI parameters */
5658 		ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5659 		if (ret != I40E_SUCCESS) {
5660 			PMD_DRV_LOG(ERR, "Failed to update VSI params");
5661 			goto fail_msix_alloc;
5662 		}
5663 
5664 		rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5665 						sizeof(vsi->info.tc_mapping));
5666 		rte_memcpy(&vsi->info.queue_mapping,
5667 				&ctxt.info.queue_mapping,
5668 			sizeof(vsi->info.queue_mapping));
5669 		vsi->info.mapping_flags = ctxt.info.mapping_flags;
5670 		vsi->info.valid_sections = 0;
5671 
5672 		rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5673 				ETH_ADDR_LEN);
5674 
5675 		/**
5676 		 * Updating default filter settings are necessary to prevent
5677 		 * reception of tagged packets.
5678 		 * Some old firmware configurations load a default macvlan
5679 		 * filter which accepts both tagged and untagged packets.
5680 		 * The updating is to use a normal filter instead if needed.
5681 		 * For NVM 4.2.2 or after, the updating is not needed anymore.
5682 		 * The firmware with correct configurations load the default
5683 		 * macvlan filter which is expected and cannot be removed.
5684 		 */
5685 		i40e_update_default_filter_setting(vsi);
5686 		i40e_config_qinq(hw, vsi);
5687 	} else if (type == I40E_VSI_SRIOV) {
5688 		memset(&ctxt, 0, sizeof(ctxt));
5689 		/**
5690 		 * For other VSI, the uplink_seid equals to uplink VSI's
5691 		 * uplink_seid since they share same VEB
5692 		 */
5693 		if (uplink_vsi == NULL)
5694 			vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5695 		else
5696 			vsi->uplink_seid = uplink_vsi->uplink_seid;
5697 		ctxt.pf_num = hw->pf_id;
5698 		ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5699 		ctxt.uplink_seid = vsi->uplink_seid;
5700 		ctxt.connection_type = 0x1;
5701 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5702 
5703 		/* Use the VEB configuration if FW >= v5.0 */
5704 		if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
5705 			/* Configure switch ID */
5706 			ctxt.info.valid_sections |=
5707 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5708 			ctxt.info.switch_id =
5709 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5710 		}
5711 
5712 		/* Configure port/vlan */
5713 		ctxt.info.valid_sections |=
5714 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5715 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5716 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5717 						hw->func_caps.enabled_tcmap);
5718 		if (ret != I40E_SUCCESS) {
5719 			PMD_DRV_LOG(ERR,
5720 				"Failed to configure TC queue mapping");
5721 			goto fail_msix_alloc;
5722 		}
5723 
5724 		ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5725 		ctxt.info.valid_sections |=
5726 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5727 		/**
5728 		 * Since VSI is not created yet, only configure parameter,
5729 		 * will add vsi below.
5730 		 */
5731 
5732 		i40e_config_qinq(hw, vsi);
5733 	} else if (type == I40E_VSI_VMDQ2) {
5734 		memset(&ctxt, 0, sizeof(ctxt));
5735 		/*
5736 		 * For other VSI, the uplink_seid equals to uplink VSI's
5737 		 * uplink_seid since they share same VEB
5738 		 */
5739 		vsi->uplink_seid = uplink_vsi->uplink_seid;
5740 		ctxt.pf_num = hw->pf_id;
5741 		ctxt.vf_num = 0;
5742 		ctxt.uplink_seid = vsi->uplink_seid;
5743 		ctxt.connection_type = 0x1;
5744 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5745 
5746 		ctxt.info.valid_sections |=
5747 				rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5748 		/* user_param carries flag to enable loop back */
5749 		if (user_param) {
5750 			ctxt.info.switch_id =
5751 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5752 			ctxt.info.switch_id |=
5753 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5754 		}
5755 
5756 		/* Configure port/vlan */
5757 		ctxt.info.valid_sections |=
5758 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5759 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5760 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5761 						I40E_DEFAULT_TCMAP);
5762 		if (ret != I40E_SUCCESS) {
5763 			PMD_DRV_LOG(ERR,
5764 				"Failed to configure TC queue mapping");
5765 			goto fail_msix_alloc;
5766 		}
5767 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5768 		ctxt.info.valid_sections |=
5769 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5770 	} else if (type == I40E_VSI_FDIR) {
5771 		memset(&ctxt, 0, sizeof(ctxt));
5772 		vsi->uplink_seid = uplink_vsi->uplink_seid;
5773 		ctxt.pf_num = hw->pf_id;
5774 		ctxt.vf_num = 0;
5775 		ctxt.uplink_seid = vsi->uplink_seid;
5776 		ctxt.connection_type = 0x1;     /* regular data port */
5777 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5778 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5779 						I40E_DEFAULT_TCMAP);
5780 		if (ret != I40E_SUCCESS) {
5781 			PMD_DRV_LOG(ERR,
5782 				"Failed to configure TC queue mapping.");
5783 			goto fail_msix_alloc;
5784 		}
5785 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5786 		ctxt.info.valid_sections |=
5787 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5788 	} else {
5789 		PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
5790 		goto fail_msix_alloc;
5791 	}
5792 
5793 	if (vsi->type != I40E_VSI_MAIN) {
5794 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5795 		if (ret != I40E_SUCCESS) {
5796 			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5797 				    hw->aq.asq_last_status);
5798 			goto fail_msix_alloc;
5799 		}
5800 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5801 		vsi->info.valid_sections = 0;
5802 		vsi->seid = ctxt.seid;
5803 		vsi->vsi_id = ctxt.vsi_number;
5804 		vsi->sib_vsi_list.vsi = vsi;
5805 		if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5806 			TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5807 					  &vsi->sib_vsi_list, list);
5808 		} else {
5809 			TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5810 					  &vsi->sib_vsi_list, list);
5811 		}
5812 	}
5813 
5814 	/* MAC/VLAN configuration */
5815 	rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5816 	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5817 
5818 	ret = i40e_vsi_add_mac(vsi, &filter);
5819 	if (ret != I40E_SUCCESS) {
5820 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5821 		goto fail_msix_alloc;
5822 	}
5823 
5824 	/* Get VSI BW information */
5825 	i40e_vsi_get_bw_config(vsi);
5826 	return vsi;
5827 fail_msix_alloc:
5828 	i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
5829 fail_queue_alloc:
5830 	i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
5831 fail_mem:
5832 	rte_free(vsi);
5833 	return NULL;
5834 }
5835 
5836 /* Configure vlan filter on or off */
5837 int
5838 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5839 {
5840 	int i, num;
5841 	struct i40e_mac_filter *f;
5842 	void *temp;
5843 	struct i40e_mac_filter_info *mac_filter;
5844 	enum rte_mac_filter_type desired_filter;
5845 	int ret = I40E_SUCCESS;
5846 
5847 	if (on) {
5848 		/* Filter to match MAC and VLAN */
5849 		desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5850 	} else {
5851 		/* Filter to match only MAC */
5852 		desired_filter = RTE_MAC_PERFECT_MATCH;
5853 	}
5854 
5855 	num = vsi->mac_num;
5856 
5857 	mac_filter = rte_zmalloc("mac_filter_info_data",
5858 				 num * sizeof(*mac_filter), 0);
5859 	if (mac_filter == NULL) {
5860 		PMD_DRV_LOG(ERR, "failed to allocate memory");
5861 		return I40E_ERR_NO_MEMORY;
5862 	}
5863 
5864 	i = 0;
5865 
5866 	/* Remove all existing mac */
5867 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5868 		mac_filter[i] = f->mac_info;
5869 		ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5870 		if (ret) {
5871 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5872 				    on ? "enable" : "disable");
5873 			goto DONE;
5874 		}
5875 		i++;
5876 	}
5877 
5878 	/* Override with new filter */
5879 	for (i = 0; i < num; i++) {
5880 		mac_filter[i].filter_type = desired_filter;
5881 		ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5882 		if (ret) {
5883 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5884 				    on ? "enable" : "disable");
5885 			goto DONE;
5886 		}
5887 	}
5888 
5889 DONE:
5890 	rte_free(mac_filter);
5891 	return ret;
5892 }
5893 
5894 /* Configure vlan stripping on or off */
5895 int
5896 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5897 {
5898 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5899 	struct i40e_vsi_context ctxt;
5900 	uint8_t vlan_flags;
5901 	int ret = I40E_SUCCESS;
5902 
5903 	/* Check if it has been already on or off */
5904 	if (vsi->info.valid_sections &
5905 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5906 		if (on) {
5907 			if ((vsi->info.port_vlan_flags &
5908 				I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5909 				return 0; /* already on */
5910 		} else {
5911 			if ((vsi->info.port_vlan_flags &
5912 				I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5913 				I40E_AQ_VSI_PVLAN_EMOD_MASK)
5914 				return 0; /* already off */
5915 		}
5916 	}
5917 
5918 	if (on)
5919 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5920 	else
5921 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5922 	vsi->info.valid_sections =
5923 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5924 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5925 	vsi->info.port_vlan_flags |= vlan_flags;
5926 	ctxt.seid = vsi->seid;
5927 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5928 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5929 	if (ret)
5930 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5931 			    on ? "enable" : "disable");
5932 
5933 	return ret;
5934 }
5935 
5936 static int
5937 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5938 {
5939 	struct rte_eth_dev_data *data = dev->data;
5940 	int ret;
5941 	int mask = 0;
5942 
5943 	/* Apply vlan offload setting */
5944 	mask = ETH_VLAN_STRIP_MASK |
5945 	       ETH_VLAN_FILTER_MASK |
5946 	       ETH_VLAN_EXTEND_MASK;
5947 	ret = i40e_vlan_offload_set(dev, mask);
5948 	if (ret) {
5949 		PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5950 		return ret;
5951 	}
5952 
5953 	/* Apply pvid setting */
5954 	ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5955 				data->dev_conf.txmode.hw_vlan_insert_pvid);
5956 	if (ret)
5957 		PMD_DRV_LOG(INFO, "Failed to update VSI params");
5958 
5959 	return ret;
5960 }
5961 
5962 static int
5963 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5964 {
5965 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5966 
5967 	return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5968 }
5969 
5970 static int
5971 i40e_update_flow_control(struct i40e_hw *hw)
5972 {
5973 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5974 	struct i40e_link_status link_status;
5975 	uint32_t rxfc = 0, txfc = 0, reg;
5976 	uint8_t an_info;
5977 	int ret;
5978 
5979 	memset(&link_status, 0, sizeof(link_status));
5980 	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5981 	if (ret != I40E_SUCCESS) {
5982 		PMD_DRV_LOG(ERR, "Failed to get link status information");
5983 		goto write_reg; /* Disable flow control */
5984 	}
5985 
5986 	an_info = hw->phy.link_info.an_info;
5987 	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5988 		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5989 		ret = I40E_ERR_NOT_READY;
5990 		goto write_reg; /* Disable flow control */
5991 	}
5992 	/**
5993 	 * If link auto negotiation is enabled, flow control needs to
5994 	 * be configured according to it
5995 	 */
5996 	switch (an_info & I40E_LINK_PAUSE_RXTX) {
5997 	case I40E_LINK_PAUSE_RXTX:
5998 		rxfc = 1;
5999 		txfc = 1;
6000 		hw->fc.current_mode = I40E_FC_FULL;
6001 		break;
6002 	case I40E_AQ_LINK_PAUSE_RX:
6003 		rxfc = 1;
6004 		hw->fc.current_mode = I40E_FC_RX_PAUSE;
6005 		break;
6006 	case I40E_AQ_LINK_PAUSE_TX:
6007 		txfc = 1;
6008 		hw->fc.current_mode = I40E_FC_TX_PAUSE;
6009 		break;
6010 	default:
6011 		hw->fc.current_mode = I40E_FC_NONE;
6012 		break;
6013 	}
6014 
6015 write_reg:
6016 	I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
6017 		txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
6018 	reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
6019 	reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
6020 	reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
6021 	I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
6022 
6023 	return ret;
6024 }
6025 
6026 /* PF setup */
6027 static int
6028 i40e_pf_setup(struct i40e_pf *pf)
6029 {
6030 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6031 	struct i40e_filter_control_settings settings;
6032 	struct i40e_vsi *vsi;
6033 	int ret;
6034 
6035 	/* Clear all stats counters */
6036 	pf->offset_loaded = FALSE;
6037 	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
6038 	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
6039 	memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
6040 	memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
6041 
6042 	ret = i40e_pf_get_switch_config(pf);
6043 	if (ret != I40E_SUCCESS) {
6044 		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
6045 		return ret;
6046 	}
6047 
6048 	ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
6049 	if (ret)
6050 		PMD_INIT_LOG(WARNING,
6051 			"failed to allocate switch domain for device %d", ret);
6052 
6053 	if (pf->flags & I40E_FLAG_FDIR) {
6054 		/* make queue allocated first, let FDIR use queue pair 0*/
6055 		ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
6056 		if (ret != I40E_FDIR_QUEUE_ID) {
6057 			PMD_DRV_LOG(ERR,
6058 				"queue allocation fails for FDIR: ret =%d",
6059 				ret);
6060 			pf->flags &= ~I40E_FLAG_FDIR;
6061 		}
6062 	}
6063 	/*  main VSI setup */
6064 	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
6065 	if (!vsi) {
6066 		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
6067 		return I40E_ERR_NOT_READY;
6068 	}
6069 	pf->main_vsi = vsi;
6070 
6071 	/* Configure filter control */
6072 	memset(&settings, 0, sizeof(settings));
6073 	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
6074 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
6075 	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
6076 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
6077 	else {
6078 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
6079 			hw->func_caps.rss_table_size);
6080 		return I40E_ERR_PARAM;
6081 	}
6082 	PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
6083 		hw->func_caps.rss_table_size);
6084 	pf->hash_lut_size = hw->func_caps.rss_table_size;
6085 
6086 	/* Enable ethtype and macvlan filters */
6087 	settings.enable_ethtype = TRUE;
6088 	settings.enable_macvlan = TRUE;
6089 	ret = i40e_set_filter_control(hw, &settings);
6090 	if (ret)
6091 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
6092 								ret);
6093 
6094 	/* Update flow control according to the auto negotiation */
6095 	i40e_update_flow_control(hw);
6096 
6097 	return I40E_SUCCESS;
6098 }
6099 
6100 int
6101 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6102 {
6103 	uint32_t reg;
6104 	uint16_t j;
6105 
6106 	/**
6107 	 * Set or clear TX Queue Disable flags,
6108 	 * which is required by hardware.
6109 	 */
6110 	i40e_pre_tx_queue_cfg(hw, q_idx, on);
6111 	rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
6112 
6113 	/* Wait until the request is finished */
6114 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6115 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6116 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6117 		if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6118 			((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
6119 							& 0x1))) {
6120 			break;
6121 		}
6122 	}
6123 	if (on) {
6124 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
6125 			return I40E_SUCCESS; /* already on, skip next steps */
6126 
6127 		I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
6128 		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
6129 	} else {
6130 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6131 			return I40E_SUCCESS; /* already off, skip next steps */
6132 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
6133 	}
6134 	/* Write the register */
6135 	I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
6136 	/* Check the result */
6137 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6138 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6139 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
6140 		if (on) {
6141 			if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6142 				(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6143 				break;
6144 		} else {
6145 			if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
6146 				!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
6147 				break;
6148 		}
6149 	}
6150 	/* Check if it is timeout */
6151 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6152 		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6153 			    (on ? "enable" : "disable"), q_idx);
6154 		return I40E_ERR_TIMEOUT;
6155 	}
6156 
6157 	return I40E_SUCCESS;
6158 }
6159 
6160 /* Swith on or off the tx queues */
6161 static int
6162 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
6163 {
6164 	struct rte_eth_dev_data *dev_data = pf->dev_data;
6165 	struct i40e_tx_queue *txq;
6166 	struct rte_eth_dev *dev = pf->adapter->eth_dev;
6167 	uint16_t i;
6168 	int ret;
6169 
6170 	for (i = 0; i < dev_data->nb_tx_queues; i++) {
6171 		txq = dev_data->tx_queues[i];
6172 		/* Don't operate the queue if not configured or
6173 		 * if starting only per queue */
6174 		if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
6175 			continue;
6176 		if (on)
6177 			ret = i40e_dev_tx_queue_start(dev, i);
6178 		else
6179 			ret = i40e_dev_tx_queue_stop(dev, i);
6180 		if ( ret != I40E_SUCCESS)
6181 			return ret;
6182 	}
6183 
6184 	return I40E_SUCCESS;
6185 }
6186 
6187 int
6188 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6189 {
6190 	uint32_t reg;
6191 	uint16_t j;
6192 
6193 	/* Wait until the request is finished */
6194 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6195 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6196 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6197 		if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6198 			((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6199 			break;
6200 	}
6201 
6202 	if (on) {
6203 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6204 			return I40E_SUCCESS; /* Already on, skip next steps */
6205 		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6206 	} else {
6207 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6208 			return I40E_SUCCESS; /* Already off, skip next steps */
6209 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6210 	}
6211 
6212 	/* Write the register */
6213 	I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6214 	/* Check the result */
6215 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6216 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6217 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6218 		if (on) {
6219 			if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6220 				(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6221 				break;
6222 		} else {
6223 			if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6224 				!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6225 				break;
6226 		}
6227 	}
6228 
6229 	/* Check if it is timeout */
6230 	if (j >= I40E_CHK_Q_ENA_COUNT) {
6231 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6232 			    (on ? "enable" : "disable"), q_idx);
6233 		return I40E_ERR_TIMEOUT;
6234 	}
6235 
6236 	return I40E_SUCCESS;
6237 }
6238 /* Switch on or off the rx queues */
6239 static int
6240 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
6241 {
6242 	struct rte_eth_dev_data *dev_data = pf->dev_data;
6243 	struct i40e_rx_queue *rxq;
6244 	struct rte_eth_dev *dev = pf->adapter->eth_dev;
6245 	uint16_t i;
6246 	int ret;
6247 
6248 	for (i = 0; i < dev_data->nb_rx_queues; i++) {
6249 		rxq = dev_data->rx_queues[i];
6250 		/* Don't operate the queue if not configured or
6251 		 * if starting only per queue */
6252 		if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
6253 			continue;
6254 		if (on)
6255 			ret = i40e_dev_rx_queue_start(dev, i);
6256 		else
6257 			ret = i40e_dev_rx_queue_stop(dev, i);
6258 		if (ret != I40E_SUCCESS)
6259 			return ret;
6260 	}
6261 
6262 	return I40E_SUCCESS;
6263 }
6264 
6265 /* Switch on or off all the rx/tx queues */
6266 int
6267 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
6268 {
6269 	int ret;
6270 
6271 	if (on) {
6272 		/* enable rx queues before enabling tx queues */
6273 		ret = i40e_dev_switch_rx_queues(pf, on);
6274 		if (ret) {
6275 			PMD_DRV_LOG(ERR, "Failed to switch rx queues");
6276 			return ret;
6277 		}
6278 		ret = i40e_dev_switch_tx_queues(pf, on);
6279 	} else {
6280 		/* Stop tx queues before stopping rx queues */
6281 		ret = i40e_dev_switch_tx_queues(pf, on);
6282 		if (ret) {
6283 			PMD_DRV_LOG(ERR, "Failed to switch tx queues");
6284 			return ret;
6285 		}
6286 		ret = i40e_dev_switch_rx_queues(pf, on);
6287 	}
6288 
6289 	return ret;
6290 }
6291 
6292 /* Initialize VSI for TX */
6293 static int
6294 i40e_dev_tx_init(struct i40e_pf *pf)
6295 {
6296 	struct rte_eth_dev_data *data = pf->dev_data;
6297 	uint16_t i;
6298 	uint32_t ret = I40E_SUCCESS;
6299 	struct i40e_tx_queue *txq;
6300 
6301 	for (i = 0; i < data->nb_tx_queues; i++) {
6302 		txq = data->tx_queues[i];
6303 		if (!txq || !txq->q_set)
6304 			continue;
6305 		ret = i40e_tx_queue_init(txq);
6306 		if (ret != I40E_SUCCESS)
6307 			break;
6308 	}
6309 	if (ret == I40E_SUCCESS)
6310 		i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6311 				     ->eth_dev);
6312 
6313 	return ret;
6314 }
6315 
6316 /* Initialize VSI for RX */
6317 static int
6318 i40e_dev_rx_init(struct i40e_pf *pf)
6319 {
6320 	struct rte_eth_dev_data *data = pf->dev_data;
6321 	int ret = I40E_SUCCESS;
6322 	uint16_t i;
6323 	struct i40e_rx_queue *rxq;
6324 
6325 	i40e_pf_config_mq_rx(pf);
6326 	for (i = 0; i < data->nb_rx_queues; i++) {
6327 		rxq = data->rx_queues[i];
6328 		if (!rxq || !rxq->q_set)
6329 			continue;
6330 
6331 		ret = i40e_rx_queue_init(rxq);
6332 		if (ret != I40E_SUCCESS) {
6333 			PMD_DRV_LOG(ERR,
6334 				"Failed to do RX queue initialization");
6335 			break;
6336 		}
6337 	}
6338 	if (ret == I40E_SUCCESS)
6339 		i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6340 				     ->eth_dev);
6341 
6342 	return ret;
6343 }
6344 
6345 static int
6346 i40e_dev_rxtx_init(struct i40e_pf *pf)
6347 {
6348 	int err;
6349 
6350 	err = i40e_dev_tx_init(pf);
6351 	if (err) {
6352 		PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6353 		return err;
6354 	}
6355 	err = i40e_dev_rx_init(pf);
6356 	if (err) {
6357 		PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6358 		return err;
6359 	}
6360 
6361 	return err;
6362 }
6363 
6364 static int
6365 i40e_vmdq_setup(struct rte_eth_dev *dev)
6366 {
6367 	struct rte_eth_conf *conf = &dev->data->dev_conf;
6368 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6369 	int i, err, conf_vsis, j, loop;
6370 	struct i40e_vsi *vsi;
6371 	struct i40e_vmdq_info *vmdq_info;
6372 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
6373 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6374 
6375 	/*
6376 	 * Disable interrupt to avoid message from VF. Furthermore, it will
6377 	 * avoid race condition in VSI creation/destroy.
6378 	 */
6379 	i40e_pf_disable_irq0(hw);
6380 
6381 	if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6382 		PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6383 		return -ENOTSUP;
6384 	}
6385 
6386 	conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6387 	if (conf_vsis > pf->max_nb_vmdq_vsi) {
6388 		PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6389 			conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6390 			pf->max_nb_vmdq_vsi);
6391 		return -ENOTSUP;
6392 	}
6393 
6394 	if (pf->vmdq != NULL) {
6395 		PMD_INIT_LOG(INFO, "VMDQ already configured");
6396 		return 0;
6397 	}
6398 
6399 	pf->vmdq = rte_zmalloc("vmdq_info_struct",
6400 				sizeof(*vmdq_info) * conf_vsis, 0);
6401 
6402 	if (pf->vmdq == NULL) {
6403 		PMD_INIT_LOG(ERR, "Failed to allocate memory");
6404 		return -ENOMEM;
6405 	}
6406 
6407 	vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6408 
6409 	/* Create VMDQ VSI */
6410 	for (i = 0; i < conf_vsis; i++) {
6411 		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6412 				vmdq_conf->enable_loop_back);
6413 		if (vsi == NULL) {
6414 			PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6415 			err = -1;
6416 			goto err_vsi_setup;
6417 		}
6418 		vmdq_info = &pf->vmdq[i];
6419 		vmdq_info->pf = pf;
6420 		vmdq_info->vsi = vsi;
6421 	}
6422 	pf->nb_cfg_vmdq_vsi = conf_vsis;
6423 
6424 	/* Configure Vlan */
6425 	loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6426 	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6427 		for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6428 			if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6429 				PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6430 					vmdq_conf->pool_map[i].vlan_id, j);
6431 
6432 				err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6433 						vmdq_conf->pool_map[i].vlan_id);
6434 				if (err) {
6435 					PMD_INIT_LOG(ERR, "Failed to add vlan");
6436 					err = -1;
6437 					goto err_vsi_setup;
6438 				}
6439 			}
6440 		}
6441 	}
6442 
6443 	i40e_pf_enable_irq0(hw);
6444 
6445 	return 0;
6446 
6447 err_vsi_setup:
6448 	for (i = 0; i < conf_vsis; i++)
6449 		if (pf->vmdq[i].vsi == NULL)
6450 			break;
6451 		else
6452 			i40e_vsi_release(pf->vmdq[i].vsi);
6453 
6454 	rte_free(pf->vmdq);
6455 	pf->vmdq = NULL;
6456 	i40e_pf_enable_irq0(hw);
6457 	return err;
6458 }
6459 
6460 static void
6461 i40e_stat_update_32(struct i40e_hw *hw,
6462 		   uint32_t reg,
6463 		   bool offset_loaded,
6464 		   uint64_t *offset,
6465 		   uint64_t *stat)
6466 {
6467 	uint64_t new_data;
6468 
6469 	new_data = (uint64_t)I40E_READ_REG(hw, reg);
6470 	if (!offset_loaded)
6471 		*offset = new_data;
6472 
6473 	if (new_data >= *offset)
6474 		*stat = (uint64_t)(new_data - *offset);
6475 	else
6476 		*stat = (uint64_t)((new_data +
6477 			((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6478 }
6479 
6480 static void
6481 i40e_stat_update_48(struct i40e_hw *hw,
6482 		   uint32_t hireg,
6483 		   uint32_t loreg,
6484 		   bool offset_loaded,
6485 		   uint64_t *offset,
6486 		   uint64_t *stat)
6487 {
6488 	uint64_t new_data;
6489 
6490 	new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6491 	new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6492 			I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6493 
6494 	if (!offset_loaded)
6495 		*offset = new_data;
6496 
6497 	if (new_data >= *offset)
6498 		*stat = new_data - *offset;
6499 	else
6500 		*stat = (uint64_t)((new_data +
6501 			((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6502 
6503 	*stat &= I40E_48_BIT_MASK;
6504 }
6505 
6506 /* Disable IRQ0 */
6507 void
6508 i40e_pf_disable_irq0(struct i40e_hw *hw)
6509 {
6510 	/* Disable all interrupt types */
6511 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6512 		       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6513 	I40E_WRITE_FLUSH(hw);
6514 }
6515 
6516 /* Enable IRQ0 */
6517 void
6518 i40e_pf_enable_irq0(struct i40e_hw *hw)
6519 {
6520 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6521 		I40E_PFINT_DYN_CTL0_INTENA_MASK |
6522 		I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6523 		I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6524 	I40E_WRITE_FLUSH(hw);
6525 }
6526 
6527 static void
6528 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6529 {
6530 	/* read pending request and disable first */
6531 	i40e_pf_disable_irq0(hw);
6532 	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6533 	I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6534 		I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6535 
6536 	if (no_queue)
6537 		/* Link no queues with irq0 */
6538 		I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6539 			       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6540 }
6541 
6542 static void
6543 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6544 {
6545 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6546 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6547 	int i;
6548 	uint16_t abs_vf_id;
6549 	uint32_t index, offset, val;
6550 
6551 	if (!pf->vfs)
6552 		return;
6553 	/**
6554 	 * Try to find which VF trigger a reset, use absolute VF id to access
6555 	 * since the reg is global register.
6556 	 */
6557 	for (i = 0; i < pf->vf_num; i++) {
6558 		abs_vf_id = hw->func_caps.vf_base_id + i;
6559 		index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6560 		offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6561 		val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6562 		/* VFR event occurred */
6563 		if (val & (0x1 << offset)) {
6564 			int ret;
6565 
6566 			/* Clear the event first */
6567 			I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6568 							(0x1 << offset));
6569 			PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6570 			/**
6571 			 * Only notify a VF reset event occurred,
6572 			 * don't trigger another SW reset
6573 			 */
6574 			ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6575 			if (ret != I40E_SUCCESS)
6576 				PMD_DRV_LOG(ERR, "Failed to do VF reset");
6577 		}
6578 	}
6579 }
6580 
6581 static void
6582 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6583 {
6584 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6585 	int i;
6586 
6587 	for (i = 0; i < pf->vf_num; i++)
6588 		i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6589 }
6590 
6591 static void
6592 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6593 {
6594 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6595 	struct i40e_arq_event_info info;
6596 	uint16_t pending, opcode;
6597 	int ret;
6598 
6599 	info.buf_len = I40E_AQ_BUF_SZ;
6600 	info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6601 	if (!info.msg_buf) {
6602 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
6603 		return;
6604 	}
6605 
6606 	pending = 1;
6607 	while (pending) {
6608 		ret = i40e_clean_arq_element(hw, &info, &pending);
6609 
6610 		if (ret != I40E_SUCCESS) {
6611 			PMD_DRV_LOG(INFO,
6612 				"Failed to read msg from AdminQ, aq_err: %u",
6613 				hw->aq.asq_last_status);
6614 			break;
6615 		}
6616 		opcode = rte_le_to_cpu_16(info.desc.opcode);
6617 
6618 		switch (opcode) {
6619 		case i40e_aqc_opc_send_msg_to_pf:
6620 			/* Refer to i40e_aq_send_msg_to_pf() for argument layout*/
6621 			i40e_pf_host_handle_vf_msg(dev,
6622 					rte_le_to_cpu_16(info.desc.retval),
6623 					rte_le_to_cpu_32(info.desc.cookie_high),
6624 					rte_le_to_cpu_32(info.desc.cookie_low),
6625 					info.msg_buf,
6626 					info.msg_len);
6627 			break;
6628 		case i40e_aqc_opc_get_link_status:
6629 			ret = i40e_dev_link_update(dev, 0);
6630 			if (!ret)
6631 				_rte_eth_dev_callback_process(dev,
6632 					RTE_ETH_EVENT_INTR_LSC, NULL);
6633 			break;
6634 		default:
6635 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6636 				    opcode);
6637 			break;
6638 		}
6639 	}
6640 	rte_free(info.msg_buf);
6641 }
6642 
6643 /**
6644  * Interrupt handler triggered by NIC  for handling
6645  * specific interrupt.
6646  *
6647  * @param handle
6648  *  Pointer to interrupt handle.
6649  * @param param
6650  *  The address of parameter (struct rte_eth_dev *) regsitered before.
6651  *
6652  * @return
6653  *  void
6654  */
6655 static void
6656 i40e_dev_interrupt_handler(void *param)
6657 {
6658 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6659 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6660 	uint32_t icr0;
6661 
6662 	/* Disable interrupt */
6663 	i40e_pf_disable_irq0(hw);
6664 
6665 	/* read out interrupt causes */
6666 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6667 
6668 	/* No interrupt event indicated */
6669 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6670 		PMD_DRV_LOG(INFO, "No interrupt event");
6671 		goto done;
6672 	}
6673 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6674 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6675 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6676 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6677 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6678 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6679 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6680 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6681 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6682 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6683 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6684 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
6685 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6686 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6687 
6688 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6689 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6690 		i40e_dev_handle_vfr_event(dev);
6691 	}
6692 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6693 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
6694 		i40e_dev_handle_aq_msg(dev);
6695 	}
6696 
6697 done:
6698 	/* Enable interrupt */
6699 	i40e_pf_enable_irq0(hw);
6700 }
6701 
6702 static void
6703 i40e_dev_alarm_handler(void *param)
6704 {
6705 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6706 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6707 	uint32_t icr0;
6708 
6709 	/* Disable interrupt */
6710 	i40e_pf_disable_irq0(hw);
6711 
6712 	/* read out interrupt causes */
6713 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6714 
6715 	/* No interrupt event indicated */
6716 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK))
6717 		goto done;
6718 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6719 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6720 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6721 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6722 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6723 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6724 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6725 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6726 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6727 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6728 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6729 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
6730 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6731 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6732 
6733 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6734 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6735 		i40e_dev_handle_vfr_event(dev);
6736 	}
6737 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6738 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
6739 		i40e_dev_handle_aq_msg(dev);
6740 	}
6741 
6742 done:
6743 	/* Enable interrupt */
6744 	i40e_pf_enable_irq0(hw);
6745 	rte_eal_alarm_set(I40E_ALARM_INTERVAL,
6746 			  i40e_dev_alarm_handler, dev);
6747 }
6748 
6749 int
6750 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6751 			 struct i40e_macvlan_filter *filter,
6752 			 int total)
6753 {
6754 	int ele_num, ele_buff_size;
6755 	int num, actual_num, i;
6756 	uint16_t flags;
6757 	int ret = I40E_SUCCESS;
6758 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6759 	struct i40e_aqc_add_macvlan_element_data *req_list;
6760 
6761 	if (filter == NULL  || total == 0)
6762 		return I40E_ERR_PARAM;
6763 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6764 	ele_buff_size = hw->aq.asq_buf_size;
6765 
6766 	req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6767 	if (req_list == NULL) {
6768 		PMD_DRV_LOG(ERR, "Fail to allocate memory");
6769 		return I40E_ERR_NO_MEMORY;
6770 	}
6771 
6772 	num = 0;
6773 	do {
6774 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6775 		memset(req_list, 0, ele_buff_size);
6776 
6777 		for (i = 0; i < actual_num; i++) {
6778 			rte_memcpy(req_list[i].mac_addr,
6779 				&filter[num + i].macaddr, ETH_ADDR_LEN);
6780 			req_list[i].vlan_tag =
6781 				rte_cpu_to_le_16(filter[num + i].vlan_id);
6782 
6783 			switch (filter[num + i].filter_type) {
6784 			case RTE_MAC_PERFECT_MATCH:
6785 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6786 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6787 				break;
6788 			case RTE_MACVLAN_PERFECT_MATCH:
6789 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6790 				break;
6791 			case RTE_MAC_HASH_MATCH:
6792 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6793 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6794 				break;
6795 			case RTE_MACVLAN_HASH_MATCH:
6796 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6797 				break;
6798 			default:
6799 				PMD_DRV_LOG(ERR, "Invalid MAC match type");
6800 				ret = I40E_ERR_PARAM;
6801 				goto DONE;
6802 			}
6803 
6804 			req_list[i].queue_number = 0;
6805 
6806 			req_list[i].flags = rte_cpu_to_le_16(flags);
6807 		}
6808 
6809 		ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6810 						actual_num, NULL);
6811 		if (ret != I40E_SUCCESS) {
6812 			PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6813 			goto DONE;
6814 		}
6815 		num += actual_num;
6816 	} while (num < total);
6817 
6818 DONE:
6819 	rte_free(req_list);
6820 	return ret;
6821 }
6822 
6823 int
6824 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6825 			    struct i40e_macvlan_filter *filter,
6826 			    int total)
6827 {
6828 	int ele_num, ele_buff_size;
6829 	int num, actual_num, i;
6830 	uint16_t flags;
6831 	int ret = I40E_SUCCESS;
6832 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6833 	struct i40e_aqc_remove_macvlan_element_data *req_list;
6834 
6835 	if (filter == NULL  || total == 0)
6836 		return I40E_ERR_PARAM;
6837 
6838 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6839 	ele_buff_size = hw->aq.asq_buf_size;
6840 
6841 	req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6842 	if (req_list == NULL) {
6843 		PMD_DRV_LOG(ERR, "Fail to allocate memory");
6844 		return I40E_ERR_NO_MEMORY;
6845 	}
6846 
6847 	num = 0;
6848 	do {
6849 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6850 		memset(req_list, 0, ele_buff_size);
6851 
6852 		for (i = 0; i < actual_num; i++) {
6853 			rte_memcpy(req_list[i].mac_addr,
6854 				&filter[num + i].macaddr, ETH_ADDR_LEN);
6855 			req_list[i].vlan_tag =
6856 				rte_cpu_to_le_16(filter[num + i].vlan_id);
6857 
6858 			switch (filter[num + i].filter_type) {
6859 			case RTE_MAC_PERFECT_MATCH:
6860 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6861 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6862 				break;
6863 			case RTE_MACVLAN_PERFECT_MATCH:
6864 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6865 				break;
6866 			case RTE_MAC_HASH_MATCH:
6867 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6868 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6869 				break;
6870 			case RTE_MACVLAN_HASH_MATCH:
6871 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6872 				break;
6873 			default:
6874 				PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6875 				ret = I40E_ERR_PARAM;
6876 				goto DONE;
6877 			}
6878 			req_list[i].flags = rte_cpu_to_le_16(flags);
6879 		}
6880 
6881 		ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6882 						actual_num, NULL);
6883 		if (ret != I40E_SUCCESS) {
6884 			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6885 			goto DONE;
6886 		}
6887 		num += actual_num;
6888 	} while (num < total);
6889 
6890 DONE:
6891 	rte_free(req_list);
6892 	return ret;
6893 }
6894 
6895 /* Find out specific MAC filter */
6896 static struct i40e_mac_filter *
6897 i40e_find_mac_filter(struct i40e_vsi *vsi,
6898 			 struct ether_addr *macaddr)
6899 {
6900 	struct i40e_mac_filter *f;
6901 
6902 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
6903 		if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6904 			return f;
6905 	}
6906 
6907 	return NULL;
6908 }
6909 
6910 static bool
6911 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6912 			 uint16_t vlan_id)
6913 {
6914 	uint32_t vid_idx, vid_bit;
6915 
6916 	if (vlan_id > ETH_VLAN_ID_MAX)
6917 		return 0;
6918 
6919 	vid_idx = I40E_VFTA_IDX(vlan_id);
6920 	vid_bit = I40E_VFTA_BIT(vlan_id);
6921 
6922 	if (vsi->vfta[vid_idx] & vid_bit)
6923 		return 1;
6924 	else
6925 		return 0;
6926 }
6927 
6928 static void
6929 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6930 		       uint16_t vlan_id, bool on)
6931 {
6932 	uint32_t vid_idx, vid_bit;
6933 
6934 	vid_idx = I40E_VFTA_IDX(vlan_id);
6935 	vid_bit = I40E_VFTA_BIT(vlan_id);
6936 
6937 	if (on)
6938 		vsi->vfta[vid_idx] |= vid_bit;
6939 	else
6940 		vsi->vfta[vid_idx] &= ~vid_bit;
6941 }
6942 
6943 void
6944 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6945 		     uint16_t vlan_id, bool on)
6946 {
6947 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6948 	struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6949 	int ret;
6950 
6951 	if (vlan_id > ETH_VLAN_ID_MAX)
6952 		return;
6953 
6954 	i40e_store_vlan_filter(vsi, vlan_id, on);
6955 
6956 	if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6957 		return;
6958 
6959 	vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6960 
6961 	if (on) {
6962 		ret = i40e_aq_add_vlan(hw, vsi->seid,
6963 				       &vlan_data, 1, NULL);
6964 		if (ret != I40E_SUCCESS)
6965 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6966 	} else {
6967 		ret = i40e_aq_remove_vlan(hw, vsi->seid,
6968 					  &vlan_data, 1, NULL);
6969 		if (ret != I40E_SUCCESS)
6970 			PMD_DRV_LOG(ERR,
6971 				    "Failed to remove vlan filter");
6972 	}
6973 }
6974 
6975 /**
6976  * Find all vlan options for specific mac addr,
6977  * return with actual vlan found.
6978  */
6979 int
6980 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6981 			   struct i40e_macvlan_filter *mv_f,
6982 			   int num, struct ether_addr *addr)
6983 {
6984 	int i;
6985 	uint32_t j, k;
6986 
6987 	/**
6988 	 * Not to use i40e_find_vlan_filter to decrease the loop time,
6989 	 * although the code looks complex.
6990 	  */
6991 	if (num < vsi->vlan_num)
6992 		return I40E_ERR_PARAM;
6993 
6994 	i = 0;
6995 	for (j = 0; j < I40E_VFTA_SIZE; j++) {
6996 		if (vsi->vfta[j]) {
6997 			for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6998 				if (vsi->vfta[j] & (1 << k)) {
6999 					if (i > num - 1) {
7000 						PMD_DRV_LOG(ERR,
7001 							"vlan number doesn't match");
7002 						return I40E_ERR_PARAM;
7003 					}
7004 					rte_memcpy(&mv_f[i].macaddr,
7005 							addr, ETH_ADDR_LEN);
7006 					mv_f[i].vlan_id =
7007 						j * I40E_UINT32_BIT_SIZE + k;
7008 					i++;
7009 				}
7010 			}
7011 		}
7012 	}
7013 	return I40E_SUCCESS;
7014 }
7015 
7016 static inline int
7017 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
7018 			   struct i40e_macvlan_filter *mv_f,
7019 			   int num,
7020 			   uint16_t vlan)
7021 {
7022 	int i = 0;
7023 	struct i40e_mac_filter *f;
7024 
7025 	if (num < vsi->mac_num)
7026 		return I40E_ERR_PARAM;
7027 
7028 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
7029 		if (i > num - 1) {
7030 			PMD_DRV_LOG(ERR, "buffer number not match");
7031 			return I40E_ERR_PARAM;
7032 		}
7033 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7034 				ETH_ADDR_LEN);
7035 		mv_f[i].vlan_id = vlan;
7036 		mv_f[i].filter_type = f->mac_info.filter_type;
7037 		i++;
7038 	}
7039 
7040 	return I40E_SUCCESS;
7041 }
7042 
7043 static int
7044 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
7045 {
7046 	int i, j, num;
7047 	struct i40e_mac_filter *f;
7048 	struct i40e_macvlan_filter *mv_f;
7049 	int ret = I40E_SUCCESS;
7050 
7051 	if (vsi == NULL || vsi->mac_num == 0)
7052 		return I40E_ERR_PARAM;
7053 
7054 	/* Case that no vlan is set */
7055 	if (vsi->vlan_num == 0)
7056 		num = vsi->mac_num;
7057 	else
7058 		num = vsi->mac_num * vsi->vlan_num;
7059 
7060 	mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
7061 	if (mv_f == NULL) {
7062 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7063 		return I40E_ERR_NO_MEMORY;
7064 	}
7065 
7066 	i = 0;
7067 	if (vsi->vlan_num == 0) {
7068 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7069 			rte_memcpy(&mv_f[i].macaddr,
7070 				&f->mac_info.mac_addr, ETH_ADDR_LEN);
7071 			mv_f[i].filter_type = f->mac_info.filter_type;
7072 			mv_f[i].vlan_id = 0;
7073 			i++;
7074 		}
7075 	} else {
7076 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
7077 			ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
7078 					vsi->vlan_num, &f->mac_info.mac_addr);
7079 			if (ret != I40E_SUCCESS)
7080 				goto DONE;
7081 			for (j = i; j < i + vsi->vlan_num; j++)
7082 				mv_f[j].filter_type = f->mac_info.filter_type;
7083 			i += vsi->vlan_num;
7084 		}
7085 	}
7086 
7087 	ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
7088 DONE:
7089 	rte_free(mv_f);
7090 
7091 	return ret;
7092 }
7093 
7094 int
7095 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7096 {
7097 	struct i40e_macvlan_filter *mv_f;
7098 	int mac_num;
7099 	int ret = I40E_SUCCESS;
7100 
7101 	if (!vsi || vlan > ETHER_MAX_VLAN_ID)
7102 		return I40E_ERR_PARAM;
7103 
7104 	/* If it's already set, just return */
7105 	if (i40e_find_vlan_filter(vsi,vlan))
7106 		return I40E_SUCCESS;
7107 
7108 	mac_num = vsi->mac_num;
7109 
7110 	if (mac_num == 0) {
7111 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7112 		return I40E_ERR_PARAM;
7113 	}
7114 
7115 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7116 
7117 	if (mv_f == NULL) {
7118 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7119 		return I40E_ERR_NO_MEMORY;
7120 	}
7121 
7122 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7123 
7124 	if (ret != I40E_SUCCESS)
7125 		goto DONE;
7126 
7127 	ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7128 
7129 	if (ret != I40E_SUCCESS)
7130 		goto DONE;
7131 
7132 	i40e_set_vlan_filter(vsi, vlan, 1);
7133 
7134 	vsi->vlan_num++;
7135 	ret = I40E_SUCCESS;
7136 DONE:
7137 	rte_free(mv_f);
7138 	return ret;
7139 }
7140 
7141 int
7142 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
7143 {
7144 	struct i40e_macvlan_filter *mv_f;
7145 	int mac_num;
7146 	int ret = I40E_SUCCESS;
7147 
7148 	/**
7149 	 * Vlan 0 is the generic filter for untagged packets
7150 	 * and can't be removed.
7151 	 */
7152 	if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
7153 		return I40E_ERR_PARAM;
7154 
7155 	/* If can't find it, just return */
7156 	if (!i40e_find_vlan_filter(vsi, vlan))
7157 		return I40E_ERR_PARAM;
7158 
7159 	mac_num = vsi->mac_num;
7160 
7161 	if (mac_num == 0) {
7162 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
7163 		return I40E_ERR_PARAM;
7164 	}
7165 
7166 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
7167 
7168 	if (mv_f == NULL) {
7169 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7170 		return I40E_ERR_NO_MEMORY;
7171 	}
7172 
7173 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
7174 
7175 	if (ret != I40E_SUCCESS)
7176 		goto DONE;
7177 
7178 	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
7179 
7180 	if (ret != I40E_SUCCESS)
7181 		goto DONE;
7182 
7183 	/* This is last vlan to remove, replace all mac filter with vlan 0 */
7184 	if (vsi->vlan_num == 1) {
7185 		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
7186 		if (ret != I40E_SUCCESS)
7187 			goto DONE;
7188 
7189 		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
7190 		if (ret != I40E_SUCCESS)
7191 			goto DONE;
7192 	}
7193 
7194 	i40e_set_vlan_filter(vsi, vlan, 0);
7195 
7196 	vsi->vlan_num--;
7197 	ret = I40E_SUCCESS;
7198 DONE:
7199 	rte_free(mv_f);
7200 	return ret;
7201 }
7202 
7203 int
7204 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7205 {
7206 	struct i40e_mac_filter *f;
7207 	struct i40e_macvlan_filter *mv_f;
7208 	int i, vlan_num = 0;
7209 	int ret = I40E_SUCCESS;
7210 
7211 	/* If it's add and we've config it, return */
7212 	f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7213 	if (f != NULL)
7214 		return I40E_SUCCESS;
7215 	if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
7216 		(mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
7217 
7218 		/**
7219 		 * If vlan_num is 0, that's the first time to add mac,
7220 		 * set mask for vlan_id 0.
7221 		 */
7222 		if (vsi->vlan_num == 0) {
7223 			i40e_set_vlan_filter(vsi, 0, 1);
7224 			vsi->vlan_num = 1;
7225 		}
7226 		vlan_num = vsi->vlan_num;
7227 	} else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
7228 			(mac_filter->filter_type == RTE_MAC_HASH_MATCH))
7229 		vlan_num = 1;
7230 
7231 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7232 	if (mv_f == NULL) {
7233 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7234 		return I40E_ERR_NO_MEMORY;
7235 	}
7236 
7237 	for (i = 0; i < vlan_num; i++) {
7238 		mv_f[i].filter_type = mac_filter->filter_type;
7239 		rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7240 				ETH_ADDR_LEN);
7241 	}
7242 
7243 	if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7244 		mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
7245 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7246 					&mac_filter->mac_addr);
7247 		if (ret != I40E_SUCCESS)
7248 			goto DONE;
7249 	}
7250 
7251 	ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7252 	if (ret != I40E_SUCCESS)
7253 		goto DONE;
7254 
7255 	/* Add the mac addr into mac list */
7256 	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7257 	if (f == NULL) {
7258 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7259 		ret = I40E_ERR_NO_MEMORY;
7260 		goto DONE;
7261 	}
7262 	rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7263 			ETH_ADDR_LEN);
7264 	f->mac_info.filter_type = mac_filter->filter_type;
7265 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7266 	vsi->mac_num++;
7267 
7268 	ret = I40E_SUCCESS;
7269 DONE:
7270 	rte_free(mv_f);
7271 
7272 	return ret;
7273 }
7274 
7275 int
7276 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
7277 {
7278 	struct i40e_mac_filter *f;
7279 	struct i40e_macvlan_filter *mv_f;
7280 	int i, vlan_num;
7281 	enum rte_mac_filter_type filter_type;
7282 	int ret = I40E_SUCCESS;
7283 
7284 	/* Can't find it, return an error */
7285 	f = i40e_find_mac_filter(vsi, addr);
7286 	if (f == NULL)
7287 		return I40E_ERR_PARAM;
7288 
7289 	vlan_num = vsi->vlan_num;
7290 	filter_type = f->mac_info.filter_type;
7291 	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7292 		filter_type == RTE_MACVLAN_HASH_MATCH) {
7293 		if (vlan_num == 0) {
7294 			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7295 			return I40E_ERR_PARAM;
7296 		}
7297 	} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
7298 			filter_type == RTE_MAC_HASH_MATCH)
7299 		vlan_num = 1;
7300 
7301 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7302 	if (mv_f == NULL) {
7303 		PMD_DRV_LOG(ERR, "failed to allocate memory");
7304 		return I40E_ERR_NO_MEMORY;
7305 	}
7306 
7307 	for (i = 0; i < vlan_num; i++) {
7308 		mv_f[i].filter_type = filter_type;
7309 		rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7310 				ETH_ADDR_LEN);
7311 	}
7312 	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7313 			filter_type == RTE_MACVLAN_HASH_MATCH) {
7314 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7315 		if (ret != I40E_SUCCESS)
7316 			goto DONE;
7317 	}
7318 
7319 	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7320 	if (ret != I40E_SUCCESS)
7321 		goto DONE;
7322 
7323 	/* Remove the mac addr into mac list */
7324 	TAILQ_REMOVE(&vsi->mac_list, f, next);
7325 	rte_free(f);
7326 	vsi->mac_num--;
7327 
7328 	ret = I40E_SUCCESS;
7329 DONE:
7330 	rte_free(mv_f);
7331 	return ret;
7332 }
7333 
7334 /* Configure hash enable flags for RSS */
7335 uint64_t
7336 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7337 {
7338 	uint64_t hena = 0;
7339 	int i;
7340 
7341 	if (!flags)
7342 		return hena;
7343 
7344 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7345 		if (flags & (1ULL << i))
7346 			hena |= adapter->pctypes_tbl[i];
7347 	}
7348 
7349 	return hena;
7350 }
7351 
7352 /* Parse the hash enable flags */
7353 uint64_t
7354 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7355 {
7356 	uint64_t rss_hf = 0;
7357 
7358 	if (!flags)
7359 		return rss_hf;
7360 	int i;
7361 
7362 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7363 		if (flags & adapter->pctypes_tbl[i])
7364 			rss_hf |= (1ULL << i);
7365 	}
7366 	return rss_hf;
7367 }
7368 
7369 /* Disable RSS */
7370 static void
7371 i40e_pf_disable_rss(struct i40e_pf *pf)
7372 {
7373 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7374 
7375 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7376 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7377 	I40E_WRITE_FLUSH(hw);
7378 }
7379 
7380 int
7381 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7382 {
7383 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7384 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7385 	uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7386 			   I40E_VFQF_HKEY_MAX_INDEX :
7387 			   I40E_PFQF_HKEY_MAX_INDEX;
7388 	int ret = 0;
7389 
7390 	if (!key || key_len == 0) {
7391 		PMD_DRV_LOG(DEBUG, "No key to be configured");
7392 		return 0;
7393 	} else if (key_len != (key_idx + 1) *
7394 		sizeof(uint32_t)) {
7395 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7396 		return -EINVAL;
7397 	}
7398 
7399 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7400 		struct i40e_aqc_get_set_rss_key_data *key_dw =
7401 			(struct i40e_aqc_get_set_rss_key_data *)key;
7402 
7403 		ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7404 		if (ret)
7405 			PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7406 	} else {
7407 		uint32_t *hash_key = (uint32_t *)key;
7408 		uint16_t i;
7409 
7410 		if (vsi->type == I40E_VSI_SRIOV) {
7411 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7412 				I40E_WRITE_REG(
7413 					hw,
7414 					I40E_VFQF_HKEY1(i, vsi->user_param),
7415 					hash_key[i]);
7416 
7417 		} else {
7418 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7419 				I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7420 					       hash_key[i]);
7421 		}
7422 		I40E_WRITE_FLUSH(hw);
7423 	}
7424 
7425 	return ret;
7426 }
7427 
7428 static int
7429 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7430 {
7431 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7432 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7433 	uint32_t reg;
7434 	int ret;
7435 
7436 	if (!key || !key_len)
7437 		return 0;
7438 
7439 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7440 		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7441 			(struct i40e_aqc_get_set_rss_key_data *)key);
7442 		if (ret) {
7443 			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7444 			return ret;
7445 		}
7446 	} else {
7447 		uint32_t *key_dw = (uint32_t *)key;
7448 		uint16_t i;
7449 
7450 		if (vsi->type == I40E_VSI_SRIOV) {
7451 			for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7452 				reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7453 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7454 			}
7455 			*key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7456 				   sizeof(uint32_t);
7457 		} else {
7458 			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7459 				reg = I40E_PFQF_HKEY(i);
7460 				key_dw[i] = i40e_read_rx_ctl(hw, reg);
7461 			}
7462 			*key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7463 				   sizeof(uint32_t);
7464 		}
7465 	}
7466 	return 0;
7467 }
7468 
7469 static int
7470 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7471 {
7472 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7473 	uint64_t hena;
7474 	int ret;
7475 
7476 	ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7477 			       rss_conf->rss_key_len);
7478 	if (ret)
7479 		return ret;
7480 
7481 	hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7482 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7483 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7484 	I40E_WRITE_FLUSH(hw);
7485 
7486 	return 0;
7487 }
7488 
7489 static int
7490 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7491 			 struct rte_eth_rss_conf *rss_conf)
7492 {
7493 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7494 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7495 	uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7496 	uint64_t hena;
7497 
7498 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7499 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7500 
7501 	if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7502 		if (rss_hf != 0) /* Enable RSS */
7503 			return -EINVAL;
7504 		return 0; /* Nothing to do */
7505 	}
7506 	/* RSS enabled */
7507 	if (rss_hf == 0) /* Disable RSS */
7508 		return -EINVAL;
7509 
7510 	return i40e_hw_rss_hash_set(pf, rss_conf);
7511 }
7512 
7513 static int
7514 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7515 			   struct rte_eth_rss_conf *rss_conf)
7516 {
7517 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7518 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7519 	uint64_t hena;
7520 	int ret;
7521 
7522 	if (!rss_conf)
7523 		return -EINVAL;
7524 
7525 	ret = i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7526 			 &rss_conf->rss_key_len);
7527 	if (ret)
7528 		return ret;
7529 
7530 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7531 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7532 	rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7533 
7534 	return 0;
7535 }
7536 
7537 static int
7538 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7539 {
7540 	switch (filter_type) {
7541 	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7542 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7543 		break;
7544 	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7545 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7546 		break;
7547 	case RTE_TUNNEL_FILTER_IMAC_TENID:
7548 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7549 		break;
7550 	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7551 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7552 		break;
7553 	case ETH_TUNNEL_FILTER_IMAC:
7554 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7555 		break;
7556 	case ETH_TUNNEL_FILTER_OIP:
7557 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7558 		break;
7559 	case ETH_TUNNEL_FILTER_IIP:
7560 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7561 		break;
7562 	default:
7563 		PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7564 		return -EINVAL;
7565 	}
7566 
7567 	return 0;
7568 }
7569 
7570 /* Convert tunnel filter structure */
7571 static int
7572 i40e_tunnel_filter_convert(
7573 	struct i40e_aqc_cloud_filters_element_bb *cld_filter,
7574 	struct i40e_tunnel_filter *tunnel_filter)
7575 {
7576 	ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
7577 			(struct ether_addr *)&tunnel_filter->input.outer_mac);
7578 	ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
7579 			(struct ether_addr *)&tunnel_filter->input.inner_mac);
7580 	tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7581 	if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7582 	     I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7583 	    I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7584 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7585 	else
7586 		tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7587 	tunnel_filter->input.flags = cld_filter->element.flags;
7588 	tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7589 	tunnel_filter->queue = cld_filter->element.queue_number;
7590 	rte_memcpy(tunnel_filter->input.general_fields,
7591 		   cld_filter->general_fields,
7592 		   sizeof(cld_filter->general_fields));
7593 
7594 	return 0;
7595 }
7596 
7597 /* Check if there exists the tunnel filter */
7598 struct i40e_tunnel_filter *
7599 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7600 			     const struct i40e_tunnel_filter_input *input)
7601 {
7602 	int ret;
7603 
7604 	ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7605 	if (ret < 0)
7606 		return NULL;
7607 
7608 	return tunnel_rule->hash_map[ret];
7609 }
7610 
7611 /* Add a tunnel filter into the SW list */
7612 static int
7613 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7614 			     struct i40e_tunnel_filter *tunnel_filter)
7615 {
7616 	struct i40e_tunnel_rule *rule = &pf->tunnel;
7617 	int ret;
7618 
7619 	ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7620 	if (ret < 0) {
7621 		PMD_DRV_LOG(ERR,
7622 			    "Failed to insert tunnel filter to hash table %d!",
7623 			    ret);
7624 		return ret;
7625 	}
7626 	rule->hash_map[ret] = tunnel_filter;
7627 
7628 	TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7629 
7630 	return 0;
7631 }
7632 
7633 /* Delete a tunnel filter from the SW list */
7634 int
7635 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7636 			  struct i40e_tunnel_filter_input *input)
7637 {
7638 	struct i40e_tunnel_rule *rule = &pf->tunnel;
7639 	struct i40e_tunnel_filter *tunnel_filter;
7640 	int ret;
7641 
7642 	ret = rte_hash_del_key(rule->hash_table, input);
7643 	if (ret < 0) {
7644 		PMD_DRV_LOG(ERR,
7645 			    "Failed to delete tunnel filter to hash table %d!",
7646 			    ret);
7647 		return ret;
7648 	}
7649 	tunnel_filter = rule->hash_map[ret];
7650 	rule->hash_map[ret] = NULL;
7651 
7652 	TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7653 	rte_free(tunnel_filter);
7654 
7655 	return 0;
7656 }
7657 
7658 int
7659 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7660 			struct rte_eth_tunnel_filter_conf *tunnel_filter,
7661 			uint8_t add)
7662 {
7663 	uint16_t ip_type;
7664 	uint32_t ipv4_addr, ipv4_addr_le;
7665 	uint8_t i, tun_type = 0;
7666 	/* internal varialbe to convert ipv6 byte order */
7667 	uint32_t convert_ipv6[4];
7668 	int val, ret = 0;
7669 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7670 	struct i40e_vsi *vsi = pf->main_vsi;
7671 	struct i40e_aqc_cloud_filters_element_bb *cld_filter;
7672 	struct i40e_aqc_cloud_filters_element_bb *pfilter;
7673 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7674 	struct i40e_tunnel_filter *tunnel, *node;
7675 	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7676 
7677 	cld_filter = rte_zmalloc("tunnel_filter",
7678 			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7679 	0);
7680 
7681 	if (NULL == cld_filter) {
7682 		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7683 		return -ENOMEM;
7684 	}
7685 	pfilter = cld_filter;
7686 
7687 	ether_addr_copy(&tunnel_filter->outer_mac,
7688 			(struct ether_addr *)&pfilter->element.outer_mac);
7689 	ether_addr_copy(&tunnel_filter->inner_mac,
7690 			(struct ether_addr *)&pfilter->element.inner_mac);
7691 
7692 	pfilter->element.inner_vlan =
7693 		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7694 	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7695 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7696 		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7697 		ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7698 		rte_memcpy(&pfilter->element.ipaddr.v4.data,
7699 				&ipv4_addr_le,
7700 				sizeof(pfilter->element.ipaddr.v4.data));
7701 	} else {
7702 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7703 		for (i = 0; i < 4; i++) {
7704 			convert_ipv6[i] =
7705 			rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7706 		}
7707 		rte_memcpy(&pfilter->element.ipaddr.v6.data,
7708 			   &convert_ipv6,
7709 			   sizeof(pfilter->element.ipaddr.v6.data));
7710 	}
7711 
7712 	/* check tunneled type */
7713 	switch (tunnel_filter->tunnel_type) {
7714 	case RTE_TUNNEL_TYPE_VXLAN:
7715 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7716 		break;
7717 	case RTE_TUNNEL_TYPE_NVGRE:
7718 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7719 		break;
7720 	case RTE_TUNNEL_TYPE_IP_IN_GRE:
7721 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7722 		break;
7723 	default:
7724 		/* Other tunnel types is not supported. */
7725 		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7726 		rte_free(cld_filter);
7727 		return -EINVAL;
7728 	}
7729 
7730 	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7731 				       &pfilter->element.flags);
7732 	if (val < 0) {
7733 		rte_free(cld_filter);
7734 		return -EINVAL;
7735 	}
7736 
7737 	pfilter->element.flags |= rte_cpu_to_le_16(
7738 		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7739 		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7740 	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7741 	pfilter->element.queue_number =
7742 		rte_cpu_to_le_16(tunnel_filter->queue_id);
7743 
7744 	/* Check if there is the filter in SW list */
7745 	memset(&check_filter, 0, sizeof(check_filter));
7746 	i40e_tunnel_filter_convert(cld_filter, &check_filter);
7747 	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7748 	if (add && node) {
7749 		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7750 		rte_free(cld_filter);
7751 		return -EINVAL;
7752 	}
7753 
7754 	if (!add && !node) {
7755 		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7756 		rte_free(cld_filter);
7757 		return -EINVAL;
7758 	}
7759 
7760 	if (add) {
7761 		ret = i40e_aq_add_cloud_filters(hw,
7762 					vsi->seid, &cld_filter->element, 1);
7763 		if (ret < 0) {
7764 			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7765 			rte_free(cld_filter);
7766 			return -ENOTSUP;
7767 		}
7768 		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7769 		if (tunnel == NULL) {
7770 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7771 			rte_free(cld_filter);
7772 			return -ENOMEM;
7773 		}
7774 
7775 		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7776 		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7777 		if (ret < 0)
7778 			rte_free(tunnel);
7779 	} else {
7780 		ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
7781 						   &cld_filter->element, 1);
7782 		if (ret < 0) {
7783 			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7784 			rte_free(cld_filter);
7785 			return -ENOTSUP;
7786 		}
7787 		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7788 	}
7789 
7790 	rte_free(cld_filter);
7791 	return ret;
7792 }
7793 
7794 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7795 #define I40E_TR_VXLAN_GRE_KEY_MASK		0x4
7796 #define I40E_TR_GENEVE_KEY_MASK			0x8
7797 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK		0x40
7798 #define I40E_TR_GRE_KEY_MASK			0x400
7799 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK		0x800
7800 #define I40E_TR_GRE_NO_KEY_MASK			0x8000
7801 
7802 static enum
7803 i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7804 {
7805 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7806 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7807 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7808 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7809 	enum i40e_status_code status = I40E_SUCCESS;
7810 
7811 	if (pf->support_multi_driver) {
7812 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7813 		return I40E_NOT_SUPPORTED;
7814 	}
7815 
7816 	memset(&filter_replace, 0,
7817 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7818 	memset(&filter_replace_buf, 0,
7819 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7820 
7821 	/* create L1 filter */
7822 	filter_replace.old_filter_type =
7823 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7824 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7825 	filter_replace.tr_bit = 0;
7826 
7827 	/* Prepare the buffer, 3 entries */
7828 	filter_replace_buf.data[0] =
7829 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7830 	filter_replace_buf.data[0] |=
7831 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7832 	filter_replace_buf.data[2] = 0xFF;
7833 	filter_replace_buf.data[3] = 0xFF;
7834 	filter_replace_buf.data[4] =
7835 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7836 	filter_replace_buf.data[4] |=
7837 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7838 	filter_replace_buf.data[7] = 0xF0;
7839 	filter_replace_buf.data[8]
7840 		= I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7841 	filter_replace_buf.data[8] |=
7842 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7843 	filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7844 		I40E_TR_GENEVE_KEY_MASK |
7845 		I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7846 	filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7847 		I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7848 		I40E_TR_GRE_NO_KEY_MASK) >> 8;
7849 
7850 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7851 					       &filter_replace_buf);
7852 	if (!status && (filter_replace.old_filter_type !=
7853 			filter_replace.new_filter_type))
7854 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7855 			    " original: 0x%x, new: 0x%x",
7856 			    dev->device->name,
7857 			    filter_replace.old_filter_type,
7858 			    filter_replace.new_filter_type);
7859 
7860 	return status;
7861 }
7862 
7863 static enum
7864 i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7865 {
7866 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7867 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7868 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7869 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7870 	enum i40e_status_code status = I40E_SUCCESS;
7871 
7872 	if (pf->support_multi_driver) {
7873 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7874 		return I40E_NOT_SUPPORTED;
7875 	}
7876 
7877 	/* For MPLSoUDP */
7878 	memset(&filter_replace, 0,
7879 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7880 	memset(&filter_replace_buf, 0,
7881 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7882 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7883 		I40E_AQC_MIRROR_CLOUD_FILTER;
7884 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7885 	filter_replace.new_filter_type =
7886 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
7887 	/* Prepare the buffer, 2 entries */
7888 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7889 	filter_replace_buf.data[0] |=
7890 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7891 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7892 	filter_replace_buf.data[4] |=
7893 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7894 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7895 					       &filter_replace_buf);
7896 	if (status < 0)
7897 		return status;
7898 	if (filter_replace.old_filter_type !=
7899 	    filter_replace.new_filter_type)
7900 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7901 			    " original: 0x%x, new: 0x%x",
7902 			    dev->device->name,
7903 			    filter_replace.old_filter_type,
7904 			    filter_replace.new_filter_type);
7905 
7906 	/* For MPLSoGRE */
7907 	memset(&filter_replace, 0,
7908 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7909 	memset(&filter_replace_buf, 0,
7910 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7911 
7912 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7913 		I40E_AQC_MIRROR_CLOUD_FILTER;
7914 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7915 	filter_replace.new_filter_type =
7916 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
7917 	/* Prepare the buffer, 2 entries */
7918 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7919 	filter_replace_buf.data[0] |=
7920 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7921 	filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7922 	filter_replace_buf.data[4] |=
7923 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7924 
7925 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7926 					       &filter_replace_buf);
7927 	if (!status && (filter_replace.old_filter_type !=
7928 			filter_replace.new_filter_type))
7929 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7930 			    " original: 0x%x, new: 0x%x",
7931 			    dev->device->name,
7932 			    filter_replace.old_filter_type,
7933 			    filter_replace.new_filter_type);
7934 
7935 	return status;
7936 }
7937 
7938 static enum i40e_status_code
7939 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7940 {
7941 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
7942 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
7943 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7944 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7945 	enum i40e_status_code status = I40E_SUCCESS;
7946 
7947 	if (pf->support_multi_driver) {
7948 		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7949 		return I40E_NOT_SUPPORTED;
7950 	}
7951 
7952 	/* For GTP-C */
7953 	memset(&filter_replace, 0,
7954 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7955 	memset(&filter_replace_buf, 0,
7956 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7957 	/* create L1 filter */
7958 	filter_replace.old_filter_type =
7959 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7960 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7961 	filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7962 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7963 	/* Prepare the buffer, 2 entries */
7964 	filter_replace_buf.data[0] =
7965 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7966 	filter_replace_buf.data[0] |=
7967 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7968 	filter_replace_buf.data[2] = 0xFF;
7969 	filter_replace_buf.data[3] = 0xFF;
7970 	filter_replace_buf.data[4] =
7971 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7972 	filter_replace_buf.data[4] |=
7973 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7974 	filter_replace_buf.data[6] = 0xFF;
7975 	filter_replace_buf.data[7] = 0xFF;
7976 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7977 					       &filter_replace_buf);
7978 	if (status < 0)
7979 		return status;
7980 	if (filter_replace.old_filter_type !=
7981 	    filter_replace.new_filter_type)
7982 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7983 			    " original: 0x%x, new: 0x%x",
7984 			    dev->device->name,
7985 			    filter_replace.old_filter_type,
7986 			    filter_replace.new_filter_type);
7987 
7988 	/* for GTP-U */
7989 	memset(&filter_replace, 0,
7990 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7991 	memset(&filter_replace_buf, 0,
7992 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7993 	/* create L1 filter */
7994 	filter_replace.old_filter_type =
7995 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7996 	filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7997 	filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7998 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7999 	/* Prepare the buffer, 2 entries */
8000 	filter_replace_buf.data[0] =
8001 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
8002 	filter_replace_buf.data[0] |=
8003 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8004 	filter_replace_buf.data[2] = 0xFF;
8005 	filter_replace_buf.data[3] = 0xFF;
8006 	filter_replace_buf.data[4] =
8007 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
8008 	filter_replace_buf.data[4] |=
8009 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8010 	filter_replace_buf.data[6] = 0xFF;
8011 	filter_replace_buf.data[7] = 0xFF;
8012 
8013 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8014 					       &filter_replace_buf);
8015 	if (!status && (filter_replace.old_filter_type !=
8016 			filter_replace.new_filter_type))
8017 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
8018 			    " original: 0x%x, new: 0x%x",
8019 			    dev->device->name,
8020 			    filter_replace.old_filter_type,
8021 			    filter_replace.new_filter_type);
8022 
8023 	return status;
8024 }
8025 
8026 static enum
8027 i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
8028 {
8029 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
8030 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
8031 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8032 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
8033 	enum i40e_status_code status = I40E_SUCCESS;
8034 
8035 	if (pf->support_multi_driver) {
8036 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
8037 		return I40E_NOT_SUPPORTED;
8038 	}
8039 
8040 	/* for GTP-C */
8041 	memset(&filter_replace, 0,
8042 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8043 	memset(&filter_replace_buf, 0,
8044 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8045 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8046 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
8047 	filter_replace.new_filter_type =
8048 		I40E_AQC_ADD_CLOUD_FILTER_0X11;
8049 	/* Prepare the buffer, 2 entries */
8050 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
8051 	filter_replace_buf.data[0] |=
8052 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8053 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8054 	filter_replace_buf.data[4] |=
8055 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8056 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8057 					       &filter_replace_buf);
8058 	if (status < 0)
8059 		return status;
8060 	if (filter_replace.old_filter_type !=
8061 	    filter_replace.new_filter_type)
8062 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8063 			    " original: 0x%x, new: 0x%x",
8064 			    dev->device->name,
8065 			    filter_replace.old_filter_type,
8066 			    filter_replace.new_filter_type);
8067 
8068 	/* for GTP-U */
8069 	memset(&filter_replace, 0,
8070 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
8071 	memset(&filter_replace_buf, 0,
8072 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
8073 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
8074 	filter_replace.old_filter_type =
8075 		I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
8076 	filter_replace.new_filter_type =
8077 		I40E_AQC_ADD_CLOUD_FILTER_0X12;
8078 	/* Prepare the buffer, 2 entries */
8079 	filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
8080 	filter_replace_buf.data[0] |=
8081 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8082 	filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
8083 	filter_replace_buf.data[4] |=
8084 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
8085 
8086 	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
8087 					       &filter_replace_buf);
8088 	if (!status && (filter_replace.old_filter_type !=
8089 			filter_replace.new_filter_type))
8090 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
8091 			    " original: 0x%x, new: 0x%x",
8092 			    dev->device->name,
8093 			    filter_replace.old_filter_type,
8094 			    filter_replace.new_filter_type);
8095 
8096 	return status;
8097 }
8098 
8099 int
8100 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
8101 		      struct i40e_tunnel_filter_conf *tunnel_filter,
8102 		      uint8_t add)
8103 {
8104 	uint16_t ip_type;
8105 	uint32_t ipv4_addr, ipv4_addr_le;
8106 	uint8_t i, tun_type = 0;
8107 	/* internal variable to convert ipv6 byte order */
8108 	uint32_t convert_ipv6[4];
8109 	int val, ret = 0;
8110 	struct i40e_pf_vf *vf = NULL;
8111 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8112 	struct i40e_vsi *vsi;
8113 	struct i40e_aqc_cloud_filters_element_bb *cld_filter;
8114 	struct i40e_aqc_cloud_filters_element_bb *pfilter;
8115 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
8116 	struct i40e_tunnel_filter *tunnel, *node;
8117 	struct i40e_tunnel_filter check_filter; /* Check if filter exists */
8118 	uint32_t teid_le;
8119 	bool big_buffer = 0;
8120 
8121 	cld_filter = rte_zmalloc("tunnel_filter",
8122 			 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
8123 			 0);
8124 
8125 	if (cld_filter == NULL) {
8126 		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8127 		return -ENOMEM;
8128 	}
8129 	pfilter = cld_filter;
8130 
8131 	ether_addr_copy(&tunnel_filter->outer_mac,
8132 			(struct ether_addr *)&pfilter->element.outer_mac);
8133 	ether_addr_copy(&tunnel_filter->inner_mac,
8134 			(struct ether_addr *)&pfilter->element.inner_mac);
8135 
8136 	pfilter->element.inner_vlan =
8137 		rte_cpu_to_le_16(tunnel_filter->inner_vlan);
8138 	if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
8139 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
8140 		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
8141 		ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
8142 		rte_memcpy(&pfilter->element.ipaddr.v4.data,
8143 				&ipv4_addr_le,
8144 				sizeof(pfilter->element.ipaddr.v4.data));
8145 	} else {
8146 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
8147 		for (i = 0; i < 4; i++) {
8148 			convert_ipv6[i] =
8149 			rte_cpu_to_le_32(rte_be_to_cpu_32(
8150 					 tunnel_filter->ip_addr.ipv6_addr[i]));
8151 		}
8152 		rte_memcpy(&pfilter->element.ipaddr.v6.data,
8153 			   &convert_ipv6,
8154 			   sizeof(pfilter->element.ipaddr.v6.data));
8155 	}
8156 
8157 	/* check tunneled type */
8158 	switch (tunnel_filter->tunnel_type) {
8159 	case I40E_TUNNEL_TYPE_VXLAN:
8160 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
8161 		break;
8162 	case I40E_TUNNEL_TYPE_NVGRE:
8163 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
8164 		break;
8165 	case I40E_TUNNEL_TYPE_IP_IN_GRE:
8166 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
8167 		break;
8168 	case I40E_TUNNEL_TYPE_MPLSoUDP:
8169 		if (!pf->mpls_replace_flag) {
8170 			i40e_replace_mpls_l1_filter(pf);
8171 			i40e_replace_mpls_cloud_filter(pf);
8172 			pf->mpls_replace_flag = 1;
8173 		}
8174 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8175 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8176 			teid_le >> 4;
8177 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8178 			(teid_le & 0xF) << 12;
8179 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8180 			0x40;
8181 		big_buffer = 1;
8182 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
8183 		break;
8184 	case I40E_TUNNEL_TYPE_MPLSoGRE:
8185 		if (!pf->mpls_replace_flag) {
8186 			i40e_replace_mpls_l1_filter(pf);
8187 			i40e_replace_mpls_cloud_filter(pf);
8188 			pf->mpls_replace_flag = 1;
8189 		}
8190 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8191 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
8192 			teid_le >> 4;
8193 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
8194 			(teid_le & 0xF) << 12;
8195 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
8196 			0x0;
8197 		big_buffer = 1;
8198 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
8199 		break;
8200 	case I40E_TUNNEL_TYPE_GTPC:
8201 		if (!pf->gtp_replace_flag) {
8202 			i40e_replace_gtp_l1_filter(pf);
8203 			i40e_replace_gtp_cloud_filter(pf);
8204 			pf->gtp_replace_flag = 1;
8205 		}
8206 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8207 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8208 			(teid_le >> 16) & 0xFFFF;
8209 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8210 			teid_le & 0xFFFF;
8211 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8212 			0x0;
8213 		big_buffer = 1;
8214 		break;
8215 	case I40E_TUNNEL_TYPE_GTPU:
8216 		if (!pf->gtp_replace_flag) {
8217 			i40e_replace_gtp_l1_filter(pf);
8218 			i40e_replace_gtp_cloud_filter(pf);
8219 			pf->gtp_replace_flag = 1;
8220 		}
8221 		teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8222 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8223 			(teid_le >> 16) & 0xFFFF;
8224 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8225 			teid_le & 0xFFFF;
8226 		pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8227 			0x0;
8228 		big_buffer = 1;
8229 		break;
8230 	case I40E_TUNNEL_TYPE_QINQ:
8231 		if (!pf->qinq_replace_flag) {
8232 			ret = i40e_cloud_filter_qinq_create(pf);
8233 			if (ret < 0)
8234 				PMD_DRV_LOG(DEBUG,
8235 					    "QinQ tunnel filter already created.");
8236 			pf->qinq_replace_flag = 1;
8237 		}
8238 		/*	Add in the General fields the values of
8239 		 *	the Outer and Inner VLAN
8240 		 *	Big Buffer should be set, see changes in
8241 		 *	i40e_aq_add_cloud_filters
8242 		 */
8243 		pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8244 		pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8245 		big_buffer = 1;
8246 		break;
8247 	default:
8248 		/* Other tunnel types is not supported. */
8249 		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
8250 		rte_free(cld_filter);
8251 		return -EINVAL;
8252 	}
8253 
8254 	if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8255 		pfilter->element.flags =
8256 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8257 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8258 		pfilter->element.flags =
8259 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
8260 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8261 		pfilter->element.flags =
8262 			I40E_AQC_ADD_CLOUD_FILTER_0X11;
8263 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8264 		pfilter->element.flags =
8265 			I40E_AQC_ADD_CLOUD_FILTER_0X12;
8266 	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8267 		pfilter->element.flags |=
8268 			I40E_AQC_ADD_CLOUD_FILTER_0X10;
8269 	else {
8270 		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8271 						&pfilter->element.flags);
8272 		if (val < 0) {
8273 			rte_free(cld_filter);
8274 			return -EINVAL;
8275 		}
8276 	}
8277 
8278 	pfilter->element.flags |= rte_cpu_to_le_16(
8279 		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8280 		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8281 	pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8282 	pfilter->element.queue_number =
8283 		rte_cpu_to_le_16(tunnel_filter->queue_id);
8284 
8285 	if (!tunnel_filter->is_to_vf)
8286 		vsi = pf->main_vsi;
8287 	else {
8288 		if (tunnel_filter->vf_id >= pf->vf_num) {
8289 			PMD_DRV_LOG(ERR, "Invalid argument.");
8290 			rte_free(cld_filter);
8291 			return -EINVAL;
8292 		}
8293 		vf = &pf->vfs[tunnel_filter->vf_id];
8294 		vsi = vf->vsi;
8295 	}
8296 
8297 	/* Check if there is the filter in SW list */
8298 	memset(&check_filter, 0, sizeof(check_filter));
8299 	i40e_tunnel_filter_convert(cld_filter, &check_filter);
8300 	check_filter.is_to_vf = tunnel_filter->is_to_vf;
8301 	check_filter.vf_id = tunnel_filter->vf_id;
8302 	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8303 	if (add && node) {
8304 		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8305 		rte_free(cld_filter);
8306 		return -EINVAL;
8307 	}
8308 
8309 	if (!add && !node) {
8310 		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8311 		rte_free(cld_filter);
8312 		return -EINVAL;
8313 	}
8314 
8315 	if (add) {
8316 		if (big_buffer)
8317 			ret = i40e_aq_add_cloud_filters_bb(hw,
8318 						   vsi->seid, cld_filter, 1);
8319 		else
8320 			ret = i40e_aq_add_cloud_filters(hw,
8321 					vsi->seid, &cld_filter->element, 1);
8322 		if (ret < 0) {
8323 			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8324 			rte_free(cld_filter);
8325 			return -ENOTSUP;
8326 		}
8327 		tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8328 		if (tunnel == NULL) {
8329 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8330 			rte_free(cld_filter);
8331 			return -ENOMEM;
8332 		}
8333 
8334 		rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8335 		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8336 		if (ret < 0)
8337 			rte_free(tunnel);
8338 	} else {
8339 		if (big_buffer)
8340 			ret = i40e_aq_rem_cloud_filters_bb(
8341 				hw, vsi->seid, cld_filter, 1);
8342 		else
8343 			ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
8344 						&cld_filter->element, 1);
8345 		if (ret < 0) {
8346 			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8347 			rte_free(cld_filter);
8348 			return -ENOTSUP;
8349 		}
8350 		ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8351 	}
8352 
8353 	rte_free(cld_filter);
8354 	return ret;
8355 }
8356 
8357 static int
8358 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8359 {
8360 	uint8_t i;
8361 
8362 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8363 		if (pf->vxlan_ports[i] == port)
8364 			return i;
8365 	}
8366 
8367 	return -1;
8368 }
8369 
8370 static int
8371 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
8372 {
8373 	int  idx, ret;
8374 	uint8_t filter_idx;
8375 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8376 
8377 	idx = i40e_get_vxlan_port_idx(pf, port);
8378 
8379 	/* Check if port already exists */
8380 	if (idx >= 0) {
8381 		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8382 		return -EINVAL;
8383 	}
8384 
8385 	/* Now check if there is space to add the new port */
8386 	idx = i40e_get_vxlan_port_idx(pf, 0);
8387 	if (idx < 0) {
8388 		PMD_DRV_LOG(ERR,
8389 			"Maximum number of UDP ports reached, not adding port %d",
8390 			port);
8391 		return -ENOSPC;
8392 	}
8393 
8394 	ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
8395 					&filter_idx, NULL);
8396 	if (ret < 0) {
8397 		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8398 		return -1;
8399 	}
8400 
8401 	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
8402 			 port,  filter_idx);
8403 
8404 	/* New port: add it and mark its index in the bitmap */
8405 	pf->vxlan_ports[idx] = port;
8406 	pf->vxlan_bitmap |= (1 << idx);
8407 
8408 	if (!(pf->flags & I40E_FLAG_VXLAN))
8409 		pf->flags |= I40E_FLAG_VXLAN;
8410 
8411 	return 0;
8412 }
8413 
8414 static int
8415 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8416 {
8417 	int idx;
8418 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8419 
8420 	if (!(pf->flags & I40E_FLAG_VXLAN)) {
8421 		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8422 		return -EINVAL;
8423 	}
8424 
8425 	idx = i40e_get_vxlan_port_idx(pf, port);
8426 
8427 	if (idx < 0) {
8428 		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8429 		return -EINVAL;
8430 	}
8431 
8432 	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8433 		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8434 		return -1;
8435 	}
8436 
8437 	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
8438 			port, idx);
8439 
8440 	pf->vxlan_ports[idx] = 0;
8441 	pf->vxlan_bitmap &= ~(1 << idx);
8442 
8443 	if (!pf->vxlan_bitmap)
8444 		pf->flags &= ~I40E_FLAG_VXLAN;
8445 
8446 	return 0;
8447 }
8448 
8449 /* Add UDP tunneling port */
8450 static int
8451 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8452 			     struct rte_eth_udp_tunnel *udp_tunnel)
8453 {
8454 	int ret = 0;
8455 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8456 
8457 	if (udp_tunnel == NULL)
8458 		return -EINVAL;
8459 
8460 	switch (udp_tunnel->prot_type) {
8461 	case RTE_TUNNEL_TYPE_VXLAN:
8462 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
8463 		break;
8464 
8465 	case RTE_TUNNEL_TYPE_GENEVE:
8466 	case RTE_TUNNEL_TYPE_TEREDO:
8467 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8468 		ret = -1;
8469 		break;
8470 
8471 	default:
8472 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8473 		ret = -1;
8474 		break;
8475 	}
8476 
8477 	return ret;
8478 }
8479 
8480 /* Remove UDP tunneling port */
8481 static int
8482 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8483 			     struct rte_eth_udp_tunnel *udp_tunnel)
8484 {
8485 	int ret = 0;
8486 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8487 
8488 	if (udp_tunnel == NULL)
8489 		return -EINVAL;
8490 
8491 	switch (udp_tunnel->prot_type) {
8492 	case RTE_TUNNEL_TYPE_VXLAN:
8493 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8494 		break;
8495 	case RTE_TUNNEL_TYPE_GENEVE:
8496 	case RTE_TUNNEL_TYPE_TEREDO:
8497 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8498 		ret = -1;
8499 		break;
8500 	default:
8501 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
8502 		ret = -1;
8503 		break;
8504 	}
8505 
8506 	return ret;
8507 }
8508 
8509 /* Calculate the maximum number of contiguous PF queues that are configured */
8510 static int
8511 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8512 {
8513 	struct rte_eth_dev_data *data = pf->dev_data;
8514 	int i, num;
8515 	struct i40e_rx_queue *rxq;
8516 
8517 	num = 0;
8518 	for (i = 0; i < pf->lan_nb_qps; i++) {
8519 		rxq = data->rx_queues[i];
8520 		if (rxq && rxq->q_set)
8521 			num++;
8522 		else
8523 			break;
8524 	}
8525 
8526 	return num;
8527 }
8528 
8529 /* Configure RSS */
8530 static int
8531 i40e_pf_config_rss(struct i40e_pf *pf)
8532 {
8533 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8534 	struct rte_eth_rss_conf rss_conf;
8535 	uint32_t i, lut = 0;
8536 	uint16_t j, num;
8537 
8538 	/*
8539 	 * If both VMDQ and RSS enabled, not all of PF queues are configured.
8540 	 * It's necessary to calculate the actual PF queues that are configured.
8541 	 */
8542 	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8543 		num = i40e_pf_calc_configured_queues_num(pf);
8544 	else
8545 		num = pf->dev_data->nb_rx_queues;
8546 
8547 	num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8548 	PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
8549 			num);
8550 
8551 	if (num == 0) {
8552 		PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
8553 		return -ENOTSUP;
8554 	}
8555 
8556 	if (pf->adapter->rss_reta_updated == 0) {
8557 		for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8558 			if (j == num)
8559 				j = 0;
8560 			lut = (lut << 8) | (j & ((0x1 <<
8561 				hw->func_caps.rss_table_entry_width) - 1));
8562 			if ((i & 3) == 3)
8563 				I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2),
8564 					       rte_bswap32(lut));
8565 		}
8566 	}
8567 
8568 	rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8569 	if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
8570 		i40e_pf_disable_rss(pf);
8571 		return 0;
8572 	}
8573 	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8574 		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8575 		/* Random default keys */
8576 		static uint32_t rss_key_default[] = {0x6b793944,
8577 			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8578 			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8579 			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8580 
8581 		rss_conf.rss_key = (uint8_t *)rss_key_default;
8582 		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8583 							sizeof(uint32_t);
8584 	}
8585 
8586 	return i40e_hw_rss_hash_set(pf, &rss_conf);
8587 }
8588 
8589 static int
8590 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
8591 			       struct rte_eth_tunnel_filter_conf *filter)
8592 {
8593 	if (pf == NULL || filter == NULL) {
8594 		PMD_DRV_LOG(ERR, "Invalid parameter");
8595 		return -EINVAL;
8596 	}
8597 
8598 	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
8599 		PMD_DRV_LOG(ERR, "Invalid queue ID");
8600 		return -EINVAL;
8601 	}
8602 
8603 	if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
8604 		PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
8605 		return -EINVAL;
8606 	}
8607 
8608 	if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
8609 		(is_zero_ether_addr(&filter->outer_mac))) {
8610 		PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
8611 		return -EINVAL;
8612 	}
8613 
8614 	if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
8615 		(is_zero_ether_addr(&filter->inner_mac))) {
8616 		PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
8617 		return -EINVAL;
8618 	}
8619 
8620 	return 0;
8621 }
8622 
8623 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8624 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
8625 static int
8626 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8627 {
8628 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8629 	uint32_t val, reg;
8630 	int ret = -EINVAL;
8631 
8632 	if (pf->support_multi_driver) {
8633 		PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8634 		return -ENOTSUP;
8635 	}
8636 
8637 	val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8638 	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8639 
8640 	if (len == 3) {
8641 		reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8642 	} else if (len == 4) {
8643 		reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8644 	} else {
8645 		PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8646 		return ret;
8647 	}
8648 
8649 	if (reg != val) {
8650 		ret = i40e_aq_debug_write_global_register(hw,
8651 						   I40E_GL_PRS_FVBM(2),
8652 						   reg, NULL);
8653 		if (ret != 0)
8654 			return ret;
8655 		PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8656 			    "with value 0x%08x",
8657 			    I40E_GL_PRS_FVBM(2), reg);
8658 	} else {
8659 		ret = 0;
8660 	}
8661 	PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8662 		    I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8663 
8664 	return ret;
8665 }
8666 
8667 static int
8668 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8669 {
8670 	int ret = -EINVAL;
8671 
8672 	if (!hw || !cfg)
8673 		return -EINVAL;
8674 
8675 	switch (cfg->cfg_type) {
8676 	case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8677 		ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8678 		break;
8679 	default:
8680 		PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8681 		break;
8682 	}
8683 
8684 	return ret;
8685 }
8686 
8687 static int
8688 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8689 			       enum rte_filter_op filter_op,
8690 			       void *arg)
8691 {
8692 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8693 	int ret = I40E_ERR_PARAM;
8694 
8695 	switch (filter_op) {
8696 	case RTE_ETH_FILTER_SET:
8697 		ret = i40e_dev_global_config_set(hw,
8698 			(struct rte_eth_global_cfg *)arg);
8699 		break;
8700 	default:
8701 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8702 		break;
8703 	}
8704 
8705 	return ret;
8706 }
8707 
8708 static int
8709 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8710 			  enum rte_filter_op filter_op,
8711 			  void *arg)
8712 {
8713 	struct rte_eth_tunnel_filter_conf *filter;
8714 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8715 	int ret = I40E_SUCCESS;
8716 
8717 	filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8718 
8719 	if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8720 		return I40E_ERR_PARAM;
8721 
8722 	switch (filter_op) {
8723 	case RTE_ETH_FILTER_NOP:
8724 		if (!(pf->flags & I40E_FLAG_VXLAN))
8725 			ret = I40E_NOT_SUPPORTED;
8726 		break;
8727 	case RTE_ETH_FILTER_ADD:
8728 		ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8729 		break;
8730 	case RTE_ETH_FILTER_DELETE:
8731 		ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8732 		break;
8733 	default:
8734 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8735 		ret = I40E_ERR_PARAM;
8736 		break;
8737 	}
8738 
8739 	return ret;
8740 }
8741 
8742 static int
8743 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8744 {
8745 	int ret = 0;
8746 	enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8747 
8748 	/* RSS setup */
8749 	if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8750 		ret = i40e_pf_config_rss(pf);
8751 	else
8752 		i40e_pf_disable_rss(pf);
8753 
8754 	return ret;
8755 }
8756 
8757 /* Get the symmetric hash enable configurations per port */
8758 static void
8759 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8760 {
8761 	uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8762 
8763 	*enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8764 }
8765 
8766 /* Set the symmetric hash enable configurations per port */
8767 static void
8768 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8769 {
8770 	uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8771 
8772 	if (enable > 0) {
8773 		if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8774 			PMD_DRV_LOG(INFO,
8775 				"Symmetric hash has already been enabled");
8776 			return;
8777 		}
8778 		reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8779 	} else {
8780 		if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8781 			PMD_DRV_LOG(INFO,
8782 				"Symmetric hash has already been disabled");
8783 			return;
8784 		}
8785 		reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8786 	}
8787 	i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8788 	I40E_WRITE_FLUSH(hw);
8789 }
8790 
8791 /*
8792  * Get global configurations of hash function type and symmetric hash enable
8793  * per flow type (pctype). Note that global configuration means it affects all
8794  * the ports on the same NIC.
8795  */
8796 static int
8797 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8798 				   struct rte_eth_hash_global_conf *g_cfg)
8799 {
8800 	struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8801 	uint32_t reg;
8802 	uint16_t i, j;
8803 
8804 	memset(g_cfg, 0, sizeof(*g_cfg));
8805 	reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8806 	if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8807 		g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8808 	else
8809 		g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8810 	PMD_DRV_LOG(DEBUG, "Hash function is %s",
8811 		(reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8812 
8813 	/*
8814 	 * As i40e supports less than 64 flow types, only first 64 bits need to
8815 	 * be checked.
8816 	 */
8817 	for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8818 		g_cfg->valid_bit_mask[i] = 0ULL;
8819 		g_cfg->sym_hash_enable_mask[i] = 0ULL;
8820 	}
8821 
8822 	g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8823 
8824 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8825 		if (!adapter->pctypes_tbl[i])
8826 			continue;
8827 		for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8828 		     j < I40E_FILTER_PCTYPE_MAX; j++) {
8829 			if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8830 				reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8831 				if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8832 					g_cfg->sym_hash_enable_mask[0] |=
8833 								(1ULL << i);
8834 				}
8835 			}
8836 		}
8837 	}
8838 
8839 	return 0;
8840 }
8841 
8842 static int
8843 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8844 			      const struct rte_eth_hash_global_conf *g_cfg)
8845 {
8846 	uint32_t i;
8847 	uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8848 
8849 	if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8850 		g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8851 		g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8852 		PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8853 						g_cfg->hash_func);
8854 		return -EINVAL;
8855 	}
8856 
8857 	/*
8858 	 * As i40e supports less than 64 flow types, only first 64 bits need to
8859 	 * be checked.
8860 	 */
8861 	mask0 = g_cfg->valid_bit_mask[0];
8862 	for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8863 		if (i == 0) {
8864 			/* Check if any unsupported flow type configured */
8865 			if ((mask0 | i40e_mask) ^ i40e_mask)
8866 				goto mask_err;
8867 		} else {
8868 			if (g_cfg->valid_bit_mask[i])
8869 				goto mask_err;
8870 		}
8871 	}
8872 
8873 	return 0;
8874 
8875 mask_err:
8876 	PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8877 
8878 	return -EINVAL;
8879 }
8880 
8881 /*
8882  * Set global configurations of hash function type and symmetric hash enable
8883  * per flow type (pctype). Note any modifying global configuration will affect
8884  * all the ports on the same NIC.
8885  */
8886 static int
8887 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8888 				   struct rte_eth_hash_global_conf *g_cfg)
8889 {
8890 	struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8891 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8892 	int ret;
8893 	uint16_t i, j;
8894 	uint32_t reg;
8895 	uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
8896 
8897 	if (pf->support_multi_driver) {
8898 		PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
8899 		return -ENOTSUP;
8900 	}
8901 
8902 	/* Check the input parameters */
8903 	ret = i40e_hash_global_config_check(adapter, g_cfg);
8904 	if (ret < 0)
8905 		return ret;
8906 
8907 	/*
8908 	 * As i40e supports less than 64 flow types, only first 64 bits need to
8909 	 * be configured.
8910 	 */
8911 	for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
8912 		if (mask0 & (1UL << i)) {
8913 			reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
8914 					I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8915 
8916 			for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8917 			     j < I40E_FILTER_PCTYPE_MAX; j++) {
8918 				if (adapter->pctypes_tbl[i] & (1ULL << j))
8919 					i40e_write_global_rx_ctl(hw,
8920 							  I40E_GLQF_HSYM(j),
8921 							  reg);
8922 			}
8923 		}
8924 	}
8925 
8926 	reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8927 	if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8928 		/* Toeplitz */
8929 		if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8930 			PMD_DRV_LOG(DEBUG,
8931 				"Hash function already set to Toeplitz");
8932 			goto out;
8933 		}
8934 		reg |= I40E_GLQF_CTL_HTOEP_MASK;
8935 	} else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8936 		/* Simple XOR */
8937 		if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8938 			PMD_DRV_LOG(DEBUG,
8939 				"Hash function already set to Simple XOR");
8940 			goto out;
8941 		}
8942 		reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8943 	} else
8944 		/* Use the default, and keep it as it is */
8945 		goto out;
8946 
8947 	i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
8948 
8949 out:
8950 	I40E_WRITE_FLUSH(hw);
8951 
8952 	return 0;
8953 }
8954 
8955 /**
8956  * Valid input sets for hash and flow director filters per PCTYPE
8957  */
8958 static uint64_t
8959 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8960 		enum rte_filter_type filter)
8961 {
8962 	uint64_t valid;
8963 
8964 	static const uint64_t valid_hash_inset_table[] = {
8965 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
8966 			I40E_INSET_DMAC | I40E_INSET_SMAC |
8967 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8968 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8969 			I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8970 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8971 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8972 			I40E_INSET_FLEX_PAYLOAD,
8973 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8974 			I40E_INSET_DMAC | I40E_INSET_SMAC |
8975 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8976 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8977 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8978 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8979 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8980 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8981 			I40E_INSET_FLEX_PAYLOAD,
8982 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8983 			I40E_INSET_DMAC | I40E_INSET_SMAC |
8984 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8985 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8986 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8987 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8988 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8989 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8990 			I40E_INSET_FLEX_PAYLOAD,
8991 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8992 			I40E_INSET_DMAC | I40E_INSET_SMAC |
8993 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8994 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8995 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8996 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8997 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8998 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8999 			I40E_INSET_FLEX_PAYLOAD,
9000 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9001 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9002 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9003 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9004 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9005 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9006 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9007 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9008 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9009 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9010 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9011 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9012 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9013 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9014 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9015 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9016 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9017 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
9018 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9019 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9020 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9021 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9022 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9023 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9024 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9025 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9026 			I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
9027 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9028 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9029 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9030 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
9031 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
9032 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
9033 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9034 			I40E_INSET_FLEX_PAYLOAD,
9035 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9036 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9037 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9038 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9039 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9040 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
9041 			I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
9042 			I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
9043 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9044 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9045 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9046 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9047 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9048 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9049 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9050 			I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
9051 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9052 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9053 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9054 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9055 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9056 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9057 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9058 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9059 			I40E_INSET_FLEX_PAYLOAD,
9060 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9061 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9062 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9063 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9064 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9065 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9066 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9067 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9068 			I40E_INSET_FLEX_PAYLOAD,
9069 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9070 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9071 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9072 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9073 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9074 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9075 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9076 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9077 			I40E_INSET_FLEX_PAYLOAD,
9078 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9079 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9080 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9081 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9082 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9083 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9084 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9085 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
9086 			I40E_INSET_FLEX_PAYLOAD,
9087 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9088 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9089 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9090 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9091 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9092 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9093 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
9094 			I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
9095 			I40E_INSET_FLEX_PAYLOAD,
9096 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9097 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9098 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9099 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
9100 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
9101 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
9102 			I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
9103 			I40E_INSET_FLEX_PAYLOAD,
9104 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9105 			I40E_INSET_DMAC | I40E_INSET_SMAC |
9106 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9107 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
9108 			I40E_INSET_FLEX_PAYLOAD,
9109 	};
9110 
9111 	/**
9112 	 * Flow director supports only fields defined in
9113 	 * union rte_eth_fdir_flow.
9114 	 */
9115 	static const uint64_t valid_fdir_inset_table[] = {
9116 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9117 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9118 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9119 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9120 		I40E_INSET_IPV4_TTL,
9121 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9122 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9123 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9124 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9125 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9126 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9127 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9128 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9129 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9130 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9131 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9132 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9133 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9134 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9135 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9136 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9137 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9138 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9139 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9140 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9141 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9142 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9143 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9144 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9145 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9146 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9147 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9148 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9149 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
9150 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9151 		I40E_INSET_SCTP_VT,
9152 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9153 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9154 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9155 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
9156 		I40E_INSET_IPV4_TTL,
9157 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9158 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9159 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9160 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9161 		I40E_INSET_IPV6_HOP_LIMIT,
9162 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9163 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9164 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9165 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9166 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9167 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9168 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9169 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9170 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9171 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9172 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9173 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9174 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9175 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9176 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9177 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9178 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9179 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9180 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9181 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9182 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9183 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9184 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9185 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9186 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9187 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9188 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9189 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9190 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
9191 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9192 		I40E_INSET_SCTP_VT,
9193 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9194 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9195 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9196 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
9197 		I40E_INSET_IPV6_HOP_LIMIT,
9198 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9199 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
9200 		I40E_INSET_LAST_ETHER_TYPE,
9201 	};
9202 
9203 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9204 		return 0;
9205 	if (filter == RTE_ETH_FILTER_HASH)
9206 		valid = valid_hash_inset_table[pctype];
9207 	else
9208 		valid = valid_fdir_inset_table[pctype];
9209 
9210 	return valid;
9211 }
9212 
9213 /**
9214  * Validate if the input set is allowed for a specific PCTYPE
9215  */
9216 int
9217 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9218 		enum rte_filter_type filter, uint64_t inset)
9219 {
9220 	uint64_t valid;
9221 
9222 	valid = i40e_get_valid_input_set(pctype, filter);
9223 	if (inset & (~valid))
9224 		return -EINVAL;
9225 
9226 	return 0;
9227 }
9228 
9229 /* default input set fields combination per pctype */
9230 uint64_t
9231 i40e_get_default_input_set(uint16_t pctype)
9232 {
9233 	static const uint64_t default_inset_table[] = {
9234 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
9235 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9236 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9237 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9238 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9239 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9240 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9241 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9242 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9243 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9244 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9245 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9246 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9247 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9248 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9249 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9250 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9251 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9252 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9253 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9254 			I40E_INSET_SCTP_VT,
9255 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9256 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9257 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
9258 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9259 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9260 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9261 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9262 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9263 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9264 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9265 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9266 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9267 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9268 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9269 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9270 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9271 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9272 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9273 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9274 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9275 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9276 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9277 			I40E_INSET_SCTP_VT,
9278 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9279 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9280 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9281 			I40E_INSET_LAST_ETHER_TYPE,
9282 	};
9283 
9284 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9285 		return 0;
9286 
9287 	return default_inset_table[pctype];
9288 }
9289 
9290 /**
9291  * Parse the input set from index to logical bit masks
9292  */
9293 static int
9294 i40e_parse_input_set(uint64_t *inset,
9295 		     enum i40e_filter_pctype pctype,
9296 		     enum rte_eth_input_set_field *field,
9297 		     uint16_t size)
9298 {
9299 	uint16_t i, j;
9300 	int ret = -EINVAL;
9301 
9302 	static const struct {
9303 		enum rte_eth_input_set_field field;
9304 		uint64_t inset;
9305 	} inset_convert_table[] = {
9306 		{RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9307 		{RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9308 		{RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9309 		{RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9310 		{RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9311 		{RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9312 		{RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9313 		{RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9314 		{RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9315 		{RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9316 		{RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9317 		{RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9318 		{RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9319 		{RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9320 		{RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9321 			I40E_INSET_IPV6_NEXT_HDR},
9322 		{RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9323 			I40E_INSET_IPV6_HOP_LIMIT},
9324 		{RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9325 		{RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9326 		{RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9327 		{RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9328 		{RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9329 		{RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9330 		{RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9331 			I40E_INSET_SCTP_VT},
9332 		{RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9333 			I40E_INSET_TUNNEL_DMAC},
9334 		{RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9335 			I40E_INSET_VLAN_TUNNEL},
9336 		{RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9337 			I40E_INSET_TUNNEL_ID},
9338 		{RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9339 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9340 			I40E_INSET_FLEX_PAYLOAD_W1},
9341 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9342 			I40E_INSET_FLEX_PAYLOAD_W2},
9343 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9344 			I40E_INSET_FLEX_PAYLOAD_W3},
9345 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9346 			I40E_INSET_FLEX_PAYLOAD_W4},
9347 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9348 			I40E_INSET_FLEX_PAYLOAD_W5},
9349 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9350 			I40E_INSET_FLEX_PAYLOAD_W6},
9351 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9352 			I40E_INSET_FLEX_PAYLOAD_W7},
9353 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9354 			I40E_INSET_FLEX_PAYLOAD_W8},
9355 	};
9356 
9357 	if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9358 		return ret;
9359 
9360 	/* Only one item allowed for default or all */
9361 	if (size == 1) {
9362 		if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9363 			*inset = i40e_get_default_input_set(pctype);
9364 			return 0;
9365 		} else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9366 			*inset = I40E_INSET_NONE;
9367 			return 0;
9368 		}
9369 	}
9370 
9371 	for (i = 0, *inset = 0; i < size; i++) {
9372 		for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9373 			if (field[i] == inset_convert_table[j].field) {
9374 				*inset |= inset_convert_table[j].inset;
9375 				break;
9376 			}
9377 		}
9378 
9379 		/* It contains unsupported input set, return immediately */
9380 		if (j == RTE_DIM(inset_convert_table))
9381 			return ret;
9382 	}
9383 
9384 	return 0;
9385 }
9386 
9387 /**
9388  * Translate the input set from bit masks to register aware bit masks
9389  * and vice versa
9390  */
9391 uint64_t
9392 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9393 {
9394 	uint64_t val = 0;
9395 	uint16_t i;
9396 
9397 	struct inset_map {
9398 		uint64_t inset;
9399 		uint64_t inset_reg;
9400 	};
9401 
9402 	static const struct inset_map inset_map_common[] = {
9403 		{I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9404 		{I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9405 		{I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9406 		{I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9407 		{I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9408 		{I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9409 		{I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9410 		{I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9411 		{I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9412 		{I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9413 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9414 		{I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9415 		{I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9416 		{I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9417 		{I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9418 		{I40E_INSET_TUNNEL_DMAC,
9419 			I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9420 		{I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9421 		{I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9422 		{I40E_INSET_TUNNEL_SRC_PORT,
9423 			I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9424 		{I40E_INSET_TUNNEL_DST_PORT,
9425 			I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9426 		{I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9427 		{I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9428 		{I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9429 		{I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9430 		{I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9431 		{I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9432 		{I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9433 		{I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9434 		{I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9435 	};
9436 
9437     /* some different registers map in x722*/
9438 	static const struct inset_map inset_map_diff_x722[] = {
9439 		{I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9440 		{I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9441 		{I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9442 		{I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9443 	};
9444 
9445 	static const struct inset_map inset_map_diff_not_x722[] = {
9446 		{I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9447 		{I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9448 		{I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9449 		{I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9450 	};
9451 
9452 	if (input == 0)
9453 		return val;
9454 
9455 	/* Translate input set to register aware inset */
9456 	if (type == I40E_MAC_X722) {
9457 		for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9458 			if (input & inset_map_diff_x722[i].inset)
9459 				val |= inset_map_diff_x722[i].inset_reg;
9460 		}
9461 	} else {
9462 		for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9463 			if (input & inset_map_diff_not_x722[i].inset)
9464 				val |= inset_map_diff_not_x722[i].inset_reg;
9465 		}
9466 	}
9467 
9468 	for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9469 		if (input & inset_map_common[i].inset)
9470 			val |= inset_map_common[i].inset_reg;
9471 	}
9472 
9473 	return val;
9474 }
9475 
9476 int
9477 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9478 {
9479 	uint8_t i, idx = 0;
9480 	uint64_t inset_need_mask = inset;
9481 
9482 	static const struct {
9483 		uint64_t inset;
9484 		uint32_t mask;
9485 	} inset_mask_map[] = {
9486 		{I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9487 		{I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9488 		{I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9489 		{I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9490 		{I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9491 		{I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9492 		{I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9493 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9494 	};
9495 
9496 	if (!inset || !mask || !nb_elem)
9497 		return 0;
9498 
9499 	for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9500 		/* Clear the inset bit, if no MASK is required,
9501 		 * for example proto + ttl
9502 		 */
9503 		if ((inset & inset_mask_map[i].inset) ==
9504 		     inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9505 			inset_need_mask &= ~inset_mask_map[i].inset;
9506 		if (!inset_need_mask)
9507 			return 0;
9508 	}
9509 	for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9510 		if ((inset_need_mask & inset_mask_map[i].inset) ==
9511 		    inset_mask_map[i].inset) {
9512 			if (idx >= nb_elem) {
9513 				PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
9514 				return -EINVAL;
9515 			}
9516 			mask[idx] = inset_mask_map[i].mask;
9517 			idx++;
9518 		}
9519 	}
9520 
9521 	return idx;
9522 }
9523 
9524 void
9525 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9526 {
9527 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9528 
9529 	PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9530 	if (reg != val)
9531 		i40e_write_rx_ctl(hw, addr, val);
9532 	PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9533 		    (uint32_t)i40e_read_rx_ctl(hw, addr));
9534 }
9535 
9536 void
9537 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9538 {
9539 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
9540 	struct rte_eth_dev *dev;
9541 
9542 	dev = ((struct i40e_adapter *)hw->back)->eth_dev;
9543 	if (reg != val) {
9544 		i40e_write_rx_ctl(hw, addr, val);
9545 		PMD_DRV_LOG(WARNING,
9546 			    "i40e device %s changed global register [0x%08x]."
9547 			    " original: 0x%08x, new: 0x%08x",
9548 			    dev->device->name, addr, reg,
9549 			    (uint32_t)i40e_read_rx_ctl(hw, addr));
9550 	}
9551 }
9552 
9553 static void
9554 i40e_filter_input_set_init(struct i40e_pf *pf)
9555 {
9556 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9557 	enum i40e_filter_pctype pctype;
9558 	uint64_t input_set, inset_reg;
9559 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9560 	int num, i;
9561 	uint16_t flow_type;
9562 
9563 	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9564 	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9565 		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9566 
9567 		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9568 			continue;
9569 
9570 		input_set = i40e_get_default_input_set(pctype);
9571 
9572 		num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9573 						   I40E_INSET_MASK_NUM_REG);
9574 		if (num < 0)
9575 			return;
9576 		if (pf->support_multi_driver && num > 0) {
9577 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9578 			return;
9579 		}
9580 		inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9581 					input_set);
9582 
9583 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9584 				      (uint32_t)(inset_reg & UINT32_MAX));
9585 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9586 				     (uint32_t)((inset_reg >>
9587 				     I40E_32_BIT_WIDTH) & UINT32_MAX));
9588 		if (!pf->support_multi_driver) {
9589 			i40e_check_write_global_reg(hw,
9590 					    I40E_GLQF_HASH_INSET(0, pctype),
9591 					    (uint32_t)(inset_reg & UINT32_MAX));
9592 			i40e_check_write_global_reg(hw,
9593 					     I40E_GLQF_HASH_INSET(1, pctype),
9594 					     (uint32_t)((inset_reg >>
9595 					      I40E_32_BIT_WIDTH) & UINT32_MAX));
9596 
9597 			for (i = 0; i < num; i++) {
9598 				i40e_check_write_global_reg(hw,
9599 						    I40E_GLQF_FD_MSK(i, pctype),
9600 						    mask_reg[i]);
9601 				i40e_check_write_global_reg(hw,
9602 						  I40E_GLQF_HASH_MSK(i, pctype),
9603 						  mask_reg[i]);
9604 			}
9605 			/*clear unused mask registers of the pctype */
9606 			for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9607 				i40e_check_write_global_reg(hw,
9608 						    I40E_GLQF_FD_MSK(i, pctype),
9609 						    0);
9610 				i40e_check_write_global_reg(hw,
9611 						  I40E_GLQF_HASH_MSK(i, pctype),
9612 						  0);
9613 			}
9614 		} else {
9615 			PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9616 		}
9617 		I40E_WRITE_FLUSH(hw);
9618 
9619 		/* store the default input set */
9620 		if (!pf->support_multi_driver)
9621 			pf->hash_input_set[pctype] = input_set;
9622 		pf->fdir.input_set[pctype] = input_set;
9623 	}
9624 }
9625 
9626 int
9627 i40e_hash_filter_inset_select(struct i40e_hw *hw,
9628 			 struct rte_eth_input_set_conf *conf)
9629 {
9630 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9631 	enum i40e_filter_pctype pctype;
9632 	uint64_t input_set, inset_reg = 0;
9633 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9634 	int ret, i, num;
9635 
9636 	if (!conf) {
9637 		PMD_DRV_LOG(ERR, "Invalid pointer");
9638 		return -EFAULT;
9639 	}
9640 	if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9641 	    conf->op != RTE_ETH_INPUT_SET_ADD) {
9642 		PMD_DRV_LOG(ERR, "Unsupported input set operation");
9643 		return -EINVAL;
9644 	}
9645 
9646 	if (pf->support_multi_driver) {
9647 		PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
9648 		return -ENOTSUP;
9649 	}
9650 
9651 	pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9652 	if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9653 		PMD_DRV_LOG(ERR, "invalid flow_type input.");
9654 		return -EINVAL;
9655 	}
9656 
9657 	if (hw->mac.type == I40E_MAC_X722) {
9658 		/* get translated pctype value in fd pctype register */
9659 		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
9660 			I40E_GLQF_FD_PCTYPES((int)pctype));
9661 	}
9662 
9663 	ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9664 				   conf->inset_size);
9665 	if (ret) {
9666 		PMD_DRV_LOG(ERR, "Failed to parse input set");
9667 		return -EINVAL;
9668 	}
9669 
9670 	if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9671 		/* get inset value in register */
9672 		inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9673 		inset_reg <<= I40E_32_BIT_WIDTH;
9674 		inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9675 		input_set |= pf->hash_input_set[pctype];
9676 	}
9677 	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9678 					   I40E_INSET_MASK_NUM_REG);
9679 	if (num < 0)
9680 		return -EINVAL;
9681 
9682 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9683 
9684 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9685 				    (uint32_t)(inset_reg & UINT32_MAX));
9686 	i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9687 				    (uint32_t)((inset_reg >>
9688 				    I40E_32_BIT_WIDTH) & UINT32_MAX));
9689 
9690 	for (i = 0; i < num; i++)
9691 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9692 					    mask_reg[i]);
9693 	/*clear unused mask registers of the pctype */
9694 	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9695 		i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9696 					    0);
9697 	I40E_WRITE_FLUSH(hw);
9698 
9699 	pf->hash_input_set[pctype] = input_set;
9700 	return 0;
9701 }
9702 
9703 int
9704 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9705 			 struct rte_eth_input_set_conf *conf)
9706 {
9707 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9708 	enum i40e_filter_pctype pctype;
9709 	uint64_t input_set, inset_reg = 0;
9710 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9711 	int ret, i, num;
9712 
9713 	if (!hw || !conf) {
9714 		PMD_DRV_LOG(ERR, "Invalid pointer");
9715 		return -EFAULT;
9716 	}
9717 	if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9718 	    conf->op != RTE_ETH_INPUT_SET_ADD) {
9719 		PMD_DRV_LOG(ERR, "Unsupported input set operation");
9720 		return -EINVAL;
9721 	}
9722 
9723 	pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9724 
9725 	if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9726 		PMD_DRV_LOG(ERR, "invalid flow_type input.");
9727 		return -EINVAL;
9728 	}
9729 
9730 	ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9731 				   conf->inset_size);
9732 	if (ret) {
9733 		PMD_DRV_LOG(ERR, "Failed to parse input set");
9734 		return -EINVAL;
9735 	}
9736 
9737 	/* get inset value in register */
9738 	inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9739 	inset_reg <<= I40E_32_BIT_WIDTH;
9740 	inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9741 
9742 	/* Can not change the inset reg for flex payload for fdir,
9743 	 * it is done by writing I40E_PRTQF_FD_FLXINSET
9744 	 * in i40e_set_flex_mask_on_pctype.
9745 	 */
9746 	if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9747 		inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9748 	else
9749 		input_set |= pf->fdir.input_set[pctype];
9750 	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9751 					   I40E_INSET_MASK_NUM_REG);
9752 	if (num < 0)
9753 		return -EINVAL;
9754 	if (pf->support_multi_driver && num > 0) {
9755 		PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9756 		return -ENOTSUP;
9757 	}
9758 
9759 	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9760 
9761 	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9762 			      (uint32_t)(inset_reg & UINT32_MAX));
9763 	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9764 			     (uint32_t)((inset_reg >>
9765 			     I40E_32_BIT_WIDTH) & UINT32_MAX));
9766 
9767 	if (!pf->support_multi_driver) {
9768 		for (i = 0; i < num; i++)
9769 			i40e_check_write_global_reg(hw,
9770 						    I40E_GLQF_FD_MSK(i, pctype),
9771 						    mask_reg[i]);
9772 		/*clear unused mask registers of the pctype */
9773 		for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9774 			i40e_check_write_global_reg(hw,
9775 						    I40E_GLQF_FD_MSK(i, pctype),
9776 						    0);
9777 	} else {
9778 		PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9779 	}
9780 	I40E_WRITE_FLUSH(hw);
9781 
9782 	pf->fdir.input_set[pctype] = input_set;
9783 	return 0;
9784 }
9785 
9786 static int
9787 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9788 {
9789 	int ret = 0;
9790 
9791 	if (!hw || !info) {
9792 		PMD_DRV_LOG(ERR, "Invalid pointer");
9793 		return -EFAULT;
9794 	}
9795 
9796 	switch (info->info_type) {
9797 	case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9798 		i40e_get_symmetric_hash_enable_per_port(hw,
9799 					&(info->info.enable));
9800 		break;
9801 	case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9802 		ret = i40e_get_hash_filter_global_config(hw,
9803 				&(info->info.global_conf));
9804 		break;
9805 	default:
9806 		PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9807 							info->info_type);
9808 		ret = -EINVAL;
9809 		break;
9810 	}
9811 
9812 	return ret;
9813 }
9814 
9815 static int
9816 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9817 {
9818 	int ret = 0;
9819 
9820 	if (!hw || !info) {
9821 		PMD_DRV_LOG(ERR, "Invalid pointer");
9822 		return -EFAULT;
9823 	}
9824 
9825 	switch (info->info_type) {
9826 	case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9827 		i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9828 		break;
9829 	case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9830 		ret = i40e_set_hash_filter_global_config(hw,
9831 				&(info->info.global_conf));
9832 		break;
9833 	case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9834 		ret = i40e_hash_filter_inset_select(hw,
9835 					       &(info->info.input_set_conf));
9836 		break;
9837 
9838 	default:
9839 		PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9840 							info->info_type);
9841 		ret = -EINVAL;
9842 		break;
9843 	}
9844 
9845 	return ret;
9846 }
9847 
9848 /* Operations for hash function */
9849 static int
9850 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9851 		      enum rte_filter_op filter_op,
9852 		      void *arg)
9853 {
9854 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9855 	int ret = 0;
9856 
9857 	switch (filter_op) {
9858 	case RTE_ETH_FILTER_NOP:
9859 		break;
9860 	case RTE_ETH_FILTER_GET:
9861 		ret = i40e_hash_filter_get(hw,
9862 			(struct rte_eth_hash_filter_info *)arg);
9863 		break;
9864 	case RTE_ETH_FILTER_SET:
9865 		ret = i40e_hash_filter_set(hw,
9866 			(struct rte_eth_hash_filter_info *)arg);
9867 		break;
9868 	default:
9869 		PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9870 								filter_op);
9871 		ret = -ENOTSUP;
9872 		break;
9873 	}
9874 
9875 	return ret;
9876 }
9877 
9878 /* Convert ethertype filter structure */
9879 static int
9880 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9881 			      struct i40e_ethertype_filter *filter)
9882 {
9883 	rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9884 	filter->input.ether_type = input->ether_type;
9885 	filter->flags = input->flags;
9886 	filter->queue = input->queue;
9887 
9888 	return 0;
9889 }
9890 
9891 /* Check if there exists the ehtertype filter */
9892 struct i40e_ethertype_filter *
9893 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9894 				const struct i40e_ethertype_filter_input *input)
9895 {
9896 	int ret;
9897 
9898 	ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9899 	if (ret < 0)
9900 		return NULL;
9901 
9902 	return ethertype_rule->hash_map[ret];
9903 }
9904 
9905 /* Add ethertype filter in SW list */
9906 static int
9907 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9908 				struct i40e_ethertype_filter *filter)
9909 {
9910 	struct i40e_ethertype_rule *rule = &pf->ethertype;
9911 	int ret;
9912 
9913 	ret = rte_hash_add_key(rule->hash_table, &filter->input);
9914 	if (ret < 0) {
9915 		PMD_DRV_LOG(ERR,
9916 			    "Failed to insert ethertype filter"
9917 			    " to hash table %d!",
9918 			    ret);
9919 		return ret;
9920 	}
9921 	rule->hash_map[ret] = filter;
9922 
9923 	TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9924 
9925 	return 0;
9926 }
9927 
9928 /* Delete ethertype filter in SW list */
9929 int
9930 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9931 			     struct i40e_ethertype_filter_input *input)
9932 {
9933 	struct i40e_ethertype_rule *rule = &pf->ethertype;
9934 	struct i40e_ethertype_filter *filter;
9935 	int ret;
9936 
9937 	ret = rte_hash_del_key(rule->hash_table, input);
9938 	if (ret < 0) {
9939 		PMD_DRV_LOG(ERR,
9940 			    "Failed to delete ethertype filter"
9941 			    " to hash table %d!",
9942 			    ret);
9943 		return ret;
9944 	}
9945 	filter = rule->hash_map[ret];
9946 	rule->hash_map[ret] = NULL;
9947 
9948 	TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9949 	rte_free(filter);
9950 
9951 	return 0;
9952 }
9953 
9954 /*
9955  * Configure ethertype filter, which can director packet by filtering
9956  * with mac address and ether_type or only ether_type
9957  */
9958 int
9959 i40e_ethertype_filter_set(struct i40e_pf *pf,
9960 			struct rte_eth_ethertype_filter *filter,
9961 			bool add)
9962 {
9963 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9964 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9965 	struct i40e_ethertype_filter *ethertype_filter, *node;
9966 	struct i40e_ethertype_filter check_filter;
9967 	struct i40e_control_filter_stats stats;
9968 	uint16_t flags = 0;
9969 	int ret;
9970 
9971 	if (filter->queue >= pf->dev_data->nb_rx_queues) {
9972 		PMD_DRV_LOG(ERR, "Invalid queue ID");
9973 		return -EINVAL;
9974 	}
9975 	if (filter->ether_type == ETHER_TYPE_IPv4 ||
9976 		filter->ether_type == ETHER_TYPE_IPv6) {
9977 		PMD_DRV_LOG(ERR,
9978 			"unsupported ether_type(0x%04x) in control packet filter.",
9979 			filter->ether_type);
9980 		return -EINVAL;
9981 	}
9982 	if (filter->ether_type == ETHER_TYPE_VLAN)
9983 		PMD_DRV_LOG(WARNING,
9984 			"filter vlan ether_type in first tag is not supported.");
9985 
9986 	/* Check if there is the filter in SW list */
9987 	memset(&check_filter, 0, sizeof(check_filter));
9988 	i40e_ethertype_filter_convert(filter, &check_filter);
9989 	node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9990 					       &check_filter.input);
9991 	if (add && node) {
9992 		PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9993 		return -EINVAL;
9994 	}
9995 
9996 	if (!add && !node) {
9997 		PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9998 		return -EINVAL;
9999 	}
10000 
10001 	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
10002 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
10003 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
10004 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
10005 	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
10006 
10007 	memset(&stats, 0, sizeof(stats));
10008 	ret = i40e_aq_add_rem_control_packet_filter(hw,
10009 			filter->mac_addr.addr_bytes,
10010 			filter->ether_type, flags,
10011 			pf->main_vsi->seid,
10012 			filter->queue, add, &stats, NULL);
10013 
10014 	PMD_DRV_LOG(INFO,
10015 		"add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
10016 		ret, stats.mac_etype_used, stats.etype_used,
10017 		stats.mac_etype_free, stats.etype_free);
10018 	if (ret < 0)
10019 		return -ENOSYS;
10020 
10021 	/* Add or delete a filter in SW list */
10022 	if (add) {
10023 		ethertype_filter = rte_zmalloc("ethertype_filter",
10024 				       sizeof(*ethertype_filter), 0);
10025 		if (ethertype_filter == NULL) {
10026 			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
10027 			return -ENOMEM;
10028 		}
10029 
10030 		rte_memcpy(ethertype_filter, &check_filter,
10031 			   sizeof(check_filter));
10032 		ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
10033 		if (ret < 0)
10034 			rte_free(ethertype_filter);
10035 	} else {
10036 		ret = i40e_sw_ethertype_filter_del(pf, &node->input);
10037 	}
10038 
10039 	return ret;
10040 }
10041 
10042 /*
10043  * Handle operations for ethertype filter.
10044  */
10045 static int
10046 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
10047 				enum rte_filter_op filter_op,
10048 				void *arg)
10049 {
10050 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10051 	int ret = 0;
10052 
10053 	if (filter_op == RTE_ETH_FILTER_NOP)
10054 		return ret;
10055 
10056 	if (arg == NULL) {
10057 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
10058 			    filter_op);
10059 		return -EINVAL;
10060 	}
10061 
10062 	switch (filter_op) {
10063 	case RTE_ETH_FILTER_ADD:
10064 		ret = i40e_ethertype_filter_set(pf,
10065 			(struct rte_eth_ethertype_filter *)arg,
10066 			TRUE);
10067 		break;
10068 	case RTE_ETH_FILTER_DELETE:
10069 		ret = i40e_ethertype_filter_set(pf,
10070 			(struct rte_eth_ethertype_filter *)arg,
10071 			FALSE);
10072 		break;
10073 	default:
10074 		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
10075 		ret = -ENOSYS;
10076 		break;
10077 	}
10078 	return ret;
10079 }
10080 
10081 static int
10082 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
10083 		     enum rte_filter_type filter_type,
10084 		     enum rte_filter_op filter_op,
10085 		     void *arg)
10086 {
10087 	int ret = 0;
10088 
10089 	if (dev == NULL)
10090 		return -EINVAL;
10091 
10092 	switch (filter_type) {
10093 	case RTE_ETH_FILTER_NONE:
10094 		/* For global configuration */
10095 		ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
10096 		break;
10097 	case RTE_ETH_FILTER_HASH:
10098 		ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
10099 		break;
10100 	case RTE_ETH_FILTER_MACVLAN:
10101 		ret = i40e_mac_filter_handle(dev, filter_op, arg);
10102 		break;
10103 	case RTE_ETH_FILTER_ETHERTYPE:
10104 		ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
10105 		break;
10106 	case RTE_ETH_FILTER_TUNNEL:
10107 		ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
10108 		break;
10109 	case RTE_ETH_FILTER_FDIR:
10110 		ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
10111 		break;
10112 	case RTE_ETH_FILTER_GENERIC:
10113 		if (filter_op != RTE_ETH_FILTER_GET)
10114 			return -EINVAL;
10115 		*(const void **)arg = &i40e_flow_ops;
10116 		break;
10117 	default:
10118 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
10119 							filter_type);
10120 		ret = -EINVAL;
10121 		break;
10122 	}
10123 
10124 	return ret;
10125 }
10126 
10127 /*
10128  * Check and enable Extended Tag.
10129  * Enabling Extended Tag is important for 40G performance.
10130  */
10131 static void
10132 i40e_enable_extended_tag(struct rte_eth_dev *dev)
10133 {
10134 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
10135 	uint32_t buf = 0;
10136 	int ret;
10137 
10138 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10139 				      PCI_DEV_CAP_REG);
10140 	if (ret < 0) {
10141 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10142 			    PCI_DEV_CAP_REG);
10143 		return;
10144 	}
10145 	if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
10146 		PMD_DRV_LOG(ERR, "Does not support Extended Tag");
10147 		return;
10148 	}
10149 
10150 	buf = 0;
10151 	ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
10152 				      PCI_DEV_CTRL_REG);
10153 	if (ret < 0) {
10154 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
10155 			    PCI_DEV_CTRL_REG);
10156 		return;
10157 	}
10158 	if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
10159 		PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
10160 		return;
10161 	}
10162 	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
10163 	ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
10164 				       PCI_DEV_CTRL_REG);
10165 	if (ret < 0) {
10166 		PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
10167 			    PCI_DEV_CTRL_REG);
10168 		return;
10169 	}
10170 }
10171 
10172 /*
10173  * As some registers wouldn't be reset unless a global hardware reset,
10174  * hardware initialization is needed to put those registers into an
10175  * expected initial state.
10176  */
10177 static void
10178 i40e_hw_init(struct rte_eth_dev *dev)
10179 {
10180 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10181 
10182 	i40e_enable_extended_tag(dev);
10183 
10184 	/* clear the PF Queue Filter control register */
10185 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
10186 
10187 	/* Disable symmetric hash per port */
10188 	i40e_set_symmetric_hash_enable_per_port(hw, 0);
10189 }
10190 
10191 /*
10192  * For X722 it is possible to have multiple pctypes mapped to the same flowtype
10193  * however this function will return only one highest pctype index,
10194  * which is not quite correct. This is known problem of i40e driver
10195  * and needs to be fixed later.
10196  */
10197 enum i40e_filter_pctype
10198 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
10199 {
10200 	int i;
10201 	uint64_t pctype_mask;
10202 
10203 	if (flow_type < I40E_FLOW_TYPE_MAX) {
10204 		pctype_mask = adapter->pctypes_tbl[flow_type];
10205 		for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
10206 			if (pctype_mask & (1ULL << i))
10207 				return (enum i40e_filter_pctype)i;
10208 		}
10209 	}
10210 	return I40E_FILTER_PCTYPE_INVALID;
10211 }
10212 
10213 uint16_t
10214 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
10215 			enum i40e_filter_pctype pctype)
10216 {
10217 	uint16_t flowtype;
10218 	uint64_t pctype_mask = 1ULL << pctype;
10219 
10220 	for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
10221 	     flowtype++) {
10222 		if (adapter->pctypes_tbl[flowtype] & pctype_mask)
10223 			return flowtype;
10224 	}
10225 
10226 	return RTE_ETH_FLOW_UNKNOWN;
10227 }
10228 
10229 /*
10230  * On X710, performance number is far from the expectation on recent firmware
10231  * versions; on XL710, performance number is also far from the expectation on
10232  * recent firmware versions, if promiscuous mode is disabled, or promiscuous
10233  * mode is enabled and port MAC address is equal to the packet destination MAC
10234  * address. The fix for this issue may not be integrated in the following
10235  * firmware version. So the workaround in software driver is needed. It needs
10236  * to modify the initial values of 3 internal only registers for both X710 and
10237  * XL710. Note that the values for X710 or XL710 could be different, and the
10238  * workaround can be removed when it is fixed in firmware in the future.
10239  */
10240 
10241 /* For both X710 and XL710 */
10242 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1	0x10000200
10243 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2	0x203F0200
10244 #define I40E_GL_SWR_PRI_JOIN_MAP_0		0x26CE00
10245 
10246 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10247 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
10248 
10249 /* For X722 */
10250 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10251 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10252 
10253 /* For X710 */
10254 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
10255 /* For XL710 */
10256 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
10257 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
10258 
10259 /*
10260  * GL_SWR_PM_UP_THR:
10261  * The value is not impacted from the link speed, its value is set according
10262  * to the total number of ports for a better pipe-monitor configuration.
10263  */
10264 static bool
10265 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10266 {
10267 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10268 		.device_id = (dev),   \
10269 		.val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10270 
10271 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10272 		.device_id = (dev),   \
10273 		.val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10274 
10275 	static const struct {
10276 		uint16_t device_id;
10277 		uint32_t val;
10278 	} swr_pm_table[] = {
10279 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10280 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10281 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10282 		{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10283 
10284 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10285 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10286 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10287 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10288 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10289 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10290 		{ I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10291 	};
10292 	uint32_t i;
10293 
10294 	if (value == NULL) {
10295 		PMD_DRV_LOG(ERR, "value is NULL");
10296 		return false;
10297 	}
10298 
10299 	for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10300 		if (hw->device_id == swr_pm_table[i].device_id) {
10301 			*value = swr_pm_table[i].val;
10302 
10303 			PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10304 				    "value - 0x%08x",
10305 				    hw->device_id, *value);
10306 			return true;
10307 		}
10308 	}
10309 
10310 	return false;
10311 }
10312 
10313 static int
10314 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10315 {
10316 	enum i40e_status_code status;
10317 	struct i40e_aq_get_phy_abilities_resp phy_ab;
10318 	int ret = -ENOTSUP;
10319 	int retries = 0;
10320 
10321 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10322 					      NULL);
10323 
10324 	while (status) {
10325 		PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10326 			status);
10327 		retries++;
10328 		rte_delay_us(100000);
10329 		if  (retries < 5)
10330 			status = i40e_aq_get_phy_capabilities(hw, false,
10331 					true, &phy_ab, NULL);
10332 		else
10333 			return ret;
10334 	}
10335 	return 0;
10336 }
10337 
10338 static void
10339 i40e_configure_registers(struct i40e_hw *hw)
10340 {
10341 	static struct {
10342 		uint32_t addr;
10343 		uint64_t val;
10344 	} reg_table[] = {
10345 		{I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10346 		{I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10347 		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10348 	};
10349 	uint64_t reg;
10350 	uint32_t i;
10351 	int ret;
10352 
10353 	for (i = 0; i < RTE_DIM(reg_table); i++) {
10354 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10355 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10356 				reg_table[i].val =
10357 					I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10358 			else /* For X710/XL710/XXV710 */
10359 				if (hw->aq.fw_maj_ver < 6)
10360 					reg_table[i].val =
10361 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10362 				else
10363 					reg_table[i].val =
10364 					     I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10365 		}
10366 
10367 		if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10368 			if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10369 				reg_table[i].val =
10370 					I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10371 			else /* For X710/XL710/XXV710 */
10372 				reg_table[i].val =
10373 					I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10374 		}
10375 
10376 		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10377 			uint32_t cfg_val;
10378 
10379 			if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10380 				PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10381 					    "GL_SWR_PM_UP_THR value fixup",
10382 					    hw->device_id);
10383 				continue;
10384 			}
10385 
10386 			reg_table[i].val = cfg_val;
10387 		}
10388 
10389 		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10390 							&reg, NULL);
10391 		if (ret < 0) {
10392 			PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10393 							reg_table[i].addr);
10394 			break;
10395 		}
10396 		PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10397 						reg_table[i].addr, reg);
10398 		if (reg == reg_table[i].val)
10399 			continue;
10400 
10401 		ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10402 						reg_table[i].val, NULL);
10403 		if (ret < 0) {
10404 			PMD_DRV_LOG(ERR,
10405 				"Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10406 				reg_table[i].val, reg_table[i].addr);
10407 			break;
10408 		}
10409 		PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10410 			"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10411 	}
10412 }
10413 
10414 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
10415 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
10416 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
10417 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
10418 static int
10419 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10420 {
10421 	uint32_t reg;
10422 	int ret;
10423 
10424 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10425 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10426 		return -EINVAL;
10427 	}
10428 
10429 	/* Configure for double VLAN RX stripping */
10430 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10431 	if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10432 		reg |= I40E_VSI_TSR_QINQ_CONFIG;
10433 		ret = i40e_aq_debug_write_register(hw,
10434 						   I40E_VSI_TSR(vsi->vsi_id),
10435 						   reg, NULL);
10436 		if (ret < 0) {
10437 			PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10438 				    vsi->vsi_id);
10439 			return I40E_ERR_CONFIG;
10440 		}
10441 	}
10442 
10443 	/* Configure for double VLAN TX insertion */
10444 	reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10445 	if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10446 		reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10447 		ret = i40e_aq_debug_write_register(hw,
10448 						   I40E_VSI_L2TAGSTXVALID(
10449 						   vsi->vsi_id), reg, NULL);
10450 		if (ret < 0) {
10451 			PMD_DRV_LOG(ERR,
10452 				"Failed to update VSI_L2TAGSTXVALID[%d]",
10453 				vsi->vsi_id);
10454 			return I40E_ERR_CONFIG;
10455 		}
10456 	}
10457 
10458 	return 0;
10459 }
10460 
10461 /**
10462  * i40e_aq_add_mirror_rule
10463  * @hw: pointer to the hardware structure
10464  * @seid: VEB seid to add mirror rule to
10465  * @dst_id: destination vsi seid
10466  * @entries: Buffer which contains the entities to be mirrored
10467  * @count: number of entities contained in the buffer
10468  * @rule_id:the rule_id of the rule to be added
10469  *
10470  * Add a mirror rule for a given veb.
10471  *
10472  **/
10473 static enum i40e_status_code
10474 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10475 			uint16_t seid, uint16_t dst_id,
10476 			uint16_t rule_type, uint16_t *entries,
10477 			uint16_t count, uint16_t *rule_id)
10478 {
10479 	struct i40e_aq_desc desc;
10480 	struct i40e_aqc_add_delete_mirror_rule cmd;
10481 	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10482 		(struct i40e_aqc_add_delete_mirror_rule_completion *)
10483 		&desc.params.raw;
10484 	uint16_t buff_len;
10485 	enum i40e_status_code status;
10486 
10487 	i40e_fill_default_direct_cmd_desc(&desc,
10488 					  i40e_aqc_opc_add_mirror_rule);
10489 	memset(&cmd, 0, sizeof(cmd));
10490 
10491 	buff_len = sizeof(uint16_t) * count;
10492 	desc.datalen = rte_cpu_to_le_16(buff_len);
10493 	if (buff_len > 0)
10494 		desc.flags |= rte_cpu_to_le_16(
10495 			(uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10496 	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10497 				I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10498 	cmd.num_entries = rte_cpu_to_le_16(count);
10499 	cmd.seid = rte_cpu_to_le_16(seid);
10500 	cmd.destination = rte_cpu_to_le_16(dst_id);
10501 
10502 	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10503 	status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10504 	PMD_DRV_LOG(INFO,
10505 		"i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
10506 		hw->aq.asq_last_status, resp->rule_id,
10507 		resp->mirror_rules_used, resp->mirror_rules_free);
10508 	*rule_id = rte_le_to_cpu_16(resp->rule_id);
10509 
10510 	return status;
10511 }
10512 
10513 /**
10514  * i40e_aq_del_mirror_rule
10515  * @hw: pointer to the hardware structure
10516  * @seid: VEB seid to add mirror rule to
10517  * @entries: Buffer which contains the entities to be mirrored
10518  * @count: number of entities contained in the buffer
10519  * @rule_id:the rule_id of the rule to be delete
10520  *
10521  * Delete a mirror rule for a given veb.
10522  *
10523  **/
10524 static enum i40e_status_code
10525 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10526 		uint16_t seid, uint16_t rule_type, uint16_t *entries,
10527 		uint16_t count, uint16_t rule_id)
10528 {
10529 	struct i40e_aq_desc desc;
10530 	struct i40e_aqc_add_delete_mirror_rule cmd;
10531 	uint16_t buff_len = 0;
10532 	enum i40e_status_code status;
10533 	void *buff = NULL;
10534 
10535 	i40e_fill_default_direct_cmd_desc(&desc,
10536 					  i40e_aqc_opc_delete_mirror_rule);
10537 	memset(&cmd, 0, sizeof(cmd));
10538 	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10539 		desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10540 							  I40E_AQ_FLAG_RD));
10541 		cmd.num_entries = count;
10542 		buff_len = sizeof(uint16_t) * count;
10543 		desc.datalen = rte_cpu_to_le_16(buff_len);
10544 		buff = (void *)entries;
10545 	} else
10546 		/* rule id is filled in destination field for deleting mirror rule */
10547 		cmd.destination = rte_cpu_to_le_16(rule_id);
10548 
10549 	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10550 				I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10551 	cmd.seid = rte_cpu_to_le_16(seid);
10552 
10553 	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10554 	status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10555 
10556 	return status;
10557 }
10558 
10559 /**
10560  * i40e_mirror_rule_set
10561  * @dev: pointer to the hardware structure
10562  * @mirror_conf: mirror rule info
10563  * @sw_id: mirror rule's sw_id
10564  * @on: enable/disable
10565  *
10566  * set a mirror rule.
10567  *
10568  **/
10569 static int
10570 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10571 			struct rte_eth_mirror_conf *mirror_conf,
10572 			uint8_t sw_id, uint8_t on)
10573 {
10574 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10575 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10576 	struct i40e_mirror_rule *it, *mirr_rule = NULL;
10577 	struct i40e_mirror_rule *parent = NULL;
10578 	uint16_t seid, dst_seid, rule_id;
10579 	uint16_t i, j = 0;
10580 	int ret;
10581 
10582 	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10583 
10584 	if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10585 		PMD_DRV_LOG(ERR,
10586 			"mirror rule can not be configured without veb or vfs.");
10587 		return -ENOSYS;
10588 	}
10589 	if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10590 		PMD_DRV_LOG(ERR, "mirror table is full.");
10591 		return -ENOSPC;
10592 	}
10593 	if (mirror_conf->dst_pool > pf->vf_num) {
10594 		PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10595 				 mirror_conf->dst_pool);
10596 		return -EINVAL;
10597 	}
10598 
10599 	seid = pf->main_vsi->veb->seid;
10600 
10601 	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10602 		if (sw_id <= it->index) {
10603 			mirr_rule = it;
10604 			break;
10605 		}
10606 		parent = it;
10607 	}
10608 	if (mirr_rule && sw_id == mirr_rule->index) {
10609 		if (on) {
10610 			PMD_DRV_LOG(ERR, "mirror rule exists.");
10611 			return -EEXIST;
10612 		} else {
10613 			ret = i40e_aq_del_mirror_rule(hw, seid,
10614 					mirr_rule->rule_type,
10615 					mirr_rule->entries,
10616 					mirr_rule->num_entries, mirr_rule->id);
10617 			if (ret < 0) {
10618 				PMD_DRV_LOG(ERR,
10619 					"failed to remove mirror rule: ret = %d, aq_err = %d.",
10620 					ret, hw->aq.asq_last_status);
10621 				return -ENOSYS;
10622 			}
10623 			TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10624 			rte_free(mirr_rule);
10625 			pf->nb_mirror_rule--;
10626 			return 0;
10627 		}
10628 	} else if (!on) {
10629 		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10630 		return -ENOENT;
10631 	}
10632 
10633 	mirr_rule = rte_zmalloc("i40e_mirror_rule",
10634 				sizeof(struct i40e_mirror_rule) , 0);
10635 	if (!mirr_rule) {
10636 		PMD_DRV_LOG(ERR, "failed to allocate memory");
10637 		return I40E_ERR_NO_MEMORY;
10638 	}
10639 	switch (mirror_conf->rule_type) {
10640 	case ETH_MIRROR_VLAN:
10641 		for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10642 			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10643 				mirr_rule->entries[j] =
10644 					mirror_conf->vlan.vlan_id[i];
10645 				j++;
10646 			}
10647 		}
10648 		if (j == 0) {
10649 			PMD_DRV_LOG(ERR, "vlan is not specified.");
10650 			rte_free(mirr_rule);
10651 			return -EINVAL;
10652 		}
10653 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10654 		break;
10655 	case ETH_MIRROR_VIRTUAL_POOL_UP:
10656 	case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10657 		/* check if the specified pool bit is out of range */
10658 		if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10659 			PMD_DRV_LOG(ERR, "pool mask is out of range.");
10660 			rte_free(mirr_rule);
10661 			return -EINVAL;
10662 		}
10663 		for (i = 0, j = 0; i < pf->vf_num; i++) {
10664 			if (mirror_conf->pool_mask & (1ULL << i)) {
10665 				mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10666 				j++;
10667 			}
10668 		}
10669 		if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10670 			/* add pf vsi to entries */
10671 			mirr_rule->entries[j] = pf->main_vsi_seid;
10672 			j++;
10673 		}
10674 		if (j == 0) {
10675 			PMD_DRV_LOG(ERR, "pool is not specified.");
10676 			rte_free(mirr_rule);
10677 			return -EINVAL;
10678 		}
10679 		/* egress and ingress in aq commands means from switch but not port */
10680 		mirr_rule->rule_type =
10681 			(mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10682 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10683 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10684 		break;
10685 	case ETH_MIRROR_UPLINK_PORT:
10686 		/* egress and ingress in aq commands means from switch but not port*/
10687 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10688 		break;
10689 	case ETH_MIRROR_DOWNLINK_PORT:
10690 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10691 		break;
10692 	default:
10693 		PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10694 			mirror_conf->rule_type);
10695 		rte_free(mirr_rule);
10696 		return -EINVAL;
10697 	}
10698 
10699 	/* If the dst_pool is equal to vf_num, consider it as PF */
10700 	if (mirror_conf->dst_pool == pf->vf_num)
10701 		dst_seid = pf->main_vsi_seid;
10702 	else
10703 		dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10704 
10705 	ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10706 				      mirr_rule->rule_type, mirr_rule->entries,
10707 				      j, &rule_id);
10708 	if (ret < 0) {
10709 		PMD_DRV_LOG(ERR,
10710 			"failed to add mirror rule: ret = %d, aq_err = %d.",
10711 			ret, hw->aq.asq_last_status);
10712 		rte_free(mirr_rule);
10713 		return -ENOSYS;
10714 	}
10715 
10716 	mirr_rule->index = sw_id;
10717 	mirr_rule->num_entries = j;
10718 	mirr_rule->id = rule_id;
10719 	mirr_rule->dst_vsi_seid = dst_seid;
10720 
10721 	if (parent)
10722 		TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10723 	else
10724 		TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10725 
10726 	pf->nb_mirror_rule++;
10727 	return 0;
10728 }
10729 
10730 /**
10731  * i40e_mirror_rule_reset
10732  * @dev: pointer to the device
10733  * @sw_id: mirror rule's sw_id
10734  *
10735  * reset a mirror rule.
10736  *
10737  **/
10738 static int
10739 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10740 {
10741 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10742 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10743 	struct i40e_mirror_rule *it, *mirr_rule = NULL;
10744 	uint16_t seid;
10745 	int ret;
10746 
10747 	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10748 
10749 	seid = pf->main_vsi->veb->seid;
10750 
10751 	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10752 		if (sw_id == it->index) {
10753 			mirr_rule = it;
10754 			break;
10755 		}
10756 	}
10757 	if (mirr_rule) {
10758 		ret = i40e_aq_del_mirror_rule(hw, seid,
10759 				mirr_rule->rule_type,
10760 				mirr_rule->entries,
10761 				mirr_rule->num_entries, mirr_rule->id);
10762 		if (ret < 0) {
10763 			PMD_DRV_LOG(ERR,
10764 				"failed to remove mirror rule: status = %d, aq_err = %d.",
10765 				ret, hw->aq.asq_last_status);
10766 			return -ENOSYS;
10767 		}
10768 		TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10769 		rte_free(mirr_rule);
10770 		pf->nb_mirror_rule--;
10771 	} else {
10772 		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10773 		return -ENOENT;
10774 	}
10775 	return 0;
10776 }
10777 
10778 static uint64_t
10779 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10780 {
10781 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10782 	uint64_t systim_cycles;
10783 
10784 	systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10785 	systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10786 			<< 32;
10787 
10788 	return systim_cycles;
10789 }
10790 
10791 static uint64_t
10792 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10793 {
10794 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10795 	uint64_t rx_tstamp;
10796 
10797 	rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10798 	rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10799 			<< 32;
10800 
10801 	return rx_tstamp;
10802 }
10803 
10804 static uint64_t
10805 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10806 {
10807 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10808 	uint64_t tx_tstamp;
10809 
10810 	tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10811 	tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10812 			<< 32;
10813 
10814 	return tx_tstamp;
10815 }
10816 
10817 static void
10818 i40e_start_timecounters(struct rte_eth_dev *dev)
10819 {
10820 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10821 	struct i40e_adapter *adapter =
10822 			(struct i40e_adapter *)dev->data->dev_private;
10823 	struct rte_eth_link link;
10824 	uint32_t tsync_inc_l;
10825 	uint32_t tsync_inc_h;
10826 
10827 	/* Get current link speed. */
10828 	i40e_dev_link_update(dev, 1);
10829 	rte_eth_linkstatus_get(dev, &link);
10830 
10831 	switch (link.link_speed) {
10832 	case ETH_SPEED_NUM_40G:
10833 	case ETH_SPEED_NUM_25G:
10834 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10835 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10836 		break;
10837 	case ETH_SPEED_NUM_10G:
10838 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10839 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10840 		break;
10841 	case ETH_SPEED_NUM_1G:
10842 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10843 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10844 		break;
10845 	default:
10846 		tsync_inc_l = 0x0;
10847 		tsync_inc_h = 0x0;
10848 	}
10849 
10850 	/* Set the timesync increment value. */
10851 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10852 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10853 
10854 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10855 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10856 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10857 
10858 	adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10859 	adapter->systime_tc.cc_shift = 0;
10860 	adapter->systime_tc.nsec_mask = 0;
10861 
10862 	adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10863 	adapter->rx_tstamp_tc.cc_shift = 0;
10864 	adapter->rx_tstamp_tc.nsec_mask = 0;
10865 
10866 	adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10867 	adapter->tx_tstamp_tc.cc_shift = 0;
10868 	adapter->tx_tstamp_tc.nsec_mask = 0;
10869 }
10870 
10871 static int
10872 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10873 {
10874 	struct i40e_adapter *adapter =
10875 			(struct i40e_adapter *)dev->data->dev_private;
10876 
10877 	adapter->systime_tc.nsec += delta;
10878 	adapter->rx_tstamp_tc.nsec += delta;
10879 	adapter->tx_tstamp_tc.nsec += delta;
10880 
10881 	return 0;
10882 }
10883 
10884 static int
10885 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10886 {
10887 	uint64_t ns;
10888 	struct i40e_adapter *adapter =
10889 			(struct i40e_adapter *)dev->data->dev_private;
10890 
10891 	ns = rte_timespec_to_ns(ts);
10892 
10893 	/* Set the timecounters to a new value. */
10894 	adapter->systime_tc.nsec = ns;
10895 	adapter->rx_tstamp_tc.nsec = ns;
10896 	adapter->tx_tstamp_tc.nsec = ns;
10897 
10898 	return 0;
10899 }
10900 
10901 static int
10902 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10903 {
10904 	uint64_t ns, systime_cycles;
10905 	struct i40e_adapter *adapter =
10906 			(struct i40e_adapter *)dev->data->dev_private;
10907 
10908 	systime_cycles = i40e_read_systime_cyclecounter(dev);
10909 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10910 	*ts = rte_ns_to_timespec(ns);
10911 
10912 	return 0;
10913 }
10914 
10915 static int
10916 i40e_timesync_enable(struct rte_eth_dev *dev)
10917 {
10918 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10919 	uint32_t tsync_ctl_l;
10920 	uint32_t tsync_ctl_h;
10921 
10922 	/* Stop the timesync system time. */
10923 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10924 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10925 	/* Reset the timesync system time value. */
10926 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10927 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10928 
10929 	i40e_start_timecounters(dev);
10930 
10931 	/* Clear timesync registers. */
10932 	I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10933 	I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10934 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10935 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10936 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10937 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10938 
10939 	/* Enable timestamping of PTP packets. */
10940 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10941 	tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10942 
10943 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10944 	tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10945 	tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10946 
10947 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10948 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10949 
10950 	return 0;
10951 }
10952 
10953 static int
10954 i40e_timesync_disable(struct rte_eth_dev *dev)
10955 {
10956 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10957 	uint32_t tsync_ctl_l;
10958 	uint32_t tsync_ctl_h;
10959 
10960 	/* Disable timestamping of transmitted PTP packets. */
10961 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10962 	tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10963 
10964 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10965 	tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10966 
10967 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10968 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10969 
10970 	/* Reset the timesync increment value. */
10971 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10972 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10973 
10974 	return 0;
10975 }
10976 
10977 static int
10978 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10979 				struct timespec *timestamp, uint32_t flags)
10980 {
10981 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10982 	struct i40e_adapter *adapter =
10983 		(struct i40e_adapter *)dev->data->dev_private;
10984 
10985 	uint32_t sync_status;
10986 	uint32_t index = flags & 0x03;
10987 	uint64_t rx_tstamp_cycles;
10988 	uint64_t ns;
10989 
10990 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10991 	if ((sync_status & (1 << index)) == 0)
10992 		return -EINVAL;
10993 
10994 	rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10995 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10996 	*timestamp = rte_ns_to_timespec(ns);
10997 
10998 	return 0;
10999 }
11000 
11001 static int
11002 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
11003 				struct timespec *timestamp)
11004 {
11005 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11006 	struct i40e_adapter *adapter =
11007 		(struct i40e_adapter *)dev->data->dev_private;
11008 
11009 	uint32_t sync_status;
11010 	uint64_t tx_tstamp_cycles;
11011 	uint64_t ns;
11012 
11013 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
11014 	if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
11015 		return -EINVAL;
11016 
11017 	tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
11018 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
11019 	*timestamp = rte_ns_to_timespec(ns);
11020 
11021 	return 0;
11022 }
11023 
11024 /*
11025  * i40e_parse_dcb_configure - parse dcb configure from user
11026  * @dev: the device being configured
11027  * @dcb_cfg: pointer of the result of parse
11028  * @*tc_map: bit map of enabled traffic classes
11029  *
11030  * Returns 0 on success, negative value on failure
11031  */
11032 static int
11033 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
11034 			 struct i40e_dcbx_config *dcb_cfg,
11035 			 uint8_t *tc_map)
11036 {
11037 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
11038 	uint8_t i, tc_bw, bw_lf;
11039 
11040 	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
11041 
11042 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
11043 	if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
11044 		PMD_INIT_LOG(ERR, "number of tc exceeds max.");
11045 		return -EINVAL;
11046 	}
11047 
11048 	/* assume each tc has the same bw */
11049 	tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
11050 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11051 		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
11052 	/* to ensure the sum of tcbw is equal to 100 */
11053 	bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
11054 	for (i = 0; i < bw_lf; i++)
11055 		dcb_cfg->etscfg.tcbwtable[i]++;
11056 
11057 	/* assume each tc has the same Transmission Selection Algorithm */
11058 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
11059 		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
11060 
11061 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11062 		dcb_cfg->etscfg.prioritytable[i] =
11063 				dcb_rx_conf->dcb_tc[i];
11064 
11065 	/* FW needs one App to configure HW */
11066 	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
11067 	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
11068 	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
11069 	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
11070 
11071 	if (dcb_rx_conf->nb_tcs == 0)
11072 		*tc_map = 1; /* tc0 only */
11073 	else
11074 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
11075 
11076 	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
11077 		dcb_cfg->pfc.willing = 0;
11078 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
11079 		dcb_cfg->pfc.pfcenable = *tc_map;
11080 	}
11081 	return 0;
11082 }
11083 
11084 
11085 static enum i40e_status_code
11086 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
11087 			      struct i40e_aqc_vsi_properties_data *info,
11088 			      uint8_t enabled_tcmap)
11089 {
11090 	enum i40e_status_code ret;
11091 	int i, total_tc = 0;
11092 	uint16_t qpnum_per_tc, bsf, qp_idx;
11093 	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
11094 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
11095 	uint16_t used_queues;
11096 
11097 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
11098 	if (ret != I40E_SUCCESS)
11099 		return ret;
11100 
11101 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11102 		if (enabled_tcmap & (1 << i))
11103 			total_tc++;
11104 	}
11105 	if (total_tc == 0)
11106 		total_tc = 1;
11107 	vsi->enabled_tc = enabled_tcmap;
11108 
11109 	/* different VSI has different queues assigned */
11110 	if (vsi->type == I40E_VSI_MAIN)
11111 		used_queues = dev_data->nb_rx_queues -
11112 			pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11113 	else if (vsi->type == I40E_VSI_VMDQ2)
11114 		used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
11115 	else {
11116 		PMD_INIT_LOG(ERR, "unsupported VSI type.");
11117 		return I40E_ERR_NO_AVAILABLE_VSI;
11118 	}
11119 
11120 	qpnum_per_tc = used_queues / total_tc;
11121 	/* Number of queues per enabled TC */
11122 	if (qpnum_per_tc == 0) {
11123 		PMD_INIT_LOG(ERR, " number of queues is less that tcs.");
11124 		return I40E_ERR_INVALID_QP_ID;
11125 	}
11126 	qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
11127 				I40E_MAX_Q_PER_TC);
11128 	bsf = rte_bsf32(qpnum_per_tc);
11129 
11130 	/**
11131 	 * Configure TC and queue mapping parameters, for enabled TC,
11132 	 * allocate qpnum_per_tc queues to this traffic. For disabled TC,
11133 	 * default queue will serve it.
11134 	 */
11135 	qp_idx = 0;
11136 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11137 		if (vsi->enabled_tc & (1 << i)) {
11138 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
11139 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
11140 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
11141 			qp_idx += qpnum_per_tc;
11142 		} else
11143 			info->tc_mapping[i] = 0;
11144 	}
11145 
11146 	/* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
11147 	if (vsi->type == I40E_VSI_SRIOV) {
11148 		info->mapping_flags |=
11149 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
11150 		for (i = 0; i < vsi->nb_qps; i++)
11151 			info->queue_mapping[i] =
11152 				rte_cpu_to_le_16(vsi->base_queue + i);
11153 	} else {
11154 		info->mapping_flags |=
11155 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
11156 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
11157 	}
11158 	info->valid_sections |=
11159 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
11160 
11161 	return I40E_SUCCESS;
11162 }
11163 
11164 /*
11165  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
11166  * @veb: VEB to be configured
11167  * @tc_map: enabled TC bitmap
11168  *
11169  * Returns 0 on success, negative value on failure
11170  */
11171 static enum i40e_status_code
11172 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
11173 {
11174 	struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
11175 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
11176 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
11177 	struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
11178 	enum i40e_status_code ret = I40E_SUCCESS;
11179 	int i;
11180 	uint32_t bw_max;
11181 
11182 	/* Check if enabled_tc is same as existing or new TCs */
11183 	if (veb->enabled_tc == tc_map)
11184 		return ret;
11185 
11186 	/* configure tc bandwidth */
11187 	memset(&veb_bw, 0, sizeof(veb_bw));
11188 	veb_bw.tc_valid_bits = tc_map;
11189 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
11190 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11191 		if (tc_map & BIT_ULL(i))
11192 			veb_bw.tc_bw_share_credits[i] = 1;
11193 	}
11194 	ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
11195 						   &veb_bw, NULL);
11196 	if (ret) {
11197 		PMD_INIT_LOG(ERR,
11198 			"AQ command Config switch_comp BW allocation per TC failed = %d",
11199 			hw->aq.asq_last_status);
11200 		return ret;
11201 	}
11202 
11203 	memset(&ets_query, 0, sizeof(ets_query));
11204 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
11205 						   &ets_query, NULL);
11206 	if (ret != I40E_SUCCESS) {
11207 		PMD_DRV_LOG(ERR,
11208 			"Failed to get switch_comp ETS configuration %u",
11209 			hw->aq.asq_last_status);
11210 		return ret;
11211 	}
11212 	memset(&bw_query, 0, sizeof(bw_query));
11213 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
11214 						  &bw_query, NULL);
11215 	if (ret != I40E_SUCCESS) {
11216 		PMD_DRV_LOG(ERR,
11217 			"Failed to get switch_comp bandwidth configuration %u",
11218 			hw->aq.asq_last_status);
11219 		return ret;
11220 	}
11221 
11222 	/* store and print out BW info */
11223 	veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
11224 	veb->bw_info.bw_max = ets_query.tc_bw_max;
11225 	PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
11226 	PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
11227 	bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
11228 		    (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
11229 		     I40E_16_BIT_WIDTH);
11230 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11231 		veb->bw_info.bw_ets_share_credits[i] =
11232 				bw_query.tc_bw_share_credits[i];
11233 		veb->bw_info.bw_ets_credits[i] =
11234 				rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
11235 		/* 4 bits per TC, 4th bit is reserved */
11236 		veb->bw_info.bw_ets_max[i] =
11237 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
11238 				  RTE_LEN2MASK(3, uint8_t));
11239 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
11240 			    veb->bw_info.bw_ets_share_credits[i]);
11241 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
11242 			    veb->bw_info.bw_ets_credits[i]);
11243 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
11244 			    veb->bw_info.bw_ets_max[i]);
11245 	}
11246 
11247 	veb->enabled_tc = tc_map;
11248 
11249 	return ret;
11250 }
11251 
11252 
11253 /*
11254  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11255  * @vsi: VSI to be configured
11256  * @tc_map: enabled TC bitmap
11257  *
11258  * Returns 0 on success, negative value on failure
11259  */
11260 static enum i40e_status_code
11261 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11262 {
11263 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11264 	struct i40e_vsi_context ctxt;
11265 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11266 	enum i40e_status_code ret = I40E_SUCCESS;
11267 	int i;
11268 
11269 	/* Check if enabled_tc is same as existing or new TCs */
11270 	if (vsi->enabled_tc == tc_map)
11271 		return ret;
11272 
11273 	/* configure tc bandwidth */
11274 	memset(&bw_data, 0, sizeof(bw_data));
11275 	bw_data.tc_valid_bits = tc_map;
11276 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
11277 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11278 		if (tc_map & BIT_ULL(i))
11279 			bw_data.tc_bw_credits[i] = 1;
11280 	}
11281 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11282 	if (ret) {
11283 		PMD_INIT_LOG(ERR,
11284 			"AQ command Config VSI BW allocation per TC failed = %d",
11285 			hw->aq.asq_last_status);
11286 		goto out;
11287 	}
11288 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11289 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11290 
11291 	/* Update Queue Pairs Mapping for currently enabled UPs */
11292 	ctxt.seid = vsi->seid;
11293 	ctxt.pf_num = hw->pf_id;
11294 	ctxt.vf_num = 0;
11295 	ctxt.uplink_seid = vsi->uplink_seid;
11296 	ctxt.info = vsi->info;
11297 	i40e_get_cap(hw);
11298 	ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11299 	if (ret)
11300 		goto out;
11301 
11302 	/* Update the VSI after updating the VSI queue-mapping information */
11303 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11304 	if (ret) {
11305 		PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11306 			hw->aq.asq_last_status);
11307 		goto out;
11308 	}
11309 	/* update the local VSI info with updated queue map */
11310 	rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11311 					sizeof(vsi->info.tc_mapping));
11312 	rte_memcpy(&vsi->info.queue_mapping,
11313 			&ctxt.info.queue_mapping,
11314 		sizeof(vsi->info.queue_mapping));
11315 	vsi->info.mapping_flags = ctxt.info.mapping_flags;
11316 	vsi->info.valid_sections = 0;
11317 
11318 	/* query and update current VSI BW information */
11319 	ret = i40e_vsi_get_bw_config(vsi);
11320 	if (ret) {
11321 		PMD_INIT_LOG(ERR,
11322 			 "Failed updating vsi bw info, err %s aq_err %s",
11323 			 i40e_stat_str(hw, ret),
11324 			 i40e_aq_str(hw, hw->aq.asq_last_status));
11325 		goto out;
11326 	}
11327 
11328 	vsi->enabled_tc = tc_map;
11329 
11330 out:
11331 	return ret;
11332 }
11333 
11334 /*
11335  * i40e_dcb_hw_configure - program the dcb setting to hw
11336  * @pf: pf the configuration is taken on
11337  * @new_cfg: new configuration
11338  * @tc_map: enabled TC bitmap
11339  *
11340  * Returns 0 on success, negative value on failure
11341  */
11342 static enum i40e_status_code
11343 i40e_dcb_hw_configure(struct i40e_pf *pf,
11344 		      struct i40e_dcbx_config *new_cfg,
11345 		      uint8_t tc_map)
11346 {
11347 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11348 	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11349 	struct i40e_vsi *main_vsi = pf->main_vsi;
11350 	struct i40e_vsi_list *vsi_list;
11351 	enum i40e_status_code ret;
11352 	int i;
11353 	uint32_t val;
11354 
11355 	/* Use the FW API if FW > v4.4*/
11356 	if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11357 	      (hw->aq.fw_maj_ver >= 5))) {
11358 		PMD_INIT_LOG(ERR,
11359 			"FW < v4.4, can not use FW LLDP API to configure DCB");
11360 		return I40E_ERR_FIRMWARE_API_VERSION;
11361 	}
11362 
11363 	/* Check if need reconfiguration */
11364 	if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11365 		PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11366 		return I40E_SUCCESS;
11367 	}
11368 
11369 	/* Copy the new config to the current config */
11370 	*old_cfg = *new_cfg;
11371 	old_cfg->etsrec = old_cfg->etscfg;
11372 	ret = i40e_set_dcb_config(hw);
11373 	if (ret) {
11374 		PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11375 			 i40e_stat_str(hw, ret),
11376 			 i40e_aq_str(hw, hw->aq.asq_last_status));
11377 		return ret;
11378 	}
11379 	/* set receive Arbiter to RR mode and ETS scheme by default */
11380 	for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11381 		val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11382 		val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
11383 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11384 			 I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
11385 		val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11386 			I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11387 			 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11388 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11389 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11390 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11391 			 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11392 		I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11393 	}
11394 	/* get local mib to check whether it is configured correctly */
11395 	/* IEEE mode */
11396 	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11397 	/* Get Local DCB Config */
11398 	i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11399 				     &hw->local_dcbx_config);
11400 
11401 	/* if Veb is created, need to update TC of it at first */
11402 	if (main_vsi->veb) {
11403 		ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11404 		if (ret)
11405 			PMD_INIT_LOG(WARNING,
11406 				 "Failed configuring TC for VEB seid=%d",
11407 				 main_vsi->veb->seid);
11408 	}
11409 	/* Update each VSI */
11410 	i40e_vsi_config_tc(main_vsi, tc_map);
11411 	if (main_vsi->veb) {
11412 		TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11413 			/* Beside main VSI and VMDQ VSIs, only enable default
11414 			 * TC for other VSIs
11415 			 */
11416 			if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11417 				ret = i40e_vsi_config_tc(vsi_list->vsi,
11418 							 tc_map);
11419 			else
11420 				ret = i40e_vsi_config_tc(vsi_list->vsi,
11421 							 I40E_DEFAULT_TCMAP);
11422 			if (ret)
11423 				PMD_INIT_LOG(WARNING,
11424 					"Failed configuring TC for VSI seid=%d",
11425 					vsi_list->vsi->seid);
11426 			/* continue */
11427 		}
11428 	}
11429 	return I40E_SUCCESS;
11430 }
11431 
11432 /*
11433  * i40e_dcb_init_configure - initial dcb config
11434  * @dev: device being configured
11435  * @sw_dcb: indicate whether dcb is sw configured or hw offload
11436  *
11437  * Returns 0 on success, negative value on failure
11438  */
11439 int
11440 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11441 {
11442 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11443 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11444 	int i, ret = 0;
11445 
11446 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
11447 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11448 		return -ENOTSUP;
11449 	}
11450 
11451 	/* DCB initialization:
11452 	 * Update DCB configuration from the Firmware and configure
11453 	 * LLDP MIB change event.
11454 	 */
11455 	if (sw_dcb == TRUE) {
11456 		if (i40e_need_stop_lldp(dev)) {
11457 			ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
11458 			if (ret != I40E_SUCCESS)
11459 				PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
11460 		}
11461 
11462 		ret = i40e_init_dcb(hw);
11463 		/* If lldp agent is stopped, the return value from
11464 		 * i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
11465 		 * adminq status. Otherwise, it should return success.
11466 		 */
11467 		if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11468 		    hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11469 			memset(&hw->local_dcbx_config, 0,
11470 				sizeof(struct i40e_dcbx_config));
11471 			/* set dcb default configuration */
11472 			hw->local_dcbx_config.etscfg.willing = 0;
11473 			hw->local_dcbx_config.etscfg.maxtcs = 0;
11474 			hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11475 			hw->local_dcbx_config.etscfg.tsatable[0] =
11476 						I40E_IEEE_TSA_ETS;
11477 			/* all UPs mapping to TC0 */
11478 			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11479 				hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11480 			hw->local_dcbx_config.etsrec =
11481 				hw->local_dcbx_config.etscfg;
11482 			hw->local_dcbx_config.pfc.willing = 0;
11483 			hw->local_dcbx_config.pfc.pfccap =
11484 						I40E_MAX_TRAFFIC_CLASS;
11485 			/* FW needs one App to configure HW */
11486 			hw->local_dcbx_config.numapps = 1;
11487 			hw->local_dcbx_config.app[0].selector =
11488 						I40E_APP_SEL_ETHTYPE;
11489 			hw->local_dcbx_config.app[0].priority = 3;
11490 			hw->local_dcbx_config.app[0].protocolid =
11491 						I40E_APP_PROTOID_FCOE;
11492 			ret = i40e_set_dcb_config(hw);
11493 			if (ret) {
11494 				PMD_INIT_LOG(ERR,
11495 					"default dcb config fails. err = %d, aq_err = %d.",
11496 					ret, hw->aq.asq_last_status);
11497 				return -ENOSYS;
11498 			}
11499 		} else {
11500 			PMD_INIT_LOG(ERR,
11501 				"DCB initialization in FW fails, err = %d, aq_err = %d.",
11502 				ret, hw->aq.asq_last_status);
11503 			return -ENOTSUP;
11504 		}
11505 	} else {
11506 		ret = i40e_aq_start_lldp(hw, NULL);
11507 		if (ret != I40E_SUCCESS)
11508 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11509 
11510 		ret = i40e_init_dcb(hw);
11511 		if (!ret) {
11512 			if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11513 				PMD_INIT_LOG(ERR,
11514 					"HW doesn't support DCBX offload.");
11515 				return -ENOTSUP;
11516 			}
11517 		} else {
11518 			PMD_INIT_LOG(ERR,
11519 				"DCBX configuration failed, err = %d, aq_err = %d.",
11520 				ret, hw->aq.asq_last_status);
11521 			return -ENOTSUP;
11522 		}
11523 	}
11524 	return 0;
11525 }
11526 
11527 /*
11528  * i40e_dcb_setup - setup dcb related config
11529  * @dev: device being configured
11530  *
11531  * Returns 0 on success, negative value on failure
11532  */
11533 static int
11534 i40e_dcb_setup(struct rte_eth_dev *dev)
11535 {
11536 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11537 	struct i40e_dcbx_config dcb_cfg;
11538 	uint8_t tc_map = 0;
11539 	int ret = 0;
11540 
11541 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
11542 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11543 		return -ENOTSUP;
11544 	}
11545 
11546 	if (pf->vf_num != 0)
11547 		PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis.");
11548 
11549 	ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11550 	if (ret) {
11551 		PMD_INIT_LOG(ERR, "invalid dcb config");
11552 		return -EINVAL;
11553 	}
11554 	ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11555 	if (ret) {
11556 		PMD_INIT_LOG(ERR, "dcb sw configure fails");
11557 		return -ENOSYS;
11558 	}
11559 
11560 	return 0;
11561 }
11562 
11563 static int
11564 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11565 		      struct rte_eth_dcb_info *dcb_info)
11566 {
11567 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11568 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11569 	struct i40e_vsi *vsi = pf->main_vsi;
11570 	struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11571 	uint16_t bsf, tc_mapping;
11572 	int i, j = 0;
11573 
11574 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11575 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11576 	else
11577 		dcb_info->nb_tcs = 1;
11578 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11579 		dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11580 	for (i = 0; i < dcb_info->nb_tcs; i++)
11581 		dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11582 
11583 	/* get queue mapping if vmdq is disabled */
11584 	if (!pf->nb_cfg_vmdq_vsi) {
11585 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11586 			if (!(vsi->enabled_tc & (1 << i)))
11587 				continue;
11588 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11589 			dcb_info->tc_queue.tc_rxq[j][i].base =
11590 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11591 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11592 			dcb_info->tc_queue.tc_txq[j][i].base =
11593 				dcb_info->tc_queue.tc_rxq[j][i].base;
11594 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11595 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11596 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11597 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11598 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11599 		}
11600 		return 0;
11601 	}
11602 
11603 	/* get queue mapping if vmdq is enabled */
11604 	do {
11605 		vsi = pf->vmdq[j].vsi;
11606 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11607 			if (!(vsi->enabled_tc & (1 << i)))
11608 				continue;
11609 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11610 			dcb_info->tc_queue.tc_rxq[j][i].base =
11611 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11612 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11613 			dcb_info->tc_queue.tc_txq[j][i].base =
11614 				dcb_info->tc_queue.tc_rxq[j][i].base;
11615 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11616 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11617 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11618 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11619 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11620 		}
11621 		j++;
11622 	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11623 	return 0;
11624 }
11625 
11626 static int
11627 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11628 {
11629 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11630 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11631 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11632 	uint16_t msix_intr;
11633 
11634 	msix_intr = intr_handle->intr_vec[queue_id];
11635 	if (msix_intr == I40E_MISC_VEC_ID)
11636 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11637 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
11638 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11639 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11640 	else
11641 		I40E_WRITE_REG(hw,
11642 			       I40E_PFINT_DYN_CTLN(msix_intr -
11643 						   I40E_RX_VEC_START),
11644 			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
11645 			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11646 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11647 
11648 	I40E_WRITE_FLUSH(hw);
11649 	rte_intr_enable(&pci_dev->intr_handle);
11650 
11651 	return 0;
11652 }
11653 
11654 static int
11655 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11656 {
11657 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11658 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11659 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11660 	uint16_t msix_intr;
11661 
11662 	msix_intr = intr_handle->intr_vec[queue_id];
11663 	if (msix_intr == I40E_MISC_VEC_ID)
11664 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11665 			       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11666 	else
11667 		I40E_WRITE_REG(hw,
11668 			       I40E_PFINT_DYN_CTLN(msix_intr -
11669 						   I40E_RX_VEC_START),
11670 			       I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11671 	I40E_WRITE_FLUSH(hw);
11672 
11673 	return 0;
11674 }
11675 
11676 /**
11677  * This function is used to check if the register is valid.
11678  * Below is the valid registers list for X722 only:
11679  * 0x2b800--0x2bb00
11680  * 0x38700--0x38a00
11681  * 0x3d800--0x3db00
11682  * 0x208e00--0x209000
11683  * 0x20be00--0x20c000
11684  * 0x263c00--0x264000
11685  * 0x265c00--0x266000
11686  */
11687 static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
11688 {
11689 	if ((type != I40E_MAC_X722) &&
11690 	    ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
11691 	     (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
11692 	     (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
11693 	     (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
11694 	     (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
11695 	     (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
11696 	     (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
11697 		return 0;
11698 	else
11699 		return 1;
11700 }
11701 
11702 static int i40e_get_regs(struct rte_eth_dev *dev,
11703 			 struct rte_dev_reg_info *regs)
11704 {
11705 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11706 	uint32_t *ptr_data = regs->data;
11707 	uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11708 	const struct i40e_reg_info *reg_info;
11709 
11710 	if (ptr_data == NULL) {
11711 		regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11712 		regs->width = sizeof(uint32_t);
11713 		return 0;
11714 	}
11715 
11716 	/* The first few registers have to be read using AQ operations */
11717 	reg_idx = 0;
11718 	while (i40e_regs_adminq[reg_idx].name) {
11719 		reg_info = &i40e_regs_adminq[reg_idx++];
11720 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11721 			for (arr_idx2 = 0;
11722 					arr_idx2 <= reg_info->count2;
11723 					arr_idx2++) {
11724 				reg_offset = arr_idx * reg_info->stride1 +
11725 					arr_idx2 * reg_info->stride2;
11726 				reg_offset += reg_info->base_addr;
11727 				ptr_data[reg_offset >> 2] =
11728 					i40e_read_rx_ctl(hw, reg_offset);
11729 			}
11730 	}
11731 
11732 	/* The remaining registers can be read using primitives */
11733 	reg_idx = 0;
11734 	while (i40e_regs_others[reg_idx].name) {
11735 		reg_info = &i40e_regs_others[reg_idx++];
11736 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11737 			for (arr_idx2 = 0;
11738 					arr_idx2 <= reg_info->count2;
11739 					arr_idx2++) {
11740 				reg_offset = arr_idx * reg_info->stride1 +
11741 					arr_idx2 * reg_info->stride2;
11742 				reg_offset += reg_info->base_addr;
11743 				if (!i40e_valid_regs(hw->mac.type, reg_offset))
11744 					ptr_data[reg_offset >> 2] = 0;
11745 				else
11746 					ptr_data[reg_offset >> 2] =
11747 						I40E_READ_REG(hw, reg_offset);
11748 			}
11749 	}
11750 
11751 	return 0;
11752 }
11753 
11754 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11755 {
11756 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11757 
11758 	/* Convert word count to byte count */
11759 	return hw->nvm.sr_size << 1;
11760 }
11761 
11762 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11763 			   struct rte_dev_eeprom_info *eeprom)
11764 {
11765 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11766 	uint16_t *data = eeprom->data;
11767 	uint16_t offset, length, cnt_words;
11768 	int ret_code;
11769 
11770 	offset = eeprom->offset >> 1;
11771 	length = eeprom->length >> 1;
11772 	cnt_words = length;
11773 
11774 	if (offset > hw->nvm.sr_size ||
11775 		offset + length > hw->nvm.sr_size) {
11776 		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11777 		return -EINVAL;
11778 	}
11779 
11780 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11781 
11782 	ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11783 	if (ret_code != I40E_SUCCESS || cnt_words != length) {
11784 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
11785 		return -EIO;
11786 	}
11787 
11788 	return 0;
11789 }
11790 
11791 static int i40e_get_module_info(struct rte_eth_dev *dev,
11792 				struct rte_eth_dev_module_info *modinfo)
11793 {
11794 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11795 	uint32_t sff8472_comp = 0;
11796 	uint32_t sff8472_swap = 0;
11797 	uint32_t sff8636_rev = 0;
11798 	i40e_status status;
11799 	uint32_t type = 0;
11800 
11801 	/* Check if firmware supports reading module EEPROM. */
11802 	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11803 		PMD_DRV_LOG(ERR,
11804 			    "Module EEPROM memory read not supported. "
11805 			    "Please update the NVM image.\n");
11806 		return -EINVAL;
11807 	}
11808 
11809 	status = i40e_update_link_info(hw);
11810 	if (status)
11811 		return -EIO;
11812 
11813 	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11814 		PMD_DRV_LOG(ERR,
11815 			    "Cannot read module EEPROM memory. "
11816 			    "No module connected.\n");
11817 		return -EINVAL;
11818 	}
11819 
11820 	type = hw->phy.link_info.module_type[0];
11821 
11822 	switch (type) {
11823 	case I40E_MODULE_TYPE_SFP:
11824 		status = i40e_aq_get_phy_register(hw,
11825 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11826 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11827 				I40E_MODULE_SFF_8472_COMP,
11828 				&sff8472_comp, NULL);
11829 		if (status)
11830 			return -EIO;
11831 
11832 		status = i40e_aq_get_phy_register(hw,
11833 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11834 				I40E_I2C_EEPROM_DEV_ADDR, 1,
11835 				I40E_MODULE_SFF_8472_SWAP,
11836 				&sff8472_swap, NULL);
11837 		if (status)
11838 			return -EIO;
11839 
11840 		/* Check if the module requires address swap to access
11841 		 * the other EEPROM memory page.
11842 		 */
11843 		if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11844 			PMD_DRV_LOG(WARNING,
11845 				    "Module address swap to access "
11846 				    "page 0xA2 is not supported.\n");
11847 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11848 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11849 		} else if (sff8472_comp == 0x00) {
11850 			/* Module is not SFF-8472 compliant */
11851 			modinfo->type = RTE_ETH_MODULE_SFF_8079;
11852 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11853 		} else {
11854 			modinfo->type = RTE_ETH_MODULE_SFF_8472;
11855 			modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11856 		}
11857 		break;
11858 	case I40E_MODULE_TYPE_QSFP_PLUS:
11859 		/* Read from memory page 0. */
11860 		status = i40e_aq_get_phy_register(hw,
11861 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11862 				0, 1,
11863 				I40E_MODULE_REVISION_ADDR,
11864 				&sff8636_rev, NULL);
11865 		if (status)
11866 			return -EIO;
11867 		/* Determine revision compliance byte */
11868 		if (sff8636_rev > 0x02) {
11869 			/* Module is SFF-8636 compliant */
11870 			modinfo->type = RTE_ETH_MODULE_SFF_8636;
11871 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11872 		} else {
11873 			modinfo->type = RTE_ETH_MODULE_SFF_8436;
11874 			modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11875 		}
11876 		break;
11877 	case I40E_MODULE_TYPE_QSFP28:
11878 		modinfo->type = RTE_ETH_MODULE_SFF_8636;
11879 		modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11880 		break;
11881 	default:
11882 		PMD_DRV_LOG(ERR, "Module type unrecognized\n");
11883 		return -EINVAL;
11884 	}
11885 	return 0;
11886 }
11887 
11888 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11889 				  struct rte_dev_eeprom_info *info)
11890 {
11891 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11892 	bool is_sfp = false;
11893 	i40e_status status;
11894 	uint8_t *data = info->data;
11895 	uint32_t value = 0;
11896 	uint32_t i;
11897 
11898 	if (!info || !info->length || !data)
11899 		return -EINVAL;
11900 
11901 	if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11902 		is_sfp = true;
11903 
11904 	for (i = 0; i < info->length; i++) {
11905 		u32 offset = i + info->offset;
11906 		u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11907 
11908 		/* Check if we need to access the other memory page */
11909 		if (is_sfp) {
11910 			if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11911 				offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11912 				addr = I40E_I2C_EEPROM_DEV_ADDR2;
11913 			}
11914 		} else {
11915 			while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11916 				/* Compute memory page number and offset. */
11917 				offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11918 				addr++;
11919 			}
11920 		}
11921 		status = i40e_aq_get_phy_register(hw,
11922 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11923 				addr, offset, 1, &value, NULL);
11924 		if (status)
11925 			return -EIO;
11926 		data[i] = (uint8_t)value;
11927 	}
11928 	return 0;
11929 }
11930 
11931 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11932 				     struct ether_addr *mac_addr)
11933 {
11934 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11935 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11936 	struct i40e_vsi *vsi = pf->main_vsi;
11937 	struct i40e_mac_filter_info mac_filter;
11938 	struct i40e_mac_filter *f;
11939 	int ret;
11940 
11941 	if (!is_valid_assigned_ether_addr(mac_addr)) {
11942 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11943 		return -EINVAL;
11944 	}
11945 
11946 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
11947 		if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
11948 			break;
11949 	}
11950 
11951 	if (f == NULL) {
11952 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11953 		return -EIO;
11954 	}
11955 
11956 	mac_filter = f->mac_info;
11957 	ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11958 	if (ret != I40E_SUCCESS) {
11959 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11960 		return -EIO;
11961 	}
11962 	memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11963 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
11964 	if (ret != I40E_SUCCESS) {
11965 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
11966 		return -EIO;
11967 	}
11968 	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11969 
11970 	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11971 					mac_addr->addr_bytes, NULL);
11972 	if (ret != I40E_SUCCESS) {
11973 		PMD_DRV_LOG(ERR, "Failed to change mac");
11974 		return -EIO;
11975 	}
11976 
11977 	return 0;
11978 }
11979 
11980 static int
11981 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11982 {
11983 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11984 	struct rte_eth_dev_data *dev_data = pf->dev_data;
11985 	uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11986 	int ret = 0;
11987 
11988 	/* check if mtu is within the allowed range */
11989 	if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
11990 		return -EINVAL;
11991 
11992 	/* mtu setting is forbidden if port is start */
11993 	if (dev_data->dev_started) {
11994 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11995 			    dev_data->port_id);
11996 		return -EBUSY;
11997 	}
11998 
11999 	if (frame_size > ETHER_MAX_LEN)
12000 		dev_data->dev_conf.rxmode.offloads |=
12001 			DEV_RX_OFFLOAD_JUMBO_FRAME;
12002 	else
12003 		dev_data->dev_conf.rxmode.offloads &=
12004 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
12005 
12006 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
12007 
12008 	return ret;
12009 }
12010 
12011 /* Restore ethertype filter */
12012 static void
12013 i40e_ethertype_filter_restore(struct i40e_pf *pf)
12014 {
12015 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12016 	struct i40e_ethertype_filter_list
12017 		*ethertype_list = &pf->ethertype.ethertype_list;
12018 	struct i40e_ethertype_filter *f;
12019 	struct i40e_control_filter_stats stats;
12020 	uint16_t flags;
12021 
12022 	TAILQ_FOREACH(f, ethertype_list, rules) {
12023 		flags = 0;
12024 		if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
12025 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
12026 		if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
12027 			flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
12028 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
12029 
12030 		memset(&stats, 0, sizeof(stats));
12031 		i40e_aq_add_rem_control_packet_filter(hw,
12032 					    f->input.mac_addr.addr_bytes,
12033 					    f->input.ether_type,
12034 					    flags, pf->main_vsi->seid,
12035 					    f->queue, 1, &stats, NULL);
12036 	}
12037 	PMD_DRV_LOG(INFO, "Ethertype filter:"
12038 		    " mac_etype_used = %u, etype_used = %u,"
12039 		    " mac_etype_free = %u, etype_free = %u",
12040 		    stats.mac_etype_used, stats.etype_used,
12041 		    stats.mac_etype_free, stats.etype_free);
12042 }
12043 
12044 /* Restore tunnel filter */
12045 static void
12046 i40e_tunnel_filter_restore(struct i40e_pf *pf)
12047 {
12048 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12049 	struct i40e_vsi *vsi;
12050 	struct i40e_pf_vf *vf;
12051 	struct i40e_tunnel_filter_list
12052 		*tunnel_list = &pf->tunnel.tunnel_list;
12053 	struct i40e_tunnel_filter *f;
12054 	struct i40e_aqc_cloud_filters_element_bb cld_filter;
12055 	bool big_buffer = 0;
12056 
12057 	TAILQ_FOREACH(f, tunnel_list, rules) {
12058 		if (!f->is_to_vf)
12059 			vsi = pf->main_vsi;
12060 		else {
12061 			vf = &pf->vfs[f->vf_id];
12062 			vsi = vf->vsi;
12063 		}
12064 		memset(&cld_filter, 0, sizeof(cld_filter));
12065 		ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
12066 			(struct ether_addr *)&cld_filter.element.outer_mac);
12067 		ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
12068 			(struct ether_addr *)&cld_filter.element.inner_mac);
12069 		cld_filter.element.inner_vlan = f->input.inner_vlan;
12070 		cld_filter.element.flags = f->input.flags;
12071 		cld_filter.element.tenant_id = f->input.tenant_id;
12072 		cld_filter.element.queue_number = f->queue;
12073 		rte_memcpy(cld_filter.general_fields,
12074 			   f->input.general_fields,
12075 			   sizeof(f->input.general_fields));
12076 
12077 		if (((f->input.flags &
12078 		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
12079 		     I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
12080 		    ((f->input.flags &
12081 		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
12082 		     I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
12083 		    ((f->input.flags &
12084 		     I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
12085 		     I40E_AQC_ADD_CLOUD_FILTER_0X10))
12086 			big_buffer = 1;
12087 
12088 		if (big_buffer)
12089 			i40e_aq_add_cloud_filters_bb(hw,
12090 					vsi->seid, &cld_filter, 1);
12091 		else
12092 			i40e_aq_add_cloud_filters(hw, vsi->seid,
12093 						  &cld_filter.element, 1);
12094 	}
12095 }
12096 
12097 /* Restore rss filter */
12098 static inline void
12099 i40e_rss_filter_restore(struct i40e_pf *pf)
12100 {
12101 	struct i40e_rte_flow_rss_conf *conf =
12102 					&pf->rss_info;
12103 	if (conf->conf.queue_num)
12104 		i40e_config_rss_filter(pf, conf, TRUE);
12105 }
12106 
12107 static void
12108 i40e_filter_restore(struct i40e_pf *pf)
12109 {
12110 	i40e_ethertype_filter_restore(pf);
12111 	i40e_tunnel_filter_restore(pf);
12112 	i40e_fdir_filter_restore(pf);
12113 	i40e_rss_filter_restore(pf);
12114 }
12115 
12116 static bool
12117 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
12118 {
12119 	if (strcmp(dev->device->driver->name, drv->driver.name))
12120 		return false;
12121 
12122 	return true;
12123 }
12124 
12125 bool
12126 is_i40e_supported(struct rte_eth_dev *dev)
12127 {
12128 	return is_device_supported(dev, &rte_i40e_pmd);
12129 }
12130 
12131 struct i40e_customized_pctype*
12132 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
12133 {
12134 	int i;
12135 
12136 	for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
12137 		if (pf->customized_pctype[i].index == index)
12138 			return &pf->customized_pctype[i];
12139 	}
12140 	return NULL;
12141 }
12142 
12143 static int
12144 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
12145 			      uint32_t pkg_size, uint32_t proto_num,
12146 			      struct rte_pmd_i40e_proto_info *proto,
12147 			      enum rte_pmd_i40e_package_op op)
12148 {
12149 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12150 	uint32_t pctype_num;
12151 	struct rte_pmd_i40e_ptype_info *pctype;
12152 	uint32_t buff_size;
12153 	struct i40e_customized_pctype *new_pctype = NULL;
12154 	uint8_t proto_id;
12155 	uint8_t pctype_value;
12156 	char name[64];
12157 	uint32_t i, j, n;
12158 	int ret;
12159 
12160 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12161 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12162 		PMD_DRV_LOG(ERR, "Unsupported operation.");
12163 		return -1;
12164 	}
12165 
12166 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12167 				(uint8_t *)&pctype_num, sizeof(pctype_num),
12168 				RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
12169 	if (ret) {
12170 		PMD_DRV_LOG(ERR, "Failed to get pctype number");
12171 		return -1;
12172 	}
12173 	if (!pctype_num) {
12174 		PMD_DRV_LOG(INFO, "No new pctype added");
12175 		return -1;
12176 	}
12177 
12178 	buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
12179 	pctype = rte_zmalloc("new_pctype", buff_size, 0);
12180 	if (!pctype) {
12181 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
12182 		return -1;
12183 	}
12184 	/* get information about new pctype list */
12185 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12186 					(uint8_t *)pctype, buff_size,
12187 					RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
12188 	if (ret) {
12189 		PMD_DRV_LOG(ERR, "Failed to get pctype list");
12190 		rte_free(pctype);
12191 		return -1;
12192 	}
12193 
12194 	/* Update customized pctype. */
12195 	for (i = 0; i < pctype_num; i++) {
12196 		pctype_value = pctype[i].ptype_id;
12197 		memset(name, 0, sizeof(name));
12198 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12199 			proto_id = pctype[i].protocols[j];
12200 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12201 				continue;
12202 			for (n = 0; n < proto_num; n++) {
12203 				if (proto[n].proto_id != proto_id)
12204 					continue;
12205 				strcat(name, proto[n].name);
12206 				strcat(name, "_");
12207 				break;
12208 			}
12209 		}
12210 		name[strlen(name) - 1] = '\0';
12211 		if (!strcmp(name, "GTPC"))
12212 			new_pctype =
12213 				i40e_find_customized_pctype(pf,
12214 						      I40E_CUSTOMIZED_GTPC);
12215 		else if (!strcmp(name, "GTPU_IPV4"))
12216 			new_pctype =
12217 				i40e_find_customized_pctype(pf,
12218 						   I40E_CUSTOMIZED_GTPU_IPV4);
12219 		else if (!strcmp(name, "GTPU_IPV6"))
12220 			new_pctype =
12221 				i40e_find_customized_pctype(pf,
12222 						   I40E_CUSTOMIZED_GTPU_IPV6);
12223 		else if (!strcmp(name, "GTPU"))
12224 			new_pctype =
12225 				i40e_find_customized_pctype(pf,
12226 						      I40E_CUSTOMIZED_GTPU);
12227 		if (new_pctype) {
12228 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
12229 				new_pctype->pctype = pctype_value;
12230 				new_pctype->valid = true;
12231 			} else {
12232 				new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
12233 				new_pctype->valid = false;
12234 			}
12235 		}
12236 	}
12237 
12238 	rte_free(pctype);
12239 	return 0;
12240 }
12241 
12242 static int
12243 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12244 			     uint32_t pkg_size, uint32_t proto_num,
12245 			     struct rte_pmd_i40e_proto_info *proto,
12246 			     enum rte_pmd_i40e_package_op op)
12247 {
12248 	struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12249 	uint16_t port_id = dev->data->port_id;
12250 	uint32_t ptype_num;
12251 	struct rte_pmd_i40e_ptype_info *ptype;
12252 	uint32_t buff_size;
12253 	uint8_t proto_id;
12254 	char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12255 	uint32_t i, j, n;
12256 	bool in_tunnel;
12257 	int ret;
12258 
12259 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12260 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12261 		PMD_DRV_LOG(ERR, "Unsupported operation.");
12262 		return -1;
12263 	}
12264 
12265 	if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12266 		rte_pmd_i40e_ptype_mapping_reset(port_id);
12267 		return 0;
12268 	}
12269 
12270 	/* get information about new ptype num */
12271 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12272 				(uint8_t *)&ptype_num, sizeof(ptype_num),
12273 				RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12274 	if (ret) {
12275 		PMD_DRV_LOG(ERR, "Failed to get ptype number");
12276 		return ret;
12277 	}
12278 	if (!ptype_num) {
12279 		PMD_DRV_LOG(INFO, "No new ptype added");
12280 		return -1;
12281 	}
12282 
12283 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12284 	ptype = rte_zmalloc("new_ptype", buff_size, 0);
12285 	if (!ptype) {
12286 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
12287 		return -1;
12288 	}
12289 
12290 	/* get information about new ptype list */
12291 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12292 					(uint8_t *)ptype, buff_size,
12293 					RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12294 	if (ret) {
12295 		PMD_DRV_LOG(ERR, "Failed to get ptype list");
12296 		rte_free(ptype);
12297 		return ret;
12298 	}
12299 
12300 	buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12301 	ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12302 	if (!ptype_mapping) {
12303 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
12304 		rte_free(ptype);
12305 		return -1;
12306 	}
12307 
12308 	/* Update ptype mapping table. */
12309 	for (i = 0; i < ptype_num; i++) {
12310 		ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12311 		ptype_mapping[i].sw_ptype = 0;
12312 		in_tunnel = false;
12313 		for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12314 			proto_id = ptype[i].protocols[j];
12315 			if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12316 				continue;
12317 			for (n = 0; n < proto_num; n++) {
12318 				if (proto[n].proto_id != proto_id)
12319 					continue;
12320 				memset(name, 0, sizeof(name));
12321 				strcpy(name, proto[n].name);
12322 				if (!strncasecmp(name, "PPPOE", 5))
12323 					ptype_mapping[i].sw_ptype |=
12324 						RTE_PTYPE_L2_ETHER_PPPOE;
12325 				else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12326 					 !in_tunnel) {
12327 					ptype_mapping[i].sw_ptype |=
12328 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12329 					ptype_mapping[i].sw_ptype |=
12330 						RTE_PTYPE_L4_FRAG;
12331 				} else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12332 					   in_tunnel) {
12333 					ptype_mapping[i].sw_ptype |=
12334 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12335 					ptype_mapping[i].sw_ptype |=
12336 						RTE_PTYPE_INNER_L4_FRAG;
12337 				} else if (!strncasecmp(name, "OIPV4", 5)) {
12338 					ptype_mapping[i].sw_ptype |=
12339 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12340 					in_tunnel = true;
12341 				} else if (!strncasecmp(name, "IPV4", 4) &&
12342 					   !in_tunnel)
12343 					ptype_mapping[i].sw_ptype |=
12344 						RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12345 				else if (!strncasecmp(name, "IPV4", 4) &&
12346 					 in_tunnel)
12347 					ptype_mapping[i].sw_ptype |=
12348 					    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12349 				else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12350 					 !in_tunnel) {
12351 					ptype_mapping[i].sw_ptype |=
12352 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12353 					ptype_mapping[i].sw_ptype |=
12354 						RTE_PTYPE_L4_FRAG;
12355 				} else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12356 					   in_tunnel) {
12357 					ptype_mapping[i].sw_ptype |=
12358 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12359 					ptype_mapping[i].sw_ptype |=
12360 						RTE_PTYPE_INNER_L4_FRAG;
12361 				} else if (!strncasecmp(name, "OIPV6", 5)) {
12362 					ptype_mapping[i].sw_ptype |=
12363 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12364 					in_tunnel = true;
12365 				} else if (!strncasecmp(name, "IPV6", 4) &&
12366 					   !in_tunnel)
12367 					ptype_mapping[i].sw_ptype |=
12368 						RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12369 				else if (!strncasecmp(name, "IPV6", 4) &&
12370 					 in_tunnel)
12371 					ptype_mapping[i].sw_ptype |=
12372 					    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12373 				else if (!strncasecmp(name, "UDP", 3) &&
12374 					 !in_tunnel)
12375 					ptype_mapping[i].sw_ptype |=
12376 						RTE_PTYPE_L4_UDP;
12377 				else if (!strncasecmp(name, "UDP", 3) &&
12378 					 in_tunnel)
12379 					ptype_mapping[i].sw_ptype |=
12380 						RTE_PTYPE_INNER_L4_UDP;
12381 				else if (!strncasecmp(name, "TCP", 3) &&
12382 					 !in_tunnel)
12383 					ptype_mapping[i].sw_ptype |=
12384 						RTE_PTYPE_L4_TCP;
12385 				else if (!strncasecmp(name, "TCP", 3) &&
12386 					 in_tunnel)
12387 					ptype_mapping[i].sw_ptype |=
12388 						RTE_PTYPE_INNER_L4_TCP;
12389 				else if (!strncasecmp(name, "SCTP", 4) &&
12390 					 !in_tunnel)
12391 					ptype_mapping[i].sw_ptype |=
12392 						RTE_PTYPE_L4_SCTP;
12393 				else if (!strncasecmp(name, "SCTP", 4) &&
12394 					 in_tunnel)
12395 					ptype_mapping[i].sw_ptype |=
12396 						RTE_PTYPE_INNER_L4_SCTP;
12397 				else if ((!strncasecmp(name, "ICMP", 4) ||
12398 					  !strncasecmp(name, "ICMPV6", 6)) &&
12399 					 !in_tunnel)
12400 					ptype_mapping[i].sw_ptype |=
12401 						RTE_PTYPE_L4_ICMP;
12402 				else if ((!strncasecmp(name, "ICMP", 4) ||
12403 					  !strncasecmp(name, "ICMPV6", 6)) &&
12404 					 in_tunnel)
12405 					ptype_mapping[i].sw_ptype |=
12406 						RTE_PTYPE_INNER_L4_ICMP;
12407 				else if (!strncasecmp(name, "GTPC", 4)) {
12408 					ptype_mapping[i].sw_ptype |=
12409 						RTE_PTYPE_TUNNEL_GTPC;
12410 					in_tunnel = true;
12411 				} else if (!strncasecmp(name, "GTPU", 4)) {
12412 					ptype_mapping[i].sw_ptype |=
12413 						RTE_PTYPE_TUNNEL_GTPU;
12414 					in_tunnel = true;
12415 				} else if (!strncasecmp(name, "GRENAT", 6)) {
12416 					ptype_mapping[i].sw_ptype |=
12417 						RTE_PTYPE_TUNNEL_GRENAT;
12418 					in_tunnel = true;
12419 				} else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12420 					   !strncasecmp(name, "L2TPV2", 6)) {
12421 					ptype_mapping[i].sw_ptype |=
12422 						RTE_PTYPE_TUNNEL_L2TP;
12423 					in_tunnel = true;
12424 				}
12425 
12426 				break;
12427 			}
12428 		}
12429 	}
12430 
12431 	ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12432 						ptype_num, 0);
12433 	if (ret)
12434 		PMD_DRV_LOG(ERR, "Failed to update mapping table.");
12435 
12436 	rte_free(ptype_mapping);
12437 	rte_free(ptype);
12438 	return ret;
12439 }
12440 
12441 void
12442 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12443 			    uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12444 {
12445 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12446 	uint32_t proto_num;
12447 	struct rte_pmd_i40e_proto_info *proto;
12448 	uint32_t buff_size;
12449 	uint32_t i;
12450 	int ret;
12451 
12452 	if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12453 	    op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12454 		PMD_DRV_LOG(ERR, "Unsupported operation.");
12455 		return;
12456 	}
12457 
12458 	/* get information about protocol number */
12459 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12460 				       (uint8_t *)&proto_num, sizeof(proto_num),
12461 				       RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12462 	if (ret) {
12463 		PMD_DRV_LOG(ERR, "Failed to get protocol number");
12464 		return;
12465 	}
12466 	if (!proto_num) {
12467 		PMD_DRV_LOG(INFO, "No new protocol added");
12468 		return;
12469 	}
12470 
12471 	buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12472 	proto = rte_zmalloc("new_proto", buff_size, 0);
12473 	if (!proto) {
12474 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
12475 		return;
12476 	}
12477 
12478 	/* get information about protocol list */
12479 	ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12480 					(uint8_t *)proto, buff_size,
12481 					RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12482 	if (ret) {
12483 		PMD_DRV_LOG(ERR, "Failed to get protocol list");
12484 		rte_free(proto);
12485 		return;
12486 	}
12487 
12488 	/* Check if GTP is supported. */
12489 	for (i = 0; i < proto_num; i++) {
12490 		if (!strncmp(proto[i].name, "GTP", 3)) {
12491 			if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12492 				pf->gtp_support = true;
12493 			else
12494 				pf->gtp_support = false;
12495 			break;
12496 		}
12497 	}
12498 
12499 	/* Update customized pctype info */
12500 	ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12501 					    proto_num, proto, op);
12502 	if (ret)
12503 		PMD_DRV_LOG(INFO, "No pctype is updated.");
12504 
12505 	/* Update customized ptype info */
12506 	ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12507 					   proto_num, proto, op);
12508 	if (ret)
12509 		PMD_DRV_LOG(INFO, "No ptype is updated.");
12510 
12511 	rte_free(proto);
12512 }
12513 
12514 /* Create a QinQ cloud filter
12515  *
12516  * The Fortville NIC has limited resources for tunnel filters,
12517  * so we can only reuse existing filters.
12518  *
12519  * In step 1 we define which Field Vector fields can be used for
12520  * filter types.
12521  * As we do not have the inner tag defined as a field,
12522  * we have to define it first, by reusing one of L1 entries.
12523  *
12524  * In step 2 we are replacing one of existing filter types with
12525  * a new one for QinQ.
12526  * As we reusing L1 and replacing L2, some of the default filter
12527  * types will disappear,which depends on L1 and L2 entries we reuse.
12528  *
12529  * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12530  *
12531  * 1.	Create L1 filter of outer vlan (12b) which will be in use
12532  *		later when we define the cloud filter.
12533  *	a.	Valid_flags.replace_cloud = 0
12534  *	b.	Old_filter = 10 (Stag_Inner_Vlan)
12535  *	c.	New_filter = 0x10
12536  *	d.	TR bit = 0xff (optional, not used here)
12537  *	e.	Buffer – 2 entries:
12538  *		i.	Byte 0 = 8 (outer vlan FV index).
12539  *			Byte 1 = 0 (rsv)
12540  *			Byte 2-3 = 0x0fff
12541  *		ii.	Byte 0 = 37 (inner vlan FV index).
12542  *			Byte 1 =0 (rsv)
12543  *			Byte 2-3 = 0x0fff
12544  *
12545  * Step 2:
12546  * 2.	Create cloud filter using two L1 filters entries: stag and
12547  *		new filter(outer vlan+ inner vlan)
12548  *	a.	Valid_flags.replace_cloud = 1
12549  *	b.	Old_filter = 1 (instead of outer IP)
12550  *	c.	New_filter = 0x10
12551  *	d.	Buffer – 2 entries:
12552  *		i.	Byte 0 = 0x80 | 7 (valid | Stag).
12553  *			Byte 1-3 = 0 (rsv)
12554  *		ii.	Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12555  *			Byte 9-11 = 0 (rsv)
12556  */
12557 static int
12558 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12559 {
12560 	int ret = -ENOTSUP;
12561 	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
12562 	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
12563 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12564 	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
12565 
12566 	if (pf->support_multi_driver) {
12567 		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12568 		return ret;
12569 	}
12570 
12571 	/* Init */
12572 	memset(&filter_replace, 0,
12573 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12574 	memset(&filter_replace_buf, 0,
12575 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12576 
12577 	/* create L1 filter */
12578 	filter_replace.old_filter_type =
12579 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12580 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12581 	filter_replace.tr_bit = 0;
12582 
12583 	/* Prepare the buffer, 2 entries */
12584 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12585 	filter_replace_buf.data[0] |=
12586 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12587 	/* Field Vector 12b mask */
12588 	filter_replace_buf.data[2] = 0xff;
12589 	filter_replace_buf.data[3] = 0x0f;
12590 	filter_replace_buf.data[4] =
12591 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12592 	filter_replace_buf.data[4] |=
12593 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12594 	/* Field Vector 12b mask */
12595 	filter_replace_buf.data[6] = 0xff;
12596 	filter_replace_buf.data[7] = 0x0f;
12597 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12598 			&filter_replace_buf);
12599 	if (ret != I40E_SUCCESS)
12600 		return ret;
12601 
12602 	if (filter_replace.old_filter_type !=
12603 	    filter_replace.new_filter_type)
12604 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12605 			    " original: 0x%x, new: 0x%x",
12606 			    dev->device->name,
12607 			    filter_replace.old_filter_type,
12608 			    filter_replace.new_filter_type);
12609 
12610 	/* Apply the second L2 cloud filter */
12611 	memset(&filter_replace, 0,
12612 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12613 	memset(&filter_replace_buf, 0,
12614 	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12615 
12616 	/* create L2 filter, input for L2 filter will be L1 filter  */
12617 	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12618 	filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12619 	filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12620 
12621 	/* Prepare the buffer, 2 entries */
12622 	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12623 	filter_replace_buf.data[0] |=
12624 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12625 	filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12626 	filter_replace_buf.data[4] |=
12627 		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12628 	ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12629 			&filter_replace_buf);
12630 	if (!ret && (filter_replace.old_filter_type !=
12631 		     filter_replace.new_filter_type))
12632 		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12633 			    " original: 0x%x, new: 0x%x",
12634 			    dev->device->name,
12635 			    filter_replace.old_filter_type,
12636 			    filter_replace.new_filter_type);
12637 
12638 	return ret;
12639 }
12640 
12641 int
12642 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
12643 		   const struct rte_flow_action_rss *in)
12644 {
12645 	if (in->key_len > RTE_DIM(out->key) ||
12646 	    in->queue_num > RTE_DIM(out->queue))
12647 		return -EINVAL;
12648 	if (!in->key && in->key_len)
12649 		return -EINVAL;
12650 	out->conf = (struct rte_flow_action_rss){
12651 		.func = in->func,
12652 		.level = in->level,
12653 		.types = in->types,
12654 		.key_len = in->key_len,
12655 		.queue_num = in->queue_num,
12656 		.queue = memcpy(out->queue, in->queue,
12657 				sizeof(*in->queue) * in->queue_num),
12658 	};
12659 	if (in->key)
12660 		out->conf.key = memcpy(out->key, in->key, in->key_len);
12661 	return 0;
12662 }
12663 
12664 int
12665 i40e_action_rss_same(const struct rte_flow_action_rss *comp,
12666 		     const struct rte_flow_action_rss *with)
12667 {
12668 	return (comp->func == with->func &&
12669 		comp->level == with->level &&
12670 		comp->types == with->types &&
12671 		comp->key_len == with->key_len &&
12672 		comp->queue_num == with->queue_num &&
12673 		!memcmp(comp->key, with->key, with->key_len) &&
12674 		!memcmp(comp->queue, with->queue,
12675 			sizeof(*with->queue) * with->queue_num));
12676 }
12677 
12678 int
12679 i40e_config_rss_filter(struct i40e_pf *pf,
12680 		struct i40e_rte_flow_rss_conf *conf, bool add)
12681 {
12682 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12683 	uint32_t i, lut = 0;
12684 	uint16_t j, num;
12685 	struct rte_eth_rss_conf rss_conf = {
12686 		.rss_key = conf->conf.key_len ?
12687 			(void *)(uintptr_t)conf->conf.key : NULL,
12688 		.rss_key_len = conf->conf.key_len,
12689 		.rss_hf = conf->conf.types,
12690 	};
12691 	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
12692 
12693 	if (!add) {
12694 		if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
12695 			i40e_pf_disable_rss(pf);
12696 			memset(rss_info, 0,
12697 				sizeof(struct i40e_rte_flow_rss_conf));
12698 			return 0;
12699 		}
12700 		return -EINVAL;
12701 	}
12702 
12703 	if (rss_info->conf.queue_num)
12704 		return -EINVAL;
12705 
12706 	/* If both VMDQ and RSS enabled, not all of PF queues are configured.
12707 	 * It's necessary to calculate the actual PF queues that are configured.
12708 	 */
12709 	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
12710 		num = i40e_pf_calc_configured_queues_num(pf);
12711 	else
12712 		num = pf->dev_data->nb_rx_queues;
12713 
12714 	num = RTE_MIN(num, conf->conf.queue_num);
12715 	PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
12716 			num);
12717 
12718 	if (num == 0) {
12719 		PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
12720 		return -ENOTSUP;
12721 	}
12722 
12723 	/* Fill in redirection table */
12724 	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
12725 		if (j == num)
12726 			j = 0;
12727 		lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
12728 			hw->func_caps.rss_table_entry_width) - 1));
12729 		if ((i & 3) == 3)
12730 			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
12731 	}
12732 
12733 	if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
12734 		i40e_pf_disable_rss(pf);
12735 		return 0;
12736 	}
12737 	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
12738 		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
12739 		/* Random default keys */
12740 		static uint32_t rss_key_default[] = {0x6b793944,
12741 			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
12742 			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
12743 			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
12744 
12745 		rss_conf.rss_key = (uint8_t *)rss_key_default;
12746 		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
12747 							sizeof(uint32_t);
12748 	}
12749 
12750 	i40e_hw_rss_hash_set(pf, &rss_conf);
12751 
12752 	if (i40e_rss_conf_init(rss_info, &conf->conf))
12753 		return -EINVAL;
12754 
12755 	return 0;
12756 }
12757 
12758 RTE_INIT(i40e_init_log)
12759 {
12760 	i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
12761 	if (i40e_logtype_init >= 0)
12762 		rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
12763 	i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
12764 	if (i40e_logtype_driver >= 0)
12765 		rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
12766 }
12767 
12768 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12769 			      ETH_I40E_FLOATING_VEB_ARG "=1"
12770 			      ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12771 			      ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12772 			      ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
12773 			      ETH_I40E_USE_LATEST_VEC "=0|1");
12774