xref: /dpdk/drivers/net/i40e/i40e_ethdev.c (revision cf435a07)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdio.h>
35 #include <errno.h>
36 #include <stdint.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <stdarg.h>
40 #include <inttypes.h>
41 #include <assert.h>
42 
43 #include <rte_string_fns.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
50 #include <rte_alarm.h>
51 #include <rte_dev.h>
52 #include <rte_eth_ctrl.h>
53 #include <rte_tailq.h>
54 
55 #include "i40e_logs.h"
56 #include "base/i40e_prototype.h"
57 #include "base/i40e_adminq_cmd.h"
58 #include "base/i40e_type.h"
59 #include "base/i40e_register.h"
60 #include "base/i40e_dcb.h"
61 #include "i40e_ethdev.h"
62 #include "i40e_rxtx.h"
63 #include "i40e_pf.h"
64 #include "i40e_regs.h"
65 
66 #define ETH_I40E_FLOATING_VEB_ARG	"enable_floating_veb"
67 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
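/* Device argument (devargs) keys for the floating VEB feature; see
 * is_floating_veb_supported() and floating_veb_list_handler() below
 * for how their values are parsed.
 */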
68 
69 #define I40E_CLEAR_PXE_WAIT_MS     200
70 
71 /* Maximum number of capability elements */
72 #define I40E_MAX_CAP_ELE_NUM       128
73 
74 /* Wait count and interval */
75 #define I40E_CHK_Q_ENA_COUNT       1000
76 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
77 
78 /* Maximum number of VSIs */
79 #define I40E_MAX_NUM_VSIS          (384UL)
80 
81 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
82 
83 /* Flow control default timer */
84 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
85 
86 /* Flow control default high water mark, in KB (0x1C40 bytes / 1024) */
87 #define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)
88 
89 /* Flow control default low water mark, in KB (0x1A40 bytes / 1024) */
90 #define I40E_DEFAULT_LOW_WATER  (0x1A40/1024)
91 
92 /* Flow control enable fwd bit */
93 #define I40E_PRTMAC_FWD_CTRL   0x00000001
94 
95 /* Receive Packet Buffer size */
96 #define I40E_RXPBSIZE (968 * 1024)
97 
98 /* Kilobytes shift */
99 #define I40E_KILOSHIFT 10
100 
101 /* Receive average packet size in bytes */
102 #define I40E_PACKET_AVERAGE_SIZE 128
103 
104 /* Mask of PF interrupt causes */
105 #define I40E_PFINT_ICR0_ENA_MASK ( \
106 		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
107 		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
108 		I40E_PFINT_ICR0_ENA_GRST_MASK | \
109 		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
110 		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
111 		I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
112 		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
113 		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
114 		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
115 		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
116 
117 #define I40E_FLOW_TYPES ( \
118 	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
119 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
120 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
121 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
122 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
123 	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
124 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
125 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
126 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
127 	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
128 	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
129 
130 /* Additional timesync values. */
131 #define I40E_PTP_40GB_INCVAL     0x0199999999ULL
132 #define I40E_PTP_10GB_INCVAL     0x0333333333ULL
133 #define I40E_PTP_1GB_INCVAL      0x2000000000ULL
134 #define I40E_PRTTSYN_TSYNENA     0x80000000
135 #define I40E_PRTTSYN_TSYNTYPE    0x0e000000
136 #define I40E_CYCLECOUNTER_MASK   0xffffffffffffffffULL
137 
138 #define I40E_MAX_PERCENT            100
139 #define I40E_DEFAULT_DCB_APP_NUM    1
140 #define I40E_DEFAULT_DCB_APP_PRIO   3
141 
142 #define I40E_INSET_NONE            0x0000000000000000ULL
143 
144 /* bit 0 ~ bit 7 */
145 #define I40E_INSET_DMAC            0x0000000000000001ULL
146 #define I40E_INSET_SMAC            0x0000000000000002ULL
147 #define I40E_INSET_VLAN_OUTER      0x0000000000000004ULL
148 #define I40E_INSET_VLAN_INNER      0x0000000000000008ULL
149 #define I40E_INSET_VLAN_TUNNEL     0x0000000000000010ULL
150 
151 /* bit 8 ~ bit 15 */
152 #define I40E_INSET_IPV4_SRC        0x0000000000000100ULL
153 #define I40E_INSET_IPV4_DST        0x0000000000000200ULL
154 #define I40E_INSET_IPV6_SRC        0x0000000000000400ULL
155 #define I40E_INSET_IPV6_DST        0x0000000000000800ULL
156 #define I40E_INSET_SRC_PORT        0x0000000000001000ULL
157 #define I40E_INSET_DST_PORT        0x0000000000002000ULL
158 #define I40E_INSET_SCTP_VT         0x0000000000004000ULL
159 
160 /* bit 16 ~ bit 31 */
161 #define I40E_INSET_IPV4_TOS        0x0000000000010000ULL
162 #define I40E_INSET_IPV4_PROTO      0x0000000000020000ULL
163 #define I40E_INSET_IPV4_TTL        0x0000000000040000ULL
164 #define I40E_INSET_IPV6_TC         0x0000000000080000ULL
165 #define I40E_INSET_IPV6_FLOW       0x0000000000100000ULL
166 #define I40E_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
167 #define I40E_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
168 #define I40E_INSET_TCP_FLAGS       0x0000000000800000ULL
169 
170 /* bit 32 ~ bit 47, tunnel fields */
171 #define I40E_INSET_TUNNEL_IPV4_DST       0x0000000100000000ULL
172 #define I40E_INSET_TUNNEL_IPV6_DST       0x0000000200000000ULL
173 #define I40E_INSET_TUNNEL_DMAC           0x0000000400000000ULL
174 #define I40E_INSET_TUNNEL_SRC_PORT       0x0000000800000000ULL
175 #define I40E_INSET_TUNNEL_DST_PORT       0x0000001000000000ULL
176 #define I40E_INSET_TUNNEL_ID             0x0000002000000000ULL
177 
178 /* bit 48 ~ bit 55 */
179 #define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
180 
181 /* bit 56 ~ bit 63, Flex Payload */
182 #define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
183 #define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
184 #define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
185 #define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
186 #define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
187 #define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
188 #define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
189 #define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
190 #define I40E_INSET_FLEX_PAYLOAD \
191 	(I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
192 	I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
193 	I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
194 	I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
195 
196 /**
197  * Below are values for writing un-exposed registers suggested
198  * by silicon experts
199  */
200 /* Destination MAC address */
201 #define I40E_REG_INSET_L2_DMAC                   0xE000000000000000ULL
202 /* Source MAC address */
203 #define I40E_REG_INSET_L2_SMAC                   0x1C00000000000000ULL
204 /* Outer (S-Tag) VLAN tag in the outer L2 header */
205 #define I40E_REG_INSET_L2_OUTER_VLAN             0x0000000004000000ULL
206 /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
207 #define I40E_REG_INSET_L2_INNER_VLAN             0x0080000000000000ULL
208 /* Single VLAN tag in the inner L2 header */
209 #define I40E_REG_INSET_TUNNEL_VLAN               0x0100000000000000ULL
210 /* Source IPv4 address */
211 #define I40E_REG_INSET_L3_SRC_IP4                0x0001800000000000ULL
212 /* Destination IPv4 address */
213 #define I40E_REG_INSET_L3_DST_IP4                0x0000001800000000ULL
214 /* IPv4 Type of Service (TOS) */
215 #define I40E_REG_INSET_L3_IP4_TOS                0x0040000000000000ULL
216 /* IPv4 Protocol */
217 #define I40E_REG_INSET_L3_IP4_PROTO              0x0004000000000000ULL
218 /* IPv4 Time to Live */
219 #define I40E_REG_INSET_L3_IP4_TTL                0x0004000000000000ULL
220 /* Source IPv6 address */
221 #define I40E_REG_INSET_L3_SRC_IP6                0x0007F80000000000ULL
222 /* Destination IPv6 address */
223 #define I40E_REG_INSET_L3_DST_IP6                0x000007F800000000ULL
224 /* IPv6 Traffic Class (TC) */
225 #define I40E_REG_INSET_L3_IP6_TC                 0x0040000000000000ULL
226 /* IPv6 Next Header */
227 #define I40E_REG_INSET_L3_IP6_NEXT_HDR           0x0008000000000000ULL
228 /* IPv6 Hop Limit */
229 #define I40E_REG_INSET_L3_IP6_HOP_LIMIT          0x0008000000000000ULL
230 /* Source L4 port */
231 #define I40E_REG_INSET_L4_SRC_PORT               0x0000000400000000ULL
232 /* Destination L4 port */
233 #define I40E_REG_INSET_L4_DST_PORT               0x0000000200000000ULL
234 /* SCTP verification tag */
235 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG  0x0000000180000000ULL
236 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE)*/
237 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC   0x0000000001C00000ULL
238 /* Source port of tunneling UDP */
239 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT    0x0000000000200000ULL
240 /* Destination port of tunneling UDP */
241 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT    0x0000000000100000ULL
242 /* UDP Tunneling ID, NVGRE/GRE key */
243 #define I40E_REG_INSET_TUNNEL_ID                 0x00000000000C0000ULL
244 /* Last ether type */
245 #define I40E_REG_INSET_LAST_ETHER_TYPE           0x0000000000004000ULL
246 /* Tunneling outer destination IPv4 address */
247 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4         0x00000000000000C0ULL
248 /* Tunneling outer destination IPv6 address */
249 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6         0x0000000000003FC0ULL
250 /* 1st word of flex payload */
251 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1        0x0000000000002000ULL
252 /* 2nd word of flex payload */
253 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2        0x0000000000001000ULL
254 /* 3rd word of flex payload */
255 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3        0x0000000000000800ULL
256 /* 4th word of flex payload */
257 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4        0x0000000000000400ULL
258 /* 5th word of flex payload */
259 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5        0x0000000000000200ULL
260 /* 6th word of flex payload */
261 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6        0x0000000000000100ULL
262 /* 7th word of flex payload */
263 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7        0x0000000000000080ULL
264 /* 8th word of flex payload */
265 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8        0x0000000000000040ULL
266 /* all 8 words flex payload */
267 #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS        0x0000000000003FC0ULL
268 #define I40E_REG_INSET_MASK_DEFAULT              0x0000000000000000ULL
269 
270 #define I40E_TRANSLATE_INSET 0
271 #define I40E_TRANSLATE_REG   1
272 
273 #define I40E_INSET_IPV4_TOS_MASK        0x0009FF00UL
274 #define I40E_INSET_IPv4_TTL_MASK        0x000D00FFUL
275 #define I40E_INSET_IPV4_PROTO_MASK      0x000DFF00UL
276 #define I40E_INSET_IPV6_TC_MASK         0x0009F00FUL
277 #define I40E_INSET_IPV6_HOP_LIMIT_MASK  0x000CFF00UL
278 #define I40E_INSET_IPV6_NEXT_HDR_MASK   0x000C00FFUL
279 
280 #define I40E_GL_SWT_L2TAGCTRL(_i)             (0x001C0A70 + ((_i) * 4))
281 #define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
282 #define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK  \
283 	I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
284 
285 /* PCI offset for querying capability */
286 #define PCI_DEV_CAP_REG            0xA4
287 /* PCI offset for enabling/disabling Extended Tag */
288 #define PCI_DEV_CTRL_REG           0xA8
289 /* Bit mask of Extended Tag capability */
290 #define PCI_DEV_CAP_EXT_TAG_MASK   0x20
291 /* Bit shift of Extended Tag enable/disable */
292 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
293 /* Bit mask of Extended Tag enable/disable */
294 #define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
295 
296 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
297 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
298 static int i40e_dev_configure(struct rte_eth_dev *dev);
299 static int i40e_dev_start(struct rte_eth_dev *dev);
300 static void i40e_dev_stop(struct rte_eth_dev *dev);
301 static void i40e_dev_close(struct rte_eth_dev *dev);
302 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
303 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
304 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
305 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
306 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
307 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
308 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
309 			       struct rte_eth_stats *stats);
310 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
311 			       struct rte_eth_xstat *xstats, unsigned n);
312 static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
313 				     struct rte_eth_xstat_name *xstats_names,
314 				     unsigned limit);
315 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
316 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
317 					    uint16_t queue_id,
318 					    uint8_t stat_idx,
319 					    uint8_t is_rx);
320 static void i40e_dev_info_get(struct rte_eth_dev *dev,
321 			      struct rte_eth_dev_info *dev_info);
322 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
323 				uint16_t vlan_id,
324 				int on);
325 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
326 			      enum rte_vlan_type vlan_type,
327 			      uint16_t tpid);
328 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
329 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
330 				      uint16_t queue,
331 				      int on);
332 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
333 static int i40e_dev_led_on(struct rte_eth_dev *dev);
334 static int i40e_dev_led_off(struct rte_eth_dev *dev);
335 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
336 			      struct rte_eth_fc_conf *fc_conf);
337 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
338 			      struct rte_eth_fc_conf *fc_conf);
339 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
340 				       struct rte_eth_pfc_conf *pfc_conf);
341 static void i40e_macaddr_add(struct rte_eth_dev *dev,
342 			  struct ether_addr *mac_addr,
343 			  uint32_t index,
344 			  uint32_t pool);
345 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
346 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
347 				    struct rte_eth_rss_reta_entry64 *reta_conf,
348 				    uint16_t reta_size);
349 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
350 				   struct rte_eth_rss_reta_entry64 *reta_conf,
351 				   uint16_t reta_size);
352 
353 static int i40e_get_cap(struct i40e_hw *hw);
354 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
355 static int i40e_pf_setup(struct i40e_pf *pf);
356 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
357 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
358 static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
359 static int i40e_dcb_setup(struct rte_eth_dev *dev);
360 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
361 		bool offset_loaded, uint64_t *offset, uint64_t *stat);
362 static void i40e_stat_update_48(struct i40e_hw *hw,
363 			       uint32_t hireg,
364 			       uint32_t loreg,
365 			       bool offset_loaded,
366 			       uint64_t *offset,
367 			       uint64_t *stat);
368 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
369 static void i40e_dev_interrupt_handler(
370 		__rte_unused struct rte_intr_handle *handle, void *param);
371 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
372 				uint32_t base, uint32_t num);
373 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
374 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
375 			uint32_t base);
376 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
377 			uint16_t num);
378 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
379 static int i40e_veb_release(struct i40e_veb *veb);
380 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
381 						struct i40e_vsi *vsi);
382 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
383 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
384 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
385 					     struct i40e_macvlan_filter *mv_f,
386 					     int num,
387 					     struct ether_addr *addr);
388 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
389 					     struct i40e_macvlan_filter *mv_f,
390 					     int num,
391 					     uint16_t vlan);
392 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
393 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
394 				    struct rte_eth_rss_conf *rss_conf);
395 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
396 				      struct rte_eth_rss_conf *rss_conf);
397 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
398 					struct rte_eth_udp_tunnel *udp_tunnel);
399 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
400 					struct rte_eth_udp_tunnel *udp_tunnel);
401 static void i40e_filter_input_set_init(struct i40e_pf *pf);
402 static int i40e_ethertype_filter_set(struct i40e_pf *pf,
403 			struct rte_eth_ethertype_filter *filter,
404 			bool add);
405 static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
406 				enum rte_filter_op filter_op,
407 				void *arg);
408 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
409 				enum rte_filter_type filter_type,
410 				enum rte_filter_op filter_op,
411 				void *arg);
412 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
413 				  struct rte_eth_dcb_info *dcb_info);
414 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
415 static void i40e_configure_registers(struct i40e_hw *hw);
416 static void i40e_hw_init(struct rte_eth_dev *dev);
417 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
418 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
419 			struct rte_eth_mirror_conf *mirror_conf,
420 			uint8_t sw_id, uint8_t on);
421 static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
422 
423 static int i40e_timesync_enable(struct rte_eth_dev *dev);
424 static int i40e_timesync_disable(struct rte_eth_dev *dev);
425 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
426 					   struct timespec *timestamp,
427 					   uint32_t flags);
428 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
429 					   struct timespec *timestamp);
430 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
431 
432 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
433 
434 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
435 				   struct timespec *timestamp);
436 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
437 				    const struct timespec *timestamp);
438 
439 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
440 					 uint16_t queue_id);
441 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
442 					  uint16_t queue_id);
443 
444 static int i40e_get_regs(struct rte_eth_dev *dev,
445 			 struct rte_dev_reg_info *regs);
446 
447 static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
448 
449 static int i40e_get_eeprom(struct rte_eth_dev *dev,
450 			   struct rte_dev_eeprom_info *eeprom);
451 
452 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
453 				      struct ether_addr *mac_addr);
454 
455 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
456 
457 static const struct rte_pci_id pci_id_i40e_map[] = {
458 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
459 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
460 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
461 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
462 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
463 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
464 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
465 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
466 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
467 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
468 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
469 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
470 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
471 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
472 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
473 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
474 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
475 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
476 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
477 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
478 	{ .vendor_id = 0, /* sentinel */ },
479 };
480 
481 static const struct eth_dev_ops i40e_eth_dev_ops = {
482 	.dev_configure                = i40e_dev_configure,
483 	.dev_start                    = i40e_dev_start,
484 	.dev_stop                     = i40e_dev_stop,
485 	.dev_close                    = i40e_dev_close,
486 	.promiscuous_enable           = i40e_dev_promiscuous_enable,
487 	.promiscuous_disable          = i40e_dev_promiscuous_disable,
488 	.allmulticast_enable          = i40e_dev_allmulticast_enable,
489 	.allmulticast_disable         = i40e_dev_allmulticast_disable,
490 	.dev_set_link_up              = i40e_dev_set_link_up,
491 	.dev_set_link_down            = i40e_dev_set_link_down,
492 	.link_update                  = i40e_dev_link_update,
493 	.stats_get                    = i40e_dev_stats_get,
494 	.xstats_get                   = i40e_dev_xstats_get,
495 	.xstats_get_names             = i40e_dev_xstats_get_names,
496 	.stats_reset                  = i40e_dev_stats_reset,
497 	.xstats_reset                 = i40e_dev_stats_reset,
498 	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
499 	.dev_infos_get                = i40e_dev_info_get,
500 	.dev_supported_ptypes_get     = i40e_dev_supported_ptypes_get,
501 	.vlan_filter_set              = i40e_vlan_filter_set,
502 	.vlan_tpid_set                = i40e_vlan_tpid_set,
503 	.vlan_offload_set             = i40e_vlan_offload_set,
504 	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
505 	.vlan_pvid_set                = i40e_vlan_pvid_set,
506 	.rx_queue_start               = i40e_dev_rx_queue_start,
507 	.rx_queue_stop                = i40e_dev_rx_queue_stop,
508 	.tx_queue_start               = i40e_dev_tx_queue_start,
509 	.tx_queue_stop                = i40e_dev_tx_queue_stop,
510 	.rx_queue_setup               = i40e_dev_rx_queue_setup,
511 	.rx_queue_intr_enable         = i40e_dev_rx_queue_intr_enable,
512 	.rx_queue_intr_disable        = i40e_dev_rx_queue_intr_disable,
513 	.rx_queue_release             = i40e_dev_rx_queue_release,
514 	.rx_queue_count               = i40e_dev_rx_queue_count,
515 	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
516 	.tx_queue_setup               = i40e_dev_tx_queue_setup,
517 	.tx_queue_release             = i40e_dev_tx_queue_release,
518 	.dev_led_on                   = i40e_dev_led_on,
519 	.dev_led_off                  = i40e_dev_led_off,
520 	.flow_ctrl_get                = i40e_flow_ctrl_get,
521 	.flow_ctrl_set                = i40e_flow_ctrl_set,
522 	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
523 	.mac_addr_add                 = i40e_macaddr_add,
524 	.mac_addr_remove              = i40e_macaddr_remove,
525 	.reta_update                  = i40e_dev_rss_reta_update,
526 	.reta_query                   = i40e_dev_rss_reta_query,
527 	.rss_hash_update              = i40e_dev_rss_hash_update,
528 	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
529 	.udp_tunnel_port_add          = i40e_dev_udp_tunnel_port_add,
530 	.udp_tunnel_port_del          = i40e_dev_udp_tunnel_port_del,
531 	.filter_ctrl                  = i40e_dev_filter_ctrl,
532 	.rxq_info_get                 = i40e_rxq_info_get,
533 	.txq_info_get                 = i40e_txq_info_get,
534 	.mirror_rule_set              = i40e_mirror_rule_set,
535 	.mirror_rule_reset            = i40e_mirror_rule_reset,
536 	.timesync_enable              = i40e_timesync_enable,
537 	.timesync_disable             = i40e_timesync_disable,
538 	.timesync_read_rx_timestamp   = i40e_timesync_read_rx_timestamp,
539 	.timesync_read_tx_timestamp   = i40e_timesync_read_tx_timestamp,
540 	.get_dcb_info                 = i40e_dev_get_dcb_info,
541 	.timesync_adjust_time         = i40e_timesync_adjust_time,
542 	.timesync_read_time           = i40e_timesync_read_time,
543 	.timesync_write_time          = i40e_timesync_write_time,
544 	.get_reg                      = i40e_get_regs,
545 	.get_eeprom_length            = i40e_get_eeprom_length,
546 	.get_eeprom                   = i40e_get_eeprom,
547 	.mac_addr_set                 = i40e_set_default_mac_addr,
548 	.mtu_set                      = i40e_dev_mtu_set,
549 };
550 
551 /* Store statistics names and their offsets in the stats structure */
552 struct rte_i40e_xstats_name_off {
553 	char name[RTE_ETH_XSTATS_NAME_SIZE];
554 	unsigned offset;
555 };
556 
557 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
558 	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
559 	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
560 	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
561 	{"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
562 	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
563 		rx_unknown_protocol)},
564 	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
565 	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
566 	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
567 	{"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
568 };
569 
570 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
571 		sizeof(rte_i40e_stats_strings[0]))
572 
573 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
574 	{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
575 		tx_dropped_link_down)},
576 	{"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
577 	{"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
578 		illegal_bytes)},
579 	{"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
580 	{"mac_local_errors", offsetof(struct i40e_hw_port_stats,
581 		mac_local_faults)},
582 	{"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
583 		mac_remote_faults)},
584 	{"rx_length_errors", offsetof(struct i40e_hw_port_stats,
585 		rx_length_errors)},
586 	{"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
587 	{"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
588 	{"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
589 	{"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
590 	{"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
591 	{"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
592 		rx_size_127)},
593 	{"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
594 		rx_size_255)},
595 	{"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
596 		rx_size_511)},
597 	{"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
598 		rx_size_1023)},
599 	{"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
600 		rx_size_1522)},
601 	{"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
602 		rx_size_big)},
603 	{"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
604 		rx_undersize)},
605 	{"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
606 		rx_oversize)},
607 	{"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
608 		mac_short_packet_dropped)},
609 	{"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
610 		rx_fragments)},
611 	{"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
612 	{"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
613 	{"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
614 		tx_size_127)},
615 	{"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
616 		tx_size_255)},
617 	{"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
618 		tx_size_511)},
619 	{"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
620 		tx_size_1023)},
621 	{"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
622 		tx_size_1522)},
623 	{"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
624 		tx_size_big)},
625 	{"rx_flow_director_atr_match_packets",
626 		offsetof(struct i40e_hw_port_stats, fd_atr_match)},
627 	{"rx_flow_director_sb_match_packets",
628 		offsetof(struct i40e_hw_port_stats, fd_sb_match)},
629 	{"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
630 		tx_lpi_status)},
631 	{"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
632 		rx_lpi_status)},
633 	{"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
634 		tx_lpi_count)},
635 	{"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
636 		rx_lpi_count)},
637 };
638 
639 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
640 		sizeof(rte_i40e_hw_port_strings[0]))
641 
642 static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
643 	{"xon_packets", offsetof(struct i40e_hw_port_stats,
644 		priority_xon_rx)},
645 	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
646 		priority_xoff_rx)},
647 };
648 
649 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
650 		sizeof(rte_i40e_rxq_prio_strings[0]))
651 
652 static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
653 	{"xon_packets", offsetof(struct i40e_hw_port_stats,
654 		priority_xon_tx)},
655 	{"xoff_packets", offsetof(struct i40e_hw_port_stats,
656 		priority_xoff_tx)},
657 	{"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
658 		priority_xon_2_xoff)},
659 };
660 
661 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
662 		sizeof(rte_i40e_txq_prio_strings[0]))
663 
664 static struct eth_driver rte_i40e_pmd = {
665 	.pci_drv = {
666 		.id_table = pci_id_i40e_map,
667 		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
668 			RTE_PCI_DRV_DETACHABLE,
669 		.probe = rte_eth_dev_pci_probe,
670 		.remove = rte_eth_dev_pci_remove,
671 	},
672 	.eth_dev_init = eth_i40e_dev_init,
673 	.eth_dev_uninit = eth_i40e_dev_uninit,
674 	.dev_private_size = sizeof(struct i40e_adapter),
675 };
676 
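/* Helpers that copy the 64-bit rte_eth_link value between dev->data->dev_link
 * and the caller's buffer with a single rte_atomic64_cmpset(); they return -1
 * if a concurrent update raced with the copy and it was not applied.
 */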
677 static inline int
678 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
679 				     struct rte_eth_link *link)
680 {
681 	struct rte_eth_link *dst = link;
682 	struct rte_eth_link *src = &(dev->data->dev_link);
683 
684 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
685 					*(uint64_t *)src) == 0)
686 		return -1;
687 
688 	return 0;
689 }
690 
691 static inline int
692 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
693 				      struct rte_eth_link *link)
694 {
695 	struct rte_eth_link *dst = &(dev->data->dev_link);
696 	struct rte_eth_link *src = link;
697 
698 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
699 					*(uint64_t *)src) == 0)
700 		return -1;
701 
702 	return 0;
703 }
704 
705 DRIVER_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
706 DRIVER_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
707 
708 #ifndef I40E_GLQF_ORT
709 #define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
710 #endif
711 #ifndef I40E_GLQF_PIT
712 #define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
713 #endif
714 
715 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
716 {
717 	/*
718 	 * Initialize registers for flexible payload, which should be set by NVM.
719 	 * This should be removed from code once it is fixed in NVM.
720 	 */
721 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
722 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
723 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
724 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
725 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
726 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
727 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
728 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
729 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
730 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
731 	I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
732 	I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
733 
734 	/* Initialize registers for parsing packet type of QinQ */
735 	I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
736 	I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
737 }
738 
739 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
740 
741 /*
742  * Add an ethertype filter to drop all flow control frames transmitted
743  * from VSIs.
744  */
745 static void
746 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
747 {
748 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
749 	uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
750 			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
751 			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
752 	int ret;
753 
754 	ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
755 				I40E_FLOW_CONTROL_ETHERTYPE, flags,
756 				pf->main_vsi_seid, 0,
757 				TRUE, NULL, NULL);
758 	if (ret)
759 		PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control"
760 				  " frames from VSIs.");
761 }
762 
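/* Parse the floating_veb_list value: a semicolon-separated list of VF indices
 * and ranges (e.g. "1;3-6") naming the VFs that attach to the floating VEB.
 * Returns -1 on a malformed or empty list.
 */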
763 static int
764 floating_veb_list_handler(__rte_unused const char *key,
765 			  const char *floating_veb_value,
766 			  void *opaque)
767 {
768 	int idx = 0;
769 	unsigned int count = 0;
770 	char *end = NULL;
771 	int min, max;
772 	bool *vf_floating_veb = opaque;
773 
774 	while (isblank(*floating_veb_value))
775 		floating_veb_value++;
776 
777 	/* Reset floating VEB configuration for VFs */
778 	for (idx = 0; idx < I40E_MAX_VF; idx++)
779 		vf_floating_veb[idx] = false;
780 
781 	min = I40E_MAX_VF;
782 	do {
783 		while (isblank(*floating_veb_value))
784 			floating_veb_value++;
785 		if (*floating_veb_value == '\0')
786 			return -1;
787 		errno = 0;
788 		idx = strtoul(floating_veb_value, &end, 10);
789 		if (errno || end == NULL)
790 			return -1;
791 		while (isblank(*end))
792 			end++;
793 		if (*end == '-') {
794 			min = idx;
795 		} else if ((*end == ';') || (*end == '\0')) {
796 			max = idx;
797 			if (min == I40E_MAX_VF)
798 				min = idx;
799 			if (max >= I40E_MAX_VF)
800 				max = I40E_MAX_VF - 1;
801 			for (idx = min; idx <= max; idx++) {
802 				vf_floating_veb[idx] = true;
803 				count++;
804 			}
805 			min = I40E_MAX_VF;
806 		} else {
807 			return -1;
808 		}
809 		floating_veb_value = end + 1;
810 	} while (*end != '\0');
811 
812 	if (count == 0)
813 		return -1;
814 
815 	return 0;
816 }
817 
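/* Decide which VFs attach to the floating VEB: all of them by default when
 * the feature is enabled, or only those named in the floating_veb_list
 * devargs when that parameter is present.
 */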
818 static void
819 config_vf_floating_veb(struct rte_devargs *devargs,
820 		       uint16_t floating_veb,
821 		       bool *vf_floating_veb)
822 {
823 	struct rte_kvargs *kvlist;
824 	int i;
825 	const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
826 
827 	if (!floating_veb)
828 		return;
829 	/* All the VFs attach to the floating VEB by default
830 	 * when the floating VEB is enabled.
831 	 */
832 	for (i = 0; i < I40E_MAX_VF; i++)
833 		vf_floating_veb[i] = true;
834 
835 	if (devargs == NULL)
836 		return;
837 
838 	kvlist = rte_kvargs_parse(devargs->args, NULL);
839 	if (kvlist == NULL)
840 		return;
841 
842 	if (!rte_kvargs_count(kvlist, floating_veb_list)) {
843 		rte_kvargs_free(kvlist);
844 		return;
845 	}
846 	/* When the floating_veb_list parameter exists, all the VFs
847 	 * attach to the legacy VEB first, and are then moved to the
848 	 * floating VEB according to the floating_veb_list.
849 	 */
850 	if (rte_kvargs_process(kvlist, floating_veb_list,
851 			       floating_veb_list_handler,
852 			       vf_floating_veb) < 0) {
853 		rte_kvargs_free(kvlist);
854 		return;
855 	}
856 	rte_kvargs_free(kvlist);
857 }
858 
859 static int
860 i40e_check_floating_handler(__rte_unused const char *key,
861 			    const char *value,
862 			    __rte_unused void *opaque)
863 {
864 	if (strcmp(value, "1"))
865 		return -1;
866 
867 	return 0;
868 }
869 
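/* Return 1 when the devargs contain enable_floating_veb=1, 0 otherwise. */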
870 static int
871 is_floating_veb_supported(struct rte_devargs *devargs)
872 {
873 	struct rte_kvargs *kvlist;
874 	const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
875 
876 	if (devargs == NULL)
877 		return 0;
878 
879 	kvlist = rte_kvargs_parse(devargs->args, NULL);
880 	if (kvlist == NULL)
881 		return 0;
882 
883 	if (!rte_kvargs_count(kvlist, floating_veb_key)) {
884 		rte_kvargs_free(kvlist);
885 		return 0;
886 	}
887 	/* Floating VEB is enabled when there's key-value:
888 	 * enable_floating_veb=1
889 	 */
890 	if (rte_kvargs_process(kvlist, floating_veb_key,
891 			       i40e_check_floating_handler, NULL) < 0) {
892 		rte_kvargs_free(kvlist);
893 		return 0;
894 	}
895 	rte_kvargs_free(kvlist);
896 
897 	return 1;
898 }
899 
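/* Enable the floating VEB only when the firmware is recent enough
 * (FLOATING_VEB_SUPPORTED_FW_MAJ) and the devargs request it.
 */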
900 static void
901 config_floating_veb(struct rte_eth_dev *dev)
902 {
903 	struct rte_pci_device *pci_dev = dev->pci_dev;
904 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
905 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
906 
907 	memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
908 
909 	if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
910 		pf->floating_veb =
911 			is_floating_veb_supported(pci_dev->device.devargs);
912 		config_vf_floating_veb(pci_dev->device.devargs,
913 				       pf->floating_veb,
914 				       pf->floating_veb_list);
915 	} else {
916 		pf->floating_veb = false;
917 	}
918 }
919 
920 #define I40E_L2_TAGS_S_TAG_SHIFT 1
921 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
922 
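/* PF device init: map the BAR registers, reset the PF, bring up the admin
 * queue, apply the GLQF/PHY register workarounds, initialize the HMC and the
 * queue/MSI-X resource pools, set up the main VSI with its default MAC/VLAN
 * configuration, and register the interrupt handler.
 */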
923 static int
924 eth_i40e_dev_init(struct rte_eth_dev *dev)
925 {
926 	struct rte_pci_device *pci_dev;
927 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
928 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
929 	struct i40e_vsi *vsi;
930 	int ret;
931 	uint32_t len;
932 	uint8_t aq_fail = 0;
933 
934 	PMD_INIT_FUNC_TRACE();
935 
936 	dev->dev_ops = &i40e_eth_dev_ops;
937 	dev->rx_pkt_burst = i40e_recv_pkts;
938 	dev->tx_pkt_burst = i40e_xmit_pkts;
939 
940 	/* For secondary processes, we don't initialise any further, as the
941 	 * primary has already done this work. Only check whether we need
942 	 * a different RX/TX function. */
943 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
944 		i40e_set_rx_function(dev);
945 		i40e_set_tx_function(dev);
946 		return 0;
947 	}
948 	pci_dev = dev->pci_dev;
949 
950 	rte_eth_copy_pci_info(dev, pci_dev);
951 
952 	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
953 	pf->adapter->eth_dev = dev;
954 	pf->dev_data = dev->data;
955 
956 	hw->back = I40E_PF_TO_ADAPTER(pf);
957 	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
958 	if (!hw->hw_addr) {
959 		PMD_INIT_LOG(ERR, "Hardware is not available, "
960 			     "as address is NULL");
961 		return -ENODEV;
962 	}
963 
964 	hw->vendor_id = pci_dev->id.vendor_id;
965 	hw->device_id = pci_dev->id.device_id;
966 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
967 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
968 	hw->bus.device = pci_dev->addr.devid;
969 	hw->bus.func = pci_dev->addr.function;
970 	hw->adapter_stopped = 0;
971 
972 	/* Make sure all is clean before doing PF reset */
973 	i40e_clear_hw(hw);
974 
975 	/* Initialize the hardware */
976 	i40e_hw_init(dev);
977 
978 	/* Reset here to make sure all is clean for each PF */
979 	ret = i40e_pf_reset(hw);
980 	if (ret) {
981 		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
982 		return ret;
983 	}
984 
985 	/* Initialize the shared code (base driver) */
986 	ret = i40e_init_shared_code(hw);
987 	if (ret) {
988 		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
989 		return ret;
990 	}
991 
992 	/*
993 	 * To work around the NVM issue, initialize registers
994 	 * for flexible payload and packet type of QinQ by
995 	 * software. It should be removed once issues are fixed
996 	 * in NVM.
997 	 */
998 	i40e_GLQF_reg_init(hw);
999 
1000 	/* Initialize the input set for filters (hash and fd) to default value */
1001 	i40e_filter_input_set_init(pf);
1002 
1003 	/* Initialize the parameters for adminq */
1004 	i40e_init_adminq_parameter(hw);
1005 	ret = i40e_init_adminq(hw);
1006 	if (ret != I40E_SUCCESS) {
1007 		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1008 		return -EIO;
1009 	}
1010 	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1011 		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1012 		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
1013 		     ((hw->nvm.version >> 12) & 0xf),
1014 		     ((hw->nvm.version >> 4) & 0xff),
1015 		     (hw->nvm.version & 0xf), hw->nvm.eetrack);
1016 
1017 	/* Need the special FW version to support floating VEB */
1018 	config_floating_veb(dev);
1019 	/* Clear PXE mode */
1020 	i40e_clear_pxe_mode(hw);
1021 	ret = i40e_dev_sync_phy_type(hw);
1022 	if (ret) {
1023 		PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret);
1024 		goto err_sync_phy_type;
1025 	}
1026 	/*
1027 	 * On X710, performance numbers are far below expectations with recent
1028 	 * firmware versions. The fix for this issue may not be integrated in
1029 	 * the following firmware version, so a workaround in the software
1030 	 * driver is needed. It modifies the initial values of 3 internal-only
1031 	 * registers. Note that the workaround can be removed once it is fixed
1032 	 * in firmware in the future.
1033 	 */
1034 	i40e_configure_registers(hw);
1035 
1036 	/* Get hw capabilities */
1037 	ret = i40e_get_cap(hw);
1038 	if (ret != I40E_SUCCESS) {
1039 		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1040 		goto err_get_capabilities;
1041 	}
1042 
1043 	/* Initialize parameters for PF */
1044 	ret = i40e_pf_parameter_init(dev);
1045 	if (ret != 0) {
1046 		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1047 		goto err_parameter_init;
1048 	}
1049 
1050 	/* Initialize the queue management */
1051 	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1052 	if (ret < 0) {
1053 		PMD_INIT_LOG(ERR, "Failed to init queue pool");
1054 		goto err_qp_pool_init;
1055 	}
1056 	ret = i40e_res_pool_init(&pf->msix_pool, 1,
1057 				hw->func_caps.num_msix_vectors - 1);
1058 	if (ret < 0) {
1059 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1060 		goto err_msix_pool_init;
1061 	}
1062 
1063 	/* Initialize lan hmc */
1064 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1065 				hw->func_caps.num_rx_qp, 0, 0);
1066 	if (ret != I40E_SUCCESS) {
1067 		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1068 		goto err_init_lan_hmc;
1069 	}
1070 
1071 	/* Configure lan hmc */
1072 	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1073 	if (ret != I40E_SUCCESS) {
1074 		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1075 		goto err_configure_lan_hmc;
1076 	}
1077 
1078 	/* Get and check the mac address */
1079 	i40e_get_mac_addr(hw, hw->mac.addr);
1080 	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1081 		PMD_INIT_LOG(ERR, "mac address is not valid");
1082 		ret = -EIO;
1083 		goto err_get_mac_addr;
1084 	}
1085 	/* Copy the permanent MAC address */
1086 	ether_addr_copy((struct ether_addr *) hw->mac.addr,
1087 			(struct ether_addr *) hw->mac.perm_addr);
1088 
1089 	/* Disable flow control */
1090 	hw->fc.requested_mode = I40E_FC_NONE;
1091 	i40e_set_fc(hw, &aq_fail, TRUE);
1092 
1093 	/* Set the global registers with default ether type value */
1094 	ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
1095 	if (ret != I40E_SUCCESS) {
1096 		PMD_INIT_LOG(ERR, "Failed to set the default outer "
1097 			     "VLAN ether type");
1098 		goto err_setup_pf_switch;
1099 	}
1100 
1101 	/* PF setup, which includes VSI setup */
1102 	ret = i40e_pf_setup(pf);
1103 	if (ret) {
1104 		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1105 		goto err_setup_pf_switch;
1106 	}
1107 
1108 	/* reset all stats of the device, including pf and main vsi */
1109 	i40e_dev_stats_reset(dev);
1110 
1111 	vsi = pf->main_vsi;
1112 
1113 	/* Disable double vlan by default */
1114 	i40e_vsi_config_double_vlan(vsi, FALSE);
1115 
1116 	/* Disable S-TAG identification by default */
1117 	ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1118 	if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1119 		ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1120 		I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1121 	}
1122 
1123 	if (!vsi->max_macaddrs)
1124 		len = ETHER_ADDR_LEN;
1125 	else
1126 		len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1127 
1128 	/* Should be after VSI initialized */
1129 	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1130 	if (!dev->data->mac_addrs) {
1131 		PMD_INIT_LOG(ERR, "Failed to allocated memory "
1132 					"for storing mac address");
1133 		goto err_mac_alloc;
1134 	}
1135 	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1136 					&dev->data->mac_addrs[0]);
1137 
1138 	/* initialize pf host driver to set up SR-IOV resources if applicable */
1139 	i40e_pf_host_init(dev);
1140 
1141 	/* register callback func to eal lib */
1142 	rte_intr_callback_register(&(pci_dev->intr_handle),
1143 		i40e_dev_interrupt_handler, (void *)dev);
1144 
1145 	/* configure and enable device interrupt */
1146 	i40e_pf_config_irq0(hw, TRUE);
1147 	i40e_pf_enable_irq0(hw);
1148 
1149 	/* enable uio intr after callback register */
1150 	rte_intr_enable(&(pci_dev->intr_handle));
1151 	/*
1152 	 * Add an ethertype filter to drop all flow control frames transmitted
1153 	 * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
1154 	 * frames to wire.
1155 	 */
1156 	i40e_add_tx_flow_control_drop_filter(pf);
1157 
1158 	/* Set the max frame size to 0x2600 by default,
1159 	 * in case other drivers changed the default value.
1160 	 */
1161 	i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1162 
1163 	/* initialize mirror rule list */
1164 	TAILQ_INIT(&pf->mirror_list);
1165 
1166 	/* Init dcb to sw mode by default */
1167 	ret = i40e_dcb_init_configure(dev, TRUE);
1168 	if (ret != I40E_SUCCESS) {
1169 		PMD_INIT_LOG(INFO, "Failed to init dcb.");
1170 		pf->flags &= ~I40E_FLAG_DCB;
1171 	}
1172 
1173 	return 0;
1174 
1175 err_mac_alloc:
1176 	i40e_vsi_release(pf->main_vsi);
1177 err_setup_pf_switch:
1178 err_get_mac_addr:
1179 err_configure_lan_hmc:
1180 	(void)i40e_shutdown_lan_hmc(hw);
1181 err_init_lan_hmc:
1182 	i40e_res_pool_destroy(&pf->msix_pool);
1183 err_msix_pool_init:
1184 	i40e_res_pool_destroy(&pf->qp_pool);
1185 err_qp_pool_init:
1186 err_parameter_init:
1187 err_get_capabilities:
1188 err_sync_phy_type:
1189 	(void)i40e_shutdown_adminq(hw);
1190 
1191 	return ret;
1192 }
1193 
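/* PF device uninit (primary process only): close the device if it is still
 * running, stop LLDP, clear the filter control settings, release the PF host
 * driver and MAC address resources, and unregister the interrupt callback.
 */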
1194 static int
1195 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1196 {
1197 	struct rte_pci_device *pci_dev;
1198 	struct i40e_hw *hw;
1199 	struct i40e_filter_control_settings settings;
1200 	int ret;
1201 	uint8_t aq_fail = 0;
1202 
1203 	PMD_INIT_FUNC_TRACE();
1204 
1205 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1206 		return 0;
1207 
1208 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1209 	pci_dev = dev->pci_dev;
1210 
1211 	if (hw->adapter_stopped == 0)
1212 		i40e_dev_close(dev);
1213 
1214 	dev->dev_ops = NULL;
1215 	dev->rx_pkt_burst = NULL;
1216 	dev->tx_pkt_burst = NULL;
1217 
1218 	/* Disable LLDP */
1219 	ret = i40e_aq_stop_lldp(hw, true, NULL);
1220 	if (ret != I40E_SUCCESS) /* Its failure can be ignored */
1221 		PMD_INIT_LOG(INFO, "Failed to stop lldp");
1222 
1223 	/* Clear PXE mode */
1224 	i40e_clear_pxe_mode(hw);
1225 
1226 	/* Unconfigure filter control */
1227 	memset(&settings, 0, sizeof(settings));
1228 	ret = i40e_set_filter_control(hw, &settings);
1229 	if (ret)
1230 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1231 					ret);
1232 
1233 	/* Disable flow control */
1234 	hw->fc.requested_mode = I40E_FC_NONE;
1235 	i40e_set_fc(hw, &aq_fail, TRUE);
1236 
1237 	/* uninitialize pf host driver */
1238 	i40e_pf_host_uninit(dev);
1239 
1240 	rte_free(dev->data->mac_addrs);
1241 	dev->data->mac_addrs = NULL;
1242 
1243 	/* disable uio intr before callback unregister */
1244 	rte_intr_disable(&(pci_dev->intr_handle));
1245 
1246 	/* unregister callback func from eal lib */
1247 	rte_intr_callback_unregister(&(pci_dev->intr_handle),
1248 		i40e_dev_interrupt_handler, (void *)dev);
1249 
1250 	return 0;
1251 }
1252 
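/* dev_configure: set up flow director when perfect-filter mode is requested,
 * initialize VLAN offloads, and create the VMDQ/DCB configuration requested
 * in rxmode.mq_mode; allocated resources are rolled back on failure.
 */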
1253 static int
1254 i40e_dev_configure(struct rte_eth_dev *dev)
1255 {
1256 	struct i40e_adapter *ad =
1257 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1258 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1259 	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1260 	int i, ret;
1261 
1262 	/* Initialize to TRUE. If any Rx queue doesn't meet the bulk
1263 	 * allocation or vector Rx preconditions, these flags will be reset.
1264 	 */
1265 	ad->rx_bulk_alloc_allowed = true;
1266 	ad->rx_vec_allowed = true;
1267 	ad->tx_simple_allowed = true;
1268 	ad->tx_vec_allowed = true;
1269 
1270 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1271 		ret = i40e_fdir_setup(pf);
1272 		if (ret != I40E_SUCCESS) {
1273 			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1274 			return -ENOTSUP;
1275 		}
1276 		ret = i40e_fdir_configure(dev);
1277 		if (ret < 0) {
1278 			PMD_DRV_LOG(ERR, "failed to configure fdir.");
1279 			goto err;
1280 		}
1281 	} else
1282 		i40e_fdir_teardown(pf);
1283 
1284 	ret = i40e_dev_init_vlan(dev);
1285 	if (ret < 0)
1286 		goto err;
1287 
1288 	/* VMDQ setup.
1289 	 *  VMDQ setting is moved out of i40e_pf_config_mq_rx() because VMDQ
1290 	 *  and RSS setting have different requirements.
1291 	 *  The general PMD call sequence is NIC init, configure,
1292 	 *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up the
1293 	 *  VSI that a specific queue belongs to when VMDQ is applicable, so
1294 	 *  VMDQ setting has to be done before rx/tx_queue_setup() and this
1295 	 *  function is a good place for vmdq_setup.
1296 	 *  RSS setting needs the actual configured RX queue number, which is
1297 	 *  only available after rx_queue_setup(), so dev_start() is a good
1298 	 *  place for the RSS setup.
1299 	 */
1300 	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1301 		ret = i40e_vmdq_setup(dev);
1302 		if (ret)
1303 			goto err;
1304 	}
1305 
1306 	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1307 		ret = i40e_dcb_setup(dev);
1308 		if (ret) {
1309 			PMD_DRV_LOG(ERR, "failed to configure DCB.");
1310 			goto err_dcb;
1311 		}
1312 	}
1313 
1314 	return 0;
1315 
1316 err_dcb:
1317 	/* release VMDQ resources if they exist */
1318 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1319 		i40e_vsi_release(pf->vmdq[i].vsi);
1320 		pf->vmdq[i].vsi = NULL;
1321 	}
1322 	rte_free(pf->vmdq);
1323 	pf->vmdq = NULL;
1324 err:
1325 	/* release flow director resources if they exist */
1326 	i40e_fdir_teardown(pf);
1327 	return ret;
1328 }
1329 
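/* Detach all queues of the VSI from their interrupt vector: clear each
 * queue's TX/RX interrupt-cause registers and reset the vector's queue
 * linked-list head (PF, VMDq or VF variant).
 */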
1330 void
1331 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1332 {
1333 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1334 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1335 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1336 	uint16_t msix_vect = vsi->msix_intr;
1337 	uint16_t i;
1338 
1339 	for (i = 0; i < vsi->nb_qps; i++) {
1340 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1341 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1342 		rte_wmb();
1343 	}
1344 
1345 	if (vsi->type != I40E_VSI_SRIOV) {
1346 		if (!rte_intr_allow_others(intr_handle)) {
1347 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1348 				       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1349 			I40E_WRITE_REG(hw,
1350 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1351 				       0);
1352 		} else {
1353 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1354 				       I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1355 			I40E_WRITE_REG(hw,
1356 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1357 						       msix_vect - 1), 0);
1358 		}
1359 	} else {
1360 		uint32_t reg;
1361 		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1362 			vsi->user_param + (msix_vect - 1);
1363 
1364 		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1365 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1366 	}
1367 	I40E_WRITE_FLUSH(hw);
1368 }
1369 
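/* Bind nb_queue contiguous RX queues starting at base_queue to one MSI-X
 * vector: chain the queues through the QINT_RQCTL next-queue fields and
 * write the first queue into the vector's linked-list head register.
 */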
1370 static void
1371 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1372 		       int base_queue, int nb_queue)
1373 {
1374 	int i;
1375 	uint32_t val;
1376 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1377 
1378 	/* Bind all RX queues to allocated MSIX interrupt */
1379 	for (i = 0; i < nb_queue; i++) {
1380 		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1381 			I40E_QINT_RQCTL_ITR_INDX_MASK |
1382 			((base_queue + i + 1) <<
1383 			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1384 			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1385 			I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1386 
1387 		if (i == nb_queue - 1)
1388 			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1389 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1390 	}
1391 
1392 	/* Write first RX queue to Link list register as the head element */
1393 	if (vsi->type != I40E_VSI_SRIOV) {
1394 		uint16_t interval =
1395 			i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1396 
1397 		if (msix_vect == I40E_MISC_VEC_ID) {
1398 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1399 				       (base_queue <<
1400 					I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1401 				       (0x0 <<
1402 					I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1403 			I40E_WRITE_REG(hw,
1404 				       I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1405 				       interval);
1406 		} else {
1407 			I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1408 				       (base_queue <<
1409 					I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1410 				       (0x0 <<
1411 					I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1412 			I40E_WRITE_REG(hw,
1413 				       I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1414 						       msix_vect - 1),
1415 				       interval);
1416 		}
1417 	} else {
1418 		uint32_t reg;
1419 
1420 		if (msix_vect == I40E_MISC_VEC_ID) {
1421 			I40E_WRITE_REG(hw,
1422 				       I40E_VPINT_LNKLST0(vsi->user_param),
1423 				       (base_queue <<
1424 					I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1425 				       (0x0 <<
1426 					I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1427 		} else {
1428 			/* num_msix_vectors_vf includes irq0, which must be subtracted */
1429 			reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1430 				vsi->user_param + (msix_vect - 1);
1431 
1432 			I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1433 				       (base_queue <<
1434 					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1435 				       (0x0 <<
1436 					I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1437 		}
1438 	}
1439 
1440 	I40E_WRITE_FLUSH(hw);
1441 }
1442 
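/*
 * Bind the VSI's used queues to MSI-X vectors.  An SR-IOV VSI gets one
 * vector for all of its queues.  For PF and VMDq VSIs, queues are mapped
 * 1:1 to the available vectors; when fewer vectors than queues remain,
 * the remaining queues are all chained onto the last vector.  The chosen
 * queue/vector mapping is recorded in intr_handle->intr_vec for the RX
 * interrupt API.
 */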
1443 void
1444 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
1445 {
1446 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1447 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1448 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1449 	uint16_t msix_vect = vsi->msix_intr;
1450 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1451 	uint16_t queue_idx = 0;
1452 	int record = 0;
1453 	uint32_t val;
1454 	int i;
1455 
1456 	for (i = 0; i < vsi->nb_qps; i++) {
1457 		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1458 		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1459 	}
1460 
1461 	/* Disable auto-masking so the INTENA flag is not auto-cleared on interrupt */
1462 	val = I40E_READ_REG(hw, I40E_GLINT_CTL);
1463 	val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
1464 		I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
1465 		I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
1466 	I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
1467 
1468 	/* VF bind interrupt */
1469 	if (vsi->type == I40E_VSI_SRIOV) {
1470 		__vsi_queues_bind_intr(vsi, msix_vect,
1471 				       vsi->base_queue, vsi->nb_qps);
1472 		return;
1473 	}
1474 
1475 	/* PF & VMDq bind interrupt */
1476 	if (rte_intr_dp_is_en(intr_handle)) {
1477 		if (vsi->type == I40E_VSI_MAIN) {
1478 			queue_idx = 0;
1479 			record = 1;
1480 		} else if (vsi->type == I40E_VSI_VMDQ2) {
1481 			struct i40e_vsi *main_vsi =
1482 				I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1483 			queue_idx = vsi->base_queue - main_vsi->nb_qps;
1484 			record = 1;
1485 		}
1486 	}
1487 
1488 	for (i = 0; i < vsi->nb_used_qps; i++) {
1489 		if (nb_msix <= 1) {
1490 			if (!rte_intr_allow_others(intr_handle))
1491 				/* allow to share MISC_VEC_ID */
1492 				msix_vect = I40E_MISC_VEC_ID;
1493 
1494 			/* not enough MSI-X vectors, map all remaining queues to one */
1495 			__vsi_queues_bind_intr(vsi, msix_vect,
1496 					       vsi->base_queue + i,
1497 					       vsi->nb_used_qps - i);
1498 			for (; !!record && i < vsi->nb_used_qps; i++)
1499 				intr_handle->intr_vec[queue_idx + i] =
1500 					msix_vect;
1501 			break;
1502 		}
1503 		/* 1:1 queue/msix_vect mapping */
1504 		__vsi_queues_bind_intr(vsi, msix_vect,
1505 				       vsi->base_queue + i, 1);
1506 		if (!!record)
1507 			intr_handle->intr_vec[queue_idx + i] = msix_vect;
1508 
1509 		msix_vect++;
1510 		nb_msix--;
1511 	}
1512 }
1513 
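/*
 * Enable the VSI's queue interrupts through the dynamic control registers:
 * PFINT_DYN_CTLN for each data-path vector when vectors other than the
 * misc one are allowed, otherwise PFINT_DYN_CTL0.  The ITR interval
 * programmed here is derived from RTE_LIBRTE_I40E_ITR_INTERVAL.
 */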
1514 static void
1515 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1516 {
1517 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1518 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1519 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1520 	uint16_t interval = i40e_calc_itr_interval(
1521 		RTE_LIBRTE_I40E_ITR_INTERVAL);
1522 	uint16_t msix_intr, i;
1523 
1524 	if (rte_intr_allow_others(intr_handle))
1525 		for (i = 0; i < vsi->nb_msix; i++) {
1526 			msix_intr = vsi->msix_intr + i;
1527 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1528 				I40E_PFINT_DYN_CTLN_INTENA_MASK |
1529 				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1530 				(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1531 				(interval <<
1532 				 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
1533 		}
1534 	else
1535 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1536 			       I40E_PFINT_DYN_CTL0_INTENA_MASK |
1537 			       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1538 			       (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1539 			       (interval <<
1540 				I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
1541 
1542 	I40E_WRITE_FLUSH(hw);
1543 }
1544 
1545 static void
1546 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1547 {
1548 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1549 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1550 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1551 	uint16_t msix_intr, i;
1552 
1553 	if (rte_intr_allow_others(intr_handle))
1554 		for (i = 0; i < vsi->nb_msix; i++) {
1555 			msix_intr = vsi->msix_intr + i;
1556 			I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1557 				       0);
1558 		}
1559 	else
1560 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
1561 
1562 	I40E_WRITE_FLUSH(hw);
1563 }
1564 
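/*
 * Translate the ethdev ETH_LINK_SPEED_* bitmap from dev_conf.link_speeds
 * into the I40E_LINK_SPEED_* bitmap used by the admin queue commands.
 */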
1565 static inline uint8_t
1566 i40e_parse_link_speeds(uint16_t link_speeds)
1567 {
1568 	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
1569 
1570 	if (link_speeds & ETH_LINK_SPEED_40G)
1571 		link_speed |= I40E_LINK_SPEED_40GB;
1572 	if (link_speeds & ETH_LINK_SPEED_25G)
1573 		link_speed |= I40E_LINK_SPEED_25GB;
1574 	if (link_speeds & ETH_LINK_SPEED_20G)
1575 		link_speed |= I40E_LINK_SPEED_20GB;
1576 	if (link_speeds & ETH_LINK_SPEED_10G)
1577 		link_speed |= I40E_LINK_SPEED_10GB;
1578 	if (link_speeds & ETH_LINK_SPEED_1G)
1579 		link_speed |= I40E_LINK_SPEED_1GB;
1580 	if (link_speeds & ETH_LINK_SPEED_100M)
1581 		link_speed |= I40E_LINK_SPEED_100MB;
1582 
1583 	return link_speed;
1584 }
1585 
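/*
 * Program the PHY through the admin queue.  The current PHY abilities are
 * read first so the pause and low-power bits are preserved; when
 * auto-negotiation is enabled the full speed set is advertised, otherwise
 * only the forced speed is configured.
 */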
1586 static int
1587 i40e_phy_conf_link(struct i40e_hw *hw,
1588 		   uint8_t abilities,
1589 		   uint8_t force_speed)
1590 {
1591 	enum i40e_status_code status;
1592 	struct i40e_aq_get_phy_abilities_resp phy_ab;
1593 	struct i40e_aq_set_phy_config phy_conf;
1594 	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
1595 			I40E_AQ_PHY_FLAG_PAUSE_RX |
1597 			I40E_AQ_PHY_FLAG_LOW_POWER;
1598 	const uint8_t advt = I40E_LINK_SPEED_40GB |
1599 			I40E_LINK_SPEED_25GB |
1600 			I40E_LINK_SPEED_10GB |
1601 			I40E_LINK_SPEED_1GB |
1602 			I40E_LINK_SPEED_100MB;
1603 	int ret = -ENOTSUP;
1604 
1605 
1606 	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
1607 					      NULL);
1608 	if (status)
1609 		return ret;
1610 
1611 	memset(&phy_conf, 0, sizeof(phy_conf));
1612 
1613 	/* bits 0-2 use the values from get_phy_abilities_resp */
1614 	abilities &= ~mask;
1615 	abilities |= phy_ab.abilities & mask;
1616 
1617 	/* update abilities and speed */
1618 	if (abilities & I40E_AQ_PHY_AN_ENABLED)
1619 		phy_conf.link_speed = advt;
1620 	else
1621 		phy_conf.link_speed = force_speed;
1622 
1623 	phy_conf.abilities = abilities;
1624 
1625 	/* use get_phy_abilities_resp value for the rest */
1626 	phy_conf.phy_type = phy_ab.phy_type;
1627 	phy_conf.eee_capability = phy_ab.eee_capability;
1628 	phy_conf.eeer = phy_ab.eeer_val;
1629 	phy_conf.low_power_ctrl = phy_ab.d3_lpan;
1630 
1631 	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
1632 		    phy_ab.abilities, phy_ab.link_speed);
1633 	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
1634 		    phy_conf.abilities, phy_conf.link_speed);
1635 
1636 	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
1637 	if (status)
1638 		return ret;
1639 
1640 	return I40E_SUCCESS;
1641 }
1642 
1643 static int
1644 i40e_apply_link_speed(struct rte_eth_dev *dev)
1645 {
1646 	uint8_t speed;
1647 	uint8_t abilities = 0;
1648 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1649 	struct rte_eth_conf *conf = &dev->data->dev_conf;
1650 
1651 	speed = i40e_parse_link_speeds(conf->link_speeds);
1652 	if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
1653 		abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1654 	if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
1655 		abilities |= I40E_AQ_PHY_AN_ENABLED;
1656 	abilities |= I40E_AQ_PHY_LINK_ENABLED;
1657 
1658 	/* Skip changing speed on 40G interfaces; the firmware does not support it */
1659 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
1660 		speed = I40E_LINK_SPEED_UNKNOWN;
1661 		abilities |= I40E_AQ_PHY_AN_ENABLED;
1662 	}
1663 
1664 	return i40e_phy_conf_link(hw, abilities, speed);
1665 }
1666 
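/*
 * Device start sequence: allocate the RX interrupt vectors, initialize the
 * RX/TX queues, bind and enable the queue interrupts for the main, VMDq
 * and flow director VSIs, switch the queues on, enable broadcast reception
 * and finally apply the configured link speed.  Failures after queue
 * initialization roll back through err_up.
 */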
1667 static int
1668 i40e_dev_start(struct rte_eth_dev *dev)
1669 {
1670 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1671 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1672 	struct i40e_vsi *main_vsi = pf->main_vsi;
1673 	int ret, i;
1674 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1675 	uint32_t intr_vector = 0;
1676 
1677 	hw->adapter_stopped = 0;
1678 
1679 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1680 		PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled",
1681 			     dev->data->port_id);
1682 		return -EINVAL;
1683 	}
1684 
1685 	rte_intr_disable(intr_handle);
1686 
1687 	if ((rte_intr_cap_multiple(intr_handle) ||
1688 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
1689 	    dev->data->dev_conf.intr_conf.rxq != 0) {
1690 		intr_vector = dev->data->nb_rx_queues;
1691 		if (rte_intr_efd_enable(intr_handle, intr_vector))
1692 			return -1;
1693 	}
1694 
1695 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1696 		intr_handle->intr_vec =
1697 			rte_zmalloc("intr_vec",
1698 				    dev->data->nb_rx_queues * sizeof(int),
1699 				    0);
1700 		if (!intr_handle->intr_vec) {
1701 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1702 				     " intr_vec\n", dev->data->nb_rx_queues);
1703 			return -ENOMEM;
1704 		}
1705 	}
1706 
1707 	/* Initialize the RX/TX queues */
1708 	ret = i40e_dev_rxtx_init(pf);
1709 	if (ret != I40E_SUCCESS) {
1710 		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
1711 		goto err_up;
1712 	}
1713 
1714 	/* Map queues with MSIX interrupt */
1715 	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
1716 		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1717 	i40e_vsi_queues_bind_intr(main_vsi);
1718 	i40e_vsi_enable_queues_intr(main_vsi);
1719 
1720 	/* Map VMDQ VSI queues with MSIX interrupt */
1721 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1722 		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
1723 		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
1724 		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
1725 	}
1726 
1727 	/* enable FDIR MSIX interrupt */
1728 	if (pf->fdir.fdir_vsi) {
1729 		i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
1730 		i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
1731 	}
1732 
1733 	/* Enable all queues which have been configured */
1734 	ret = i40e_dev_switch_queues(pf, TRUE);
1735 	if (ret != I40E_SUCCESS) {
1736 		PMD_DRV_LOG(ERR, "Failed to enable VSI");
1737 		goto err_up;
1738 	}
1739 
1740 	/* Enable receiving broadcast packets */
1741 	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
1742 	if (ret != I40E_SUCCESS)
1743 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
1744 
1745 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1746 		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
1747 						true, NULL);
1748 		if (ret != I40E_SUCCESS)
1749 			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
1750 	}
1751 
1752 	/* Apply link configure */
1753 	if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
1754 				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
1755 				ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
1756 				ETH_LINK_SPEED_40G)) {
1757 		PMD_DRV_LOG(ERR, "Invalid link setting");
1758 		goto err_up;
1759 	}
1760 	ret = i40e_apply_link_speed(dev);
1761 	if (I40E_SUCCESS != ret) {
1762 		PMD_DRV_LOG(ERR, "Fail to apply link setting");
1763 		goto err_up;
1764 	}
1765 
1766 	if (!rte_intr_allow_others(intr_handle)) {
1767 		rte_intr_callback_unregister(intr_handle,
1768 					     i40e_dev_interrupt_handler,
1769 					     (void *)dev);
1770 		/* configure and enable device interrupt */
1771 		i40e_pf_config_irq0(hw, FALSE);
1772 		i40e_pf_enable_irq0(hw);
1773 
1774 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1775 			PMD_INIT_LOG(INFO, "LSC interrupt is not enabled:"
1776 				     " interrupt multiplexing is not available\n");
1777 	}
1778 
1779 	/* enable uio intr after callback register */
1780 	rte_intr_enable(intr_handle);
1781 
1782 	return I40E_SUCCESS;
1783 
1784 err_up:
1785 	i40e_dev_switch_queues(pf, FALSE);
1786 	i40e_dev_clear_queues(dev);
1787 
1788 	return ret;
1789 }
1790 
1791 static void
1792 i40e_dev_stop(struct rte_eth_dev *dev)
1793 {
1794 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1795 	struct i40e_vsi *main_vsi = pf->main_vsi;
1796 	struct i40e_mirror_rule *p_mirror;
1797 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
1798 	int i;
1799 
1800 	/* Disable all queues */
1801 	i40e_dev_switch_queues(pf, FALSE);
1802 
1803 	/* un-map queues with interrupt registers */
1804 	i40e_vsi_disable_queues_intr(main_vsi);
1805 	i40e_vsi_queues_unbind_intr(main_vsi);
1806 
1807 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1808 		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
1809 		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
1810 	}
1811 
1812 	if (pf->fdir.fdir_vsi) {
1813 		i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
1814 		i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
1815 	}
1816 	/* Clear all queues and release memory */
1817 	i40e_dev_clear_queues(dev);
1818 
1819 	/* Set link down */
1820 	i40e_dev_set_link_down(dev);
1821 
1822 	/* Remove all mirror rules */
1823 	while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
1824 		TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
1825 		rte_free(p_mirror);
1826 	}
1827 	pf->nb_mirror_rule = 0;
1828 
1829 	if (!rte_intr_allow_others(intr_handle))
1830 		/* resume to the default handler */
1831 		rte_intr_callback_register(intr_handle,
1832 					   i40e_dev_interrupt_handler,
1833 					   (void *)dev);
1834 
1835 	/* Clean datapath event and queue/vec mapping */
1836 	rte_intr_efd_disable(intr_handle);
1837 	if (intr_handle->intr_vec) {
1838 		rte_free(intr_handle->intr_vec);
1839 		intr_handle->intr_vec = NULL;
1840 	}
1841 }
1842 
1843 static void
1844 i40e_dev_close(struct rte_eth_dev *dev)
1845 {
1846 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1847 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1848 	uint32_t reg;
1849 	int i;
1850 
1851 	PMD_INIT_FUNC_TRACE();
1852 
1853 	i40e_dev_stop(dev);
1854 	hw->adapter_stopped = 1;
1855 	i40e_dev_free_queues(dev);
1856 
1857 	/* Disable interrupt */
1858 	i40e_pf_disable_irq0(hw);
1859 	rte_intr_disable(&(dev->pci_dev->intr_handle));
1860 
1861 	/* shutdown and destroy the HMC */
1862 	i40e_shutdown_lan_hmc(hw);
1863 
1864 	/* release all the existing VSIs and VEBs */
1865 	i40e_fdir_teardown(pf);
1866 	i40e_vsi_release(pf->main_vsi);
1867 
1868 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1869 		i40e_vsi_release(pf->vmdq[i].vsi);
1870 		pf->vmdq[i].vsi = NULL;
1871 	}
1872 
1873 	rte_free(pf->vmdq);
1874 	pf->vmdq = NULL;
1875 
1876 	/* shutdown the adminq */
1877 	i40e_aq_queue_shutdown(hw, true);
1878 	i40e_shutdown_adminq(hw);
1879 
1880 	i40e_res_pool_destroy(&pf->qp_pool);
1881 	i40e_res_pool_destroy(&pf->msix_pool);
1882 
1883 	/* force a PF reset to clean anything leftover */
1884 	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
1885 	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
1886 			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1887 	I40E_WRITE_FLUSH(hw);
1888 }
1889 
1890 static void
1891 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
1892 {
1893 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1894 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1895 	struct i40e_vsi *vsi = pf->main_vsi;
1896 	int status;
1897 
1898 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1899 						     true, NULL, true);
1900 	if (status != I40E_SUCCESS)
1901 		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
1902 
1903 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1904 							TRUE, NULL);
1905 	if (status != I40E_SUCCESS)
1906 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1907 
1908 }
1909 
1910 static void
1911 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
1912 {
1913 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1914 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1915 	struct i40e_vsi *vsi = pf->main_vsi;
1916 	int status;
1917 
1918 	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1919 						     false, NULL, true);
1920 	if (status != I40E_SUCCESS)
1921 		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
1922 
1923 	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1924 							false, NULL);
1925 	if (status != I40E_SUCCESS)
1926 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1927 }
1928 
1929 static void
1930 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
1931 {
1932 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1933 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1934 	struct i40e_vsi *vsi = pf->main_vsi;
1935 	int ret;
1936 
1937 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
1938 	if (ret != I40E_SUCCESS)
1939 		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1940 }
1941 
1942 static void
1943 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
1944 {
1945 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1946 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1947 	struct i40e_vsi *vsi = pf->main_vsi;
1948 	int ret;
1949 
1950 	if (dev->data->promiscuous == 1)
1951 		return; /* must remain in all_multicast mode */
1952 
1953 	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
1954 				vsi->seid, FALSE, NULL);
1955 	if (ret != I40E_SUCCESS)
1956 		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1957 }
1958 
1959 /*
1960  * Set device link up.
1961  */
1962 static int
1963 i40e_dev_set_link_up(struct rte_eth_dev *dev)
1964 {
1965 	/* re-apply link speed setting */
1966 	return i40e_apply_link_speed(dev);
1967 }
1968 
1969 /*
1970  * Set device link down.
1971  */
1972 static int
1973 i40e_dev_set_link_down(struct rte_eth_dev *dev)
1974 {
1975 	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
1976 	uint8_t abilities = 0;
1977 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1978 
1979 	if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
1980 		abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1981 	return i40e_phy_conf_link(hw, abilities, speed);
1982 }
1983 
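/*
 * Query the firmware for link status.  When wait_to_complete is set, the
 * admin queue is polled up to MAX_REPEAT_TIME times with CHECK_INTERVAL
 * milliseconds between attempts (roughly one second in total).  Returns 0
 * if the link status changed and -1 if it is unchanged.
 */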
1984 int
1985 i40e_dev_link_update(struct rte_eth_dev *dev,
1986 		     int wait_to_complete)
1987 {
1988 #define CHECK_INTERVAL 100  /* 100ms */
1989 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
1990 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1991 	struct i40e_link_status link_status;
1992 	struct rte_eth_link link, old;
1993 	int status;
1994 	unsigned rep_cnt = MAX_REPEAT_TIME;
1995 
1996 	memset(&link, 0, sizeof(link));
1997 	memset(&old, 0, sizeof(old));
1998 	memset(&link_status, 0, sizeof(link_status));
1999 	rte_i40e_dev_atomic_read_link_status(dev, &old);
2000 
2001 	do {
2002 		/* Get link status information from hardware */
2003 		status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
2004 		if (status != I40E_SUCCESS) {
2005 			link.link_speed = ETH_SPEED_NUM_100M;
2006 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
2007 			PMD_DRV_LOG(ERR, "Failed to get link info");
2008 			goto out;
2009 		}
2010 
2011 		link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
2012 		if (!wait_to_complete)
2013 			break;
2014 
2015 		rte_delay_ms(CHECK_INTERVAL);
2016 	} while (!link.link_status && rep_cnt--);
2017 
2018 	if (!link.link_status)
2019 		goto out;
2020 
2021 	/* i40e uses full duplex only */
2022 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
2023 
2024 	/* Parse the link status */
2025 	switch (link_status.link_speed) {
2026 	case I40E_LINK_SPEED_100MB:
2027 		link.link_speed = ETH_SPEED_NUM_100M;
2028 		break;
2029 	case I40E_LINK_SPEED_1GB:
2030 		link.link_speed = ETH_SPEED_NUM_1G;
2031 		break;
2032 	case I40E_LINK_SPEED_10GB:
2033 		link.link_speed = ETH_SPEED_NUM_10G;
2034 		break;
2035 	case I40E_LINK_SPEED_20GB:
2036 		link.link_speed = ETH_SPEED_NUM_20G;
2037 		break;
2038 	case I40E_LINK_SPEED_25GB:
2039 		link.link_speed = ETH_SPEED_NUM_25G;
2040 		break;
2041 	case I40E_LINK_SPEED_40GB:
2042 		link.link_speed = ETH_SPEED_NUM_40G;
2043 		break;
2044 	default:
2045 		link.link_speed = ETH_SPEED_NUM_100M;
2046 		break;
2047 	}
2048 
2049 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2050 			ETH_LINK_SPEED_FIXED);
2051 
2052 out:
2053 	rte_i40e_dev_atomic_write_link_status(dev, &link);
2054 	if (link.link_status == old.link_status)
2055 		return -1;
2056 
2057 	return 0;
2058 }
2059 
2060 /* Get all the statistics of a VSI */
2061 void
2062 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2063 {
2064 	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2065 	struct i40e_eth_stats *nes = &vsi->eth_stats;
2066 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2067 	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2068 
2069 	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2070 			    vsi->offset_loaded, &oes->rx_bytes,
2071 			    &nes->rx_bytes);
2072 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2073 			    vsi->offset_loaded, &oes->rx_unicast,
2074 			    &nes->rx_unicast);
2075 	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2076 			    vsi->offset_loaded, &oes->rx_multicast,
2077 			    &nes->rx_multicast);
2078 	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2079 			    vsi->offset_loaded, &oes->rx_broadcast,
2080 			    &nes->rx_broadcast);
2081 	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2082 			    &oes->rx_discards, &nes->rx_discards);
2083 	/* GLV_REPC not supported */
2084 	/* GLV_RMPC not supported */
2085 	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2086 			    &oes->rx_unknown_protocol,
2087 			    &nes->rx_unknown_protocol);
2088 	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2089 			    vsi->offset_loaded, &oes->tx_bytes,
2090 			    &nes->tx_bytes);
2091 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2092 			    vsi->offset_loaded, &oes->tx_unicast,
2093 			    &nes->tx_unicast);
2094 	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2095 			    vsi->offset_loaded, &oes->tx_multicast,
2096 			    &nes->tx_multicast);
2097 	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2098 			    vsi->offset_loaded,  &oes->tx_broadcast,
2099 			    &nes->tx_broadcast);
2100 	/* GLV_TDPC not supported */
2101 	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2102 			    &oes->tx_errors, &nes->tx_errors);
2103 	vsi->offset_loaded = true;
2104 
2105 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2106 		    vsi->vsi_id);
2107 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
2108 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
2109 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
2110 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
2111 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
2112 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2113 		    nes->rx_unknown_protocol);
2114 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
2115 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
2116 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
2117 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
2118 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
2119 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
2120 	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2121 		    vsi->vsi_id);
2122 }
2123 
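/*
 * Refresh the port-wide counters from the GLPRT registers.  The
 * i40e_stat_update_32/48 helpers accumulate only the delta against the
 * previously stored value, and pf->offset_loaded indicates whether a valid
 * offset has been captured yet.  Byte counters are adjusted to exclude the
 * Ethernet CRC, which the hardware counts.
 */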
2124 static void
2125 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2126 {
2127 	unsigned int i;
2128 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2129 	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2130 
2131 	/* Get statistics of struct i40e_eth_stats */
2132 	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2133 			    I40E_GLPRT_GORCL(hw->port),
2134 			    pf->offset_loaded, &os->eth.rx_bytes,
2135 			    &ns->eth.rx_bytes);
2136 	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2137 			    I40E_GLPRT_UPRCL(hw->port),
2138 			    pf->offset_loaded, &os->eth.rx_unicast,
2139 			    &ns->eth.rx_unicast);
2140 	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2141 			    I40E_GLPRT_MPRCL(hw->port),
2142 			    pf->offset_loaded, &os->eth.rx_multicast,
2143 			    &ns->eth.rx_multicast);
2144 	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2145 			    I40E_GLPRT_BPRCL(hw->port),
2146 			    pf->offset_loaded, &os->eth.rx_broadcast,
2147 			    &ns->eth.rx_broadcast);
2148 	/* Workaround: CRC size should not be included in byte statistics,
2149 	 * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2150 	 */
2151 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2152 		ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2153 
2154 	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2155 			    pf->offset_loaded, &os->eth.rx_discards,
2156 			    &ns->eth.rx_discards);
2157 	/* GLPRT_REPC not supported */
2158 	/* GLPRT_RMPC not supported */
2159 	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2160 			    pf->offset_loaded,
2161 			    &os->eth.rx_unknown_protocol,
2162 			    &ns->eth.rx_unknown_protocol);
2163 	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2164 			    I40E_GLPRT_GOTCL(hw->port),
2165 			    pf->offset_loaded, &os->eth.tx_bytes,
2166 			    &ns->eth.tx_bytes);
2167 	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2168 			    I40E_GLPRT_UPTCL(hw->port),
2169 			    pf->offset_loaded, &os->eth.tx_unicast,
2170 			    &ns->eth.tx_unicast);
2171 	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2172 			    I40E_GLPRT_MPTCL(hw->port),
2173 			    pf->offset_loaded, &os->eth.tx_multicast,
2174 			    &ns->eth.tx_multicast);
2175 	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2176 			    I40E_GLPRT_BPTCL(hw->port),
2177 			    pf->offset_loaded, &os->eth.tx_broadcast,
2178 			    &ns->eth.tx_broadcast);
2179 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2180 		ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2181 	/* GLPRT_TEPC not supported */
2182 
2183 	/* additional port specific stats */
2184 	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2185 			    pf->offset_loaded, &os->tx_dropped_link_down,
2186 			    &ns->tx_dropped_link_down);
2187 	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2188 			    pf->offset_loaded, &os->crc_errors,
2189 			    &ns->crc_errors);
2190 	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2191 			    pf->offset_loaded, &os->illegal_bytes,
2192 			    &ns->illegal_bytes);
2193 	/* GLPRT_ERRBC not supported */
2194 	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2195 			    pf->offset_loaded, &os->mac_local_faults,
2196 			    &ns->mac_local_faults);
2197 	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2198 			    pf->offset_loaded, &os->mac_remote_faults,
2199 			    &ns->mac_remote_faults);
2200 	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2201 			    pf->offset_loaded, &os->rx_length_errors,
2202 			    &ns->rx_length_errors);
2203 	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2204 			    pf->offset_loaded, &os->link_xon_rx,
2205 			    &ns->link_xon_rx);
2206 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2207 			    pf->offset_loaded, &os->link_xoff_rx,
2208 			    &ns->link_xoff_rx);
2209 	for (i = 0; i < 8; i++) {
2210 		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2211 				    pf->offset_loaded,
2212 				    &os->priority_xon_rx[i],
2213 				    &ns->priority_xon_rx[i]);
2214 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2215 				    pf->offset_loaded,
2216 				    &os->priority_xoff_rx[i],
2217 				    &ns->priority_xoff_rx[i]);
2218 	}
2219 	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2220 			    pf->offset_loaded, &os->link_xon_tx,
2221 			    &ns->link_xon_tx);
2222 	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2223 			    pf->offset_loaded, &os->link_xoff_tx,
2224 			    &ns->link_xoff_tx);
2225 	for (i = 0; i < 8; i++) {
2226 		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2227 				    pf->offset_loaded,
2228 				    &os->priority_xon_tx[i],
2229 				    &ns->priority_xon_tx[i]);
2230 		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2231 				    pf->offset_loaded,
2232 				    &os->priority_xoff_tx[i],
2233 				    &ns->priority_xoff_tx[i]);
2234 		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2235 				    pf->offset_loaded,
2236 				    &os->priority_xon_2_xoff[i],
2237 				    &ns->priority_xon_2_xoff[i]);
2238 	}
2239 	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2240 			    I40E_GLPRT_PRC64L(hw->port),
2241 			    pf->offset_loaded, &os->rx_size_64,
2242 			    &ns->rx_size_64);
2243 	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2244 			    I40E_GLPRT_PRC127L(hw->port),
2245 			    pf->offset_loaded, &os->rx_size_127,
2246 			    &ns->rx_size_127);
2247 	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2248 			    I40E_GLPRT_PRC255L(hw->port),
2249 			    pf->offset_loaded, &os->rx_size_255,
2250 			    &ns->rx_size_255);
2251 	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2252 			    I40E_GLPRT_PRC511L(hw->port),
2253 			    pf->offset_loaded, &os->rx_size_511,
2254 			    &ns->rx_size_511);
2255 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
2256 			    I40E_GLPRT_PRC1023L(hw->port),
2257 			    pf->offset_loaded, &os->rx_size_1023,
2258 			    &ns->rx_size_1023);
2259 	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
2260 			    I40E_GLPRT_PRC1522L(hw->port),
2261 			    pf->offset_loaded, &os->rx_size_1522,
2262 			    &ns->rx_size_1522);
2263 	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
2264 			    I40E_GLPRT_PRC9522L(hw->port),
2265 			    pf->offset_loaded, &os->rx_size_big,
2266 			    &ns->rx_size_big);
2267 	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
2268 			    pf->offset_loaded, &os->rx_undersize,
2269 			    &ns->rx_undersize);
2270 	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
2271 			    pf->offset_loaded, &os->rx_fragments,
2272 			    &ns->rx_fragments);
2273 	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
2274 			    pf->offset_loaded, &os->rx_oversize,
2275 			    &ns->rx_oversize);
2276 	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
2277 			    pf->offset_loaded, &os->rx_jabber,
2278 			    &ns->rx_jabber);
2279 	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
2280 			    I40E_GLPRT_PTC64L(hw->port),
2281 			    pf->offset_loaded, &os->tx_size_64,
2282 			    &ns->tx_size_64);
2283 	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
2284 			    I40E_GLPRT_PTC127L(hw->port),
2285 			    pf->offset_loaded, &os->tx_size_127,
2286 			    &ns->tx_size_127);
2287 	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
2288 			    I40E_GLPRT_PTC255L(hw->port),
2289 			    pf->offset_loaded, &os->tx_size_255,
2290 			    &ns->tx_size_255);
2291 	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
2292 			    I40E_GLPRT_PTC511L(hw->port),
2293 			    pf->offset_loaded, &os->tx_size_511,
2294 			    &ns->tx_size_511);
2295 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
2296 			    I40E_GLPRT_PTC1023L(hw->port),
2297 			    pf->offset_loaded, &os->tx_size_1023,
2298 			    &ns->tx_size_1023);
2299 	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
2300 			    I40E_GLPRT_PTC1522L(hw->port),
2301 			    pf->offset_loaded, &os->tx_size_1522,
2302 			    &ns->tx_size_1522);
2303 	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
2304 			    I40E_GLPRT_PTC9522L(hw->port),
2305 			    pf->offset_loaded, &os->tx_size_big,
2306 			    &ns->tx_size_big);
2307 	i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
2308 			   pf->offset_loaded,
2309 			   &os->fd_sb_match, &ns->fd_sb_match);
2310 	/* GLPRT_MSPDC not supported */
2311 	/* GLPRT_XEC not supported */
2312 
2313 	pf->offset_loaded = true;
2314 
2315 	if (pf->main_vsi)
2316 		i40e_update_vsi_stats(pf->main_vsi);
2317 }
2318 
2319 /* Get all statistics of a port */
2320 static void
2321 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2322 {
2323 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2324 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2325 	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2326 	unsigned i;
2327 
2328 	/* Read the hardware counters, then fill the rte_eth_stats struct from them */
2329 	i40e_read_stats_registers(pf, hw);
2330 
2331 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
2332 			pf->main_vsi->eth_stats.rx_multicast +
2333 			pf->main_vsi->eth_stats.rx_broadcast -
2334 			pf->main_vsi->eth_stats.rx_discards;
2335 	stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
2336 			pf->main_vsi->eth_stats.tx_multicast +
2337 			pf->main_vsi->eth_stats.tx_broadcast;
2338 	stats->ibytes   = ns->eth.rx_bytes;
2339 	stats->obytes   = ns->eth.tx_bytes;
2340 	stats->oerrors  = ns->eth.tx_errors +
2341 			pf->main_vsi->eth_stats.tx_errors;
2342 
2343 	/* Rx Errors */
2344 	stats->imissed  = ns->eth.rx_discards +
2345 			pf->main_vsi->eth_stats.rx_discards;
2346 	stats->ierrors  = ns->crc_errors +
2347 			ns->rx_length_errors + ns->rx_undersize +
2348 			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
2349 
2350 	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
2351 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
2352 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
2353 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
2354 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
2355 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
2356 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2357 		    ns->eth.rx_unknown_protocol);
2358 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
2359 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
2360 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
2361 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
2362 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
2363 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
2364 
2365 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
2366 		    ns->tx_dropped_link_down);
2367 	PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
2368 	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
2369 		    ns->illegal_bytes);
2370 	PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
2371 	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
2372 		    ns->mac_local_faults);
2373 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
2374 		    ns->mac_remote_faults);
2375 	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
2376 		    ns->rx_length_errors);
2377 	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
2378 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
2379 	for (i = 0; i < 8; i++) {
2380 		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
2381 				i, ns->priority_xon_rx[i]);
2382 		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
2383 				i, ns->priority_xoff_rx[i]);
2384 	}
2385 	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
2386 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
2387 	for (i = 0; i < 8; i++) {
2388 		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
2389 				i, ns->priority_xon_tx[i]);
2390 		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
2391 				i, ns->priority_xoff_tx[i]);
2392 		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
2393 				i, ns->priority_xon_2_xoff[i]);
2394 	}
2395 	PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
2396 	PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
2397 	PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
2398 	PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
2399 	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
2400 	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
2401 	PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
2402 	PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
2403 	PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
2404 	PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
2405 	PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
2406 	PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
2407 	PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
2408 	PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
2409 	PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
2410 	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
2411 	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
2412 	PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
2413 	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
2414 			ns->mac_short_packet_dropped);
2415 	PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
2416 		    ns->checksum_error);
2417 	PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
2418 	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
2419 }
2420 
2421 /* Reset the statistics */
2422 static void
2423 i40e_dev_stats_reset(struct rte_eth_dev *dev)
2424 {
2425 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2426 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2427 
2428 	/* Mark PF and VSI stats to update the offset, aka "reset" */
2429 	pf->offset_loaded = false;
2430 	if (pf->main_vsi)
2431 		pf->main_vsi->offset_loaded = false;
2432 
2433 	/* read the stats, reading current register values into offset */
2434 	i40e_read_stats_registers(pf, hw);
2435 }
2436 
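/*
 * Number of extended stats exposed by the PF: the i40e_eth_stats fields,
 * the i40e_hw_port_stats fields, plus one RX and one TX priority
 * flow-control counter set for each of the 8 traffic classes.
 */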
2437 static uint32_t
2438 i40e_xstats_calc_num(void)
2439 {
2440 	return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
2441 		(I40E_NB_RXQ_PRIO_XSTATS * 8) +
2442 		(I40E_NB_TXQ_PRIO_XSTATS * 8);
2443 }
2444 
2445 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2446 				     struct rte_eth_xstat_name *xstats_names,
2447 				     __rte_unused unsigned limit)
2448 {
2449 	unsigned count = 0;
2450 	unsigned i, prio;
2451 
2452 	if (xstats_names == NULL)
2453 		return i40e_xstats_calc_num();
2454 
2455 	/* Note: limit checked in rte_eth_xstats_get_names() */
2456 
2457 	/* Get stats from i40e_eth_stats struct */
2458 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2459 		snprintf(xstats_names[count].name,
2460 			 sizeof(xstats_names[count].name),
2461 			 "%s", rte_i40e_stats_strings[i].name);
2462 		count++;
2463 	}
2464 
2465 	/* Get individual stats from i40e_hw_port struct */
2466 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2467 		snprintf(xstats_names[count].name,
2468 			sizeof(xstats_names[count].name),
2469 			 "%s", rte_i40e_hw_port_strings[i].name);
2470 		count++;
2471 	}
2472 
2473 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2474 		for (prio = 0; prio < 8; prio++) {
2475 			snprintf(xstats_names[count].name,
2476 				 sizeof(xstats_names[count].name),
2477 				 "rx_priority%u_%s", prio,
2478 				 rte_i40e_rxq_prio_strings[i].name);
2479 			count++;
2480 		}
2481 	}
2482 
2483 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2484 		for (prio = 0; prio < 8; prio++) {
2485 			snprintf(xstats_names[count].name,
2486 				 sizeof(xstats_names[count].name),
2487 				 "tx_priority%u_%s", prio,
2488 				 rte_i40e_txq_prio_strings[i].name);
2489 			count++;
2490 		}
2491 	}
2492 	return count;
2493 }
2494 
2495 static int
2496 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2497 		    unsigned n)
2498 {
2499 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2500 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2501 	unsigned i, count, prio;
2502 	struct i40e_hw_port_stats *hw_stats = &pf->stats;
2503 
2504 	count = i40e_xstats_calc_num();
2505 	if (n < count)
2506 		return count;
2507 
2508 	i40e_read_stats_registers(pf, hw);
2509 
2510 	if (xstats == NULL)
2511 		return 0;
2512 
2513 	count = 0;
2514 
2515 	/* Get stats from i40e_eth_stats struct */
2516 	for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
2517 		xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
2518 			rte_i40e_stats_strings[i].offset);
2519 		count++;
2520 	}
2521 
2522 	/* Get individual stats from i40e_hw_port struct */
2523 	for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
2524 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
2525 			rte_i40e_hw_port_strings[i].offset);
2526 		count++;
2527 	}
2528 
2529 	for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
2530 		for (prio = 0; prio < 8; prio++) {
2531 			xstats[count].value =
2532 				*(uint64_t *)(((char *)hw_stats) +
2533 				rte_i40e_rxq_prio_strings[i].offset +
2534 				(sizeof(uint64_t) * prio));
2535 			count++;
2536 		}
2537 	}
2538 
2539 	for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
2540 		for (prio = 0; prio < 8; prio++) {
2541 			xstats[count].value =
2542 				*(uint64_t *)(((char *)hw_stats) +
2543 				rte_i40e_txq_prio_strings[i].offset +
2544 				(sizeof(uint64_t) * prio));
2545 			count++;
2546 		}
2547 	}
2548 
2549 	return count;
2550 }
2551 
2552 static int
2553 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
2554 				 __rte_unused uint16_t queue_id,
2555 				 __rte_unused uint8_t stat_idx,
2556 				 __rte_unused uint8_t is_rx)
2557 {
2558 	PMD_INIT_FUNC_TRACE();
2559 
2560 	return -ENOSYS;
2561 }
2562 
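/*
 * Report device capabilities.  Queue limits start from the main VSI and
 * are extended by the VMDq queues when VMDq is enabled; speed_capa is
 * derived from the PHY type (40G for XL710, 25G for XXV710, 1G/10G
 * otherwise).
 */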
2563 static void
2564 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2565 {
2566 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2567 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2568 	struct i40e_vsi *vsi = pf->main_vsi;
2569 
2570 	dev_info->max_rx_queues = vsi->nb_qps;
2571 	dev_info->max_tx_queues = vsi->nb_qps;
2572 	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
2573 	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
2574 	dev_info->max_mac_addrs = vsi->max_macaddrs;
2575 	dev_info->max_vfs = dev->pci_dev->max_vfs;
2576 	dev_info->rx_offload_capa =
2577 		DEV_RX_OFFLOAD_VLAN_STRIP |
2578 		DEV_RX_OFFLOAD_QINQ_STRIP |
2579 		DEV_RX_OFFLOAD_IPV4_CKSUM |
2580 		DEV_RX_OFFLOAD_UDP_CKSUM |
2581 		DEV_RX_OFFLOAD_TCP_CKSUM;
2582 	dev_info->tx_offload_capa =
2583 		DEV_TX_OFFLOAD_VLAN_INSERT |
2584 		DEV_TX_OFFLOAD_QINQ_INSERT |
2585 		DEV_TX_OFFLOAD_IPV4_CKSUM |
2586 		DEV_TX_OFFLOAD_UDP_CKSUM |
2587 		DEV_TX_OFFLOAD_TCP_CKSUM |
2588 		DEV_TX_OFFLOAD_SCTP_CKSUM |
2589 		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2590 		DEV_TX_OFFLOAD_TCP_TSO |
2591 		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
2592 		DEV_TX_OFFLOAD_GRE_TNL_TSO |
2593 		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
2594 		DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
2595 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
2596 						sizeof(uint32_t);
2597 	dev_info->reta_size = pf->hash_lut_size;
2598 	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
2599 
2600 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
2601 		.rx_thresh = {
2602 			.pthresh = I40E_DEFAULT_RX_PTHRESH,
2603 			.hthresh = I40E_DEFAULT_RX_HTHRESH,
2604 			.wthresh = I40E_DEFAULT_RX_WTHRESH,
2605 		},
2606 		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
2607 		.rx_drop_en = 0,
2608 	};
2609 
2610 	dev_info->default_txconf = (struct rte_eth_txconf) {
2611 		.tx_thresh = {
2612 			.pthresh = I40E_DEFAULT_TX_PTHRESH,
2613 			.hthresh = I40E_DEFAULT_TX_HTHRESH,
2614 			.wthresh = I40E_DEFAULT_TX_WTHRESH,
2615 		},
2616 		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
2617 		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
2618 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2619 				ETH_TXQ_FLAGS_NOOFFLOADS,
2620 	};
2621 
2622 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2623 		.nb_max = I40E_MAX_RING_DESC,
2624 		.nb_min = I40E_MIN_RING_DESC,
2625 		.nb_align = I40E_ALIGN_RING_DESC,
2626 	};
2627 
2628 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2629 		.nb_max = I40E_MAX_RING_DESC,
2630 		.nb_min = I40E_MIN_RING_DESC,
2631 		.nb_align = I40E_ALIGN_RING_DESC,
2632 	};
2633 
2634 	if (pf->flags & I40E_FLAG_VMDQ) {
2635 		dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
2636 		dev_info->vmdq_queue_base = dev_info->max_rx_queues;
2637 		dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
2638 						pf->max_nb_vmdq_vsi;
2639 		dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
2640 		dev_info->max_rx_queues += dev_info->vmdq_queue_num;
2641 		dev_info->max_tx_queues += dev_info->vmdq_queue_num;
2642 	}
2643 
2644 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
2645 		/* For XL710 */
2646 		dev_info->speed_capa = ETH_LINK_SPEED_40G;
2647 	else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
2648 		/* For XXV710 */
2649 		dev_info->speed_capa = ETH_LINK_SPEED_25G;
2650 	else
2651 		/* For X710 */
2652 		dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2653 }
2654 
2655 static int
2656 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2657 {
2658 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2659 	struct i40e_vsi *vsi = pf->main_vsi;
2660 	PMD_INIT_FUNC_TRACE();
2661 
2662 	if (on)
2663 		return i40e_vsi_add_vlan(vsi, vlan_id);
2664 	else
2665 		return i40e_vsi_delete_vlan(vsi, vlan_id);
2666 }
2667 
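/*
 * Update the TPID held in the GL_SWT_L2TAGCTRL table, accessed through the
 * debug read/write admin queue commands.  In QinQ mode entry 2 carries the
 * outer tag and entry 3 the inner tag; in single-VLAN mode entry 3 is used
 * for the tag and an inner TPID cannot be set.
 */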
2668 static int
2669 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
2670 		   enum rte_vlan_type vlan_type,
2671 		   uint16_t tpid)
2672 {
2673 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2674 	uint64_t reg_r = 0, reg_w = 0;
2675 	uint16_t reg_id = 0;
2676 	int ret = 0;
2677 	int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
2678 
2679 	switch (vlan_type) {
2680 	case ETH_VLAN_TYPE_OUTER:
2681 		if (qinq)
2682 			reg_id = 2;
2683 		else
2684 			reg_id = 3;
2685 		break;
2686 	case ETH_VLAN_TYPE_INNER:
2687 		if (qinq)
2688 			reg_id = 3;
2689 		else {
2690 			ret = -EINVAL;
2691 			PMD_DRV_LOG(ERR,
2692 				"Unsupported vlan type: inner vlan is only valid in QinQ mode\n");
2693 			return ret;
2694 		}
2695 		break;
2696 	default:
2697 		ret = -EINVAL;
2698 		PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
2699 		return ret;
2700 	}
2701 	ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2702 					  &reg_r, NULL);
2703 	if (ret != I40E_SUCCESS) {
2704 		PMD_DRV_LOG(ERR, "Fail to debug read from "
2705 			    "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
2706 		ret = -EIO;
2707 		return ret;
2708 	}
2709 	PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: "
2710 		    "0x%08"PRIx64"", reg_id, reg_r);
2711 
2712 	reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
2713 	reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
2714 	if (reg_r == reg_w) {
2715 		ret = 0;
2716 		PMD_DRV_LOG(DEBUG, "No need to write");
2717 		return ret;
2718 	}
2719 
2720 	ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2721 					   reg_w, NULL);
2722 	if (ret != I40E_SUCCESS) {
2723 		ret = -EIO;
2724 		PMD_DRV_LOG(ERR, "Fail to debug write to "
2725 			    "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
2726 		return ret;
2727 	}
2728 	PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
2729 		    "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
2730 
2731 	return ret;
2732 }
2733 
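/*
 * Apply the VLAN offload flags selected in dev_conf.rxmode: filtering,
 * stripping and extended (QinQ) mode.  Enabling QinQ also programs the
 * default TPID (ETHER_TYPE_VLAN) into both the outer and inner
 * L2TAGCTRL entries.
 */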
2734 static void
2735 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2736 {
2737 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2738 	struct i40e_vsi *vsi = pf->main_vsi;
2739 
2740 	if (mask & ETH_VLAN_FILTER_MASK) {
2741 		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
2742 			i40e_vsi_config_vlan_filter(vsi, TRUE);
2743 		else
2744 			i40e_vsi_config_vlan_filter(vsi, FALSE);
2745 	}
2746 
2747 	if (mask & ETH_VLAN_STRIP_MASK) {
2748 		/* Enable or disable VLAN stripping */
2749 		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
2750 			i40e_vsi_config_vlan_stripping(vsi, TRUE);
2751 		else
2752 			i40e_vsi_config_vlan_stripping(vsi, FALSE);
2753 	}
2754 
2755 	if (mask & ETH_VLAN_EXTEND_MASK) {
2756 		if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
2757 			i40e_vsi_config_double_vlan(vsi, TRUE);
2758 			/* Set global registers with default ether type value */
2759 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
2760 					   ETHER_TYPE_VLAN);
2761 			i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
2762 					   ETHER_TYPE_VLAN);
2763 		} else {
2764 			i40e_vsi_config_double_vlan(vsi, FALSE);
2765 		}
2766 	}
2767 }
2768 
2769 static void
2770 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
2771 			  __rte_unused uint16_t queue,
2772 			  __rte_unused int on)
2773 {
2774 	PMD_INIT_FUNC_TRACE();
2775 }
2776 
2777 static int
2778 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2779 {
2780 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2781 	struct i40e_vsi *vsi = pf->main_vsi;
2782 	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
2783 	struct i40e_vsi_vlan_pvid_info info;
2784 
2785 	memset(&info, 0, sizeof(info));
2786 	info.on = on;
2787 	if (info.on)
2788 		info.config.pvid = pvid;
2789 	else {
2790 		info.config.reject.tagged =
2791 				data->dev_conf.txmode.hw_vlan_reject_tagged;
2792 		info.config.reject.untagged =
2793 				data->dev_conf.txmode.hw_vlan_reject_untagged;
2794 	}
2795 
2796 	return i40e_vsi_vlan_pvid_set(vsi, &info);
2797 }
2798 
2799 static int
2800 i40e_dev_led_on(struct rte_eth_dev *dev)
2801 {
2802 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2803 	uint32_t mode = i40e_led_get(hw);
2804 
2805 	if (mode == 0)
2806 		i40e_led_set(hw, 0xf, true); /* 0xf means led always on */
2807 
2808 	return 0;
2809 }
2810 
2811 static int
2812 i40e_dev_led_off(struct rte_eth_dev *dev)
2813 {
2814 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2815 	uint32_t mode = i40e_led_get(hw);
2816 
2817 	if (mode != 0)
2818 		i40e_led_set(hw, 0, false);
2819 
2820 	return 0;
2821 }
2822 
2823 static int
2824 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2825 {
2826 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2827 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2828 
2829 	fc_conf->pause_time = pf->fc_conf.pause_time;
2830 	fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
2831 	fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
2832 
2833 	/* Return current mode according to actual setting */
2834 	switch (hw->fc.current_mode) {
2835 	case I40E_FC_FULL:
2836 		fc_conf->mode = RTE_FC_FULL;
2837 		break;
2838 	case I40E_FC_TX_PAUSE:
2839 		fc_conf->mode = RTE_FC_TX_PAUSE;
2840 		break;
2841 	case I40E_FC_RX_PAUSE:
2842 		fc_conf->mode = RTE_FC_RX_PAUSE;
2843 		break;
2844 	case I40E_FC_NONE:
2845 	default:
2846 		fc_conf->mode = RTE_FC_NONE;
2847 	}
2848 
2849 	return 0;
2850 }
2851 
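/*
 * Apply the requested flow-control mode.  The water marks in
 * rte_eth_fc_conf are given in kilobytes and validated against the RX
 * packet buffer size.  Pause timers are programmed per MAC type (the 40G
 * MAC uses the HSEC_CTL registers, others use the PRTDCB registers) and
 * the global watermarks are written both in packets and in bytes.
 */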
2852 static int
2853 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2854 {
2855 	uint32_t mflcn_reg, fctrl_reg, reg;
2856 	uint32_t max_high_water;
2857 	uint8_t i, aq_failure;
2858 	int err;
2859 	struct i40e_hw *hw;
2860 	struct i40e_pf *pf;
2861 	enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
2862 		[RTE_FC_NONE] = I40E_FC_NONE,
2863 		[RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
2864 		[RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
2865 		[RTE_FC_FULL] = I40E_FC_FULL
2866 	};
2867 
2868 	/* The high_water field in rte_eth_fc_conf is expressed in kilobytes */
2869 
2870 	max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
2871 	if ((fc_conf->high_water > max_high_water) ||
2872 			(fc_conf->high_water < fc_conf->low_water)) {
2873 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, "
2874 			"high_water must be <= %d.", max_high_water);
2875 		return -EINVAL;
2876 	}
2877 
2878 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2879 	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2880 	hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
2881 
2882 	pf->fc_conf.pause_time = fc_conf->pause_time;
2883 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
2884 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
2885 
2886 	PMD_INIT_FUNC_TRACE();
2887 
2888 	/* All the link flow control related enable/disable register
2889 	 * configuration is handled by the firmware
2890 	 */
2891 	err = i40e_set_fc(hw, &aq_failure, true);
2892 	if (err < 0)
2893 		return -ENOSYS;
2894 
2895 	if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
2896 		/* Configure flow control refresh threshold,
2897 		 * the value for stat_tx_pause_refresh_timer[8]
2898 		 * is used for global pause operation.
2899 		 */
2900 
2901 		I40E_WRITE_REG(hw,
2902 			       I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
2903 			       pf->fc_conf.pause_time);
2904 
2905 		/* configure the timer value included in transmitted pause
2906 		 * frame,
2907 		 * the value for stat_tx_pause_quanta[8] is used for global
2908 		 * pause operation
2909 		 */
2910 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
2911 			       pf->fc_conf.pause_time);
2912 
2913 		fctrl_reg = I40E_READ_REG(hw,
2914 					  I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
2915 
2916 		if (fc_conf->mac_ctrl_frame_fwd != 0)
2917 			fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
2918 		else
2919 			fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
2920 
2921 		I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
2922 			       fctrl_reg);
2923 	} else {
2924 		/* Configure pause time (2 TCs per register) */
2925 		reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
2926 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
2927 			I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
2928 
2929 		/* Configure flow control refresh threshold value */
2930 		I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
2931 			       pf->fc_conf.pause_time / 2);
2932 
2933 		mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2934 
2935 		/* set or clear MFLCN.PMCF & MFLCN.DPF bits
2936 		 * depending on configuration
2937 		 */
2938 		if (fc_conf->mac_ctrl_frame_fwd != 0) {
2939 			mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
2940 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
2941 		} else {
2942 			mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
2943 			mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
2944 		}
2945 
2946 		I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
2947 	}
2948 
2949 	/* Configure the watermarks, based on both packets and bytes */
2950 	I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
2951 		       (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
2952 		       << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
2953 	I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
2954 		       (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
2955 		       << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
2956 	I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
2957 		       pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
2958 		       << I40E_KILOSHIFT);
2959 	I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
2960 		       pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
2961 		       << I40E_KILOSHIFT);
2962 
2963 	I40E_WRITE_FLUSH(hw);
2964 
2965 	return 0;
2966 }
2967 
2968 static int
2969 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
2970 			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
2971 {
2972 	PMD_INIT_FUNC_TRACE();
2973 
2974 	return -ENOSYS;
2975 }
2976 
2977 /* Add a MAC address, and update filters */
2978 static void
2979 i40e_macaddr_add(struct rte_eth_dev *dev,
2980 		 struct ether_addr *mac_addr,
2981 		 __rte_unused uint32_t index,
2982 		 uint32_t pool)
2983 {
2984 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2985 	struct i40e_mac_filter_info mac_filter;
2986 	struct i40e_vsi *vsi;
2987 	int ret;
2988 
2989 	/* If VMDQ not enabled or configured, return */
2990 	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
2991 			  !pf->nb_cfg_vmdq_vsi)) {
2992 		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
2993 			pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
2994 			pool);
2995 		return;
2996 	}
2997 
2998 	if (pool > pf->nb_cfg_vmdq_vsi) {
2999 		PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3000 				pool, pf->nb_cfg_vmdq_vsi);
3001 		return;
3002 	}
3003 
3004 	(void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3005 	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
3006 		mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3007 	else
3008 		mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3009 
3010 	if (pool == 0)
3011 		vsi = pf->main_vsi;
3012 	else
3013 		vsi = pf->vmdq[pool - 1].vsi;
3014 
3015 	ret = i40e_vsi_add_mac(vsi, &mac_filter);
3016 	if (ret != I40E_SUCCESS) {
3017 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3018 		return;
3019 	}
3020 }
3021 
3022 /* Remove a MAC address, and update filters */
3023 static void
3024 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3025 {
3026 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3027 	struct i40e_vsi *vsi;
3028 	struct rte_eth_dev_data *data = dev->data;
3029 	struct ether_addr *macaddr;
3030 	int ret;
3031 	uint32_t i;
3032 	uint64_t pool_sel;
3033 
3034 	macaddr = &(data->mac_addrs[index]);
3035 
3036 	pool_sel = dev->data->mac_pool_sel[index];
3037 
3038 	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3039 		if (pool_sel & (1ULL << i)) {
3040 			if (i == 0)
3041 				vsi = pf->main_vsi;
3042 			else {
3043 				/* No VMDQ pool enabled or configured */
3044 				if (!(pf->flags & I40E_FLAG_VMDQ) ||
3045 					(i > pf->nb_cfg_vmdq_vsi)) {
3046 					PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
3047 							"/configured");
3048 					return;
3049 				}
3050 				vsi = pf->vmdq[i - 1].vsi;
3051 			}
3052 			ret = i40e_vsi_delete_mac(vsi, macaddr);
3053 
3054 			if (ret) {
3055 				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3056 				return;
3057 			}
3058 		}
3059 	}
3060 }
3061 
3062 /* Set perfect match or hash match of MAC and VLAN for a VF */
3063 static int
3064 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3065 		 struct rte_eth_mac_filter *filter,
3066 		 bool add)
3067 {
3068 	struct i40e_hw *hw;
3069 	struct i40e_mac_filter_info mac_filter;
3070 	struct ether_addr old_mac;
3071 	struct ether_addr *new_mac;
3072 	struct i40e_pf_vf *vf = NULL;
3073 	uint16_t vf_id;
3074 	int ret;
3075 
3076 	if (pf == NULL) {
3077 		PMD_DRV_LOG(ERR, "Invalid PF argument.");
3078 		return -EINVAL;
3079 	}
3080 	hw = I40E_PF_TO_HW(pf);
3081 
3082 	if (filter == NULL) {
3083 		PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3084 		return -EINVAL;
3085 	}
3086 
3087 	new_mac = &filter->mac_addr;
3088 
3089 	if (is_zero_ether_addr(new_mac)) {
3090 		PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3091 		return -EINVAL;
3092 	}
3093 
3094 	vf_id = filter->dst_id;
3095 
3096 	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3097 		PMD_DRV_LOG(ERR, "Invalid argument.");
3098 		return -EINVAL;
3099 	}
3100 	vf = &pf->vfs[vf_id];
3101 
3102 	if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3103 		PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3104 		return -EINVAL;
3105 	}
3106 
3107 	if (add) {
3108 		(void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3109 		(void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3110 				ETHER_ADDR_LEN);
3111 		(void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3112 				 ETHER_ADDR_LEN);
3113 
3114 		mac_filter.filter_type = filter->filter_type;
3115 		ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3116 		if (ret != I40E_SUCCESS) {
3117 			PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3118 			return -1;
3119 		}
3120 		ether_addr_copy(new_mac, &pf->dev_addr);
3121 	} else {
3122 		(void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
3123 				ETHER_ADDR_LEN);
3124 		ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
3125 		if (ret != I40E_SUCCESS) {
3126 			PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
3127 			return -1;
3128 		}
3129 
3130 		/* Clear device address as it has been removed */
3131 		if (is_same_ether_addr(&(pf->dev_addr), new_mac))
3132 			memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
3133 	}
3134 
3135 	return 0;
3136 }
3137 
3138 /* MAC filter handle */
3139 static int
3140 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
3141 		void *arg)
3142 {
3143 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3144 	struct rte_eth_mac_filter *filter;
3145 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3146 	int ret = I40E_NOT_SUPPORTED;
3147 
3148 	filter = (struct rte_eth_mac_filter *)(arg);
3149 
3150 	switch (filter_op) {
3151 	case RTE_ETH_FILTER_NOP:
3152 		ret = I40E_SUCCESS;
3153 		break;
3154 	case RTE_ETH_FILTER_ADD:
3155 		i40e_pf_disable_irq0(hw);
3156 		if (filter->is_vf)
3157 			ret = i40e_vf_mac_filter_set(pf, filter, 1);
3158 		i40e_pf_enable_irq0(hw);
3159 		break;
3160 	case RTE_ETH_FILTER_DELETE:
3161 		i40e_pf_disable_irq0(hw);
3162 		if (filter->is_vf)
3163 			ret = i40e_vf_mac_filter_set(pf, filter, 0);
3164 		i40e_pf_enable_irq0(hw);
3165 		break;
3166 	default:
3167 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
3168 		ret = I40E_ERR_PARAM;
3169 		break;
3170 	}
3171 
3172 	return ret;
3173 }
3174 
3175 static int
3176 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3177 {
3178 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3179 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3180 	int ret;
3181 
3182 	if (!lut)
3183 		return -EINVAL;
3184 
3185 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3186 		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
3187 					  lut, lut_size);
3188 		if (ret) {
3189 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3190 			return ret;
3191 		}
3192 	} else {
3193 		uint32_t *lut_dw = (uint32_t *)lut;
3194 		uint16_t i, lut_size_dw = lut_size / 4;
3195 
3196 		for (i = 0; i < lut_size_dw; i++)
3197 			lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i));
3198 	}
3199 
3200 	return 0;
3201 }
3202 
3203 static int
3204 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3205 {
3206 	struct i40e_pf *pf;
3207 	struct i40e_hw *hw;
3208 	int ret;
3209 
3210 	if (!vsi || !lut)
3211 		return -EINVAL;
3212 
3213 	pf = I40E_VSI_TO_PF(vsi);
3214 	hw = I40E_VSI_TO_HW(vsi);
3215 
3216 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
3217 		ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
3218 					  lut, lut_size);
3219 		if (ret) {
3220 			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3221 			return ret;
3222 		}
3223 	} else {
3224 		uint32_t *lut_dw = (uint32_t *)lut;
3225 		uint16_t i, lut_size_dw = lut_size / 4;
3226 
3227 		for (i = 0; i < lut_size_dw; i++)
3228 			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
3229 		I40E_WRITE_FLUSH(hw);
3230 	}
3231 
3232 	return 0;
3233 }
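
/*
 * Note on the register fallback in the two functions above: the LUT holds
 * one byte per entry, so e.g. a 128-byte LUT is read or written as
 * lut_size / 4 = 32 32-bit I40E_PFQF_HLUT registers; lut_size is presumably
 * a multiple of 4.
 */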
3234 
3235 static int
3236 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
3237 			 struct rte_eth_rss_reta_entry64 *reta_conf,
3238 			 uint16_t reta_size)
3239 {
3240 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3241 	uint16_t i, lut_size = pf->hash_lut_size;
3242 	uint16_t idx, shift;
3243 	uint8_t *lut;
3244 	int ret;
3245 
3246 	if (reta_size != lut_size ||
3247 		reta_size > ETH_RSS_RETA_SIZE_512) {
3248 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3249 			"(%d) doesn't match the number hardware can support "
3250 			"(%d)\n", reta_size, lut_size);
3251 		return -EINVAL;
3252 	}
3253 
3254 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3255 	if (!lut) {
3256 		PMD_DRV_LOG(ERR, "No memory can be allocated");
3257 		return -ENOMEM;
3258 	}
3259 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3260 	if (ret)
3261 		goto out;
3262 	for (i = 0; i < reta_size; i++) {
3263 		idx = i / RTE_RETA_GROUP_SIZE;
3264 		shift = i % RTE_RETA_GROUP_SIZE;
3265 		if (reta_conf[idx].mask & (1ULL << shift))
3266 			lut[i] = reta_conf[idx].reta[shift];
3267 	}
3268 	ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
3269 
3270 out:
3271 	rte_free(lut);
3272 
3273 	return ret;
3274 }
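
/*
 * Worked example for the idx/shift computation above (assuming
 * RTE_RETA_GROUP_SIZE is 64): for RETA entry i = 70, idx = 70 / 64 = 1 and
 * shift = 70 % 64 = 6, so bit 6 of reta_conf[1].mask selects whether
 * reta_conf[1].reta[6] is applied to lut[70].
 */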
3275 
3276 static int
3277 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
3278 			struct rte_eth_rss_reta_entry64 *reta_conf,
3279 			uint16_t reta_size)
3280 {
3281 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3282 	uint16_t i, lut_size = pf->hash_lut_size;
3283 	uint16_t idx, shift;
3284 	uint8_t *lut;
3285 	int ret;
3286 
3287 	if (reta_size != lut_size ||
3288 		reta_size > ETH_RSS_RETA_SIZE_512) {
3289 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3290 			"(%d) doesn't match the number hardware can support "
3291 			"(%d)\n", reta_size, lut_size);
3292 		return -EINVAL;
3293 	}
3294 
3295 	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
3296 	if (!lut) {
3297 		PMD_DRV_LOG(ERR, "No memory can be allocated");
3298 		return -ENOMEM;
3299 	}
3300 
3301 	ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
3302 	if (ret)
3303 		goto out;
3304 	for (i = 0; i < reta_size; i++) {
3305 		idx = i / RTE_RETA_GROUP_SIZE;
3306 		shift = i % RTE_RETA_GROUP_SIZE;
3307 		if (reta_conf[idx].mask & (1ULL << shift))
3308 			reta_conf[idx].reta[shift] = lut[i];
3309 	}
3310 
3311 out:
3312 	rte_free(lut);
3313 
3314 	return ret;
3315 }
3316 
3317 /**
3318  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
3319  * @hw:   pointer to the HW structure
3320  * @mem:  pointer to mem struct to fill out
3321  * @size: size of memory requested
3322  * @alignment: what to align the allocation to
3323  **/
3324 enum i40e_status_code
3325 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3326 			struct i40e_dma_mem *mem,
3327 			u64 size,
3328 			u32 alignment)
3329 {
3330 	const struct rte_memzone *mz = NULL;
3331 	char z_name[RTE_MEMZONE_NAMESIZE];
3332 
3333 	if (!mem)
3334 		return I40E_ERR_PARAM;
3335 
3336 	snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
3337 	mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
3338 					 alignment, RTE_PGSIZE_2M);
3339 	if (!mz)
3340 		return I40E_ERR_NO_MEMORY;
3341 
3342 	mem->size = size;
3343 	mem->va = mz->addr;
3344 	mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
3345 	mem->zone = (const void *)mz;
3346 	PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
3347 		    "%"PRIu64, mz->name, mem->pa);
3348 
3349 	return I40E_SUCCESS;
3350 }
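
/*
 * The randomized memzone name above avoids collisions when the shared code
 * requests several DMA areas, and the bounded reservation presumably keeps
 * each buffer within a single 2M hugepage so its physical memory stays
 * contiguous.
 */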
3351 
3352 /**
3353  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
3354  * @hw:   pointer to the HW structure
3355  * @mem:  ptr to mem struct to free
3356  **/
3357 enum i40e_status_code
3358 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3359 		    struct i40e_dma_mem *mem)
3360 {
3361 	if (!mem)
3362 		return I40E_ERR_PARAM;
3363 
3364 	PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
3365 		    "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
3366 		    mem->pa);
3367 	rte_memzone_free((const struct rte_memzone *)mem->zone);
3368 	mem->zone = NULL;
3369 	mem->va = NULL;
3370 	mem->pa = (u64)0;
3371 
3372 	return I40E_SUCCESS;
3373 }
3374 
3375 /**
3376  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
3377  * @hw:   pointer to the HW structure
3378  * @mem:  pointer to mem struct to fill out
3379  * @size: size of memory requested
3380  **/
3381 enum i40e_status_code
3382 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3383 			 struct i40e_virt_mem *mem,
3384 			 u32 size)
3385 {
3386 	if (!mem)
3387 		return I40E_ERR_PARAM;
3388 
3389 	mem->size = size;
3390 	mem->va = rte_zmalloc("i40e", size, 0);
3391 
3392 	if (mem->va)
3393 		return I40E_SUCCESS;
3394 	else
3395 		return I40E_ERR_NO_MEMORY;
3396 }
3397 
3398 /**
3399  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
3400  * @hw:   pointer to the HW structure
3401  * @mem:  pointer to mem struct to free
3402  **/
3403 enum i40e_status_code
3404 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
3405 		     struct i40e_virt_mem *mem)
3406 {
3407 	if (!mem)
3408 		return I40E_ERR_PARAM;
3409 
3410 	rte_free(mem->va);
3411 	mem->va = NULL;
3412 
3413 	return I40E_SUCCESS;
3414 }
3415 
3416 void
3417 i40e_init_spinlock_d(struct i40e_spinlock *sp)
3418 {
3419 	rte_spinlock_init(&sp->spinlock);
3420 }
3421 
3422 void
3423 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
3424 {
3425 	rte_spinlock_lock(&sp->spinlock);
3426 }
3427 
3428 void
3429 i40e_release_spinlock_d(struct i40e_spinlock *sp)
3430 {
3431 	rte_spinlock_unlock(&sp->spinlock);
3432 }
3433 
3434 void
3435 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
3436 {
3437 	return;
3438 }
3439 
3440 /**
3441  * Get the hardware capabilities, which will be parsed
3442  * and saved into struct i40e_hw.
3443  */
3444 static int
3445 i40e_get_cap(struct i40e_hw *hw)
3446 {
3447 	struct i40e_aqc_list_capabilities_element_resp *buf;
3448 	uint16_t len, size = 0;
3449 	int ret;
3450 
3451 	/* Calculate a buffer large enough to temporarily hold the response data */
3452 	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
3453 						I40E_MAX_CAP_ELE_NUM;
3454 	buf = rte_zmalloc("i40e", len, 0);
3455 	if (!buf) {
3456 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
3457 		return I40E_ERR_NO_MEMORY;
3458 	}
3459 
3460 	/* Get and parse the capabilities, then save them to hw */
3461 	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
3462 			i40e_aqc_opc_list_func_capabilities, NULL);
3463 	if (ret != I40E_SUCCESS)
3464 		PMD_DRV_LOG(ERR, "Failed to discover capabilities");
3465 
3466 	/* Free the temporary buffer after being used */
3467 	rte_free(buf);
3468 
3469 	return ret;
3470 }
3471 
3472 static int
3473 i40e_pf_parameter_init(struct rte_eth_dev *dev)
3474 {
3475 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3476 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3477 	uint16_t qp_count = 0, vsi_count = 0;
3478 
3479 	if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
3480 		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
3481 		return -EINVAL;
3482 	}
3483 	/* Add the parameter init for LFC */
3484 	pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
3485 	pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
3486 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
3487 
3488 	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
3489 	pf->max_num_vsi = hw->func_caps.num_vsis;
3490 	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
3491 	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
3492 	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3493 
3494 	/* FDir queue/VSI allocation */
3495 	pf->fdir_qp_offset = 0;
3496 	if (hw->func_caps.fd) {
3497 		pf->flags |= I40E_FLAG_FDIR;
3498 		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
3499 	} else {
3500 		pf->fdir_nb_qps = 0;
3501 	}
3502 	qp_count += pf->fdir_nb_qps;
3503 	vsi_count += 1;
3504 
3505 	/* LAN queue/VSI allocation */
3506 	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
3507 	if (!hw->func_caps.rss) {
3508 		pf->lan_nb_qps = 1;
3509 	} else {
3510 		pf->flags |= I40E_FLAG_RSS;
3511 		if (hw->mac.type == I40E_MAC_X722)
3512 			pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
3513 		pf->lan_nb_qps = pf->lan_nb_qp_max;
3514 	}
3515 	qp_count += pf->lan_nb_qps;
3516 	vsi_count += 1;
3517 
3518 	/* VF queue/VSI allocation */
3519 	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
3520 	if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
3521 		pf->flags |= I40E_FLAG_SRIOV;
3522 		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
3523 		pf->vf_num = dev->pci_dev->max_vfs;
3524 		PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
3525 			    "in total %u queues", pf->vf_num, pf->vf_nb_qps,
3526 			    pf->vf_nb_qps * pf->vf_num);
3527 	} else {
3528 		pf->vf_nb_qps = 0;
3529 		pf->vf_num = 0;
3530 	}
3531 	qp_count += pf->vf_nb_qps * pf->vf_num;
3532 	vsi_count += pf->vf_num;
3533 
3534 	/* VMDq queue/VSI allocation */
3535 	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
3536 	pf->vmdq_nb_qps = 0;
3537 	pf->max_nb_vmdq_vsi = 0;
3538 	if (hw->func_caps.vmdq) {
3539 		if (qp_count < hw->func_caps.num_tx_qp &&
3540 			vsi_count < hw->func_caps.num_vsis) {
3541 			pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
3542 				qp_count) / pf->vmdq_nb_qp_max;
3543 
3544 			/* Limit the maximum number of VMDq VSIs to the
3545 			 * maximum that ethdev can support
3546 			 */
3547 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3548 				hw->func_caps.num_vsis - vsi_count);
3549 			pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
3550 				ETH_64_POOLS);
3551 			if (pf->max_nb_vmdq_vsi) {
3552 				pf->flags |= I40E_FLAG_VMDQ;
3553 				pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
3554 				PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
3555 					    "per VMDQ VSI, in total %u queues",
3556 					    pf->max_nb_vmdq_vsi,
3557 					    pf->vmdq_nb_qps, pf->vmdq_nb_qps *
3558 					    pf->max_nb_vmdq_vsi);
3559 			} else {
3560 				PMD_DRV_LOG(INFO, "No enough queues left for "
3561 					    "VMDq");
3562 			}
3563 		} else {
3564 			PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
3565 		}
3566 	}
3567 	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
3568 	vsi_count += pf->max_nb_vmdq_vsi;
3569 
3570 	if (hw->func_caps.dcb)
3571 		pf->flags |= I40E_FLAG_DCB;
3572 
3573 	if (qp_count > hw->func_caps.num_tx_qp) {
3574 		PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
3575 			    "the hardware maximum %u", qp_count,
3576 			    hw->func_caps.num_tx_qp);
3577 		return -EINVAL;
3578 	}
3579 	if (vsi_count > hw->func_caps.num_vsis) {
3580 		PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
3581 			    "the hardware maximum %u", vsi_count,
3582 			    hw->func_caps.num_vsis);
3583 		return -EINVAL;
3584 	}
3585 
3586 	return 0;
3587 }
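
/*
 * Illustrative budget (hypothetical build defaults of 64 LAN queues and
 * 4 queues per VF, with 4 VFs and FDIR enabled): qp_count = 1 (FDIR) +
 * 64 (LAN) + 4 * 4 (VFs) = 81 and vsi_count = 1 + 1 + 4 = 6 before VMDq;
 * the remaining (num_tx_qp - 81) queues are then split into VMDq VSIs of
 * vmdq_nb_qp_max queues each, capped by the remaining VSIs and ETH_64_POOLS.
 */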
3588 
3589 static int
3590 i40e_pf_get_switch_config(struct i40e_pf *pf)
3591 {
3592 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3593 	struct i40e_aqc_get_switch_config_resp *switch_config;
3594 	struct i40e_aqc_switch_config_element_resp *element;
3595 	uint16_t start_seid = 0, num_reported;
3596 	int ret;
3597 
3598 	switch_config = (struct i40e_aqc_get_switch_config_resp *)\
3599 			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
3600 	if (!switch_config) {
3601 		PMD_DRV_LOG(ERR, "Failed to allocated memory");
3602 		return -ENOMEM;
3603 	}
3604 
3605 	/* Get the switch configurations */
3606 	ret = i40e_aq_get_switch_config(hw, switch_config,
3607 		I40E_AQ_LARGE_BUF, &start_seid, NULL);
3608 	if (ret != I40E_SUCCESS) {
3609 		PMD_DRV_LOG(ERR, "Failed to get switch configurations");
3610 		goto fail;
3611 	}
3612 	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
3613 	if (num_reported != 1) { /* The number should be 1 */
3614 		PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
3615 		goto fail;
3616 	}
3617 
3618 	/* Parse the switch configuration elements */
3619 	element = &(switch_config->element[0]);
3620 	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
3621 		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
3622 		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
3623 	} else
3624 		PMD_DRV_LOG(INFO, "Unknown element type");
3625 
3626 fail:
3627 	rte_free(switch_config);
3628 
3629 	return ret;
3630 }
3631 
3632 static int
3633 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
3634 			uint32_t num)
3635 {
3636 	struct pool_entry *entry;
3637 
3638 	if (pool == NULL || num == 0)
3639 		return -EINVAL;
3640 
3641 	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
3642 	if (entry == NULL) {
3643 		PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
3644 		return -ENOMEM;
3645 	}
3646 
3647 	/* Initialize the queue heap */
3648 	pool->num_free = num;
3649 	pool->num_alloc = 0;
3650 	pool->base = base;
3651 	LIST_INIT(&pool->alloc_list);
3652 	LIST_INIT(&pool->free_list);
3653 
3654 	/* Initialize element  */
3655 	entry->base = 0;
3656 	entry->len = num;
3657 
3658 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
3659 	return 0;
3660 }
3661 
3662 static void
3663 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
3664 {
3665 	struct pool_entry *entry, *next_entry;
3666 
3667 	if (pool == NULL)
3668 		return;
3669 
3670 	for (entry = LIST_FIRST(&pool->alloc_list);
3671 			entry && (next_entry = LIST_NEXT(entry, next), 1);
3672 			entry = next_entry) {
3673 		LIST_REMOVE(entry, next);
3674 		rte_free(entry);
3675 	}
3676 
3677 	for (entry = LIST_FIRST(&pool->free_list);
3678 			entry && (next_entry = LIST_NEXT(entry, next), 1);
3679 			entry = next_entry) {
3680 		LIST_REMOVE(entry, next);
3681 		rte_free(entry);
3682 	}
3683 
3684 	pool->num_free = 0;
3685 	pool->num_alloc = 0;
3686 	pool->base = 0;
3687 	LIST_INIT(&pool->alloc_list);
3688 	LIST_INIT(&pool->free_list);
3689 }
3690 
3691 static int
3692 i40e_res_pool_free(struct i40e_res_pool_info *pool,
3693 		       uint32_t base)
3694 {
3695 	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
3696 	uint32_t pool_offset;
3697 	int insert;
3698 
3699 	if (pool == NULL) {
3700 		PMD_DRV_LOG(ERR, "Invalid parameter");
3701 		return -EINVAL;
3702 	}
3703 
3704 	pool_offset = base - pool->base;
3705 	/* Lookup in alloc list */
3706 	LIST_FOREACH(entry, &pool->alloc_list, next) {
3707 		if (entry->base == pool_offset) {
3708 			valid_entry = entry;
3709 			LIST_REMOVE(entry, next);
3710 			break;
3711 		}
3712 	}
3713 
3714 	/* Not found, return */
3715 	if (valid_entry == NULL) {
3716 		PMD_DRV_LOG(ERR, "Failed to find entry");
3717 		return -EINVAL;
3718 	}
3719 
3720 	/**
3721 	 * Found it, move it to the free list and try to merge.
3722 	 * To make merging easier, keep the list sorted by queue base.
3723 	 * Find the adjacent prev and next entries.
3724 	 */
3725 	prev = next = NULL;
3726 	LIST_FOREACH(entry, &pool->free_list, next) {
3727 		if (entry->base > valid_entry->base) {
3728 			next = entry;
3729 			break;
3730 		}
3731 		prev = entry;
3732 	}
3733 
3734 	insert = 0;
3735 	/* Try to merge with the next one */
3736 	if (next != NULL) {
3737 		/* Merge with next one */
3738 		if (valid_entry->base + valid_entry->len == next->base) {
3739 			next->base = valid_entry->base;
3740 			next->len += valid_entry->len;
3741 			rte_free(valid_entry);
3742 			valid_entry = next;
3743 			insert = 1;
3744 		}
3745 	}
3746 
3747 	if (prev != NULL) {
3748 		/* Merge with previous one */
3749 		if (prev->base + prev->len == valid_entry->base) {
3750 			prev->len += valid_entry->len;
3751 			/* If it was merged with the next one, remove that node */
3752 			if (insert == 1) {
3753 				LIST_REMOVE(valid_entry, next);
3754 				rte_free(valid_entry);
3755 			} else {
3756 				rte_free(valid_entry);
3757 				insert = 1;
3758 			}
3759 		}
3760 	}
3761 
3762 	/* No entry found to merge with, insert it */
3763 	if (insert == 0) {
3764 		if (prev != NULL)
3765 			LIST_INSERT_AFTER(prev, valid_entry, next);
3766 		else if (next != NULL)
3767 			LIST_INSERT_BEFORE(next, valid_entry, next);
3768 		else /* It's empty list, insert to head */
3769 			LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
3770 	}
3771 
3772 	pool->num_free += valid_entry->len;
3773 	pool->num_alloc -= valid_entry->len;
3774 
3775 	return 0;
3776 }
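
/*
 * Example of the merge above (lengths in queues): with free entries [0, 4)
 * and [8, 16) and a freed block of base 4, length 4, the block first merges
 * with [8, 16) to give [4, 16), then with [0, 4), leaving a single free
 * entry [0, 16).
 */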
3777 
3778 static int
3779 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
3780 		       uint16_t num)
3781 {
3782 	struct pool_entry *entry, *valid_entry;
3783 
3784 	if (pool == NULL || num == 0) {
3785 		PMD_DRV_LOG(ERR, "Invalid parameter");
3786 		return -EINVAL;
3787 	}
3788 
3789 	if (pool->num_free < num) {
3790 		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
3791 			    num, pool->num_free);
3792 		return -ENOMEM;
3793 	}
3794 
3795 	valid_entry = NULL;
3796 	/* Look up the free list and find the best-fit entry */
3797 	LIST_FOREACH(entry, &pool->free_list, next) {
3798 		if (entry->len >= num) {
3799 			/* Find best one */
3800 			if (entry->len == num) {
3801 				valid_entry = entry;
3802 				break;
3803 			}
3804 			if (valid_entry == NULL || valid_entry->len > entry->len)
3805 				valid_entry = entry;
3806 		}
3807 	}
3808 
3809 	/* No entry found to satisfy the request, return */
3810 	if (valid_entry == NULL) {
3811 		PMD_DRV_LOG(ERR, "No valid entry found");
3812 		return -ENOMEM;
3813 	}
3814 	/**
3815 	 * The entry has exactly the requested number of queues,
3816 	 * remove it from the free_list.
3817 	 */
3818 	if (valid_entry->len == num) {
3819 		LIST_REMOVE(valid_entry, next);
3820 	} else {
3821 		/**
3822 		 * The entry has more queues than requested,
3823 		 * create a new entry for the alloc_list and adjust the
3824 		 * queue base and count of the remaining free_list entry.
3825 		 */
3826 		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
3827 		if (entry == NULL) {
3828 			PMD_DRV_LOG(ERR, "Failed to allocate memory for "
3829 				    "resource pool");
3830 			return -ENOMEM;
3831 		}
3832 		entry->base = valid_entry->base;
3833 		entry->len = num;
3834 		valid_entry->base += num;
3835 		valid_entry->len -= num;
3836 		valid_entry = entry;
3837 	}
3838 
3839 	/* Insert it into alloc list, not sorted */
3840 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
3841 
3842 	pool->num_free -= valid_entry->len;
3843 	pool->num_alloc += valid_entry->len;
3844 
3845 	return valid_entry->base + pool->base;
3846 }
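
/*
 * Example of the best-fit lookup above: with free entries of length 8 and 4,
 * a request for 3 queues picks the length-4 entry (the smallest that still
 * fits), moves a new 3-queue entry onto the alloc list and leaves a 1-queue
 * remainder on the free list.
 */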
3847 
3848 /**
3849  * bitmap_is_subset - Check whether src2 is subset of src1
3850  **/
3851 static inline int
3852 bitmap_is_subset(uint8_t src1, uint8_t src2)
3853 {
3854 	return !((src1 ^ src2) & src2);
3855 }
3856 
3857 static enum i40e_status_code
3858 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
3859 {
3860 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3861 
3862 	/* If DCB is not supported, only default TC is supported */
3863 	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
3864 		PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
3865 		return I40E_NOT_SUPPORTED;
3866 	}
3867 
3868 	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
3869 		PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
3870 			    "HW support 0x%x", hw->func_caps.enabled_tcmap,
3871 			    enabled_tcmap);
3872 		return I40E_NOT_SUPPORTED;
3873 	}
3874 	return I40E_SUCCESS;
3875 }
3876 
3877 int
3878 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
3879 				struct i40e_vsi_vlan_pvid_info *info)
3880 {
3881 	struct i40e_hw *hw;
3882 	struct i40e_vsi_context ctxt;
3883 	uint8_t vlan_flags = 0;
3884 	int ret;
3885 
3886 	if (vsi == NULL || info == NULL) {
3887 		PMD_DRV_LOG(ERR, "invalid parameters");
3888 		return I40E_ERR_PARAM;
3889 	}
3890 
3891 	if (info->on) {
3892 		vsi->info.pvid = info->config.pvid;
3893 		/**
3894 		 * If insert pvid is enabled, only tagged pkts are
3895 		 * allowed to be sent out.
3896 		 */
3897 		vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
3898 				I40E_AQ_VSI_PVLAN_MODE_TAGGED;
3899 	} else {
3900 		vsi->info.pvid = 0;
3901 		if (info->config.reject.tagged == 0)
3902 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
3903 
3904 		if (info->config.reject.untagged == 0)
3905 			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
3906 	}
3907 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
3908 					I40E_AQ_VSI_PVLAN_MODE_MASK);
3909 	vsi->info.port_vlan_flags |= vlan_flags;
3910 	vsi->info.valid_sections =
3911 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3912 	memset(&ctxt, 0, sizeof(ctxt));
3913 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3914 	ctxt.seid = vsi->seid;
3915 
3916 	hw = I40E_VSI_TO_HW(vsi);
3917 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3918 	if (ret != I40E_SUCCESS)
3919 		PMD_DRV_LOG(ERR, "Failed to update VSI params");
3920 
3921 	return ret;
3922 }
3923 
3924 static int
3925 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
3926 {
3927 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3928 	int i, ret;
3929 	struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
3930 
3931 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
3932 	if (ret != I40E_SUCCESS)
3933 		return ret;
3934 
3935 	if (!vsi->seid) {
3936 		PMD_DRV_LOG(ERR, "seid not valid");
3937 		return -EINVAL;
3938 	}
3939 
3940 	memset(&tc_bw_data, 0, sizeof(tc_bw_data));
3941 	tc_bw_data.tc_valid_bits = enabled_tcmap;
3942 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3943 		tc_bw_data.tc_bw_credits[i] =
3944 			(enabled_tcmap & (1 << i)) ? 1 : 0;
3945 
3946 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
3947 	if (ret != I40E_SUCCESS) {
3948 		PMD_DRV_LOG(ERR, "Failed to configure TC BW");
3949 		return ret;
3950 	}
3951 
3952 	(void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
3953 					sizeof(vsi->info.qs_handle));
3954 	return I40E_SUCCESS;
3955 }
3956 
3957 static enum i40e_status_code
3958 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
3959 				 struct i40e_aqc_vsi_properties_data *info,
3960 				 uint8_t enabled_tcmap)
3961 {
3962 	enum i40e_status_code ret;
3963 	int i, total_tc = 0;
3964 	uint16_t qpnum_per_tc, bsf, qp_idx;
3965 
3966 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
3967 	if (ret != I40E_SUCCESS)
3968 		return ret;
3969 
3970 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3971 		if (enabled_tcmap & (1 << i))
3972 			total_tc++;
3973 	vsi->enabled_tc = enabled_tcmap;
3974 
3975 	/* Number of queues per enabled TC */
3976 	qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
3977 	qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
3978 	bsf = rte_bsf32(qpnum_per_tc);
3979 
3980 	/* Adjust the queue number to actual queues that can be applied */
3981 	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
3982 		vsi->nb_qps = qpnum_per_tc * total_tc;
3983 
3984 	/**
3985 	 * Configure TC and queue mapping parameters. For each enabled TC,
3986 	 * allocate qpnum_per_tc queues to it; a disabled TC is served by
3987 	 * the default queue.
3988 	 */
3989 	qp_idx = 0;
3990 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3991 		if (vsi->enabled_tc & (1 << i)) {
3992 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
3993 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
3994 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
3995 			qp_idx += qpnum_per_tc;
3996 		} else
3997 			info->tc_mapping[i] = 0;
3998 	}
3999 
4000 	/* Associate queue number with VSI */
4001 	if (vsi->type == I40E_VSI_SRIOV) {
4002 		info->mapping_flags |=
4003 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4004 		for (i = 0; i < vsi->nb_qps; i++)
4005 			info->queue_mapping[i] =
4006 				rte_cpu_to_le_16(vsi->base_queue + i);
4007 	} else {
4008 		info->mapping_flags |=
4009 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4010 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4011 	}
4012 	info->valid_sections |=
4013 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4014 
4015 	return I40E_SUCCESS;
4016 }
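
/*
 * Worked example (assuming i40e_align_floor rounds down to a power of two):
 * with enabled_tcmap = 0x3 and nb_qps = 8, total_tc = 2, qpnum_per_tc = 4
 * and bsf = 2, so tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 * (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) and tc_mapping[1] uses a queue
 * offset of 4 with the same queue-number exponent.
 */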
4017 
4018 static int
4019 i40e_veb_release(struct i40e_veb *veb)
4020 {
4021 	struct i40e_vsi *vsi;
4022 	struct i40e_hw *hw;
4023 
4024 	if (veb == NULL)
4025 		return -EINVAL;
4026 
4027 	if (!TAILQ_EMPTY(&veb->head)) {
4028 		PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4029 		return -EACCES;
4030 	}
4031 	/* associate_vsi field is NULL for floating VEB */
4032 	if (veb->associate_vsi != NULL) {
4033 		vsi = veb->associate_vsi;
4034 		hw = I40E_VSI_TO_HW(vsi);
4035 
4036 		vsi->uplink_seid = veb->uplink_seid;
4037 		vsi->veb = NULL;
4038 	} else {
4039 		veb->associate_pf->main_vsi->floating_veb = NULL;
4040 		hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
4041 	}
4042 
4043 	i40e_aq_delete_element(hw, veb->seid, NULL);
4044 	rte_free(veb);
4045 	return I40E_SUCCESS;
4046 }
4047 
4048 /* Setup a veb */
4049 static struct i40e_veb *
4050 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
4051 {
4052 	struct i40e_veb *veb;
4053 	int ret;
4054 	struct i40e_hw *hw;
4055 
4056 	if (pf == NULL) {
4057 		PMD_DRV_LOG(ERR,
4058 			    "veb setup failed, associated PF shouldn't be NULL");
4059 		return NULL;
4060 	}
4061 	hw = I40E_PF_TO_HW(pf);
4062 
4063 	veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
4064 	if (!veb) {
4065 		PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
4066 		goto fail;
4067 	}
4068 
4069 	veb->associate_vsi = vsi;
4070 	veb->associate_pf = pf;
4071 	TAILQ_INIT(&veb->head);
4072 	veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
4073 
4074 	/* create floating veb if vsi is NULL */
4075 	if (vsi != NULL) {
4076 		ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
4077 				      I40E_DEFAULT_TCMAP, false,
4078 				      &veb->seid, false, NULL);
4079 	} else {
4080 		ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
4081 				      true, &veb->seid, false, NULL);
4082 	}
4083 
4084 	if (ret != I40E_SUCCESS) {
4085 		PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
4086 			    hw->aq.asq_last_status);
4087 		goto fail;
4088 	}
4089 
4090 	/* get statistics index */
4091 	ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
4092 				&veb->stats_idx, NULL, NULL, NULL);
4093 	if (ret != I40E_SUCCESS) {
4094 		PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
4095 			    hw->aq.asq_last_status);
4096 		goto fail;
4097 	}
4098 	/* Get VEB bandwidth, to be implemented */
4099 	/* Now associated vsi binding to the VEB, set uplink to this VEB */
4100 	if (vsi)
4101 		vsi->uplink_seid = veb->seid;
4102 
4103 	return veb;
4104 fail:
4105 	rte_free(veb);
4106 	return NULL;
4107 }
4108 
4109 int
4110 i40e_vsi_release(struct i40e_vsi *vsi)
4111 {
4112 	struct i40e_pf *pf;
4113 	struct i40e_hw *hw;
4114 	struct i40e_vsi_list *vsi_list;
4115 	void *temp;
4116 	int ret;
4117 	struct i40e_mac_filter *f;
4118 	uint16_t user_param = vsi->user_param;
4119 
4120 	if (!vsi)
4121 		return I40E_SUCCESS;
4122 
4123 	pf = I40E_VSI_TO_PF(vsi);
4124 	hw = I40E_VSI_TO_HW(vsi);
4125 
4126 	/* VSI has child to attach, release child first */
4127 	if (vsi->veb) {
4128 		TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
4129 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4130 				return -1;
4131 		}
4132 		i40e_veb_release(vsi->veb);
4133 	}
4134 
4135 	if (vsi->floating_veb) {
4136 		TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
4137 			if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
4138 				return -1;
4139 		}
4140 	}
4141 
4142 	/* Remove all macvlan filters of the VSI */
4143 	i40e_vsi_remove_all_macvlan_filter(vsi);
4144 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
4145 		rte_free(f);
4146 
4147 	if (vsi->type != I40E_VSI_MAIN &&
4148 	    ((vsi->type != I40E_VSI_SRIOV) ||
4149 	    !pf->floating_veb_list[user_param])) {
4150 		/* Remove vsi from parent's sibling list */
4151 		if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
4152 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4153 			return I40E_ERR_PARAM;
4154 		}
4155 		TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
4156 				&vsi->sib_vsi_list, list);
4157 
4158 		/* Remove all switch element of the VSI */
4159 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4160 		if (ret != I40E_SUCCESS)
4161 			PMD_DRV_LOG(ERR, "Failed to delete element");
4162 	}
4163 
4164 	if ((vsi->type == I40E_VSI_SRIOV) &&
4165 	    pf->floating_veb_list[user_param]) {
4166 		/* Remove vsi from parent's sibling list */
4167 		if (vsi->parent_vsi == NULL ||
4168 		    vsi->parent_vsi->floating_veb == NULL) {
4169 			PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
4170 			return I40E_ERR_PARAM;
4171 		}
4172 		TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
4173 			     &vsi->sib_vsi_list, list);
4174 
4175 		/* Remove all switch element of the VSI */
4176 		ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
4177 		if (ret != I40E_SUCCESS)
4178 			PMD_DRV_LOG(ERR, "Failed to delete element");
4179 	}
4180 
4181 	i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
4182 
4183 	if (vsi->type != I40E_VSI_SRIOV)
4184 		i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
4185 	rte_free(vsi);
4186 
4187 	return I40E_SUCCESS;
4188 }
4189 
4190 static int
4191 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
4192 {
4193 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4194 	struct i40e_aqc_remove_macvlan_element_data def_filter;
4195 	struct i40e_mac_filter_info filter;
4196 	int ret;
4197 
4198 	if (vsi->type != I40E_VSI_MAIN)
4199 		return I40E_ERR_CONFIG;
4200 	memset(&def_filter, 0, sizeof(def_filter));
4201 	(void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
4202 					ETH_ADDR_LEN);
4203 	def_filter.vlan_tag = 0;
4204 	def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4205 				I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4206 	ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
4207 	if (ret != I40E_SUCCESS) {
4208 		struct i40e_mac_filter *f;
4209 		struct ether_addr *mac;
4210 
4211 		PMD_DRV_LOG(WARNING, "Cannot remove the default "
4212 			    "macvlan filter");
4213 		/* The permanent MAC needs to be added to the MAC list */
4214 		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4215 		if (f == NULL) {
4216 			PMD_DRV_LOG(ERR, "failed to allocate memory");
4217 			return I40E_ERR_NO_MEMORY;
4218 		}
4219 		mac = &f->mac_info.mac_addr;
4220 		(void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
4221 				ETH_ADDR_LEN);
4222 		f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4223 		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4224 		vsi->mac_num++;
4225 
4226 		return ret;
4227 	}
4228 	(void)rte_memcpy(&filter.mac_addr,
4229 		(struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
4230 	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4231 	return i40e_vsi_add_mac(vsi, &filter);
4232 }
4233 
4234 /*
4235  * i40e_vsi_get_bw_config - Query VSI BW Information
4236  * @vsi: the VSI to be queried
4237  *
4238  * Returns 0 on success, negative value on failure
4239  */
4240 static enum i40e_status_code
4241 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
4242 {
4243 	struct i40e_aqc_query_vsi_bw_config_resp bw_config;
4244 	struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
4245 	struct i40e_hw *hw = &vsi->adapter->hw;
4246 	i40e_status ret;
4247 	int i;
4248 	uint32_t bw_max;
4249 
4250 	memset(&bw_config, 0, sizeof(bw_config));
4251 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4252 	if (ret != I40E_SUCCESS) {
4253 		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
4254 			    hw->aq.asq_last_status);
4255 		return ret;
4256 	}
4257 
4258 	memset(&ets_sla_config, 0, sizeof(ets_sla_config));
4259 	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
4260 					&ets_sla_config, NULL);
4261 	if (ret != I40E_SUCCESS) {
4262 		PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
4263 			    "configuration %u", hw->aq.asq_last_status);
4264 		return ret;
4265 	}
4266 
4267 	/* store and print out BW info */
4268 	vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
4269 	vsi->bw_info.bw_max = bw_config.max_bw;
4270 	PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
4271 	PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
4272 	bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
4273 		    (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
4274 		     I40E_16_BIT_WIDTH);
4275 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4276 		vsi->bw_info.bw_ets_share_credits[i] =
4277 				ets_sla_config.share_credits[i];
4278 		vsi->bw_info.bw_ets_credits[i] =
4279 				rte_le_to_cpu_16(ets_sla_config.credits[i]);
4280 		/* 4 bits per TC, 4th bit is reserved */
4281 		vsi->bw_info.bw_ets_max[i] =
4282 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
4283 				  RTE_LEN2MASK(3, uint8_t));
4284 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
4285 			    vsi->bw_info.bw_ets_share_credits[i]);
4286 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
4287 			    vsi->bw_info.bw_ets_credits[i]);
4288 		PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
4289 			    vsi->bw_info.bw_ets_max[i]);
4290 	}
4291 
4292 	return I40E_SUCCESS;
4293 }
4294 
4295 /* i40e_enable_pf_lb
4296  * @pf: pointer to the pf structure
4297  *
4298  * allow loopback on pf
4299  */
4300 static inline void
4301 i40e_enable_pf_lb(struct i40e_pf *pf)
4302 {
4303 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4304 	struct i40e_vsi_context ctxt;
4305 	int ret;
4306 
4307 	/* Use the FW API if FW >= v5.0 */
4308 	if (hw->aq.fw_maj_ver < 5) {
4309 		PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
4310 		return;
4311 	}
4312 
4313 	memset(&ctxt, 0, sizeof(ctxt));
4314 	ctxt.seid = pf->main_vsi_seid;
4315 	ctxt.pf_num = hw->pf_id;
4316 	ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4317 	if (ret) {
4318 		PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
4319 			    ret, hw->aq.asq_last_status);
4320 		return;
4321 	}
4322 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4323 	ctxt.info.valid_sections =
4324 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4325 	ctxt.info.switch_id |=
4326 		rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4327 
4328 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4329 	if (ret)
4330 		PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n",
4331 			    hw->aq.asq_last_status);
4332 }
4333 
4334 /* Setup a VSI */
4335 struct i40e_vsi *
4336 i40e_vsi_setup(struct i40e_pf *pf,
4337 	       enum i40e_vsi_type type,
4338 	       struct i40e_vsi *uplink_vsi,
4339 	       uint16_t user_param)
4340 {
4341 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4342 	struct i40e_vsi *vsi;
4343 	struct i40e_mac_filter_info filter;
4344 	int ret;
4345 	struct i40e_vsi_context ctxt;
4346 	struct ether_addr broadcast =
4347 		{.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
4348 
4349 	if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
4350 	    uplink_vsi == NULL) {
4351 		PMD_DRV_LOG(ERR, "VSI setup failed, "
4352 			    "VSI link shouldn't be NULL");
4353 		return NULL;
4354 	}
4355 
4356 	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
4357 		PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
4358 			    "uplink VSI should be NULL");
4359 		return NULL;
4360 	}
4361 
4362 	/* Two situations:
4363 	 * 1. type is not MAIN and uplink vsi is not NULL:
4364 	 *    if the uplink vsi has no VEB yet, create one first under its veb field
4365 	 * 2. type is SRIOV and the uplink is NULL:
4366 	 *    if the floating VEB is NULL, create one under the floating veb field
4367 	 */
4368 
4369 	if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
4370 	    uplink_vsi->veb == NULL) {
4371 		uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
4372 
4373 		if (uplink_vsi->veb == NULL) {
4374 			PMD_DRV_LOG(ERR, "VEB setup failed");
4375 			return NULL;
4376 		}
4377 		/* Set ALLOWLOOPBACK on the PF when the VEB is created */
4378 		i40e_enable_pf_lb(pf);
4379 	}
4380 
4381 	if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
4382 	    pf->main_vsi->floating_veb == NULL) {
4383 		pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
4384 
4385 		if (pf->main_vsi->floating_veb == NULL) {
4386 			PMD_DRV_LOG(ERR, "VEB setup failed");
4387 			return NULL;
4388 		}
4389 	}
4390 
4391 	vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
4392 	if (!vsi) {
4393 		PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
4394 		return NULL;
4395 	}
4396 	TAILQ_INIT(&vsi->mac_list);
4397 	vsi->type = type;
4398 	vsi->adapter = I40E_PF_TO_ADAPTER(pf);
4399 	vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
4400 	vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
4401 	vsi->user_param = user_param;
4402 	/* Allocate queues */
4403 	switch (vsi->type) {
4404 	case I40E_VSI_MAIN  :
4405 		vsi->nb_qps = pf->lan_nb_qps;
4406 		break;
4407 	case I40E_VSI_SRIOV :
4408 		vsi->nb_qps = pf->vf_nb_qps;
4409 		break;
4410 	case I40E_VSI_VMDQ2:
4411 		vsi->nb_qps = pf->vmdq_nb_qps;
4412 		break;
4413 	case I40E_VSI_FDIR:
4414 		vsi->nb_qps = pf->fdir_nb_qps;
4415 		break;
4416 	default:
4417 		goto fail_mem;
4418 	}
4419 	/*
4420 	 * The filter status descriptor is reported on rx queue 0, while
4421 	 * the tx queue for fdir filter programming has no such constraint
4422 	 * and can be a non-zero queue.
4423 	 * To simplify things, the FDIR vsi uses queue pair 0. To make sure
4424 	 * it gets queue pair 0, its queue allocation needs to be done
4425 	 * before this function is called.
4426 	 */
4427 	if (type != I40E_VSI_FDIR) {
4428 		ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
4429 		if (ret < 0) {
4430 			PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
4431 				    vsi->seid, ret);
4432 			goto fail_mem;
4433 		}
4434 		vsi->base_queue = ret;
4435 	} else
4436 		vsi->base_queue = I40E_FDIR_QUEUE_ID;
4437 
4438 	/* VF has MSIX interrupt in VF range, don't allocate here */
4439 	if (type == I40E_VSI_MAIN) {
4440 		ret = i40e_res_pool_alloc(&pf->msix_pool,
4441 					  RTE_MIN(vsi->nb_qps,
4442 						  RTE_MAX_RXTX_INTR_VEC_ID));
4443 		if (ret < 0) {
4444 			PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
4445 				    vsi->seid, ret);
4446 			goto fail_queue_alloc;
4447 		}
4448 		vsi->msix_intr = ret;
4449 		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
4450 	} else if (type != I40E_VSI_SRIOV) {
4451 		ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
4452 		if (ret < 0) {
4453 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
4454 			goto fail_queue_alloc;
4455 		}
4456 		vsi->msix_intr = ret;
4457 		vsi->nb_msix = 1;
4458 	} else {
4459 		vsi->msix_intr = 0;
4460 		vsi->nb_msix = 0;
4461 	}
4462 
4463 	/* Add VSI */
4464 	if (type == I40E_VSI_MAIN) {
4465 		/* For main VSI, no need to add since it's default one */
4466 		vsi->uplink_seid = pf->mac_seid;
4467 		vsi->seid = pf->main_vsi_seid;
4468 		/* Bind queues with specific MSIX interrupt */
4469 		/**
4470 		 * At least 2 interrupts are needed: one for the misc cause,
4471 		 * which is enabled from the OS side, and another for the
4472 		 * queues, bound to the interrupt from the device side only.
4473 		 */
4474 
4475 		/* Get default VSI parameters from hardware */
4476 		memset(&ctxt, 0, sizeof(ctxt));
4477 		ctxt.seid = vsi->seid;
4478 		ctxt.pf_num = hw->pf_id;
4479 		ctxt.uplink_seid = vsi->uplink_seid;
4480 		ctxt.vf_num = 0;
4481 		ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
4482 		if (ret != I40E_SUCCESS) {
4483 			PMD_DRV_LOG(ERR, "Failed to get VSI params");
4484 			goto fail_msix_alloc;
4485 		}
4486 		(void)rte_memcpy(&vsi->info, &ctxt.info,
4487 			sizeof(struct i40e_aqc_vsi_properties_data));
4488 		vsi->vsi_id = ctxt.vsi_number;
4489 		vsi->info.valid_sections = 0;
4490 
4491 		/* Configure tc, enabled TC0 only */
4492 		if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
4493 			I40E_SUCCESS) {
4494 			PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
4495 			goto fail_msix_alloc;
4496 		}
4497 
4498 		/* TC, queue mapping */
4499 		memset(&ctxt, 0, sizeof(ctxt));
4500 		vsi->info.valid_sections |=
4501 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4502 		vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
4503 					I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4504 		(void)rte_memcpy(&ctxt.info, &vsi->info,
4505 			sizeof(struct i40e_aqc_vsi_properties_data));
4506 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4507 						I40E_DEFAULT_TCMAP);
4508 		if (ret != I40E_SUCCESS) {
4509 			PMD_DRV_LOG(ERR, "Failed to configure "
4510 				    "TC queue mapping");
4511 			goto fail_msix_alloc;
4512 		}
4513 		ctxt.seid = vsi->seid;
4514 		ctxt.pf_num = hw->pf_id;
4515 		ctxt.uplink_seid = vsi->uplink_seid;
4516 		ctxt.vf_num = 0;
4517 
4518 		/* Update VSI parameters */
4519 		ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4520 		if (ret != I40E_SUCCESS) {
4521 			PMD_DRV_LOG(ERR, "Failed to update VSI params");
4522 			goto fail_msix_alloc;
4523 		}
4524 
4525 		(void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
4526 						sizeof(vsi->info.tc_mapping));
4527 		(void)rte_memcpy(&vsi->info.queue_mapping,
4528 				&ctxt.info.queue_mapping,
4529 			sizeof(vsi->info.queue_mapping));
4530 		vsi->info.mapping_flags = ctxt.info.mapping_flags;
4531 		vsi->info.valid_sections = 0;
4532 
4533 		(void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
4534 				ETH_ADDR_LEN);
4535 
4536 		/**
4537 		 * Updating default filter settings is necessary to prevent
4538 		 * reception of tagged packets.
4539 		 * Some old firmware configurations load a default macvlan
4540 		 * filter which accepts both tagged and untagged packets.
4541 		 * The updating is to use a normal filter instead if needed.
4542 		 * For NVM 4.2.2 or after, the updating is not needed anymore.
4543 		 * The firmware with correct configurations load the default
4544 		 * macvlan filter which is expected and cannot be removed.
4545 		 */
4546 		i40e_update_default_filter_setting(vsi);
4547 		i40e_config_qinq(hw, vsi);
4548 	} else if (type == I40E_VSI_SRIOV) {
4549 		memset(&ctxt, 0, sizeof(ctxt));
4550 		/**
4551 		 * For other VSIs, the uplink_seid equals the uplink VSI's
4552 		 * uplink_seid since they share the same VEB
4553 		 */
4554 		if (uplink_vsi == NULL)
4555 			vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
4556 		else
4557 			vsi->uplink_seid = uplink_vsi->uplink_seid;
4558 		ctxt.pf_num = hw->pf_id;
4559 		ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
4560 		ctxt.uplink_seid = vsi->uplink_seid;
4561 		ctxt.connection_type = 0x1;
4562 		ctxt.flags = I40E_AQ_VSI_TYPE_VF;
4563 
4564 		/* Use the VEB configuration if FW >= v5.0 */
4565 		if (hw->aq.fw_maj_ver >= 5) {
4566 			/* Configure switch ID */
4567 			ctxt.info.valid_sections |=
4568 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4569 			ctxt.info.switch_id =
4570 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4571 		}
4572 
4573 		/* Configure port/vlan */
4574 		ctxt.info.valid_sections |=
4575 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4576 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4577 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4578 						I40E_DEFAULT_TCMAP);
4579 		if (ret != I40E_SUCCESS) {
4580 			PMD_DRV_LOG(ERR, "Failed to configure "
4581 				    "TC queue mapping");
4582 			goto fail_msix_alloc;
4583 		}
4584 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4585 		ctxt.info.valid_sections |=
4586 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4587 		/**
4588 		 * Since VSI is not created yet, only configure parameter,
4589 		 * will add vsi below.
4590 		 */
4591 
4592 		i40e_config_qinq(hw, vsi);
4593 	} else if (type == I40E_VSI_VMDQ2) {
4594 		memset(&ctxt, 0, sizeof(ctxt));
4595 		/*
4596 		 * For other VSIs, the uplink_seid equals the uplink VSI's
4597 		 * uplink_seid since they share the same VEB
4598 		 */
4599 		vsi->uplink_seid = uplink_vsi->uplink_seid;
4600 		ctxt.pf_num = hw->pf_id;
4601 		ctxt.vf_num = 0;
4602 		ctxt.uplink_seid = vsi->uplink_seid;
4603 		ctxt.connection_type = 0x1;
4604 		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
4605 
4606 		ctxt.info.valid_sections |=
4607 				rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
4608 		/* user_param carries flag to enable loop back */
4609 		if (user_param) {
4610 			ctxt.info.switch_id =
4611 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
4612 			ctxt.info.switch_id |=
4613 			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
4614 		}
4615 
4616 		/* Configure port/vlan */
4617 		ctxt.info.valid_sections |=
4618 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4619 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
4620 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4621 						I40E_DEFAULT_TCMAP);
4622 		if (ret != I40E_SUCCESS) {
4623 			PMD_DRV_LOG(ERR, "Failed to configure "
4624 					"TC queue mapping");
4625 			goto fail_msix_alloc;
4626 		}
4627 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4628 		ctxt.info.valid_sections |=
4629 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4630 	} else if (type == I40E_VSI_FDIR) {
4631 		memset(&ctxt, 0, sizeof(ctxt));
4632 		vsi->uplink_seid = uplink_vsi->uplink_seid;
4633 		ctxt.pf_num = hw->pf_id;
4634 		ctxt.vf_num = 0;
4635 		ctxt.uplink_seid = vsi->uplink_seid;
4636 		ctxt.connection_type = 0x1;     /* regular data port */
4637 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
4638 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
4639 						I40E_DEFAULT_TCMAP);
4640 		if (ret != I40E_SUCCESS) {
4641 			PMD_DRV_LOG(ERR, "Failed to configure "
4642 					"TC queue mapping.");
4643 			goto fail_msix_alloc;
4644 		}
4645 		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
4646 		ctxt.info.valid_sections |=
4647 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
4648 	} else {
4649 		PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
4650 		goto fail_msix_alloc;
4651 	}
4652 
4653 	if (vsi->type != I40E_VSI_MAIN) {
4654 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
4655 		if (ret != I40E_SUCCESS) {
4656 			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
4657 				    hw->aq.asq_last_status);
4658 			goto fail_msix_alloc;
4659 		}
4660 		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
4661 		vsi->info.valid_sections = 0;
4662 		vsi->seid = ctxt.seid;
4663 		vsi->vsi_id = ctxt.vsi_number;
4664 		vsi->sib_vsi_list.vsi = vsi;
4665 		if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
4666 			TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
4667 					  &vsi->sib_vsi_list, list);
4668 		} else {
4669 			TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
4670 					  &vsi->sib_vsi_list, list);
4671 		}
4672 	}
4673 
4674 	/* MAC/VLAN configuration */
4675 	(void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
4676 	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
4677 
4678 	ret = i40e_vsi_add_mac(vsi, &filter);
4679 	if (ret != I40E_SUCCESS) {
4680 		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
4681 		goto fail_msix_alloc;
4682 	}
4683 
4684 	/* Get VSI BW information */
4685 	i40e_vsi_get_bw_config(vsi);
4686 	return vsi;
4687 fail_msix_alloc:
4688 	i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
4689 fail_queue_alloc:
4690 	i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
4691 fail_mem:
4692 	rte_free(vsi);
4693 	return NULL;
4694 }
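
/*
 * Usage sketch (illustrative): an SRIOV VSI placed on the floating VEB is
 * created with a NULL uplink, e.g. i40e_vsi_setup(pf, I40E_VSI_SRIOV, NULL,
 * vf_id), while a VMDq VSI passes the main VSI as uplink and uses user_param
 * as the loopback-enable flag.
 */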
4695 
4696 /* Configure vlan filter on or off */
4697 int
4698 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
4699 {
4700 	int i, num;
4701 	struct i40e_mac_filter *f;
4702 	void *temp;
4703 	struct i40e_mac_filter_info *mac_filter;
4704 	enum rte_mac_filter_type desired_filter;
4705 	int ret = I40E_SUCCESS;
4706 
4707 	if (on) {
4708 		/* Filter to match MAC and VLAN */
4709 		desired_filter = RTE_MACVLAN_PERFECT_MATCH;
4710 	} else {
4711 		/* Filter to match only MAC */
4712 		desired_filter = RTE_MAC_PERFECT_MATCH;
4713 	}
4714 
4715 	num = vsi->mac_num;
4716 
4717 	mac_filter = rte_zmalloc("mac_filter_info_data",
4718 				 num * sizeof(*mac_filter), 0);
4719 	if (mac_filter == NULL) {
4720 		PMD_DRV_LOG(ERR, "failed to allocate memory");
4721 		return I40E_ERR_NO_MEMORY;
4722 	}
4723 
4724 	i = 0;
4725 
4726 	/* Remove all existing MAC filters */
4727 	TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
4728 		mac_filter[i] = f->mac_info;
4729 		ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
4730 		if (ret) {
4731 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
4732 				    on ? "enable" : "disable");
4733 			goto DONE;
4734 		}
4735 		i++;
4736 	}
4737 
4738 	/* Override with new filter */
4739 	for (i = 0; i < num; i++) {
4740 		mac_filter[i].filter_type = desired_filter;
4741 		ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
4742 		if (ret) {
4743 			PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
4744 				    on ? "enable" : "disable");
4745 			goto DONE;
4746 		}
4747 	}
4748 
4749 DONE:
4750 	rte_free(mac_filter);
4751 	return ret;
4752 }
4753 
4754 /* Configure vlan stripping on or off */
4755 int
4756 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
4757 {
4758 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4759 	struct i40e_vsi_context ctxt;
4760 	uint8_t vlan_flags;
4761 	int ret = I40E_SUCCESS;
4762 
4763 	/* Check if it has been already on or off */
4764 	if (vsi->info.valid_sections &
4765 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
4766 		if (on) {
4767 			if ((vsi->info.port_vlan_flags &
4768 				I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
4769 				return 0; /* already on */
4770 		} else {
4771 			if ((vsi->info.port_vlan_flags &
4772 				I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
4773 				I40E_AQ_VSI_PVLAN_EMOD_MASK)
4774 				return 0; /* already off */
4775 		}
4776 	}
4777 
4778 	if (on)
4779 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
4780 	else
4781 		vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
4782 	vsi->info.valid_sections =
4783 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4784 	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
4785 	vsi->info.port_vlan_flags |= vlan_flags;
4786 	ctxt.seid = vsi->seid;
4787 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4788 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4789 	if (ret)
4790 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
4791 			    on ? "enable" : "disable");
4792 
4793 	return ret;
4794 }
4795 
4796 static int
4797 i40e_dev_init_vlan(struct rte_eth_dev *dev)
4798 {
4799 	struct rte_eth_dev_data *data = dev->data;
4800 	int ret;
4801 	int mask = 0;
4802 
4803 	/* Apply vlan offload setting */
4804 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
4805 	i40e_vlan_offload_set(dev, mask);
4806 
4807 	/* Apply double-vlan setting, not implemented yet */
4808 
4809 	/* Apply pvid setting */
4810 	ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
4811 				data->dev_conf.txmode.hw_vlan_insert_pvid);
4812 	if (ret)
4813 		PMD_DRV_LOG(INFO, "Failed to update VSI params");
4814 
4815 	return ret;
4816 }
4817 
4818 static int
4819 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
4820 {
4821 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4822 
4823 	return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
4824 }
4825 
4826 static int
4827 i40e_update_flow_control(struct i40e_hw *hw)
4828 {
4829 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
4830 	struct i40e_link_status link_status;
4831 	uint32_t rxfc = 0, txfc = 0, reg;
4832 	uint8_t an_info;
4833 	int ret;
4834 
4835 	memset(&link_status, 0, sizeof(link_status));
4836 	ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
4837 	if (ret != I40E_SUCCESS) {
4838 		PMD_DRV_LOG(ERR, "Failed to get link status information");
4839 		goto write_reg; /* Disable flow control */
4840 	}
4841 
4842 	an_info = hw->phy.link_info.an_info;
4843 	if (!(an_info & I40E_AQ_AN_COMPLETED)) {
4844 		PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
4845 		ret = I40E_ERR_NOT_READY;
4846 		goto write_reg; /* Disable flow control */
4847 	}
4848 	/**
4849 	 * If link auto-negotiation is enabled, flow control needs to
4850 	 * be configured according to the negotiated result.
4851 	 */
4852 	switch (an_info & I40E_LINK_PAUSE_RXTX) {
4853 	case I40E_LINK_PAUSE_RXTX:
4854 		rxfc = 1;
4855 		txfc = 1;
4856 		hw->fc.current_mode = I40E_FC_FULL;
4857 		break;
4858 	case I40E_AQ_LINK_PAUSE_RX:
4859 		rxfc = 1;
4860 		hw->fc.current_mode = I40E_FC_RX_PAUSE;
4861 		break;
4862 	case I40E_AQ_LINK_PAUSE_TX:
4863 		txfc = 1;
4864 		hw->fc.current_mode = I40E_FC_TX_PAUSE;
4865 		break;
4866 	default:
4867 		hw->fc.current_mode = I40E_FC_NONE;
4868 		break;
4869 	}
4870 
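	/*
	 * rxfc and txfc default to 0, so jumping straight to write_reg
	 * leaves flow control disabled in both directions.
	 */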
4871 write_reg:
4872 	I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
4873 		txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
4874 	reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
4875 	reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
4876 	reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
4877 	I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
4878 
4879 	return ret;
4880 }
4881 
4882 /* PF setup */
4883 static int
4884 i40e_pf_setup(struct i40e_pf *pf)
4885 {
4886 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4887 	struct i40e_filter_control_settings settings;
4888 	struct i40e_vsi *vsi;
4889 	int ret;
4890 
4891 	/* Clear all stats counters */
4892 	pf->offset_loaded = FALSE;
4893 	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
4894 	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
4895 
4896 	ret = i40e_pf_get_switch_config(pf);
4897 	if (ret != I40E_SUCCESS) {
4898 		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
4899 		return ret;
4900 	}
4901 	if (pf->flags & I40E_FLAG_FDIR) {
4902 		/* Allocate queues first so that FDIR gets queue pair 0 */
4903 		ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
4904 		if (ret != I40E_FDIR_QUEUE_ID) {
4905 			PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
4906 				    " ret =%d", ret);
4907 			pf->flags &= ~I40E_FLAG_FDIR;
4908 		}
4909 	}
4910 	/* Main VSI setup */
4911 	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
4912 	if (!vsi) {
4913 		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
4914 		return I40E_ERR_NOT_READY;
4915 	}
4916 	pf->main_vsi = vsi;
4917 
4918 	/* Configure filter control */
4919 	memset(&settings, 0, sizeof(settings));
4920 	if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
4921 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
4922 	else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
4923 		settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
4924 	else {
4925 		PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
4926 						hw->func_caps.rss_table_size);
4927 		return I40E_ERR_PARAM;
4928 	}
4929 	PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
4930 			"size: %u\n", hw->func_caps.rss_table_size);
4931 	pf->hash_lut_size = hw->func_caps.rss_table_size;
4932 
4933 	/* Enable ethtype and macvlan filters */
4934 	settings.enable_ethtype = TRUE;
4935 	settings.enable_macvlan = TRUE;
4936 	ret = i40e_set_filter_control(hw, &settings);
4937 	if (ret)
4938 		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
4939 								ret);
4940 
4941 	/* Update flow control according to the auto negotiation */
4942 	i40e_update_flow_control(hw);
4943 
4944 	return I40E_SUCCESS;
4945 }
4946 
4947 int
4948 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
4949 {
4950 	uint32_t reg;
4951 	uint16_t j;
4952 
4953 	/**
4954 	 * Set or clear the TX Queue Disable flags,
4955 	 * as required by hardware.
4956 	 */
4957 	i40e_pre_tx_queue_cfg(hw, q_idx, on);
4958 	rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
4959 
4960 	/* Wait until the request is finished */
4961 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4962 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4963 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
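		/*
		 * A previous request has completed once the QENA_REQ bit
		 * matches the QENA_STAT bit.
		 */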
4964 		if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
4965 			((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
4966 							& 0x1))) {
4967 			break;
4968 		}
4969 	}
4970 	if (on) {
4971 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
4972 			return I40E_SUCCESS; /* already on, skip next steps */
4973 
4974 		I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
4975 		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4976 	} else {
4977 		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4978 			return I40E_SUCCESS; /* already off, skip next steps */
4979 		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4980 	}
4981 	/* Write the register */
4982 	I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
4983 	/* Check the result */
4984 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
4985 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4986 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
4987 		if (on) {
4988 			if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
4989 				(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4990 				break;
4991 		} else {
4992 			if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
4993 				!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
4994 				break;
4995 		}
4996 	}
4997 	/* Check if it is timeout */
4998 	if (j >= I40E_CHK_Q_ENA_COUNT) {
4999 		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
5000 			    (on ? "enable" : "disable"), q_idx);
5001 		return I40E_ERR_TIMEOUT;
5002 	}
5003 
5004 	return I40E_SUCCESS;
5005 }
5006 
5007 /* Switch on or off the tx queues */
5008 static int
5009 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
5010 {
5011 	struct rte_eth_dev_data *dev_data = pf->dev_data;
5012 	struct i40e_tx_queue *txq;
5013 	struct rte_eth_dev *dev = pf->adapter->eth_dev;
5014 	uint16_t i;
5015 	int ret;
5016 
5017 	for (i = 0; i < dev_data->nb_tx_queues; i++) {
5018 		txq = dev_data->tx_queues[i];
5019 		/* Don't operate the queue if it is not configured, or
5020 		 * if it is flagged for deferred (per-queue) start */
5021 		if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
5022 			continue;
5023 		if (on)
5024 			ret = i40e_dev_tx_queue_start(dev, i);
5025 		else
5026 			ret = i40e_dev_tx_queue_stop(dev, i);
5027 		if (ret != I40E_SUCCESS)
5028 			return ret;
5029 	}
5030 
5031 	return I40E_SUCCESS;
5032 }
5033 
5034 int
5035 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5036 {
5037 	uint32_t reg;
5038 	uint16_t j;
5039 
5040 	/* Wait until the request is finished */
5041 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5042 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5043 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5044 		if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5045 			((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
5046 			break;
5047 	}
5048 
5049 	if (on) {
5050 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
5051 			return I40E_SUCCESS; /* Already on, skip next steps */
5052 		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
5053 	} else {
5054 		if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5055 			return I40E_SUCCESS; /* Already off, skip next steps */
5056 		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
5057 	}
5058 
5059 	/* Write the register */
5060 	I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
5061 	/* Check the result */
5062 	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5063 		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5064 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
5065 		if (on) {
5066 			if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5067 				(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5068 				break;
5069 		} else {
5070 			if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
5071 				!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
5072 				break;
5073 		}
5074 	}
5075 
5076 	/* Check if it is timeout */
5077 	if (j >= I40E_CHK_Q_ENA_COUNT) {
5078 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
5079 			    (on ? "enable" : "disable"), q_idx);
5080 		return I40E_ERR_TIMEOUT;
5081 	}
5082 
5083 	return I40E_SUCCESS;
5084 }
5085 /* Switch on or off the rx queues */
5086 static int
5087 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
5088 {
5089 	struct rte_eth_dev_data *dev_data = pf->dev_data;
5090 	struct i40e_rx_queue *rxq;
5091 	struct rte_eth_dev *dev = pf->adapter->eth_dev;
5092 	uint16_t i;
5093 	int ret;
5094 
5095 	for (i = 0; i < dev_data->nb_rx_queues; i++) {
5096 		rxq = dev_data->rx_queues[i];
5097 		/* Don't operate the queue if it is not configured, or
5098 		 * if it is flagged for deferred (per-queue) start */
5099 		if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
5100 			continue;
5101 		if (on)
5102 			ret = i40e_dev_rx_queue_start(dev, i);
5103 		else
5104 			ret = i40e_dev_rx_queue_stop(dev, i);
5105 		if (ret != I40E_SUCCESS)
5106 			return ret;
5107 	}
5108 
5109 	return I40E_SUCCESS;
5110 }
5111 
5112 /* Switch on or off all the rx/tx queues */
5113 int
5114 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
5115 {
5116 	int ret;
5117 
5118 	if (on) {
5119 		/* enable rx queues before enabling tx queues */
5120 		ret = i40e_dev_switch_rx_queues(pf, on);
5121 		if (ret) {
5122 			PMD_DRV_LOG(ERR, "Failed to switch rx queues");
5123 			return ret;
5124 		}
5125 		ret = i40e_dev_switch_tx_queues(pf, on);
5126 	} else {
5127 		/* Stop tx queues before stopping rx queues */
5128 		ret = i40e_dev_switch_tx_queues(pf, on);
5129 		if (ret) {
5130 			PMD_DRV_LOG(ERR, "Failed to switch tx queues");
5131 			return ret;
5132 		}
5133 		ret = i40e_dev_switch_rx_queues(pf, on);
5134 	}
5135 
5136 	return ret;
5137 }
5138 
5139 /* Initialize VSI for TX */
5140 static int
5141 i40e_dev_tx_init(struct i40e_pf *pf)
5142 {
5143 	struct rte_eth_dev_data *data = pf->dev_data;
5144 	uint16_t i;
5145 	uint32_t ret = I40E_SUCCESS;
5146 	struct i40e_tx_queue *txq;
5147 
5148 	for (i = 0; i < data->nb_tx_queues; i++) {
5149 		txq = data->tx_queues[i];
5150 		if (!txq || !txq->q_set)
5151 			continue;
5152 		ret = i40e_tx_queue_init(txq);
5153 		if (ret != I40E_SUCCESS)
5154 			break;
5155 	}
5156 	if (ret == I40E_SUCCESS)
5157 		i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
5158 				     ->eth_dev);
5159 
5160 	return ret;
5161 }
5162 
5163 /* Initialize VSI for RX */
5164 static int
5165 i40e_dev_rx_init(struct i40e_pf *pf)
5166 {
5167 	struct rte_eth_dev_data *data = pf->dev_data;
5168 	int ret = I40E_SUCCESS;
5169 	uint16_t i;
5170 	struct i40e_rx_queue *rxq;
5171 
5172 	i40e_pf_config_mq_rx(pf);
5173 	for (i = 0; i < data->nb_rx_queues; i++) {
5174 		rxq = data->rx_queues[i];
5175 		if (!rxq || !rxq->q_set)
5176 			continue;
5177 
5178 		ret = i40e_rx_queue_init(rxq);
5179 		if (ret != I40E_SUCCESS) {
5180 			PMD_DRV_LOG(ERR, "Failed to do RX queue "
5181 				    "initialization");
5182 			break;
5183 		}
5184 	}
5185 	if (ret == I40E_SUCCESS)
5186 		i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
5187 				     ->eth_dev);
5188 
5189 	return ret;
5190 }
5191 
5192 static int
5193 i40e_dev_rxtx_init(struct i40e_pf *pf)
5194 {
5195 	int err;
5196 
5197 	err = i40e_dev_tx_init(pf);
5198 	if (err) {
5199 		PMD_DRV_LOG(ERR, "Failed to do TX initialization");
5200 		return err;
5201 	}
5202 	err = i40e_dev_rx_init(pf);
5203 	if (err) {
5204 		PMD_DRV_LOG(ERR, "Failed to do RX initialization");
5205 		return err;
5206 	}
5207 
5208 	return err;
5209 }
5210 
5211 static int
5212 i40e_vmdq_setup(struct rte_eth_dev *dev)
5213 {
5214 	struct rte_eth_conf *conf = &dev->data->dev_conf;
5215 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5216 	int i, err, conf_vsis, j, loop;
5217 	struct i40e_vsi *vsi;
5218 	struct i40e_vmdq_info *vmdq_info;
5219 	struct rte_eth_vmdq_rx_conf *vmdq_conf;
5220 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5221 
5222 	/*
5223 	 * Disable interrupts to avoid handling messages from VFs and to
5224 	 * avoid race conditions during VSI creation/destruction.
5225 	 */
5226 	i40e_pf_disable_irq0(hw);
5227 
5228 	if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
5229 		PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
5230 		return -ENOTSUP;
5231 	}
5232 
5233 	conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
5234 	if (conf_vsis > pf->max_nb_vmdq_vsi) {
5235 		PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
5236 			conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
5237 			pf->max_nb_vmdq_vsi);
5238 		return -ENOTSUP;
5239 	}
5240 
5241 	if (pf->vmdq != NULL) {
5242 		PMD_INIT_LOG(INFO, "VMDQ already configured");
5243 		return 0;
5244 	}
5245 
5246 	pf->vmdq = rte_zmalloc("vmdq_info_struct",
5247 				sizeof(*vmdq_info) * conf_vsis, 0);
5248 
5249 	if (pf->vmdq == NULL) {
5250 		PMD_INIT_LOG(ERR, "Failed to allocate memory");
5251 		return -ENOMEM;
5252 	}
5253 
5254 	vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
5255 
5256 	/* Create VMDQ VSI */
5257 	for (i = 0; i < conf_vsis; i++) {
5258 		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
5259 				vmdq_conf->enable_loop_back);
5260 		if (vsi == NULL) {
5261 			PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
5262 			err = -1;
5263 			goto err_vsi_setup;
5264 		}
5265 		vmdq_info = &pf->vmdq[i];
5266 		vmdq_info->pf = pf;
5267 		vmdq_info->vsi = vsi;
5268 	}
5269 	pf->nb_cfg_vmdq_vsi = conf_vsis;
5270 
5271 	/* Configure Vlan */
5272 	loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
5273 	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
5274 		for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
5275 			if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
5276 				PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
5277 					vmdq_conf->pool_map[i].vlan_id, j);
5278 
5279 				err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
5280 						vmdq_conf->pool_map[i].vlan_id);
5281 				if (err) {
5282 					PMD_INIT_LOG(ERR, "Failed to add vlan");
5283 					err = -1;
5284 					goto err_vsi_setup;
5285 				}
5286 			}
5287 		}
5288 	}
5289 
5290 	i40e_pf_enable_irq0(hw);
5291 
5292 	return 0;
5293 
5294 err_vsi_setup:
5295 	for (i = 0; i < conf_vsis; i++)
5296 		if (pf->vmdq[i].vsi == NULL)
5297 			break;
5298 		else
5299 			i40e_vsi_release(pf->vmdq[i].vsi);
5300 
5301 	rte_free(pf->vmdq);
5302 	pf->vmdq = NULL;
5303 	i40e_pf_enable_irq0(hw);
5304 	return err;
5305 }
5306 
5307 static void
5308 i40e_stat_update_32(struct i40e_hw *hw,
5309 		   uint32_t reg,
5310 		   bool offset_loaded,
5311 		   uint64_t *offset,
5312 		   uint64_t *stat)
5313 {
5314 	uint64_t new_data;
5315 
5316 	new_data = (uint64_t)I40E_READ_REG(hw, reg);
5317 	if (!offset_loaded)
5318 		*offset = new_data;
5319 
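	/* Handle 32-bit counter wraparound since the previous snapshot */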
5320 	if (new_data >= *offset)
5321 		*stat = (uint64_t)(new_data - *offset);
5322 	else
5323 		*stat = (uint64_t)((new_data +
5324 			((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
5325 }
5326 
5327 static void
5328 i40e_stat_update_48(struct i40e_hw *hw,
5329 		   uint32_t hireg,
5330 		   uint32_t loreg,
5331 		   bool offset_loaded,
5332 		   uint64_t *offset,
5333 		   uint64_t *stat)
5334 {
5335 	uint64_t new_data;
5336 
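	/*
	 * 48-bit counters are split across a 32-bit low register and the
	 * low 16 bits of a high register.
	 */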
5337 	new_data = (uint64_t)I40E_READ_REG(hw, loreg);
5338 	new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
5339 			I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
5340 
5341 	if (!offset_loaded)
5342 		*offset = new_data;
5343 
5344 	if (new_data >= *offset)
5345 		*stat = new_data - *offset;
5346 	else
5347 		*stat = (uint64_t)((new_data +
5348 			((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
5349 
5350 	*stat &= I40E_48_BIT_MASK;
5351 }
5352 
5353 /* Disable IRQ0 */
5354 void
5355 i40e_pf_disable_irq0(struct i40e_hw *hw)
5356 {
5357 	/* Disable all interrupt types */
5358 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
5359 	I40E_WRITE_FLUSH(hw);
5360 }
5361 
5362 /* Enable IRQ0 */
5363 void
5364 i40e_pf_enable_irq0(struct i40e_hw *hw)
5365 {
5366 	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
5367 		I40E_PFINT_DYN_CTL0_INTENA_MASK |
5368 		I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
5369 		I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
5370 	I40E_WRITE_FLUSH(hw);
5371 }
5372 
5373 static void
5374 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
5375 {
5376 	/* read pending request and disable first */
5377 	i40e_pf_disable_irq0(hw);
5378 	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
5379 	I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
5380 		I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
5381 
5382 	if (no_queue)
5383 		/* Link no queues with irq0 */
5384 		I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
5385 			       I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
5386 }
5387 
5388 static void
5389 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
5390 {
5391 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5392 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5393 	int i;
5394 	uint16_t abs_vf_id;
5395 	uint32_t index, offset, val;
5396 
5397 	if (!pf->vfs)
5398 		return;
5399 	/**
5400 	 * Try to find which VF triggered a reset; use the absolute VF id for
5401 	 * access, since the register is a global register.
5402 	 */
5403 	for (i = 0; i < pf->vf_num; i++) {
5404 		abs_vf_id = hw->func_caps.vf_base_id + i;
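		/*
		 * VFLRSTAT is an array of 32-bit registers; each bit flags a
		 * pending VFLR event for one absolute VF id.
		 */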
5405 		index = abs_vf_id / I40E_UINT32_BIT_SIZE;
5406 		offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
5407 		val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
5408 		/* VFR event occurred */
5409 		if (val & (0x1 << offset)) {
5410 			int ret;
5411 
5412 			/* Clear the event first */
5413 			I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
5414 							(0x1 << offset));
5415 			PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
5416 			/**
5417 			 * Only notify the VF that a reset event occurred;
5418 			 * don't trigger another SW reset.
5419 			 */
5420 			ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
5421 			if (ret != I40E_SUCCESS)
5422 				PMD_DRV_LOG(ERR, "Failed to do VF reset");
5423 		}
5424 	}
5425 }
5426 
5427 static void
5428 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
5429 {
5430 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5431 	struct i40e_arq_event_info info;
5432 	uint16_t pending, opcode;
5433 	int ret;
5434 
5435 	info.buf_len = I40E_AQ_BUF_SZ;
5436 	info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
5437 	if (!info.msg_buf) {
5438 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
5439 		return;
5440 	}
5441 
5442 	pending = 1;
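	/* Drain the admin receive queue until no more events are pending */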
5443 	while (pending) {
5444 		ret = i40e_clean_arq_element(hw, &info, &pending);
5445 
5446 		if (ret != I40E_SUCCESS) {
5447 			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
5448 				    "aq_err: %u", hw->aq.asq_last_status);
5449 			break;
5450 		}
5451 		opcode = rte_le_to_cpu_16(info.desc.opcode);
5452 
5453 		switch (opcode) {
5454 		case i40e_aqc_opc_send_msg_to_pf:
5455 			/* Refer to i40e_aq_send_msg_to_pf() for argument layout */
5456 			i40e_pf_host_handle_vf_msg(dev,
5457 					rte_le_to_cpu_16(info.desc.retval),
5458 					rte_le_to_cpu_32(info.desc.cookie_high),
5459 					rte_le_to_cpu_32(info.desc.cookie_low),
5460 					info.msg_buf,
5461 					info.msg_len);
5462 			break;
5463 		default:
5464 			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
5465 				    opcode);
5466 			break;
5467 		}
5468 	}
5469 	rte_free(info.msg_buf);
5470 }
5471 
5472 /*
5473  * Interrupt handler registered as the alarm callback to handle the LSC
5474  * interrupt after a fixed delay, in order to let the NIC reach a stable
5475  * state. Currently i40e waits 1 second for the link up interrupt; no
5476  * delay is needed for the link down interrupt.
5477  */
5478 static void
5479 i40e_dev_interrupt_delayed_handler(void *param)
5480 {
5481 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
5482 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5483 	uint32_t icr0;
5484 
5485 	/* read interrupt causes again */
5486 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
5487 
5488 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
5489 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
5490 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
5491 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
5492 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
5493 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
5494 		PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
5495 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
5496 		PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
5497 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
5498 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
5499 								"state\n");
5500 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
5501 		PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
5502 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
5503 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
5504 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
5505 
5506 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
5507 		PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
5508 		i40e_dev_handle_vfr_event(dev);
5509 	}
5510 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
5511 		PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
5512 		i40e_dev_handle_aq_msg(dev);
5513 	}
5514 
5515 	/* handle the link up interrupt in an alarm callback */
5516 	i40e_dev_link_update(dev, 0);
5517 	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
5518 
5519 	i40e_pf_enable_irq0(hw);
5520 	rte_intr_enable(&(dev->pci_dev->intr_handle));
5521 }
5522 
5523 /**
5524  * Interrupt handler triggered by the NIC to handle a
5525  * specific interrupt.
5526  *
5527  * @param handle
5528  *  Pointer to interrupt handle.
5529  * @param param
5530  *  The address of the parameter (struct rte_eth_dev *) registered before.
5531  *
5532  * @return
5533  *  void
5534  */
5535 static void
5536 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
5537 			   void *param)
5538 {
5539 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
5540 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5541 	uint32_t icr0;
5542 
5543 	/* Disable interrupt */
5544 	i40e_pf_disable_irq0(hw);
5545 
5546 	/* read out interrupt causes */
5547 	icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
5548 
5549 	/* No interrupt event indicated */
5550 	if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
5551 		PMD_DRV_LOG(INFO, "No interrupt event");
5552 		goto done;
5553 	}
5554 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
5555 	if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
5556 		PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
5557 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
5558 		PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
5559 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
5560 		PMD_DRV_LOG(INFO, "ICR0: global reset requested");
5561 	if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
5562 		PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
5563 	if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
5564 		PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
5565 	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
5566 		PMD_DRV_LOG(ERR, "ICR0: HMC error");
5567 	if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
5568 		PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
5569 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
5570 
5571 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
5572 		PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
5573 		i40e_dev_handle_vfr_event(dev);
5574 	}
5575 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
5576 		PMD_DRV_LOG(INFO, "ICR0: adminq event");
5577 		i40e_dev_handle_aq_msg(dev);
5578 	}
5579 
5580 	/* Link Status Change interrupt */
5581 	if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
5582 #define I40E_US_PER_SECOND 1000000
5583 		struct rte_eth_link link;
5584 
5585 		PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
5586 		memset(&link, 0, sizeof(link));
5587 		rte_i40e_dev_atomic_read_link_status(dev, &link);
5588 		i40e_dev_link_update(dev, 0);
5589 
5590 		/*
5591 		 * For a link up interrupt, wait 1 second to let the
5592 		 * hardware reach a stable state. Otherwise several consecutive
5593 		 * interrupts can be observed.
5594 		 * For link down interrupt, no need to wait.
5595 		 */
5596 		if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
5597 			i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
5598 			return;
5599 		else
5600 			_rte_eth_dev_callback_process(dev,
5601 				RTE_ETH_EVENT_INTR_LSC);
5602 	}
5603 
5604 done:
5605 	/* Enable interrupt */
5606 	i40e_pf_enable_irq0(hw);
5607 	rte_intr_enable(&(dev->pci_dev->intr_handle));
5608 }
5609 
5610 static int
5611 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
5612 			 struct i40e_macvlan_filter *filter,
5613 			 int total)
5614 {
5615 	int ele_num, ele_buff_size;
5616 	int num, actual_num, i;
5617 	uint16_t flags;
5618 	int ret = I40E_SUCCESS;
5619 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5620 	struct i40e_aqc_add_macvlan_element_data *req_list;
5621 
5622 	if (filter == NULL  || total == 0)
5623 		return I40E_ERR_PARAM;
5624 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5625 	ele_buff_size = hw->aq.asq_buf_size;
5626 
5627 	req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
5628 	if (req_list == NULL) {
5629 		PMD_DRV_LOG(ERR, "Fail to allocate memory");
5630 		return I40E_ERR_NO_MEMORY;
5631 	}
5632 
5633 	num = 0;
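	/*
	 * Program the filters in batches; each batch is limited by the
	 * admin send queue buffer size.
	 */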
5634 	do {
5635 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5636 		memset(req_list, 0, ele_buff_size);
5637 
5638 		for (i = 0; i < actual_num; i++) {
5639 			(void)rte_memcpy(req_list[i].mac_addr,
5640 				&filter[num + i].macaddr, ETH_ADDR_LEN);
5641 			req_list[i].vlan_tag =
5642 				rte_cpu_to_le_16(filter[num + i].vlan_id);
5643 
5644 			switch (filter[num + i].filter_type) {
5645 			case RTE_MAC_PERFECT_MATCH:
5646 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
5647 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5648 				break;
5649 			case RTE_MACVLAN_PERFECT_MATCH:
5650 				flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
5651 				break;
5652 			case RTE_MAC_HASH_MATCH:
5653 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
5654 					I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
5655 				break;
5656 			case RTE_MACVLAN_HASH_MATCH:
5657 				flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
5658 				break;
5659 			default:
5660 				PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
5661 				ret = I40E_ERR_PARAM;
5662 				goto DONE;
5663 			}
5664 
5665 			req_list[i].queue_number = 0;
5666 
5667 			req_list[i].flags = rte_cpu_to_le_16(flags);
5668 		}
5669 
5670 		ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
5671 						actual_num, NULL);
5672 		if (ret != I40E_SUCCESS) {
5673 			PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
5674 			goto DONE;
5675 		}
5676 		num += actual_num;
5677 	} while (num < total);
5678 
5679 DONE:
5680 	rte_free(req_list);
5681 	return ret;
5682 }
5683 
5684 static int
5685 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
5686 			    struct i40e_macvlan_filter *filter,
5687 			    int total)
5688 {
5689 	int ele_num, ele_buff_size;
5690 	int num, actual_num, i;
5691 	uint16_t flags;
5692 	int ret = I40E_SUCCESS;
5693 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5694 	struct i40e_aqc_remove_macvlan_element_data *req_list;
5695 
5696 	if (filter == NULL  || total == 0)
5697 		return I40E_ERR_PARAM;
5698 
5699 	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
5700 	ele_buff_size = hw->aq.asq_buf_size;
5701 
5702 	req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
5703 	if (req_list == NULL) {
5704 		PMD_DRV_LOG(ERR, "Fail to allocate memory");
5705 		return I40E_ERR_NO_MEMORY;
5706 	}
5707 
5708 	num = 0;
5709 	do {
5710 		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
5711 		memset(req_list, 0, ele_buff_size);
5712 
5713 		for (i = 0; i < actual_num; i++) {
5714 			(void)rte_memcpy(req_list[i].mac_addr,
5715 				&filter[num + i].macaddr, ETH_ADDR_LEN);
5716 			req_list[i].vlan_tag =
5717 				rte_cpu_to_le_16(filter[num + i].vlan_id);
5718 
5719 			switch (filter[num + i].filter_type) {
5720 			case RTE_MAC_PERFECT_MATCH:
5721 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5722 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5723 				break;
5724 			case RTE_MACVLAN_PERFECT_MATCH:
5725 				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
5726 				break;
5727 			case RTE_MAC_HASH_MATCH:
5728 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
5729 					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5730 				break;
5731 			case RTE_MACVLAN_HASH_MATCH:
5732 				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
5733 				break;
5734 			default:
5735 				PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
5736 				ret = I40E_ERR_PARAM;
5737 				goto DONE;
5738 			}
5739 			req_list[i].flags = rte_cpu_to_le_16(flags);
5740 		}
5741 
5742 		ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
5743 						actual_num, NULL);
5744 		if (ret != I40E_SUCCESS) {
5745 			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
5746 			goto DONE;
5747 		}
5748 		num += actual_num;
5749 	} while (num < total);
5750 
5751 DONE:
5752 	rte_free(req_list);
5753 	return ret;
5754 }
5755 
5756 /* Find the specified MAC filter */
5757 static struct i40e_mac_filter *
5758 i40e_find_mac_filter(struct i40e_vsi *vsi,
5759 			 struct ether_addr *macaddr)
5760 {
5761 	struct i40e_mac_filter *f;
5762 
5763 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
5764 		if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
5765 			return f;
5766 	}
5767 
5768 	return NULL;
5769 }
5770 
5771 static bool
5772 i40e_find_vlan_filter(struct i40e_vsi *vsi,
5773 			 uint16_t vlan_id)
5774 {
5775 	uint32_t vid_idx, vid_bit;
5776 
5777 	if (vlan_id > ETH_VLAN_ID_MAX)
5778 		return 0;
5779 
5780 	vid_idx = I40E_VFTA_IDX(vlan_id);
5781 	vid_bit = I40E_VFTA_BIT(vlan_id);
5782 
5783 	if (vsi->vfta[vid_idx] & vid_bit)
5784 		return 1;
5785 	else
5786 		return 0;
5787 }
5788 
5789 static void
5790 i40e_set_vlan_filter(struct i40e_vsi *vsi,
5791 			 uint16_t vlan_id, bool on)
5792 {
5793 	uint32_t vid_idx, vid_bit;
5794 
5795 	if (vlan_id > ETH_VLAN_ID_MAX)
5796 		return;
5797 
5798 	vid_idx = I40E_VFTA_IDX(vlan_id);
5799 	vid_bit = I40E_VFTA_BIT(vlan_id);
5800 
5801 	if (on)
5802 		vsi->vfta[vid_idx] |= vid_bit;
5803 	else
5804 		vsi->vfta[vid_idx] &= ~vid_bit;
5805 }
5806 
5807 /**
5808  * Find all vlan options for specific mac addr,
5809  * return with actual vlan found.
5810  */
5811 static inline int
5812 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
5813 			   struct i40e_macvlan_filter *mv_f,
5814 			   int num, struct ether_addr *addr)
5815 {
5816 	int i;
5817 	uint32_t j, k;
5818 
5819 	/**
5820 	 * i40e_find_vlan_filter is deliberately not used here, to reduce
5821 	 * the loop time, even though the code looks more complex.
5822 	 */
5823 	if (num < vsi->vlan_num)
5824 		return I40E_ERR_PARAM;
5825 
5826 	i = 0;
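	/* Walk the VFTA bitmap: each 32-bit entry tracks 32 VLAN ids */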
5827 	for (j = 0; j < I40E_VFTA_SIZE; j++) {
5828 		if (vsi->vfta[j]) {
5829 			for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
5830 				if (vsi->vfta[j] & (1 << k)) {
5831 					if (i > num - 1) {
5832 						PMD_DRV_LOG(ERR, "vlan number "
5833 							    "doesn't match");
5834 						return I40E_ERR_PARAM;
5835 					}
5836 					(void)rte_memcpy(&mv_f[i].macaddr,
5837 							addr, ETH_ADDR_LEN);
5838 					mv_f[i].vlan_id =
5839 						j * I40E_UINT32_BIT_SIZE + k;
5840 					i++;
5841 				}
5842 			}
5843 		}
5844 	}
5845 	return I40E_SUCCESS;
5846 }
5847 
5848 static inline int
5849 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
5850 			   struct i40e_macvlan_filter *mv_f,
5851 			   int num,
5852 			   uint16_t vlan)
5853 {
5854 	int i = 0;
5855 	struct i40e_mac_filter *f;
5856 
5857 	if (num < vsi->mac_num)
5858 		return I40E_ERR_PARAM;
5859 
5860 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
5861 		if (i > num - 1) {
5862 			PMD_DRV_LOG(ERR, "buffer number not match");
5863 			return I40E_ERR_PARAM;
5864 		}
5865 		(void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
5866 				ETH_ADDR_LEN);
5867 		mv_f[i].vlan_id = vlan;
5868 		mv_f[i].filter_type = f->mac_info.filter_type;
5869 		i++;
5870 	}
5871 
5872 	return I40E_SUCCESS;
5873 }
5874 
5875 static int
5876 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
5877 {
5878 	int i, num;
5879 	struct i40e_mac_filter *f;
5880 	struct i40e_macvlan_filter *mv_f;
5881 	int ret = I40E_SUCCESS;
5882 
5883 	if (vsi == NULL || vsi->mac_num == 0)
5884 		return I40E_ERR_PARAM;
5885 
5886 	/* Case that no vlan is set */
5887 	if (vsi->vlan_num == 0)
5888 		num = vsi->mac_num;
5889 	else
5890 		num = vsi->mac_num * vsi->vlan_num;
5891 
5892 	mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
5893 	if (mv_f == NULL) {
5894 		PMD_DRV_LOG(ERR, "failed to allocate memory");
5895 		return I40E_ERR_NO_MEMORY;
5896 	}
5897 
5898 	i = 0;
5899 	if (vsi->vlan_num == 0) {
5900 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
5901 			(void)rte_memcpy(&mv_f[i].macaddr,
5902 				&f->mac_info.mac_addr, ETH_ADDR_LEN);
5903 			mv_f[i].vlan_id = 0;
5904 			i++;
5905 		}
5906 	} else {
5907 		TAILQ_FOREACH(f, &vsi->mac_list, next) {
5908 			ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
5909 					vsi->vlan_num, &f->mac_info.mac_addr);
5910 			if (ret != I40E_SUCCESS)
5911 				goto DONE;
5912 			i += vsi->vlan_num;
5913 		}
5914 	}
5915 
5916 	ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
5917 DONE:
5918 	rte_free(mv_f);
5919 
5920 	return ret;
5921 }
5922 
5923 int
5924 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5925 {
5926 	struct i40e_macvlan_filter *mv_f;
5927 	int mac_num;
5928 	int ret = I40E_SUCCESS;
5929 
5930 	if (!vsi || vlan > ETHER_MAX_VLAN_ID)
5931 		return I40E_ERR_PARAM;
5932 
5933 	/* If it's already set, just return */
5934 	if (i40e_find_vlan_filter(vsi, vlan))
5935 		return I40E_SUCCESS;
5936 
5937 	mac_num = vsi->mac_num;
5938 
5939 	if (mac_num == 0) {
5940 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5941 		return I40E_ERR_PARAM;
5942 	}
5943 
5944 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5945 
5946 	if (mv_f == NULL) {
5947 		PMD_DRV_LOG(ERR, "failed to allocate memory");
5948 		return I40E_ERR_NO_MEMORY;
5949 	}
5950 
5951 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
5952 
5953 	if (ret != I40E_SUCCESS)
5954 		goto DONE;
5955 
5956 	ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
5957 
5958 	if (ret != I40E_SUCCESS)
5959 		goto DONE;
5960 
5961 	i40e_set_vlan_filter(vsi, vlan, 1);
5962 
5963 	vsi->vlan_num++;
5964 	ret = I40E_SUCCESS;
5965 DONE:
5966 	rte_free(mv_f);
5967 	return ret;
5968 }
5969 
5970 int
5971 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
5972 {
5973 	struct i40e_macvlan_filter *mv_f;
5974 	int mac_num;
5975 	int ret = I40E_SUCCESS;
5976 
5977 	/**
5978 	 * Vlan 0 is the generic filter for untagged packets
5979 	 * and can't be removed.
5980 	 */
5981 	if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
5982 		return I40E_ERR_PARAM;
5983 
5984 	/* If can't find it, just return */
5985 	if (!i40e_find_vlan_filter(vsi, vlan))
5986 		return I40E_ERR_PARAM;
5987 
5988 	mac_num = vsi->mac_num;
5989 
5990 	if (mac_num == 0) {
5991 		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
5992 		return I40E_ERR_PARAM;
5993 	}
5994 
5995 	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
5996 
5997 	if (mv_f == NULL) {
5998 		PMD_DRV_LOG(ERR, "failed to allocate memory");
5999 		return I40E_ERR_NO_MEMORY;
6000 	}
6001 
6002 	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6003 
6004 	if (ret != I40E_SUCCESS)
6005 		goto DONE;
6006 
6007 	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6008 
6009 	if (ret != I40E_SUCCESS)
6010 		goto DONE;
6011 
6012 	/* Last VLAN to remove: replace all MAC filters with VLAN 0 filters */
6013 	if (vsi->vlan_num == 1) {
6014 		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6015 		if (ret != I40E_SUCCESS)
6016 			goto DONE;
6017 
6018 		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6019 		if (ret != I40E_SUCCESS)
6020 			goto DONE;
6021 	}
6022 
6023 	i40e_set_vlan_filter(vsi, vlan, 0);
6024 
6025 	vsi->vlan_num--;
6026 	ret = I40E_SUCCESS;
6027 DONE:
6028 	rte_free(mv_f);
6029 	return ret;
6030 }
6031 
6032 int
6033 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
6034 {
6035 	struct i40e_mac_filter *f;
6036 	struct i40e_macvlan_filter *mv_f;
6037 	int i, vlan_num = 0;
6038 	int ret = I40E_SUCCESS;
6039 
6040 	/* If the MAC filter has already been configured, just return */
6041 	f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
6042 	if (f != NULL)
6043 		return I40E_SUCCESS;
6044 	if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
6045 		(mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
6046 
6047 		/**
6048 		 * If vlan_num is 0, this is the first MAC being added;
6049 		 * set the filter mask for vlan_id 0.
6050 		 */
6051 		if (vsi->vlan_num == 0) {
6052 			i40e_set_vlan_filter(vsi, 0, 1);
6053 			vsi->vlan_num = 1;
6054 		}
6055 		vlan_num = vsi->vlan_num;
6056 	} else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
6057 			(mac_filter->filter_type == RTE_MAC_HASH_MATCH))
6058 		vlan_num = 1;
6059 
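	/*
	 * Build one request entry per VLAN, or a single entry for
	 * MAC-only filters.
	 */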
6060 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6061 	if (mv_f == NULL) {
6062 		PMD_DRV_LOG(ERR, "failed to allocate memory");
6063 		return I40E_ERR_NO_MEMORY;
6064 	}
6065 
6066 	for (i = 0; i < vlan_num; i++) {
6067 		mv_f[i].filter_type = mac_filter->filter_type;
6068 		(void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
6069 				ETH_ADDR_LEN);
6070 	}
6071 
6072 	if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6073 		mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
6074 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
6075 					&mac_filter->mac_addr);
6076 		if (ret != I40E_SUCCESS)
6077 			goto DONE;
6078 	}
6079 
6080 	ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
6081 	if (ret != I40E_SUCCESS)
6082 		goto DONE;
6083 
6084 	/* Add the MAC addr to the MAC list */
6085 	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
6086 	if (f == NULL) {
6087 		PMD_DRV_LOG(ERR, "failed to allocate memory");
6088 		ret = I40E_ERR_NO_MEMORY;
6089 		goto DONE;
6090 	}
6091 	(void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
6092 			ETH_ADDR_LEN);
6093 	f->mac_info.filter_type = mac_filter->filter_type;
6094 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
6095 	vsi->mac_num++;
6096 
6097 	ret = I40E_SUCCESS;
6098 DONE:
6099 	rte_free(mv_f);
6100 
6101 	return ret;
6102 }
6103 
6104 int
6105 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
6106 {
6107 	struct i40e_mac_filter *f;
6108 	struct i40e_macvlan_filter *mv_f;
6109 	int i, vlan_num;
6110 	enum rte_mac_filter_type filter_type;
6111 	int ret = I40E_SUCCESS;
6112 
6113 	/* Can't find it, return an error */
6114 	f = i40e_find_mac_filter(vsi, addr);
6115 	if (f == NULL)
6116 		return I40E_ERR_PARAM;
6117 
6118 	vlan_num = vsi->vlan_num;
6119 	filter_type = f->mac_info.filter_type;
6120 	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6121 		filter_type == RTE_MACVLAN_HASH_MATCH) {
6122 		if (vlan_num == 0) {
6123 			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
6124 			return I40E_ERR_PARAM;
6125 		}
6126 	} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
6127 			filter_type == RTE_MAC_HASH_MATCH)
6128 		vlan_num = 1;
6129 
6130 	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
6131 	if (mv_f == NULL) {
6132 		PMD_DRV_LOG(ERR, "failed to allocate memory");
6133 		return I40E_ERR_NO_MEMORY;
6134 	}
6135 
6136 	for (i = 0; i < vlan_num; i++) {
6137 		mv_f[i].filter_type = filter_type;
6138 		(void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6139 				ETH_ADDR_LEN);
6140 	}
6141 	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
6142 			filter_type == RTE_MACVLAN_HASH_MATCH) {
6143 		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
6144 		if (ret != I40E_SUCCESS)
6145 			goto DONE;
6146 	}
6147 
6148 	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
6149 	if (ret != I40E_SUCCESS)
6150 		goto DONE;
6151 
6152 	/* Remove the MAC addr from the MAC list */
6153 	TAILQ_REMOVE(&vsi->mac_list, f, next);
6154 	rte_free(f);
6155 	vsi->mac_num--;
6156 
6157 	ret = I40E_SUCCESS;
6158 DONE:
6159 	rte_free(mv_f);
6160 	return ret;
6161 }
6162 
6163 /* Configure hash enable flags for RSS */
6164 uint64_t
6165 i40e_config_hena(uint64_t flags)
6166 {
6167 	uint64_t hena = 0;
6168 
6169 	if (!flags)
6170 		return hena;
6171 
6172 	if (flags & ETH_RSS_FRAG_IPV4)
6173 		hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
6174 	if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
6175 #ifdef X722_SUPPORT
6176 		hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
6177 			(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
6178 #else
6179 		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
6180 #endif
6181 	if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
6182 #ifdef X722_SUPPORT
6183 		hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
6184 			(1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
6185 			(1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
6186 #else
6187 		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
6188 #endif
6189 	if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
6190 		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
6191 	if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
6192 		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
6193 	if (flags & ETH_RSS_FRAG_IPV6)
6194 		hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
6195 	if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
6196 #ifdef X722_SUPPORT
6197 		hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
6198 			(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
6199 #else
6200 		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
6201 #endif
6202 	if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
6203 #ifdef X722_SUPPORT
6204 		hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
6205 			(1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
6206 			(1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
6207 #else
6208 		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
6209 #endif
6210 	if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
6211 		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
6212 	if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
6213 		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
6214 	if (flags & ETH_RSS_L2_PAYLOAD)
6215 		hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
6216 
6217 	return hena;
6218 }
6219 
6220 /* Parse the hash enable flags */
6221 uint64_t
6222 i40e_parse_hena(uint64_t flags)
6223 {
6224 	uint64_t rss_hf = 0;
6225 
6226 	if (!flags)
6227 		return rss_hf;
6228 	if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
6229 		rss_hf |= ETH_RSS_FRAG_IPV4;
6230 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
6231 		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
6232 #ifdef X722_SUPPORT
6233 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
6234 		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
6235 #endif
6236 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
6237 		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6238 #ifdef X722_SUPPORT
6239 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
6240 		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6241 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
6242 		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
6243 #endif
6244 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
6245 		rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
6246 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
6247 		rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
6248 	if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
6249 		rss_hf |= ETH_RSS_FRAG_IPV6;
6250 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
6251 		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
6252 #ifdef X722_SUPPORT
6253 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
6254 		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
6255 #endif
6256 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
6257 		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6258 #ifdef X722_SUPPORT
6259 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
6260 		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6261 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
6262 		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
6263 #endif
6264 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
6265 		rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
6266 	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
6267 		rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
6268 	if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
6269 		rss_hf |= ETH_RSS_L2_PAYLOAD;
6270 
6271 	return rss_hf;
6272 }
6273 
6274 /* Disable RSS */
6275 static void
6276 i40e_pf_disable_rss(struct i40e_pf *pf)
6277 {
6278 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6279 	uint64_t hena;
6280 
6281 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6282 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6283 	hena &= ~I40E_RSS_HENA_ALL;
6284 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6285 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6286 	I40E_WRITE_FLUSH(hw);
6287 }
6288 
6289 static int
6290 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
6291 {
6292 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6293 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6294 	int ret = 0;
6295 
6296 	if (!key || key_len == 0) {
6297 		PMD_DRV_LOG(DEBUG, "No key to be configured");
6298 		return 0;
6299 	} else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6300 		sizeof(uint32_t)) {
6301 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
6302 		return -EINVAL;
6303 	}
6304 
6305 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6306 		struct i40e_aqc_get_set_rss_key_data *key_dw =
6307 			(struct i40e_aqc_get_set_rss_key_data *)key;
6308 
6309 		ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
6310 		if (ret)
6311 			PMD_INIT_LOG(ERR, "Failed to configure RSS key "
6312 				     "via AQ");
6313 	} else {
6314 		uint32_t *hash_key = (uint32_t *)key;
6315 		uint16_t i;
6316 
6317 		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6318 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]);
6319 		I40E_WRITE_FLUSH(hw);
6320 	}
6321 
6322 	return ret;
6323 }
6324 
6325 static int
6326 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
6327 {
6328 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
6329 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6330 	int ret;
6331 
6332 	if (!key || !key_len)
6333 		return -EINVAL;
6334 
6335 	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
6336 		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
6337 			(struct i40e_aqc_get_set_rss_key_data *)key);
6338 		if (ret) {
6339 			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
6340 			return ret;
6341 		}
6342 	} else {
6343 		uint32_t *key_dw = (uint32_t *)key;
6344 		uint16_t i;
6345 
6346 		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
6347 			key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
6348 	}
6349 	*key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
6350 
6351 	return 0;
6352 }
6353 
6354 static int
6355 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
6356 {
6357 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6358 	uint64_t rss_hf;
6359 	uint64_t hena;
6360 	int ret;
6361 
6362 	ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
6363 			       rss_conf->rss_key_len);
6364 	if (ret)
6365 		return ret;
6366 
6367 	rss_hf = rss_conf->rss_hf;
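	/*
	 * HENA is a 64-bit packet classifier type bitmap split across the
	 * two 32-bit PFQF_HENA registers.
	 */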
6368 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6369 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6370 	hena &= ~I40E_RSS_HENA_ALL;
6371 	hena |= i40e_config_hena(rss_hf);
6372 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
6373 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
6374 	I40E_WRITE_FLUSH(hw);
6375 
6376 	return 0;
6377 }
6378 
6379 static int
6380 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
6381 			 struct rte_eth_rss_conf *rss_conf)
6382 {
6383 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6384 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6385 	uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
6386 	uint64_t hena;
6387 
6388 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6389 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6390 	if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
6391 		if (rss_hf != 0) /* Enable RSS */
6392 			return -EINVAL;
6393 		return 0; /* Nothing to do */
6394 	}
6395 	/* RSS enabled */
6396 	if (rss_hf == 0) /* Disable RSS */
6397 		return -EINVAL;
6398 
6399 	return i40e_hw_rss_hash_set(pf, rss_conf);
6400 }
6401 
6402 static int
6403 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
6404 			   struct rte_eth_rss_conf *rss_conf)
6405 {
6406 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6407 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6408 	uint64_t hena;
6409 
6410 	i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
6411 			 &rss_conf->rss_key_len);
6412 
6413 	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
6414 	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
6415 	rss_conf->rss_hf = i40e_parse_hena(hena);
6416 
6417 	return 0;
6418 }
6419 
6420 static int
6421 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
6422 {
6423 	switch (filter_type) {
6424 	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
6425 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
6426 		break;
6427 	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
6428 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
6429 		break;
6430 	case RTE_TUNNEL_FILTER_IMAC_TENID:
6431 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
6432 		break;
6433 	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
6434 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
6435 		break;
6436 	case ETH_TUNNEL_FILTER_IMAC:
6437 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
6438 		break;
6439 	case ETH_TUNNEL_FILTER_OIP:
6440 		*flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
6441 		break;
6442 	case ETH_TUNNEL_FILTER_IIP:
6443 		*flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
6444 		break;
6445 	default:
6446 		PMD_DRV_LOG(ERR, "invalid tunnel filter type");
6447 		return -EINVAL;
6448 	}
6449 
6450 	return 0;
6451 }
6452 
6453 static int
6454 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
6455 			struct rte_eth_tunnel_filter_conf *tunnel_filter,
6456 			uint8_t add)
6457 {
6458 	uint16_t ip_type;
6459 	uint32_t ipv4_addr;
6460 	uint8_t i, tun_type = 0;
6461 	/* internal variable to convert ipv6 byte order */
6462 	uint32_t convert_ipv6[4];
6463 	int val, ret = 0;
6464 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6465 	struct i40e_vsi *vsi = pf->main_vsi;
6466 	struct i40e_aqc_add_remove_cloud_filters_element_data  *cld_filter;
6467 	struct i40e_aqc_add_remove_cloud_filters_element_data  *pfilter;
6468 
6469 	cld_filter = rte_zmalloc("tunnel_filter",
6470 		sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
6471 		0);
6472 
6473 	if (NULL == cld_filter) {
6474 		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
6475 		return -EINVAL;
6476 	}
6477 	pfilter = cld_filter;
6478 
6479 	ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac);
6480 	ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac);
6481 
6482 	pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan);
6483 	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
6484 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
6485 		ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
6486 		rte_memcpy(&pfilter->ipaddr.v4.data,
6487 				&rte_cpu_to_le_32(ipv4_addr),
6488 				sizeof(pfilter->ipaddr.v4.data));
6489 	} else {
6490 		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
6491 		for (i = 0; i < 4; i++) {
6492 			convert_ipv6[i] =
6493 			rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
6494 		}
6495 		rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6,
6496 				sizeof(pfilter->ipaddr.v6.data));
6497 	}
6498 
6499 	/* check tunneled type */
6500 	switch (tunnel_filter->tunnel_type) {
6501 	case RTE_TUNNEL_TYPE_VXLAN:
6502 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
6503 		break;
6504 	case RTE_TUNNEL_TYPE_NVGRE:
6505 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
6506 		break;
6507 	case RTE_TUNNEL_TYPE_IP_IN_GRE:
6508 		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
6509 		break;
6510 	default:
6511 		/* Other tunnel types are not supported. */
6512 		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
6513 		rte_free(cld_filter);
6514 		return -EINVAL;
6515 	}
6516 
6517 	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
6518 						&pfilter->flags);
6519 	if (val < 0) {
6520 		rte_free(cld_filter);
6521 		return -EINVAL;
6522 	}
6523 
6524 	pfilter->flags |= rte_cpu_to_le_16(
6525 		I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
6526 		ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
6527 	pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
6528 	pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
6529 
6530 	if (add)
6531 		ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
6532 	else
6533 		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
6534 						cld_filter, 1);
6535 
6536 	rte_free(cld_filter);
6537 	return ret;
6538 }
6539 
6540 static int
6541 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
6542 {
6543 	uint8_t i;
6544 
6545 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6546 		if (pf->vxlan_ports[i] == port)
6547 			return i;
6548 	}
6549 
6550 	return -1;
6551 }
6552 
6553 static int
6554 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
6555 {
6556 	int  idx, ret;
6557 	uint8_t filter_idx;
6558 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6559 
6560 	idx = i40e_get_vxlan_port_idx(pf, port);
6561 
6562 	/* Check if port already exists */
6563 	if (idx >= 0) {
6564 		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
6565 		return -EINVAL;
6566 	}
6567 
6568 	/* Now check if there is space to add the new port */
6569 	idx = i40e_get_vxlan_port_idx(pf, 0);
6570 	if (idx < 0) {
6571 		PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
6572 			"not adding port %d", port);
6573 		return -ENOSPC;
6574 	}
6575 
6576 	ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
6577 					&filter_idx, NULL);
6578 	if (ret < 0) {
6579 		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
6580 		return -1;
6581 	}
6582 
6583 	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
6584 			 port,  filter_idx);
6585 
6586 	/* New port: add it and mark its index in the bitmap */
6587 	pf->vxlan_ports[idx] = port;
6588 	pf->vxlan_bitmap |= (1 << idx);
6589 
6590 	if (!(pf->flags & I40E_FLAG_VXLAN))
6591 		pf->flags |= I40E_FLAG_VXLAN;
6592 
6593 	return 0;
6594 }
6595 
6596 static int
6597 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
6598 {
6599 	int idx;
6600 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6601 
6602 	if (!(pf->flags & I40E_FLAG_VXLAN)) {
6603 		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
6604 		return -EINVAL;
6605 	}
6606 
6607 	idx = i40e_get_vxlan_port_idx(pf, port);
6608 
6609 	if (idx < 0) {
6610 		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
6611 		return -EINVAL;
6612 	}
6613 
6614 	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
6615 		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
6616 		return -1;
6617 	}
6618 
6619 	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
6620 			port, idx);
6621 
6622 	pf->vxlan_ports[idx] = 0;
6623 	pf->vxlan_bitmap &= ~(1 << idx);
6624 
6625 	if (!pf->vxlan_bitmap)
6626 		pf->flags &= ~I40E_FLAG_VXLAN;
6627 
6628 	return 0;
6629 }
6630 
6631 /* Add UDP tunneling port */
6632 static int
6633 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
6634 			     struct rte_eth_udp_tunnel *udp_tunnel)
6635 {
6636 	int ret = 0;
6637 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6638 
6639 	if (udp_tunnel == NULL)
6640 		return -EINVAL;
6641 
6642 	switch (udp_tunnel->prot_type) {
6643 	case RTE_TUNNEL_TYPE_VXLAN:
6644 		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
6645 		break;
6646 
6647 	case RTE_TUNNEL_TYPE_GENEVE:
6648 	case RTE_TUNNEL_TYPE_TEREDO:
6649 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
6650 		ret = -1;
6651 		break;
6652 
6653 	default:
6654 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
6655 		ret = -1;
6656 		break;
6657 	}
6658 
6659 	return ret;
6660 }
6661 
6662 /* Remove UDP tunneling port */
6663 static int
6664 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
6665 			     struct rte_eth_udp_tunnel *udp_tunnel)
6666 {
6667 	int ret = 0;
6668 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6669 
6670 	if (udp_tunnel == NULL)
6671 		return -EINVAL;
6672 
6673 	switch (udp_tunnel->prot_type) {
6674 	case RTE_TUNNEL_TYPE_VXLAN:
6675 		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
6676 		break;
6677 	case RTE_TUNNEL_TYPE_GENEVE:
6678 	case RTE_TUNNEL_TYPE_TEREDO:
6679 		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
6680 		ret = -1;
6681 		break;
6682 	default:
6683 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
6684 		ret = -1;
6685 		break;
6686 	}
6687 
6688 	return ret;
6689 }
6690 
6691 /* Calculate the maximum number of contiguous PF queues that are configured */
6692 static int
6693 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
6694 {
6695 	struct rte_eth_dev_data *data = pf->dev_data;
6696 	int i, num;
6697 	struct i40e_rx_queue *rxq;
6698 
6699 	num = 0;
6700 	for (i = 0; i < pf->lan_nb_qps; i++) {
6701 		rxq = data->rx_queues[i];
6702 		if (rxq && rxq->q_set)
6703 			num++;
6704 		else
6705 			break;
6706 	}
6707 
6708 	return num;
6709 }
6710 
6711 /* Configure RSS */
6712 static int
6713 i40e_pf_config_rss(struct i40e_pf *pf)
6714 {
6715 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6716 	struct rte_eth_rss_conf rss_conf;
6717 	uint32_t i, lut = 0;
6718 	uint16_t j, num;
6719 
6720 	/*
6721 	 * If both VMDQ and RSS are enabled, not all PF queues are configured.
6722 	 * It's necessary to calculate the actual PF queues that are configured.
6723 	 */
6724 	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
6725 		num = i40e_pf_calc_configured_queues_num(pf);
6726 	else
6727 		num = pf->dev_data->nb_rx_queues;
6728 
6729 	num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
6730 	PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
6731 			num);
6732 
6733 	if (num == 0) {
6734 		PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
6735 		return -ENOTSUP;
6736 	}
6737 
6738 	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
6739 		if (j == num)
6740 			j = 0;
6741 		lut = (lut << 8) | (j & ((0x1 <<
6742 			hw->func_caps.rss_table_entry_width) - 1));
6743 		if ((i & 3) == 3)
6744 			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
6745 	}
6746 
6747 	rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
6748 	if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
6749 		i40e_pf_disable_rss(pf);
6750 		return 0;
6751 	}
6752 	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
6753 		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
6754 		/* Random default keys */
6755 		static uint32_t rss_key_default[] = {0x6b793944,
6756 			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
6757 			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
6758 			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
6759 
6760 		rss_conf.rss_key = (uint8_t *)rss_key_default;
6761 		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
6762 							sizeof(uint32_t);
6763 	}
6764 
6765 	return i40e_hw_rss_hash_set(pf, &rss_conf);
6766 }
6767 
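/*
 * Sanity-check tunnel filter parameters: queue ID, inner VLAN range, and
 * non-zero outer/inner MAC addresses when the filter type requires them.
 */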
6768 static int
6769 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
6770 			       struct rte_eth_tunnel_filter_conf *filter)
6771 {
6772 	if (pf == NULL || filter == NULL) {
6773 		PMD_DRV_LOG(ERR, "Invalid parameter");
6774 		return -EINVAL;
6775 	}
6776 
6777 	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
6778 		PMD_DRV_LOG(ERR, "Invalid queue ID");
6779 		return -EINVAL;
6780 	}
6781 
6782 	if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
6783 		PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
6784 		return -EINVAL;
6785 	}
6786 
6787 	if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
6788 		(is_zero_ether_addr(&filter->outer_mac))) {
6789 		PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
6790 		return -EINVAL;
6791 	}
6792 
6793 	if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
6794 		(is_zero_ether_addr(&filter->inner_mac))) {
6795 		PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
6796 		return -EINVAL;
6797 	}
6798 
6799 	return 0;
6800 }
6801 
6802 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
6803 #define I40E_GL_PRS_FVBM(_i)     (0x00269760 + ((_i) * 4))
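/*
 * Select the GRE key length: a length of 3 sets the mask-enable bit in
 * GL_PRS_FVBM, a length of 4 clears it; the register is updated via an admin
 * queue debug write only when the value actually changes.
 */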
6804 static int
6805 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
6806 {
6807 	uint32_t val, reg;
6808 	int ret = -EINVAL;
6809 
6810 	val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
6811 	PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
6812 
6813 	if (len == 3) {
6814 		reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
6815 	} else if (len == 4) {
6816 		reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
6817 	} else {
6818 		PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
6819 		return ret;
6820 	}
6821 
6822 	if (reg != val) {
6823 		ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
6824 						   reg, NULL);
6825 		if (ret != 0)
6826 			return ret;
6827 	} else {
6828 		ret = 0;
6829 	}
6830 	PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
6831 		    I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
6832 
6833 	return ret;
6834 }
6835 
6836 static int
6837 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
6838 {
6839 	int ret = -EINVAL;
6840 
6841 	if (!hw || !cfg)
6842 		return -EINVAL;
6843 
6844 	switch (cfg->cfg_type) {
6845 	case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
6846 		ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
6847 		break;
6848 	default:
6849 		PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
6850 		break;
6851 	}
6852 
6853 	return ret;
6854 }
6855 
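/* Handle RTE_ETH_FILTER_NONE operations that change device-global configuration. */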
6856 static int
6857 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
6858 			       enum rte_filter_op filter_op,
6859 			       void *arg)
6860 {
6861 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6862 	int ret = I40E_ERR_PARAM;
6863 
6864 	switch (filter_op) {
6865 	case RTE_ETH_FILTER_SET:
6866 		ret = i40e_dev_global_config_set(hw,
6867 			(struct rte_eth_global_cfg *)arg);
6868 		break;
6869 	default:
6870 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6871 		break;
6872 	}
6873 
6874 	return ret;
6875 }
6876 
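/*
 * Handle operations on tunnel filters: validate the parameters, then add or
 * delete the cloud filter; a NOP merely reports whether VXLAN is enabled.
 */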
6877 static int
6878 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
6879 			  enum rte_filter_op filter_op,
6880 			  void *arg)
6881 {
6882 	struct rte_eth_tunnel_filter_conf *filter;
6883 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6884 	int ret = I40E_SUCCESS;
6885 
6886 	filter = (struct rte_eth_tunnel_filter_conf *)(arg);
6887 
6888 	if (i40e_tunnel_filter_param_check(pf, filter) < 0)
6889 		return I40E_ERR_PARAM;
6890 
6891 	switch (filter_op) {
6892 	case RTE_ETH_FILTER_NOP:
6893 		if (!(pf->flags & I40E_FLAG_VXLAN))
6894 			ret = I40E_NOT_SUPPORTED;
6895 		break;
6896 	case RTE_ETH_FILTER_ADD:
6897 		ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
6898 		break;
6899 	case RTE_ETH_FILTER_DELETE:
6900 		ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
6901 		break;
6902 	default:
6903 		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
6904 		ret = I40E_ERR_PARAM;
6905 		break;
6906 	}
6907 
6908 	return ret;
6909 }
6910 
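/* Configure multi-queue RX: set up RSS when requested, otherwise disable it. */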
6911 static int
6912 i40e_pf_config_mq_rx(struct i40e_pf *pf)
6913 {
6914 	int ret = 0;
6915 	enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
6916 
6917 	/* RSS setup */
6918 	if (mq_mode & ETH_MQ_RX_RSS_FLAG)
6919 		ret = i40e_pf_config_rss(pf);
6920 	else
6921 		i40e_pf_disable_rss(pf);
6922 
6923 	return ret;
6924 }
6925 
6926 /* Get the symmetric hash enable configurations per port */
6927 static void
6928 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
6929 {
6930 	uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
6931 
6932 	*enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
6933 }
6934 
6935 /* Set the symmetric hash enable configurations per port */
6936 static void
6937 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
6938 {
6939 	uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
6940 
6941 	if (enable > 0) {
6942 		if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
6943 			PMD_DRV_LOG(INFO, "Symmetric hash has already "
6944 							"been enabled");
6945 			return;
6946 		}
6947 		reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6948 	} else {
6949 		if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
6950 			PMD_DRV_LOG(INFO, "Symmetric hash has already "
6951 							"been disabled");
6952 			return;
6953 		}
6954 		reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
6955 	}
6956 	i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
6957 	I40E_WRITE_FLUSH(hw);
6958 }
6959 
6960 /*
6961  * Get global configurations of hash function type and symmetric hash enable
6962  * per flow type (pctype). Note that global configuration means it affects all
6963  * the ports on the same NIC.
6964  */
6965 static int
6966 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
6967 				   struct rte_eth_hash_global_conf *g_cfg)
6968 {
6969 	uint32_t reg, mask = I40E_FLOW_TYPES;
6970 	uint16_t i;
6971 	enum i40e_filter_pctype pctype;
6972 
6973 	memset(g_cfg, 0, sizeof(*g_cfg));
6974 	reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
6975 	if (reg & I40E_GLQF_CTL_HTOEP_MASK)
6976 		g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
6977 	else
6978 		g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
6979 	PMD_DRV_LOG(DEBUG, "Hash function is %s",
6980 		(reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
6981 
6982 	for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
6983 		if (!(mask & (1UL << i)))
6984 			continue;
6985 		mask &= ~(1UL << i);
6986 		/* A set bit indicates the corresponding flow type is supported */
6987 		g_cfg->valid_bit_mask[0] |= (1UL << i);
6988 		/* if flowtype is invalid, continue */
6989 		if (!I40E_VALID_FLOW(i))
6990 			continue;
6991 		pctype = i40e_flowtype_to_pctype(i);
6992 		reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype));
6993 		if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
6994 			g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
6995 	}
6996 
6997 	return 0;
6998 }
6999 
7000 static int
7001 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
7002 {
7003 	uint32_t i;
7004 	uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
7005 
7006 	if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
7007 		g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
7008 		g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
7009 		PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
7010 						g_cfg->hash_func);
7011 		return -EINVAL;
7012 	}
7013 
7014 	/*
7015 	 * As i40e supports fewer than 32 flow types, only the first 32 bits need to
7016 	 * be checked.
7017 	 */
7018 	mask0 = g_cfg->valid_bit_mask[0];
7019 	for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
7020 		if (i == 0) {
7021 			/* Check if any unsupported flow type configured */
7022 			if ((mask0 | i40e_mask) ^ i40e_mask)
7023 				goto mask_err;
7024 		} else {
7025 			if (g_cfg->valid_bit_mask[i])
7026 				goto mask_err;
7027 		}
7028 	}
7029 
7030 	return 0;
7031 
7032 mask_err:
7033 	PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
7034 
7035 	return -EINVAL;
7036 }
7037 
7038 /*
7039  * Set global configurations of hash function type and symmetric hash enable
7040  * per flow type (pctype). Note that modifying the global configuration will affect
7041  * all the ports on the same NIC.
7042  */
7043 static int
7044 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
7045 				   struct rte_eth_hash_global_conf *g_cfg)
7046 {
7047 	int ret;
7048 	uint16_t i;
7049 	uint32_t reg;
7050 	uint32_t mask0 = g_cfg->valid_bit_mask[0];
7051 	enum i40e_filter_pctype pctype;
7052 
7053 	/* Check the input parameters */
7054 	ret = i40e_hash_global_config_check(g_cfg);
7055 	if (ret < 0)
7056 		return ret;
7057 
7058 	for (i = 0; mask0 && i < UINT32_BIT; i++) {
7059 		if (!(mask0 & (1UL << i)))
7060 			continue;
7061 		mask0 &= ~(1UL << i);
7062 		/* if flowtype is invalid, continue */
7063 		if (!I40E_VALID_FLOW(i))
7064 			continue;
7065 		pctype = i40e_flowtype_to_pctype(i);
7066 		reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
7067 				I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
7068 		i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
7069 	}
7070 
7071 	reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
7072 	if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
7073 		/* Toeplitz */
7074 		if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
7075 			PMD_DRV_LOG(DEBUG, "Hash function already set to "
7076 								"Toeplitz");
7077 			goto out;
7078 		}
7079 		reg |= I40E_GLQF_CTL_HTOEP_MASK;
7080 	} else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
7081 		/* Simple XOR */
7082 		if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
7083 			PMD_DRV_LOG(DEBUG, "Hash function already set to "
7084 							"Simple XOR");
7085 			goto out;
7086 		}
7087 		reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
7088 	} else
7089 		/* Use the default, and keep it as it is */
7090 		goto out;
7091 
7092 	i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
7093 
7094 out:
7095 	I40E_WRITE_FLUSH(hw);
7096 
7097 	return 0;
7098 }
7099 
7100 /**
7101  * Valid input sets for hash and flow director filters per PCTYPE
7102  */
7103 static uint64_t
7104 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
7105 		enum rte_filter_type filter)
7106 {
7107 	uint64_t valid;
7108 
7109 	static const uint64_t valid_hash_inset_table[] = {
7110 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
7111 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7112 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7113 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
7114 			I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
7115 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7116 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7117 			I40E_INSET_FLEX_PAYLOAD,
7118 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7119 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7120 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7121 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7122 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7123 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7124 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7125 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7126 			I40E_INSET_FLEX_PAYLOAD,
7127 #ifdef X722_SUPPORT
7128 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
7129 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7130 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7131 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7132 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7133 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7134 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7135 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7136 			I40E_INSET_FLEX_PAYLOAD,
7137 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
7138 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7139 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7140 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7141 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7142 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7143 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7144 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7145 			I40E_INSET_FLEX_PAYLOAD,
7146 #endif
7147 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7148 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7149 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7150 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7151 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7152 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7153 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7154 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7155 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
7156 #ifdef X722_SUPPORT
7157 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
7158 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7159 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7160 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7161 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7162 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7163 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7164 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7165 			I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
7166 #endif
7167 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7168 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7169 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7170 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7171 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7172 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7173 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7174 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7175 			I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
7176 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7177 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7178 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7179 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
7180 			I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
7181 			I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
7182 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7183 			I40E_INSET_FLEX_PAYLOAD,
7184 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
7185 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7186 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7187 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7188 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7189 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
7190 			I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
7191 			I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
7192 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7193 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7194 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7195 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7196 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7197 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7198 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7199 			I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
7200 #ifdef X722_SUPPORT
7201 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
7202 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7203 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7204 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7205 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7206 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7207 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7208 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7209 			I40E_INSET_FLEX_PAYLOAD,
7210 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
7211 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7212 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7213 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7214 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7215 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7216 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7217 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7218 			I40E_INSET_FLEX_PAYLOAD,
7219 #endif
7220 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7221 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7222 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7223 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7224 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7225 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7226 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7227 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7228 			I40E_INSET_FLEX_PAYLOAD,
7229 #ifdef X722_SUPPORT
7230 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
7231 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7232 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7233 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7234 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7235 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7236 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7237 			I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
7238 			I40E_INSET_FLEX_PAYLOAD,
7239 #endif
7240 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7241 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7242 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7243 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7244 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7245 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7246 			I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
7247 			I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
7248 			I40E_INSET_FLEX_PAYLOAD,
7249 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7250 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7251 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7252 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
7253 			I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
7254 			I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
7255 			I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
7256 			I40E_INSET_FLEX_PAYLOAD,
7257 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
7258 			I40E_INSET_DMAC | I40E_INSET_SMAC |
7259 			I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7260 			I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
7261 			I40E_INSET_FLEX_PAYLOAD,
7262 	};
7263 
7264 	/**
7265 	 * Flow director supports only fields defined in
7266 	 * union rte_eth_fdir_flow.
7267 	 */
7268 	static const uint64_t valid_fdir_inset_table[] = {
7269 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
7270 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7271 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7272 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
7273 		I40E_INSET_IPV4_TTL,
7274 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7275 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7276 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7277 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7278 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7279 #ifdef X722_SUPPORT
7280 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
7281 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7282 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7283 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7284 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7285 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
7286 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7287 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7288 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7289 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7290 #endif
7291 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7292 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7293 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7294 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7295 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7296 #ifdef X722_SUPPORT
7297 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
7298 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7299 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7300 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7301 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7302 #endif
7303 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7304 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7305 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7306 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
7307 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7308 		I40E_INSET_SCTP_VT,
7309 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7310 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7311 		I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7312 		I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
7313 		I40E_INSET_IPV4_TTL,
7314 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
7315 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7316 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7317 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
7318 		I40E_INSET_IPV6_HOP_LIMIT,
7319 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7320 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7321 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7322 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7323 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7324 #ifdef X722_SUPPORT
7325 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
7326 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7327 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7328 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7329 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7330 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
7331 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7332 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7333 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7334 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7335 #endif
7336 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7337 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7338 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7339 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7340 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7341 #ifdef X722_SUPPORT
7342 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
7343 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7344 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7345 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7346 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7347 #endif
7348 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7349 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7350 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7351 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
7352 		I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7353 		I40E_INSET_SCTP_VT,
7354 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7355 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7356 		I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7357 		I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
7358 		I40E_INSET_IPV6_HOP_LIMIT,
7359 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
7360 		I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
7361 		I40E_INSET_LAST_ETHER_TYPE,
7362 	};
7363 
7364 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
7365 		return 0;
7366 	if (filter == RTE_ETH_FILTER_HASH)
7367 		valid = valid_hash_inset_table[pctype];
7368 	else
7369 		valid = valid_fdir_inset_table[pctype];
7370 
7371 	return valid;
7372 }
7373 
7374 /**
7375  * Validate if the input set is allowed for a specific PCTYPE
7376  */
7377 static int
7378 i40e_validate_input_set(enum i40e_filter_pctype pctype,
7379 		enum rte_filter_type filter, uint64_t inset)
7380 {
7381 	uint64_t valid;
7382 
7383 	valid = i40e_get_valid_input_set(pctype, filter);
7384 	if (inset & (~valid))
7385 		return -EINVAL;
7386 
7387 	return 0;
7388 }
7389 
7390 /* default input set fields combination per pctype */
7391 static uint64_t
7392 i40e_get_default_input_set(uint16_t pctype)
7393 {
7394 	static const uint64_t default_inset_table[] = {
7395 		[I40E_FILTER_PCTYPE_FRAG_IPV4] =
7396 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
7397 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
7398 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7399 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7400 #ifdef X722_SUPPORT
7401 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
7402 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7403 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7404 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
7405 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7406 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7407 #endif
7408 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
7409 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7410 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7411 #ifdef X722_SUPPORT
7412 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
7413 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7414 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7415 #endif
7416 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
7417 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
7418 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7419 			I40E_INSET_SCTP_VT,
7420 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
7421 			I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
7422 		[I40E_FILTER_PCTYPE_FRAG_IPV6] =
7423 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
7424 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
7425 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7426 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7427 #ifdef X722_SUPPORT
7428 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
7429 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7430 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7431 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
7432 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7433 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7434 #endif
7435 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
7436 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7437 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7438 #ifdef X722_SUPPORT
7439 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
7440 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7441 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
7442 #endif
7443 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
7444 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
7445 			I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
7446 			I40E_INSET_SCTP_VT,
7447 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
7448 			I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
7449 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] =
7450 			I40E_INSET_LAST_ETHER_TYPE,
7451 	};
7452 
7453 	if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
7454 		return 0;
7455 
7456 	return default_inset_table[pctype];
7457 }
7458 
7459 /**
7460  * Parse the input set from index to logical bit masks
7461  */
7462 static int
7463 i40e_parse_input_set(uint64_t *inset,
7464 		     enum i40e_filter_pctype pctype,
7465 		     enum rte_eth_input_set_field *field,
7466 		     uint16_t size)
7467 {
7468 	uint16_t i, j;
7469 	int ret = -EINVAL;
7470 
7471 	static const struct {
7472 		enum rte_eth_input_set_field field;
7473 		uint64_t inset;
7474 	} inset_convert_table[] = {
7475 		{RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
7476 		{RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
7477 		{RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
7478 		{RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
7479 		{RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
7480 		{RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
7481 		{RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
7482 		{RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
7483 		{RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
7484 		{RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
7485 		{RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
7486 		{RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
7487 		{RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
7488 		{RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
7489 		{RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
7490 			I40E_INSET_IPV6_NEXT_HDR},
7491 		{RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
7492 			I40E_INSET_IPV6_HOP_LIMIT},
7493 		{RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
7494 		{RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
7495 		{RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
7496 		{RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
7497 		{RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
7498 		{RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
7499 		{RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
7500 			I40E_INSET_SCTP_VT},
7501 		{RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
7502 			I40E_INSET_TUNNEL_DMAC},
7503 		{RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
7504 			I40E_INSET_VLAN_TUNNEL},
7505 		{RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
7506 			I40E_INSET_TUNNEL_ID},
7507 		{RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
7508 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
7509 			I40E_INSET_FLEX_PAYLOAD_W1},
7510 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
7511 			I40E_INSET_FLEX_PAYLOAD_W2},
7512 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
7513 			I40E_INSET_FLEX_PAYLOAD_W3},
7514 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
7515 			I40E_INSET_FLEX_PAYLOAD_W4},
7516 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
7517 			I40E_INSET_FLEX_PAYLOAD_W5},
7518 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
7519 			I40E_INSET_FLEX_PAYLOAD_W6},
7520 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
7521 			I40E_INSET_FLEX_PAYLOAD_W7},
7522 		{RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
7523 			I40E_INSET_FLEX_PAYLOAD_W8},
7524 	};
7525 
7526 	if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
7527 		return ret;
7528 
7529 	/* Only one item allowed for default or all */
7530 	if (size == 1) {
7531 		if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
7532 			*inset = i40e_get_default_input_set(pctype);
7533 			return 0;
7534 		} else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
7535 			*inset = I40E_INSET_NONE;
7536 			return 0;
7537 		}
7538 	}
7539 
7540 	for (i = 0, *inset = 0; i < size; i++) {
7541 		for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
7542 			if (field[i] == inset_convert_table[j].field) {
7543 				*inset |= inset_convert_table[j].inset;
7544 				break;
7545 			}
7546 		}
7547 
7548 		/* It contains an unsupported input set field, return immediately */
7549 		if (j == RTE_DIM(inset_convert_table))
7550 			return ret;
7551 	}
7552 
7553 	return 0;
7554 }
7555 
7556 /**
7557  * Translate the input set from logical bit masks to register-aware bit
7558  * masks
7559  */
7560 static uint64_t
7561 i40e_translate_input_set_reg(uint64_t input)
7562 {
7563 	uint64_t val = 0;
7564 	uint16_t i;
7565 
7566 	static const struct {
7567 		uint64_t inset;
7568 		uint64_t inset_reg;
7569 	} inset_map[] = {
7570 		{I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
7571 		{I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
7572 		{I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
7573 		{I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
7574 		{I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
7575 		{I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
7576 		{I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
7577 		{I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
7578 		{I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
7579 		{I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
7580 		{I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
7581 		{I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
7582 		{I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
7583 		{I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
7584 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
7585 		{I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
7586 		{I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
7587 		{I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
7588 		{I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
7589 		{I40E_INSET_TUNNEL_DMAC,
7590 			I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
7591 		{I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
7592 		{I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
7593 		{I40E_INSET_TUNNEL_SRC_PORT,
7594 			I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
7595 		{I40E_INSET_TUNNEL_DST_PORT,
7596 			I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
7597 		{I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
7598 		{I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
7599 		{I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
7600 		{I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
7601 		{I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
7602 		{I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
7603 		{I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
7604 		{I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
7605 		{I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
7606 	};
7607 
7608 	if (input == 0)
7609 		return val;
7610 
7611 	/* Translate input set to register aware inset */
7612 	for (i = 0; i < RTE_DIM(inset_map); i++) {
7613 		if (input & inset_map[i].inset)
7614 			val |= inset_map[i].inset_reg;
7615 	}
7616 
7617 	return val;
7618 }
7619 
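/*
 * Build the field-mask register values required by the given input set; field
 * combinations that need no mask (e.g. proto + ttl together) are skipped.
 * Returns the number of mask entries filled in, or -EINVAL on overflow.
 */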
7620 static int
7621 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
7622 {
7623 	uint8_t i, idx = 0;
7624 	uint64_t inset_need_mask = inset;
7625 
7626 	static const struct {
7627 		uint64_t inset;
7628 		uint32_t mask;
7629 	} inset_mask_map[] = {
7630 		{I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
7631 		{I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
7632 		{I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
7633 		{I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
7634 		{I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
7635 		{I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
7636 		{I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
7637 		{I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
7638 	};
7639 
7640 	if (!inset || !mask || !nb_elem)
7641 		return 0;
7642 
7643 	for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
7644 		/* Clear the inset bit, if no MASK is required,
7645 		 * for example proto + ttl
7646 		 */
7647 		if ((inset & inset_mask_map[i].inset) ==
7648 		     inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
7649 			inset_need_mask &= ~inset_mask_map[i].inset;
7650 		if (!inset_need_mask)
7651 			return 0;
7652 	}
7653 	for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
7654 		if ((inset_need_mask & inset_mask_map[i].inset) ==
7655 		    inset_mask_map[i].inset) {
7656 			if (idx >= nb_elem) {
7657 				PMD_DRV_LOG(ERR, "exceed maximal number of bitmasks");
7658 				return -EINVAL;
7659 			}
7660 			mask[idx] = inset_mask_map[i].mask;
7661 			idx++;
7662 		}
7663 	}
7664 
7665 	return idx;
7666 }
7667 
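/* Write an RX control register only if the new value differs from the current one. */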
7668 static void
7669 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
7670 {
7671 	uint32_t reg = i40e_read_rx_ctl(hw, addr);
7672 
7673 	PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
7674 	if (reg != val)
7675 		i40e_write_rx_ctl(hw, addr, val);
7676 	PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
7677 		    (uint32_t)i40e_read_rx_ctl(hw, addr));
7678 }
7679 
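/*
 * Program the default input set of every valid pctype into both the flow
 * director and hash inset/mask registers, and cache the values in the PF.
 */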
7680 static void
7681 i40e_filter_input_set_init(struct i40e_pf *pf)
7682 {
7683 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7684 	enum i40e_filter_pctype pctype;
7685 	uint64_t input_set, inset_reg;
7686 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
7687 	int num, i;
7688 
7689 	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
7690 	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
7691 		if (!I40E_VALID_PCTYPE(pctype))
7692 			continue;
7693 		input_set = i40e_get_default_input_set(pctype);
7694 
7695 		num = i40e_generate_inset_mask_reg(input_set, mask_reg,
7696 						   I40E_INSET_MASK_NUM_REG);
7697 		if (num < 0)
7698 			return;
7699 		inset_reg = i40e_translate_input_set_reg(input_set);
7700 
7701 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
7702 				      (uint32_t)(inset_reg & UINT32_MAX));
7703 		i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
7704 				     (uint32_t)((inset_reg >>
7705 				     I40E_32_BIT_WIDTH) & UINT32_MAX));
7706 		i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
7707 				      (uint32_t)(inset_reg & UINT32_MAX));
7708 		i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
7709 				     (uint32_t)((inset_reg >>
7710 				     I40E_32_BIT_WIDTH) & UINT32_MAX));
7711 
7712 		for (i = 0; i < num; i++) {
7713 			i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7714 					     mask_reg[i]);
7715 			i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
7716 					     mask_reg[i]);
7717 		}
7718 		/* clear unused mask registers of the pctype */
7719 		for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
7720 			i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7721 					     0);
7722 			i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
7723 					     0);
7724 		}
7725 		I40E_WRITE_FLUSH(hw);
7726 
7727 		/* store the default input set */
7728 		pf->hash_input_set[pctype] = input_set;
7729 		pf->fdir.input_set[pctype] = input_set;
7730 	}
7731 }
7732 
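/*
 * Select or extend the hash input set for a flow type and program the
 * corresponding GLQF_HASH_INSET and GLQF_HASH_MSK registers.
 */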
7733 int
7734 i40e_hash_filter_inset_select(struct i40e_hw *hw,
7735 			 struct rte_eth_input_set_conf *conf)
7736 {
7737 	struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
7738 	enum i40e_filter_pctype pctype;
7739 	uint64_t input_set, inset_reg = 0;
7740 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
7741 	int ret, i, num;
7742 
7743 	if (!conf) {
7744 		PMD_DRV_LOG(ERR, "Invalid pointer");
7745 		return -EFAULT;
7746 	}
7747 	if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
7748 	    conf->op != RTE_ETH_INPUT_SET_ADD) {
7749 		PMD_DRV_LOG(ERR, "Unsupported input set operation");
7750 		return -EINVAL;
7751 	}
7752 
7753 	if (!I40E_VALID_FLOW(conf->flow_type)) {
7754 		PMD_DRV_LOG(ERR, "invalid flow_type input.");
7755 		return -EINVAL;
7756 	}
7757 
7758 #ifdef X722_SUPPORT
7759 	/* get translated pctype value in fd pctype register */
7760 	pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
7761 		I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype(
7762 		conf->flow_type)));
7763 #else
7764 	pctype = i40e_flowtype_to_pctype(conf->flow_type);
7765 #endif
7766 
7767 	ret = i40e_parse_input_set(&input_set, pctype, conf->field,
7768 				   conf->inset_size);
7769 	if (ret) {
7770 		PMD_DRV_LOG(ERR, "Failed to parse input set");
7771 		return -EINVAL;
7772 	}
7773 	if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_HASH,
7774 				    input_set) != 0) {
7775 		PMD_DRV_LOG(ERR, "Invalid input set");
7776 		return -EINVAL;
7777 	}
7778 	if (conf->op == RTE_ETH_INPUT_SET_ADD) {
7779 		/* get inset value in register */
7780 		inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
7781 		inset_reg <<= I40E_32_BIT_WIDTH;
7782 		inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
7783 		input_set |= pf->hash_input_set[pctype];
7784 	}
7785 	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
7786 					   I40E_INSET_MASK_NUM_REG);
7787 	if (num < 0)
7788 		return -EINVAL;
7789 
7790 	inset_reg |= i40e_translate_input_set_reg(input_set);
7791 
7792 	i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
7793 			      (uint32_t)(inset_reg & UINT32_MAX));
7794 	i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
7795 			     (uint32_t)((inset_reg >>
7796 			     I40E_32_BIT_WIDTH) & UINT32_MAX));
7797 
7798 	for (i = 0; i < num; i++)
7799 		i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
7800 				     mask_reg[i]);
7801 	/* clear unused mask registers of the pctype */
7802 	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
7803 		i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
7804 				     0);
7805 	I40E_WRITE_FLUSH(hw);
7806 
7807 	pf->hash_input_set[pctype] = input_set;
7808 	return 0;
7809 }
7810 
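/*
 * Select or extend the flow director input set for a flow type and program
 * the corresponding PRTQF_FD_INSET and GLQF_FD_MSK registers.
 */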
7811 int
7812 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
7813 			 struct rte_eth_input_set_conf *conf)
7814 {
7815 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7816 	enum i40e_filter_pctype pctype;
7817 	uint64_t input_set, inset_reg = 0;
7818 	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
7819 	int ret, i, num;
7820 
7821 	if (!hw || !conf) {
7822 		PMD_DRV_LOG(ERR, "Invalid pointer");
7823 		return -EFAULT;
7824 	}
7825 	if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
7826 	    conf->op != RTE_ETH_INPUT_SET_ADD) {
7827 		PMD_DRV_LOG(ERR, "Unsupported input set operation");
7828 		return -EINVAL;
7829 	}
7830 
7831 	if (!I40E_VALID_FLOW(conf->flow_type)) {
7832 		PMD_DRV_LOG(ERR, "invalid flow_type input.");
7833 		return -EINVAL;
7834 	}
7835 
7836 	pctype = i40e_flowtype_to_pctype(conf->flow_type);
7837 
7838 	ret = i40e_parse_input_set(&input_set, pctype, conf->field,
7839 				   conf->inset_size);
7840 	if (ret) {
7841 		PMD_DRV_LOG(ERR, "Failed to parse input set");
7842 		return -EINVAL;
7843 	}
7844 	if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
7845 				    input_set) != 0) {
7846 		PMD_DRV_LOG(ERR, "Invalid input set");
7847 		return -EINVAL;
7848 	}
7849 
7850 	/* get inset value in register */
7851 	inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
7852 	inset_reg <<= I40E_32_BIT_WIDTH;
7853 	inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
7854 
7855 	/* Cannot change the inset register for flex payload for fdir,
7856 	 * it is done by writing I40E_PRTQF_FD_FLXINSET
7857 	 * in i40e_set_flex_mask_on_pctype.
7858 	 */
7859 	if (conf->op == RTE_ETH_INPUT_SET_SELECT)
7860 		inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
7861 	else
7862 		input_set |= pf->fdir.input_set[pctype];
7863 	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
7864 					   I40E_INSET_MASK_NUM_REG);
7865 	if (num < 0)
7866 		return -EINVAL;
7867 
7868 	inset_reg |= i40e_translate_input_set_reg(input_set);
7869 
7870 	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
7871 			      (uint32_t)(inset_reg & UINT32_MAX));
7872 	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
7873 			     (uint32_t)((inset_reg >>
7874 			     I40E_32_BIT_WIDTH) & UINT32_MAX));
7875 
7876 	for (i = 0; i < num; i++)
7877 		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7878 				     mask_reg[i]);
7879 	/* clear unused mask registers of the pctype */
7880 	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
7881 		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
7882 				     0);
7883 	I40E_WRITE_FLUSH(hw);
7884 
7885 	pf->fdir.input_set[pctype] = input_set;
7886 	return 0;
7887 }
7888 
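/* Get hash filter info: per-port symmetric hash enable or the global configuration. */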
7889 static int
7890 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7891 {
7892 	int ret = 0;
7893 
7894 	if (!hw || !info) {
7895 		PMD_DRV_LOG(ERR, "Invalid pointer");
7896 		return -EFAULT;
7897 	}
7898 
7899 	switch (info->info_type) {
7900 	case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7901 		i40e_get_symmetric_hash_enable_per_port(hw,
7902 					&(info->info.enable));
7903 		break;
7904 	case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7905 		ret = i40e_get_hash_filter_global_config(hw,
7906 				&(info->info.global_conf));
7907 		break;
7908 	default:
7909 		PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7910 							info->info_type);
7911 		ret = -EINVAL;
7912 		break;
7913 	}
7914 
7915 	return ret;
7916 }
7917 
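/* Set hash filter info: symmetric hash enable, global configuration or input set. */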
7918 static int
7919 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
7920 {
7921 	int ret = 0;
7922 
7923 	if (!hw || !info) {
7924 		PMD_DRV_LOG(ERR, "Invalid pointer");
7925 		return -EFAULT;
7926 	}
7927 
7928 	switch (info->info_type) {
7929 	case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
7930 		i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
7931 		break;
7932 	case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
7933 		ret = i40e_set_hash_filter_global_config(hw,
7934 				&(info->info.global_conf));
7935 		break;
7936 	case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
7937 		ret = i40e_hash_filter_inset_select(hw,
7938 					       &(info->info.input_set_conf));
7939 		break;
7940 
7941 	default:
7942 		PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
7943 							info->info_type);
7944 		ret = -EINVAL;
7945 		break;
7946 	}
7947 
7948 	return ret;
7949 }
7950 
7951 /* Operations for hash function */
7952 static int
7953 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
7954 		      enum rte_filter_op filter_op,
7955 		      void *arg)
7956 {
7957 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7958 	int ret = 0;
7959 
7960 	switch (filter_op) {
7961 	case RTE_ETH_FILTER_NOP:
7962 		break;
7963 	case RTE_ETH_FILTER_GET:
7964 		ret = i40e_hash_filter_get(hw,
7965 			(struct rte_eth_hash_filter_info *)arg);
7966 		break;
7967 	case RTE_ETH_FILTER_SET:
7968 		ret = i40e_hash_filter_set(hw,
7969 			(struct rte_eth_hash_filter_info *)arg);
7970 		break;
7971 	default:
7972 		PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
7973 								filter_op);
7974 		ret = -ENOTSUP;
7975 		break;
7976 	}
7977 
7978 	return ret;
7979 }
7980 
7981 /*
7982  * Configure an ethertype filter, which can direct packets by filtering
7983  * on MAC address and ether_type, or on ether_type only
7984  */
7985 static int
7986 i40e_ethertype_filter_set(struct i40e_pf *pf,
7987 			struct rte_eth_ethertype_filter *filter,
7988 			bool add)
7989 {
7990 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7991 	struct i40e_control_filter_stats stats;
7992 	uint16_t flags = 0;
7993 	int ret;
7994 
7995 	if (filter->queue >= pf->dev_data->nb_rx_queues) {
7996 		PMD_DRV_LOG(ERR, "Invalid queue ID");
7997 		return -EINVAL;
7998 	}
7999 	if (filter->ether_type == ETHER_TYPE_IPv4 ||
8000 		filter->ether_type == ETHER_TYPE_IPv6) {
8001 		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
8002 			" control packet filter.", filter->ether_type);
8003 		return -EINVAL;
8004 	}
8005 	if (filter->ether_type == ETHER_TYPE_VLAN)
8006 		PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
8007 			" not supported.");
8008 
8009 	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
8010 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
8011 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
8012 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
8013 	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
8014 
8015 	memset(&stats, 0, sizeof(stats));
8016 	ret = i40e_aq_add_rem_control_packet_filter(hw,
8017 			filter->mac_addr.addr_bytes,
8018 			filter->ether_type, flags,
8019 			pf->main_vsi->seid,
8020 			filter->queue, add, &stats, NULL);
8021 
8022 	PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
8023 			 " mac_etype_used = %u, etype_used = %u,"
8024 			 " mac_etype_free = %u, etype_free = %u\n",
8025 			 ret, stats.mac_etype_used, stats.etype_used,
8026 			 stats.mac_etype_free, stats.etype_free);
8027 	if (ret < 0)
8028 		return -ENOSYS;
8029 	return 0;
8030 }
8031 
8032 /*
8033  * Handle operations for ethertype filter.
8034  */
8035 static int
8036 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
8037 				enum rte_filter_op filter_op,
8038 				void *arg)
8039 {
8040 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8041 	int ret = 0;
8042 
8043 	if (filter_op == RTE_ETH_FILTER_NOP)
8044 		return ret;
8045 
8046 	if (arg == NULL) {
8047 		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
8048 			    filter_op);
8049 		return -EINVAL;
8050 	}
8051 
8052 	switch (filter_op) {
8053 	case RTE_ETH_FILTER_ADD:
8054 		ret = i40e_ethertype_filter_set(pf,
8055 			(struct rte_eth_ethertype_filter *)arg,
8056 			TRUE);
8057 		break;
8058 	case RTE_ETH_FILTER_DELETE:
8059 		ret = i40e_ethertype_filter_set(pf,
8060 			(struct rte_eth_ethertype_filter *)arg,
8061 			FALSE);
8062 		break;
8063 	default:
8064 		PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
8065 		ret = -ENOSYS;
8066 		break;
8067 	}
8068 	return ret;
8069 }
8070 
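/*
 * Dispatch filter control requests from the generic ethdev filter API to the
 * per-type handlers above. A minimal usage sketch from the application side,
 * assuming a configured port_id (here enabling per-port symmetric hashing):
 *
 *	struct rte_eth_hash_filter_info info = {
 *		.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT,
 *		.info.enable = 1,
 *	};
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *				RTE_ETH_FILTER_SET, &info);
 */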
8071 static int
8072 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
8073 		     enum rte_filter_type filter_type,
8074 		     enum rte_filter_op filter_op,
8075 		     void *arg)
8076 {
8077 	int ret = 0;
8078 
8079 	if (dev == NULL)
8080 		return -EINVAL;
8081 
8082 	switch (filter_type) {
8083 	case RTE_ETH_FILTER_NONE:
8084 		/* For global configuration */
8085 		ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
8086 		break;
8087 	case RTE_ETH_FILTER_HASH:
8088 		ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
8089 		break;
8090 	case RTE_ETH_FILTER_MACVLAN:
8091 		ret = i40e_mac_filter_handle(dev, filter_op, arg);
8092 		break;
8093 	case RTE_ETH_FILTER_ETHERTYPE:
8094 		ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
8095 		break;
8096 	case RTE_ETH_FILTER_TUNNEL:
8097 		ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
8098 		break;
8099 	case RTE_ETH_FILTER_FDIR:
8100 		ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
8101 		break;
8102 	default:
8103 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
8104 							filter_type);
8105 		ret = -EINVAL;
8106 		break;
8107 	}
8108 
8109 	return ret;
8110 }
8111 
8112 /*
8113  * Check and enable Extended Tag.
8114  * Enabling Extended Tag is important for 40G performance.
8115  */
8116 static void
8117 i40e_enable_extended_tag(struct rte_eth_dev *dev)
8118 {
8119 	uint32_t buf = 0;
8120 	int ret;
8121 
8122 	ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
8123 				      PCI_DEV_CAP_REG);
8124 	if (ret < 0) {
8125 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
8126 			    PCI_DEV_CAP_REG);
8127 		return;
8128 	}
8129 	if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
8130 		PMD_DRV_LOG(ERR, "Does not support Extended Tag");
8131 		return;
8132 	}
8133 
8134 	buf = 0;
8135 	ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
8136 				      PCI_DEV_CTRL_REG);
8137 	if (ret < 0) {
8138 		PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
8139 			    PCI_DEV_CTRL_REG);
8140 		return;
8141 	}
8142 	if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
8143 		PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
8144 		return;
8145 	}
8146 	buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
8147 	ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
8148 				       PCI_DEV_CTRL_REG);
8149 	if (ret < 0) {
8150 		PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
8151 			    PCI_DEV_CTRL_REG);
8152 		return;
8153 	}
8154 }
8155 
8156 /*
8157  * As some registers are only reset by a global hardware reset,
8158  * hardware initialization is needed to put those registers into an
8159  * expected initial state.
8160  */
8161 static void
8162 i40e_hw_init(struct rte_eth_dev *dev)
8163 {
8164 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8165 
8166 	i40e_enable_extended_tag(dev);
8167 
8168 	/* clear the PF Queue Filter control register */
8169 	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
8170 
8171 	/* Disable symmetric hash per port */
8172 	i40e_set_symmetric_hash_enable_per_port(hw, 0);
8173 }
8174 
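/* Convert an rte_eth flow type into the corresponding hardware pctype. */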
8175 enum i40e_filter_pctype
8176 i40e_flowtype_to_pctype(uint16_t flow_type)
8177 {
8178 	static const enum i40e_filter_pctype pctype_table[] = {
8179 		[RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
8180 		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
8181 			I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
8182 		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
8183 			I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8184 		[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
8185 			I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
8186 		[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
8187 			I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
8188 		[RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
8189 		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
8190 			I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
8191 		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
8192 			I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
8193 		[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
8194 			I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
8195 		[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
8196 			I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
8197 		[RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
8198 	};
8199 
8200 	return pctype_table[flow_type];
8201 }
8202 
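/* Convert a hardware pctype back into the corresponding rte_eth flow type. */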
8203 uint16_t
8204 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
8205 {
8206 	static const uint16_t flowtype_table[] = {
8207 		[I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
8208 		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8209 			RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
8210 #ifdef X722_SUPPORT
8211 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8212 			RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
8213 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8214 			RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
8215 #endif
8216 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8217 			RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
8218 #ifdef X722_SUPPORT
8219 		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8220 			RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
8221 #endif
8222 		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8223 			RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
8224 		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8225 			RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
8226 		[I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
8227 		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8228 			RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
8229 #ifdef X722_SUPPORT
8230 		[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8231 			RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
8232 		[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8233 			RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
8234 #endif
8235 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8236 			RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
8237 #ifdef X722_SUPPORT
8238 		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8239 			RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
8240 #endif
8241 		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8242 			RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
8243 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8244 			RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
8245 		[I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
8246 	};
8247 
8248 	return flowtype_table[pctype];
8249 }
8250 
8251 /*
8252  * On X710, performance numbers fall far short of expectations on recent
8253  * firmware versions. On XL710, performance also falls short on recent firmware
8254  * versions when promiscuous mode is disabled, or when promiscuous mode is
8255  * enabled and the port MAC address equals the packet destination MAC address.
8256  * The fix for this issue may not be integrated in the following firmware
8257  * version, so a workaround in the software driver is needed. It modifies the
8258  * initial values of 3 internal-only registers for both X710 and XL710. Note
8259  * that the values for X710 and XL710 may differ, and the workaround can be
8260  * removed once this is fixed in firmware.
8261  */
8262 
8263 /* For both X710 and XL710 */
8264 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
8265 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
8266 
8267 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
8268 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
8269 
8270 /* For X710 */
8271 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
8272 /* For XL710 */
8273 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
8274 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
8275 
8276 static int
8277 i40e_dev_sync_phy_type(struct i40e_hw *hw)
8278 {
8279 	enum i40e_status_code status;
8280 	struct i40e_aq_get_phy_abilities_resp phy_ab;
8281 	int ret = -ENOTSUP;
8282 
8283 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
8284 					      NULL);
8285 
8286 	if (status)
8287 		return ret;
8288 
8289 	return 0;
8290 }
8291 
8292 
8293 static void
8294 i40e_configure_registers(struct i40e_hw *hw)
8295 {
8296 	static struct {
8297 		uint32_t addr;
8298 		uint64_t val;
8299 	} reg_table[] = {
8300 		{I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
8301 		{I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
8302 		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
8303 	};
8304 	uint64_t reg;
8305 	uint32_t i;
8306 	int ret;
8307 
8308 	for (i = 0; i < RTE_DIM(reg_table); i++) {
8309 		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
8310 			if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
8311 			    I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
8312 				reg_table[i].val =
8313 					I40E_GL_SWR_PM_UP_THR_SF_VALUE;
8314 			else /* For X710 */
8315 				reg_table[i].val =
8316 					I40E_GL_SWR_PM_UP_THR_EF_VALUE;
8317 		}
8318 
8319 		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
8320 							&reg, NULL);
8321 		if (ret < 0) {
8322 			PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
8323 							reg_table[i].addr);
8324 			break;
8325 		}
8326 		PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
8327 						reg_table[i].addr, reg);
8328 		if (reg == reg_table[i].val)
8329 			continue;
8330 
8331 		ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
8332 						reg_table[i].val, NULL);
8333 		if (ret < 0) {
8334 			PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
8335 				"address of 0x%"PRIx32, reg_table[i].val,
8336 							reg_table[i].addr);
8337 			break;
8338 		}
8339 		PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
8340 			"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
8341 	}
8342 }
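/*
 * The register accesses above are performed through the admin queue debug
 * register interface (i40e_aq_debug_read_register()/
 * i40e_aq_debug_write_register()), which is how the driver reaches the
 * internal-only registers described in the comment preceding the defines.
 */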
8343 
8344 #define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
8345 #define I40E_VSI_TSR_QINQ_CONFIG    0xc030
8346 #define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
8347 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
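/*
 * The VSI_TSR and VSI_L2TAGSTXVALID registers above are programmed in
 * i40e_config_qinq() below via admin queue debug register writes; the QINQ
 * values are assumed to be the hardware-recommended settings for double
 * VLAN (QinQ) RX stripping and TX insertion on a per-VSI basis.
 */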
8348 static int
8349 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
8350 {
8351 	uint32_t reg;
8352 	int ret;
8353 
8354 	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
8355 		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
8356 		return -EINVAL;
8357 	}
8358 
8359 	/* Configure for double VLAN RX stripping */
8360 	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
8361 	if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
8362 		reg |= I40E_VSI_TSR_QINQ_CONFIG;
8363 		ret = i40e_aq_debug_write_register(hw,
8364 						   I40E_VSI_TSR(vsi->vsi_id),
8365 						   reg, NULL);
8366 		if (ret < 0) {
8367 			PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
8368 				    vsi->vsi_id);
8369 			return I40E_ERR_CONFIG;
8370 		}
8371 	}
8372 
8373 	/* Configure for double VLAN TX insertion */
8374 	reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
8375 	if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
8376 		reg = I40E_VSI_L2TAGSTXVALID_QINQ;
8377 		ret = i40e_aq_debug_write_register(hw,
8378 						   I40E_VSI_L2TAGSTXVALID(
8379 						   vsi->vsi_id), reg, NULL);
8380 		if (ret < 0) {
8381 			PMD_DRV_LOG(ERR, "Failed to update "
8382 				"VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
8383 			return I40E_ERR_CONFIG;
8384 		}
8385 	}
8386 
8387 	return 0;
8388 }
8389 
8390 /**
8391  * i40e_aq_add_mirror_rule
8392  * @hw: pointer to the hardware structure
8393  * @seid: VEB seid to add the mirror rule to
8394  * @dst_id: destination VSI seid
8395  * @entries: buffer which contains the entities to be mirrored
8396  * @count: number of entities contained in the buffer
8397  * @rule_id: the rule_id of the added rule, returned by firmware
8398  *
8399  * Add a mirror rule for a given VEB.
8400  *
8401  **/
8402 static enum i40e_status_code
8403 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
8404 			uint16_t seid, uint16_t dst_id,
8405 			uint16_t rule_type, uint16_t *entries,
8406 			uint16_t count, uint16_t *rule_id)
8407 {
8408 	struct i40e_aq_desc desc;
8409 	struct i40e_aqc_add_delete_mirror_rule cmd;
8410 	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
8411 		(struct i40e_aqc_add_delete_mirror_rule_completion *)
8412 		&desc.params.raw;
8413 	uint16_t buff_len;
8414 	enum i40e_status_code status;
8415 
8416 	i40e_fill_default_direct_cmd_desc(&desc,
8417 					  i40e_aqc_opc_add_mirror_rule);
8418 	memset(&cmd, 0, sizeof(cmd));
8419 
8420 	buff_len = sizeof(uint16_t) * count;
8421 	desc.datalen = rte_cpu_to_le_16(buff_len);
8422 	if (buff_len > 0)
8423 		desc.flags |= rte_cpu_to_le_16(
8424 			(uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
8425 	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
8426 				I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
8427 	cmd.num_entries = rte_cpu_to_le_16(count);
8428 	cmd.seid = rte_cpu_to_le_16(seid);
8429 	cmd.destination = rte_cpu_to_le_16(dst_id);
8430 
8431 	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
8432 	status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
8433 	PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
8434 			 " rule_id = %u,"
8435 			 " mirror_rules_used = %u, mirror_rules_free = %u",
8436 			 hw->aq.asq_last_status, resp->rule_id,
8437 			 resp->mirror_rules_used, resp->mirror_rules_free);
8438 	*rule_id = rte_le_to_cpu_16(resp->rule_id);
8439 
8440 	return status;
8441 }
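/*
 * Hypothetical usage sketch (identifiers below are illustrative, not taken
 * from elsewhere in this driver): mirror traffic of two VLANs on a VEB to a
 * destination VSI.
 *
 *	uint16_t vlans[2] = { 100, 200 };
 *	uint16_t rule_id;
 *	enum i40e_status_code st;
 *
 *	st = i40e_aq_add_mirror_rule(hw, veb_seid, dst_vsi_seid,
 *				     I40E_AQC_MIRROR_RULE_TYPE_VLAN,
 *				     vlans, RTE_DIM(vlans), &rule_id);
 *	if (st != I40E_SUCCESS)
 *		PMD_DRV_LOG(ERR, "add mirror rule failed");
 */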
8442 
8443 /**
8444  * i40e_aq_del_mirror_rule
8445  * @hw: pointer to the hardware structure
8446  * @seid: VEB seid to delete the mirror rule from
8447  * @entries: buffer which contains the entities to be mirrored
8448  * @count: number of entities contained in the buffer
8449  * @rule_id: the rule_id of the rule to be deleted
8450  *
8451  * Delete a mirror rule for a given VEB.
8452  *
8453  **/
8454 static enum i40e_status_code
8455 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
8456 		uint16_t seid, uint16_t rule_type, uint16_t *entries,
8457 		uint16_t count, uint16_t rule_id)
8458 {
8459 	struct i40e_aq_desc desc;
8460 	struct i40e_aqc_add_delete_mirror_rule cmd;
8461 	uint16_t buff_len = 0;
8462 	enum i40e_status_code status;
8463 	void *buff = NULL;
8464 
8465 	i40e_fill_default_direct_cmd_desc(&desc,
8466 					  i40e_aqc_opc_delete_mirror_rule);
8467 	memset(&cmd, 0, sizeof(cmd));
8468 	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
8469 		desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
8470 							  I40E_AQ_FLAG_RD));
8471 		cmd.num_entries = count;
8472 		buff_len = sizeof(uint16_t) * count;
8473 		desc.datalen = rte_cpu_to_le_16(buff_len);
8474 		buff = (void *)entries;
8475 	} else
8476 		/* rule_id is carried in the destination field when deleting a mirror rule */
8477 		cmd.destination = rte_cpu_to_le_16(rule_id);
8478 
8479 	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
8480 				I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
8481 	cmd.seid = rte_cpu_to_le_16(seid);
8482 
8483 	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
8484 	status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
8485 
8486 	return status;
8487 }
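/*
 * Note the asymmetry with the add path above: a VLAN mirror rule is deleted
 * by resending the same VLAN entries in the buffer, while any other rule
 * type is deleted by the rule_id carried in the destination field,
 * presumably because that is how the firmware identifies the rule.
 */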
8488 
8489 /**
8490  * i40e_mirror_rule_set
8491  * @dev: pointer to the Ethernet device structure
8492  * @mirror_conf: mirror rule info
8493  * @sw_id: mirror rule's sw_id
8494  * @on: enable/disable
8495  *
8496  * Set a mirror rule.
8497  *
8498  **/
8499 static int
8500 i40e_mirror_rule_set(struct rte_eth_dev *dev,
8501 			struct rte_eth_mirror_conf *mirror_conf,
8502 			uint8_t sw_id, uint8_t on)
8503 {
8504 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8505 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8506 	struct i40e_mirror_rule *it, *mirr_rule = NULL;
8507 	struct i40e_mirror_rule *parent = NULL;
8508 	uint16_t seid, dst_seid, rule_id;
8509 	uint16_t i, j = 0;
8510 	int ret;
8511 
8512 	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
8513 
8514 	if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
8515 		PMD_DRV_LOG(ERR, "mirror rule cannot be configured"
8516 			" without a VEB or VFs.");
8517 		return -ENOSYS;
8518 	}
8519 	if (pf->nb_mirror_rule >= I40E_MAX_MIRROR_RULES) {
8520 		PMD_DRV_LOG(ERR, "mirror table is full.");
8521 		return -ENOSPC;
8522 	}
8523 	if (mirror_conf->dst_pool > pf->vf_num) {
8524 		PMD_DRV_LOG(ERR, "invalid destination pool %u.",
8525 				 mirror_conf->dst_pool);
8526 		return -EINVAL;
8527 	}
8528 
8529 	seid = pf->main_vsi->veb->seid;
8530 
8531 	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
8532 		if (sw_id <= it->index) {
8533 			mirr_rule = it;
8534 			break;
8535 		}
8536 		parent = it;
8537 	}
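	/*
	 * At this point the mirror list (kept sorted by sw_id) has been
	 * scanned: mirr_rule points at an existing entry whose index is
	 * >= sw_id, if any, and parent is the entry after which a new rule
	 * would be inserted.
	 */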
8538 	if (mirr_rule && sw_id == mirr_rule->index) {
8539 		if (on) {
8540 			PMD_DRV_LOG(ERR, "mirror rule exists.");
8541 			return -EEXIST;
8542 		} else {
8543 			ret = i40e_aq_del_mirror_rule(hw, seid,
8544 					mirr_rule->rule_type,
8545 					mirr_rule->entries,
8546 					mirr_rule->num_entries, mirr_rule->id);
8547 			if (ret < 0) {
8548 				PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
8549 						   " ret = %d, aq_err = %d.",
8550 						   ret, hw->aq.asq_last_status);
8551 				return -ENOSYS;
8552 			}
8553 			TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
8554 			rte_free(mirr_rule);
8555 			pf->nb_mirror_rule--;
8556 			return 0;
8557 		}
8558 	} else if (!on) {
8559 		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
8560 		return -ENOENT;
8561 	}
8562 
8563 	mirr_rule = rte_zmalloc("i40e_mirror_rule",
8564 				sizeof(struct i40e_mirror_rule) , 0);
8565 	if (!mirr_rule) {
8566 		PMD_DRV_LOG(ERR, "failed to allocate memory");
8567 		return I40E_ERR_NO_MEMORY;
8568 	}
8569 	switch (mirror_conf->rule_type) {
8570 	case ETH_MIRROR_VLAN:
8571 		for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
8572 			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
8573 				mirr_rule->entries[j] =
8574 					mirror_conf->vlan.vlan_id[i];
8575 				j++;
8576 			}
8577 		}
8578 		if (j == 0) {
8579 			PMD_DRV_LOG(ERR, "vlan is not specified.");
8580 			rte_free(mirr_rule);
8581 			return -EINVAL;
8582 		}
8583 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
8584 		break;
8585 	case ETH_MIRROR_VIRTUAL_POOL_UP:
8586 	case ETH_MIRROR_VIRTUAL_POOL_DOWN:
8587 		/* check if the specified pool bit is out of range */
8588 		if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
8589 			PMD_DRV_LOG(ERR, "pool mask is out of range.");
8590 			rte_free(mirr_rule);
8591 			return -EINVAL;
8592 		}
8593 		for (i = 0, j = 0; i < pf->vf_num; i++) {
8594 			if (mirror_conf->pool_mask & (1ULL << i)) {
8595 				mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
8596 				j++;
8597 			}
8598 		}
8599 		if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
8600 			/* add pf vsi to entries */
8601 			mirr_rule->entries[j] = pf->main_vsi_seid;
8602 			j++;
8603 		}
8604 		if (j == 0) {
8605 			PMD_DRV_LOG(ERR, "pool is not specified.");
8606 			rte_free(mirr_rule);
8607 			return -EINVAL;
8608 		}
8609 		/* egress and ingress in AQ commands refer to the switch side, not the port */
8610 		mirr_rule->rule_type =
8611 			(mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
8612 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
8613 			I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
8614 		break;
8615 	case ETH_MIRROR_UPLINK_PORT:
8616 		/* egress and ingress in AQ commands refer to the switch side, not the port */
8617 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
8618 		break;
8619 	case ETH_MIRROR_DOWNLINK_PORT:
8620 		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
8621 		break;
8622 	default:
8623 		PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
8624 			mirror_conf->rule_type);
8625 		rte_free(mirr_rule);
8626 		return -EINVAL;
8627 	}
8628 
8629 	/* If the dst_pool is equal to vf_num, consider it as PF */
8630 	if (mirror_conf->dst_pool == pf->vf_num)
8631 		dst_seid = pf->main_vsi_seid;
8632 	else
8633 		dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
8634 
8635 	ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
8636 				      mirr_rule->rule_type, mirr_rule->entries,
8637 				      j, &rule_id);
8638 	if (ret < 0) {
8639 		PMD_DRV_LOG(ERR, "failed to add mirror rule:"
8640 				   " ret = %d, aq_err = %d.",
8641 				   ret, hw->aq.asq_last_status);
8642 		rte_free(mirr_rule);
8643 		return -ENOSYS;
8644 	}
8645 
8646 	mirr_rule->index = sw_id;
8647 	mirr_rule->num_entries = j;
8648 	mirr_rule->id = rule_id;
8649 	mirr_rule->dst_vsi_seid = dst_seid;
8650 
8651 	if (parent)
8652 		TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
8653 	else
8654 		TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
8655 
8656 	pf->nb_mirror_rule++;
8657 	return 0;
8658 }
8659 
8660 /**
8661  * i40e_mirror_rule_reset
8662  * @dev: pointer to the device
8663  * @sw_id: mirror rule's sw_id
8664  *
8665  * reset a mirror rule.
8666  *
8667  **/
8668 static int
8669 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
8670 {
8671 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8672 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8673 	struct i40e_mirror_rule *it, *mirr_rule = NULL;
8674 	uint16_t seid;
8675 	int ret;
8676 
8677 	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
8678 
8679 	seid = pf->main_vsi->veb->seid;
8680 
8681 	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
8682 		if (sw_id == it->index) {
8683 			mirr_rule = it;
8684 			break;
8685 		}
8686 	}
8687 	if (mirr_rule) {
8688 		ret = i40e_aq_del_mirror_rule(hw, seid,
8689 				mirr_rule->rule_type,
8690 				mirr_rule->entries,
8691 				mirr_rule->num_entries, mirr_rule->id);
8692 		if (ret < 0) {
8693 			PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
8694 					   " status = %d, aq_err = %d.",
8695 					   ret, hw->aq.asq_last_status);
8696 			return -ENOSYS;
8697 		}
8698 		TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
8699 		rte_free(mirr_rule);
8700 		pf->nb_mirror_rule--;
8701 	} else {
8702 		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
8703 		return -ENOENT;
8704 	}
8705 	return 0;
8706 }
8707 
8708 static uint64_t
8709 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
8710 {
8711 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8712 	uint64_t systim_cycles;
8713 
8714 	systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
8715 	systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
8716 			<< 32;
8717 
8718 	return systim_cycles;
8719 }
8720 
8721 static uint64_t
8722 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
8723 {
8724 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8725 	uint64_t rx_tstamp;
8726 
8727 	rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
8728 	rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
8729 			<< 32;
8730 
8731 	return rx_tstamp;
8732 }
8733 
8734 static uint64_t
8735 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
8736 {
8737 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8738 	uint64_t tx_tstamp;
8739 
8740 	tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
8741 	tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
8742 			<< 32;
8743 
8744 	return tx_tstamp;
8745 }
8746 
8747 static void
8748 i40e_start_timecounters(struct rte_eth_dev *dev)
8749 {
8750 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8751 	struct i40e_adapter *adapter =
8752 			(struct i40e_adapter *)dev->data->dev_private;
8753 	struct rte_eth_link link;
8754 	uint32_t tsync_inc_l;
8755 	uint32_t tsync_inc_h;
8756 
8757 	/* Get current link speed. */
8758 	memset(&link, 0, sizeof(link));
8759 	i40e_dev_link_update(dev, 1);
8760 	rte_i40e_dev_atomic_read_link_status(dev, &link);
8761 
8762 	switch (link.link_speed) {
8763 	case ETH_SPEED_NUM_40G:
8764 		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
8765 		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
8766 		break;
8767 	case ETH_SPEED_NUM_10G:
8768 		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
8769 		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
8770 		break;
8771 	case ETH_SPEED_NUM_1G:
8772 		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
8773 		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
8774 		break;
8775 	default:
8776 		tsync_inc_l = 0x0;
8777 		tsync_inc_h = 0x0;
8778 	}
8779 
8780 	/* Set the timesync increment value. */
8781 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
8782 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
8783 
8784 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
8785 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
8786 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
8787 
8788 	adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
8789 	adapter->systime_tc.cc_shift = 0;
8790 	adapter->systime_tc.nsec_mask = 0;
8791 
8792 	adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
8793 	adapter->rx_tstamp_tc.cc_shift = 0;
8794 	adapter->rx_tstamp_tc.nsec_mask = 0;
8795 
8796 	adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
8797 	adapter->tx_tstamp_tc.cc_shift = 0;
8798 	adapter->tx_tstamp_tc.nsec_mask = 0;
8799 }
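/*
 * A note on the timecounter setup above: with cc_shift = 0 and no nsec
 * mask, rte_timecounter_update() treats raw PRTTSYN_TIME cycle deltas as
 * nanoseconds directly; the per-link-speed INCVAL programmed into
 * I40E_PRTTSYN_INC_L/H is presumably what makes the hardware counter tick
 * at (roughly) one count per nanosecond regardless of link speed.
 */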
8800 
8801 static int
8802 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
8803 {
8804 	struct i40e_adapter *adapter =
8805 			(struct i40e_adapter *)dev->data->dev_private;
8806 
8807 	adapter->systime_tc.nsec += delta;
8808 	adapter->rx_tstamp_tc.nsec += delta;
8809 	adapter->tx_tstamp_tc.nsec += delta;
8810 
8811 	return 0;
8812 }
8813 
8814 static int
8815 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
8816 {
8817 	uint64_t ns;
8818 	struct i40e_adapter *adapter =
8819 			(struct i40e_adapter *)dev->data->dev_private;
8820 
8821 	ns = rte_timespec_to_ns(ts);
8822 
8823 	/* Set the timecounters to a new value. */
8824 	adapter->systime_tc.nsec = ns;
8825 	adapter->rx_tstamp_tc.nsec = ns;
8826 	adapter->tx_tstamp_tc.nsec = ns;
8827 
8828 	return 0;
8829 }
8830 
8831 static int
8832 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
8833 {
8834 	uint64_t ns, systime_cycles;
8835 	struct i40e_adapter *adapter =
8836 			(struct i40e_adapter *)dev->data->dev_private;
8837 
8838 	systime_cycles = i40e_read_systime_cyclecounter(dev);
8839 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
8840 	*ts = rte_ns_to_timespec(ns);
8841 
8842 	return 0;
8843 }
8844 
8845 static int
8846 i40e_timesync_enable(struct rte_eth_dev *dev)
8847 {
8848 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8849 	uint32_t tsync_ctl_l;
8850 	uint32_t tsync_ctl_h;
8851 
8852 	/* Stop the timesync system time. */
8853 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
8854 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
8855 	/* Reset the timesync system time value. */
8856 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
8857 	I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
8858 
8859 	i40e_start_timecounters(dev);
8860 
8861 	/* Clear timesync registers. */
8862 	I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
8863 	I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
8864 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
8865 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
8866 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
8867 	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
8868 
8869 	/* Enable timestamping of PTP packets. */
8870 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
8871 	tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
8872 
8873 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
8874 	tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
8875 	tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
8876 
8877 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
8878 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
8879 
8880 	return 0;
8881 }
8882 
8883 static int
8884 i40e_timesync_disable(struct rte_eth_dev *dev)
8885 {
8886 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8887 	uint32_t tsync_ctl_l;
8888 	uint32_t tsync_ctl_h;
8889 
8890 	/* Disable timestamping of transmitted PTP packets. */
8891 	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
8892 	tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
8893 
8894 	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
8895 	tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
8896 
8897 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
8898 	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
8899 
8900 	/* Reset the timesync increment value. */
8901 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
8902 	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
8903 
8904 	return 0;
8905 }
8906 
8907 static int
8908 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
8909 				struct timespec *timestamp, uint32_t flags)
8910 {
8911 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8912 	struct i40e_adapter *adapter =
8913 		(struct i40e_adapter *)dev->data->dev_private;
8914 
8915 	uint32_t sync_status;
8916 	uint32_t index = flags & 0x03;
8917 	uint64_t rx_tstamp_cycles;
8918 	uint64_t ns;
8919 
8920 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
8921 	if ((sync_status & (1 << index)) == 0)
8922 		return -EINVAL;
8923 
8924 	rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
8925 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
8926 	*timestamp = rte_ns_to_timespec(ns);
8927 
8928 	return 0;
8929 }
8930 
8931 static int
8932 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
8933 				struct timespec *timestamp)
8934 {
8935 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8936 	struct i40e_adapter *adapter =
8937 		(struct i40e_adapter *)dev->data->dev_private;
8938 
8939 	uint32_t sync_status;
8940 	uint64_t tx_tstamp_cycles;
8941 	uint64_t ns;
8942 
8943 	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
8944 	if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
8945 		return -EINVAL;
8946 
8947 	tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
8948 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
8949 	*timestamp = rte_ns_to_timespec(ns);
8950 
8951 	return 0;
8952 }
8953 
8954 /*
8955  * i40e_parse_dcb_configure - parse the DCB configuration from the user
8956  * @dev: the device being configured
8957  * @dcb_cfg: pointer to the parsed result
8958  * @tc_map: bit map of enabled traffic classes
8959  *
8960  * Returns 0 on success, negative value on failure
8961  */
8962 static int
8963 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
8964 			 struct i40e_dcbx_config *dcb_cfg,
8965 			 uint8_t *tc_map)
8966 {
8967 	struct rte_eth_dcb_rx_conf *dcb_rx_conf;
8968 	uint8_t i, tc_bw, bw_lf;
8969 
8970 	memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
8971 
8972 	dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
8973 	if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
8974 		PMD_INIT_LOG(ERR, "number of tc exceeds max.");
8975 		return -EINVAL;
8976 	}
8977 
8978 	/* assume each tc has the same bw */
8979 	tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
8980 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
8981 		dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
8982 	/* to ensure the sum of tcbw is equal to 100 */
8983 	bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
8984 	for (i = 0; i < bw_lf; i++)
8985 		dcb_cfg->etscfg.tcbwtable[i]++;
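	/*
	 * Worked example: with nb_tcs = 3, tc_bw = 100 / 3 = 33 and
	 * bw_lf = 100 % 3 = 1, so the table becomes {34, 33, 33} and the
	 * shares still sum to 100.
	 */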
8986 
8987 	/* assume each tc has the same Transmission Selection Algorithm */
8988 	for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
8989 		dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
8990 
8991 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
8992 		dcb_cfg->etscfg.prioritytable[i] =
8993 				dcb_rx_conf->dcb_tc[i];
8994 
8995 	/* FW needs one App to configure HW */
8996 	dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
8997 	dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
8998 	dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
8999 	dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
9000 
9001 	if (dcb_rx_conf->nb_tcs == 0)
9002 		*tc_map = 1; /* tc0 only */
9003 	else
9004 		*tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
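		/* e.g. nb_tcs = 4 yields a tc_map of 0x0F (TC0-TC3 enabled) */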
9005 
9006 	if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
9007 		dcb_cfg->pfc.willing = 0;
9008 		dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
9009 		dcb_cfg->pfc.pfcenable = *tc_map;
9010 	}
9011 	return 0;
9012 }
9013 
9014 
9015 static enum i40e_status_code
9016 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
9017 			      struct i40e_aqc_vsi_properties_data *info,
9018 			      uint8_t enabled_tcmap)
9019 {
9020 	enum i40e_status_code ret;
9021 	int i, total_tc = 0;
9022 	uint16_t qpnum_per_tc, bsf, qp_idx;
9023 	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
9024 	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
9025 	uint16_t used_queues;
9026 
9027 	ret = validate_tcmap_parameter(vsi, enabled_tcmap);
9028 	if (ret != I40E_SUCCESS)
9029 		return ret;
9030 
9031 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9032 		if (enabled_tcmap & (1 << i))
9033 			total_tc++;
9034 	}
9035 	if (total_tc == 0)
9036 		total_tc = 1;
9037 	vsi->enabled_tc = enabled_tcmap;
9038 
9039 	/* Different VSI types have different numbers of queues assigned */
9040 	if (vsi->type == I40E_VSI_MAIN)
9041 		used_queues = dev_data->nb_rx_queues -
9042 			pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
9043 	else if (vsi->type == I40E_VSI_VMDQ2)
9044 		used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
9045 	else {
9046 		PMD_INIT_LOG(ERR, "unsupported VSI type.");
9047 		return I40E_ERR_NO_AVAILABLE_VSI;
9048 	}
9049 
9050 	qpnum_per_tc = used_queues / total_tc;
9051 	/* Number of queues per enabled TC */
9052 	if (qpnum_per_tc == 0) {
9053 		PMD_INIT_LOG(ERR, "number of queues is less than the number of TCs.");
9054 		return I40E_ERR_INVALID_QP_ID;
9055 	}
9056 	qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
9057 				I40E_MAX_Q_PER_TC);
9058 	bsf = rte_bsf32(qpnum_per_tc);
9059 
9060 	/**
9061 	 * Configure TC and queue mapping parameters. For each enabled TC,
9062 	 * allocate qpnum_per_tc queues to that traffic class; a disabled TC
9063 	 * is served by the default queue.
9064 	 */
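	/*
	 * For example (an illustration, not a hardware requirement): with 16
	 * usable queues and two enabled TCs, qpnum_per_tc = 8 and bsf = 3, so
	 * TC0 is given queues 0-7 and TC1 queues 8-15, each encoded below as
	 * a queue offset plus a power-of-two queue count (1 << bsf).
	 */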
9065 	qp_idx = 0;
9066 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9067 		if (vsi->enabled_tc & (1 << i)) {
9068 			info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
9069 					I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
9070 				(bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
9071 			qp_idx += qpnum_per_tc;
9072 		} else
9073 			info->tc_mapping[i] = 0;
9074 	}
9075 
9076 	/* Associate queue numbers with the VSI; keep vsi->nb_qps unchanged */
9077 	if (vsi->type == I40E_VSI_SRIOV) {
9078 		info->mapping_flags |=
9079 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
9080 		for (i = 0; i < vsi->nb_qps; i++)
9081 			info->queue_mapping[i] =
9082 				rte_cpu_to_le_16(vsi->base_queue + i);
9083 	} else {
9084 		info->mapping_flags |=
9085 			rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
9086 		info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
9087 	}
9088 	info->valid_sections |=
9089 		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
9090 
9091 	return I40E_SUCCESS;
9092 }
9093 
9094 /*
9095  * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
9096  * @veb: VEB to be configured
9097  * @tc_map: enabled TC bitmap
9098  *
9099  * Returns 0 on success, negative value on failure
9100  */
9101 static enum i40e_status_code
9102 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
9103 {
9104 	struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
9105 	struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
9106 	struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
9107 	struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
9108 	enum i40e_status_code ret = I40E_SUCCESS;
9109 	int i;
9110 	uint32_t bw_max;
9111 
9112 	/* Check if enabled_tc is same as existing or new TCs */
9113 	if (veb->enabled_tc == tc_map)
9114 		return ret;
9115 
9116 	/* configure tc bandwidth */
9117 	memset(&veb_bw, 0, sizeof(veb_bw));
9118 	veb_bw.tc_valid_bits = tc_map;
9119 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
9120 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9121 		if (tc_map & BIT_ULL(i))
9122 			veb_bw.tc_bw_share_credits[i] = 1;
9123 	}
9124 	ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
9125 						   &veb_bw, NULL);
9126 	if (ret) {
9127 		PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation"
9128 				  " per TC failed = %d",
9129 				  hw->aq.asq_last_status);
9130 		return ret;
9131 	}
9132 
9133 	memset(&ets_query, 0, sizeof(ets_query));
9134 	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
9135 						   &ets_query, NULL);
9136 	if (ret != I40E_SUCCESS) {
9137 		PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS"
9138 				 " configuration %u", hw->aq.asq_last_status);
9139 		return ret;
9140 	}
9141 	memset(&bw_query, 0, sizeof(bw_query));
9142 	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
9143 						  &bw_query, NULL);
9144 	if (ret != I40E_SUCCESS) {
9145 		PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth"
9146 				 " configuration %u", hw->aq.asq_last_status);
9147 		return ret;
9148 	}
9149 
9150 	/* store and print out BW info */
9151 	veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
9152 	veb->bw_info.bw_max = ets_query.tc_bw_max;
9153 	PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
9154 	PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
9155 	bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
9156 		    (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
9157 		     I40E_16_BIT_WIDTH);
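	/*
	 * tc_bw_max is reported as two little-endian 16-bit words; combined
	 * into bw_max above they carry a 4-bit field per TC (top bit
	 * reserved), which the loop below extracts.
	 */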
9158 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9159 		veb->bw_info.bw_ets_share_credits[i] =
9160 				bw_query.tc_bw_share_credits[i];
9161 		veb->bw_info.bw_ets_credits[i] =
9162 				rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
9163 		/* 4 bits per TC, 4th bit is reserved */
9164 		veb->bw_info.bw_ets_max[i] =
9165 			(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
9166 				  RTE_LEN2MASK(3, uint8_t));
9167 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
9168 			    veb->bw_info.bw_ets_share_credits[i]);
9169 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
9170 			    veb->bw_info.bw_ets_credits[i]);
9171 		PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
9172 			    veb->bw_info.bw_ets_max[i]);
9173 	}
9174 
9175 	veb->enabled_tc = tc_map;
9176 
9177 	return ret;
9178 }
9179 
9180 
9181 /*
9182  * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
9183  * @vsi: VSI to be configured
9184  * @tc_map: enabled TC bitmap
9185  *
9186  * Returns 0 on success, negative value on failure
9187  */
9188 static enum i40e_status_code
9189 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
9190 {
9191 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
9192 	struct i40e_vsi_context ctxt;
9193 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
9194 	enum i40e_status_code ret = I40E_SUCCESS;
9195 	int i;
9196 
9197 	/* Check if enabled_tc is same as existing or new TCs */
9198 	if (vsi->enabled_tc == tc_map)
9199 		return ret;
9200 
9201 	/* configure tc bandwidth */
9202 	memset(&bw_data, 0, sizeof(bw_data));
9203 	bw_data.tc_valid_bits = tc_map;
9204 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
9205 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9206 		if (tc_map & BIT_ULL(i))
9207 			bw_data.tc_bw_credits[i] = 1;
9208 	}
9209 	ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
9210 	if (ret) {
9211 		PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
9212 			" per TC failed = %d",
9213 			hw->aq.asq_last_status);
9214 		goto out;
9215 	}
9216 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
9217 		vsi->info.qs_handle[i] = bw_data.qs_handles[i];
9218 
9219 	/* Update Queue Pairs Mapping for currently enabled UPs */
9220 	ctxt.seid = vsi->seid;
9221 	ctxt.pf_num = hw->pf_id;
9222 	ctxt.vf_num = 0;
9223 	ctxt.uplink_seid = vsi->uplink_seid;
9224 	ctxt.info = vsi->info;
9225 	i40e_get_cap(hw);
9226 	ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
9227 	if (ret)
9228 		goto out;
9229 
9230 	/* Update the VSI after updating the VSI queue-mapping information */
9231 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
9232 	if (ret) {
9233 		PMD_INIT_LOG(ERR, "Failed to configure "
9234 			    "TC queue mapping = %d",
9235 			    hw->aq.asq_last_status);
9236 		goto out;
9237 	}
9238 	/* update the local VSI info with updated queue map */
9239 	(void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
9240 					sizeof(vsi->info.tc_mapping));
9241 	(void)rte_memcpy(&vsi->info.queue_mapping,
9242 			&ctxt.info.queue_mapping,
9243 		sizeof(vsi->info.queue_mapping));
9244 	vsi->info.mapping_flags = ctxt.info.mapping_flags;
9245 	vsi->info.valid_sections = 0;
9246 
9247 	/* query and update current VSI BW information */
9248 	ret = i40e_vsi_get_bw_config(vsi);
9249 	if (ret) {
9250 		PMD_INIT_LOG(ERR,
9251 			 "Failed updating vsi bw info, err %s aq_err %s",
9252 			 i40e_stat_str(hw, ret),
9253 			 i40e_aq_str(hw, hw->aq.asq_last_status));
9254 		goto out;
9255 	}
9256 
9257 	vsi->enabled_tc = tc_map;
9258 
9259 out:
9260 	return ret;
9261 }
9262 
9263 /*
9264  * i40e_dcb_hw_configure - program the dcb setting to hw
9265  * @pf: pf the configuration is taken on
9266  * @new_cfg: new configuration
9267  * @tc_map: enabled TC bitmap
9268  *
9269  * Returns 0 on success, negative value on failure
9270  */
9271 static enum i40e_status_code
9272 i40e_dcb_hw_configure(struct i40e_pf *pf,
9273 		      struct i40e_dcbx_config *new_cfg,
9274 		      uint8_t tc_map)
9275 {
9276 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9277 	struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
9278 	struct i40e_vsi *main_vsi = pf->main_vsi;
9279 	struct i40e_vsi_list *vsi_list;
9280 	enum i40e_status_code ret;
9281 	int i;
9282 	uint32_t val;
9283 
9284 	/* Use the FW API only if FW >= v4.4 */
9285 	if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
9286 	      (hw->aq.fw_maj_ver >= 5))) {
9287 		PMD_INIT_LOG(ERR, "FW < v4.4, cannot use FW LLDP API"
9288 				  " to configure DCB");
9289 		return I40E_ERR_FIRMWARE_API_VERSION;
9290 	}
9291 
9292 	/* Check whether reconfiguration is needed */
9293 	if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
9294 		PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
9295 		return I40E_SUCCESS;
9296 	}
9297 
9298 	/* Copy the new config to the current config */
9299 	*old_cfg = *new_cfg;
9300 	old_cfg->etsrec = old_cfg->etscfg;
9301 	ret = i40e_set_dcb_config(hw);
9302 	if (ret) {
9303 		PMD_INIT_LOG(ERR,
9304 			 "Set DCB Config failed, err %s aq_err %s\n",
9305 			 i40e_stat_str(hw, ret),
9306 			 i40e_aq_str(hw, hw->aq.asq_last_status));
9307 		return ret;
9308 	}
9309 	/* set receive Arbiter to RR mode and ETS scheme by default */
9310 	for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
9311 		val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
9312 		val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK     |
9313 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
9314 			 I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
9315 		val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
9316 			I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
9317 			 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
9318 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
9319 			 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
9320 		val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
9321 			 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
9322 		I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
9323 	}
9324 	/* get local mib to check whether it is configured correctly */
9325 	/* IEEE mode */
9326 	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
9327 	/* Get Local DCB Config */
9328 	i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
9329 				     &hw->local_dcbx_config);
9330 
9331 	/* If a VEB has been created, update its TC configuration first */
9332 	if (main_vsi->veb) {
9333 		ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
9334 		if (ret)
9335 			PMD_INIT_LOG(WARNING,
9336 				 "Failed configuring TC for VEB seid=%d\n",
9337 				 main_vsi->veb->seid);
9338 	}
9339 	/* Update each VSI */
9340 	i40e_vsi_config_tc(main_vsi, tc_map);
9341 	if (main_vsi->veb) {
9342 		TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
9343 			/* Besides the main VSI and VMDQ VSIs, only enable the
9344 			 * default TC for other VSIs
9345 			 */
9346 			if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
9347 				ret = i40e_vsi_config_tc(vsi_list->vsi,
9348 							 tc_map);
9349 			else
9350 				ret = i40e_vsi_config_tc(vsi_list->vsi,
9351 							 I40E_DEFAULT_TCMAP);
9352 			if (ret)
9353 				PMD_INIT_LOG(WARNING,
9354 					 "Failed configuring TC for VSI seid=%d\n",
9355 					 vsi_list->vsi->seid);
9356 			/* continue */
9357 		}
9358 	}
9359 	return I40E_SUCCESS;
9360 }
9361 
9362 /*
9363  * i40e_dcb_init_configure - initial dcb config
9364  * @dev: device being configured
9365  * @sw_dcb: indicate whether dcb is sw configured or hw offload
9366  *
9367  * Returns 0 on success, negative value on failure
9368  */
9369 static int
9370 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
9371 {
9372 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9373 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9374 	int ret = 0;
9375 
9376 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
9377 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
9378 		return -ENOTSUP;
9379 	}
9380 
9381 	/* DCB initialization:
9382 	 * Update DCB configuration from the Firmware and configure
9383 	 * LLDP MIB change event.
9384 	 */
9385 	if (sw_dcb == TRUE) {
9386 		ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
9387 		if (ret != I40E_SUCCESS)
9388 			PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
9389 
9390 		ret = i40e_init_dcb(hw);
9391 		/* With sw_dcb the LLDP agent is stopped, so the expected
9392 		 * return from i40e_init_dcb is a failure with an
9393 		 * I40E_AQ_RC_EPERM adminq status.
9394 		 */
9395 		if (ret != I40E_SUCCESS &&
9396 		    hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
9397 			memset(&hw->local_dcbx_config, 0,
9398 				sizeof(struct i40e_dcbx_config));
9399 			/* set dcb default configuration */
9400 			hw->local_dcbx_config.etscfg.willing = 0;
9401 			hw->local_dcbx_config.etscfg.maxtcs = 0;
9402 			hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
9403 			hw->local_dcbx_config.etscfg.tsatable[0] =
9404 						I40E_IEEE_TSA_ETS;
9405 			hw->local_dcbx_config.etsrec =
9406 				hw->local_dcbx_config.etscfg;
9407 			hw->local_dcbx_config.pfc.willing = 0;
9408 			hw->local_dcbx_config.pfc.pfccap =
9409 						I40E_MAX_TRAFFIC_CLASS;
9410 			/* FW needs one App to configure HW */
9411 			hw->local_dcbx_config.numapps = 1;
9412 			hw->local_dcbx_config.app[0].selector =
9413 						I40E_APP_SEL_ETHTYPE;
9414 			hw->local_dcbx_config.app[0].priority = 3;
9415 			hw->local_dcbx_config.app[0].protocolid =
9416 						I40E_APP_PROTOID_FCOE;
9417 			ret = i40e_set_dcb_config(hw);
9418 			if (ret) {
9419 				PMD_INIT_LOG(ERR, "default dcb config fails."
9420 					" err = %d, aq_err = %d.", ret,
9421 					  hw->aq.asq_last_status);
9422 				return -ENOSYS;
9423 			}
9424 		} else {
9425 			PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
9426 					  " aq_err = %d.", ret,
9427 					  hw->aq.asq_last_status);
9428 			return -ENOTSUP;
9429 		}
9430 	} else {
9431 		ret = i40e_aq_start_lldp(hw, NULL);
9432 		if (ret != I40E_SUCCESS)
9433 			PMD_INIT_LOG(DEBUG, "Failed to start lldp");
9434 
9435 		ret = i40e_init_dcb(hw);
9436 		if (!ret) {
9437 			if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
9438 				PMD_INIT_LOG(ERR, "HW doesn't support"
9439 						  " DCBX offload.");
9440 				return -ENOTSUP;
9441 			}
9442 		} else {
9443 			PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
9444 					  " aq_err = %d.", ret,
9445 					  hw->aq.asq_last_status);
9446 			return -ENOTSUP;
9447 		}
9448 	}
9449 	return 0;
9450 }
9451 
9452 /*
9453  * i40e_dcb_setup - setup dcb related config
9454  * @dev: device being configured
9455  *
9456  * Returns 0 on success, negative value on failure
9457  */
9458 static int
9459 i40e_dcb_setup(struct rte_eth_dev *dev)
9460 {
9461 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9462 	struct i40e_dcbx_config dcb_cfg;
9463 	uint8_t tc_map = 0;
9464 	int ret = 0;
9465 
9466 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
9467 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
9468 		return -ENOTSUP;
9469 	}
9470 
9471 	if (pf->vf_num != 0)
9472 		PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDQ VSIs.");
9473 
9474 	ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
9475 	if (ret) {
9476 		PMD_INIT_LOG(ERR, "invalid dcb config");
9477 		return -EINVAL;
9478 	}
9479 	ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
9480 	if (ret) {
9481 		PMD_INIT_LOG(ERR, "dcb sw configure fails");
9482 		return -ENOSYS;
9483 	}
9484 
9485 	return 0;
9486 }
9487 
9488 static int
9489 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
9490 		      struct rte_eth_dcb_info *dcb_info)
9491 {
9492 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9493 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9494 	struct i40e_vsi *vsi = pf->main_vsi;
9495 	struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
9496 	uint16_t bsf, tc_mapping;
9497 	int i, j = 0;
9498 
9499 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
9500 		dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
9501 	else
9502 		dcb_info->nb_tcs = 1;
9503 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
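	/*
	 * The rte_bsf32(enabled_tc + 1) above works because the driver only
	 * enables a contiguous run of TCs starting at TC0 (see
	 * i40e_parse_dcb_configure), so enabled_tc + 1 is a power of two and
	 * its bit position equals the number of enabled TCs,
	 * e.g. 0x07 + 1 = 0x08 -> 3 TCs.
	 */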
9504 		dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
9505 	for (i = 0; i < dcb_info->nb_tcs; i++)
9506 		dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
9507 
9508 	/* get queue mapping if vmdq is disabled */
9509 	if (!pf->nb_cfg_vmdq_vsi) {
9510 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9511 			if (!(vsi->enabled_tc & (1 << i)))
9512 				continue;
9513 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
9514 			dcb_info->tc_queue.tc_rxq[j][i].base =
9515 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
9516 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
9517 			dcb_info->tc_queue.tc_txq[j][i].base =
9518 				dcb_info->tc_queue.tc_rxq[j][i].base;
9519 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
9520 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
9521 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
9522 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
9523 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
9524 		}
9525 		return 0;
9526 	}
9527 
9528 	/* get queue mapping if vmdq is enabled */
9529 	do {
9530 		vsi = pf->vmdq[j].vsi;
9531 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
9532 			if (!(vsi->enabled_tc & (1 << i)))
9533 				continue;
9534 			tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
9535 			dcb_info->tc_queue.tc_rxq[j][i].base =
9536 				(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
9537 				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
9538 			dcb_info->tc_queue.tc_txq[j][i].base =
9539 				dcb_info->tc_queue.tc_rxq[j][i].base;
9540 			bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
9541 				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
9542 			dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
9543 			dcb_info->tc_queue.tc_txq[j][i].nb_queue =
9544 				dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
9545 		}
9546 		j++;
9547 	} while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
9548 	return 0;
9549 }
9550 
9551 static int
9552 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
9553 {
9554 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
9555 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9556 	uint16_t interval =
9557 		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
9558 	uint16_t msix_intr;
9559 
9560 	msix_intr = intr_handle->intr_vec[queue_id];
9561 	if (msix_intr == I40E_MISC_VEC_ID)
9562 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
9563 			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
9564 			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
9565 			       (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
9566 			       (interval <<
9567 				I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
9568 	else
9569 		I40E_WRITE_REG(hw,
9570 			       I40E_PFINT_DYN_CTLN(msix_intr -
9571 						   I40E_RX_VEC_START),
9572 			       I40E_PFINT_DYN_CTLN_INTENA_MASK |
9573 			       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
9574 			       (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
9575 			       (interval <<
9576 				I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
9577 
9578 	I40E_WRITE_FLUSH(hw);
9579 	rte_intr_enable(&dev->pci_dev->intr_handle);
9580 
9581 	return 0;
9582 }
9583 
9584 static int
9585 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
9586 {
9587 	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
9588 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9589 	uint16_t msix_intr;
9590 
9591 	msix_intr = intr_handle->intr_vec[queue_id];
9592 	if (msix_intr == I40E_MISC_VEC_ID)
9593 		I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
9594 	else
9595 		I40E_WRITE_REG(hw,
9596 			       I40E_PFINT_DYN_CTLN(msix_intr -
9597 						   I40E_RX_VEC_START),
9598 			       0);
9599 	I40E_WRITE_FLUSH(hw);
9600 
9601 	return 0;
9602 }
9603 
9604 static int i40e_get_regs(struct rte_eth_dev *dev,
9605 			 struct rte_dev_reg_info *regs)
9606 {
9607 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9608 	uint32_t *ptr_data = regs->data;
9609 	uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
9610 	const struct i40e_reg_info *reg_info;
9611 
9612 	if (ptr_data == NULL) {
9613 		regs->length = I40E_GLGEN_STAT_CLEAR + 4;
9614 		regs->width = sizeof(uint32_t);
9615 		return 0;
9616 	}
9617 
9618 	/* The first few registers have to be read using AQ operations */
9619 	reg_idx = 0;
9620 	while (i40e_regs_adminq[reg_idx].name) {
9621 		reg_info = &i40e_regs_adminq[reg_idx++];
9622 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
9623 			for (arr_idx2 = 0;
9624 					arr_idx2 <= reg_info->count2;
9625 					arr_idx2++) {
9626 				reg_offset = arr_idx * reg_info->stride1 +
9627 					arr_idx2 * reg_info->stride2;
9628 				reg_offset += reg_info->base_addr;
9629 				ptr_data[reg_offset >> 2] =
9630 					i40e_read_rx_ctl(hw, reg_offset);
9631 			}
9632 	}
9633 
9634 	/* The remaining registers can be read using primitives */
9635 	reg_idx = 0;
9636 	while (i40e_regs_others[reg_idx].name) {
9637 		reg_info = &i40e_regs_others[reg_idx++];
9638 		for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
9639 			for (arr_idx2 = 0;
9640 					arr_idx2 <= reg_info->count2;
9641 					arr_idx2++) {
9642 				reg_offset = arr_idx * reg_info->stride1 +
9643 					arr_idx2 * reg_info->stride2;
9644 				reg_offset += reg_info->base_addr;
9645 				ptr_data[reg_offset >> 2] =
9646 					I40E_READ_REG(hw, reg_offset);
9647 			}
9648 	}
9649 
9650 	return 0;
9651 }
9652 
9653 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
9654 {
9655 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9656 
9657 	/* Convert word count to byte count */
9658 	return hw->nvm.sr_size << 1;
9659 }
9660 
9661 static int i40e_get_eeprom(struct rte_eth_dev *dev,
9662 			   struct rte_dev_eeprom_info *eeprom)
9663 {
9664 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9665 	uint16_t *data = eeprom->data;
9666 	uint16_t offset, length, cnt_words;
9667 	int ret_code;
9668 
9669 	offset = eeprom->offset >> 1;
9670 	length = eeprom->length >> 1;
9671 	cnt_words = length;
9672 
9673 	if (offset > hw->nvm.sr_size ||
9674 		offset + length > hw->nvm.sr_size) {
9675 		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
9676 		return -EINVAL;
9677 	}
9678 
9679 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
9680 
9681 	ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
9682 	if (ret_code != I40E_SUCCESS || cnt_words != length) {
9683 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
9684 		return -EIO;
9685 	}
9686 
9687 	return 0;
9688 }
9689 
9690 static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
9691 				      struct ether_addr *mac_addr)
9692 {
9693 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9694 
9695 	if (!is_valid_assigned_ether_addr(mac_addr)) {
9696 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
9697 		return;
9698 	}
9699 
9700 	/* Flags: 0x3 updates port address */
9701 	i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
9702 }
9703 
9704 static int
9705 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
9706 {
9707 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9708 	struct rte_eth_dev_data *dev_data = pf->dev_data;
9709 	uint32_t frame_size = mtu + ETHER_HDR_LEN
9710 			      + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
9711 	int ret = 0;
9712 
9713 	/* check if mtu is within the allowed range */
9714 	if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
9715 		return -EINVAL;
9716 
9717 	/* MTU setting is forbidden while the port is started */
9718 	if (dev_data->dev_started) {
9719 		PMD_DRV_LOG(ERR,
9720 			    "port %d must be stopped before configuration\n",
9721 			    dev_data->port_id);
9722 		return -EBUSY;
9723 	}
9724 
9725 	if (frame_size > ETHER_MAX_LEN)
9726 		dev_data->dev_conf.rxmode.jumbo_frame = 1;
9727 	else
9728 		dev_data->dev_conf.rxmode.jumbo_frame = 0;
9729 
9730 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
9731 
9732 	return ret;
9733 }
9734