xref: /dpdk/drivers/net/ice/ice_ethdev.c (revision cdcee2ec)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <ethdev_pci.h>
7 
8 #include <stdio.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <unistd.h>
12 
13 #include <rte_tailq.h>
14 
15 #include "base/ice_sched.h"
16 #include "base/ice_flow.h"
17 #include "base/ice_dcb.h"
18 #include "base/ice_common.h"
19 
20 #include "rte_pmd_ice.h"
21 #include "ice_ethdev.h"
22 #include "ice_rxtx.h"
23 #include "ice_generic_flow.h"
24 
25 /* devargs */
26 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
27 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
28 #define ICE_PROTO_XTR_ARG         "proto_xtr"
29 
30 static const char * const ice_valid_args[] = {
31 	ICE_SAFE_MODE_SUPPORT_ARG,
32 	ICE_PIPELINE_MODE_SUPPORT_ARG,
33 	ICE_PROTO_XTR_ARG,
34 	NULL
35 };
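/*
 * Example usage (illustrative only; the PCI address is hypothetical):
 *   dpdk-testpmd -a 18:00.0,safe-mode-support=1,proto_xtr=vlan -- -i
 * Each key is matched against ice_valid_args by ice_parse_devargs() below.
 */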
36 
37 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
38 	.name = "intel_pmd_dynfield_proto_xtr_metadata",
39 	.size = sizeof(uint32_t),
40 	.align = __alignof__(uint32_t),
41 	.flags = 0,
42 };
43 
44 struct proto_xtr_ol_flag {
45 	const struct rte_mbuf_dynflag param;
46 	uint64_t *ol_flag;
47 	bool required;
48 };
49 
50 static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];
51 
52 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
53 	[PROTO_XTR_VLAN] = {
54 		.param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
55 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
56 	[PROTO_XTR_IPV4] = {
57 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
58 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
59 	[PROTO_XTR_IPV6] = {
60 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
61 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
62 	[PROTO_XTR_IPV6_FLOW] = {
63 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
64 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
65 	[PROTO_XTR_TCP] = {
66 		.param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
67 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
68 	[PROTO_XTR_IP_OFFSET] = {
69 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
70 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
71 };
72 
73 #define ICE_OS_DEFAULT_PKG_NAME		"ICE OS Default Package"
74 #define ICE_COMMS_PKG_NAME			"ICE COMMS Package"
75 #define ICE_MAX_RES_DESC_NUM        1024
76 
77 static int ice_dev_configure(struct rte_eth_dev *dev);
78 static int ice_dev_start(struct rte_eth_dev *dev);
79 static int ice_dev_stop(struct rte_eth_dev *dev);
80 static int ice_dev_close(struct rte_eth_dev *dev);
81 static int ice_dev_reset(struct rte_eth_dev *dev);
82 static int ice_dev_info_get(struct rte_eth_dev *dev,
83 			    struct rte_eth_dev_info *dev_info);
84 static int ice_link_update(struct rte_eth_dev *dev,
85 			   int wait_to_complete);
86 static int ice_dev_set_link_up(struct rte_eth_dev *dev);
87 static int ice_dev_set_link_down(struct rte_eth_dev *dev);
88 
89 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
90 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
91 static int ice_rss_reta_update(struct rte_eth_dev *dev,
92 			       struct rte_eth_rss_reta_entry64 *reta_conf,
93 			       uint16_t reta_size);
94 static int ice_rss_reta_query(struct rte_eth_dev *dev,
95 			      struct rte_eth_rss_reta_entry64 *reta_conf,
96 			      uint16_t reta_size);
97 static int ice_rss_hash_update(struct rte_eth_dev *dev,
98 			       struct rte_eth_rss_conf *rss_conf);
99 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
100 				 struct rte_eth_rss_conf *rss_conf);
101 static int ice_promisc_enable(struct rte_eth_dev *dev);
102 static int ice_promisc_disable(struct rte_eth_dev *dev);
103 static int ice_allmulti_enable(struct rte_eth_dev *dev);
104 static int ice_allmulti_disable(struct rte_eth_dev *dev);
105 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
106 			       uint16_t vlan_id,
107 			       int on);
108 static int ice_macaddr_set(struct rte_eth_dev *dev,
109 			   struct rte_ether_addr *mac_addr);
110 static int ice_macaddr_add(struct rte_eth_dev *dev,
111 			   struct rte_ether_addr *mac_addr,
112 			   __rte_unused uint32_t index,
113 			   uint32_t pool);
114 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
115 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
116 				    uint16_t queue_id);
117 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
118 				     uint16_t queue_id);
119 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
120 			      size_t fw_size);
121 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
122 			     uint16_t pvid, int on);
123 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
124 static int ice_get_eeprom(struct rte_eth_dev *dev,
125 			  struct rte_dev_eeprom_info *eeprom);
126 static int ice_stats_get(struct rte_eth_dev *dev,
127 			 struct rte_eth_stats *stats);
128 static int ice_stats_reset(struct rte_eth_dev *dev);
129 static int ice_xstats_get(struct rte_eth_dev *dev,
130 			  struct rte_eth_xstat *xstats, unsigned int n);
131 static int ice_xstats_get_names(struct rte_eth_dev *dev,
132 				struct rte_eth_xstat_name *xstats_names,
133 				unsigned int limit);
134 static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
135 				const struct rte_flow_ops **ops);
136 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
137 			struct rte_eth_udp_tunnel *udp_tunnel);
138 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
139 			struct rte_eth_udp_tunnel *udp_tunnel);
140 
141 static const struct rte_pci_id pci_id_ice_map[] = {
142 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
143 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
144 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
145 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
146 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
147 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
148 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
149 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
150 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
151 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
152 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
153 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) },
154 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) },
155 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
156 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
157 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
158 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
159 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
160 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
161 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
162 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
163 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
164 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
165 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
166 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
167 	{ .vendor_id = 0, /* sentinel */ },
168 };
169 
170 static const struct eth_dev_ops ice_eth_dev_ops = {
171 	.dev_configure                = ice_dev_configure,
172 	.dev_start                    = ice_dev_start,
173 	.dev_stop                     = ice_dev_stop,
174 	.dev_close                    = ice_dev_close,
175 	.dev_reset                    = ice_dev_reset,
176 	.dev_set_link_up              = ice_dev_set_link_up,
177 	.dev_set_link_down            = ice_dev_set_link_down,
178 	.rx_queue_start               = ice_rx_queue_start,
179 	.rx_queue_stop                = ice_rx_queue_stop,
180 	.tx_queue_start               = ice_tx_queue_start,
181 	.tx_queue_stop                = ice_tx_queue_stop,
182 	.rx_queue_setup               = ice_rx_queue_setup,
183 	.rx_queue_release             = ice_rx_queue_release,
184 	.tx_queue_setup               = ice_tx_queue_setup,
185 	.tx_queue_release             = ice_tx_queue_release,
186 	.dev_infos_get                = ice_dev_info_get,
187 	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
188 	.link_update                  = ice_link_update,
189 	.mtu_set                      = ice_mtu_set,
190 	.mac_addr_set                 = ice_macaddr_set,
191 	.mac_addr_add                 = ice_macaddr_add,
192 	.mac_addr_remove              = ice_macaddr_remove,
193 	.vlan_filter_set              = ice_vlan_filter_set,
194 	.vlan_offload_set             = ice_vlan_offload_set,
195 	.reta_update                  = ice_rss_reta_update,
196 	.reta_query                   = ice_rss_reta_query,
197 	.rss_hash_update              = ice_rss_hash_update,
198 	.rss_hash_conf_get            = ice_rss_hash_conf_get,
199 	.promiscuous_enable           = ice_promisc_enable,
200 	.promiscuous_disable          = ice_promisc_disable,
201 	.allmulticast_enable          = ice_allmulti_enable,
202 	.allmulticast_disable         = ice_allmulti_disable,
203 	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
204 	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
205 	.fw_version_get               = ice_fw_version_get,
206 	.vlan_pvid_set                = ice_vlan_pvid_set,
207 	.rxq_info_get                 = ice_rxq_info_get,
208 	.txq_info_get                 = ice_txq_info_get,
209 	.rx_burst_mode_get            = ice_rx_burst_mode_get,
210 	.tx_burst_mode_get            = ice_tx_burst_mode_get,
211 	.get_eeprom_length            = ice_get_eeprom_length,
212 	.get_eeprom                   = ice_get_eeprom,
213 	.stats_get                    = ice_stats_get,
214 	.stats_reset                  = ice_stats_reset,
215 	.xstats_get                   = ice_xstats_get,
216 	.xstats_get_names             = ice_xstats_get_names,
217 	.xstats_reset                 = ice_stats_reset,
218 	.flow_ops_get                 = ice_dev_flow_ops_get,
219 	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
220 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
221 	.tx_done_cleanup              = ice_tx_done_cleanup,
222 	.get_monitor_addr             = ice_get_monitor_addr,
223 };
224 
225 /* store statistics names and their offsets in the stats structure */
226 struct ice_xstats_name_off {
227 	char name[RTE_ETH_XSTATS_NAME_SIZE];
228 	unsigned int offset;
229 };
230 
231 static const struct ice_xstats_name_off ice_stats_strings[] = {
232 	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
233 	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
234 	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
235 	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
236 	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
237 		rx_unknown_protocol)},
238 	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
239 	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
240 	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
241 	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
242 };
243 
244 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
245 		sizeof(ice_stats_strings[0]))
246 
247 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
248 	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
249 		tx_dropped_link_down)},
250 	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
251 	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
252 		illegal_bytes)},
253 	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
254 	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
255 		mac_local_faults)},
256 	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
257 		mac_remote_faults)},
258 	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
259 		rx_len_errors)},
260 	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
261 	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
262 	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
263 	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
264 	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
265 	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
266 		rx_size_127)},
267 	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
268 		rx_size_255)},
269 	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
270 		rx_size_511)},
271 	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
272 		rx_size_1023)},
273 	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
274 		rx_size_1522)},
275 	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
276 		rx_size_big)},
277 	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
278 		rx_undersize)},
279 	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
280 		rx_oversize)},
281 	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
282 		mac_short_pkt_dropped)},
283 	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
284 		rx_fragments)},
285 	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
286 	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
287 	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
288 		tx_size_127)},
289 	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
290 		tx_size_255)},
291 	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
292 		tx_size_511)},
293 	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
294 		tx_size_1023)},
295 	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
296 		tx_size_1522)},
297 	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
298 		tx_size_big)},
299 };
300 
301 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
302 		sizeof(ice_hw_port_strings[0]))
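/*
 * A minimal sketch of how these name/offset tables are typically consumed
 * (illustrative loop, not the driver's exact xstats code):
 *
 *   for (i = 0; i < ICE_NB_ETH_XSTATS; i++)
 *           values[i] = *(uint64_t *)((char *)eth_stats +
 *                                     ice_stats_strings[i].offset);
 */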
303 
304 static void
305 ice_init_controlq_parameter(struct ice_hw *hw)
306 {
307 	/* fields for adminq */
308 	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
309 	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
310 	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
311 	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
312 
313 	/* fields for the mailboxq; DPDK acts as the PF host */
314 	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
315 	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
316 	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
317 	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
318 }
319 
320 static int
321 lookup_proto_xtr_type(const char *xtr_name)
322 {
323 	static struct {
324 		const char *name;
325 		enum proto_xtr_type type;
326 	} xtr_type_map[] = {
327 		{ "vlan",      PROTO_XTR_VLAN      },
328 		{ "ipv4",      PROTO_XTR_IPV4      },
329 		{ "ipv6",      PROTO_XTR_IPV6      },
330 		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
331 		{ "tcp",       PROTO_XTR_TCP       },
332 		{ "ip_offset", PROTO_XTR_IP_OFFSET },
333 	};
334 	uint32_t i;
335 
336 	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
337 		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
338 			return xtr_type_map[i].type;
339 	}
340 
341 	return -1;
342 }
343 
344 /*
345  * Parse an elem; the elem can be a single number/range or a '(' ')' group
346  * 1) A single number elem is just a digit, e.g. 9
347  * 2) A single range elem is two digits with a '-' between them, e.g. 2-6
348  * 3) A group elem combines multiple 1) or 2) with '( )', e.g. (0,2-4,6)
349  *    Within a group elem, '-' is used as a range separator and
350  *                         ',' as a separator between single numbers.
351  */
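/*
 * For example (hypothetical devargs value), proto_xtr='[(1,2-3,8-9):tcp,10-13:vlan]'
 * assigns TCP extraction to queues 1, 2-3 and 8-9 and VLAN extraction to
 * queues 10-13; parse_queue_set() below handles each queue-set element once
 * the extraction type has been looked up.
 */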
352 static int
353 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
354 {
355 	const char *str = input;
356 	char *end = NULL;
357 	uint32_t min, max;
358 	uint32_t idx;
359 
360 	while (isblank(*str))
361 		str++;
362 
363 	if (!isdigit(*str) && *str != '(')
364 		return -1;
365 
366 	/* process a single number or a single range of numbers */
367 	if (*str != '(') {
368 		errno = 0;
369 		idx = strtoul(str, &end, 10);
370 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
371 			return -1;
372 
373 		while (isblank(*end))
374 			end++;
375 
376 		min = idx;
377 		max = idx;
378 
379 		/* process single <number>-<number> */
380 		if (*end == '-') {
381 			end++;
382 			while (isblank(*end))
383 				end++;
384 			if (!isdigit(*end))
385 				return -1;
386 
387 			errno = 0;
388 			idx = strtoul(end, &end, 10);
389 			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
390 				return -1;
391 
392 			max = idx;
393 			while (isblank(*end))
394 				end++;
395 		}
396 
397 		if (*end != ':')
398 			return -1;
399 
400 		for (idx = RTE_MIN(min, max);
401 		     idx <= RTE_MAX(min, max); idx++)
402 			devargs->proto_xtr[idx] = xtr_type;
403 
404 		return 0;
405 	}
406 
407 	/* process set within bracket */
408 	str++;
409 	while (isblank(*str))
410 		str++;
411 	if (*str == '\0')
412 		return -1;
413 
414 	min = ICE_MAX_QUEUE_NUM;
415 	do {
416 		/* advance to the first digit */
417 		while (isblank(*str))
418 			str++;
419 		if (!isdigit(*str))
420 			return -1;
421 
422 		/* get the digit value */
423 		errno = 0;
424 		idx = strtoul(str, &end, 10);
425 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
426 			return -1;
427 
428 		/* advance to the separator '-', ',' or ')' */
429 		while (isblank(*end))
430 			end++;
431 		if (*end == '-') {
432 			if (min == ICE_MAX_QUEUE_NUM)
433 				min = idx;
434 			else /* avoid continuous '-' */
435 				return -1;
436 		} else if (*end == ',' || *end == ')') {
437 			max = idx;
438 			if (min == ICE_MAX_QUEUE_NUM)
439 				min = idx;
440 
441 			for (idx = RTE_MIN(min, max);
442 			     idx <= RTE_MAX(min, max); idx++)
443 				devargs->proto_xtr[idx] = xtr_type;
444 
445 			min = ICE_MAX_QUEUE_NUM;
446 		} else {
447 			return -1;
448 		}
449 
450 		str = end + 1;
451 	} while (*end != ')' && *end != '\0');
452 
453 	return 0;
454 }
455 
456 static int
457 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
458 {
459 	const char *queue_start;
460 	uint32_t idx;
461 	int xtr_type;
462 	char xtr_name[32];
463 
464 	while (isblank(*queues))
465 		queues++;
466 
467 	if (*queues != '[') {
468 		xtr_type = lookup_proto_xtr_type(queues);
469 		if (xtr_type < 0)
470 			return -1;
471 
472 		devargs->proto_xtr_dflt = xtr_type;
473 
474 		return 0;
475 	}
476 
477 	queues++;
478 	do {
479 		while (isblank(*queues))
480 			queues++;
481 		if (*queues == '\0')
482 			return -1;
483 
484 		queue_start = queues;
485 
486 		/* skip over a complete bracketed group */
487 		if (*queue_start == '(') {
488 			queues += strcspn(queues, ")");
489 			if (*queues != ')')
490 				return -1;
491 		}
492 
493 		/* scan the separator ':' */
494 		queues += strcspn(queues, ":");
495 		if (*queues++ != ':')
496 			return -1;
497 		while (isblank(*queues))
498 			queues++;
499 
500 		for (idx = 0; ; idx++) {
501 			if (isblank(queues[idx]) ||
502 			    queues[idx] == ',' ||
503 			    queues[idx] == ']' ||
504 			    queues[idx] == '\0')
505 				break;
506 
507 			if (idx > sizeof(xtr_name) - 2)
508 				return -1;
509 
510 			xtr_name[idx] = queues[idx];
511 		}
512 		xtr_name[idx] = '\0';
513 		xtr_type = lookup_proto_xtr_type(xtr_name);
514 		if (xtr_type < 0)
515 			return -1;
516 
517 		queues += idx;
518 
519 		while (isblank(*queues) || *queues == ',' || *queues == ']')
520 			queues++;
521 
522 		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
523 			return -1;
524 	} while (*queues != '\0');
525 
526 	return 0;
527 }
528 
529 static int
530 handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
531 		     void *extra_args)
532 {
533 	struct ice_devargs *devargs = extra_args;
534 
535 	if (value == NULL || extra_args == NULL)
536 		return -EINVAL;
537 
538 	if (parse_queue_proto_xtr(value, devargs) < 0) {
539 		PMD_DRV_LOG(ERR,
540 			    "The protocol extraction parameter is wrong : '%s'",
541 			    value);
542 		return -1;
543 	}
544 
545 	return 0;
546 }
547 
548 static void
549 ice_check_proto_xtr_support(struct ice_hw *hw)
550 {
551 #define FLX_REG(val, fld, idx) \
552 	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
553 	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
554 	static struct {
555 		uint32_t rxdid;
556 		uint8_t opcode;
557 		uint8_t protid_0;
558 		uint8_t protid_1;
559 	} xtr_sets[] = {
560 		[PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
561 				     ICE_RX_OPC_EXTRACT,
562 				     ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
563 		[PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
564 				     ICE_RX_OPC_EXTRACT,
565 				     ICE_PROT_IPV4_OF_OR_S,
566 				     ICE_PROT_IPV4_OF_OR_S },
567 		[PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
568 				     ICE_RX_OPC_EXTRACT,
569 				     ICE_PROT_IPV6_OF_OR_S,
570 				     ICE_PROT_IPV6_OF_OR_S },
571 		[PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
572 					  ICE_RX_OPC_EXTRACT,
573 					  ICE_PROT_IPV6_OF_OR_S,
574 					  ICE_PROT_IPV6_OF_OR_S },
575 		[PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
576 				    ICE_RX_OPC_EXTRACT,
577 				    ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
578 		[PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
579 					  ICE_RX_OPC_PROTID,
580 					  ICE_PROT_IPV4_OF_OR_S,
581 					  ICE_PROT_IPV6_OF_OR_S },
582 	};
583 	uint32_t i;
584 
585 	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
586 		uint32_t rxdid = xtr_sets[i].rxdid;
587 		uint32_t v;
588 
589 		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
590 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
591 
592 			if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
593 			    FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
594 				ice_proto_xtr_hw_support[i] = true;
595 		}
596 
597 		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
598 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
599 
600 			if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
601 			    FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
602 				ice_proto_xtr_hw_support[i] = true;
603 		}
604 	}
605 }
606 
607 static int
608 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
609 		  uint32_t num)
610 {
611 	struct pool_entry *entry;
612 
613 	if (!pool || !num)
614 		return -EINVAL;
615 
616 	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
617 	if (!entry) {
618 		PMD_INIT_LOG(ERR,
619 			     "Failed to allocate memory for resource pool");
620 		return -ENOMEM;
621 	}
622 
623 	/* Initialize the queue heap */
624 	pool->num_free = num;
625 	pool->num_alloc = 0;
626 	pool->base = base;
627 	LIST_INIT(&pool->alloc_list);
628 	LIST_INIT(&pool->free_list);
629 
630 	/* Initialize element  */
631 	entry->base = 0;
632 	entry->len = num;
633 
634 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
635 	return 0;
636 }
637 
638 static int
639 ice_res_pool_alloc(struct ice_res_pool_info *pool,
640 		   uint16_t num)
641 {
642 	struct pool_entry *entry, *valid_entry;
643 
644 	if (!pool || !num) {
645 		PMD_INIT_LOG(ERR, "Invalid parameter");
646 		return -EINVAL;
647 	}
648 
649 	if (pool->num_free < num) {
650 		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
651 			     num, pool->num_free);
652 		return -ENOMEM;
653 	}
654 
655 	valid_entry = NULL;
656 	/* Look up the free list and find the best-fit entry */
657 	LIST_FOREACH(entry, &pool->free_list, next) {
658 		if (entry->len >= num) {
659 			/* Find best one */
660 			if (entry->len == num) {
661 				valid_entry = entry;
662 				break;
663 			}
664 			if (!valid_entry ||
665 			    valid_entry->len > entry->len)
666 				valid_entry = entry;
667 		}
668 	}
669 
670 	/* No entry found to satisfy the request, return */
671 	if (!valid_entry) {
672 		PMD_INIT_LOG(ERR, "No valid entry found");
673 		return -ENOMEM;
674 	}
675 	/**
676 	 * The entry has exactly the requested number of queues;
677 	 * remove it from the free_list.
678 	 */
679 	if (valid_entry->len == num) {
680 		LIST_REMOVE(valid_entry, next);
681 	} else {
682 		/**
683 		 * The entry has more queues than requested;
684 		 * create a new entry for the alloc_list and reduce
685 		 * the base and length of the entry left in the free_list.
686 		 */
687 		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
688 		if (!entry) {
689 			PMD_INIT_LOG(ERR,
690 				     "Failed to allocate memory for "
691 				     "resource pool");
692 			return -ENOMEM;
693 		}
694 		entry->base = valid_entry->base;
695 		entry->len = num;
696 		valid_entry->base += num;
697 		valid_entry->len -= num;
698 		valid_entry = entry;
699 	}
700 
701 	/* Insert it into alloc list, not sorted */
702 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
703 
704 	pool->num_free -= valid_entry->len;
705 	pool->num_alloc += valid_entry->len;
706 
707 	return valid_entry->base + pool->base;
708 }
709 
710 static void
711 ice_res_pool_destroy(struct ice_res_pool_info *pool)
712 {
713 	struct pool_entry *entry, *next_entry;
714 
715 	if (!pool)
716 		return;
717 
718 	for (entry = LIST_FIRST(&pool->alloc_list);
719 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
720 	     entry = next_entry) {
721 		LIST_REMOVE(entry, next);
722 		rte_free(entry);
723 	}
724 
725 	for (entry = LIST_FIRST(&pool->free_list);
726 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
727 	     entry = next_entry) {
728 		LIST_REMOVE(entry, next);
729 		rte_free(entry);
730 	}
731 
732 	pool->num_free = 0;
733 	pool->num_alloc = 0;
734 	pool->base = 0;
735 	LIST_INIT(&pool->alloc_list);
736 	LIST_INIT(&pool->free_list);
737 }
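/*
 * Usage sketch (illustrative): a resource pool such as the PF MSI-X pool is
 * initialized once and then carved up per VSI, roughly as:
 *
 *   ice_res_pool_init(&pf->msix_pool, base, num_entries);
 *   intr_base = ice_res_pool_alloc(&pf->msix_pool, vsi->nb_msix);
 *   ...
 *   ice_res_pool_destroy(&pf->msix_pool);
 */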
738 
739 static void
740 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
741 {
742 	/* Set VSI LUT selection */
743 	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
744 			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
745 	/* Set Hash scheme */
746 	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
747 			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
748 	/* enable TC */
749 	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
750 }
751 
752 static enum ice_status
753 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
754 				struct ice_aqc_vsi_props *info,
755 				uint8_t enabled_tcmap)
756 {
757 	uint16_t bsf, qp_idx;
758 
759 	/* Only TC0 is used for now; multi-TC support needs to be added later.
760 	 * Configure TC and queue mapping parameters: for each enabled TC,
761 	 * allocate qpnum_per_tc queues to that traffic class.
762 	 */
763 	if (enabled_tcmap != 0x01) {
764 		PMD_INIT_LOG(ERR, "only TC0 is supported");
765 		return -ENOTSUP;
766 	}
767 
768 	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
769 	bsf = rte_bsf32(vsi->nb_qps);
770 	/* Adjust the queue number to actual queues that can be applied */
771 	vsi->nb_qps = 0x1 << bsf;
772 
773 	qp_idx = 0;
774 	/* Set tc and queue mapping with VSI */
775 	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
776 						ICE_AQ_VSI_TC_Q_OFFSET_S) |
777 					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
778 
779 	/* Associate queue number with VSI */
780 	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
781 	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
782 	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
783 	info->valid_sections |=
784 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
785 	/* Set the info.ingress_table and info.egress_table
786 	 * for UP translate table. Now just set it to 1:1 map by default
787 	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
788 	 */
789 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
790 	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
791 	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
792 	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
793 	return 0;
794 }
795 
796 static int
797 ice_init_mac_address(struct rte_eth_dev *dev)
798 {
799 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
800 
801 	if (!rte_is_unicast_ether_addr
802 		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
803 		PMD_INIT_LOG(ERR, "Invalid MAC address");
804 		return -EINVAL;
805 	}
806 
807 	rte_ether_addr_copy(
808 		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
809 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
810 
811 	dev->data->mac_addrs =
812 		rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0);
813 	if (!dev->data->mac_addrs) {
814 		PMD_INIT_LOG(ERR,
815 			     "Failed to allocate memory to store mac address");
816 		return -ENOMEM;
817 	}
818 	/* store it to dev data */
819 	rte_ether_addr_copy(
820 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
821 		&dev->data->mac_addrs[0]);
822 	return 0;
823 }
824 
825 /* Find out specific MAC filter */
826 static struct ice_mac_filter *
827 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
828 {
829 	struct ice_mac_filter *f;
830 
831 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
832 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
833 			return f;
834 	}
835 
836 	return NULL;
837 }
838 
839 static int
840 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
841 {
842 	struct ice_fltr_list_entry *m_list_itr = NULL;
843 	struct ice_mac_filter *f;
844 	struct LIST_HEAD_TYPE list_head;
845 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
846 	int ret = 0;
847 
848 	/* If it's added and configured, return */
849 	f = ice_find_mac_filter(vsi, mac_addr);
850 	if (f) {
851 		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
852 		return 0;
853 	}
854 
855 	INIT_LIST_HEAD(&list_head);
856 
857 	m_list_itr = (struct ice_fltr_list_entry *)
858 		ice_malloc(hw, sizeof(*m_list_itr));
859 	if (!m_list_itr) {
860 		ret = -ENOMEM;
861 		goto DONE;
862 	}
863 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
864 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
865 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
866 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
867 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
868 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
869 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
870 
871 	LIST_ADD(&m_list_itr->list_entry, &list_head);
872 
873 	/* Add the mac */
874 	ret = ice_add_mac(hw, &list_head);
875 	if (ret != ICE_SUCCESS) {
876 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
877 		ret = -EINVAL;
878 		goto DONE;
879 	}
880 	/* Add the mac addr into mac list */
881 	f = rte_zmalloc(NULL, sizeof(*f), 0);
882 	if (!f) {
883 		PMD_DRV_LOG(ERR, "failed to allocate memory");
884 		ret = -ENOMEM;
885 		goto DONE;
886 	}
887 	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
888 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
889 	vsi->mac_num++;
890 
891 	ret = 0;
892 
893 DONE:
894 	rte_free(m_list_itr);
895 	return ret;
896 }
897 
898 static int
899 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
900 {
901 	struct ice_fltr_list_entry *m_list_itr = NULL;
902 	struct ice_mac_filter *f;
903 	struct LIST_HEAD_TYPE list_head;
904 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
905 	int ret = 0;
906 
907 	/* Can't find it, return an error */
908 	f = ice_find_mac_filter(vsi, mac_addr);
909 	if (!f)
910 		return -EINVAL;
911 
912 	INIT_LIST_HEAD(&list_head);
913 
914 	m_list_itr = (struct ice_fltr_list_entry *)
915 		ice_malloc(hw, sizeof(*m_list_itr));
916 	if (!m_list_itr) {
917 		ret = -ENOMEM;
918 		goto DONE;
919 	}
920 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
921 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
922 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
923 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
924 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
925 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
926 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
927 
928 	LIST_ADD(&m_list_itr->list_entry, &list_head);
929 
930 	/* remove the mac filter */
931 	ret = ice_remove_mac(hw, &list_head);
932 	if (ret != ICE_SUCCESS) {
933 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
934 		ret = -EINVAL;
935 		goto DONE;
936 	}
937 
938 	/* Remove the mac addr from mac list */
939 	TAILQ_REMOVE(&vsi->mac_list, f, next);
940 	rte_free(f);
941 	vsi->mac_num--;
942 
943 	ret = 0;
944 DONE:
945 	rte_free(m_list_itr);
946 	return ret;
947 }
948 
949 /* Find out specific VLAN filter */
950 static struct ice_vlan_filter *
951 ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
952 {
953 	struct ice_vlan_filter *f;
954 
955 	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
956 		if (vlan->tpid == f->vlan_info.vlan.tpid &&
957 		    vlan->vid == f->vlan_info.vlan.vid)
958 			return f;
959 	}
960 
961 	return NULL;
962 }
963 
964 static int
965 ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
966 {
967 	struct ice_fltr_list_entry *v_list_itr = NULL;
968 	struct ice_vlan_filter *f;
969 	struct LIST_HEAD_TYPE list_head;
970 	struct ice_hw *hw;
971 	int ret = 0;
972 
973 	if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
974 		return -EINVAL;
975 
976 	hw = ICE_VSI_TO_HW(vsi);
977 
978 	/* If it's added and configured, return. */
979 	f = ice_find_vlan_filter(vsi, vlan);
980 	if (f) {
981 		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
982 		return 0;
983 	}
984 
985 	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
986 		return 0;
987 
988 	INIT_LIST_HEAD(&list_head);
989 
990 	v_list_itr = (struct ice_fltr_list_entry *)
991 		      ice_malloc(hw, sizeof(*v_list_itr));
992 	if (!v_list_itr) {
993 		ret = -ENOMEM;
994 		goto DONE;
995 	}
996 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
997 	v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
998 	v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
999 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1000 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1001 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1002 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1003 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
1004 
1005 	LIST_ADD(&v_list_itr->list_entry, &list_head);
1006 
1007 	/* Add the vlan */
1008 	ret = ice_add_vlan(hw, &list_head);
1009 	if (ret != ICE_SUCCESS) {
1010 		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
1011 		ret = -EINVAL;
1012 		goto DONE;
1013 	}
1014 
1015 	/* Add vlan into vlan list */
1016 	f = rte_zmalloc(NULL, sizeof(*f), 0);
1017 	if (!f) {
1018 		PMD_DRV_LOG(ERR, "failed to allocate memory");
1019 		ret = -ENOMEM;
1020 		goto DONE;
1021 	}
1022 	f->vlan_info.vlan.tpid = vlan->tpid;
1023 	f->vlan_info.vlan.vid = vlan->vid;
1024 	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
1025 	vsi->vlan_num++;
1026 
1027 	ret = 0;
1028 
1029 DONE:
1030 	rte_free(v_list_itr);
1031 	return ret;
1032 }
1033 
1034 static int
1035 ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1036 {
1037 	struct ice_fltr_list_entry *v_list_itr = NULL;
1038 	struct ice_vlan_filter *f;
1039 	struct LIST_HEAD_TYPE list_head;
1040 	struct ice_hw *hw;
1041 	int ret = 0;
1042 
1043 	if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
1044 		return -EINVAL;
1045 
1046 	hw = ICE_VSI_TO_HW(vsi);
1047 
1048 	/* Can't find it, return an error */
1049 	f = ice_find_vlan_filter(vsi, vlan);
1050 	if (!f)
1051 		return -EINVAL;
1052 
1053 	INIT_LIST_HEAD(&list_head);
1054 
1055 	v_list_itr = (struct ice_fltr_list_entry *)
1056 		      ice_malloc(hw, sizeof(*v_list_itr));
1057 	if (!v_list_itr) {
1058 		ret = -ENOMEM;
1059 		goto DONE;
1060 	}
1061 
1062 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
1063 	v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1064 	v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1065 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1066 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1067 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1068 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1069 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
1070 
1071 	LIST_ADD(&v_list_itr->list_entry, &list_head);
1072 
1073 	/* remove the vlan filter */
1074 	ret = ice_remove_vlan(hw, &list_head);
1075 	if (ret != ICE_SUCCESS) {
1076 		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
1077 		ret = -EINVAL;
1078 		goto DONE;
1079 	}
1080 
1081 	/* Remove the vlan id from vlan list */
1082 	TAILQ_REMOVE(&vsi->vlan_list, f, next);
1083 	rte_free(f);
1084 	vsi->vlan_num--;
1085 
1086 	ret = 0;
1087 DONE:
1088 	rte_free(v_list_itr);
1089 	return ret;
1090 }
1091 
1092 static int
1093 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
1094 {
1095 	struct ice_mac_filter *m_f;
1096 	struct ice_vlan_filter *v_f;
1097 	void *temp;
1098 	int ret = 0;
1099 
1100 	if (!vsi || !vsi->mac_num)
1101 		return -EINVAL;
1102 
1103 	TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) {
1104 		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
1105 		if (ret != ICE_SUCCESS) {
1106 			ret = -EINVAL;
1107 			goto DONE;
1108 		}
1109 	}
1110 
1111 	if (vsi->vlan_num == 0)
1112 		return 0;
1113 
1114 	TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) {
1115 		ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
1116 		if (ret != ICE_SUCCESS) {
1117 			ret = -EINVAL;
1118 			goto DONE;
1119 		}
1120 	}
1121 
1122 DONE:
1123 	return ret;
1124 }
1125 
1126 /* Enable IRQ0 */
1127 static void
1128 ice_pf_enable_irq0(struct ice_hw *hw)
1129 {
1130 	/* reset the registers */
1131 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
1132 	ICE_READ_REG(hw, PFINT_OICR);
1133 
1134 #ifdef ICE_LSE_SPT
1135 	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
1136 		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
1137 				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
1138 
1139 	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
1140 		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
1141 		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
1142 		       PFINT_OICR_CTL_ITR_INDX_M) |
1143 		      PFINT_OICR_CTL_CAUSE_ENA_M);
1144 
1145 	ICE_WRITE_REG(hw, PFINT_FW_CTL,
1146 		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
1147 		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
1148 		       PFINT_FW_CTL_ITR_INDX_M) |
1149 		      PFINT_FW_CTL_CAUSE_ENA_M);
1150 #else
1151 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
1152 #endif
1153 
1154 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1155 		      GLINT_DYN_CTL_INTENA_M |
1156 		      GLINT_DYN_CTL_CLEARPBA_M |
1157 		      GLINT_DYN_CTL_ITR_INDX_M);
1158 
1159 	ice_flush(hw);
1160 }
1161 
1162 /* Disable IRQ0 */
1163 static void
1164 ice_pf_disable_irq0(struct ice_hw *hw)
1165 {
1166 	/* Disable all interrupt types */
1167 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1168 	ice_flush(hw);
1169 }
1170 
1171 #ifdef ICE_LSE_SPT
1172 static void
1173 ice_handle_aq_msg(struct rte_eth_dev *dev)
1174 {
1175 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1176 	struct ice_ctl_q_info *cq = &hw->adminq;
1177 	struct ice_rq_event_info event;
1178 	uint16_t pending, opcode;
1179 	int ret;
1180 
1181 	event.buf_len = ICE_AQ_MAX_BUF_LEN;
1182 	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
1183 	if (!event.msg_buf) {
1184 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
1185 		return;
1186 	}
1187 
1188 	pending = 1;
1189 	while (pending) {
1190 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1191 
1192 		if (ret != ICE_SUCCESS) {
1193 			PMD_DRV_LOG(INFO,
1194 				    "Failed to read msg from AdminQ, "
1195 				    "adminq_err: %u",
1196 				    hw->adminq.sq_last_status);
1197 			break;
1198 		}
1199 		opcode = rte_le_to_cpu_16(event.desc.opcode);
1200 
1201 		switch (opcode) {
1202 		case ice_aqc_opc_get_link_status:
1203 			ret = ice_link_update(dev, 0);
1204 			if (!ret)
1205 				rte_eth_dev_callback_process
1206 					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1207 			break;
1208 		default:
1209 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1210 				    opcode);
1211 			break;
1212 		}
1213 	}
1214 	rte_free(event.msg_buf);
1215 }
1216 #endif
1217 
1218 /**
1219  * Interrupt handler triggered by NIC for handling
1220  * specific interrupt.
1221  *
1222  * @param handle
1223  *  Pointer to interrupt handle.
1224  * @param param
1225  *  The address of parameter (struct rte_eth_dev *) registered before.
1226  *
1227  * @return
1228  *  void
1229  */
1230 static void
1231 ice_interrupt_handler(void *param)
1232 {
1233 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1234 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1235 	uint32_t oicr;
1236 	uint32_t reg;
1237 	uint8_t pf_num;
1238 	uint8_t event;
1239 	uint16_t queue;
1240 	int ret;
1241 #ifdef ICE_LSE_SPT
1242 	uint32_t int_fw_ctl;
1243 #endif
1244 
1245 	/* Disable interrupt */
1246 	ice_pf_disable_irq0(hw);
1247 
1248 	/* read out interrupt causes */
1249 	oicr = ICE_READ_REG(hw, PFINT_OICR);
1250 #ifdef ICE_LSE_SPT
1251 	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1252 #endif
1253 
1254 	/* No interrupt event indicated */
1255 	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1256 		PMD_DRV_LOG(INFO, "No interrupt event");
1257 		goto done;
1258 	}
1259 
1260 #ifdef ICE_LSE_SPT
1261 	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1262 		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1263 		ice_handle_aq_msg(dev);
1264 	}
1265 #else
1266 	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1267 		PMD_DRV_LOG(INFO, "OICR: link state change event");
1268 		ret = ice_link_update(dev, 0);
1269 		if (!ret)
1270 			rte_eth_dev_callback_process
1271 				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1272 	}
1273 #endif
1274 
1275 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
1276 		PMD_DRV_LOG(WARNING, "OICR: MDD event");
1277 		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1278 		if (reg & GL_MDET_TX_PQM_VALID_M) {
1279 			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1280 				 GL_MDET_TX_PQM_PF_NUM_S;
1281 			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1282 				GL_MDET_TX_PQM_MAL_TYPE_S;
1283 			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1284 				GL_MDET_TX_PQM_QNUM_S;
1285 
1286 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1287 				    "%d by PQM on TX queue %d PF# %d",
1288 				    event, queue, pf_num);
1289 		}
1290 
1291 		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1292 		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1293 			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1294 				 GL_MDET_TX_TCLAN_PF_NUM_S;
1295 			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1296 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1297 			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1298 				GL_MDET_TX_TCLAN_QNUM_S;
1299 
1300 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1301 				    "%d by TCLAN on TX queue %d PF# %d",
1302 				    event, queue, pf_num);
1303 		}
1304 	}
1305 done:
1306 	/* Enable interrupt */
1307 	ice_pf_enable_irq0(hw);
1308 	rte_intr_ack(dev->intr_handle);
1309 }
1310 
1311 static void
1312 ice_init_proto_xtr(struct rte_eth_dev *dev)
1313 {
1314 	struct ice_adapter *ad =
1315 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1316 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1317 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1318 	const struct proto_xtr_ol_flag *ol_flag;
1319 	bool proto_xtr_enable = false;
1320 	int offset;
1321 	uint16_t i;
1322 
1323 	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1324 	if (unlikely(pf->proto_xtr == NULL)) {
1325 		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1326 		return;
1327 	}
1328 
1329 	for (i = 0; i < pf->lan_nb_qps; i++) {
1330 		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1331 				   ad->devargs.proto_xtr[i] :
1332 				   ad->devargs.proto_xtr_dflt;
1333 
1334 		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1335 			uint8_t type = pf->proto_xtr[i];
1336 
1337 			ice_proto_xtr_ol_flag_params[type].required = true;
1338 			proto_xtr_enable = true;
1339 		}
1340 	}
1341 
1342 	if (likely(!proto_xtr_enable))
1343 		return;
1344 
1345 	ice_check_proto_xtr_support(hw);
1346 
1347 	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1348 	if (unlikely(offset == -1)) {
1349 		PMD_DRV_LOG(ERR,
1350 			    "Protocol extraction metadata is disabled in mbuf with error %d",
1351 			    -rte_errno);
1352 		return;
1353 	}
1354 
1355 	PMD_DRV_LOG(DEBUG,
1356 		    "Protocol extraction metadata offset in mbuf is : %d",
1357 		    offset);
1358 	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1359 
1360 	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1361 		ol_flag = &ice_proto_xtr_ol_flag_params[i];
1362 
1363 		if (!ol_flag->required)
1364 			continue;
1365 
1366 		if (!ice_proto_xtr_hw_support[i]) {
1367 			PMD_DRV_LOG(ERR,
1368 				    "Protocol extraction type %u is not supported in hardware",
1369 				    i);
1370 			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1371 			break;
1372 		}
1373 
1374 		offset = rte_mbuf_dynflag_register(&ol_flag->param);
1375 		if (unlikely(offset == -1)) {
1376 			PMD_DRV_LOG(ERR,
1377 				    "Protocol extraction offload '%s' failed to register with error %d",
1378 				    ol_flag->param.name, -rte_errno);
1379 
1380 			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1381 			break;
1382 		}
1383 
1384 		PMD_DRV_LOG(DEBUG,
1385 			    "Protocol extraction offload '%s' offset in mbuf is : %d",
1386 			    ol_flag->param.name, offset);
1387 		*ol_flag->ol_flag = 1ULL << offset;
1388 	}
1389 }
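/*
 * Application-side sketch (illustrative, not part of the PMD): once the
 * dynamic field above is registered, an application can look it up by name
 * (see rte_mbuf_dyn.h) and read the extracted metadata from a received mbuf:
 *
 *   int offs = rte_mbuf_dynfield_lookup(
 *           "intel_pmd_dynfield_proto_xtr_metadata", NULL);
 *   if (offs >= 0) {
 *           uint32_t meta = *RTE_MBUF_DYNFIELD(mbuf, offs, uint32_t *);
 *           ...
 *   }
 */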
1390 
1391 /*  Initialize SW parameters of PF */
1392 static int
1393 ice_pf_sw_init(struct rte_eth_dev *dev)
1394 {
1395 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1396 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1397 
1398 	pf->lan_nb_qp_max =
1399 		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1400 				  hw->func_caps.common_cap.num_rxq);
1401 
1402 	pf->lan_nb_qps = pf->lan_nb_qp_max;
1403 
1404 	ice_init_proto_xtr(dev);
1405 
1406 	if (hw->func_caps.fd_fltr_guar > 0 ||
1407 	    hw->func_caps.fd_fltr_best_effort > 0) {
1408 		pf->flags |= ICE_FLAG_FDIR;
1409 		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1410 		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1411 	} else {
1412 		pf->fdir_nb_qps = 0;
1413 	}
1414 	pf->fdir_qp_offset = 0;
1415 
1416 	return 0;
1417 }
1418 
1419 struct ice_vsi *
1420 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1421 {
1422 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1423 	struct ice_vsi *vsi = NULL;
1424 	struct ice_vsi_ctx vsi_ctx;
1425 	int ret;
1426 	struct rte_ether_addr broadcast = {
1427 		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1428 	struct rte_ether_addr mac_addr;
1429 	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1430 	uint8_t tc_bitmap = 0x1;
1431 	uint16_t cfg;
1432 
1433 	/* hw->num_lports = 1 in NIC mode */
1434 	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1435 	if (!vsi)
1436 		return NULL;
1437 
1438 	vsi->idx = pf->next_vsi_idx;
1439 	pf->next_vsi_idx++;
1440 	vsi->type = type;
1441 	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1442 	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1443 	vsi->vlan_anti_spoof_on = 0;
1444 	vsi->vlan_filter_on = 1;
1445 	TAILQ_INIT(&vsi->mac_list);
1446 	TAILQ_INIT(&vsi->vlan_list);
1447 
1448 	/* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
1449 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1450 			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
1451 			hw->func_caps.common_cap.rss_table_size;
1452 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1453 
1454 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1455 	switch (type) {
1456 	case ICE_VSI_PF:
1457 		vsi->nb_qps = pf->lan_nb_qps;
1458 		vsi->base_queue = 1;
1459 		ice_vsi_config_default_rss(&vsi_ctx.info);
1460 		vsi_ctx.alloc_from_pool = true;
1461 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1462 		/* switch_id is queried by get_switch_config aq, which is done
1463 		 * by ice_init_hw
1464 		 */
1465 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1466 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1467 		/* Allow all untagged or tagged packets */
1468 		vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
1469 		vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
1470 		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1471 					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1472 		if (ice_is_dvm_ena(hw)) {
1473 			vsi_ctx.info.outer_vlan_flags =
1474 				(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
1475 				 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
1476 				ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
1477 			vsi_ctx.info.outer_vlan_flags |=
1478 				(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
1479 				 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
1480 				ICE_AQ_VSI_OUTER_TAG_TYPE_M;
1481 		}
1482 
1483 		/* FDIR */
1484 		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1485 			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1486 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1487 		cfg = ICE_AQ_VSI_FD_ENABLE;
1488 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1489 		vsi_ctx.info.max_fd_fltr_dedicated =
1490 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1491 		vsi_ctx.info.max_fd_fltr_shared =
1492 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1493 
1494 		/* Enable VLAN/UP trip */
1495 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1496 						      &vsi_ctx.info,
1497 						      ICE_DEFAULT_TCMAP);
1498 		if (ret) {
1499 			PMD_INIT_LOG(ERR,
1500 				     "tc queue mapping with vsi failed, "
1501 				     "err = %d",
1502 				     ret);
1503 			goto fail_mem;
1504 		}
1505 
1506 		break;
1507 	case ICE_VSI_CTRL:
1508 		vsi->nb_qps = pf->fdir_nb_qps;
1509 		vsi->base_queue = ICE_FDIR_QUEUE_ID;
1510 		vsi_ctx.alloc_from_pool = true;
1511 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1512 
1513 		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1514 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1515 		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1516 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1517 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1518 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1519 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1520 						      &vsi_ctx.info,
1521 						      ICE_DEFAULT_TCMAP);
1522 		if (ret) {
1523 			PMD_INIT_LOG(ERR,
1524 				     "tc queue mapping with vsi failed, "
1525 				     "err = %d",
1526 				     ret);
1527 			goto fail_mem;
1528 		}
1529 		break;
1530 	default:
1531 		/* for other types of VSI */
1532 		PMD_INIT_LOG(ERR, "other types of VSI not supported");
1533 		goto fail_mem;
1534 	}
1535 
1536 	/* VF has MSIX interrupt in VF range, don't allocate here */
1537 	if (type == ICE_VSI_PF) {
1538 		ret = ice_res_pool_alloc(&pf->msix_pool,
1539 					 RTE_MIN(vsi->nb_qps,
1540 						 RTE_MAX_RXTX_INTR_VEC_ID));
1541 		if (ret < 0) {
1542 			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1543 				     vsi->vsi_id, ret);
1544 		}
1545 		vsi->msix_intr = ret;
1546 		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1547 	} else if (type == ICE_VSI_CTRL) {
1548 		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1549 		if (ret < 0) {
1550 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1551 				    vsi->vsi_id, ret);
1552 		}
1553 		vsi->msix_intr = ret;
1554 		vsi->nb_msix = 1;
1555 	} else {
1556 		vsi->msix_intr = 0;
1557 		vsi->nb_msix = 0;
1558 	}
1559 	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1560 	if (ret != ICE_SUCCESS) {
1561 		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1562 		goto fail_mem;
1563 	}
1564 	/* Store the VSI information in the SW structure */
1565 	vsi->vsi_id = vsi_ctx.vsi_num;
1566 	vsi->info = vsi_ctx.info;
1567 	pf->vsis_allocated = vsi_ctx.vsis_allocd;
1568 	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1569 
1570 	if (type == ICE_VSI_PF) {
1571 		/* MAC configuration */
1572 		rte_ether_addr_copy((struct rte_ether_addr *)
1573 					hw->port_info->mac.perm_addr,
1574 				    &pf->dev_addr);
1575 
1576 		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1577 		ret = ice_add_mac_filter(vsi, &mac_addr);
1578 		if (ret != ICE_SUCCESS)
1579 			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1580 
1581 		rte_ether_addr_copy(&broadcast, &mac_addr);
1582 		ret = ice_add_mac_filter(vsi, &mac_addr);
1583 		if (ret != ICE_SUCCESS)
1584 			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1585 	}
1586 
1587 	/* At the beginning, only TC0. */
1588 	/* What we need here is the maximum number of Tx queues.
1589 	 * Currently vsi->nb_qps holds that value.
1590 	 * Correct this if it ever changes.
1591 	 */
1592 	max_txqs[0] = vsi->nb_qps;
1593 	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1594 			      tc_bitmap, max_txqs);
1595 	if (ret != ICE_SUCCESS)
1596 		PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1597 
1598 	return vsi;
1599 fail_mem:
1600 	rte_free(vsi);
1601 	pf->next_vsi_idx--;
1602 	return NULL;
1603 }
1604 
1605 static int
1606 ice_send_driver_ver(struct ice_hw *hw)
1607 {
1608 	struct ice_driver_ver dv;
1609 
1610 	/* We don't have a driver version; use 0 as a dummy */
1611 	dv.major_ver = 0;
1612 	dv.minor_ver = 0;
1613 	dv.build_ver = 0;
1614 	dv.subbuild_ver = 0;
1615 	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1616 
1617 	return ice_aq_send_driver_ver(hw, &dv, NULL);
1618 }
1619 
1620 static int
1621 ice_pf_setup(struct ice_pf *pf)
1622 {
1623 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1624 	struct ice_vsi *vsi;
1625 	uint16_t unused;
1626 
1627 	/* Clear all stats counters */
1628 	pf->offset_loaded = false;
1629 	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1630 	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1631 	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1632 	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1633 
1634 	/* force guaranteed filter pool for PF */
1635 	ice_alloc_fd_guar_item(hw, &unused,
1636 			       hw->func_caps.fd_fltr_guar);
1637 	/* force shared filter pool for PF */
1638 	ice_alloc_fd_shrd_item(hw, &unused,
1639 			       hw->func_caps.fd_fltr_best_effort);
1640 
1641 	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1642 	if (!vsi) {
1643 		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1644 		return -EINVAL;
1645 	}
1646 
1647 	pf->main_vsi = vsi;
1648 
1649 	return 0;
1650 }
1651 
1652 /*
1653  * Extract device serial number from PCIe Configuration Space and
1654  * determine the pkg file path according to the DSN.
1655  */
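/*
 * For example, a device with a (hypothetical) DSN of high word 0x12345678 and
 * low word 0x9abcdef0 results in the file name "ice-123456789abcdef0.pkg",
 * which is looked up in the updates path first and then in the default path.
 */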
1656 #ifndef RTE_EXEC_ENV_WINDOWS
1657 static int
1658 ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
1659 {
1660 	off_t pos;
1661 	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1662 	uint32_t dsn_low, dsn_high;
1663 	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1664 
1665 	pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
1666 
1667 	if (pos) {
1668 		if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0) {
1669 			PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
1670 			return -1;
1671 		}
1672 		if (rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
1673 			PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
1674 			return -1;
1675 		}
1676 		snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1677 			 "ice-%08x%08x.pkg", dsn_high, dsn_low);
1678 	} else {
1679 		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
1680 		goto fail_dsn;
1681 	}
1682 
1683 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1684 		ICE_MAX_PKG_FILENAME_SIZE);
1685 	if (!ice_access(strcat(pkg_file, opt_ddp_filename), 0))
1686 		return 0;
1687 
1688 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1689 		ICE_MAX_PKG_FILENAME_SIZE);
1690 	if (!ice_access(strcat(pkg_file, opt_ddp_filename), 0))
1691 		return 0;
1692 
1693 fail_dsn:
1694 	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1695 	if (!ice_access(pkg_file, 0))
1696 		return 0;
1697 	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1698 	return 0;
1699 }
1700 #endif
1701 
1702 enum ice_pkg_type
1703 ice_load_pkg_type(struct ice_hw *hw)
1704 {
1705 	enum ice_pkg_type package_type;
1706 
1707 	/* store the activated package type (OS default or Comms) */
1708 	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1709 		ICE_PKG_NAME_SIZE))
1710 		package_type = ICE_PKG_TYPE_OS_DEFAULT;
1711 	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1712 		ICE_PKG_NAME_SIZE))
1713 		package_type = ICE_PKG_TYPE_COMMS;
1714 	else
1715 		package_type = ICE_PKG_TYPE_UNKNOWN;
1716 
1717 	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)",
1718 		hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1719 		hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1720 		hw->active_pkg_name,
1721 		ice_is_dvm_ena(hw) ? "double" : "single");
1722 
1723 	return package_type;
1724 }
1725 
1726 #ifndef RTE_EXEC_ENV_WINDOWS
1727 static int ice_load_pkg(struct rte_eth_dev *dev)
1728 {
1729 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1730 	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1731 	int err;
1732 	uint8_t *buf;
1733 	int buf_len;
1734 	FILE *file;
1735 	struct stat fstat;
1736 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1737 	struct ice_adapter *ad =
1738 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1739 
1740 	err = ice_pkg_file_search_path(pci_dev, pkg_file);
1741 	if (err) {
1742 		PMD_INIT_LOG(ERR, "failed to determine the DDP package file path\n");
1743 		return err;
1744 	}
1745 
1746 	file = fopen(pkg_file, "rb");
1747 	if (!file)  {
1748 		PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1749 		return -1;
1750 	}
1751 
1752 	err = stat(pkg_file, &fstat);
1753 	if (err) {
1754 		PMD_INIT_LOG(ERR, "failed to get file stats\n");
1755 		fclose(file);
1756 		return err;
1757 	}
1758 
1759 	buf_len = fstat.st_size;
1760 	buf = rte_malloc(NULL, buf_len, 0);
1761 
1762 	if (!buf) {
1763 		PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1764 				buf_len);
1765 		fclose(file);
1766 		return -1;
1767 	}
1768 
1769 	err = fread(buf, buf_len, 1, file);
1770 	if (err != 1) {
1771 		PMD_INIT_LOG(ERR, "failed to read package data\n");
1772 		fclose(file);
1773 		err = -1;
1774 		goto fail_exit;
1775 	}
1776 
1777 	fclose(file);
1778 
1779 	err = ice_copy_and_init_pkg(hw, buf, buf_len);
1780 	if (err) {
1781 		PMD_INIT_LOG(ERR, "ice_copy_and_init_pkg failed: %d\n", err);
1782 		goto fail_exit;
1783 	}
1784 
1785 	/* store the loaded pkg type info */
1786 	ad->active_pkg_type = ice_load_pkg_type(hw);
1787 
1788 	err = ice_init_hw_tbls(hw);
1789 	if (err) {
1790 		PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1791 		goto fail_init_tbls;
1792 	}
1793 
1794 	return 0;
1795 
1796 fail_init_tbls:
1797 	rte_free(hw->pkg_copy);
1798 fail_exit:
1799 	rte_free(buf);
1800 	return err;
1801 }
1802 #endif
1803 
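/* Read PFLAN_RX_QALLOC to learn the first (absolute) Rx queue index allocated
 * to this PF; local queue numbers are offset by this base when programming
 * per-queue registers.
 */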
1804 static void
1805 ice_base_queue_get(struct ice_pf *pf)
1806 {
1807 	uint32_t reg;
1808 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1809 
1810 	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1811 	if (reg & PFLAN_RX_QALLOC_VALID_M) {
1812 		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1813 	} else {
1814 		PMD_INIT_LOG(WARNING,
1815 			     "Failed to get Rx base queue index");
1816 	}
1817 }
1818 
1819 static int
1820 parse_bool(const char *key, const char *value, void *args)
1821 {
1822 	int *i = (int *)args;
1823 	char *end;
1824 	int num;
1825 
1826 	num = strtoul(value, &end, 10);
1827 
1828 	if (num != 0 && num != 1) {
1829 		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1830 			"value must be 0 or 1",
1831 			value, key);
1832 		return -1;
1833 	}
1834 
1835 	*i = num;
1836 	return 0;
1837 }
1838 
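/* Parse the driver-specific devargs: proto_xtr, safe-mode-support and
 * pipeline-mode-support. The boolean keys accept only 0 or 1, e.g. a device
 * argument string such as "safe-mode-support=1".
 */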
1839 static int ice_parse_devargs(struct rte_eth_dev *dev)
1840 {
1841 	struct ice_adapter *ad =
1842 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1843 	struct rte_devargs *devargs = dev->device->devargs;
1844 	struct rte_kvargs *kvlist;
1845 	int ret;
1846 
1847 	if (devargs == NULL)
1848 		return 0;
1849 
1850 	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1851 	if (kvlist == NULL) {
1852 		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
1853 		return -EINVAL;
1854 	}
1855 
1856 	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
1857 	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
1858 	       sizeof(ad->devargs.proto_xtr));
1859 
1860 	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
1861 				 &handle_proto_xtr_arg, &ad->devargs);
1862 	if (ret)
1863 		goto bail;
1864 
1865 	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
1866 				 &parse_bool, &ad->devargs.safe_mode_support);
1867 	if (ret)
1868 		goto bail;
1869 
1870 	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
1871 				 &parse_bool, &ad->devargs.pipe_mode_support);
1872 	if (ret)
1873 		goto bail;
1874 
1875 bail:
1876 	rte_kvargs_free(kvlist);
1877 	return ret;
1878 }
1879 
1880 /* Forward LLDP packets to default VSI by setting switch rules */
1881 static int
1882 ice_vsi_config_sw_lldp(struct ice_vsi *vsi,  bool on)
1883 {
1884 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1885 	struct ice_fltr_list_entry *s_list_itr = NULL;
1886 	struct LIST_HEAD_TYPE list_head;
1887 	int ret = 0;
1888 
1889 	INIT_LIST_HEAD(&list_head);
1890 
1891 	s_list_itr = (struct ice_fltr_list_entry *)
1892 			ice_malloc(hw, sizeof(*s_list_itr));
1893 	if (!s_list_itr)
1894 		return -ENOMEM;
1895 	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
1896 	s_list_itr->fltr_info.vsi_handle = vsi->idx;
1897 	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
1898 			RTE_ETHER_TYPE_LLDP;
1899 	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1900 	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
1901 	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
1902 	LIST_ADD(&s_list_itr->list_entry, &list_head);
1903 	if (on)
1904 		ret = ice_add_eth_mac(hw, &list_head);
1905 	else
1906 		ret = ice_remove_eth_mac(hw, &list_head);
1907 
1908 	rte_free(s_list_itr);
1909 	return ret;
1910 }
1911 
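/* Query the admin queue (get_allocd_res_desc) for the currently allocated
 * descriptors of a given resource type, copying at most 'num' entries into
 * prof_buf and reporting the actual count in num_prof.
 */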
1912 static enum ice_status
1913 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
1914 		uint16_t num, uint16_t desc_id,
1915 		uint16_t *prof_buf, uint16_t *num_prof)
1916 {
1917 	struct ice_aqc_res_elem *resp_buf;
1918 	int ret;
1919 	uint16_t buf_len;
1920 	bool res_shared = true;
1921 	struct ice_aq_desc aq_desc;
1922 	struct ice_sq_cd *cd = NULL;
1923 	struct ice_aqc_get_allocd_res_desc *cmd =
1924 			&aq_desc.params.get_res_desc;
1925 
1926 	buf_len = sizeof(*resp_buf) * num;
1927 	resp_buf = ice_malloc(hw, buf_len);
1928 	if (!resp_buf)
1929 		return -ENOMEM;
1930 
1931 	ice_fill_dflt_direct_cmd_desc(&aq_desc,
1932 			ice_aqc_opc_get_allocd_res_desc);
1933 
1934 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
1935 				ICE_AQC_RES_TYPE_M) | (res_shared ?
1936 				ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
1937 	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
1938 
1939 	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
1940 	if (!ret)
1941 		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
1942 	else
1943 		goto exit;
1944 
1945 	ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
1946 			(*num_prof), ICE_NONDMA_TO_NONDMA);
1947 
1948 exit:
1949 	rte_free(resp_buf);
1950 	return ret;
1951 }
1952 static int
1953 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
1954 {
1955 	int ret;
1956 	uint16_t prof_id;
1957 	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
1958 	uint16_t first_desc = 1;
1959 	uint16_t num_prof = 0;
1960 
1961 	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
1962 			first_desc, prof_buf, &num_prof);
1963 	if (ret) {
1964 		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
1965 		return ret;
1966 	}
1967 
1968 	for (prof_id = 0; prof_id < num_prof; prof_id++) {
1969 		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
1970 		if (ret) {
1971 			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
1972 			return ret;
1973 		}
1974 	}
1975 	return 0;
1976 }
1977 
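/* Release any flexible-pipeline profile resources (FDIR and RSS profile
 * builder IDs) that a previous driver instance may have left allocated, so
 * this instance starts from a clean state.
 */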
1978 static int
1979 ice_reset_fxp_resource(struct ice_hw *hw)
1980 {
1981 	int ret;
1982 
1983 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
1984 	if (ret) {
1985 		PMD_INIT_LOG(ERR, "Failed to clean up FDIR resource");
1986 		return ret;
1987 	}
1988 
1989 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
1990 	if (ret) {
1991 		PMD_INIT_LOG(ERR, "Failed to clean up RSS resource");
1992 		return ret;
1993 	}
1994 
1995 	return 0;
1996 }
1997 
1998 static void
1999 ice_rss_ctx_init(struct ice_pf *pf)
2000 {
2001 	memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
2002 }
2003 
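/* Build a bitmap of the Rx descriptor IDs (RXDIDs) this device supports:
 * legacy descriptor 1 is always usable, and a flexible RXDID is reported as
 * supported when its first FLEXIFLAG_4N field in GLFLXP_RXDID_FLAGS is
 * non-zero.
 */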
2004 static uint64_t
2005 ice_get_supported_rxdid(struct ice_hw *hw)
2006 {
2007 	uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
2008 	uint32_t regval;
2009 	int i;
2010 
2011 	supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
2012 
2013 	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
2014 		regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
2015 		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
2016 			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
2017 			supported_rxdid |= BIT(i);
2018 	}
2019 	return supported_rxdid;
2020 }
2021 
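/* Main PF initialization: set up the ethdev ops and burst functions, parse
 * devargs, initialize the shared HW code, load the DDP package (or enter
 * safe mode), create the main VSI, configure LLDP forwarding, hook up the
 * interrupt handler and reset leftover flexible-pipeline resources.
 */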
2022 static int
2023 ice_dev_init(struct rte_eth_dev *dev)
2024 {
2025 	struct rte_pci_device *pci_dev;
2026 	struct rte_intr_handle *intr_handle;
2027 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2028 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2029 	struct ice_adapter *ad =
2030 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2031 	struct ice_vsi *vsi;
2032 	int ret;
2033 
2034 	dev->dev_ops = &ice_eth_dev_ops;
2035 	dev->rx_queue_count = ice_rx_queue_count;
2036 	dev->rx_descriptor_status = ice_rx_descriptor_status;
2037 	dev->tx_descriptor_status = ice_tx_descriptor_status;
2038 	dev->rx_pkt_burst = ice_recv_pkts;
2039 	dev->tx_pkt_burst = ice_xmit_pkts;
2040 	dev->tx_pkt_prepare = ice_prep_pkts;
2041 
2042 	/* for secondary processes, we don't initialise any further as primary
2043 	 * has already done this work.
2044 	 */
2045 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2046 		ice_set_rx_function(dev);
2047 		ice_set_tx_function(dev);
2048 		return 0;
2049 	}
2050 
2051 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2052 
2053 	ice_set_default_ptype_table(dev);
2054 	pci_dev = RTE_DEV_TO_PCI(dev->device);
2055 	intr_handle = &pci_dev->intr_handle;
2056 
2057 	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2058 	pf->adapter->eth_dev = dev;
2059 	pf->dev_data = dev->data;
2060 	hw->back = pf->adapter;
2061 	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2062 	hw->vendor_id = pci_dev->id.vendor_id;
2063 	hw->device_id = pci_dev->id.device_id;
2064 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2065 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2066 	hw->bus.device = pci_dev->addr.devid;
2067 	hw->bus.func = pci_dev->addr.function;
2068 
2069 	ret = ice_parse_devargs(dev);
2070 	if (ret) {
2071 		PMD_INIT_LOG(ERR, "Failed to parse devargs");
2072 		return -EINVAL;
2073 	}
2074 
2075 	ice_init_controlq_parameter(hw);
2076 
2077 	ret = ice_init_hw(hw);
2078 	if (ret) {
2079 		PMD_INIT_LOG(ERR, "Failed to initialize HW");
2080 		return -EINVAL;
2081 	}
2082 
2083 #ifndef RTE_EXEC_ENV_WINDOWS
2084 	ret = ice_load_pkg(dev);
2085 	if (ret) {
2086 		if (ad->devargs.safe_mode_support == 0) {
2087 			PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
2088 					"Use safe-mode-support=1 to enter Safe Mode");
2089 			return ret;
2090 		}
2091 
2092 		PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
2093 					"entering Safe Mode");
2094 		ad->is_safe_mode = 1;
2095 	}
2096 #endif
2097 
2098 	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2099 		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2100 		     hw->api_maj_ver, hw->api_min_ver);
2101 
2102 	ice_pf_sw_init(dev);
2103 	ret = ice_init_mac_address(dev);
2104 	if (ret) {
2105 		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2106 		goto err_init_mac;
2107 	}
2108 
2109 	ret = ice_res_pool_init(&pf->msix_pool, 1,
2110 				hw->func_caps.common_cap.num_msix_vectors - 1);
2111 	if (ret) {
2112 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2113 		goto err_msix_pool_init;
2114 	}
2115 
2116 	ret = ice_pf_setup(pf);
2117 	if (ret) {
2118 		PMD_INIT_LOG(ERR, "Failed to setup PF");
2119 		goto err_pf_setup;
2120 	}
2121 
2122 	ret = ice_send_driver_ver(hw);
2123 	if (ret) {
2124 		PMD_INIT_LOG(ERR, "Failed to send driver version");
2125 		goto err_pf_setup;
2126 	}
2127 
2128 	vsi = pf->main_vsi;
2129 
2130 	ret = ice_aq_stop_lldp(hw, true, false, NULL);
2131 	if (ret != ICE_SUCCESS)
2132 		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
2133 	ret = ice_init_dcb(hw, true);
2134 	if (ret != ICE_SUCCESS)
2135 		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
2136 	/* Forward LLDP packets to default VSI */
2137 	ret = ice_vsi_config_sw_lldp(vsi, true);
2138 	if (ret != ICE_SUCCESS)
2139 		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
2140 	/* register callback func to eal lib */
2141 	rte_intr_callback_register(intr_handle,
2142 				   ice_interrupt_handler, dev);
2143 
2144 	ice_pf_enable_irq0(hw);
2145 
2146 	/* enable uio intr after callback register */
2147 	rte_intr_enable(intr_handle);
2148 
2149 	/* get the base queue pair index in the device */
2150 	ice_base_queue_get(pf);
2151 
2152 	/* Initialize RSS context for gtpu_eh */
2153 	ice_rss_ctx_init(pf);
2154 
2155 	if (!ad->is_safe_mode) {
2156 		ret = ice_flow_init(ad);
2157 		if (ret) {
2158 			PMD_INIT_LOG(ERR, "Failed to initialize flow");
2159 			return ret;
2160 		}
2161 	}
2162 
2163 	ret = ice_reset_fxp_resource(hw);
2164 	if (ret) {
2165 		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2166 		return ret;
2167 	}
2168 
2169 	pf->supported_rxdid = ice_get_supported_rxdid(hw);
2170 
2171 	return 0;
2172 
2173 err_pf_setup:
2174 	ice_res_pool_destroy(&pf->msix_pool);
2175 err_msix_pool_init:
2176 	rte_free(dev->data->mac_addrs);
2177 	dev->data->mac_addrs = NULL;
2178 err_init_mac:
2179 	ice_sched_cleanup_all(hw);
2180 	rte_free(hw->port_info);
2181 	ice_shutdown_all_ctrlq(hw);
2182 	rte_free(pf->proto_xtr);
2183 
2184 	return ret;
2185 }
2186 
2187 int
2188 ice_release_vsi(struct ice_vsi *vsi)
2189 {
2190 	struct ice_hw *hw;
2191 	struct ice_vsi_ctx vsi_ctx;
2192 	enum ice_status ret;
2193 	int error = 0;
2194 
2195 	if (!vsi)
2196 		return error;
2197 
2198 	hw = ICE_VSI_TO_HW(vsi);
2199 
2200 	ice_remove_all_mac_vlan_filters(vsi);
2201 
2202 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2203 
2204 	vsi_ctx.vsi_num = vsi->vsi_id;
2205 	vsi_ctx.info = vsi->info;
2206 	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2207 	if (ret != ICE_SUCCESS) {
2208 		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2209 		error = -1;
2210 	}
2211 
2212 	rte_free(vsi->rss_lut);
2213 	rte_free(vsi->rss_key);
2214 	rte_free(vsi);
2215 	return error;
2216 }
2217 
2218 void
2219 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2220 {
2221 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2222 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2223 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2224 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2225 	uint16_t msix_intr, i;
2226 
2227 	/* disable interrupts and clear all existing configuration */
2228 	for (i = 0; i < vsi->nb_qps; i++) {
2229 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2230 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2231 		rte_wmb();
2232 	}
2233 
2234 	if (rte_intr_allow_others(intr_handle))
2235 		/* vfio-pci */
2236 		for (i = 0; i < vsi->nb_msix; i++) {
2237 			msix_intr = vsi->msix_intr + i;
2238 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2239 				      GLINT_DYN_CTL_WB_ON_ITR_M);
2240 		}
2241 	else
2242 		/* igb_uio */
2243 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2244 }
2245 
2246 static int
2247 ice_dev_stop(struct rte_eth_dev *dev)
2248 {
2249 	struct rte_eth_dev_data *data = dev->data;
2250 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2251 	struct ice_vsi *main_vsi = pf->main_vsi;
2252 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2253 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2254 	uint16_t i;
2255 
2256 	/* avoid stopping again */
2257 	if (pf->adapter_stopped)
2258 		return 0;
2259 
2260 	/* stop and clear all Rx queues */
2261 	for (i = 0; i < data->nb_rx_queues; i++)
2262 		ice_rx_queue_stop(dev, i);
2263 
2264 	/* stop and clear all Tx queues */
2265 	for (i = 0; i < data->nb_tx_queues; i++)
2266 		ice_tx_queue_stop(dev, i);
2267 
2268 	/* disable all queue interrupts */
2269 	ice_vsi_disable_queues_intr(main_vsi);
2270 
2271 	if (pf->init_link_up)
2272 		ice_dev_set_link_up(dev);
2273 	else
2274 		ice_dev_set_link_down(dev);
2275 
2276 	/* Clean datapath event and queue/vec mapping */
2277 	rte_intr_efd_disable(intr_handle);
2278 	if (intr_handle->intr_vec) {
2279 		rte_free(intr_handle->intr_vec);
2280 		intr_handle->intr_vec = NULL;
2281 	}
2282 
2283 	pf->adapter_stopped = true;
2284 	dev->data->dev_started = 0;
2285 
2286 	return 0;
2287 }
2288 
2289 static int
2290 ice_dev_close(struct rte_eth_dev *dev)
2291 {
2292 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2293 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2294 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2295 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2296 	struct ice_adapter *ad =
2297 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2298 	int ret;
2299 
2300 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2301 		return 0;
2302 
2303 	/* Stopping the device brings the link down and triggers a link
2304 	 * event, so disable irq0 first to prevent deallocation of
2305 	 * port_info and other resources from crashing the interrupt
2306 	 * service thread.
2307 	 */
2308 	ice_pf_disable_irq0(hw);
2309 
2310 	ret = ice_dev_stop(dev);
2311 
2312 	if (!ad->is_safe_mode)
2313 		ice_flow_uninit(ad);
2314 
2315 	/* release all queue resource */
2316 	ice_free_queues(dev);
2317 
2318 	ice_res_pool_destroy(&pf->msix_pool);
2319 	ice_release_vsi(pf->main_vsi);
2320 	ice_sched_cleanup_all(hw);
2321 	ice_free_hw_tbls(hw);
2322 	rte_free(hw->port_info);
2323 	hw->port_info = NULL;
2324 	ice_shutdown_all_ctrlq(hw);
2325 	rte_free(pf->proto_xtr);
2326 	pf->proto_xtr = NULL;
2327 
2328 	/* disable uio intr before callback unregister */
2329 	rte_intr_disable(intr_handle);
2330 
2331 	/* unregister callback func from eal lib */
2332 	rte_intr_callback_unregister(intr_handle,
2333 				     ice_interrupt_handler, dev);
2334 
2335 	return ret;
2336 }
2337 
2338 static int
2339 ice_dev_uninit(struct rte_eth_dev *dev)
2340 {
2341 	ice_dev_close(dev);
2342 
2343 	return 0;
2344 }
2345 
2346 static bool
2347 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
2348 {
2349 	return cfg->hash_flds != 0 && cfg->addl_hdrs != 0;
2350 }
2351 
2352 static void
2353 hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
2354 {
2355 	cfg->hash_flds = 0;
2356 	cfg->addl_hdrs = 0;
2357 	cfg->symm = 0;
2358 	cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
2359 }
2360 
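/* "moveout" removes an RSS configuration from hardware but keeps the cached
 * ice_rss_hash_cfg so that "moveback" can re-apply it later; ice_hash_remove
 * additionally clears the cached entry.
 */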
2361 static int
2362 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2363 {
2364 	enum ice_status status = ICE_SUCCESS;
2365 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2366 	struct ice_vsi *vsi = pf->main_vsi;
2367 
2368 	if (!is_hash_cfg_valid(cfg))
2369 		return -ENOENT;
2370 
2371 	status = ice_rem_rss_cfg(hw, vsi->idx, cfg);
2372 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2373 		PMD_DRV_LOG(ERR,
2374 			    "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
2375 			    vsi->idx, status);
2376 		return -EBUSY;
2377 	}
2378 
2379 	return 0;
2380 }
2381 
2382 static int
2383 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2384 {
2385 	enum ice_status status = ICE_SUCCESS;
2386 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2387 	struct ice_vsi *vsi = pf->main_vsi;
2388 
2389 	if (!is_hash_cfg_valid(cfg))
2390 		return -ENOENT;
2391 
2392 	status = ice_add_rss_cfg(hw, vsi->idx, cfg);
2393 	if (status) {
2394 		PMD_DRV_LOG(ERR,
2395 			    "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
2396 			    vsi->idx, status);
2397 		return -EBUSY;
2398 	}
2399 
2400 	return 0;
2401 }
2402 
2403 static int
2404 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2405 {
2406 	int ret;
2407 
2408 	ret = ice_hash_moveout(pf, cfg);
2409 	if (ret && (ret != -ENOENT))
2410 		return ret;
2411 
2412 	hash_cfg_reset(cfg);
2413 
2414 	return 0;
2415 }
2416 
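/* Before a new GTPU RSS rule is installed for context 'ctx_idx', remove or
 * temporarily move out the cached contexts that would conflict with it in
 * hardware; the moved-out ones are restored by ice_add_rss_cfg_post_gtpu
 * once the new rule has been added.
 */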
2417 static int
2418 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2419 			 u8 ctx_idx)
2420 {
2421 	int ret;
2422 
2423 	switch (ctx_idx) {
2424 	case ICE_HASH_GTPU_CTX_EH_IP:
2425 		ret = ice_hash_remove(pf,
2426 				      &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2427 		if (ret && (ret != -ENOENT))
2428 			return ret;
2429 
2430 		ret = ice_hash_remove(pf,
2431 				      &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2432 		if (ret && (ret != -ENOENT))
2433 			return ret;
2434 
2435 		ret = ice_hash_remove(pf,
2436 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2437 		if (ret && (ret != -ENOENT))
2438 			return ret;
2439 
2440 		ret = ice_hash_remove(pf,
2441 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2442 		if (ret && (ret != -ENOENT))
2443 			return ret;
2444 
2445 		ret = ice_hash_remove(pf,
2446 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2447 		if (ret && (ret != -ENOENT))
2448 			return ret;
2449 
2450 		ret = ice_hash_remove(pf,
2451 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2452 		if (ret && (ret != -ENOENT))
2453 			return ret;
2454 
2455 		ret = ice_hash_remove(pf,
2456 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2457 		if (ret && (ret != -ENOENT))
2458 			return ret;
2459 
2460 		ret = ice_hash_remove(pf,
2461 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2462 		if (ret && (ret != -ENOENT))
2463 			return ret;
2464 
2465 		break;
2466 	case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2467 		ret = ice_hash_remove(pf,
2468 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2469 		if (ret && (ret != -ENOENT))
2470 			return ret;
2471 
2472 		ret = ice_hash_remove(pf,
2473 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2474 		if (ret && (ret != -ENOENT))
2475 			return ret;
2476 
2477 		ret = ice_hash_moveout(pf,
2478 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2479 		if (ret && (ret != -ENOENT))
2480 			return ret;
2481 
2482 		ret = ice_hash_moveout(pf,
2483 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2484 		if (ret && (ret != -ENOENT))
2485 			return ret;
2486 
2487 		ret = ice_hash_moveout(pf,
2488 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2489 		if (ret && (ret != -ENOENT))
2490 			return ret;
2491 
2492 		ret = ice_hash_moveout(pf,
2493 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2494 		if (ret && (ret != -ENOENT))
2495 			return ret;
2496 
2497 		break;
2498 	case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2499 		ret = ice_hash_remove(pf,
2500 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2501 		if (ret && (ret != -ENOENT))
2502 			return ret;
2503 
2504 		ret = ice_hash_remove(pf,
2505 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2506 		if (ret && (ret != -ENOENT))
2507 			return ret;
2508 
2509 		ret = ice_hash_moveout(pf,
2510 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2511 		if (ret && (ret != -ENOENT))
2512 			return ret;
2513 
2514 		ret = ice_hash_moveout(pf,
2515 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2516 		if (ret && (ret != -ENOENT))
2517 			return ret;
2518 
2519 		ret = ice_hash_moveout(pf,
2520 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2521 		if (ret && (ret != -ENOENT))
2522 			return ret;
2523 
2524 		ret = ice_hash_moveout(pf,
2525 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2526 		if (ret && (ret != -ENOENT))
2527 			return ret;
2528 
2529 		break;
2530 	case ICE_HASH_GTPU_CTX_UP_IP:
2531 		ret = ice_hash_remove(pf,
2532 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2533 		if (ret && (ret != -ENOENT))
2534 			return ret;
2535 
2536 		ret = ice_hash_remove(pf,
2537 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2538 		if (ret && (ret != -ENOENT))
2539 			return ret;
2540 
2541 		ret = ice_hash_moveout(pf,
2542 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2543 		if (ret && (ret != -ENOENT))
2544 			return ret;
2545 
2546 		ret = ice_hash_moveout(pf,
2547 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2548 		if (ret && (ret != -ENOENT))
2549 			return ret;
2550 
2551 		ret = ice_hash_moveout(pf,
2552 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2553 		if (ret && (ret != -ENOENT))
2554 			return ret;
2555 
2556 		break;
2557 	case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2558 	case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2559 		ret = ice_hash_moveout(pf,
2560 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2561 		if (ret && (ret != -ENOENT))
2562 			return ret;
2563 
2564 		ret = ice_hash_moveout(pf,
2565 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2566 		if (ret && (ret != -ENOENT))
2567 			return ret;
2568 
2569 		ret = ice_hash_moveout(pf,
2570 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2571 		if (ret && (ret != -ENOENT))
2572 			return ret;
2573 
2574 		break;
2575 	case ICE_HASH_GTPU_CTX_DW_IP:
2576 		ret = ice_hash_remove(pf,
2577 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2578 		if (ret && (ret != -ENOENT))
2579 			return ret;
2580 
2581 		ret = ice_hash_remove(pf,
2582 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2583 		if (ret && (ret != -ENOENT))
2584 			return ret;
2585 
2586 		ret = ice_hash_moveout(pf,
2587 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2588 		if (ret && (ret != -ENOENT))
2589 			return ret;
2590 
2591 		ret = ice_hash_moveout(pf,
2592 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2593 		if (ret && (ret != -ENOENT))
2594 			return ret;
2595 
2596 		ret = ice_hash_moveout(pf,
2597 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2598 		if (ret && (ret != -ENOENT))
2599 			return ret;
2600 
2601 		break;
2602 	case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2603 	case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2604 		ret = ice_hash_moveout(pf,
2605 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2606 		if (ret && (ret != -ENOENT))
2607 			return ret;
2608 
2609 		ret = ice_hash_moveout(pf,
2610 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2611 		if (ret && (ret != -ENOENT))
2612 			return ret;
2613 
2614 		ret = ice_hash_moveout(pf,
2615 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2616 		if (ret && (ret != -ENOENT))
2617 			return ret;
2618 
2619 		break;
2620 	default:
2621 		break;
2622 	}
2623 
2624 	return 0;
2625 }
2626 
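/* Map a flow header bitmap to one of the nine GTPU hash contexts: the PDU
 * type (EH/uplink/downlink) selects the group and the L4 type
 * (other/UDP/TCP) selects the entry, i.e. index = eh_idx * 3 + ip_idx.
 * Non-GTPU or non-IP headers map to ICE_HASH_GTPU_CTX_MAX.
 */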
2627 static u8 calc_gtpu_ctx_idx(uint32_t hdr)
2628 {
2629 	u8 eh_idx, ip_idx;
2630 
2631 	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
2632 		eh_idx = 0;
2633 	else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
2634 		eh_idx = 1;
2635 	else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
2636 		eh_idx = 2;
2637 	else
2638 		return ICE_HASH_GTPU_CTX_MAX;
2639 
2640 	ip_idx = 0;
2641 	if (hdr & ICE_FLOW_SEG_HDR_UDP)
2642 		ip_idx = 1;
2643 	else if (hdr & ICE_FLOW_SEG_HDR_TCP)
2644 		ip_idx = 2;
2645 
2646 	if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
2647 		return eh_idx * 3 + ip_idx;
2648 	else
2649 		return ICE_HASH_GTPU_CTX_MAX;
2650 }
2651 
2652 static int
2653 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2654 {
2655 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2656 
2657 	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2658 		return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
2659 						gtpu_ctx_idx);
2660 	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2661 		return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
2662 						gtpu_ctx_idx);
2663 
2664 	return 0;
2665 }
2666 
2667 static int
2668 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2669 			  u8 ctx_idx, struct ice_rss_hash_cfg *cfg)
2670 {
2671 	int ret;
2672 
2673 	if (ctx_idx < ICE_HASH_GTPU_CTX_MAX)
2674 		ctx->ctx[ctx_idx] = *cfg;
2675 
2676 	switch (ctx_idx) {
2677 	case ICE_HASH_GTPU_CTX_EH_IP:
2678 		break;
2679 	case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2680 		ret = ice_hash_moveback(pf,
2681 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2682 		if (ret && (ret != -ENOENT))
2683 			return ret;
2684 
2685 		ret = ice_hash_moveback(pf,
2686 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2687 		if (ret && (ret != -ENOENT))
2688 			return ret;
2689 
2690 		ret = ice_hash_moveback(pf,
2691 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2692 		if (ret && (ret != -ENOENT))
2693 			return ret;
2694 
2695 		ret = ice_hash_moveback(pf,
2696 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2697 		if (ret && (ret != -ENOENT))
2698 			return ret;
2699 
2700 		break;
2701 	case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2702 		ret = ice_hash_moveback(pf,
2703 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2704 		if (ret && (ret != -ENOENT))
2705 			return ret;
2706 
2707 		ret = ice_hash_moveback(pf,
2708 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2709 		if (ret && (ret != -ENOENT))
2710 			return ret;
2711 
2712 		ret = ice_hash_moveback(pf,
2713 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2714 		if (ret && (ret != -ENOENT))
2715 			return ret;
2716 
2717 		ret = ice_hash_moveback(pf,
2718 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2719 		if (ret && (ret != -ENOENT))
2720 			return ret;
2721 
2722 		break;
2723 	case ICE_HASH_GTPU_CTX_UP_IP:
2724 	case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2725 	case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2726 	case ICE_HASH_GTPU_CTX_DW_IP:
2727 	case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2728 	case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2729 		ret = ice_hash_moveback(pf,
2730 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2731 		if (ret && (ret != -ENOENT))
2732 			return ret;
2733 
2734 		ret = ice_hash_moveback(pf,
2735 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2736 		if (ret && (ret != -ENOENT))
2737 			return ret;
2738 
2739 		ret = ice_hash_moveback(pf,
2740 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2741 		if (ret && (ret != -ENOENT))
2742 			return ret;
2743 
2744 		break;
2745 	default:
2746 		break;
2747 	}
2748 
2749 	return 0;
2750 }
2751 
2752 static int
2753 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2754 {
2755 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs);
2756 
2757 	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
2758 		return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4,
2759 						 gtpu_ctx_idx, cfg);
2760 	else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
2761 		return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6,
2762 						 gtpu_ctx_idx, cfg);
2763 
2764 	return 0;
2765 }
2766 
2767 static void
2768 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2769 {
2770 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2771 
2772 	if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
2773 		return;
2774 
2775 	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2776 		hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
2777 	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2778 		hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
2779 }
2780 
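/* The *_wrap helpers keep the PF's GTPU hash-context cache consistent with
 * the hardware: the pre/post hooks above are invoked around the base-code
 * ice_add_rss_cfg()/ice_rem_rss_cfg() calls.
 */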
2781 int
2782 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2783 		     struct ice_rss_hash_cfg *cfg)
2784 {
2785 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2786 	int ret;
2787 
2788 	ret = ice_rem_rss_cfg(hw, vsi_id, cfg);
2789 	if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
2790 		PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
2791 
2792 	ice_rem_rss_cfg_post(pf, cfg->addl_hdrs);
2793 
2794 	return 0;
2795 }
2796 
2797 int
2798 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2799 		     struct ice_rss_hash_cfg *cfg)
2800 {
2801 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2802 	int ret;
2803 
2804 	ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs);
2805 	if (ret)
2806 		PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");
2807 
2808 	ret = ice_add_rss_cfg(hw, vsi_id, cfg);
2809 	if (ret)
2810 		PMD_DRV_LOG(ERR, "add rss cfg failed\n");
2811 
2812 	ret = ice_add_rss_cfg_post(pf, cfg);
2813 	if (ret)
2814 		PMD_DRV_LOG(ERR, "add rss cfg post failed\n");
2815 
2816 	return 0;
2817 }
2818 
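/* Translate the ETH_RSS_* bits in rss_hf into RSS flow configurations on the
 * main VSI: plain IPv4/IPv6 and UDP/TCP/SCTP first, then the matching GTPU,
 * GTPU-EH and PPPoE variants. pf->rss_hf records which of the supported bits
 * were actually requested.
 */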
2819 static void
2820 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2821 {
2822 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2823 	struct ice_vsi *vsi = pf->main_vsi;
2824 	struct ice_rss_hash_cfg cfg;
2825 	int ret;
2826 
2827 #define ICE_RSS_HF_ALL ( \
2828 	ETH_RSS_IPV4 | \
2829 	ETH_RSS_IPV6 | \
2830 	ETH_RSS_NONFRAG_IPV4_UDP | \
2831 	ETH_RSS_NONFRAG_IPV6_UDP | \
2832 	ETH_RSS_NONFRAG_IPV4_TCP | \
2833 	ETH_RSS_NONFRAG_IPV6_TCP | \
2834 	ETH_RSS_NONFRAG_IPV4_SCTP | \
2835 	ETH_RSS_NONFRAG_IPV6_SCTP)
2836 
2837 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
2838 	if (ret)
2839 		PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
2840 			    __func__, ret);
2841 
2842 	cfg.symm = 0;
2843 	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
2844 	/* Configure RSS for IPv4 with src/dst addr as input set */
2845 	if (rss_hf & ETH_RSS_IPV4) {
2846 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2847 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2848 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2849 		if (ret)
2850 			PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
2851 				    __func__, ret);
2852 	}
2853 
2854 	/* Configure RSS for IPv6 with src/dst addr as input set */
2855 	if (rss_hf & ETH_RSS_IPV6) {
2856 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2857 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2858 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2859 		if (ret)
2860 			PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
2861 				    __func__, ret);
2862 	}
2863 
2864 	/* Configure RSS for udp4 with src/dst addr and port as input set */
2865 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2866 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
2867 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2868 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
2869 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2870 		if (ret)
2871 			PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
2872 				    __func__, ret);
2873 	}
2874 
2875 	/* Configure RSS for udp6 with src/dst addr and port as input set */
2876 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2877 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
2878 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2879 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
2880 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2881 		if (ret)
2882 			PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
2883 				    __func__, ret);
2884 	}
2885 
2886 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
2887 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2888 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
2889 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2890 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
2891 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2892 		if (ret)
2893 			PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
2894 				    __func__, ret);
2895 	}
2896 
2897 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
2898 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2899 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
2900 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2901 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
2902 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2903 		if (ret)
2904 			PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
2905 				    __func__, ret);
2906 	}
2907 
2908 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
2909 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
2910 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
2911 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2912 		cfg.hash_flds = ICE_HASH_SCTP_IPV4;
2913 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2914 		if (ret)
2915 			PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
2916 				    __func__, ret);
2917 	}
2918 
2919 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
2920 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
2921 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
2922 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2923 		cfg.hash_flds = ICE_HASH_SCTP_IPV6;
2924 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2925 		if (ret)
2926 			PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
2927 				    __func__, ret);
2928 	}
2929 
2930 	if (rss_hf & ETH_RSS_IPV4) {
2931 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4 |
2932 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2933 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2934 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2935 		if (ret)
2936 			PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d",
2937 				    __func__, ret);
2938 
2939 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4 |
2940 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2941 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2942 		if (ret)
2943 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
2944 				    __func__, ret);
2945 
2946 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
2947 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2948 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2949 		if (ret)
2950 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
2951 				    __func__, ret);
2952 	}
2953 
2954 	if (rss_hf & ETH_RSS_IPV6) {
2955 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6 |
2956 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2957 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2958 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2959 		if (ret)
2960 			PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d",
2961 				    __func__, ret);
2962 
2963 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6 |
2964 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2965 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2966 		if (ret)
2967 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
2968 				    __func__, ret);
2969 
2970 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
2971 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2972 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2973 		if (ret)
2974 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
2975 				    __func__, ret);
2976 	}
2977 
2978 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2979 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_UDP |
2980 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2981 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
2982 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2983 		if (ret)
2984 			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d",
2985 				    __func__, ret);
2986 
2987 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_UDP |
2988 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2989 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2990 		if (ret)
2991 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
2992 				    __func__, ret);
2993 
2994 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
2995 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2996 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2997 		if (ret)
2998 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
2999 				    __func__, ret);
3000 	}
3001 
3002 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
3003 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_UDP |
3004 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3005 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
3006 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3007 		if (ret)
3008 			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d",
3009 				    __func__, ret);
3010 
3011 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_UDP |
3012 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3013 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3014 		if (ret)
3015 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
3016 				    __func__, ret);
3017 
3018 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3019 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3020 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3021 		if (ret)
3022 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
3023 				    __func__, ret);
3024 	}
3025 
3026 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
3027 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_TCP |
3028 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3029 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
3030 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3031 		if (ret)
3032 			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d",
3033 				    __func__, ret);
3034 
3035 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_TCP |
3036 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3037 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3038 		if (ret)
3039 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
3040 				    __func__, ret);
3041 
3042 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3043 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3044 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3045 		if (ret)
3046 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
3047 				    __func__, ret);
3048 	}
3049 
3050 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
3051 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_TCP |
3052 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3053 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
3054 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3055 		if (ret)
3056 			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d",
3057 				    __func__, ret);
3058 
3059 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_TCP |
3060 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3061 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3062 		if (ret)
3063 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
3064 				    __func__, ret);
3065 
3066 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3067 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3068 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3069 		if (ret)
3070 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
3071 				    __func__, ret);
3072 	}
3073 
3074 	pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
3075 }
3076 
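/* Default RSS setup at configure time: allocate the key and LUT, program a
 * user-supplied or random hash key, spread the LUT round-robin over the Rx
 * queues, select symmetric Toeplitz hashing and finally apply the rss_hf
 * hash types via ice_rss_hash_set().
 */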
3077 static int ice_init_rss(struct ice_pf *pf)
3078 {
3079 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
3080 	struct ice_vsi *vsi = pf->main_vsi;
3081 	struct rte_eth_dev *dev = pf->adapter->eth_dev;
3082 	struct ice_aq_get_set_rss_lut_params lut_params;
3083 	struct rte_eth_rss_conf *rss_conf;
3084 	struct ice_aqc_get_set_rss_keys key;
3085 	uint16_t i, nb_q;
3086 	int ret = 0;
3087 	bool is_safe_mode = pf->adapter->is_safe_mode;
3088 	uint32_t reg;
3089 
3090 	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
3091 	nb_q = dev->data->nb_rx_queues;
3092 	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3093 	vsi->rss_lut_size = pf->hash_lut_size;
3094 
3095 	if (nb_q == 0) {
3096 		PMD_DRV_LOG(WARNING,
3097 			"RSS is not supported because the number of Rx queues is zero\n");
3098 		return 0;
3099 	}
3100 
3101 	if (is_safe_mode) {
3102 		PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
3103 		return 0;
3104 	}
3105 
3106 	if (!vsi->rss_key) {
3107 		vsi->rss_key = rte_zmalloc(NULL,
3108 					   vsi->rss_key_size, 0);
3109 		if (vsi->rss_key == NULL) {
3110 			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3111 			return -ENOMEM;
3112 		}
3113 	}
3114 	if (!vsi->rss_lut) {
3115 		vsi->rss_lut = rte_zmalloc(NULL,
3116 					   vsi->rss_lut_size, 0);
3117 		if (vsi->rss_lut == NULL) {
3118 			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3119 			rte_free(vsi->rss_key);
3120 			vsi->rss_key = NULL;
3121 			return -ENOMEM;
3122 		}
3123 	}
3124 	/* configure RSS key */
3125 	if (!rss_conf->rss_key) {
3126 		/* Generate a random default hash key */
3127 		for (i = 0; i < vsi->rss_key_size; i++)
3128 			vsi->rss_key[i] = (uint8_t)rte_rand();
3129 	} else {
3130 		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3131 			   RTE_MIN(rss_conf->rss_key_len,
3132 				   vsi->rss_key_size));
3133 	}
3134 	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3135 	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3136 	if (ret)
3137 		goto out;
3138 
3139 	/* init RSS LUT table */
3140 	for (i = 0; i < vsi->rss_lut_size; i++)
3141 		vsi->rss_lut[i] = i % nb_q;
3142 
3143 	lut_params.vsi_handle = vsi->idx;
3144 	lut_params.lut_size = vsi->rss_lut_size;
3145 	lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3146 	lut_params.lut = vsi->rss_lut;
3147 	lut_params.global_lut_id = 0;
3148 	ret = ice_aq_set_rss_lut(hw, &lut_params);
3149 	if (ret)
3150 		goto out;
3151 
3152 	/* Select symmetric Toeplitz hashing for this VSI. */
3153 	reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3154 	reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3155 		(1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3156 	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3157 
3158 	/* RSS hash configuration */
3159 	ice_rss_hash_set(pf, rss_conf->rss_hf);
3160 
3161 	return 0;
3162 out:
3163 	rte_free(vsi->rss_key);
3164 	vsi->rss_key = NULL;
3165 	rte_free(vsi->rss_lut);
3166 	vsi->rss_lut = NULL;
3167 	return -EINVAL;
3168 }
3169 
3170 static int
3171 ice_dev_configure(struct rte_eth_dev *dev)
3172 {
3173 	struct ice_adapter *ad =
3174 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3175 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3176 	int ret;
3177 
3178 	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
3179 	 * bulk allocation or vector Rx preconditions, we will reset it.
3180 	 */
3181 	ad->rx_bulk_alloc_allowed = true;
3182 	ad->tx_simple_allowed = true;
3183 
3184 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
3185 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
3186 
3187 	if (dev->data->nb_rx_queues) {
3188 		ret = ice_init_rss(pf);
3189 		if (ret) {
3190 			PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3191 			return ret;
3192 		}
3193 	}
3194 
3195 	return 0;
3196 }
3197 
3198 static void
3199 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3200 		       int base_queue, int nb_queue)
3201 {
3202 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3203 	uint32_t val, val_tx;
3204 	int i;
3205 
3206 	for (i = 0; i < nb_queue; i++) {
3207 		/* do the actual bind */
3208 		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3209 		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3210 		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3211 			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3212 
3213 		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3214 			    base_queue + i, msix_vect);
3215 		/* set ITR0 value */
3216 		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
3217 		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3218 		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3219 	}
3220 }
3221 
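/* Bind the VSI's queues to MSI-X vectors: with vfio (enough vectors) each
 * queue gets its own vector; otherwise all queues share a single vector
 * (ICE_MISC_VEC_ID when "others" interrupts are not allowed).
 */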
3222 void
3223 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3224 {
3225 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
3226 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3227 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3228 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3229 	uint16_t msix_vect = vsi->msix_intr;
3230 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
3231 	uint16_t queue_idx = 0;
3232 	int record = 0;
3233 	int i;
3234 
3235 	/* clear Rx/Tx queue interrupt */
3236 	for (i = 0; i < vsi->nb_used_qps; i++) {
3237 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3238 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3239 	}
3240 
3241 	/* PF bind interrupt */
3242 	if (rte_intr_dp_is_en(intr_handle)) {
3243 		queue_idx = 0;
3244 		record = 1;
3245 	}
3246 
3247 	for (i = 0; i < vsi->nb_used_qps; i++) {
3248 		if (nb_msix <= 1) {
3249 			if (!rte_intr_allow_others(intr_handle))
3250 				msix_vect = ICE_MISC_VEC_ID;
3251 
3252 			/* uio: map all queues to a single msix_vect */
3253 			__vsi_queues_bind_intr(vsi, msix_vect,
3254 					       vsi->base_queue + i,
3255 					       vsi->nb_used_qps - i);
3256 
3257 			for (; !!record && i < vsi->nb_used_qps; i++)
3258 				intr_handle->intr_vec[queue_idx + i] =
3259 					msix_vect;
3260 			break;
3261 		}
3262 
3263 		/* vfio 1:1 queue/msix_vect mapping */
3264 		__vsi_queues_bind_intr(vsi, msix_vect,
3265 				       vsi->base_queue + i, 1);
3266 
3267 		if (!!record)
3268 			intr_handle->intr_vec[queue_idx + i] = msix_vect;
3269 
3270 		msix_vect++;
3271 		nb_msix--;
3272 	}
3273 }
3274 
3275 void
3276 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3277 {
3278 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
3279 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3280 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3281 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3282 	uint16_t msix_intr, i;
3283 
3284 	if (rte_intr_allow_others(intr_handle))
3285 		for (i = 0; i < vsi->nb_used_qps; i++) {
3286 			msix_intr = vsi->msix_intr + i;
3287 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3288 				      GLINT_DYN_CTL_INTENA_M |
3289 				      GLINT_DYN_CTL_CLEARPBA_M |
3290 				      GLINT_DYN_CTL_ITR_INDX_M |
3291 				      GLINT_DYN_CTL_WB_ON_ITR_M);
3292 		}
3293 	else
3294 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3295 			      GLINT_DYN_CTL_INTENA_M |
3296 			      GLINT_DYN_CTL_CLEARPBA_M |
3297 			      GLINT_DYN_CTL_ITR_INDX_M |
3298 			      GLINT_DYN_CTL_WB_ON_ITR_M);
3299 }
3300 
3301 static int
3302 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3303 {
3304 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3305 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3306 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3307 	struct ice_vsi *vsi = pf->main_vsi;
3308 	uint32_t intr_vector = 0;
3309 
3310 	rte_intr_disable(intr_handle);
3311 
3312 	/* check and configure queue intr-vector mapping */
3313 	if ((rte_intr_cap_multiple(intr_handle) ||
3314 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
3315 	    dev->data->dev_conf.intr_conf.rxq != 0) {
3316 		intr_vector = dev->data->nb_rx_queues;
3317 		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3318 			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3319 				    ICE_MAX_INTR_QUEUE_NUM);
3320 			return -ENOTSUP;
3321 		}
3322 		if (rte_intr_efd_enable(intr_handle, intr_vector))
3323 			return -1;
3324 	}
3325 
3326 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3327 		intr_handle->intr_vec =
3328 		rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
3329 			    0);
3330 		if (!intr_handle->intr_vec) {
3331 			PMD_DRV_LOG(ERR,
3332 				    "Failed to allocate %d rx_queues intr_vec",
3333 				    dev->data->nb_rx_queues);
3334 			return -ENOMEM;
3335 		}
3336 	}
3337 
3338 	/* Map queues with MSIX interrupt */
3339 	vsi->nb_used_qps = dev->data->nb_rx_queues;
3340 	ice_vsi_queues_bind_intr(vsi);
3341 
3342 	/* Enable interrupts for all the queues */
3343 	ice_vsi_enable_queues_intr(vsi);
3344 
3345 	rte_intr_enable(intr_handle);
3346 
3347 	return 0;
3348 }
3349 
3350 static void
3351 ice_get_init_link_status(struct rte_eth_dev *dev)
3352 {
3353 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3354 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3355 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3356 	struct ice_link_status link_status;
3357 	int ret;
3358 
3359 	ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3360 				   &link_status, NULL);
3361 	if (ret != ICE_SUCCESS) {
3362 		PMD_DRV_LOG(ERR, "Failed to get link info");
3363 		pf->init_link_up = false;
3364 		return;
3365 	}
3366 
3367 	if (link_status.link_info & ICE_AQ_LINK_UP)
3368 		pf->init_link_up = true;
3369 }
3370 
3371 static int
3372 ice_dev_start(struct rte_eth_dev *dev)
3373 {
3374 	struct rte_eth_dev_data *data = dev->data;
3375 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3376 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3377 	struct ice_vsi *vsi = pf->main_vsi;
3378 	uint16_t nb_rxq = 0;
3379 	uint16_t nb_txq, i;
3380 	uint16_t max_frame_size;
3381 	int mask, ret;
3382 
3383 	/* program Tx queues' context in hardware */
3384 	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3385 		ret = ice_tx_queue_start(dev, nb_txq);
3386 		if (ret) {
3387 			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
3388 			goto tx_err;
3389 		}
3390 	}
3391 
3392 	/* program Rx queues' context in hardware */
3393 	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3394 		ret = ice_rx_queue_start(dev, nb_rxq);
3395 		if (ret) {
3396 			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
3397 			goto rx_err;
3398 		}
3399 	}
3400 
3401 	ice_set_rx_function(dev);
3402 	ice_set_tx_function(dev);
3403 
3404 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
3405 			ETH_VLAN_EXTEND_MASK;
3406 	ret = ice_vlan_offload_set(dev, mask);
3407 	if (ret) {
3408 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3409 		goto rx_err;
3410 	}
3411 
3412 	/* enable Rx interrupts and map Rx queues to interrupt vectors */
3413 	if (ice_rxq_intr_setup(dev))
3414 		return -EIO;
3415 
3416 	/* Enable receiving broadcast packets and transmitting packets */
3417 	ret = ice_set_vsi_promisc(hw, vsi->idx,
3418 				  ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3419 				  ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3420 				  0);
3421 	if (ret != ICE_SUCCESS)
3422 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
3423 
3424 	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3425 				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3426 				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3427 				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3428 				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3429 				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
3430 				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3431 				     NULL);
3432 	if (ret != ICE_SUCCESS)
3433 		PMD_DRV_LOG(WARNING, "Fail to set phy mask");
3434 
3435 	ice_get_init_link_status(dev);
3436 
3437 	ice_dev_set_link_up(dev);
3438 
3439 	/* Call the get_link_info AQ command to enable/disable LSE */
3440 	ice_link_update(dev, 0);
3441 
3442 	pf->adapter_stopped = false;
3443 
3444 	/* Set the max frame size to the default value */
3445 	max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
3446 		pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
3447 		ICE_FRAME_SIZE_MAX;
3448 
3449 	/* Set the max frame size in HW */
3450 	ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3451 
3452 	return 0;
3453 
3454 	/* stop the started queues if failed to start all queues */
3455 rx_err:
3456 	for (i = 0; i < nb_rxq; i++)
3457 		ice_rx_queue_stop(dev, i);
3458 tx_err:
3459 	for (i = 0; i < nb_txq; i++)
3460 		ice_tx_queue_stop(dev, i);
3461 
3462 	return -EIO;
3463 }
3464 
3465 static int
3466 ice_dev_reset(struct rte_eth_dev *dev)
3467 {
3468 	int ret;
3469 
3470 	if (dev->data->sriov.active)
3471 		return -ENOTSUP;
3472 
3473 	ret = ice_dev_uninit(dev);
3474 	if (ret) {
3475 		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3476 		return -ENXIO;
3477 	}
3478 
3479 	ret = ice_dev_init(dev);
3480 	if (ret) {
3481 		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3482 		return -ENXIO;
3483 	}
3484 
3485 	return 0;
3486 }
3487 
3488 static int
3489 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3490 {
3491 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3492 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3493 	struct ice_vsi *vsi = pf->main_vsi;
3494 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3495 	bool is_safe_mode = pf->adapter->is_safe_mode;
3496 	u64 phy_type_low;
3497 	u64 phy_type_high;
3498 
3499 	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3500 	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3501 	dev_info->max_rx_queues = vsi->nb_qps;
3502 	dev_info->max_tx_queues = vsi->nb_qps;
3503 	dev_info->max_mac_addrs = vsi->max_macaddrs;
3504 	dev_info->max_vfs = pci_dev->max_vfs;
3505 	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3506 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3507 
3508 	dev_info->rx_offload_capa =
3509 		DEV_RX_OFFLOAD_VLAN_STRIP |
3510 		DEV_RX_OFFLOAD_JUMBO_FRAME |
3511 		DEV_RX_OFFLOAD_KEEP_CRC |
3512 		DEV_RX_OFFLOAD_SCATTER |
3513 		DEV_RX_OFFLOAD_VLAN_FILTER;
3514 	dev_info->tx_offload_capa =
3515 		DEV_TX_OFFLOAD_VLAN_INSERT |
3516 		DEV_TX_OFFLOAD_TCP_TSO |
3517 		DEV_TX_OFFLOAD_MULTI_SEGS |
3518 		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3519 	dev_info->flow_type_rss_offloads = 0;
3520 
3521 	if (!is_safe_mode) {
3522 		dev_info->rx_offload_capa |=
3523 			DEV_RX_OFFLOAD_IPV4_CKSUM |
3524 			DEV_RX_OFFLOAD_UDP_CKSUM |
3525 			DEV_RX_OFFLOAD_TCP_CKSUM |
3526 			DEV_RX_OFFLOAD_QINQ_STRIP |
3527 			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3528 			DEV_RX_OFFLOAD_VLAN_EXTEND |
3529 			DEV_RX_OFFLOAD_RSS_HASH;
3530 		dev_info->tx_offload_capa |=
3531 			DEV_TX_OFFLOAD_QINQ_INSERT |
3532 			DEV_TX_OFFLOAD_IPV4_CKSUM |
3533 			DEV_TX_OFFLOAD_UDP_CKSUM |
3534 			DEV_TX_OFFLOAD_TCP_CKSUM |
3535 			DEV_TX_OFFLOAD_SCTP_CKSUM |
3536 			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3537 			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
3538 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3539 	}
3540 
3541 	dev_info->rx_queue_offload_capa = 0;
3542 	dev_info->tx_queue_offload_capa = 0;
3543 
3544 	dev_info->reta_size = pf->hash_lut_size;
3545 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3546 
3547 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3548 		.rx_thresh = {
3549 			.pthresh = ICE_DEFAULT_RX_PTHRESH,
3550 			.hthresh = ICE_DEFAULT_RX_HTHRESH,
3551 			.wthresh = ICE_DEFAULT_RX_WTHRESH,
3552 		},
3553 		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3554 		.rx_drop_en = 0,
3555 		.offloads = 0,
3556 	};
3557 
3558 	dev_info->default_txconf = (struct rte_eth_txconf) {
3559 		.tx_thresh = {
3560 			.pthresh = ICE_DEFAULT_TX_PTHRESH,
3561 			.hthresh = ICE_DEFAULT_TX_HTHRESH,
3562 			.wthresh = ICE_DEFAULT_TX_WTHRESH,
3563 		},
3564 		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3565 		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3566 		.offloads = 0,
3567 	};
3568 
3569 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3570 		.nb_max = ICE_MAX_RING_DESC,
3571 		.nb_min = ICE_MIN_RING_DESC,
3572 		.nb_align = ICE_ALIGN_RING_DESC,
3573 	};
3574 
3575 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3576 		.nb_max = ICE_MAX_RING_DESC,
3577 		.nb_min = ICE_MIN_RING_DESC,
3578 		.nb_align = ICE_ALIGN_RING_DESC,
3579 	};
3580 
3581 	dev_info->speed_capa = ETH_LINK_SPEED_10M |
3582 			       ETH_LINK_SPEED_100M |
3583 			       ETH_LINK_SPEED_1G |
3584 			       ETH_LINK_SPEED_2_5G |
3585 			       ETH_LINK_SPEED_5G |
3586 			       ETH_LINK_SPEED_10G |
3587 			       ETH_LINK_SPEED_20G |
3588 			       ETH_LINK_SPEED_25G;
3589 
3590 	phy_type_low = hw->port_info->phy.phy_type_low;
3591 	phy_type_high = hw->port_info->phy.phy_type_high;
3592 
3593 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3594 		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
3595 
3596 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3597 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3598 		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
3599 
3600 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3601 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3602 
3603 	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3604 	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3605 	dev_info->default_rxportconf.nb_queues = 1;
3606 	dev_info->default_txportconf.nb_queues = 1;
3607 	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3608 	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3609 
3610 	return 0;
3611 }
3612 
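/* Atomically snapshot dev->data->dev_link into *link using a 64-bit
 * compare-and-swap; the write helper below updates dev_link the same way.
 */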
3613 static inline int
3614 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3615 			    struct rte_eth_link *link)
3616 {
3617 	struct rte_eth_link *dst = link;
3618 	struct rte_eth_link *src = &dev->data->dev_link;
3619 
3620 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3621 				*(uint64_t *)src) == 0)
3622 		return -1;
3623 
3624 	return 0;
3625 }
3626 
3627 static inline int
3628 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3629 			     struct rte_eth_link *link)
3630 {
3631 	struct rte_eth_link *dst = &dev->data->dev_link;
3632 	struct rte_eth_link *src = link;
3633 
3634 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3635 				*(uint64_t *)src) == 0)
3636 		return -1;
3637 
3638 	return 0;
3639 }
3640 
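/* Query the link status from firmware and publish it in dev->data->dev_link.
 * With wait_to_complete set, poll up to MAX_REPEAT_TIME * CHECK_INTERVAL ms
 * for the link to come up. Returns 0 if the status changed, -1 otherwise.
 */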
3641 static int
3642 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3643 {
3644 #define CHECK_INTERVAL 100  /* 100ms */
3645 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
3646 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3647 	struct ice_link_status link_status;
3648 	struct rte_eth_link link, old;
3649 	int status;
3650 	unsigned int rep_cnt = MAX_REPEAT_TIME;
3651 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3652 
3653 	memset(&link, 0, sizeof(link));
3654 	memset(&old, 0, sizeof(old));
3655 	memset(&link_status, 0, sizeof(link_status));
3656 	ice_atomic_read_link_status(dev, &old);
3657 
3658 	do {
3659 		/* Get link status information from hardware */
3660 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
3661 					      &link_status, NULL);
3662 		if (status != ICE_SUCCESS) {
3663 			link.link_speed = ETH_SPEED_NUM_100M;
3664 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
3665 			PMD_DRV_LOG(ERR, "Failed to get link info");
3666 			goto out;
3667 		}
3668 
3669 		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3670 		if (!wait_to_complete || link.link_status)
3671 			break;
3672 
3673 		rte_delay_ms(CHECK_INTERVAL);
3674 	} while (--rep_cnt);
3675 
3676 	if (!link.link_status)
3677 		goto out;
3678 
3679 	/* Full-duplex operation at all supported speeds */
3680 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
3681 
3682 	/* Parse the link status */
3683 	switch (link_status.link_speed) {
3684 	case ICE_AQ_LINK_SPEED_10MB:
3685 		link.link_speed = ETH_SPEED_NUM_10M;
3686 		break;
3687 	case ICE_AQ_LINK_SPEED_100MB:
3688 		link.link_speed = ETH_SPEED_NUM_100M;
3689 		break;
3690 	case ICE_AQ_LINK_SPEED_1000MB:
3691 		link.link_speed = ETH_SPEED_NUM_1G;
3692 		break;
3693 	case ICE_AQ_LINK_SPEED_2500MB:
3694 		link.link_speed = ETH_SPEED_NUM_2_5G;
3695 		break;
3696 	case ICE_AQ_LINK_SPEED_5GB:
3697 		link.link_speed = ETH_SPEED_NUM_5G;
3698 		break;
3699 	case ICE_AQ_LINK_SPEED_10GB:
3700 		link.link_speed = ETH_SPEED_NUM_10G;
3701 		break;
3702 	case ICE_AQ_LINK_SPEED_20GB:
3703 		link.link_speed = ETH_SPEED_NUM_20G;
3704 		break;
3705 	case ICE_AQ_LINK_SPEED_25GB:
3706 		link.link_speed = ETH_SPEED_NUM_25G;
3707 		break;
3708 	case ICE_AQ_LINK_SPEED_40GB:
3709 		link.link_speed = ETH_SPEED_NUM_40G;
3710 		break;
3711 	case ICE_AQ_LINK_SPEED_50GB:
3712 		link.link_speed = ETH_SPEED_NUM_50G;
3713 		break;
3714 	case ICE_AQ_LINK_SPEED_100GB:
3715 		link.link_speed = ETH_SPEED_NUM_100G;
3716 		break;
3717 	case ICE_AQ_LINK_SPEED_UNKNOWN:
3718 		PMD_DRV_LOG(ERR, "Unknown link speed");
3719 		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
3720 		break;
3721 	default:
3722 		PMD_DRV_LOG(ERR, "None link speed");
3723 		link.link_speed = ETH_SPEED_NUM_NONE;
3724 		break;
3725 	}
3726 
3727 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3728 			      ETH_LINK_SPEED_FIXED);
3729 
3730 out:
3731 	ice_atomic_write_link_status(dev, &link);
3732 	if (link.link_status == old.link_status)
3733 		return -1;
3734 
3735 	return 0;
3736 }
3737 
3738 /* Force the physical link state by getting the current PHY capabilities from
3739  * hardware and setting the PHY config based on the determined capabilities. If
3740  * the link changes, a link event will be triggered because both the Enable Automatic
3741  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3742  */
3743 static enum ice_status
3744 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3745 {
3746 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3747 	struct ice_aqc_get_phy_caps_data *pcaps;
3748 	struct ice_port_info *pi;
3749 	enum ice_status status;
3750 
3751 	if (!hw || !hw->port_info)
3752 		return ICE_ERR_PARAM;
3753 
3754 	pi = hw->port_info;
3755 
3756 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3757 		ice_malloc(hw, sizeof(*pcaps));
3758 	if (!pcaps)
3759 		return ICE_ERR_NO_MEMORY;
3760 
3761 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3762 				     pcaps, NULL);
3763 	if (status)
3764 		goto out;
3765 
3766 	/* No change in link */
3767 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3768 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3769 		goto out;
3770 
3771 	cfg.phy_type_low = pcaps->phy_type_low;
3772 	cfg.phy_type_high = pcaps->phy_type_high;
3773 	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3774 	cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3775 	cfg.eee_cap = pcaps->eee_cap;
3776 	cfg.eeer_value = pcaps->eeer_value;
3777 	cfg.link_fec_opt = pcaps->link_fec_options;
3778 	if (link_up)
3779 		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3780 	else
3781 		cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3782 
3783 	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3784 
3785 out:
3786 	ice_free(hw, pcaps);
3787 	return status;
3788 }
3789 
3790 static int
3791 ice_dev_set_link_up(struct rte_eth_dev *dev)
3792 {
3793 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3794 
3795 	return ice_force_phys_link_state(hw, true);
3796 }
3797 
3798 static int
3799 ice_dev_set_link_down(struct rte_eth_dev *dev)
3800 {
3801 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3802 
3803 	return ice_force_phys_link_state(hw, false);
3804 }
3805 
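/* Set a new MTU. The port must be stopped; the jumbo frame offload flag and
 * max_rx_pkt_len are updated to match the resulting frame size.
 */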
3806 static int
3807 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3808 {
3809 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3810 	struct rte_eth_dev_data *dev_data = pf->dev_data;
3811 	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3812 
3813 	/* check if mtu is within the allowed range */
3814 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
3815 		return -EINVAL;
3816 
3817 	/* mtu setting is forbidden if the port is started */
3818 	if (dev_data->dev_started) {
3819 		PMD_DRV_LOG(ERR,
3820 			    "port %d must be stopped before configuration",
3821 			    dev_data->port_id);
3822 		return -EBUSY;
3823 	}
3824 
3825 	if (frame_size > ICE_ETH_MAX_LEN)
3826 		dev_data->dev_conf.rxmode.offloads |=
3827 			DEV_RX_OFFLOAD_JUMBO_FRAME;
3828 	else
3829 		dev_data->dev_conf.rxmode.offloads &=
3830 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
3831 
3832 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3833 
3834 	return 0;
3835 }
3836 
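/* Replace the default MAC address: swap the MAC filter on the main VSI and
 * program the new address into firmware as a locally administered address.
 */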
3837 static int ice_macaddr_set(struct rte_eth_dev *dev,
3838 			   struct rte_ether_addr *mac_addr)
3839 {
3840 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3841 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3842 	struct ice_vsi *vsi = pf->main_vsi;
3843 	struct ice_mac_filter *f;
3844 	uint8_t flags = 0;
3845 	int ret;
3846 
3847 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
3848 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
3849 		return -EINVAL;
3850 	}
3851 
3852 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
3853 		if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
3854 			break;
3855 	}
3856 
3857 	if (!f) {
3858 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
3859 		return -EIO;
3860 	}
3861 
3862 	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
3863 	if (ret != ICE_SUCCESS) {
3864 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
3865 		return -EIO;
3866 	}
3867 	ret = ice_add_mac_filter(vsi, mac_addr);
3868 	if (ret != ICE_SUCCESS) {
3869 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
3870 		return -EIO;
3871 	}
3872 	rte_ether_addr_copy(mac_addr, &pf->dev_addr);
3873 
3874 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3875 	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
3876 	if (ret != ICE_SUCCESS)
3877 		PMD_DRV_LOG(ERR, "Failed to set manage mac");
3878 
3879 	return 0;
3880 }
3881 
3882 /* Add a MAC address, and update filters */
3883 static int
3884 ice_macaddr_add(struct rte_eth_dev *dev,
3885 		struct rte_ether_addr *mac_addr,
3886 		__rte_unused uint32_t index,
3887 		__rte_unused uint32_t pool)
3888 {
3889 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3890 	struct ice_vsi *vsi = pf->main_vsi;
3891 	int ret;
3892 
3893 	ret = ice_add_mac_filter(vsi, mac_addr);
3894 	if (ret != ICE_SUCCESS) {
3895 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
3896 		return -EINVAL;
3897 	}
3898 
3899 	return ICE_SUCCESS;
3900 }
3901 
3902 /* Remove a MAC address, and update filters */
3903 static void
3904 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3905 {
3906 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3907 	struct ice_vsi *vsi = pf->main_vsi;
3908 	struct rte_eth_dev_data *data = dev->data;
3909 	struct rte_ether_addr *macaddr;
3910 	int ret;
3911 
3912 	macaddr = &data->mac_addrs[index];
3913 	ret = ice_remove_mac_filter(vsi, macaddr);
3914 	if (ret) {
3915 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
3916 		return;
3917 	}
3918 }
3919 
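/* Add or remove a single VLAN ID filter on the main VSI. VLAN 0 is reserved
 * for untagged traffic and is never touched here.
 */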
3920 static int
3921 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3922 {
3923 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3924 	struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
3925 	struct ice_vsi *vsi = pf->main_vsi;
3926 	int ret;
3927 
3928 	PMD_INIT_FUNC_TRACE();
3929 
3930 	/**
3931 	 * VLAN 0 is the generic filter for untagged packets
3932 	 * and can't be removed or added by the user.
3933 	 */
3934 	if (vlan_id == 0)
3935 		return 0;
3936 
3937 	if (on) {
3938 		ret = ice_add_vlan_filter(vsi, &vlan);
3939 		if (ret < 0) {
3940 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
3941 			return -EINVAL;
3942 		}
3943 	} else {
3944 		ret = ice_remove_vlan_filter(vsi, &vlan);
3945 		if (ret < 0) {
3946 			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
3947 			return -EINVAL;
3948 		}
3949 	}
3950 
3951 	return 0;
3952 }
3953 
3954 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
3955  * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
3956  * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
3957  * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
3958  *
3959  * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
3960  * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
3961  * traffic in SVM, since the VLAN TPID isn't part of filtering.
3962  *
3963  * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
3964  * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
3965  * part of filtering.
3966  */
3967 static int
3968 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
3969 {
3970 	struct ice_vlan vlan;
3971 	int err;
3972 
3973 	vlan = ICE_VLAN(0, 0);
3974 	err = ice_add_vlan_filter(vsi, &vlan);
3975 	if (err) {
3976 		PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
3977 		return err;
3978 	}
3979 
3980 	/* in SVM both VLAN 0 filters are identical */
3981 	if (!ice_is_dvm_ena(&vsi->adapter->hw))
3982 		return 0;
3983 
3984 	vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
3985 	err = ice_add_vlan_filter(vsi, &vlan);
3986 	if (err) {
3987 		PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
3988 		return err;
3989 	}
3990 
3991 	return 0;
3992 }
3993 
3994 /*
3995  * Delete the VLAN 0 filters in the same manner that they were added in
3996  * ice_vsi_add_vlan_zero.
3997  */
3998 static int
3999 ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
4000 {
4001 	struct ice_vlan vlan;
4002 	int err;
4003 
4004 	vlan = ICE_VLAN(0, 0);
4005 	err = ice_remove_vlan_filter(vsi, &vlan);
4006 	if (err) {
4007 		PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0");
4008 		return err;
4009 	}
4010 
4011 	/* in SVM both VLAN 0 filters are identical */
4012 	if (!ice_is_dvm_ena(&vsi->adapter->hw))
4013 		return 0;
4014 
4015 	vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4016 	err = ice_remove_vlan_filter(vsi, &vlan);
4017 	if (err) {
4018 		PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode");
4019 		return err;
4020 	}
4021 
4022 	return 0;
4023 }
4024 
4025 /* Configure vlan filter on or off */
4026 static int
4027 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
4028 {
4029 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4030 	struct ice_vsi_ctx ctxt;
4031 	uint8_t sw_flags2;
4032 	int ret = 0;
4033 
4034 	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4035 
4036 	if (on)
4037 		vsi->info.sw_flags2 |= sw_flags2;
4038 	else
4039 		vsi->info.sw_flags2 &= ~sw_flags2;
4040 
4041 	vsi->info.sw_id = hw->port_info->sw_id;
4042 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4043 	ctxt.info.valid_sections =
4044 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4045 				 ICE_AQ_VSI_PROP_SECURITY_VALID);
4046 	ctxt.vsi_num = vsi->vsi_id;
4047 
4048 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4049 	if (ret) {
4050 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
4051 			    on ? "enable" : "disable");
4052 		return -EINVAL;
4053 	} else {
4054 		vsi->info.valid_sections |=
4055 			rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4056 					 ICE_AQ_VSI_PROP_SECURITY_VALID);
4057 	}
4058 
4059 	/* Like other drivers, allow untagged packets when the VLAN filter is on */
4060 	if (on)
4061 		ret = ice_vsi_add_vlan_zero(vsi);
4062 	else
4063 		ret = ice_vsi_del_vlan_zero(vsi);
4064 
4065 	return ret;
4066 }
4067 
4068 /* Manage VLAN stripping for the VSI for Rx */
4069 static int
4070 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4071 {
4072 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4073 	struct ice_vsi_ctx ctxt;
4074 	enum ice_status status;
4075 	int err = 0;
4076 
4077 	/* do not allow modifying VLAN stripping when a port VLAN is configured
4078 	 * on this VSI
4079 	 */
4080 	if (vsi->info.port_based_inner_vlan)
4081 		return 0;
4082 
4083 	memset(&ctxt, 0, sizeof(ctxt));
4084 
4085 	if (ena)
4086 		/* Strip VLAN tag from Rx packet and put it in the desc */
4087 		ctxt.info.inner_vlan_flags =
4088 					ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
4089 	else
4090 		/* Disable stripping. Leave tag in packet */
4091 		ctxt.info.inner_vlan_flags =
4092 					ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4093 
4094 	/* Allow all packets untagged/tagged */
4095 	ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
4096 
4097 	ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4098 
4099 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4100 	if (status) {
4101 		PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping",
4102 			    ena ? "enable" : "disable");
4103 		err = -EIO;
4104 	} else {
4105 		vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags;
4106 	}
4107 
4108 	return err;
4109 }
4110 
4111 static int
4112 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi)
4113 {
4114 	return ice_vsi_manage_vlan_stripping(vsi, true);
4115 }
4116 
4117 static int
4118 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
4119 {
4120 	return ice_vsi_manage_vlan_stripping(vsi, false);
4121 }
4122 
4123 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi)
4124 {
4125 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4126 	struct ice_vsi_ctx ctxt;
4127 	enum ice_status status;
4128 	int err = 0;
4129 
4130 	/* do not allow modifying VLAN stripping when a port VLAN is configured
4131 	 * on this VSI
4132 	 */
4133 	if (vsi->info.port_based_outer_vlan)
4134 		return 0;
4135 
4136 	memset(&ctxt, 0, sizeof(ctxt));
4137 
4138 	ctxt.info.valid_sections =
4139 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4140 	/* clear current outer VLAN strip settings */
4141 	ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4142 		~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
4143 	ctxt.info.outer_vlan_flags |=
4144 		(ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
4145 		 ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
4146 		(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
4147 		 ICE_AQ_VSI_OUTER_TAG_TYPE_S);
4148 
4149 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4150 	if (status) {
4151 		PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping");
4152 		err = -EIO;
4153 	} else {
4154 		vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4155 	}
4156 
4157 	return err;
4158 }
4159 
4160 static int
4161 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
4162 {
4163 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4164 	struct ice_vsi_ctx ctxt;
4165 	enum ice_status status;
4166 	int err = 0;
4167 
4168 	if (vsi->info.port_based_outer_vlan)
4169 		return 0;
4170 
4171 	memset(&ctxt, 0, sizeof(ctxt));
4172 
4173 	ctxt.info.valid_sections =
4174 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4175 	/* clear current outer VLAN strip settings */
4176 	ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4177 		~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
4178 	ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
4179 		ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
4180 
4181 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4182 	if (status) {
4183 		PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping");
4184 		err = -EIO;
4185 	} else {
4186 		vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4187 	}
4188 
4189 	return err;
4190 }
4191 
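/* Enable or disable Rx VLAN stripping, acting on the outer tag when double
 * VLAN mode (DVM) is enabled and on the inner tag otherwise.
 */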
4192 static int
4193 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena)
4194 {
4195 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4196 	int ret;
4197 
4198 	if (ice_is_dvm_ena(hw)) {
4199 		if (ena)
4200 			ret = ice_vsi_ena_outer_stripping(vsi);
4201 		else
4202 			ret = ice_vsi_dis_outer_stripping(vsi);
4203 	} else {
4204 		if (ena)
4205 			ret = ice_vsi_ena_inner_stripping(vsi);
4206 		else
4207 			ret = ice_vsi_dis_inner_stripping(vsi);
4208 	}
4209 
4210 	return ret;
4211 }
4212 
4213 static int
4214 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4215 {
4216 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4217 	struct ice_vsi *vsi = pf->main_vsi;
4218 	struct rte_eth_rxmode *rxmode;
4219 
4220 	rxmode = &dev->data->dev_conf.rxmode;
4221 	if (mask & ETH_VLAN_FILTER_MASK) {
4222 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4223 			ice_vsi_config_vlan_filter(vsi, true);
4224 		else
4225 			ice_vsi_config_vlan_filter(vsi, false);
4226 	}
4227 
4228 	if (mask & ETH_VLAN_STRIP_MASK) {
4229 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4230 			ice_vsi_config_vlan_stripping(vsi, true);
4231 		else
4232 			ice_vsi_config_vlan_stripping(vsi, false);
4233 	}
4234 
4235 	return 0;
4236 }
4237 
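/* Read the RSS lookup table, either through the admin queue when the PF is
 * AQ-capable or directly from the PFQF_HLUT registers otherwise.
 */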
4238 static int
4239 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4240 {
4241 	struct ice_aq_get_set_rss_lut_params lut_params;
4242 	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4243 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4244 	int ret;
4245 
4246 	if (!lut)
4247 		return -EINVAL;
4248 
4249 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4250 		lut_params.vsi_handle = vsi->idx;
4251 		lut_params.lut_size = lut_size;
4252 		lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4253 		lut_params.lut = lut;
4254 		lut_params.global_lut_id = 0;
4255 		ret = ice_aq_get_rss_lut(hw, &lut_params);
4256 		if (ret) {
4257 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4258 			return -EINVAL;
4259 		}
4260 	} else {
4261 		uint64_t *lut_dw = (uint64_t *)lut;
4262 		uint16_t i, lut_size_dw = lut_size / 4;
4263 
4264 		for (i = 0; i < lut_size_dw; i++)
4265 			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4266 	}
4267 
4268 	return 0;
4269 }
4270 
4271 static int
4272 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4273 {
4274 	struct ice_aq_get_set_rss_lut_params lut_params;
4275 	struct ice_pf *pf;
4276 	struct ice_hw *hw;
4277 	int ret;
4278 
4279 	if (!vsi || !lut)
4280 		return -EINVAL;
4281 
4282 	pf = ICE_VSI_TO_PF(vsi);
4283 	hw = ICE_VSI_TO_HW(vsi);
4284 
4285 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4286 		lut_params.vsi_handle = vsi->idx;
4287 		lut_params.lut_size = lut_size;
4288 		lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4289 		lut_params.lut = lut;
4290 		lut_params.global_lut_id = 0;
4291 		ret = ice_aq_set_rss_lut(hw, &lut_params);
4292 		if (ret) {
4293 			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4294 			return -EINVAL;
4295 		}
4296 	} else {
4297 		uint64_t *lut_dw = (uint64_t *)lut;
4298 		uint16_t i, lut_size_dw = lut_size / 4;
4299 
4300 		for (i = 0; i < lut_size_dw; i++)
4301 			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4302 
4303 		ice_flush(hw);
4304 	}
4305 
4306 	return 0;
4307 }
4308 
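/* Update the RSS redirection table from reta_conf. The LUT is read back at
 * its current size, patched with the requested entries, then written back at
 * the requested size, which may change pf->hash_lut_size.
 */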
4309 static int
4310 ice_rss_reta_update(struct rte_eth_dev *dev,
4311 		    struct rte_eth_rss_reta_entry64 *reta_conf,
4312 		    uint16_t reta_size)
4313 {
4314 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4315 	uint16_t i, lut_size = pf->hash_lut_size;
4316 	uint16_t idx, shift;
4317 	uint8_t *lut;
4318 	int ret;
4319 
4320 	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4321 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4322 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4323 		PMD_DRV_LOG(ERR,
4324 			    "The size of hash lookup table configured (%d)"
4325 			    "doesn't match the number hardware can "
4326 			    "supported (128, 512, 2048)",
4327 			    reta_size);
4328 		return -EINVAL;
4329 	}
4330 
4331 	/* The current LUT size MUST be used to get the RSS lookup table,
4332 	 * otherwise it will fail with a -100 error code.
4333 	 */
4334 	lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
4335 	if (!lut) {
4336 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4337 		return -ENOMEM;
4338 	}
4339 	ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
4340 	if (ret)
4341 		goto out;
4342 
4343 	for (i = 0; i < reta_size; i++) {
4344 		idx = i / RTE_RETA_GROUP_SIZE;
4345 		shift = i % RTE_RETA_GROUP_SIZE;
4346 		if (reta_conf[idx].mask & (1ULL << shift))
4347 			lut[i] = reta_conf[idx].reta[shift];
4348 	}
4349 	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4350 	if (ret == 0 && lut_size != reta_size) {
4351 		PMD_DRV_LOG(INFO,
4352 			    "The size of hash lookup table is changed from (%d) to (%d)",
4353 			    lut_size, reta_size);
4354 		pf->hash_lut_size = reta_size;
4355 	}
4356 
4357 out:
4358 	rte_free(lut);
4359 
4360 	return ret;
4361 }
4362 
4363 static int
4364 ice_rss_reta_query(struct rte_eth_dev *dev,
4365 		   struct rte_eth_rss_reta_entry64 *reta_conf,
4366 		   uint16_t reta_size)
4367 {
4368 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4369 	uint16_t i, lut_size = pf->hash_lut_size;
4370 	uint16_t idx, shift;
4371 	uint8_t *lut;
4372 	int ret;
4373 
4374 	if (reta_size != lut_size) {
4375 		PMD_DRV_LOG(ERR,
4376 			    "The size of hash lookup table configured (%d)"
4377 			    "doesn't match the number hardware can "
4378 			    "supported (%d)",
4379 			    reta_size, lut_size);
4380 		return -EINVAL;
4381 	}
4382 
4383 	lut = rte_zmalloc(NULL, reta_size, 0);
4384 	if (!lut) {
4385 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4386 		return -ENOMEM;
4387 	}
4388 
4389 	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4390 	if (ret)
4391 		goto out;
4392 
4393 	for (i = 0; i < reta_size; i++) {
4394 		idx = i / RTE_RETA_GROUP_SIZE;
4395 		shift = i % RTE_RETA_GROUP_SIZE;
4396 		if (reta_conf[idx].mask & (1ULL << shift))
4397 			reta_conf[idx].reta[shift] = lut[i];
4398 	}
4399 
4400 out:
4401 	rte_free(lut);
4402 
4403 	return ret;
4404 }
4405 
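/* Configure the RSS hash key through the admin queue. A NULL or zero-length
 * key is ignored; otherwise the key length must be exactly
 * (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) bytes.
 */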
4406 static int
4407 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4408 {
4409 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4410 	int ret = 0;
4411 
4412 	if (!key || key_len == 0) {
4413 		PMD_DRV_LOG(DEBUG, "No key to be configured");
4414 		return 0;
4415 	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4416 		   sizeof(uint32_t)) {
4417 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4418 		return -EINVAL;
4419 	}
4420 
4421 	struct ice_aqc_get_set_rss_keys *key_dw =
4422 		(struct ice_aqc_get_set_rss_keys *)key;
4423 
4424 	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4425 	if (ret) {
4426 		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4427 		ret = -EINVAL;
4428 	}
4429 
4430 	return ret;
4431 }
4432 
4433 static int
4434 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4435 {
4436 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4437 	int ret;
4438 
4439 	if (!key || !key_len)
4440 		return -EINVAL;
4441 
4442 	ret = ice_aq_get_rss_key
4443 		(hw, vsi->idx,
4444 		 (struct ice_aqc_get_set_rss_keys *)key);
4445 	if (ret) {
4446 		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4447 		return -EINVAL;
4448 	}
4449 	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4450 
4451 	return 0;
4452 }
4453 
4454 static int
4455 ice_rss_hash_update(struct rte_eth_dev *dev,
4456 		    struct rte_eth_rss_conf *rss_conf)
4457 {
4458 	enum ice_status status = ICE_SUCCESS;
4459 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4460 	struct ice_vsi *vsi = pf->main_vsi;
4461 
4462 	/* set hash key */
4463 	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4464 	if (status)
4465 		return status;
4466 
4467 	if (rss_conf->rss_hf == 0) {
4468 		pf->rss_hf = 0;
4469 		return 0;
4470 	}
4471 
4472 	/* RSS hash configuration */
4473 	ice_rss_hash_set(pf, rss_conf->rss_hf);
4474 
4475 	return 0;
4476 }
4477 
4478 static int
4479 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
4480 		      struct rte_eth_rss_conf *rss_conf)
4481 {
4482 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4483 	struct ice_vsi *vsi = pf->main_vsi;
4484 
4485 	ice_get_rss_key(vsi, rss_conf->rss_key,
4486 			&rss_conf->rss_key_len);
4487 
4488 	rss_conf->rss_hf = pf->rss_hf;
4489 	return 0;
4490 }
4491 
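/* Enable unicast and multicast promiscuous mode on the main VSI. */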
4492 static int
4493 ice_promisc_enable(struct rte_eth_dev *dev)
4494 {
4495 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4496 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4497 	struct ice_vsi *vsi = pf->main_vsi;
4498 	enum ice_status status;
4499 	uint8_t pmask;
4500 	int ret = 0;
4501 
4502 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4503 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4504 
4505 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4506 	switch (status) {
4507 	case ICE_ERR_ALREADY_EXISTS:
4508 		PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
4509 	case ICE_SUCCESS:
4510 		break;
4511 	default:
4512 		PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
4513 		ret = -EAGAIN;
4514 	}
4515 
4516 	return ret;
4517 }
4518 
4519 static int
4520 ice_promisc_disable(struct rte_eth_dev *dev)
4521 {
4522 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4523 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4524 	struct ice_vsi *vsi = pf->main_vsi;
4525 	enum ice_status status;
4526 	uint8_t pmask;
4527 	int ret = 0;
4528 
4529 	if (dev->data->all_multicast == 1)
4530 		pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX;
4531 	else
4532 		pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4533 			ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4534 
4535 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4536 	if (status != ICE_SUCCESS) {
4537 		PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
4538 		ret = -EAGAIN;
4539 	}
4540 
4541 	return ret;
4542 }
4543 
4544 static int
4545 ice_allmulti_enable(struct rte_eth_dev *dev)
4546 {
4547 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4548 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4549 	struct ice_vsi *vsi = pf->main_vsi;
4550 	enum ice_status status;
4551 	uint8_t pmask;
4552 	int ret = 0;
4553 
4554 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4555 
4556 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4557 
4558 	switch (status) {
4559 	case ICE_ERR_ALREADY_EXISTS:
4560 		PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
4561 	case ICE_SUCCESS:
4562 		break;
4563 	default:
4564 		PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
4565 		ret = -EAGAIN;
4566 	}
4567 
4568 	return ret;
4569 }
4570 
4571 static int
4572 ice_allmulti_disable(struct rte_eth_dev *dev)
4573 {
4574 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4575 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4576 	struct ice_vsi *vsi = pf->main_vsi;
4577 	enum ice_status status;
4578 	uint8_t pmask;
4579 	int ret = 0;
4580 
4581 	if (dev->data->promiscuous == 1)
4582 		return 0; /* must remain in all_multicast mode */
4583 
4584 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4585 
4586 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4587 	if (status != ICE_SUCCESS) {
4588 		PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
4589 		ret = -EAGAIN;
4590 	}
4591 
4592 	return ret;
4593 }
4594 
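/* Re-arm the MSI-X vector bound to the given Rx queue so it can fire again. */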
4595 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
4596 				    uint16_t queue_id)
4597 {
4598 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4599 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4600 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4601 	uint32_t val;
4602 	uint16_t msix_intr;
4603 
4604 	msix_intr = intr_handle->intr_vec[queue_id];
4605 
4606 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
4607 	      GLINT_DYN_CTL_ITR_INDX_M;
4608 	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
4609 
4610 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
4611 	rte_intr_ack(&pci_dev->intr_handle);
4612 
4613 	return 0;
4614 }
4615 
4616 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
4617 				     uint16_t queue_id)
4618 {
4619 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4620 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4621 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4622 	uint16_t msix_intr;
4623 
4624 	msix_intr = intr_handle->intr_vec[queue_id];
4625 
4626 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
4627 
4628 	return 0;
4629 }
4630 
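/* Report the NVM version, EETRACK id and OROM version as a string. Returns
 * the required buffer size (including the trailing '\0') if fw_size is too
 * small, 0 on success.
 */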
4631 static int
4632 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
4633 {
4634 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4635 	u8 ver, patch;
4636 	u16 build;
4637 	int ret;
4638 
4639 	ver = hw->flash.orom.major;
4640 	patch = hw->flash.orom.patch;
4641 	build = hw->flash.orom.build;
4642 
4643 	ret = snprintf(fw_version, fw_size,
4644 			"%x.%02x 0x%08x %d.%d.%d",
4645 			hw->flash.nvm.major,
4646 			hw->flash.nvm.minor,
4647 			hw->flash.nvm.eetrack,
4648 			ver, build, patch);
4649 
4650 	/* add the size of '\0' */
4651 	ret += 1;
4652 	if (fw_size < (u32)ret)
4653 		return ret;
4654 	else
4655 		return 0;
4656 }
4657 
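/* Program the port VLAN (PVID) insertion configuration of a VSI based on the
 * ice_vsi_vlan_pvid_info passed by the caller.
 */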
4658 static int
4659 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
4660 {
4661 	struct ice_hw *hw;
4662 	struct ice_vsi_ctx ctxt;
4663 	uint8_t vlan_flags = 0;
4664 	int ret;
4665 
4666 	if (!vsi || !info) {
4667 		PMD_DRV_LOG(ERR, "invalid parameters");
4668 		return -EINVAL;
4669 	}
4670 
4671 	if (info->on) {
4672 		vsi->info.port_based_inner_vlan = info->config.pvid;
4673 		/**
4674 		 * If PVID insertion is enabled, only tagged packets are
4675 		 * allowed to be sent out.
4676 		 */
4677 		vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4678 			     ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4679 	} else {
4680 		vsi->info.port_based_inner_vlan = 0;
4681 		if (info->config.reject.tagged == 0)
4682 			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED;
4683 
4684 		if (info->config.reject.untagged == 0)
4685 			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4686 	}
4687 	vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4688 				  ICE_AQ_VSI_INNER_VLAN_EMODE_M);
4689 	vsi->info.inner_vlan_flags |= vlan_flags;
4690 	memset(&ctxt, 0, sizeof(ctxt));
4691 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4692 	ctxt.info.valid_sections =
4693 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4694 	ctxt.vsi_num = vsi->vsi_id;
4695 
4696 	hw = ICE_VSI_TO_HW(vsi);
4697 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4698 	if (ret != ICE_SUCCESS) {
4699 		PMD_DRV_LOG(ERR,
4700 			    "update VSI for VLAN insert failed, err %d",
4701 			    ret);
4702 		return -EINVAL;
4703 	}
4704 
4705 	vsi->info.valid_sections |=
4706 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4707 
4708 	return ret;
4709 }
4710 
4711 static int
4712 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4713 {
4714 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4715 	struct ice_vsi *vsi = pf->main_vsi;
4716 	struct rte_eth_dev_data *data = pf->dev_data;
4717 	struct ice_vsi_vlan_pvid_info info;
4718 	int ret;
4719 
4720 	memset(&info, 0, sizeof(info));
4721 	info.on = on;
4722 	if (info.on) {
4723 		info.config.pvid = pvid;
4724 	} else {
4725 		info.config.reject.tagged =
4726 			data->dev_conf.txmode.hw_vlan_reject_tagged;
4727 		info.config.reject.untagged =
4728 			data->dev_conf.txmode.hw_vlan_reject_untagged;
4729 	}
4730 
4731 	ret = ice_vsi_vlan_pvid_set(vsi, &info);
4732 	if (ret < 0) {
4733 		PMD_DRV_LOG(ERR, "Failed to set pvid.");
4734 		return -EINVAL;
4735 	}
4736 
4737 	return 0;
4738 }
4739 
4740 static int
4741 ice_get_eeprom_length(struct rte_eth_dev *dev)
4742 {
4743 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4744 
4745 	return hw->flash.flash_size;
4746 }
4747 
4748 static int
4749 ice_get_eeprom(struct rte_eth_dev *dev,
4750 	       struct rte_dev_eeprom_info *eeprom)
4751 {
4752 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4753 	enum ice_status status = ICE_SUCCESS;
4754 	uint8_t *data = eeprom->data;
4755 
4756 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4757 
4758 	status = ice_acquire_nvm(hw, ICE_RES_READ);
4759 	if (status) {
4760 		PMD_DRV_LOG(ERR, "acquire nvm failed.");
4761 		return -EIO;
4762 	}
4763 
4764 	status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
4765 				   data, false);
4766 
4767 	ice_release_nvm(hw);
4768 
4769 	if (status) {
4770 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
4771 		return -EIO;
4772 	}
4773 
4774 	return 0;
4775 }
4776 
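/* Read a 32-bit HW statistics register and return the delta against the saved
 * offset, handling counter wrap-around. ice_stat_update_40() below does the
 * same for 40-bit counters split across a high/low register pair.
 */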
4777 static void
4778 ice_stat_update_32(struct ice_hw *hw,
4779 		   uint32_t reg,
4780 		   bool offset_loaded,
4781 		   uint64_t *offset,
4782 		   uint64_t *stat)
4783 {
4784 	uint64_t new_data;
4785 
4786 	new_data = (uint64_t)ICE_READ_REG(hw, reg);
4787 	if (!offset_loaded)
4788 		*offset = new_data;
4789 
4790 	if (new_data >= *offset)
4791 		*stat = (uint64_t)(new_data - *offset);
4792 	else
4793 		*stat = (uint64_t)((new_data +
4794 				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
4795 				   - *offset);
4796 }
4797 
4798 static void
4799 ice_stat_update_40(struct ice_hw *hw,
4800 		   uint32_t hireg,
4801 		   uint32_t loreg,
4802 		   bool offset_loaded,
4803 		   uint64_t *offset,
4804 		   uint64_t *stat)
4805 {
4806 	uint64_t new_data;
4807 
4808 	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
4809 	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
4810 		    ICE_32_BIT_WIDTH;
4811 
4812 	if (!offset_loaded)
4813 		*offset = new_data;
4814 
4815 	if (new_data >= *offset)
4816 		*stat = new_data - *offset;
4817 	else
4818 		*stat = (uint64_t)((new_data +
4819 				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
4820 				   *offset);
4821 
4822 	*stat &= ICE_40_BIT_MASK;
4823 }
4824 
4825 /* Get all the statistics of a VSI */
4826 static void
4827 ice_update_vsi_stats(struct ice_vsi *vsi)
4828 {
4829 	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
4830 	struct ice_eth_stats *nes = &vsi->eth_stats;
4831 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4832 	int idx = rte_le_to_cpu_16(vsi->vsi_id);
4833 
4834 	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
4835 			   vsi->offset_loaded, &oes->rx_bytes,
4836 			   &nes->rx_bytes);
4837 	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
4838 			   vsi->offset_loaded, &oes->rx_unicast,
4839 			   &nes->rx_unicast);
4840 	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
4841 			   vsi->offset_loaded, &oes->rx_multicast,
4842 			   &nes->rx_multicast);
4843 	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
4844 			   vsi->offset_loaded, &oes->rx_broadcast,
4845 			   &nes->rx_broadcast);
4846 	/* extend the 40-bit HW counter in software when rx_bytes wraps around */
4847 	if (vsi->offset_loaded) {
4848 		if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
4849 			nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4850 		nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
4851 	}
4852 	vsi->old_rx_bytes = nes->rx_bytes;
4853 	/* exclude CRC bytes */
4854 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
4855 			  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
4856 
4857 	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
4858 			   &oes->rx_discards, &nes->rx_discards);
4859 	/* GLV_REPC not supported */
4860 	/* GLV_RMPC not supported */
4861 	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
4862 			   &oes->rx_unknown_protocol,
4863 			   &nes->rx_unknown_protocol);
4864 	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
4865 			   vsi->offset_loaded, &oes->tx_bytes,
4866 			   &nes->tx_bytes);
4867 	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
4868 			   vsi->offset_loaded, &oes->tx_unicast,
4869 			   &nes->tx_unicast);
4870 	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
4871 			   vsi->offset_loaded, &oes->tx_multicast,
4872 			   &nes->tx_multicast);
4873 	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
4874 			   vsi->offset_loaded,  &oes->tx_broadcast,
4875 			   &nes->tx_broadcast);
4876 	/* GLV_TDPC not supported */
4877 	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
4878 			   &oes->tx_errors, &nes->tx_errors);
4879 	/* extend the 40-bit HW counter in software when tx_bytes wraps around */
4880 	if (vsi->offset_loaded) {
4881 		if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
4882 			nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4883 		nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
4884 	}
4885 	vsi->old_tx_bytes = nes->tx_bytes;
4886 	vsi->offset_loaded = true;
4887 
4888 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
4889 		    vsi->vsi_id);
4890 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
4891 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
4892 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
4893 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
4894 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
4895 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
4896 		    nes->rx_unknown_protocol);
4897 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
4898 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
4899 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
4900 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
4901 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
4902 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
4903 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
4904 		    vsi->vsi_id);
4905 }
4906 
4907 static void
4908 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
4909 {
4910 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
4911 	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
4912 
4913 	/* Get statistics of struct ice_eth_stats */
4914 	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
4915 			   GLPRT_GORCL(hw->port_info->lport),
4916 			   pf->offset_loaded, &os->eth.rx_bytes,
4917 			   &ns->eth.rx_bytes);
4918 	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
4919 			   GLPRT_UPRCL(hw->port_info->lport),
4920 			   pf->offset_loaded, &os->eth.rx_unicast,
4921 			   &ns->eth.rx_unicast);
4922 	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
4923 			   GLPRT_MPRCL(hw->port_info->lport),
4924 			   pf->offset_loaded, &os->eth.rx_multicast,
4925 			   &ns->eth.rx_multicast);
4926 	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
4927 			   GLPRT_BPRCL(hw->port_info->lport),
4928 			   pf->offset_loaded, &os->eth.rx_broadcast,
4929 			   &ns->eth.rx_broadcast);
4930 	ice_stat_update_32(hw, PRTRPB_RDPC,
4931 			   pf->offset_loaded, &os->eth.rx_discards,
4932 			   &ns->eth.rx_discards);
4933 	/* extend the 40-bit HW counter in software when rx_bytes wraps around */
4934 	if (pf->offset_loaded) {
4935 		if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
4936 			ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4937 		ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
4938 	}
4939 	pf->old_rx_bytes = ns->eth.rx_bytes;
4940 
4941 	/* Workaround: CRC size should not be included in byte statistics,
4942 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
4943 	 * packet.
4944 	 */
4945 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
4946 			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
4947 
4948 	/* GLPRT_REPC not supported */
4949 	/* GLPRT_RMPC not supported */
4950 	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
4951 			   pf->offset_loaded,
4952 			   &os->eth.rx_unknown_protocol,
4953 			   &ns->eth.rx_unknown_protocol);
4954 	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
4955 			   GLPRT_GOTCL(hw->port_info->lport),
4956 			   pf->offset_loaded, &os->eth.tx_bytes,
4957 			   &ns->eth.tx_bytes);
4958 	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
4959 			   GLPRT_UPTCL(hw->port_info->lport),
4960 			   pf->offset_loaded, &os->eth.tx_unicast,
4961 			   &ns->eth.tx_unicast);
4962 	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
4963 			   GLPRT_MPTCL(hw->port_info->lport),
4964 			   pf->offset_loaded, &os->eth.tx_multicast,
4965 			   &ns->eth.tx_multicast);
4966 	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
4967 			   GLPRT_BPTCL(hw->port_info->lport),
4968 			   pf->offset_loaded, &os->eth.tx_broadcast,
4969 			   &ns->eth.tx_broadcast);
4970 	/* extend the 40-bit HW counter in software when tx_bytes wraps around */
4971 	if (pf->offset_loaded) {
4972 		if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
4973 			ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4974 		ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
4975 	}
4976 	pf->old_tx_bytes = ns->eth.tx_bytes;
4977 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
4978 			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
4979 
4980 	/* GLPRT_TEPC not supported */
4981 
4982 	/* additional port specific stats */
4983 	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
4984 			   pf->offset_loaded, &os->tx_dropped_link_down,
4985 			   &ns->tx_dropped_link_down);
4986 	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
4987 			   pf->offset_loaded, &os->crc_errors,
4988 			   &ns->crc_errors);
4989 	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
4990 			   pf->offset_loaded, &os->illegal_bytes,
4991 			   &ns->illegal_bytes);
4992 	/* GLPRT_ERRBC not supported */
4993 	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
4994 			   pf->offset_loaded, &os->mac_local_faults,
4995 			   &ns->mac_local_faults);
4996 	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
4997 			   pf->offset_loaded, &os->mac_remote_faults,
4998 			   &ns->mac_remote_faults);
4999 
5000 	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
5001 			   pf->offset_loaded, &os->rx_len_errors,
5002 			   &ns->rx_len_errors);
5003 
5004 	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
5005 			   pf->offset_loaded, &os->link_xon_rx,
5006 			   &ns->link_xon_rx);
5007 	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
5008 			   pf->offset_loaded, &os->link_xoff_rx,
5009 			   &ns->link_xoff_rx);
5010 	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
5011 			   pf->offset_loaded, &os->link_xon_tx,
5012 			   &ns->link_xon_tx);
5013 	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
5014 			   pf->offset_loaded, &os->link_xoff_tx,
5015 			   &ns->link_xoff_tx);
5016 	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
5017 			   GLPRT_PRC64L(hw->port_info->lport),
5018 			   pf->offset_loaded, &os->rx_size_64,
5019 			   &ns->rx_size_64);
5020 	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
5021 			   GLPRT_PRC127L(hw->port_info->lport),
5022 			   pf->offset_loaded, &os->rx_size_127,
5023 			   &ns->rx_size_127);
5024 	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
5025 			   GLPRT_PRC255L(hw->port_info->lport),
5026 			   pf->offset_loaded, &os->rx_size_255,
5027 			   &ns->rx_size_255);
5028 	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
5029 			   GLPRT_PRC511L(hw->port_info->lport),
5030 			   pf->offset_loaded, &os->rx_size_511,
5031 			   &ns->rx_size_511);
5032 	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
5033 			   GLPRT_PRC1023L(hw->port_info->lport),
5034 			   pf->offset_loaded, &os->rx_size_1023,
5035 			   &ns->rx_size_1023);
5036 	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
5037 			   GLPRT_PRC1522L(hw->port_info->lport),
5038 			   pf->offset_loaded, &os->rx_size_1522,
5039 			   &ns->rx_size_1522);
5040 	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
5041 			   GLPRT_PRC9522L(hw->port_info->lport),
5042 			   pf->offset_loaded, &os->rx_size_big,
5043 			   &ns->rx_size_big);
5044 	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
5045 			   pf->offset_loaded, &os->rx_undersize,
5046 			   &ns->rx_undersize);
5047 	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
5048 			   pf->offset_loaded, &os->rx_fragments,
5049 			   &ns->rx_fragments);
5050 	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
5051 			   pf->offset_loaded, &os->rx_oversize,
5052 			   &ns->rx_oversize);
5053 	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
5054 			   pf->offset_loaded, &os->rx_jabber,
5055 			   &ns->rx_jabber);
5056 	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
5057 			   GLPRT_PTC64L(hw->port_info->lport),
5058 			   pf->offset_loaded, &os->tx_size_64,
5059 			   &ns->tx_size_64);
5060 	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
5061 			   GLPRT_PTC127L(hw->port_info->lport),
5062 			   pf->offset_loaded, &os->tx_size_127,
5063 			   &ns->tx_size_127);
5064 	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
5065 			   GLPRT_PTC255L(hw->port_info->lport),
5066 			   pf->offset_loaded, &os->tx_size_255,
5067 			   &ns->tx_size_255);
5068 	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
5069 			   GLPRT_PTC511L(hw->port_info->lport),
5070 			   pf->offset_loaded, &os->tx_size_511,
5071 			   &ns->tx_size_511);
5072 	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
5073 			   GLPRT_PTC1023L(hw->port_info->lport),
5074 			   pf->offset_loaded, &os->tx_size_1023,
5075 			   &ns->tx_size_1023);
5076 	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
5077 			   GLPRT_PTC1522L(hw->port_info->lport),
5078 			   pf->offset_loaded, &os->tx_size_1522,
5079 			   &ns->tx_size_1522);
5080 	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
5081 			   GLPRT_PTC9522L(hw->port_info->lport),
5082 			   pf->offset_loaded, &os->tx_size_big,
5083 			   &ns->tx_size_big);
5084 
5085 	/* GLPRT_MSPDC not supported */
5086 	/* GLPRT_XEC not supported */
5087 
5088 	pf->offset_loaded = true;
5089 
5090 	if (pf->main_vsi)
5091 		ice_update_vsi_stats(pf->main_vsi);
5092 }
5093 
5094 /* Get all statistics of a port */
5095 static int
5096 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
5097 {
5098 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5099 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5100 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
5101 
5102 	/* read the HW stat registers, then fill in the rte_eth_stats fields */
5103 	ice_read_stats_registers(pf, hw);
5104 
5105 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
5106 			  pf->main_vsi->eth_stats.rx_multicast +
5107 			  pf->main_vsi->eth_stats.rx_broadcast -
5108 			  pf->main_vsi->eth_stats.rx_discards;
5109 	stats->opackets = ns->eth.tx_unicast +
5110 			  ns->eth.tx_multicast +
5111 			  ns->eth.tx_broadcast;
5112 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
5113 	stats->obytes   = ns->eth.tx_bytes;
5114 	stats->oerrors  = ns->eth.tx_errors +
5115 			  pf->main_vsi->eth_stats.tx_errors;
5116 
5117 	/* Rx Errors */
5118 	stats->imissed  = ns->eth.rx_discards +
5119 			  pf->main_vsi->eth_stats.rx_discards;
5120 	stats->ierrors  = ns->crc_errors +
5121 			  ns->rx_undersize +
5122 			  ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
5123 
5124 	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
5125 	PMD_DRV_LOG(DEBUG, "rx_bytes:	%"PRIu64"", ns->eth.rx_bytes);
5126 	PMD_DRV_LOG(DEBUG, "rx_unicast:	%"PRIu64"", ns->eth.rx_unicast);
5127 	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
5128 	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
5129 	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
5130 	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
5131 		    pf->main_vsi->eth_stats.rx_discards);
5132 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
5133 		    ns->eth.rx_unknown_protocol);
5134 	PMD_DRV_LOG(DEBUG, "tx_bytes:	%"PRIu64"", ns->eth.tx_bytes);
5135 	PMD_DRV_LOG(DEBUG, "tx_unicast:	%"PRIu64"", ns->eth.tx_unicast);
5136 	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
5137 	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
5138 	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
5139 	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
5140 		    pf->main_vsi->eth_stats.tx_discards);
5141 	PMD_DRV_LOG(DEBUG, "tx_errors:		%"PRIu64"", ns->eth.tx_errors);
5142 
5143 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:	%"PRIu64"",
5144 		    ns->tx_dropped_link_down);
5145 	PMD_DRV_LOG(DEBUG, "crc_errors:	%"PRIu64"", ns->crc_errors);
5146 	PMD_DRV_LOG(DEBUG, "illegal_bytes:	%"PRIu64"",
5147 		    ns->illegal_bytes);
5148 	PMD_DRV_LOG(DEBUG, "error_bytes:	%"PRIu64"", ns->error_bytes);
5149 	PMD_DRV_LOG(DEBUG, "mac_local_faults:	%"PRIu64"",
5150 		    ns->mac_local_faults);
5151 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:	%"PRIu64"",
5152 		    ns->mac_remote_faults);
5153 	PMD_DRV_LOG(DEBUG, "link_xon_rx:	%"PRIu64"", ns->link_xon_rx);
5154 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:	%"PRIu64"", ns->link_xoff_rx);
5155 	PMD_DRV_LOG(DEBUG, "link_xon_tx:	%"PRIu64"", ns->link_xon_tx);
5156 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:	%"PRIu64"", ns->link_xoff_tx);
5157 	PMD_DRV_LOG(DEBUG, "rx_size_64:		%"PRIu64"", ns->rx_size_64);
5158 	PMD_DRV_LOG(DEBUG, "rx_size_127:	%"PRIu64"", ns->rx_size_127);
5159 	PMD_DRV_LOG(DEBUG, "rx_size_255:	%"PRIu64"", ns->rx_size_255);
5160 	PMD_DRV_LOG(DEBUG, "rx_size_511:	%"PRIu64"", ns->rx_size_511);
5161 	PMD_DRV_LOG(DEBUG, "rx_size_1023:	%"PRIu64"", ns->rx_size_1023);
5162 	PMD_DRV_LOG(DEBUG, "rx_size_1522:	%"PRIu64"", ns->rx_size_1522);
5163 	PMD_DRV_LOG(DEBUG, "rx_size_big:	%"PRIu64"", ns->rx_size_big);
5164 	PMD_DRV_LOG(DEBUG, "rx_undersize:	%"PRIu64"", ns->rx_undersize);
5165 	PMD_DRV_LOG(DEBUG, "rx_fragments:	%"PRIu64"", ns->rx_fragments);
5166 	PMD_DRV_LOG(DEBUG, "rx_oversize:	%"PRIu64"", ns->rx_oversize);
5167 	PMD_DRV_LOG(DEBUG, "rx_jabber:		%"PRIu64"", ns->rx_jabber);
5168 	PMD_DRV_LOG(DEBUG, "tx_size_64:		%"PRIu64"", ns->tx_size_64);
5169 	PMD_DRV_LOG(DEBUG, "tx_size_127:	%"PRIu64"", ns->tx_size_127);
5170 	PMD_DRV_LOG(DEBUG, "tx_size_255:	%"PRIu64"", ns->tx_size_255);
5171 	PMD_DRV_LOG(DEBUG, "tx_size_511:	%"PRIu64"", ns->tx_size_511);
5172 	PMD_DRV_LOG(DEBUG, "tx_size_1023:	%"PRIu64"", ns->tx_size_1023);
5173 	PMD_DRV_LOG(DEBUG, "tx_size_1522:	%"PRIu64"", ns->tx_size_1522);
5174 	PMD_DRV_LOG(DEBUG, "tx_size_big:	%"PRIu64"", ns->tx_size_big);
5175 	PMD_DRV_LOG(DEBUG, "rx_len_errors:	%"PRIu64"", ns->rx_len_errors);
5176 	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
5177 	return 0;
5178 }
5179 
5180 /* Reset the statistics */
5181 static int
5182 ice_stats_reset(struct rte_eth_dev *dev)
5183 {
5184 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5185 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5186 
5187 	/* Mark PF and VSI stats to update the offset, aka "reset" */
5188 	pf->offset_loaded = false;
5189 	if (pf->main_vsi)
5190 		pf->main_vsi->offset_loaded = false;
5191 
5192 	/* read the stats, reading current register values into offset */
5193 	ice_read_stats_registers(pf, hw);
5194 
5195 	return 0;
5196 }
5197 
5198 static uint32_t
5199 ice_xstats_calc_num(void)
5200 {
5201 	uint32_t num;
5202 
5203 	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
5204 
5205 	return num;
5206 }
5207 
5208 static int
5209 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
5210 	       unsigned int n)
5211 {
5212 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5213 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5214 	unsigned int i;
5215 	unsigned int count;
5216 	struct ice_hw_port_stats *hw_stats = &pf->stats;
5217 
5218 	count = ice_xstats_calc_num();
5219 	if (n < count)
5220 		return count;
5221 
5222 	ice_read_stats_registers(pf, hw);
5223 
5224 	if (!xstats)
5225 		return 0;
5226 
5227 	count = 0;
5228 
5229 	/* Get stats from ice_eth_stats struct */
5230 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5231 		xstats[count].value =
5232 			*(uint64_t *)((char *)&hw_stats->eth +
5233 				      ice_stats_strings[i].offset);
5234 		xstats[count].id = count;
5235 		count++;
5236 	}
5237 
5238 	/* Get individual stats from ice_hw_port struct */
5239 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5240 		xstats[count].value =
5241 			*(uint64_t *)((char *)hw_stats +
5242 				      ice_hw_port_strings[i].offset);
5243 		xstats[count].id = count;
5244 		count++;
5245 	}
5246 
5247 	return count;
5248 }
5249 
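/* Names are reported in the same order as the values above, so entry i of
 * the name array describes xstats[i] from ice_xstats_get(); passing a NULL
 * array returns the required count, mirroring the value-query path.
 */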
5250 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
5251 				struct rte_eth_xstat_name *xstats_names,
5252 				__rte_unused unsigned int limit)
5253 {
5254 	unsigned int count = 0;
5255 	unsigned int i;
5256 
5257 	if (!xstats_names)
5258 		return ice_xstats_calc_num();
5259 
5260 	/* Note: limit checked in rte_eth_xstats_get_names() */
5261 
5262 	/* Get stats from ice_eth_stats struct */
5263 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5264 		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
5265 			sizeof(xstats_names[count].name));
5266 		count++;
5267 	}
5268 
5269 	/* Get individual stats from ice_hw_port struct */
5270 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5271 		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
5272 			sizeof(xstats_names[count].name));
5273 		count++;
5274 	}
5275 
5276 	return count;
5277 }
5278 
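/* Expose the rte_flow ops table. Applications reach these handlers
 * indirectly through rte_flow_validate()/rte_flow_create() and friends
 * rather than by calling this callback directly.
 */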
5279 static int
5280 ice_dev_flow_ops_get(struct rte_eth_dev *dev,
5281 		     const struct rte_flow_ops **ops)
5282 {
5283 	if (!dev)
5284 		return -EINVAL;
5285 
5286 	*ops = &ice_flow_ops;
5287 	return 0;
5288 }
5289 
5290 /* Add UDP tunneling port */
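/* Only VXLAN is handled here; any other tunnel type is rejected with
 * -EINVAL. A hedged application-side sketch (the port id and UDP port
 * number are illustrative only):
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */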
5291 static int
5292 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
5293 			     struct rte_eth_udp_tunnel *udp_tunnel)
5294 {
5295 	int ret = 0;
5296 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5297 
5298 	if (udp_tunnel == NULL)
5299 		return -EINVAL;
5300 
5301 	switch (udp_tunnel->prot_type) {
5302 	case RTE_TUNNEL_TYPE_VXLAN:
5303 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
5304 		break;
5305 	default:
5306 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5307 		ret = -EINVAL;
5308 		break;
5309 	}
5310 
5311 	return ret;
5312 }
5313 
5314 /* Delete UDP tunneling port */
5315 static int
5316 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
5317 			     struct rte_eth_udp_tunnel *udp_tunnel)
5318 {
5319 	int ret = 0;
5320 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5321 
5322 	if (udp_tunnel == NULL)
5323 		return -EINVAL;
5324 
5325 	switch (udp_tunnel->prot_type) {
5326 	case RTE_TUNNEL_TYPE_VXLAN:
5327 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
5328 		break;
5329 	default:
5330 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5331 		ret = -EINVAL;
5332 		break;
5333 	}
5334 
5335 	return ret;
5336 }
5337 
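/* The PCI probe/remove callbacks below are thin wrappers around the
 * generic ethdev PCI helpers: probe allocates an ethdev with
 * sizeof(struct ice_adapter) bytes of private data and runs ice_dev_init(),
 * while remove tears the port down through ice_dev_uninit().
 */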
5338 static int
5339 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
5340 	      struct rte_pci_device *pci_dev)
5341 {
5342 	return rte_eth_dev_pci_generic_probe(pci_dev,
5343 					     sizeof(struct ice_adapter),
5344 					     ice_dev_init);
5345 }
5346 
5347 static int
5348 ice_pci_remove(struct rte_pci_device *pci_dev)
5349 {
5350 	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
5351 }
5352 
5353 static struct rte_pci_driver rte_ice_pmd = {
5354 	.id_table = pci_id_ice_map,
5355 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
5356 	.probe = ice_pci_probe,
5357 	.remove = ice_pci_remove,
5358 };
5359 
5360 /**
5361  * Driver initialization routine.
5362  * Invoked once at EAL init time.
5363  * Registers itself as the Poll Mode Driver for matching PCI devices.
5364  */
5365 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
5366 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
5367 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
5368 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
5369 			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
5370 			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
5371 			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
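/* A hedged devargs example (the PCI address is illustrative only):
 * extract VLAN metadata on every queue and enable safe mode on one port:
 *
 *	dpdk-testpmd -a 0000:18:00.0,proto_xtr=vlan,safe-mode-support=1 -- -i
 */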
5372 
5373 RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE);
5374 RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE);
5375 #ifdef RTE_ETHDEV_DEBUG_RX
5376 RTE_LOG_REGISTER(ice_logtype_rx, pmd.net.ice.rx, DEBUG);
5377 #endif
5378 #ifdef RTE_ETHDEV_DEBUG_TX
5379 RTE_LOG_REGISTER(ice_logtype_tx, pmd.net.ice.tx, DEBUG);
5380 #endif
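/* Log verbosity for these components can be raised at run time through the
 * EAL --log-level option, e.g. --log-level=pmd.net.ice.driver:debug
 * (illustrative; any of the log types registered above can be named the
 * same way).
 */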
5381