xref: /dpdk/drivers/net/ice/ice_ethdev.c (revision ee2cf75e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 
8 #include <stdio.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <unistd.h>
12 
13 #include "base/ice_sched.h"
14 #include "base/ice_flow.h"
15 #include "base/ice_dcb.h"
16 #include "base/ice_common.h"
17 
18 #include "rte_pmd_ice.h"
19 #include "ice_ethdev.h"
20 #include "ice_rxtx.h"
21 #include "ice_generic_flow.h"
22 
23 /* devargs */
24 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
25 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
26 #define ICE_FLOW_MARK_SUPPORT_ARG	"flow-mark-support"
27 #define ICE_PROTO_XTR_ARG         "proto_xtr"
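
/*
 * Illustrative devargs usage (values and PCI address are examples only):
 *   -w 18:00.0,safe-mode-support=1,pipeline-mode-support=1,flow-mark-support=1
 *   -w 18:00.0,proto_xtr='[(1,2-3,8-9):tcp,10-23:ipv6]'
 * The proto_xtr grammar is handled by parse_queue_proto_xtr() and
 * parse_queue_set() below.
 */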
28 
29 static const char * const ice_valid_args[] = {
30 	ICE_SAFE_MODE_SUPPORT_ARG,
31 	ICE_PIPELINE_MODE_SUPPORT_ARG,
32 	ICE_FLOW_MARK_SUPPORT_ARG,
33 	ICE_PROTO_XTR_ARG,
34 	NULL
35 };
36 
37 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
38 	.name = "ice_dynfield_proto_xtr_metadata",
39 	.size = sizeof(uint32_t),
40 	.align = __alignof__(uint32_t),
41 	.flags = 0,
42 };
43 
44 struct proto_xtr_ol_flag {
45 	const struct rte_mbuf_dynflag param;
46 	uint64_t *ol_flag;
47 	bool required;
48 };
49 
50 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
51 	[PROTO_XTR_VLAN] = {
52 		.param = { .name = "ice_dynflag_proto_xtr_vlan" },
53 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
54 	[PROTO_XTR_IPV4] = {
55 		.param = { .name = "ice_dynflag_proto_xtr_ipv4" },
56 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
57 	[PROTO_XTR_IPV6] = {
58 		.param = { .name = "ice_dynflag_proto_xtr_ipv6" },
59 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
60 	[PROTO_XTR_IPV6_FLOW] = {
61 		.param = { .name = "ice_dynflag_proto_xtr_ipv6_flow" },
62 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
63 	[PROTO_XTR_TCP] = {
64 		.param = { .name = "ice_dynflag_proto_xtr_tcp" },
65 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
66 };
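
/*
 * A minimal sketch (not part of the driver) of how an application could read
 * the extracted metadata registered above via the generic mbuf dynamic
 * field/flag API; the lookup names match the registrations in this file:
 *
 *   int off = rte_mbuf_dynfield_lookup("ice_dynfield_proto_xtr_metadata", NULL);
 *   int bit = rte_mbuf_dynflag_lookup("ice_dynflag_proto_xtr_vlan", NULL);
 *   if (off >= 0 && bit >= 0 && (mb->ol_flags & (1ULL << bit)))
 *           meta = *RTE_MBUF_DYNFIELD(mb, off, uint32_t *);
 */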
67 
68 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
69 
70 #define ICE_OS_DEFAULT_PKG_NAME		"ICE OS Default Package"
71 #define ICE_COMMS_PKG_NAME			"ICE COMMS Package"
72 #define ICE_MAX_RES_DESC_NUM        1024
73 
74 static int ice_dev_configure(struct rte_eth_dev *dev);
75 static int ice_dev_start(struct rte_eth_dev *dev);
76 static void ice_dev_stop(struct rte_eth_dev *dev);
77 static void ice_dev_close(struct rte_eth_dev *dev);
78 static int ice_dev_reset(struct rte_eth_dev *dev);
79 static int ice_dev_info_get(struct rte_eth_dev *dev,
80 			    struct rte_eth_dev_info *dev_info);
81 static int ice_link_update(struct rte_eth_dev *dev,
82 			   int wait_to_complete);
83 static int ice_dev_set_link_up(struct rte_eth_dev *dev);
84 static int ice_dev_set_link_down(struct rte_eth_dev *dev);
85 
86 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
87 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
88 static int ice_rss_reta_update(struct rte_eth_dev *dev,
89 			       struct rte_eth_rss_reta_entry64 *reta_conf,
90 			       uint16_t reta_size);
91 static int ice_rss_reta_query(struct rte_eth_dev *dev,
92 			      struct rte_eth_rss_reta_entry64 *reta_conf,
93 			      uint16_t reta_size);
94 static int ice_rss_hash_update(struct rte_eth_dev *dev,
95 			       struct rte_eth_rss_conf *rss_conf);
96 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
97 				 struct rte_eth_rss_conf *rss_conf);
98 static int ice_promisc_enable(struct rte_eth_dev *dev);
99 static int ice_promisc_disable(struct rte_eth_dev *dev);
100 static int ice_allmulti_enable(struct rte_eth_dev *dev);
101 static int ice_allmulti_disable(struct rte_eth_dev *dev);
102 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
103 			       uint16_t vlan_id,
104 			       int on);
105 static int ice_macaddr_set(struct rte_eth_dev *dev,
106 			   struct rte_ether_addr *mac_addr);
107 static int ice_macaddr_add(struct rte_eth_dev *dev,
108 			   struct rte_ether_addr *mac_addr,
109 			   __rte_unused uint32_t index,
110 			   uint32_t pool);
111 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
112 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
113 				    uint16_t queue_id);
114 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
115 				     uint16_t queue_id);
116 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
117 			      size_t fw_size);
118 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
119 			     uint16_t pvid, int on);
120 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
121 static int ice_get_eeprom(struct rte_eth_dev *dev,
122 			  struct rte_dev_eeprom_info *eeprom);
123 static int ice_stats_get(struct rte_eth_dev *dev,
124 			 struct rte_eth_stats *stats);
125 static int ice_stats_reset(struct rte_eth_dev *dev);
126 static int ice_xstats_get(struct rte_eth_dev *dev,
127 			  struct rte_eth_xstat *xstats, unsigned int n);
128 static int ice_xstats_get_names(struct rte_eth_dev *dev,
129 				struct rte_eth_xstat_name *xstats_names,
130 				unsigned int limit);
131 static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
132 			enum rte_filter_type filter_type,
133 			enum rte_filter_op filter_op,
134 			void *arg);
135 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
136 			struct rte_eth_udp_tunnel *udp_tunnel);
137 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
138 			struct rte_eth_udp_tunnel *udp_tunnel);
139 
140 static const struct rte_pci_id pci_id_ice_map[] = {
141 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
142 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
143 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
144 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
145 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
146 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
147 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
148 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
149 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
150 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
151 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
152 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
153 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
154 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
155 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
156 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
157 	{ .vendor_id = 0, /* sentinel */ },
158 };
159 
160 static const struct eth_dev_ops ice_eth_dev_ops = {
161 	.dev_configure                = ice_dev_configure,
162 	.dev_start                    = ice_dev_start,
163 	.dev_stop                     = ice_dev_stop,
164 	.dev_close                    = ice_dev_close,
165 	.dev_reset                    = ice_dev_reset,
166 	.dev_set_link_up              = ice_dev_set_link_up,
167 	.dev_set_link_down            = ice_dev_set_link_down,
168 	.rx_queue_start               = ice_rx_queue_start,
169 	.rx_queue_stop                = ice_rx_queue_stop,
170 	.tx_queue_start               = ice_tx_queue_start,
171 	.tx_queue_stop                = ice_tx_queue_stop,
172 	.rx_queue_setup               = ice_rx_queue_setup,
173 	.rx_queue_release             = ice_rx_queue_release,
174 	.tx_queue_setup               = ice_tx_queue_setup,
175 	.tx_queue_release             = ice_tx_queue_release,
176 	.dev_infos_get                = ice_dev_info_get,
177 	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
178 	.link_update                  = ice_link_update,
179 	.mtu_set                      = ice_mtu_set,
180 	.mac_addr_set                 = ice_macaddr_set,
181 	.mac_addr_add                 = ice_macaddr_add,
182 	.mac_addr_remove              = ice_macaddr_remove,
183 	.vlan_filter_set              = ice_vlan_filter_set,
184 	.vlan_offload_set             = ice_vlan_offload_set,
185 	.reta_update                  = ice_rss_reta_update,
186 	.reta_query                   = ice_rss_reta_query,
187 	.rss_hash_update              = ice_rss_hash_update,
188 	.rss_hash_conf_get            = ice_rss_hash_conf_get,
189 	.promiscuous_enable           = ice_promisc_enable,
190 	.promiscuous_disable          = ice_promisc_disable,
191 	.allmulticast_enable          = ice_allmulti_enable,
192 	.allmulticast_disable         = ice_allmulti_disable,
193 	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
194 	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
195 	.fw_version_get               = ice_fw_version_get,
196 	.vlan_pvid_set                = ice_vlan_pvid_set,
197 	.rxq_info_get                 = ice_rxq_info_get,
198 	.txq_info_get                 = ice_txq_info_get,
199 	.rx_burst_mode_get            = ice_rx_burst_mode_get,
200 	.tx_burst_mode_get            = ice_tx_burst_mode_get,
201 	.get_eeprom_length            = ice_get_eeprom_length,
202 	.get_eeprom                   = ice_get_eeprom,
203 	.rx_queue_count               = ice_rx_queue_count,
204 	.rx_descriptor_status         = ice_rx_descriptor_status,
205 	.tx_descriptor_status         = ice_tx_descriptor_status,
206 	.stats_get                    = ice_stats_get,
207 	.stats_reset                  = ice_stats_reset,
208 	.xstats_get                   = ice_xstats_get,
209 	.xstats_get_names             = ice_xstats_get_names,
210 	.xstats_reset                 = ice_stats_reset,
211 	.filter_ctrl                  = ice_dev_filter_ctrl,
212 	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
213 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
214 	.tx_done_cleanup              = ice_tx_done_cleanup,
215 };
216 
217 /* store statistics names and their offsets in the stats structure */
218 struct ice_xstats_name_off {
219 	char name[RTE_ETH_XSTATS_NAME_SIZE];
220 	unsigned int offset;
221 };
222 
223 static const struct ice_xstats_name_off ice_stats_strings[] = {
224 	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
225 	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
226 	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
227 	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
228 	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
229 		rx_unknown_protocol)},
230 	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
231 	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
232 	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
233 	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
234 };
235 
236 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
237 		sizeof(ice_stats_strings[0]))
238 
239 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
240 	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
241 		tx_dropped_link_down)},
242 	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
243 	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
244 		illegal_bytes)},
245 	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
246 	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
247 		mac_local_faults)},
248 	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
249 		mac_remote_faults)},
250 	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
251 		rx_len_errors)},
252 	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
253 	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
254 	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
255 	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
256 	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
257 	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
258 		rx_size_127)},
259 	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
260 		rx_size_255)},
261 	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
262 		rx_size_511)},
263 	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
264 		rx_size_1023)},
265 	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
266 		rx_size_1522)},
267 	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
268 		rx_size_big)},
269 	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
270 		rx_undersize)},
271 	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
272 		rx_oversize)},
273 	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
274 		mac_short_pkt_dropped)},
275 	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
276 		rx_fragments)},
277 	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
278 	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
279 	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
280 		tx_size_127)},
281 	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
282 		tx_size_255)},
283 	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
284 		tx_size_511)},
285 	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
286 		tx_size_1023)},
287 	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
288 		tx_size_1522)},
289 	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
290 		tx_size_big)},
291 };
292 
293 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
294 		sizeof(ice_hw_port_strings[0]))
295 
296 static void
297 ice_init_controlq_parameter(struct ice_hw *hw)
298 {
299 	/* fields for adminq */
300 	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
301 	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
302 	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
303 	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
304 
305 	/* fields for mailboxq, DPDK acting as the PF host */
306 	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
307 	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
308 	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
309 	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
310 }
311 
312 static int
313 lookup_proto_xtr_type(const char *xtr_name)
314 {
315 	static struct {
316 		const char *name;
317 		enum proto_xtr_type type;
318 	} xtr_type_map[] = {
319 		{ "vlan",      PROTO_XTR_VLAN      },
320 		{ "ipv4",      PROTO_XTR_IPV4      },
321 		{ "ipv6",      PROTO_XTR_IPV6      },
322 		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
323 		{ "tcp",       PROTO_XTR_TCP       },
324 	};
325 	uint32_t i;
326 
327 	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
328 		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
329 			return xtr_type_map[i].type;
330 	}
331 
332 	return -1;
333 }
334 
335 /*
336  * Parse an element; the element can be a single number/range or a '(' ')' group.
337  * 1) A single number element is just a simple digit, e.g. 9
338  * 2) A single range element is two digits with a '-' between them, e.g. 2-6
339  * 3) A group element combines multiple 1) or 2) entries with '( )', e.g. (0,2-4,6)
340  *    Within a group element, '-' is used as a range separator and
341  *                            ',' separates single numbers.
342  */
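/*
 * For example (illustrative), the element "(0,2-4,6):tcp" in the proto_xtr
 * devargs assigns the TCP extraction type to queues 0, 2, 3, 4 and 6, and
 * "8-9:vlan" assigns the VLAN type to queues 8 and 9.
 */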
343 static int
344 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
345 {
346 	const char *str = input;
347 	char *end = NULL;
348 	uint32_t min, max;
349 	uint32_t idx;
350 
351 	while (isblank(*str))
352 		str++;
353 
354 	if (!isdigit(*str) && *str != '(')
355 		return -1;
356 
357 	/* process a single number or a single range of numbers */
358 	if (*str != '(') {
359 		errno = 0;
360 		idx = strtoul(str, &end, 10);
361 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
362 			return -1;
363 
364 		while (isblank(*end))
365 			end++;
366 
367 		min = idx;
368 		max = idx;
369 
370 		/* process single <number>-<number> */
371 		if (*end == '-') {
372 			end++;
373 			while (isblank(*end))
374 				end++;
375 			if (!isdigit(*end))
376 				return -1;
377 
378 			errno = 0;
379 			idx = strtoul(end, &end, 10);
380 			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
381 				return -1;
382 
383 			max = idx;
384 			while (isblank(*end))
385 				end++;
386 		}
387 
388 		if (*end != ':')
389 			return -1;
390 
391 		for (idx = RTE_MIN(min, max);
392 		     idx <= RTE_MAX(min, max); idx++)
393 			devargs->proto_xtr[idx] = xtr_type;
394 
395 		return 0;
396 	}
397 
398 	/* process a set within brackets */
399 	str++;
400 	while (isblank(*str))
401 		str++;
402 	if (*str == '\0')
403 		return -1;
404 
405 	min = ICE_MAX_QUEUE_NUM;
406 	do {
407 		/* advance to the first digit */
408 		while (isblank(*str))
409 			str++;
410 		if (!isdigit(*str))
411 			return -1;
412 
413 		/* get the digit value */
414 		errno = 0;
415 		idx = strtoul(str, &end, 10);
416 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
417 			return -1;
418 
419 		/* advance to the separator '-', ',' or ')' */
420 		while (isblank(*end))
421 			end++;
422 		if (*end == '-') {
423 			if (min == ICE_MAX_QUEUE_NUM)
424 				min = idx;
425 			else /* avoid continuous '-' */
426 				return -1;
427 		} else if (*end == ',' || *end == ')') {
428 			max = idx;
429 			if (min == ICE_MAX_QUEUE_NUM)
430 				min = idx;
431 
432 			for (idx = RTE_MIN(min, max);
433 			     idx <= RTE_MAX(min, max); idx++)
434 				devargs->proto_xtr[idx] = xtr_type;
435 
436 			min = ICE_MAX_QUEUE_NUM;
437 		} else {
438 			return -1;
439 		}
440 
441 		str = end + 1;
442 	} while (*end != ')' && *end != '\0');
443 
444 	return 0;
445 }
446 
447 static int
448 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
449 {
450 	const char *queue_start;
451 	uint32_t idx;
452 	int xtr_type;
453 	char xtr_name[32];
454 
455 	while (isblank(*queues))
456 		queues++;
457 
458 	if (*queues != '[') {
459 		xtr_type = lookup_proto_xtr_type(queues);
460 		if (xtr_type < 0)
461 			return -1;
462 
463 		devargs->proto_xtr_dflt = xtr_type;
464 
465 		return 0;
466 	}
467 
468 	queues++;
469 	do {
470 		while (isblank(*queues))
471 			queues++;
472 		if (*queues == '\0')
473 			return -1;
474 
475 		queue_start = queues;
476 
477 		/* skip over a complete bracketed group */
478 		if (*queue_start == '(') {
479 			queues += strcspn(queues, ")");
480 			if (*queues != ')')
481 				return -1;
482 		}
483 
484 		/* scan the separator ':' */
485 		queues += strcspn(queues, ":");
486 		if (*queues++ != ':')
487 			return -1;
488 		while (isblank(*queues))
489 			queues++;
490 
491 		for (idx = 0; ; idx++) {
492 			if (isblank(queues[idx]) ||
493 			    queues[idx] == ',' ||
494 			    queues[idx] == ']' ||
495 			    queues[idx] == '\0')
496 				break;
497 
498 			if (idx > sizeof(xtr_name) - 2)
499 				return -1;
500 
501 			xtr_name[idx] = queues[idx];
502 		}
503 		xtr_name[idx] = '\0';
504 		xtr_type = lookup_proto_xtr_type(xtr_name);
505 		if (xtr_type < 0)
506 			return -1;
507 
508 		queues += idx;
509 
510 		while (isblank(*queues) || *queues == ',' || *queues == ']')
511 			queues++;
512 
513 		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
514 			return -1;
515 	} while (*queues != '\0');
516 
517 	return 0;
518 }
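
/*
 * Putting it together (illustrative): a proto_xtr value of
 * "[(1,2-3):tcp,4:vlan]" fills devargs->proto_xtr[1..3] with PROTO_XTR_TCP and
 * devargs->proto_xtr[4] with PROTO_XTR_VLAN, while a bare value such as "ipv6"
 * only sets devargs->proto_xtr_dflt, the default applied to all queues.
 */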
519 
520 static int
521 handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
522 		     void *extra_args)
523 {
524 	struct ice_devargs *devargs = extra_args;
525 
526 	if (value == NULL || extra_args == NULL)
527 		return -EINVAL;
528 
529 	if (parse_queue_proto_xtr(value, devargs) < 0) {
530 		PMD_DRV_LOG(ERR,
531 			    "The protocol extraction parameter is wrong : '%s'",
532 			    value);
533 		return -1;
534 	}
535 
536 	return 0;
537 }
538 
539 static bool
540 ice_proto_xtr_support(struct ice_hw *hw)
541 {
542 #define FLX_REG(val, fld, idx) \
543 	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
544 	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
545 	static struct {
546 		uint32_t rxdid;
547 		uint16_t protid_0;
548 		uint16_t protid_1;
549 	} xtr_sets[] = {
550 		{ ICE_RXDID_COMMS_AUX_VLAN, ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O },
551 		{ ICE_RXDID_COMMS_AUX_IPV4, ICE_PROT_IPV4_OF_OR_S,
552 		  ICE_PROT_IPV4_OF_OR_S },
553 		{ ICE_RXDID_COMMS_AUX_IPV6, ICE_PROT_IPV6_OF_OR_S,
554 		  ICE_PROT_IPV6_OF_OR_S },
555 		{ ICE_RXDID_COMMS_AUX_IPV6_FLOW, ICE_PROT_IPV6_OF_OR_S,
556 		  ICE_PROT_IPV6_OF_OR_S },
557 		{ ICE_RXDID_COMMS_AUX_TCP, ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
558 	};
559 	uint32_t i;
560 
561 	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
562 		uint32_t rxdid = xtr_sets[i].rxdid;
563 		uint32_t v;
564 
565 		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
566 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
567 
568 			if (FLX_REG(v, PROT_MDID, 4) != xtr_sets[i].protid_0 ||
569 			    FLX_REG(v, RXDID_OPCODE, 4) != ICE_RX_OPC_EXTRACT)
570 				return false;
571 		}
572 
573 		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
574 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
575 
576 			if (FLX_REG(v, PROT_MDID, 5) != xtr_sets[i].protid_1 ||
577 			    FLX_REG(v, RXDID_OPCODE, 5) != ICE_RX_OPC_EXTRACT)
578 				return false;
579 		}
580 	}
581 
582 	return true;
583 }
584 
585 static int
586 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
587 		  uint32_t num)
588 {
589 	struct pool_entry *entry;
590 
591 	if (!pool || !num)
592 		return -EINVAL;
593 
594 	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
595 	if (!entry) {
596 		PMD_INIT_LOG(ERR,
597 			     "Failed to allocate memory for resource pool");
598 		return -ENOMEM;
599 	}
600 
601 	/* initialize the queue heap */
602 	pool->num_free = num;
603 	pool->num_alloc = 0;
604 	pool->base = base;
605 	LIST_INIT(&pool->alloc_list);
606 	LIST_INIT(&pool->free_list);
607 
608 	/* Initialize element */
609 	entry->base = 0;
610 	entry->len = num;
611 
612 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
613 	return 0;
614 }
615 
616 static int
617 ice_res_pool_alloc(struct ice_res_pool_info *pool,
618 		   uint16_t num)
619 {
620 	struct pool_entry *entry, *valid_entry;
621 
622 	if (!pool || !num) {
623 		PMD_INIT_LOG(ERR, "Invalid parameter");
624 		return -EINVAL;
625 	}
626 
627 	if (pool->num_free < num) {
628 		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
629 			     num, pool->num_free);
630 		return -ENOMEM;
631 	}
632 
633 	valid_entry = NULL;
634 	/* Look up the free list and find the best-fit entry */
635 	LIST_FOREACH(entry, &pool->free_list, next) {
636 		if (entry->len >= num) {
637 			/* Find best one */
638 			if (entry->len == num) {
639 				valid_entry = entry;
640 				break;
641 			}
642 			if (!valid_entry ||
643 			    valid_entry->len > entry->len)
644 				valid_entry = entry;
645 		}
646 	}
647 
648 	/* No entry found to satisfy the request, return */
649 	if (!valid_entry) {
650 		PMD_INIT_LOG(ERR, "No valid entry found");
651 		return -ENOMEM;
652 	}
653 	/**
654 	 * The entry has exactly the requested number of queues;
655 	 * remove it from the free_list.
656 	 */
657 	if (valid_entry->len == num) {
658 		LIST_REMOVE(valid_entry, next);
659 	} else {
660 		/**
661 		 * The entry has more queues than requested;
662 		 * create a new entry for the alloc_list and reduce its
663 		 * queue base and length in the free_list.
664 		 */
665 		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
666 		if (!entry) {
667 			PMD_INIT_LOG(ERR,
668 				     "Failed to allocate memory for "
669 				     "resource pool");
670 			return -ENOMEM;
671 		}
672 		entry->base = valid_entry->base;
673 		entry->len = num;
674 		valid_entry->base += num;
675 		valid_entry->len -= num;
676 		valid_entry = entry;
677 	}
678 
679 	/* Insert it into alloc list, not sorted */
680 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
681 
682 	pool->num_free -= valid_entry->len;
683 	pool->num_alloc += valid_entry->len;
684 
685 	return valid_entry->base + pool->base;
686 }
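
/*
 * Worked example (illustrative): after ice_res_pool_init(&pool, 10, 64) the
 * free_list holds a single entry {base 0, len 64}. A following
 * ice_res_pool_alloc(&pool, 16) splits it, moves {base 0, len 16} to the
 * alloc_list, leaves {base 16, len 48} on the free_list and returns
 * 0 + pool->base = 10.
 */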
687 
688 static void
689 ice_res_pool_destroy(struct ice_res_pool_info *pool)
690 {
691 	struct pool_entry *entry, *next_entry;
692 
693 	if (!pool)
694 		return;
695 
696 	for (entry = LIST_FIRST(&pool->alloc_list);
697 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
698 	     entry = next_entry) {
699 		LIST_REMOVE(entry, next);
700 		rte_free(entry);
701 	}
702 
703 	for (entry = LIST_FIRST(&pool->free_list);
704 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
705 	     entry = next_entry) {
706 		LIST_REMOVE(entry, next);
707 		rte_free(entry);
708 	}
709 
710 	pool->num_free = 0;
711 	pool->num_alloc = 0;
712 	pool->base = 0;
713 	LIST_INIT(&pool->alloc_list);
714 	LIST_INIT(&pool->free_list);
715 }
716 
717 static void
718 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
719 {
720 	/* Set VSI LUT selection */
721 	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
722 			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
723 	/* Set Hash scheme */
724 	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
725 			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
726 	/* enable TC */
727 	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
728 }
729 
730 static enum ice_status
731 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
732 				struct ice_aqc_vsi_props *info,
733 				uint8_t enabled_tcmap)
734 {
735 	uint16_t bsf, qp_idx;
736 
737 	/* Default to TC 0 for now; multi-TC support needs to be added later.
738 	 * Configure TC and queue mapping parameters; for each enabled TC,
739 	 * allocate qpnum_per_tc queues to that traffic class.
740 	 */
741 	if (enabled_tcmap != 0x01) {
742 		PMD_INIT_LOG(ERR, "only TC0 is supported");
743 		return -ENOTSUP;
744 	}
745 
746 	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
747 	bsf = rte_bsf32(vsi->nb_qps);
748 	/* Adjust the queue number to the actual number of queues that can be applied */
749 	vsi->nb_qps = 0x1 << bsf;
750 
751 	qp_idx = 0;
752 	/* Set tc and queue mapping with VSI */
753 	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
754 						ICE_AQ_VSI_TC_Q_OFFSET_S) |
755 					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
756 
757 	/* Associate queue number with VSI */
758 	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
759 	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
760 	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
761 	info->valid_sections |=
762 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
763 	/* Set the info.ingress_table and info.egress_table
764 	 * for UP translate table. Now just set it to 1:1 map by default
765 	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
766 	 */
767 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
768 	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
769 	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
770 	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
771 	return 0;
772 }
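
/*
 * Illustrative mapping: with vsi->nb_qps = 16, bsf = rte_bsf32(16) = 4, so
 * tc_mapping[0] encodes queue offset 0 and 2^4 = 16 queues for TC0, while
 * q_mapping[0]/q_mapping[1] carry the VSI base queue and the queue count.
 */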
773 
774 static int
775 ice_init_mac_address(struct rte_eth_dev *dev)
776 {
777 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
778 
779 	if (!rte_is_unicast_ether_addr
780 		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
781 		PMD_INIT_LOG(ERR, "Invalid MAC address");
782 		return -EINVAL;
783 	}
784 
785 	rte_ether_addr_copy(
786 		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
787 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
788 
789 	dev->data->mac_addrs =
790 		rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0);
791 	if (!dev->data->mac_addrs) {
792 		PMD_INIT_LOG(ERR,
793 			     "Failed to allocate memory to store mac address");
794 		return -ENOMEM;
795 	}
796 	/* store it to dev data */
797 	rte_ether_addr_copy(
798 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
799 		&dev->data->mac_addrs[0]);
800 	return 0;
801 }
802 
803 /* Find out specific MAC filter */
804 static struct ice_mac_filter *
805 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
806 {
807 	struct ice_mac_filter *f;
808 
809 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
810 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
811 			return f;
812 	}
813 
814 	return NULL;
815 }
816 
817 static int
818 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
819 {
820 	struct ice_fltr_list_entry *m_list_itr = NULL;
821 	struct ice_mac_filter *f;
822 	struct LIST_HEAD_TYPE list_head;
823 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
824 	int ret = 0;
825 
826 	/* If it's added and configured, return */
827 	f = ice_find_mac_filter(vsi, mac_addr);
828 	if (f) {
829 		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
830 		return 0;
831 	}
832 
833 	INIT_LIST_HEAD(&list_head);
834 
835 	m_list_itr = (struct ice_fltr_list_entry *)
836 		ice_malloc(hw, sizeof(*m_list_itr));
837 	if (!m_list_itr) {
838 		ret = -ENOMEM;
839 		goto DONE;
840 	}
841 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
842 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
843 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
844 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
845 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
846 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
847 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
848 
849 	LIST_ADD(&m_list_itr->list_entry, &list_head);
850 
851 	/* Add the mac */
852 	ret = ice_add_mac(hw, &list_head);
853 	if (ret != ICE_SUCCESS) {
854 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
855 		ret = -EINVAL;
856 		goto DONE;
857 	}
858 	/* Add the mac addr into mac list */
859 	f = rte_zmalloc(NULL, sizeof(*f), 0);
860 	if (!f) {
861 		PMD_DRV_LOG(ERR, "failed to allocate memory");
862 		ret = -ENOMEM;
863 		goto DONE;
864 	}
865 	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
866 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
867 	vsi->mac_num++;
868 
869 	ret = 0;
870 
871 DONE:
872 	rte_free(m_list_itr);
873 	return ret;
874 }
875 
876 static int
877 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
878 {
879 	struct ice_fltr_list_entry *m_list_itr = NULL;
880 	struct ice_mac_filter *f;
881 	struct LIST_HEAD_TYPE list_head;
882 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
883 	int ret = 0;
884 
885 	/* Can't find it, return an error */
886 	f = ice_find_mac_filter(vsi, mac_addr);
887 	if (!f)
888 		return -EINVAL;
889 
890 	INIT_LIST_HEAD(&list_head);
891 
892 	m_list_itr = (struct ice_fltr_list_entry *)
893 		ice_malloc(hw, sizeof(*m_list_itr));
894 	if (!m_list_itr) {
895 		ret = -ENOMEM;
896 		goto DONE;
897 	}
898 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
899 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
900 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
901 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
902 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
903 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
904 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
905 
906 	LIST_ADD(&m_list_itr->list_entry, &list_head);
907 
908 	/* remove the mac filter */
909 	ret = ice_remove_mac(hw, &list_head);
910 	if (ret != ICE_SUCCESS) {
911 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
912 		ret = -EINVAL;
913 		goto DONE;
914 	}
915 
916 	/* Remove the mac addr from mac list */
917 	TAILQ_REMOVE(&vsi->mac_list, f, next);
918 	rte_free(f);
919 	vsi->mac_num--;
920 
921 	ret = 0;
922 DONE:
923 	rte_free(m_list_itr);
924 	return ret;
925 }
926 
927 /* Find out specific VLAN filter */
928 static struct ice_vlan_filter *
929 ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
930 {
931 	struct ice_vlan_filter *f;
932 
933 	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
934 		if (vlan_id == f->vlan_info.vlan_id)
935 			return f;
936 	}
937 
938 	return NULL;
939 }
940 
941 static int
942 ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
943 {
944 	struct ice_fltr_list_entry *v_list_itr = NULL;
945 	struct ice_vlan_filter *f;
946 	struct LIST_HEAD_TYPE list_head;
947 	struct ice_hw *hw;
948 	int ret = 0;
949 
950 	if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
951 		return -EINVAL;
952 
953 	hw = ICE_VSI_TO_HW(vsi);
954 
955 	/* If it's added and configured, return. */
956 	f = ice_find_vlan_filter(vsi, vlan_id);
957 	if (f) {
958 		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
959 		return 0;
960 	}
961 
962 	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
963 		return 0;
964 
965 	INIT_LIST_HEAD(&list_head);
966 
967 	v_list_itr = (struct ice_fltr_list_entry *)
968 		      ice_malloc(hw, sizeof(*v_list_itr));
969 	if (!v_list_itr) {
970 		ret = -ENOMEM;
971 		goto DONE;
972 	}
973 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
974 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
975 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
976 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
977 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
978 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
979 
980 	LIST_ADD(&v_list_itr->list_entry, &list_head);
981 
982 	/* Add the vlan */
983 	ret = ice_add_vlan(hw, &list_head);
984 	if (ret != ICE_SUCCESS) {
985 		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
986 		ret = -EINVAL;
987 		goto DONE;
988 	}
989 
990 	/* Add vlan into vlan list */
991 	f = rte_zmalloc(NULL, sizeof(*f), 0);
992 	if (!f) {
993 		PMD_DRV_LOG(ERR, "failed to allocate memory");
994 		ret = -ENOMEM;
995 		goto DONE;
996 	}
997 	f->vlan_info.vlan_id = vlan_id;
998 	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
999 	vsi->vlan_num++;
1000 
1001 	ret = 0;
1002 
1003 DONE:
1004 	rte_free(v_list_itr);
1005 	return ret;
1006 }
1007 
1008 static int
1009 ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
1010 {
1011 	struct ice_fltr_list_entry *v_list_itr = NULL;
1012 	struct ice_vlan_filter *f;
1013 	struct LIST_HEAD_TYPE list_head;
1014 	struct ice_hw *hw;
1015 	int ret = 0;
1016 
1017 	/**
1018 	 * Vlan 0 is the generic filter for untagged packets
1019 	 * and can't be removed.
1020 	 */
1021 	if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
1022 		return -EINVAL;
1023 
1024 	hw = ICE_VSI_TO_HW(vsi);
1025 
1026 	/* Can't find it, return an error */
1027 	f = ice_find_vlan_filter(vsi, vlan_id);
1028 	if (!f)
1029 		return -EINVAL;
1030 
1031 	INIT_LIST_HEAD(&list_head);
1032 
1033 	v_list_itr = (struct ice_fltr_list_entry *)
1034 		      ice_malloc(hw, sizeof(*v_list_itr));
1035 	if (!v_list_itr) {
1036 		ret = -ENOMEM;
1037 		goto DONE;
1038 	}
1039 
1040 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
1041 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1042 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1043 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1044 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1045 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
1046 
1047 	LIST_ADD(&v_list_itr->list_entry, &list_head);
1048 
1049 	/* remove the vlan filter */
1050 	ret = ice_remove_vlan(hw, &list_head);
1051 	if (ret != ICE_SUCCESS) {
1052 		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
1053 		ret = -EINVAL;
1054 		goto DONE;
1055 	}
1056 
1057 	/* Remove the vlan id from vlan list */
1058 	TAILQ_REMOVE(&vsi->vlan_list, f, next);
1059 	rte_free(f);
1060 	vsi->vlan_num--;
1061 
1062 	ret = 0;
1063 DONE:
1064 	rte_free(v_list_itr);
1065 	return ret;
1066 }
1067 
1068 static int
1069 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
1070 {
1071 	struct ice_mac_filter *m_f;
1072 	struct ice_vlan_filter *v_f;
1073 	int ret = 0;
1074 
1075 	if (!vsi || !vsi->mac_num)
1076 		return -EINVAL;
1077 
1078 	TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
1079 		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
1080 		if (ret != ICE_SUCCESS) {
1081 			ret = -EINVAL;
1082 			goto DONE;
1083 		}
1084 	}
1085 
1086 	if (vsi->vlan_num == 0)
1087 		return 0;
1088 
1089 	TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
1090 		ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
1091 		if (ret != ICE_SUCCESS) {
1092 			ret = -EINVAL;
1093 			goto DONE;
1094 		}
1095 	}
1096 
1097 DONE:
1098 	return ret;
1099 }
1100 
1101 static int
1102 ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
1103 {
1104 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1105 	struct ice_vsi_ctx ctxt;
1106 	uint8_t qinq_flags;
1107 	int ret = 0;
1108 
1109 	/* Check whether it is already on or off */
1110 	if (vsi->info.valid_sections &
1111 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
1112 		if (on) {
1113 			if ((vsi->info.outer_tag_flags &
1114 			     ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
1115 			    ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
1116 				return 0; /* already on */
1117 		} else {
1118 			if (!(vsi->info.outer_tag_flags &
1119 			      ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
1120 				return 0; /* already off */
1121 		}
1122 	}
1123 
1124 	if (on)
1125 		qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
1126 	else
1127 		qinq_flags = 0;
1128 	/* clear global insertion and use per packet insertion */
1129 	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
1130 	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
1131 	vsi->info.outer_tag_flags |= qinq_flags;
1132 	/* use the default outer VLAN TPID 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
1133 	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
1134 	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
1135 				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
1136 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1137 	ctxt.info.valid_sections =
1138 			rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
1139 	ctxt.vsi_num = vsi->vsi_id;
1140 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
1141 	if (ret) {
1142 		PMD_DRV_LOG(INFO,
1143 			    "Update VSI failed to %s qinq insertion",
1144 			    on ? "enable" : "disable");
1145 		return -EINVAL;
1146 	}
1147 
1148 	vsi->info.valid_sections |=
1149 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
1150 
1151 	return ret;
1152 }
1153 
1154 static int
1155 ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
1156 {
1157 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1158 	struct ice_vsi_ctx ctxt;
1159 	uint8_t qinq_flags;
1160 	int ret = 0;
1161 
1162 	/* Check whether it is already on or off */
1163 	if (vsi->info.valid_sections &
1164 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
1165 		if (on) {
1166 			if ((vsi->info.outer_tag_flags &
1167 			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
1168 			    ICE_AQ_VSI_OUTER_TAG_COPY)
1169 				return 0; /* already on */
1170 		} else {
1171 			if ((vsi->info.outer_tag_flags &
1172 			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
1173 			    ICE_AQ_VSI_OUTER_TAG_NOTHING)
1174 				return 0; /* already off */
1175 		}
1176 	}
1177 
1178 	if (on)
1179 		qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
1180 	else
1181 		qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
1182 	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
1183 	vsi->info.outer_tag_flags |= qinq_flags;
1184 	/* use the default outer VLAN TPID 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
1185 	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
1186 	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
1187 				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
1188 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1189 	ctxt.info.valid_sections =
1190 			rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
1191 	ctxt.vsi_num = vsi->vsi_id;
1192 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
1193 	if (ret) {
1194 		PMD_DRV_LOG(INFO,
1195 			    "Update VSI failed to %s qinq stripping",
1196 			    on ? "enable" : "disable");
1197 		return -EINVAL;
1198 	}
1199 
1200 	vsi->info.valid_sections |=
1201 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
1202 
1203 	return ret;
1204 }
1205 
1206 static int
1207 ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
1208 {
1209 	int ret;
1210 
1211 	ret = ice_vsi_config_qinq_stripping(vsi, on);
1212 	if (ret)
1213 		PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);
1214 
1215 	ret = ice_vsi_config_qinq_insertion(vsi, on);
1216 	if (ret)
1217 		PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
1218 
1219 	return ret;
1220 }
1221 
1222 /* Enable IRQ0 */
1223 static void
1224 ice_pf_enable_irq0(struct ice_hw *hw)
1225 {
1226 	/* reset the registers */
1227 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
1228 	ICE_READ_REG(hw, PFINT_OICR);
1229 
1230 #ifdef ICE_LSE_SPT
1231 	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
1232 		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
1233 				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
1234 
1235 	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
1236 		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
1237 		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
1238 		       PFINT_OICR_CTL_ITR_INDX_M) |
1239 		      PFINT_OICR_CTL_CAUSE_ENA_M);
1240 
1241 	ICE_WRITE_REG(hw, PFINT_FW_CTL,
1242 		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
1243 		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
1244 		       PFINT_FW_CTL_ITR_INDX_M) |
1245 		      PFINT_FW_CTL_CAUSE_ENA_M);
1246 #else
1247 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
1248 #endif
1249 
1250 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1251 		      GLINT_DYN_CTL_INTENA_M |
1252 		      GLINT_DYN_CTL_CLEARPBA_M |
1253 		      GLINT_DYN_CTL_ITR_INDX_M);
1254 
1255 	ice_flush(hw);
1256 }
1257 
1258 /* Disable IRQ0 */
1259 static void
1260 ice_pf_disable_irq0(struct ice_hw *hw)
1261 {
1262 	/* Disable all interrupt types */
1263 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1264 	ice_flush(hw);
1265 }
1266 
1267 #ifdef ICE_LSE_SPT
1268 static void
1269 ice_handle_aq_msg(struct rte_eth_dev *dev)
1270 {
1271 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1272 	struct ice_ctl_q_info *cq = &hw->adminq;
1273 	struct ice_rq_event_info event;
1274 	uint16_t pending, opcode;
1275 	int ret;
1276 
1277 	event.buf_len = ICE_AQ_MAX_BUF_LEN;
1278 	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
1279 	if (!event.msg_buf) {
1280 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
1281 		return;
1282 	}
1283 
1284 	pending = 1;
1285 	while (pending) {
1286 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1287 
1288 		if (ret != ICE_SUCCESS) {
1289 			PMD_DRV_LOG(INFO,
1290 				    "Failed to read msg from AdminQ, "
1291 				    "adminq_err: %u",
1292 				    hw->adminq.sq_last_status);
1293 			break;
1294 		}
1295 		opcode = rte_le_to_cpu_16(event.desc.opcode);
1296 
1297 		switch (opcode) {
1298 		case ice_aqc_opc_get_link_status:
1299 			ret = ice_link_update(dev, 0);
1300 			if (!ret)
1301 				_rte_eth_dev_callback_process
1302 					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1303 			break;
1304 		default:
1305 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1306 				    opcode);
1307 			break;
1308 		}
1309 	}
1310 	rte_free(event.msg_buf);
1311 }
1312 #endif
1313 
1314 /**
1315  * Interrupt handler triggered by the NIC for handling
1316  * a specific interrupt.
1317  *
1318  * @param handle
1319  *  Pointer to interrupt handle.
1320  * @param param
1321  *  The address of the parameter (struct rte_eth_dev *) registered before.
1322  *
1323  * @return
1324  *  void
1325  */
1326 static void
1327 ice_interrupt_handler(void *param)
1328 {
1329 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1330 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1331 	uint32_t oicr;
1332 	uint32_t reg;
1333 	uint8_t pf_num;
1334 	uint8_t event;
1335 	uint16_t queue;
1336 	int ret;
1337 #ifdef ICE_LSE_SPT
1338 	uint32_t int_fw_ctl;
1339 #endif
1340 
1341 	/* Disable interrupt */
1342 	ice_pf_disable_irq0(hw);
1343 
1344 	/* read out interrupt causes */
1345 	oicr = ICE_READ_REG(hw, PFINT_OICR);
1346 #ifdef ICE_LSE_SPT
1347 	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1348 #endif
1349 
1350 	/* No interrupt event indicated */
1351 	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1352 		PMD_DRV_LOG(INFO, "No interrupt event");
1353 		goto done;
1354 	}
1355 
1356 #ifdef ICE_LSE_SPT
1357 	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1358 		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1359 		ice_handle_aq_msg(dev);
1360 	}
1361 #else
1362 	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1363 		PMD_DRV_LOG(INFO, "OICR: link state change event");
1364 		ret = ice_link_update(dev, 0);
1365 		if (!ret)
1366 			_rte_eth_dev_callback_process
1367 				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1368 	}
1369 #endif
1370 
1371 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
1372 		PMD_DRV_LOG(WARNING, "OICR: MDD event");
1373 		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1374 		if (reg & GL_MDET_TX_PQM_VALID_M) {
1375 			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1376 				 GL_MDET_TX_PQM_PF_NUM_S;
1377 			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1378 				GL_MDET_TX_PQM_MAL_TYPE_S;
1379 			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1380 				GL_MDET_TX_PQM_QNUM_S;
1381 
1382 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1383 				    "%d by PQM on TX queue %d PF# %d",
1384 				    event, queue, pf_num);
1385 		}
1386 
1387 		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1388 		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1389 			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1390 				 GL_MDET_TX_TCLAN_PF_NUM_S;
1391 			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1392 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1393 			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1394 				GL_MDET_TX_TCLAN_QNUM_S;
1395 
1396 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1397 				    "%d by TCLAN on TX queue %d PF# %d",
1398 				    event, queue, pf_num);
1399 		}
1400 	}
1401 done:
1402 	/* Enable interrupt */
1403 	ice_pf_enable_irq0(hw);
1404 	rte_intr_ack(dev->intr_handle);
1405 }
1406 
1407 static void
1408 ice_init_proto_xtr(struct rte_eth_dev *dev)
1409 {
1410 	struct ice_adapter *ad =
1411 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1412 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1413 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1414 	const struct proto_xtr_ol_flag *ol_flag;
1415 	bool proto_xtr_enable = false;
1416 	int offset;
1417 	uint16_t i;
1418 
1419 	if (!ice_proto_xtr_support(hw)) {
1420 		PMD_DRV_LOG(NOTICE, "Protocol extraction is not supported");
1421 		return;
1422 	}
1423 
1424 	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1425 	if (unlikely(pf->proto_xtr == NULL)) {
1426 		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1427 		return;
1428 	}
1429 
1430 	for (i = 0; i < pf->lan_nb_qps; i++) {
1431 		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1432 				   ad->devargs.proto_xtr[i] :
1433 				   ad->devargs.proto_xtr_dflt;
1434 
1435 		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1436 			uint8_t type = pf->proto_xtr[i];
1437 
1438 			ice_proto_xtr_ol_flag_params[type].required = true;
1439 			proto_xtr_enable = true;
1440 		}
1441 	}
1442 
1443 	if (likely(!proto_xtr_enable))
1444 		return;
1445 
1446 	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1447 	if (unlikely(offset == -1)) {
1448 		PMD_DRV_LOG(ERR,
1449 			    "Protocol extraction metadata is disabled in mbuf with error %d",
1450 			    -rte_errno);
1451 		return;
1452 	}
1453 
1454 	PMD_DRV_LOG(DEBUG,
1455 		    "Protocol extraction metadata offset in mbuf is : %d",
1456 		    offset);
1457 	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1458 
1459 	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1460 		ol_flag = &ice_proto_xtr_ol_flag_params[i];
1461 
1462 		if (!ol_flag->required)
1463 			continue;
1464 
1465 		offset = rte_mbuf_dynflag_register(&ol_flag->param);
1466 		if (unlikely(offset == -1)) {
1467 			PMD_DRV_LOG(ERR,
1468 				    "Protocol extraction offload '%s' failed to register with error %d",
1469 				    ol_flag->param.name, -rte_errno);
1470 
1471 			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1472 			break;
1473 		}
1474 
1475 		PMD_DRV_LOG(DEBUG,
1476 			    "Protocol extraction offload '%s' offset in mbuf is : %d",
1477 			    ol_flag->param.name, offset);
1478 		*ol_flag->ol_flag = 1ULL << offset;
1479 	}
1480 }
1481 
1482 /* Initialize SW parameters of PF */
1483 static int
1484 ice_pf_sw_init(struct rte_eth_dev *dev)
1485 {
1486 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1487 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1488 
1489 	pf->lan_nb_qp_max =
1490 		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1491 				  hw->func_caps.common_cap.num_rxq);
1492 
1493 	pf->lan_nb_qps = pf->lan_nb_qp_max;
1494 
1495 	ice_init_proto_xtr(dev);
1496 
1497 	if (hw->func_caps.fd_fltr_guar > 0 ||
1498 	    hw->func_caps.fd_fltr_best_effort > 0) {
1499 		pf->flags |= ICE_FLAG_FDIR;
1500 		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1501 		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1502 	} else {
1503 		pf->fdir_nb_qps = 0;
1504 	}
1505 	pf->fdir_qp_offset = 0;
1506 
1507 	return 0;
1508 }
1509 
1510 struct ice_vsi *
1511 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1512 {
1513 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1514 	struct ice_vsi *vsi = NULL;
1515 	struct ice_vsi_ctx vsi_ctx;
1516 	int ret;
1517 	struct rte_ether_addr broadcast = {
1518 		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1519 	struct rte_ether_addr mac_addr;
1520 	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1521 	uint8_t tc_bitmap = 0x1;
1522 	uint16_t cfg;
1523 
1524 	/* hw->num_lports = 1 in NIC mode */
1525 	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1526 	if (!vsi)
1527 		return NULL;
1528 
1529 	vsi->idx = pf->next_vsi_idx;
1530 	pf->next_vsi_idx++;
1531 	vsi->type = type;
1532 	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1533 	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1534 	vsi->vlan_anti_spoof_on = 0;
1535 	vsi->vlan_filter_on = 1;
1536 	TAILQ_INIT(&vsi->mac_list);
1537 	TAILQ_INIT(&vsi->vlan_list);
1538 
1539 	/* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
1540 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1541 			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
1542 			hw->func_caps.common_cap.rss_table_size;
1543 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1544 
1545 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1546 	switch (type) {
1547 	case ICE_VSI_PF:
1548 		vsi->nb_qps = pf->lan_nb_qps;
1549 		vsi->base_queue = 1;
1550 		ice_vsi_config_default_rss(&vsi_ctx.info);
1551 		vsi_ctx.alloc_from_pool = true;
1552 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1553 		/* switch_id is queried by get_switch_config aq, which is done
1554 		 * by ice_init_hw
1555 		 */
1556 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1557 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1558 		/* Allow all untagged or tagged packets */
1559 		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
1560 		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
1561 		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1562 					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1563 
1564 		/* FDIR */
1565 		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1566 			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1567 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1568 		cfg = ICE_AQ_VSI_FD_ENABLE;
1569 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1570 		vsi_ctx.info.max_fd_fltr_dedicated =
1571 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1572 		vsi_ctx.info.max_fd_fltr_shared =
1573 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1574 
1575 		/* Enable VLAN/UP trip */
1576 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1577 						      &vsi_ctx.info,
1578 						      ICE_DEFAULT_TCMAP);
1579 		if (ret) {
1580 			PMD_INIT_LOG(ERR,
1581 				     "tc queue mapping with vsi failed, "
1582 				     "err = %d",
1583 				     ret);
1584 			goto fail_mem;
1585 		}
1586 
1587 		break;
1588 	case ICE_VSI_CTRL:
1589 		vsi->nb_qps = pf->fdir_nb_qps;
1590 		vsi->base_queue = ICE_FDIR_QUEUE_ID;
1591 		vsi_ctx.alloc_from_pool = true;
1592 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1593 
1594 		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1595 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1596 		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1597 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1598 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1599 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1600 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1601 						      &vsi_ctx.info,
1602 						      ICE_DEFAULT_TCMAP);
1603 		if (ret) {
1604 			PMD_INIT_LOG(ERR,
1605 				     "tc queue mapping with vsi failed, "
1606 				     "err = %d",
1607 				     ret);
1608 			goto fail_mem;
1609 		}
1610 		break;
1611 	default:
1612 		/* for other types of VSI */
1613 		PMD_INIT_LOG(ERR, "other types of VSI not supported");
1614 		goto fail_mem;
1615 	}
1616 
1617 	/* VF has MSIX interrupt in VF range, don't allocate here */
1618 	if (type == ICE_VSI_PF) {
1619 		ret = ice_res_pool_alloc(&pf->msix_pool,
1620 					 RTE_MIN(vsi->nb_qps,
1621 						 RTE_MAX_RXTX_INTR_VEC_ID));
1622 		if (ret < 0) {
1623 			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1624 				     vsi->vsi_id, ret);
1625 		}
1626 		vsi->msix_intr = ret;
1627 		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1628 	} else if (type == ICE_VSI_CTRL) {
1629 		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1630 		if (ret < 0) {
1631 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1632 				    vsi->vsi_id, ret);
1633 		}
1634 		vsi->msix_intr = ret;
1635 		vsi->nb_msix = 1;
1636 	} else {
1637 		vsi->msix_intr = 0;
1638 		vsi->nb_msix = 0;
1639 	}
1640 	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1641 	if (ret != ICE_SUCCESS) {
1642 		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1643 		goto fail_mem;
1644 	}
1645 	/* store vsi information in the SW structure */
1646 	vsi->vsi_id = vsi_ctx.vsi_num;
1647 	vsi->info = vsi_ctx.info;
1648 	pf->vsis_allocated = vsi_ctx.vsis_allocd;
1649 	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1650 
1651 	if (type == ICE_VSI_PF) {
1652 		/* MAC configuration */
1653 		rte_ether_addr_copy((struct rte_ether_addr *)
1654 					hw->port_info->mac.perm_addr,
1655 				    &pf->dev_addr);
1656 
1657 		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1658 		ret = ice_add_mac_filter(vsi, &mac_addr);
1659 		if (ret != ICE_SUCCESS)
1660 			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1661 
1662 		rte_ether_addr_copy(&broadcast, &mac_addr);
1663 		ret = ice_add_mac_filter(vsi, &mac_addr);
1664 		if (ret != ICE_SUCCESS)
1665 			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1666 	}
1667 
1668 	/* At the beginning, only TC0. */
1669 	/* What we need here is the maximum number of TX queues.
1670 	 * Currently vsi->nb_qps holds it.
1671 	 * Correct this if that ever changes.
1672 	 */
1673 	max_txqs[0] = vsi->nb_qps;
1674 	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1675 			      tc_bitmap, max_txqs);
1676 	if (ret != ICE_SUCCESS)
1677 		PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1678 
1679 	return vsi;
1680 fail_mem:
1681 	rte_free(vsi);
1682 	pf->next_vsi_idx--;
1683 	return NULL;
1684 }
1685 
1686 static int
1687 ice_send_driver_ver(struct ice_hw *hw)
1688 {
1689 	struct ice_driver_ver dv;
1690 
1691 	/* we don't have a driver version, use 0 as a dummy */
1692 	dv.major_ver = 0;
1693 	dv.minor_ver = 0;
1694 	dv.build_ver = 0;
1695 	dv.subbuild_ver = 0;
1696 	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1697 
1698 	return ice_aq_send_driver_ver(hw, &dv, NULL);
1699 }
1700 
1701 static int
1702 ice_pf_setup(struct ice_pf *pf)
1703 {
1704 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1705 	struct ice_vsi *vsi;
1706 	uint16_t unused;
1707 
1708 	/* Clear all stats counters */
1709 	pf->offset_loaded = false;
1710 	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1711 	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1712 	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1713 	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1714 
1715 	/* force guaranteed filter pool for PF */
1716 	ice_alloc_fd_guar_item(hw, &unused,
1717 			       hw->func_caps.fd_fltr_guar);
1718 	/* force shared filter pool for PF */
1719 	ice_alloc_fd_shrd_item(hw, &unused,
1720 			       hw->func_caps.fd_fltr_best_effort);
1721 
1722 	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1723 	if (!vsi) {
1724 		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1725 		return -EINVAL;
1726 	}
1727 
1728 	pf->main_vsi = vsi;
1729 
1730 	return 0;
1731 }
1732 
1733 /* PCIe configuration space setting */
1734 #define PCI_CFG_SPACE_SIZE          256
1735 #define PCI_CFG_SPACE_EXP_SIZE      4096
1736 #define PCI_EXT_CAP_ID(header)      (int)((header) & 0x0000ffff)
1737 #define PCI_EXT_CAP_NEXT(header)    (((header) >> 20) & 0xffc)
1738 #define PCI_EXT_CAP_ID_DSN          0x03
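
/*
 * Each PCIe extended capability header is one 32-bit word: bits 15:0 hold the
 * capability ID, bits 19:16 the capability version and bits 31:20 the offset
 * of the next capability, which the macros above mask to a 4-byte boundary.
 */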
1739 
1740 static int
1741 ice_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
1742 {
1743 	uint32_t header;
1744 	int ttl;
1745 	int pos = PCI_CFG_SPACE_SIZE;
1746 
1747 	/* minimum 8 bytes per capability */
1748 	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
1749 
1750 	if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
1751 		PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
1752 		return -1;
1753 	}
1754 
1755 	/*
1756 	 * If we have no capabilities, this is indicated by cap ID,
1757 	 * cap version and next pointer all being 0.
1758 	 */
1759 	if (header == 0)
1760 		return 0;
1761 
1762 	while (ttl-- > 0) {
1763 		if (PCI_EXT_CAP_ID(header) == cap)
1764 			return pos;
1765 
1766 		pos = PCI_EXT_CAP_NEXT(header);
1767 
1768 		if (pos < PCI_CFG_SPACE_SIZE)
1769 			break;
1770 
1771 		if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
1772 			PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
1773 			return -1;
1774 		}
1775 	}
1776 
1777 	return 0;
1778 }
1779 
1780 /*
1781  * Extract device serial number from PCIe Configuration Space and
1782  * determine the pkg file path according to the DSN.
1783  */
1784 static int
1785 ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
1786 {
1787 	int pos;
1788 	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1789 	uint32_t dsn_low, dsn_high;
1790 	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1791 
1792 	pos = ice_pci_find_next_ext_capability(pci_dev, PCI_EXT_CAP_ID_DSN);
1793 
1794 	if (pos) {
1795 		rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4);
1796 		rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8);
1797 		snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1798 			 "ice-%08x%08x.pkg", dsn_high, dsn_low);
1799 	} else {
1800 		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
1801 		goto fail_dsn;
1802 	}
1803 
1804 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1805 		ICE_MAX_PKG_FILENAME_SIZE);
1806 	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
1807 		return 0;
1808 
1809 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1810 		ICE_MAX_PKG_FILENAME_SIZE);
1811 	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
1812 		return 0;
1813 
1814 fail_dsn:
1815 	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1816 	if (!access(pkg_file, 0))
1817 		return 0;
1818 	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1819 	return 0;
1820 }
1821 
1822 enum ice_pkg_type
1823 ice_load_pkg_type(struct ice_hw *hw)
1824 {
1825 	enum ice_pkg_type package_type;
1826 
1827 	/* store the activated package type (OS default or Comms) */
1828 	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1829 		ICE_PKG_NAME_SIZE))
1830 		package_type = ICE_PKG_TYPE_OS_DEFAULT;
1831 	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1832 		ICE_PKG_NAME_SIZE))
1833 		package_type = ICE_PKG_TYPE_COMMS;
1834 	else
1835 		package_type = ICE_PKG_TYPE_UNKNOWN;
1836 
1837 	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s",
1838 		hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1839 		hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1840 		hw->active_pkg_name);
1841 
1842 	return package_type;
1843 }
1844 
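/* Locate the DDP package file for this device, read it into memory, download
 * it to the hardware and initialize the package-based HW tables.
 */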
1845 static int ice_load_pkg(struct rte_eth_dev *dev)
1846 {
1847 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1848 	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1849 	int err;
1850 	uint8_t *buf;
1851 	int buf_len;
1852 	FILE *file;
1853 	struct stat fstat;
1854 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1855 	struct ice_adapter *ad =
1856 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1857 
1858 	ice_pkg_file_search_path(pci_dev, pkg_file);
1859 
1860 	file = fopen(pkg_file, "rb");
1861 	if (!file)  {
1862 		PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1863 		return -1;
1864 	}
1865 
1866 	err = stat(pkg_file, &fstat);
1867 	if (err) {
1868 		PMD_INIT_LOG(ERR, "failed to get file stats\n");
1869 		fclose(file);
1870 		return err;
1871 	}
1872 
1873 	buf_len = fstat.st_size;
1874 	buf = rte_malloc(NULL, buf_len, 0);
1875 
1876 	if (!buf) {
1877 		PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1878 				buf_len);
1879 		fclose(file);
1880 		return -1;
1881 	}
1882 
1883 	err = fread(buf, buf_len, 1, file);
1884 	if (err != 1) {
1885 		PMD_INIT_LOG(ERR, "failed to read package data\n");
1886 		fclose(file);
1887 		err = -1;
1888 		goto fail_exit;
1889 	}
1890 
1891 	fclose(file);
1892 
1893 	err = ice_copy_and_init_pkg(hw, buf, buf_len);
1894 	if (err) {
1895 		PMD_INIT_LOG(ERR, "ice_copy_and_init_pkg failed: %d\n", err);
1896 		goto fail_exit;
1897 	}
1898 
1899 	/* store the loaded pkg type info */
1900 	ad->active_pkg_type = ice_load_pkg_type(hw);
1901 
1902 	err = ice_init_hw_tbls(hw);
1903 	if (err) {
1904 		PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1905 		goto fail_init_tbls;
1906 	}
1907 
1908 	return 0;
1909 
1910 fail_init_tbls:
1911 	rte_free(hw->pkg_copy);
1912 fail_exit:
1913 	rte_free(buf);
1914 	return err;
1915 }
1916 
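/* Read PFLAN_RX_QALLOC to learn the first (base) queue index allocated to
 * this PF.
 */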
1917 static void
1918 ice_base_queue_get(struct ice_pf *pf)
1919 {
1920 	uint32_t reg;
1921 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1922 
1923 	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1924 	if (reg & PFLAN_RX_QALLOC_VALID_M) {
1925 		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1926 	} else {
1927 		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
1928 					" index");
1929 	}
1930 }
1931 
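/* devargs callback: accept only "0" or "1" and store the result as an int */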
1932 static int
1933 parse_bool(const char *key, const char *value, void *args)
1934 {
1935 	int *i = (int *)args;
1936 	char *end;
1937 	int num;
1938 
1939 	num = strtoul(value, &end, 10);
1940 
1941 	if (num != 0 && num != 1) {
1942 		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1943 			"value must be 0 or 1",
1944 			value, key);
1945 		return -1;
1946 	}
1947 
1948 	*i = num;
1949 	return 0;
1950 }
1951 
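/* Parse the ice-specific devargs attached to the device, e.g. an EAL option
 * such as -w 0000:18:00.0,safe-mode-support=1 (the PCI address here is only
 * illustrative).
 */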
1952 static int ice_parse_devargs(struct rte_eth_dev *dev)
1953 {
1954 	struct ice_adapter *ad =
1955 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1956 	struct rte_devargs *devargs = dev->device->devargs;
1957 	struct rte_kvargs *kvlist;
1958 	int ret;
1959 
1960 	if (devargs == NULL)
1961 		return 0;
1962 
1963 	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1964 	if (kvlist == NULL) {
1965 		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
1966 		return -EINVAL;
1967 	}
1968 
1969 	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
1970 	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
1971 	       sizeof(ad->devargs.proto_xtr));
1972 
1973 	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
1974 				 &handle_proto_xtr_arg, &ad->devargs);
1975 	if (ret)
1976 		goto bail;
1977 
1978 	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
1979 				 &parse_bool, &ad->devargs.safe_mode_support);
1980 	if (ret)
1981 		goto bail;
1982 
1983 	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
1984 				 &parse_bool, &ad->devargs.pipe_mode_support);
1985 	if (ret)
1986 		goto bail;
1987 
1988 	ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG,
1989 				 &parse_bool, &ad->devargs.flow_mark_support);
1990 	if (ret)
1991 		goto bail;
1992 
1993 bail:
1994 	rte_kvargs_free(kvlist);
1995 	return ret;
1996 }
1997 
1998 /* Forward LLDP packets to the default VSI by setting switch rules */
1999 static int
2000 ice_vsi_config_sw_lldp(struct ice_vsi *vsi,  bool on)
2001 {
2002 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2003 	struct ice_fltr_list_entry *s_list_itr = NULL;
2004 	struct LIST_HEAD_TYPE list_head;
2005 	int ret = 0;
2006 
2007 	INIT_LIST_HEAD(&list_head);
2008 
2009 	s_list_itr = (struct ice_fltr_list_entry *)
2010 			ice_malloc(hw, sizeof(*s_list_itr));
2011 	if (!s_list_itr)
2012 		return -ENOMEM;
2013 	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2014 	s_list_itr->fltr_info.vsi_handle = vsi->idx;
2015 	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
2016 			RTE_ETHER_TYPE_LLDP;
2017 	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2018 	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
2019 	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
2020 	LIST_ADD(&s_list_itr->list_entry, &list_head);
2021 	if (on)
2022 		ret = ice_add_eth_mac(hw, &list_head);
2023 	else
2024 		ret = ice_remove_eth_mac(hw, &list_head);
2025 
2026 	rte_free(s_list_itr);
2027 	return ret;
2028 }
2029 
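/* Fetch up to 'num' allocated resource descriptors of 'res_type' from the
 * shared pool via the get_allocd_res_desc AdminQ command, starting at
 * 'desc_id'; the descriptors and their count are returned to the caller.
 */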
2030 static enum ice_status
2031 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
2032 		uint16_t num, uint16_t desc_id,
2033 		uint16_t *prof_buf, uint16_t *num_prof)
2034 {
2035 	struct ice_aqc_get_allocd_res_desc_resp *resp_buf;
2036 	int ret;
2037 	uint16_t buf_len;
2038 	bool res_shared = true;
2039 	struct ice_aq_desc aq_desc;
2040 	struct ice_sq_cd *cd = NULL;
2041 	struct ice_aqc_get_allocd_res_desc *cmd =
2042 			&aq_desc.params.get_res_desc;
2043 
2044 	buf_len = sizeof(resp_buf->elem) * num;
2045 	resp_buf = ice_malloc(hw, buf_len);
2046 	if (!resp_buf)
2047 		return -ENOMEM;
2048 
2049 	ice_fill_dflt_direct_cmd_desc(&aq_desc,
2050 			ice_aqc_opc_get_allocd_res_desc);
2051 
2052 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2053 				ICE_AQC_RES_TYPE_M) | (res_shared ?
2054 				ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2055 	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
2056 
2057 	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
2058 	if (!ret)
2059 		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
2060 	else
2061 		goto exit;
2062 
2063 	ice_memcpy(prof_buf, resp_buf->elem, sizeof(resp_buf->elem) *
2064 			(*num_prof), ICE_NONDMA_TO_NONDMA);
2065 
2066 exit:
2067 	rte_free(resp_buf);
2068 	return ret;
2069 }
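
/* Free every allocated descriptor of the given resource type that a previous
 * driver instance may have left behind.
 */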
2070 static int
2071 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
2072 {
2073 	int ret;
2074 	uint16_t prof_id;
2075 	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
2076 	uint16_t first_desc = 1;
2077 	uint16_t num_prof = 0;
2078 
2079 	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
2080 			first_desc, prof_buf, &num_prof);
2081 	if (ret) {
2082 		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
2083 		return ret;
2084 	}
2085 
2086 	for (prof_id = 0; prof_id < num_prof; prof_id++) {
2087 		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
2088 		if (ret) {
2089 			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
2090 			return ret;
2091 		}
2092 	}
2093 	return 0;
2094 }
2095 
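/* Release leftover flow director and RSS hash profile resources before this
 * driver instance configures its own.
 */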
2096 static int
2097 ice_reset_fxp_resource(struct ice_hw *hw)
2098 {
2099 	int ret;
2100 
2101 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
2102 	if (ret) {
2103 		PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
2104 		return ret;
2105 	}
2106 
2107 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
2108 	if (ret) {
2109 		PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
2110 		return ret;
2111 	}
2112 
2113 	return 0;
2114 }
2115 
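/* Main PF initialization: parse devargs, bring up the HW and control queues,
 * load the DDP package, set up the main VSI, LLDP forwarding and interrupt
 * handling.
 */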
2116 static int
2117 ice_dev_init(struct rte_eth_dev *dev)
2118 {
2119 	struct rte_pci_device *pci_dev;
2120 	struct rte_intr_handle *intr_handle;
2121 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2122 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2123 	struct ice_adapter *ad =
2124 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2125 	struct ice_vsi *vsi;
2126 	int ret;
2127 
2128 	dev->dev_ops = &ice_eth_dev_ops;
2129 	dev->rx_pkt_burst = ice_recv_pkts;
2130 	dev->tx_pkt_burst = ice_xmit_pkts;
2131 	dev->tx_pkt_prepare = ice_prep_pkts;
2132 
2133 	/* for secondary processes, we don't initialise any further as primary
2134 	 * has already done this work.
2135 	 */
2136 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2137 		ice_set_rx_function(dev);
2138 		ice_set_tx_function(dev);
2139 		return 0;
2140 	}
2141 
2142 	ice_set_default_ptype_table(dev);
2143 	pci_dev = RTE_DEV_TO_PCI(dev->device);
2144 	intr_handle = &pci_dev->intr_handle;
2145 
2146 	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2147 	pf->adapter->eth_dev = dev;
2148 	pf->dev_data = dev->data;
2149 	hw->back = pf->adapter;
2150 	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2151 	hw->vendor_id = pci_dev->id.vendor_id;
2152 	hw->device_id = pci_dev->id.device_id;
2153 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2154 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2155 	hw->bus.device = pci_dev->addr.devid;
2156 	hw->bus.func = pci_dev->addr.function;
2157 
2158 	ret = ice_parse_devargs(dev);
2159 	if (ret) {
2160 		PMD_INIT_LOG(ERR, "Failed to parse devargs");
2161 		return -EINVAL;
2162 	}
2163 
2164 	ice_init_controlq_parameter(hw);
2165 
2166 	ret = ice_init_hw(hw);
2167 	if (ret) {
2168 		PMD_INIT_LOG(ERR, "Failed to initialize HW");
2169 		return -EINVAL;
2170 	}
2171 
2172 	ret = ice_load_pkg(dev);
2173 	if (ret) {
2174 		if (ad->devargs.safe_mode_support == 0) {
2175 			PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
2176 					"Use safe-mode-support=1 to enter Safe Mode");
2177 			return ret;
2178 		}
2179 
2180 		PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
2181 					"Entering Safe Mode");
2182 		ad->is_safe_mode = 1;
2183 	}
2184 
2185 	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2186 		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2187 		     hw->api_maj_ver, hw->api_min_ver);
2188 
2189 	ice_pf_sw_init(dev);
2190 	ret = ice_init_mac_address(dev);
2191 	if (ret) {
2192 		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2193 		goto err_init_mac;
2194 	}
2195 
2196 	/* Tell rte_eth_dev_close() that it should also release the
2197 	 * private port resources.
2198 	 */
2199 	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
2200 
2201 	ret = ice_res_pool_init(&pf->msix_pool, 1,
2202 				hw->func_caps.common_cap.num_msix_vectors - 1);
2203 	if (ret) {
2204 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2205 		goto err_msix_pool_init;
2206 	}
2207 
2208 	ret = ice_pf_setup(pf);
2209 	if (ret) {
2210 		PMD_INIT_LOG(ERR, "Failed to setup PF");
2211 		goto err_pf_setup;
2212 	}
2213 
2214 	ret = ice_send_driver_ver(hw);
2215 	if (ret) {
2216 		PMD_INIT_LOG(ERR, "Failed to send driver version");
2217 		goto err_pf_setup;
2218 	}
2219 
2220 	vsi = pf->main_vsi;
2221 
2222 	/* Disable double vlan by default */
2223 	ice_vsi_config_double_vlan(vsi, false);
2224 
2225 	ret = ice_aq_stop_lldp(hw, true, false, NULL);
2226 	if (ret != ICE_SUCCESS)
2227 		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
2228 	ret = ice_init_dcb(hw, true);
2229 	if (ret != ICE_SUCCESS)
2230 		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
2231 	/* Forward LLDP packets to default VSI */
2232 	ret = ice_vsi_config_sw_lldp(vsi, true);
2233 	if (ret != ICE_SUCCESS)
2234 		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
2235 	/* register callback func to eal lib */
2236 	rte_intr_callback_register(intr_handle,
2237 				   ice_interrupt_handler, dev);
2238 
2239 	ice_pf_enable_irq0(hw);
2240 
2241 	/* enable uio intr after callback register */
2242 	rte_intr_enable(intr_handle);
2243 
2244 	/* get the base queue pair index in the device */
2245 	ice_base_queue_get(pf);
2246 
2247 	if (!ad->is_safe_mode) {
2248 		ret = ice_flow_init(ad);
2249 		if (ret) {
2250 			PMD_INIT_LOG(ERR, "Failed to initialize flow");
2251 			return ret;
2252 		}
2253 	}
2254 
2255 	ret = ice_reset_fxp_resource(hw);
2256 	if (ret) {
2257 		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2258 		return ret;
2259 	}
2260 
2261 	return 0;
2262 
2263 err_pf_setup:
2264 	ice_res_pool_destroy(&pf->msix_pool);
2265 err_msix_pool_init:
2266 	rte_free(dev->data->mac_addrs);
2267 	dev->data->mac_addrs = NULL;
2268 err_init_mac:
2269 	ice_sched_cleanup_all(hw);
2270 	rte_free(hw->port_info);
2271 	ice_shutdown_all_ctrlq(hw);
2272 	rte_free(pf->proto_xtr);
2273 
2274 	return ret;
2275 }
2276 
2277 int
2278 ice_release_vsi(struct ice_vsi *vsi)
2279 {
2280 	struct ice_hw *hw;
2281 	struct ice_vsi_ctx vsi_ctx;
2282 	enum ice_status ret;
2283 
2284 	if (!vsi)
2285 		return 0;
2286 
2287 	hw = ICE_VSI_TO_HW(vsi);
2288 
2289 	ice_remove_all_mac_vlan_filters(vsi);
2290 
2291 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2292 
2293 	vsi_ctx.vsi_num = vsi->vsi_id;
2294 	vsi_ctx.info = vsi->info;
2295 	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2296 	if (ret != ICE_SUCCESS) {
2297 		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2298 		rte_free(vsi);
2299 		return -1;
2300 	}
2301 
2302 	rte_free(vsi);
2303 	return 0;
2304 }
2305 
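/* Mask all queue interrupts of the VSI: clear every queue's interrupt cause
 * registers and switch the MSI-X vectors to write-back-on-ITR mode.
 */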
2306 void
2307 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2308 {
2309 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2310 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2311 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2312 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2313 	uint16_t msix_intr, i;
2314 
2315 	/* disable interrupts and also clear all the existing config */
2316 	for (i = 0; i < vsi->nb_qps; i++) {
2317 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2318 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2319 		rte_wmb();
2320 	}
2321 
2322 	if (rte_intr_allow_others(intr_handle))
2323 		/* vfio-pci */
2324 		for (i = 0; i < vsi->nb_msix; i++) {
2325 			msix_intr = vsi->msix_intr + i;
2326 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2327 				      GLINT_DYN_CTL_WB_ON_ITR_M);
2328 		}
2329 	else
2330 		/* igb_uio */
2331 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2332 }
2333 
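/* Stop the port: halt all Rx/Tx queues, mask queue interrupts, restore the
 * initial link state and release the queue/vector mapping.
 */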
2334 static void
2335 ice_dev_stop(struct rte_eth_dev *dev)
2336 {
2337 	struct rte_eth_dev_data *data = dev->data;
2338 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2339 	struct ice_vsi *main_vsi = pf->main_vsi;
2340 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2341 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2342 	uint16_t i;
2343 
2344 	/* avoid stopping again */
2345 	if (pf->adapter_stopped)
2346 		return;
2347 
2348 	/* stop and clear all Rx queues */
2349 	for (i = 0; i < data->nb_rx_queues; i++)
2350 		ice_rx_queue_stop(dev, i);
2351 
2352 	/* stop and clear all Tx queues */
2353 	for (i = 0; i < data->nb_tx_queues; i++)
2354 		ice_tx_queue_stop(dev, i);
2355 
2356 	/* disable all queue interrupts */
2357 	ice_vsi_disable_queues_intr(main_vsi);
2358 
2359 	if (pf->init_link_up)
2360 		ice_dev_set_link_up(dev);
2361 	else
2362 		ice_dev_set_link_down(dev);
2363 
2364 	/* Clean datapath event and queue/vec mapping */
2365 	rte_intr_efd_disable(intr_handle);
2366 	if (intr_handle->intr_vec) {
2367 		rte_free(intr_handle->intr_vec);
2368 		intr_handle->intr_vec = NULL;
2369 	}
2370 
2371 	pf->adapter_stopped = true;
2372 }
2373 
2374 static void
2375 ice_dev_close(struct rte_eth_dev *dev)
2376 {
2377 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2378 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2379 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2380 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2381 	struct ice_adapter *ad =
2382 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2383 
2384 	/* Since stop will bring the link down, a link event will be
2385 	 * triggered; disable irq0 first so that deallocating port_info
2386 	 * and other resources cannot crash the interrupt service
2387 	 * thread.
2388 	 */
2389 	ice_pf_disable_irq0(hw);
2390 
2391 	ice_dev_stop(dev);
2392 
2393 	if (!ad->is_safe_mode)
2394 		ice_flow_uninit(ad);
2395 
2396 	/* release all queue resources */
2397 	ice_free_queues(dev);
2398 
2399 	ice_res_pool_destroy(&pf->msix_pool);
2400 	ice_release_vsi(pf->main_vsi);
2401 	ice_sched_cleanup_all(hw);
2402 	ice_free_hw_tbls(hw);
2403 	rte_free(hw->port_info);
2404 	hw->port_info = NULL;
2405 	ice_shutdown_all_ctrlq(hw);
2406 	rte_free(pf->proto_xtr);
2407 	pf->proto_xtr = NULL;
2408 
2409 	dev->dev_ops = NULL;
2410 	dev->rx_pkt_burst = NULL;
2411 	dev->tx_pkt_burst = NULL;
2412 
2413 	rte_free(dev->data->mac_addrs);
2414 	dev->data->mac_addrs = NULL;
2415 
2416 	/* disable uio intr before callback unregister */
2417 	rte_intr_disable(intr_handle);
2418 
2419 	/* unregister callback func from eal lib */
2420 	rte_intr_callback_unregister(intr_handle,
2421 				     ice_interrupt_handler, dev);
2422 }
2423 
2424 static int
2425 ice_dev_uninit(struct rte_eth_dev *dev)
2426 {
2427 	ice_dev_close(dev);
2428 
2429 	return 0;
2430 }
2431 
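/* Translate the ethdev rss_hf bit-mask into a series of ice_add_rss_cfg()
 * calls, one per supported packet type (IPv4/IPv6, UDP/TCP/SCTP, GTPU,
 * PPPoE).
 */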
2432 static void
2433 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2434 {
2435 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2436 	struct ice_vsi *vsi = pf->main_vsi;
2437 	int ret;
2438 
2439 	/* Configure RSS for IPv4 with src/dst addr as input set */
2440 	if (rss_hf & ETH_RSS_IPV4) {
2441 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
2442 				      ICE_FLOW_SEG_HDR_IPV4 |
2443 				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2444 		if (ret)
2445 			PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
2446 				    __func__, ret);
2447 	}
2448 
2449 	/* Configure RSS for IPv6 with src/dst addr as input set */
2450 	if (rss_hf & ETH_RSS_IPV6) {
2451 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
2452 				      ICE_FLOW_SEG_HDR_IPV6 |
2453 				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2454 		if (ret)
2455 			PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
2456 				    __func__, ret);
2457 	}
2458 
2459 	/* Configure RSS for udp4 with src/dst addr and port as input set */
2460 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2461 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
2462 				      ICE_FLOW_SEG_HDR_UDP |
2463 				      ICE_FLOW_SEG_HDR_IPV4 |
2464 				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2465 		if (ret)
2466 			PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
2467 				    __func__, ret);
2468 	}
2469 
2470 	/* Configure RSS for udp6 with src/dst addr and port as input set */
2471 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2472 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
2473 				      ICE_FLOW_SEG_HDR_UDP |
2474 				      ICE_FLOW_SEG_HDR_IPV6 |
2475 				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2476 		if (ret)
2477 			PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
2478 				    __func__, ret);
2479 	}
2480 
2481 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
2482 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2483 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
2484 				      ICE_FLOW_SEG_HDR_TCP |
2485 				      ICE_FLOW_SEG_HDR_IPV4 |
2486 				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2487 		if (ret)
2488 			PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
2489 				    __func__, ret);
2490 	}
2491 
2492 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
2493 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2494 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
2495 				      ICE_FLOW_SEG_HDR_TCP |
2496 				      ICE_FLOW_SEG_HDR_IPV6 |
2497 				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2498 		if (ret)
2499 			PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
2500 				    __func__, ret);
2501 	}
2502 
2503 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
2504 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
2505 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
2506 				      ICE_FLOW_SEG_HDR_SCTP |
2507 				      ICE_FLOW_SEG_HDR_IPV4 |
2508 				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2509 		if (ret)
2510 			PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
2511 				    __func__, ret);
2512 	}
2513 
2514 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
2515 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
2516 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
2517 				      ICE_FLOW_SEG_HDR_SCTP |
2518 				      ICE_FLOW_SEG_HDR_IPV6 |
2519 				      ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2520 		if (ret)
2521 			PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
2522 				    __func__, ret);
2523 	}
2524 
2525 	if (rss_hf & ETH_RSS_IPV4) {
2526 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
2527 				ICE_FLOW_SEG_HDR_GTPU_IP |
2528 				ICE_FLOW_SEG_HDR_IPV4 |
2529 				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2530 		if (ret)
2531 			PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d",
2532 				    __func__, ret);
2533 
2534 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
2535 				ICE_FLOW_SEG_HDR_GTPU_EH |
2536 				ICE_FLOW_SEG_HDR_IPV4 |
2537 				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2538 		if (ret)
2539 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
2540 				    __func__, ret);
2541 
2542 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
2543 				ICE_FLOW_SEG_HDR_PPPOE |
2544 				ICE_FLOW_SEG_HDR_IPV4 |
2545 				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2546 		if (ret)
2547 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
2548 				    __func__, ret);
2549 	}
2550 
2551 	if (rss_hf & ETH_RSS_IPV6) {
2552 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
2553 				ICE_FLOW_SEG_HDR_GTPU_IP |
2554 				ICE_FLOW_SEG_HDR_IPV6 |
2555 				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2556 		if (ret)
2557 			PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d",
2558 				    __func__, ret);
2559 
2560 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
2561 				ICE_FLOW_SEG_HDR_GTPU_EH |
2562 				ICE_FLOW_SEG_HDR_IPV6 |
2563 				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2564 		if (ret)
2565 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
2566 				    __func__, ret);
2567 
2568 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
2569 				ICE_FLOW_SEG_HDR_PPPOE |
2570 				ICE_FLOW_SEG_HDR_IPV6 |
2571 				ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
2572 		if (ret)
2573 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
2574 				    __func__, ret);
2575 	}
2576 
2577 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2578 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
2579 				ICE_FLOW_SEG_HDR_GTPU_IP, 0);
2580 		if (ret)
2581 			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d",
2582 				    __func__, ret);
2583 
2584 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
2585 				ICE_FLOW_SEG_HDR_GTPU_EH, 0);
2586 		if (ret)
2587 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
2588 				    __func__, ret);
2589 
2590 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
2591 				ICE_FLOW_SEG_HDR_PPPOE, 0);
2592 		if (ret)
2593 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
2594 				    __func__, ret);
2595 	}
2596 
2597 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2598 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
2599 				ICE_FLOW_SEG_HDR_GTPU_IP, 0);
2600 		if (ret)
2601 			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d",
2602 				    __func__, ret);
2603 
2604 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
2605 				ICE_FLOW_SEG_HDR_GTPU_EH, 0);
2606 		if (ret)
2607 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
2608 				    __func__, ret);
2609 
2610 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
2611 				ICE_FLOW_SEG_HDR_PPPOE, 0);
2612 		if (ret)
2613 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
2614 				    __func__, ret);
2615 	}
2616 
2617 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2618 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
2619 				ICE_FLOW_SEG_HDR_GTPU_IP, 0);
2620 		if (ret)
2621 			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d",
2622 				    __func__, ret);
2623 
2624 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
2625 				ICE_FLOW_SEG_HDR_GTPU_EH, 0);
2626 		if (ret)
2627 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
2628 				    __func__, ret);
2629 
2630 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
2631 				ICE_FLOW_SEG_HDR_PPPOE, 0);
2632 		if (ret)
2633 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
2634 				    __func__, ret);
2635 	}
2636 
2637 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2638 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
2639 				ICE_FLOW_SEG_HDR_GTPU_IP, 0);
2640 		if (ret)
2641 			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d",
2642 				    __func__, ret);
2643 
2644 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
2645 				ICE_FLOW_SEG_HDR_GTPU_EH, 0);
2646 		if (ret)
2647 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
2648 				    __func__, ret);
2649 
2650 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
2651 				ICE_FLOW_SEG_HDR_PPPOE, 0);
2652 		if (ret)
2653 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
2654 				    __func__, ret);
2655 	}
2656 
2657 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
2658 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV4,
2659 				ICE_FLOW_SEG_HDR_GTPU_IP, 0);
2660 		if (ret)
2661 			PMD_DRV_LOG(ERR, "%s GTPU_IPV4_SCTP rss flow fail %d",
2662 				    __func__, ret);
2663 
2664 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV4,
2665 				ICE_FLOW_SEG_HDR_GTPU_EH, 0);
2666 		if (ret)
2667 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_SCTP rss flow fail %d",
2668 				    __func__, ret);
2669 	}
2670 
2671 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
2672 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV6,
2673 				ICE_FLOW_SEG_HDR_GTPU_IP, 0);
2674 		if (ret)
2675 			PMD_DRV_LOG(ERR, "%s GTPU_IPV6_SCTP rss flow fail %d",
2676 				    __func__, ret);
2677 
2678 		ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV6,
2679 				ICE_FLOW_SEG_HDR_GTPU_EH, 0);
2680 		if (ret)
2681 			PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_SCTP rss flow fail %d",
2682 				    __func__, ret);
2683 	}
2684 }
2685 
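/* Program the RSS key, the lookup table (queue index modulo the number of
 * Rx queues), the symmetric Toeplitz hash scheme and the per-type hash
 * configuration for the main VSI; skipped in safe mode.
 */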
2686 static int ice_init_rss(struct ice_pf *pf)
2687 {
2688 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2689 	struct ice_vsi *vsi = pf->main_vsi;
2690 	struct rte_eth_dev *dev = pf->adapter->eth_dev;
2691 	struct rte_eth_rss_conf *rss_conf;
2692 	struct ice_aqc_get_set_rss_keys key;
2693 	uint16_t i, nb_q;
2694 	int ret = 0;
2695 	bool is_safe_mode = pf->adapter->is_safe_mode;
2696 	uint32_t reg;
2697 
2698 	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
2699 	nb_q = dev->data->nb_rx_queues;
2700 	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
2701 	vsi->rss_lut_size = pf->hash_lut_size;
2702 
2703 	if (is_safe_mode) {
2704 		PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
2705 		return 0;
2706 	}
2707 
2708 	if (!vsi->rss_key)
2709 		vsi->rss_key = rte_zmalloc(NULL,
2710 					   vsi->rss_key_size, 0);
2711 	if (!vsi->rss_lut)
2712 		vsi->rss_lut = rte_zmalloc(NULL,
2713 					   vsi->rss_lut_size, 0);
2714 
2715 	/* configure RSS key */
2716 	if (!rss_conf->rss_key) {
2717 		/* Generate a random default hash key */
2718 		for (i = 0; i < vsi->rss_key_size; i++)
2719 			vsi->rss_key[i] = (uint8_t)rte_rand();
2720 	} else {
2721 		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
2722 			   RTE_MIN(rss_conf->rss_key_len,
2723 				   vsi->rss_key_size));
2724 	}
2725 	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
2726 	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
2727 	if (ret)
2728 		return -EINVAL;
2729 
2730 	/* init RSS LUT table */
2731 	for (i = 0; i < vsi->rss_lut_size; i++)
2732 		vsi->rss_lut[i] = i % nb_q;
2733 
2734 	ret = ice_aq_set_rss_lut(hw, vsi->idx,
2735 				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
2736 				 vsi->rss_lut, vsi->rss_lut_size);
2737 	if (ret)
2738 		return -EINVAL;
2739 
2740 	/* Enable registers for symmetric_toeplitz function. */
2741 	reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
2742 	reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
2743 		(1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
2744 	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
2745 
2746 	/* RSS hash configuration */
2747 	ice_rss_hash_set(pf, rss_conf->rss_hf);
2748 
2749 	return 0;
2750 }
2751 
2752 static int
2753 ice_dev_configure(struct rte_eth_dev *dev)
2754 {
2755 	struct ice_adapter *ad =
2756 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2757 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2758 	int ret;
2759 
2760 	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
2761 	 * bulk allocation or vector Rx preconditions, we will reset it.
2762 	 */
2763 	ad->rx_bulk_alloc_allowed = true;
2764 	ad->tx_simple_allowed = true;
2765 
2766 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
2767 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
2768 
2769 	ret = ice_init_rss(pf);
2770 	if (ret) {
2771 		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
2772 		return ret;
2773 	}
2774 
2775 	return 0;
2776 }
2777 
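/* Bind 'nb_queue' consecutive queue pairs, starting at 'base_queue', to the
 * given MSI-X vector and enable their interrupt causes.
 */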
2778 static void
2779 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
2780 		       int base_queue, int nb_queue)
2781 {
2782 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2783 	uint32_t val, val_tx;
2784 	int i;
2785 
2786 	for (i = 0; i < nb_queue; i++) {
2787 		/* do actual bind */
2788 		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
2789 		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
2790 		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
2791 			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
2792 
2793 		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
2794 			    base_queue + i, msix_vect);
2795 		/* set ITR0 value */
2796 		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
2797 		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
2798 		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
2799 	}
2800 }
2801 
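/* Map the VSI's used queues to MSI-X vectors: 1:1 when enough vectors are
 * available (vfio), otherwise all queues share a single vector (uio).
 */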
2802 void
2803 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
2804 {
2805 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2806 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2807 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2808 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2809 	uint16_t msix_vect = vsi->msix_intr;
2810 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2811 	uint16_t queue_idx = 0;
2812 	int record = 0;
2813 	int i;
2814 
2815 	/* clear Rx/Tx queue interrupt */
2816 	for (i = 0; i < vsi->nb_used_qps; i++) {
2817 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2818 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2819 	}
2820 
2821 	/* PF bind interrupt */
2822 	if (rte_intr_dp_is_en(intr_handle)) {
2823 		queue_idx = 0;
2824 		record = 1;
2825 	}
2826 
2827 	for (i = 0; i < vsi->nb_used_qps; i++) {
2828 		if (nb_msix <= 1) {
2829 			if (!rte_intr_allow_others(intr_handle))
2830 				msix_vect = ICE_MISC_VEC_ID;
2831 
2832 			/* uio: map all queues to one msix_vect */
2833 			__vsi_queues_bind_intr(vsi, msix_vect,
2834 					       vsi->base_queue + i,
2835 					       vsi->nb_used_qps - i);
2836 
2837 			for (; !!record && i < vsi->nb_used_qps; i++)
2838 				intr_handle->intr_vec[queue_idx + i] =
2839 					msix_vect;
2840 			break;
2841 		}
2842 
2843 		/* vfio 1:1 queue/msix_vect mapping */
2844 		__vsi_queues_bind_intr(vsi, msix_vect,
2845 				       vsi->base_queue + i, 1);
2846 
2847 		if (!!record)
2848 			intr_handle->intr_vec[queue_idx + i] = msix_vect;
2849 
2850 		msix_vect++;
2851 		nb_msix--;
2852 	}
2853 }
2854 
2855 void
2856 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
2857 {
2858 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2859 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2860 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2861 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2862 	uint16_t msix_intr, i;
2863 
2864 	if (rte_intr_allow_others(intr_handle))
2865 		for (i = 0; i < vsi->nb_used_qps; i++) {
2866 			msix_intr = vsi->msix_intr + i;
2867 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2868 				      GLINT_DYN_CTL_INTENA_M |
2869 				      GLINT_DYN_CTL_CLEARPBA_M |
2870 				      GLINT_DYN_CTL_ITR_INDX_M |
2871 				      GLINT_DYN_CTL_WB_ON_ITR_M);
2872 		}
2873 	else
2874 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
2875 			      GLINT_DYN_CTL_INTENA_M |
2876 			      GLINT_DYN_CTL_CLEARPBA_M |
2877 			      GLINT_DYN_CTL_ITR_INDX_M |
2878 			      GLINT_DYN_CTL_WB_ON_ITR_M);
2879 }
2880 
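/* Set up Rx queue interrupts: size the event fds, allocate the
 * queue-to-vector map, bind the queues to MSI-X vectors and enable them.
 */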
2881 static int
2882 ice_rxq_intr_setup(struct rte_eth_dev *dev)
2883 {
2884 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2885 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2886 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2887 	struct ice_vsi *vsi = pf->main_vsi;
2888 	uint32_t intr_vector = 0;
2889 
2890 	rte_intr_disable(intr_handle);
2891 
2892 	/* check and configure queue intr-vector mapping */
2893 	if ((rte_intr_cap_multiple(intr_handle) ||
2894 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
2895 	    dev->data->dev_conf.intr_conf.rxq != 0) {
2896 		intr_vector = dev->data->nb_rx_queues;
2897 		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
2898 			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
2899 				    ICE_MAX_INTR_QUEUE_NUM);
2900 			return -ENOTSUP;
2901 		}
2902 		if (rte_intr_efd_enable(intr_handle, intr_vector))
2903 			return -1;
2904 	}
2905 
2906 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2907 		intr_handle->intr_vec =
2908 		rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
2909 			    0);
2910 		if (!intr_handle->intr_vec) {
2911 			PMD_DRV_LOG(ERR,
2912 				    "Failed to allocate %d rx_queues intr_vec",
2913 				    dev->data->nb_rx_queues);
2914 			return -ENOMEM;
2915 		}
2916 	}
2917 
2918 	/* Map queues with MSIX interrupt */
2919 	vsi->nb_used_qps = dev->data->nb_rx_queues;
2920 	ice_vsi_queues_bind_intr(vsi);
2921 
2922 	/* Enable interrupts for all the queues */
2923 	ice_vsi_enable_queues_intr(vsi);
2924 
2925 	rte_intr_enable(intr_handle);
2926 
2927 	return 0;
2928 }
2929 
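/* Query the initial link state from firmware and record it in
 * pf->init_link_up.
 */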
2930 static void
2931 ice_get_init_link_status(struct rte_eth_dev *dev)
2932 {
2933 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2934 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2935 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2936 	struct ice_link_status link_status;
2937 	int ret;
2938 
2939 	ret = ice_aq_get_link_info(hw->port_info, enable_lse,
2940 				   &link_status, NULL);
2941 	if (ret != ICE_SUCCESS) {
2942 		PMD_DRV_LOG(ERR, "Failed to get link info");
2943 		pf->init_link_up = false;
2944 		return;
2945 	}
2946 
2947 	if (link_status.link_info & ICE_AQ_LINK_UP)
2948 		pf->init_link_up = true;
2949 }
2950 
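/* Start the port: start all Tx and Rx queues, select the Rx/Tx burst
 * functions, apply VLAN offloads, set up Rx interrupts, enable broadcast,
 * configure the link event mask and bring the link up.
 */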
2951 static int
2952 ice_dev_start(struct rte_eth_dev *dev)
2953 {
2954 	struct rte_eth_dev_data *data = dev->data;
2955 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2956 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2957 	struct ice_vsi *vsi = pf->main_vsi;
2958 	uint16_t nb_rxq = 0;
2959 	uint16_t nb_txq, i;
2960 	uint16_t max_frame_size;
2961 	int mask, ret;
2962 
2963 	/* program Tx queues' context in hardware */
2964 	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
2965 		ret = ice_tx_queue_start(dev, nb_txq);
2966 		if (ret) {
2967 			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
2968 			goto tx_err;
2969 		}
2970 	}
2971 
2972 	/* program Rx queues' context in hardware */
2973 	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
2974 		ret = ice_rx_queue_start(dev, nb_rxq);
2975 		if (ret) {
2976 			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
2977 			goto rx_err;
2978 		}
2979 	}
2980 
2981 	ice_set_rx_function(dev);
2982 	ice_set_tx_function(dev);
2983 
2984 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2985 			ETH_VLAN_EXTEND_MASK;
2986 	ret = ice_vlan_offload_set(dev, mask);
2987 	if (ret) {
2988 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
2989 		goto rx_err;
2990 	}
2991 
2992 	/* enable Rx interrupt and map Rx queues to interrupt vectors */
2993 	if (ice_rxq_intr_setup(dev))
2994 		return -EIO;
2995 
2996 	/* Enable receiving broadcast packets and transmitting packets */
2997 	ret = ice_set_vsi_promisc(hw, vsi->idx,
2998 				  ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
2999 				  ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3000 				  0);
3001 	if (ret != ICE_SUCCESS)
3002 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
3003 
3004 	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3005 				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3006 				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3007 				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3008 				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3009 				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
3010 				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3011 				     NULL);
3012 	if (ret != ICE_SUCCESS)
3013 		PMD_DRV_LOG(WARNING, "Fail to set phy mask");
3014 
3015 	ice_get_init_link_status(dev);
3016 
3017 	ice_dev_set_link_up(dev);
3018 
3019 	/* Call the get_link_info aq command to enable/disable LSE */
3020 	ice_link_update(dev, 0);
3021 
3022 	pf->adapter_stopped = false;
3023 
3024 	/* Set the max frame size to the default value */
3025 	max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
3026 		pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
3027 		ICE_FRAME_SIZE_MAX;
3028 
3029 	/* Set the max frame size to HW */
3030 	ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3031 
3032 	return 0;
3033 
3034 	/* stop the started queues if we failed to start all queues */
3035 rx_err:
3036 	for (i = 0; i < nb_rxq; i++)
3037 		ice_rx_queue_stop(dev, i);
3038 tx_err:
3039 	for (i = 0; i < nb_txq; i++)
3040 		ice_tx_queue_stop(dev, i);
3041 
3042 	return -EIO;
3043 }
3044 
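/* Device reset is implemented as uninit followed by init; it is not
 * supported while SR-IOV is active.
 */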
3045 static int
3046 ice_dev_reset(struct rte_eth_dev *dev)
3047 {
3048 	int ret;
3049 
3050 	if (dev->data->sriov.active)
3051 		return -ENOTSUP;
3052 
3053 	ret = ice_dev_uninit(dev);
3054 	if (ret) {
3055 		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3056 		return -ENXIO;
3057 	}
3058 
3059 	ret = ice_dev_init(dev);
3060 	if (ret) {
3061 		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3062 		return -ENXIO;
3063 	}
3064 
3065 	return 0;
3066 }
3067 
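/* Report device capabilities: queue and MAC limits, offload capabilities
 * (reduced in safe mode), default ring/burst parameters and supported link
 * speeds.
 */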
3068 static int
3069 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3070 {
3071 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3072 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3073 	struct ice_vsi *vsi = pf->main_vsi;
3074 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3075 	bool is_safe_mode = pf->adapter->is_safe_mode;
3076 	u64 phy_type_low;
3077 	u64 phy_type_high;
3078 
3079 	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3080 	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3081 	dev_info->max_rx_queues = vsi->nb_qps;
3082 	dev_info->max_tx_queues = vsi->nb_qps;
3083 	dev_info->max_mac_addrs = vsi->max_macaddrs;
3084 	dev_info->max_vfs = pci_dev->max_vfs;
3085 	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3086 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3087 
3088 	dev_info->rx_offload_capa =
3089 		DEV_RX_OFFLOAD_VLAN_STRIP |
3090 		DEV_RX_OFFLOAD_JUMBO_FRAME |
3091 		DEV_RX_OFFLOAD_KEEP_CRC |
3092 		DEV_RX_OFFLOAD_SCATTER |
3093 		DEV_RX_OFFLOAD_VLAN_FILTER;
3094 	dev_info->tx_offload_capa =
3095 		DEV_TX_OFFLOAD_VLAN_INSERT |
3096 		DEV_TX_OFFLOAD_TCP_TSO |
3097 		DEV_TX_OFFLOAD_MULTI_SEGS |
3098 		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3099 	dev_info->flow_type_rss_offloads = 0;
3100 
3101 	if (!is_safe_mode) {
3102 		dev_info->rx_offload_capa |=
3103 			DEV_RX_OFFLOAD_IPV4_CKSUM |
3104 			DEV_RX_OFFLOAD_UDP_CKSUM |
3105 			DEV_RX_OFFLOAD_TCP_CKSUM |
3106 			DEV_RX_OFFLOAD_QINQ_STRIP |
3107 			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3108 			DEV_RX_OFFLOAD_VLAN_EXTEND |
3109 			DEV_RX_OFFLOAD_RSS_HASH;
3110 		dev_info->tx_offload_capa |=
3111 			DEV_TX_OFFLOAD_QINQ_INSERT |
3112 			DEV_TX_OFFLOAD_IPV4_CKSUM |
3113 			DEV_TX_OFFLOAD_UDP_CKSUM |
3114 			DEV_TX_OFFLOAD_TCP_CKSUM |
3115 			DEV_TX_OFFLOAD_SCTP_CKSUM |
3116 			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3117 			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
3118 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3119 	}
3120 
3121 	dev_info->rx_queue_offload_capa = 0;
3122 	dev_info->tx_queue_offload_capa = 0;
3123 
3124 	dev_info->reta_size = pf->hash_lut_size;
3125 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3126 
3127 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3128 		.rx_thresh = {
3129 			.pthresh = ICE_DEFAULT_RX_PTHRESH,
3130 			.hthresh = ICE_DEFAULT_RX_HTHRESH,
3131 			.wthresh = ICE_DEFAULT_RX_WTHRESH,
3132 		},
3133 		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3134 		.rx_drop_en = 0,
3135 		.offloads = 0,
3136 	};
3137 
3138 	dev_info->default_txconf = (struct rte_eth_txconf) {
3139 		.tx_thresh = {
3140 			.pthresh = ICE_DEFAULT_TX_PTHRESH,
3141 			.hthresh = ICE_DEFAULT_TX_HTHRESH,
3142 			.wthresh = ICE_DEFAULT_TX_WTHRESH,
3143 		},
3144 		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3145 		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3146 		.offloads = 0,
3147 	};
3148 
3149 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3150 		.nb_max = ICE_MAX_RING_DESC,
3151 		.nb_min = ICE_MIN_RING_DESC,
3152 		.nb_align = ICE_ALIGN_RING_DESC,
3153 	};
3154 
3155 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3156 		.nb_max = ICE_MAX_RING_DESC,
3157 		.nb_min = ICE_MIN_RING_DESC,
3158 		.nb_align = ICE_ALIGN_RING_DESC,
3159 	};
3160 
3161 	dev_info->speed_capa = ETH_LINK_SPEED_10M |
3162 			       ETH_LINK_SPEED_100M |
3163 			       ETH_LINK_SPEED_1G |
3164 			       ETH_LINK_SPEED_2_5G |
3165 			       ETH_LINK_SPEED_5G |
3166 			       ETH_LINK_SPEED_10G |
3167 			       ETH_LINK_SPEED_20G |
3168 			       ETH_LINK_SPEED_25G;
3169 
3170 	phy_type_low = hw->port_info->phy.phy_type_low;
3171 	phy_type_high = hw->port_info->phy.phy_type_high;
3172 
3173 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3174 		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
3175 
3176 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3177 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3178 		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
3179 
3180 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3181 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3182 
3183 	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3184 	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3185 	dev_info->default_rxportconf.nb_queues = 1;
3186 	dev_info->default_txportconf.nb_queues = 1;
3187 	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3188 	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3189 
3190 	return 0;
3191 }
3192 
3193 static inline int
3194 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3195 			    struct rte_eth_link *link)
3196 {
3197 	struct rte_eth_link *dst = link;
3198 	struct rte_eth_link *src = &dev->data->dev_link;
3199 
3200 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3201 				*(uint64_t *)src) == 0)
3202 		return -1;
3203 
3204 	return 0;
3205 }
3206 
3207 static inline int
3208 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3209 			     struct rte_eth_link *link)
3210 {
3211 	struct rte_eth_link *dst = &dev->data->dev_link;
3212 	struct rte_eth_link *src = link;
3213 
3214 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3215 				*(uint64_t *)src) == 0)
3216 		return -1;
3217 
3218 	return 0;
3219 }
3220 
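/* Refresh the ethdev link status from firmware, optionally polling up to
 * MAX_REPEAT_TIME * CHECK_INTERVAL ms for the link to come up. Returns 0
 * when the reported status changed, -1 otherwise.
 */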
3221 static int
3222 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3223 {
3224 #define CHECK_INTERVAL 100  /* 100ms */
3225 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
3226 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3227 	struct ice_link_status link_status;
3228 	struct rte_eth_link link, old;
3229 	int status;
3230 	unsigned int rep_cnt = MAX_REPEAT_TIME;
3231 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3232 
3233 	memset(&link, 0, sizeof(link));
3234 	memset(&old, 0, sizeof(old));
3235 	memset(&link_status, 0, sizeof(link_status));
3236 	ice_atomic_read_link_status(dev, &old);
3237 
3238 	do {
3239 		/* Get link status information from hardware */
3240 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
3241 					      &link_status, NULL);
3242 		if (status != ICE_SUCCESS) {
3243 			link.link_speed = ETH_SPEED_NUM_100M;
3244 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
3245 			PMD_DRV_LOG(ERR, "Failed to get link info");
3246 			goto out;
3247 		}
3248 
3249 		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3250 		if (!wait_to_complete || link.link_status)
3251 			break;
3252 
3253 		rte_delay_ms(CHECK_INTERVAL);
3254 	} while (--rep_cnt);
3255 
3256 	if (!link.link_status)
3257 		goto out;
3258 
3259 	/* Full-duplex operation at all supported speeds */
3260 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
3261 
3262 	/* Parse the link status */
3263 	switch (link_status.link_speed) {
3264 	case ICE_AQ_LINK_SPEED_10MB:
3265 		link.link_speed = ETH_SPEED_NUM_10M;
3266 		break;
3267 	case ICE_AQ_LINK_SPEED_100MB:
3268 		link.link_speed = ETH_SPEED_NUM_100M;
3269 		break;
3270 	case ICE_AQ_LINK_SPEED_1000MB:
3271 		link.link_speed = ETH_SPEED_NUM_1G;
3272 		break;
3273 	case ICE_AQ_LINK_SPEED_2500MB:
3274 		link.link_speed = ETH_SPEED_NUM_2_5G;
3275 		break;
3276 	case ICE_AQ_LINK_SPEED_5GB:
3277 		link.link_speed = ETH_SPEED_NUM_5G;
3278 		break;
3279 	case ICE_AQ_LINK_SPEED_10GB:
3280 		link.link_speed = ETH_SPEED_NUM_10G;
3281 		break;
3282 	case ICE_AQ_LINK_SPEED_20GB:
3283 		link.link_speed = ETH_SPEED_NUM_20G;
3284 		break;
3285 	case ICE_AQ_LINK_SPEED_25GB:
3286 		link.link_speed = ETH_SPEED_NUM_25G;
3287 		break;
3288 	case ICE_AQ_LINK_SPEED_40GB:
3289 		link.link_speed = ETH_SPEED_NUM_40G;
3290 		break;
3291 	case ICE_AQ_LINK_SPEED_50GB:
3292 		link.link_speed = ETH_SPEED_NUM_50G;
3293 		break;
3294 	case ICE_AQ_LINK_SPEED_100GB:
3295 		link.link_speed = ETH_SPEED_NUM_100G;
3296 		break;
3297 	case ICE_AQ_LINK_SPEED_UNKNOWN:
3298 	default:
3299 		PMD_DRV_LOG(ERR, "Unknown link speed");
3300 		link.link_speed = ETH_SPEED_NUM_NONE;
3301 		break;
3302 	}
3303 
3304 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3305 			      ETH_LINK_SPEED_FIXED);
3306 
3307 out:
3308 	ice_atomic_write_link_status(dev, &link);
3309 	if (link.link_status == old.link_status)
3310 		return -1;
3311 
3312 	return 0;
3313 }
3314 
3315 /* Force the physical link state by getting the current PHY capabilities from
3316  * hardware and setting the PHY config based on the determined capabilities. If
3317  * link changes, link event will be triggered because both the Enable Automatic
3318  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3319  */
3320 static enum ice_status
3321 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3322 {
3323 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3324 	struct ice_aqc_get_phy_caps_data *pcaps;
3325 	struct ice_port_info *pi;
3326 	enum ice_status status;
3327 
3328 	if (!hw || !hw->port_info)
3329 		return ICE_ERR_PARAM;
3330 
3331 	pi = hw->port_info;
3332 
3333 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3334 		ice_malloc(hw, sizeof(*pcaps));
3335 	if (!pcaps)
3336 		return ICE_ERR_NO_MEMORY;
3337 
3338 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
3339 				     NULL);
3340 	if (status)
3341 		goto out;
3342 
3343 	/* No change in link */
3344 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3345 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3346 		goto out;
3347 
3348 	cfg.phy_type_low = pcaps->phy_type_low;
3349 	cfg.phy_type_high = pcaps->phy_type_high;
3350 	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3351 	cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3352 	cfg.eee_cap = pcaps->eee_cap;
3353 	cfg.eeer_value = pcaps->eeer_value;
3354 	cfg.link_fec_opt = pcaps->link_fec_options;
3355 	if (link_up)
3356 		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3357 	else
3358 		cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3359 
3360 	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3361 
3362 out:
3363 	ice_free(hw, pcaps);
3364 	return status;
3365 }
3366 
3367 static int
3368 ice_dev_set_link_up(struct rte_eth_dev *dev)
3369 {
3370 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3371 
3372 	return ice_force_phys_link_state(hw, true);
3373 }
3374 
3375 static int
3376 ice_dev_set_link_down(struct rte_eth_dev *dev)
3377 {
3378 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3379 
3380 	return ice_force_phys_link_state(hw, false);
3381 }
3382 
3383 static int
3384 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3385 {
3386 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3387 	struct rte_eth_dev_data *dev_data = pf->dev_data;
3388 	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3389 
3390 	/* check if mtu is within the allowed range */
3391 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
3392 		return -EINVAL;
3393 
3394 	/* mtu setting is forbidden if the port is started */
3395 	if (dev_data->dev_started) {
3396 		PMD_DRV_LOG(ERR,
3397 			    "port %d must be stopped before configuration",
3398 			    dev_data->port_id);
3399 		return -EBUSY;
3400 	}
3401 
3402 	if (frame_size > RTE_ETHER_MAX_LEN)
3403 		dev_data->dev_conf.rxmode.offloads |=
3404 			DEV_RX_OFFLOAD_JUMBO_FRAME;
3405 	else
3406 		dev_data->dev_conf.rxmode.offloads &=
3407 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
3408 
3409 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3410 
3411 	return 0;
3412 }
3413 
3414 static int ice_macaddr_set(struct rte_eth_dev *dev,
3415 			   struct rte_ether_addr *mac_addr)
3416 {
3417 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3418 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3419 	struct ice_vsi *vsi = pf->main_vsi;
3420 	struct ice_mac_filter *f;
3421 	uint8_t flags = 0;
3422 	int ret;
3423 
3424 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
3425 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
3426 		return -EINVAL;
3427 	}
3428 
3429 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
3430 		if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
3431 			break;
3432 	}
3433 
3434 	if (!f) {
3435 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
3436 		return -EIO;
3437 	}
3438 
3439 	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
3440 	if (ret != ICE_SUCCESS) {
3441 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
3442 		return -EIO;
3443 	}
3444 	ret = ice_add_mac_filter(vsi, mac_addr);
3445 	if (ret != ICE_SUCCESS) {
3446 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
3447 		return -EIO;
3448 	}
3449 	rte_ether_addr_copy(mac_addr, &pf->dev_addr);
3450 
3451 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3452 	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
3453 	if (ret != ICE_SUCCESS)
3454 		PMD_DRV_LOG(ERR, "Failed to set manage mac");
3455 
3456 	return 0;
3457 }
3458 
3459 /* Add a MAC address, and update filters */
3460 static int
3461 ice_macaddr_add(struct rte_eth_dev *dev,
3462 		struct rte_ether_addr *mac_addr,
3463 		__rte_unused uint32_t index,
3464 		__rte_unused uint32_t pool)
3465 {
3466 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3467 	struct ice_vsi *vsi = pf->main_vsi;
3468 	int ret;
3469 
3470 	ret = ice_add_mac_filter(vsi, mac_addr);
3471 	if (ret != ICE_SUCCESS) {
3472 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
3473 		return -EINVAL;
3474 	}
3475 
3476 	return ICE_SUCCESS;
3477 }
3478 
3479 /* Remove a MAC address, and update filters */
3480 static void
3481 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3482 {
3483 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3484 	struct ice_vsi *vsi = pf->main_vsi;
3485 	struct rte_eth_dev_data *data = dev->data;
3486 	struct rte_ether_addr *macaddr;
3487 	int ret;
3488 
3489 	macaddr = &data->mac_addrs[index];
3490 	ret = ice_remove_mac_filter(vsi, macaddr);
3491 	if (ret) {
3492 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
3493 		return;
3494 	}
3495 }
3496 
3497 static int
3498 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3499 {
3500 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3501 	struct ice_vsi *vsi = pf->main_vsi;
3502 	int ret;
3503 
3504 	PMD_INIT_FUNC_TRACE();
3505 
3506 	if (on) {
3507 		ret = ice_add_vlan_filter(vsi, vlan_id);
3508 		if (ret < 0) {
3509 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
3510 			return -EINVAL;
3511 		}
3512 	} else {
3513 		ret = ice_remove_vlan_filter(vsi, vlan_id);
3514 		if (ret < 0) {
3515 			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
3516 			return -EINVAL;
3517 		}
3518 	}
3519 
3520 	return 0;
3521 }
3522 
3523 /* Configure vlan filter on or off */
3524 static int
3525 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
3526 {
3527 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3528 	struct ice_vsi_ctx ctxt;
3529 	uint8_t sec_flags, sw_flags2;
3530 	int ret = 0;
3531 
3532 	sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3533 		    ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
3534 	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3535 
3536 	if (on) {
3537 		vsi->info.sec_flags |= sec_flags;
3538 		vsi->info.sw_flags2 |= sw_flags2;
3539 	} else {
3540 		vsi->info.sec_flags &= ~sec_flags;
3541 		vsi->info.sw_flags2 &= ~sw_flags2;
3542 	}
3543 	vsi->info.sw_id = hw->port_info->sw_id;
3544 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3545 	ctxt.info.valid_sections =
3546 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3547 				 ICE_AQ_VSI_PROP_SECURITY_VALID);
3548 	ctxt.vsi_num = vsi->vsi_id;
3549 
3550 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3551 	if (ret) {
3552 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
3553 			    on ? "enable" : "disable");
3554 		return -EINVAL;
3555 	} else {
3556 		vsi->info.valid_sections |=
3557 			rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3558 					 ICE_AQ_VSI_PROP_SECURITY_VALID);
3559 	}
3560 
3561 	/* Consistent with other drivers: allow untagged packets when VLAN filtering is on */
3562 	if (on)
3563 		ret = ice_add_vlan_filter(vsi, 0);
3564 	else
3565 		ret = ice_remove_vlan_filter(vsi, 0);
3566 
3567 	return 0;
3568 }
3569 
3570 static int
3571 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
3572 {
3573 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3574 	struct ice_vsi_ctx ctxt;
3575 	uint8_t vlan_flags;
3576 	int ret = 0;
3577 
3578 	/* Check if it has been already on or off */
3579 	if (vsi->info.valid_sections &
3580 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
3581 		if (on) {
3582 			if ((vsi->info.vlan_flags &
3583 			     ICE_AQ_VSI_VLAN_EMOD_M) ==
3584 			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
3585 				return 0; /* already on */
3586 		} else {
3587 			if ((vsi->info.vlan_flags &
3588 			     ICE_AQ_VSI_VLAN_EMOD_M) ==
3589 			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
3590 				return 0; /* already off */
3591 		}
3592 	}
3593 
3594 	if (on)
3595 		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
3596 	else
3597 		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3598 	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
3599 	vsi->info.vlan_flags |= vlan_flags;
3600 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3601 	ctxt.info.valid_sections =
3602 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3603 	ctxt.vsi_num = vsi->vsi_id;
3604 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3605 	if (ret) {
3606 		PMD_DRV_LOG(INFO, "Failed to %s VLAN stripping when updating VSI",
3607 			    on ? "enable" : "disable");
3608 		return -EINVAL;
3609 	}
3610 
3611 	vsi->info.valid_sections |=
3612 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3613 
3614 	return ret;
3615 }
3616 
3617 static int
3618 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3619 {
3620 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3621 	struct ice_vsi *vsi = pf->main_vsi;
3622 	struct rte_eth_rxmode *rxmode;
3623 
3624 	rxmode = &dev->data->dev_conf.rxmode;
3625 	if (mask & ETH_VLAN_FILTER_MASK) {
3626 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3627 			ice_vsi_config_vlan_filter(vsi, true);
3628 		else
3629 			ice_vsi_config_vlan_filter(vsi, false);
3630 	}
3631 
3632 	if (mask & ETH_VLAN_STRIP_MASK) {
3633 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3634 			ice_vsi_config_vlan_stripping(vsi, true);
3635 		else
3636 			ice_vsi_config_vlan_stripping(vsi, false);
3637 	}
3638 
3639 	if (mask & ETH_VLAN_EXTEND_MASK) {
3640 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3641 			ice_vsi_config_double_vlan(vsi, true);
3642 		else
3643 			ice_vsi_config_double_vlan(vsi, false);
3644 	}
3645 
3646 	return 0;
3647 }
3648 
3649 static int
3650 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3651 {
3652 	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
3653 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3654 	int ret;
3655 
3656 	if (!lut)
3657 		return -EINVAL;
3658 
3659 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
3660 		ret = ice_aq_get_rss_lut(hw, vsi->idx,
3661 			ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
3662 		if (ret) {
3663 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3664 			return -EINVAL;
3665 		}
3666 	} else {
3667 		uint64_t *lut_dw = (uint64_t *)lut;
3668 		uint16_t i, lut_size_dw = lut_size / 4;
3669 
3670 		for (i = 0; i < lut_size_dw; i++)
3671 			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
3672 	}
3673 
3674 	return 0;
3675 }
3676 
3677 static int
3678 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3679 {
3680 	struct ice_pf *pf;
3681 	struct ice_hw *hw;
3682 	int ret;
3683 
3684 	if (!vsi || !lut)
3685 		return -EINVAL;
3686 
3687 	pf = ICE_VSI_TO_PF(vsi);
3688 	hw = ICE_VSI_TO_HW(vsi);
3689 
3690 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
3691 		ret = ice_aq_set_rss_lut(hw, vsi->idx,
3692 			ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
3693 		if (ret) {
3694 			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3695 			return -EINVAL;
3696 		}
3697 	} else {
3698 		uint64_t *lut_dw = (uint64_t *)lut;
3699 		uint16_t i, lut_size_dw = lut_size / 4;
3700 
3701 		for (i = 0; i < lut_size_dw; i++)
3702 			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
3703 
3704 		ice_flush(hw);
3705 	}
3706 
3707 	return 0;
3708 }
3709 
3710 static int
3711 ice_rss_reta_update(struct rte_eth_dev *dev,
3712 		    struct rte_eth_rss_reta_entry64 *reta_conf,
3713 		    uint16_t reta_size)
3714 {
3715 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3716 	uint16_t i, lut_size = pf->hash_lut_size;
3717 	uint16_t idx, shift;
3718 	uint8_t *lut;
3719 	int ret;
3720 
3721 	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
3722 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
3723 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
3724 		PMD_DRV_LOG(ERR,
3725 			    "The size of hash lookup table configured (%d)"
3726 			    "doesn't match the number hardware can "
3727 			    "supported (128, 512, 2048)",
3728 			    reta_size);
3729 		return -EINVAL;
3730 	}
3731 
3732 	/* It MUST use the current LUT size to get the RSS lookup table,
3733 	 * otherwise it will fail with error code -100.
3734 	 */
3735 	lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
3736 	if (!lut) {
3737 		PMD_DRV_LOG(ERR, "No memory can be allocated");
3738 		return -ENOMEM;
3739 	}
3740 	ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
3741 	if (ret)
3742 		goto out;
3743 
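	/* Each rte_eth_rss_reta_entry64 describes RTE_RETA_GROUP_SIZE (64)
	 * LUT entries; e.g. i = 130 maps to reta_conf[2], slot 2. Only the
	 * entries whose mask bit is set are updated.
	 */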
3744 	for (i = 0; i < reta_size; i++) {
3745 		idx = i / RTE_RETA_GROUP_SIZE;
3746 		shift = i % RTE_RETA_GROUP_SIZE;
3747 		if (reta_conf[idx].mask & (1ULL << shift))
3748 			lut[i] = reta_conf[idx].reta[shift];
3749 	}
3750 	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
3751 	if (ret == 0 && lut_size != reta_size) {
3752 		PMD_DRV_LOG(INFO,
3753 			    "The size of hash lookup table is changed from (%d) to (%d)",
3754 			    lut_size, reta_size);
3755 		pf->hash_lut_size = reta_size;
3756 	}
3757 
3758 out:
3759 	rte_free(lut);
3760 
3761 	return ret;
3762 }
3763 
3764 static int
3765 ice_rss_reta_query(struct rte_eth_dev *dev,
3766 		   struct rte_eth_rss_reta_entry64 *reta_conf,
3767 		   uint16_t reta_size)
3768 {
3769 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3770 	uint16_t i, lut_size = pf->hash_lut_size;
3771 	uint16_t idx, shift;
3772 	uint8_t *lut;
3773 	int ret;
3774 
3775 	if (reta_size != lut_size) {
3776 		PMD_DRV_LOG(ERR,
3777 			    "The size of hash lookup table configured (%d)"
3778 			    "doesn't match the number hardware can "
3779 			    "supported (%d)",
3780 			    reta_size, lut_size);
3781 		return -EINVAL;
3782 	}
3783 
3784 	lut = rte_zmalloc(NULL, reta_size, 0);
3785 	if (!lut) {
3786 		PMD_DRV_LOG(ERR, "No memory can be allocated");
3787 		return -ENOMEM;
3788 	}
3789 
3790 	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
3791 	if (ret)
3792 		goto out;
3793 
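	/* Copy back only the LUT entries selected by each group's mask */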
3794 	for (i = 0; i < reta_size; i++) {
3795 		idx = i / RTE_RETA_GROUP_SIZE;
3796 		shift = i % RTE_RETA_GROUP_SIZE;
3797 		if (reta_conf[idx].mask & (1ULL << shift))
3798 			reta_conf[idx].reta[shift] = lut[i];
3799 	}
3800 
3801 out:
3802 	rte_free(lut);
3803 
3804 	return ret;
3805 }
3806 
3807 static int
3808 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
3809 {
3810 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3811 	int ret = 0;
3812 
3813 	if (!key || key_len == 0) {
3814 		PMD_DRV_LOG(DEBUG, "No key to be configured");
3815 		return 0;
3816 	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
3817 		   sizeof(uint32_t)) {
3818 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
3819 		return -EINVAL;
3820 	}
3821 
3822 	struct ice_aqc_get_set_rss_keys *key_dw =
3823 		(struct ice_aqc_get_set_rss_keys *)key;
3824 
3825 	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
3826 	if (ret) {
3827 		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
3828 		ret = -EINVAL;
3829 	}
3830 
3831 	return ret;
3832 }
3833 
3834 static int
3835 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
3836 {
3837 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3838 	int ret;
3839 
3840 	if (!key || !key_len)
3841 		return -EINVAL;
3842 
3843 	ret = ice_aq_get_rss_key
3844 		(hw, vsi->idx,
3845 		 (struct ice_aqc_get_set_rss_keys *)key);
3846 	if (ret) {
3847 		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
3848 		return -EINVAL;
3849 	}
3850 	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3851 
3852 	return 0;
3853 }
3854 
3855 static int
3856 ice_rss_hash_update(struct rte_eth_dev *dev,
3857 		    struct rte_eth_rss_conf *rss_conf)
3858 {
3859 	enum ice_status status = ICE_SUCCESS;
3860 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3861 	struct ice_vsi *vsi = pf->main_vsi;
3862 
3863 	/* set hash key */
3864 	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
3865 	if (status)
3866 		return status;
3867 
3868 	if (rss_conf->rss_hf == 0)
3869 		return 0;
3870 
3871 	/* RSS hash configuration */
3872 	ice_rss_hash_set(pf, rss_conf->rss_hf);
3873 
3874 	return 0;
3875 }
3876 
3877 static int
3878 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
3879 		      struct rte_eth_rss_conf *rss_conf)
3880 {
3881 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3882 	struct ice_vsi *vsi = pf->main_vsi;
3883 
3884 	ice_get_rss_key(vsi, rss_conf->rss_key,
3885 			&rss_conf->rss_key_len);
3886 
3887 	/* TODO: default set to 0 as hf config is not supported now */
3888 	rss_conf->rss_hf = 0;
3889 	return 0;
3890 }
3891 
3892 static int
3893 ice_promisc_enable(struct rte_eth_dev *dev)
3894 {
3895 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3896 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3897 	struct ice_vsi *vsi = pf->main_vsi;
3898 	enum ice_status status;
3899 	uint8_t pmask;
3900 	int ret = 0;
3901 
3902 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
3903 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3904 
3905 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
3906 	switch (status) {
3907 	case ICE_ERR_ALREADY_EXISTS:
3908 		PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
3909 	case ICE_SUCCESS:
3910 		break;
3911 	default:
3912 		PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
3913 		ret = -EAGAIN;
3914 	}
3915 
3916 	return ret;
3917 }
3918 
3919 static int
3920 ice_promisc_disable(struct rte_eth_dev *dev)
3921 {
3922 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3923 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3924 	struct ice_vsi *vsi = pf->main_vsi;
3925 	enum ice_status status;
3926 	uint8_t pmask;
3927 	int ret = 0;
3928 
3929 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
3930 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3931 
3932 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
3933 	if (status != ICE_SUCCESS) {
3934 		PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
3935 		ret = -EAGAIN;
3936 	}
3937 
3938 	return ret;
3939 }
3940 
3941 static int
3942 ice_allmulti_enable(struct rte_eth_dev *dev)
3943 {
3944 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3945 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3946 	struct ice_vsi *vsi = pf->main_vsi;
3947 	enum ice_status status;
3948 	uint8_t pmask;
3949 	int ret = 0;
3950 
3951 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3952 
3953 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
3954 
3955 	switch (status) {
3956 	case ICE_ERR_ALREADY_EXISTS:
3957 		PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
3958 	case ICE_SUCCESS:
3959 		break;
3960 	default:
3961 		PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
3962 		ret = -EAGAIN;
3963 	}
3964 
3965 	return ret;
3966 }
3967 
3968 static int
3969 ice_allmulti_disable(struct rte_eth_dev *dev)
3970 {
3971 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3972 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3973 	struct ice_vsi *vsi = pf->main_vsi;
3974 	enum ice_status status;
3975 	uint8_t pmask;
3976 	int ret = 0;
3977 
3978 	if (dev->data->promiscuous == 1)
3979 		return 0; /* must remain in all_multicast mode */
3980 
3981 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3982 
3983 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
3984 	if (status != ICE_SUCCESS) {
3985 		PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
3986 		ret = -EAGAIN;
3987 	}
3988 
3989 	return ret;
3990 }
3991 
3992 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
3993 				    uint16_t queue_id)
3994 {
3995 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3996 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3997 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3998 	uint32_t val;
3999 	uint16_t msix_intr;
4000 
4001 	msix_intr = intr_handle->intr_vec[queue_id];
4002 
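	/* Re-arm the vector: enable the interrupt (INTENA), clear any pending
	 * event (CLEARPBA), select the no-ITR index and leave WB_ON_ITR off.
	 */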
4003 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
4004 	      GLINT_DYN_CTL_ITR_INDX_M;
4005 	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
4006 
4007 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
4008 	rte_intr_ack(&pci_dev->intr_handle);
4009 
4010 	return 0;
4011 }
4012 
4013 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
4014 				     uint16_t queue_id)
4015 {
4016 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4017 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4018 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4019 	uint16_t msix_intr;
4020 
4021 	msix_intr = intr_handle->intr_vec[queue_id];
4022 
4023 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
4024 
4025 	return 0;
4026 }
4027 
4028 static int
4029 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
4030 {
4031 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4032 	u8 ver, patch;
4033 	u16 build;
4034 	int ret;
4035 
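	/* Reported as "<NVM major>.<NVM minor> 0x<eetrack> <OROM major>.<build>.<patch>" */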
4036 	ver = hw->nvm.orom.major;
4037 	patch = hw->nvm.orom.patch;
4038 	build = hw->nvm.orom.build;
4039 
4040 	ret = snprintf(fw_version, fw_size,
4041 			"%d.%d 0x%08x %d.%d.%d",
4042 			hw->nvm.major_ver,
4043 			hw->nvm.minor_ver,
4044 			hw->nvm.eetrack,
4045 			ver, build, patch);
4046 
4047 	/* add the size of '\0' */
4048 	ret += 1;
4049 	if (fw_size < (u32)ret)
4050 		return ret;
4051 	else
4052 		return 0;
4053 }
4054 
4055 static int
4056 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
4057 {
4058 	struct ice_hw *hw;
4059 	struct ice_vsi_ctx ctxt;
4060 	uint8_t vlan_flags = 0;
4061 	int ret;
4062 
4063 	if (!vsi || !info) {
4064 		PMD_DRV_LOG(ERR, "invalid parameters");
4065 		return -EINVAL;
4066 	}
4067 
4068 	if (info->on) {
4069 		vsi->info.pvid = info->config.pvid;
4070 		/**
4071 		 * If insert pvid is enabled, only tagged pkts are
4072 		 * allowed to be sent out.
4073 		 */
4074 		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
4075 			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
4076 	} else {
4077 		vsi->info.pvid = 0;
4078 		if (info->config.reject.tagged == 0)
4079 			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;
4080 
4081 		if (info->config.reject.untagged == 0)
4082 			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
4083 	}
4084 	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
4085 				  ICE_AQ_VSI_VLAN_MODE_M);
4086 	vsi->info.vlan_flags |= vlan_flags;
4087 	memset(&ctxt, 0, sizeof(ctxt));
4088 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4089 	ctxt.info.valid_sections =
4090 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4091 	ctxt.vsi_num = vsi->vsi_id;
4092 
4093 	hw = ICE_VSI_TO_HW(vsi);
4094 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4095 	if (ret != ICE_SUCCESS) {
4096 		PMD_DRV_LOG(ERR,
4097 			    "update VSI for VLAN insert failed, err %d",
4098 			    ret);
4099 		return -EINVAL;
4100 	}
4101 
4102 	vsi->info.valid_sections |=
4103 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4104 
4105 	return ret;
4106 }
4107 
4108 static int
4109 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4110 {
4111 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4112 	struct ice_vsi *vsi = pf->main_vsi;
4113 	struct rte_eth_dev_data *data = pf->dev_data;
4114 	struct ice_vsi_vlan_pvid_info info;
4115 	int ret;
4116 
4117 	memset(&info, 0, sizeof(info));
4118 	info.on = on;
4119 	if (info.on) {
4120 		info.config.pvid = pvid;
4121 	} else {
4122 		info.config.reject.tagged =
4123 			data->dev_conf.txmode.hw_vlan_reject_tagged;
4124 		info.config.reject.untagged =
4125 			data->dev_conf.txmode.hw_vlan_reject_untagged;
4126 	}
4127 
4128 	ret = ice_vsi_vlan_pvid_set(vsi, &info);
4129 	if (ret < 0) {
4130 		PMD_DRV_LOG(ERR, "Failed to set pvid.");
4131 		return -EINVAL;
4132 	}
4133 
4134 	return 0;
4135 }
4136 
4137 static int
4138 ice_get_eeprom_length(struct rte_eth_dev *dev)
4139 {
4140 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4141 
4142 	return hw->nvm.flash_size;
4143 }
4144 
4145 static int
4146 ice_get_eeprom(struct rte_eth_dev *dev,
4147 	       struct rte_dev_eeprom_info *eeprom)
4148 {
4149 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4150 	enum ice_status status = ICE_SUCCESS;
4151 	uint8_t *data = eeprom->data;
4152 
4153 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4154 
4155 	status = ice_acquire_nvm(hw, ICE_RES_READ);
4156 	if (status) {
4157 		PMD_DRV_LOG(ERR, "acquire nvm failed.");
4158 		return -EIO;
4159 	}
4160 
4161 	status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
4162 				   data, false);
4163 
4164 	ice_release_nvm(hw);
4165 
4166 	if (status) {
4167 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
4168 		return -EIO;
4169 	}
4170 
4171 	return 0;
4172 }
4173 
4174 static void
4175 ice_stat_update_32(struct ice_hw *hw,
4176 		   uint32_t reg,
4177 		   bool offset_loaded,
4178 		   uint64_t *offset,
4179 		   uint64_t *stat)
4180 {
4181 	uint64_t new_data;
4182 
4183 	new_data = (uint64_t)ICE_READ_REG(hw, reg);
4184 	if (!offset_loaded)
4185 		*offset = new_data;
4186 
4187 	if (new_data >= *offset)
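	/* Report the delta against the saved offset, allowing for one 32-bit wrap */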
4188 		*stat = (uint64_t)(new_data - *offset);
4189 	else
4190 		*stat = (uint64_t)((new_data +
4191 				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
4192 				   - *offset);
4193 }
4194 
4195 static void
4196 ice_stat_update_40(struct ice_hw *hw,
4197 		   uint32_t hireg,
4198 		   uint32_t loreg,
4199 		   bool offset_loaded,
4200 		   uint64_t *offset,
4201 		   uint64_t *stat)
4202 {
4203 	uint64_t new_data;
4204 
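	/* 40-bit counter: low 32 bits from loreg, top 8 bits from hireg */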
4205 	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
4206 	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
4207 		    ICE_32_BIT_WIDTH;
4208 
4209 	if (!offset_loaded)
4210 		*offset = new_data;
4211 
4212 	if (new_data >= *offset)
4213 		*stat = new_data - *offset;
4214 	else
4215 		*stat = (uint64_t)((new_data +
4216 				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
4217 				   *offset);
4218 
4219 	*stat &= ICE_40_BIT_MASK;
4220 }
4221 
4222 /* Get all the statistics of a VSI */
4223 static void
4224 ice_update_vsi_stats(struct ice_vsi *vsi)
4225 {
4226 	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
4227 	struct ice_eth_stats *nes = &vsi->eth_stats;
4228 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4229 	int idx = rte_le_to_cpu_16(vsi->vsi_id);
4230 
4231 	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
4232 			   vsi->offset_loaded, &oes->rx_bytes,
4233 			   &nes->rx_bytes);
4234 	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
4235 			   vsi->offset_loaded, &oes->rx_unicast,
4236 			   &nes->rx_unicast);
4237 	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
4238 			   vsi->offset_loaded, &oes->rx_multicast,
4239 			   &nes->rx_multicast);
4240 	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
4241 			   vsi->offset_loaded, &oes->rx_broadcast,
4242 			   &nes->rx_broadcast);
4243 	/* extend rx_bytes in software when the 40-bit hardware counter wraps */
4244 	if (vsi->offset_loaded) {
4245 		if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
4246 			nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4247 		nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
4248 	}
4249 	vsi->old_rx_bytes = nes->rx_bytes;
4250 	/* exclude CRC bytes */
4251 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
4252 			  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
4253 
4254 	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
4255 			   &oes->rx_discards, &nes->rx_discards);
4256 	/* GLV_REPC not supported */
4257 	/* GLV_RMPC not supported */
4258 	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
4259 			   &oes->rx_unknown_protocol,
4260 			   &nes->rx_unknown_protocol);
4261 	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
4262 			   vsi->offset_loaded, &oes->tx_bytes,
4263 			   &nes->tx_bytes);
4264 	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
4265 			   vsi->offset_loaded, &oes->tx_unicast,
4266 			   &nes->tx_unicast);
4267 	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
4268 			   vsi->offset_loaded, &oes->tx_multicast,
4269 			   &nes->tx_multicast);
4270 	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
4271 			   vsi->offset_loaded,  &oes->tx_broadcast,
4272 			   &nes->tx_broadcast);
4273 	/* GLV_TDPC not supported */
4274 	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
4275 			   &oes->tx_errors, &nes->tx_errors);
4276 	/* extend tx_bytes in software when the 40-bit hardware counter wraps */
4277 	if (vsi->offset_loaded) {
4278 		if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
4279 			nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4280 		nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
4281 	}
4282 	vsi->old_tx_bytes = nes->tx_bytes;
4283 	vsi->offset_loaded = true;
4284 
4285 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
4286 		    vsi->vsi_id);
4287 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
4288 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
4289 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
4290 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
4291 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
4292 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
4293 		    nes->rx_unknown_protocol);
4294 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
4295 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
4296 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
4297 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
4298 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
4299 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
4300 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
4301 		    vsi->vsi_id);
4302 }
4303 
4304 static void
4305 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
4306 {
4307 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
4308 	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
4309 
4310 	/* Get statistics of struct ice_eth_stats */
4311 	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
4312 			   GLPRT_GORCL(hw->port_info->lport),
4313 			   pf->offset_loaded, &os->eth.rx_bytes,
4314 			   &ns->eth.rx_bytes);
4315 	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
4316 			   GLPRT_UPRCL(hw->port_info->lport),
4317 			   pf->offset_loaded, &os->eth.rx_unicast,
4318 			   &ns->eth.rx_unicast);
4319 	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
4320 			   GLPRT_MPRCL(hw->port_info->lport),
4321 			   pf->offset_loaded, &os->eth.rx_multicast,
4322 			   &ns->eth.rx_multicast);
4323 	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
4324 			   GLPRT_BPRCL(hw->port_info->lport),
4325 			   pf->offset_loaded, &os->eth.rx_broadcast,
4326 			   &ns->eth.rx_broadcast);
4327 	ice_stat_update_32(hw, PRTRPB_RDPC,
4328 			   pf->offset_loaded, &os->eth.rx_discards,
4329 			   &ns->eth.rx_discards);
4330 	/* extend rx_bytes in software when the 40-bit hardware counter wraps */
4331 	if (pf->offset_loaded) {
4332 		if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
4333 			ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4334 		ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
4335 	}
4336 	pf->old_rx_bytes = ns->eth.rx_bytes;
4337 
4338 	/* Workaround: CRC size should not be included in byte statistics,
4339 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
4340 	 * packet.
4341 	 */
4342 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
4343 			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
4344 
4345 	/* GLPRT_REPC not supported */
4346 	/* GLPRT_RMPC not supported */
4347 	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
4348 			   pf->offset_loaded,
4349 			   &os->eth.rx_unknown_protocol,
4350 			   &ns->eth.rx_unknown_protocol);
4351 	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
4352 			   GLPRT_GOTCL(hw->port_info->lport),
4353 			   pf->offset_loaded, &os->eth.tx_bytes,
4354 			   &ns->eth.tx_bytes);
4355 	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
4356 			   GLPRT_UPTCL(hw->port_info->lport),
4357 			   pf->offset_loaded, &os->eth.tx_unicast,
4358 			   &ns->eth.tx_unicast);
4359 	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
4360 			   GLPRT_MPTCL(hw->port_info->lport),
4361 			   pf->offset_loaded, &os->eth.tx_multicast,
4362 			   &ns->eth.tx_multicast);
4363 	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
4364 			   GLPRT_BPTCL(hw->port_info->lport),
4365 			   pf->offset_loaded, &os->eth.tx_broadcast,
4366 			   &ns->eth.tx_broadcast);
4367 	/* extend tx_bytes in software when the 40-bit hardware counter wraps */
4368 	if (pf->offset_loaded) {
4369 		if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
4370 			ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4371 		ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
4372 	}
4373 	pf->old_tx_bytes = ns->eth.tx_bytes;
4374 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
4375 			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
4376 
4377 	/* GLPRT_TEPC not supported */
4378 
4379 	/* additional port specific stats */
4380 	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
4381 			   pf->offset_loaded, &os->tx_dropped_link_down,
4382 			   &ns->tx_dropped_link_down);
4383 	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
4384 			   pf->offset_loaded, &os->crc_errors,
4385 			   &ns->crc_errors);
4386 	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
4387 			   pf->offset_loaded, &os->illegal_bytes,
4388 			   &ns->illegal_bytes);
4389 	/* GLPRT_ERRBC not supported */
4390 	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
4391 			   pf->offset_loaded, &os->mac_local_faults,
4392 			   &ns->mac_local_faults);
4393 	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
4394 			   pf->offset_loaded, &os->mac_remote_faults,
4395 			   &ns->mac_remote_faults);
4396 
4397 	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
4398 			   pf->offset_loaded, &os->rx_len_errors,
4399 			   &ns->rx_len_errors);
4400 
4401 	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
4402 			   pf->offset_loaded, &os->link_xon_rx,
4403 			   &ns->link_xon_rx);
4404 	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
4405 			   pf->offset_loaded, &os->link_xoff_rx,
4406 			   &ns->link_xoff_rx);
4407 	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
4408 			   pf->offset_loaded, &os->link_xon_tx,
4409 			   &ns->link_xon_tx);
4410 	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
4411 			   pf->offset_loaded, &os->link_xoff_tx,
4412 			   &ns->link_xoff_tx);
4413 	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
4414 			   GLPRT_PRC64L(hw->port_info->lport),
4415 			   pf->offset_loaded, &os->rx_size_64,
4416 			   &ns->rx_size_64);
4417 	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
4418 			   GLPRT_PRC127L(hw->port_info->lport),
4419 			   pf->offset_loaded, &os->rx_size_127,
4420 			   &ns->rx_size_127);
4421 	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
4422 			   GLPRT_PRC255L(hw->port_info->lport),
4423 			   pf->offset_loaded, &os->rx_size_255,
4424 			   &ns->rx_size_255);
4425 	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
4426 			   GLPRT_PRC511L(hw->port_info->lport),
4427 			   pf->offset_loaded, &os->rx_size_511,
4428 			   &ns->rx_size_511);
4429 	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
4430 			   GLPRT_PRC1023L(hw->port_info->lport),
4431 			   pf->offset_loaded, &os->rx_size_1023,
4432 			   &ns->rx_size_1023);
4433 	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
4434 			   GLPRT_PRC1522L(hw->port_info->lport),
4435 			   pf->offset_loaded, &os->rx_size_1522,
4436 			   &ns->rx_size_1522);
4437 	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
4438 			   GLPRT_PRC9522L(hw->port_info->lport),
4439 			   pf->offset_loaded, &os->rx_size_big,
4440 			   &ns->rx_size_big);
4441 	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
4442 			   pf->offset_loaded, &os->rx_undersize,
4443 			   &ns->rx_undersize);
4444 	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
4445 			   pf->offset_loaded, &os->rx_fragments,
4446 			   &ns->rx_fragments);
4447 	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
4448 			   pf->offset_loaded, &os->rx_oversize,
4449 			   &ns->rx_oversize);
4450 	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
4451 			   pf->offset_loaded, &os->rx_jabber,
4452 			   &ns->rx_jabber);
4453 	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
4454 			   GLPRT_PTC64L(hw->port_info->lport),
4455 			   pf->offset_loaded, &os->tx_size_64,
4456 			   &ns->tx_size_64);
4457 	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
4458 			   GLPRT_PTC127L(hw->port_info->lport),
4459 			   pf->offset_loaded, &os->tx_size_127,
4460 			   &ns->tx_size_127);
4461 	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
4462 			   GLPRT_PTC255L(hw->port_info->lport),
4463 			   pf->offset_loaded, &os->tx_size_255,
4464 			   &ns->tx_size_255);
4465 	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
4466 			   GLPRT_PTC511L(hw->port_info->lport),
4467 			   pf->offset_loaded, &os->tx_size_511,
4468 			   &ns->tx_size_511);
4469 	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
4470 			   GLPRT_PTC1023L(hw->port_info->lport),
4471 			   pf->offset_loaded, &os->tx_size_1023,
4472 			   &ns->tx_size_1023);
4473 	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
4474 			   GLPRT_PTC1522L(hw->port_info->lport),
4475 			   pf->offset_loaded, &os->tx_size_1522,
4476 			   &ns->tx_size_1522);
4477 	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
4478 			   GLPRT_PTC9522L(hw->port_info->lport),
4479 			   pf->offset_loaded, &os->tx_size_big,
4480 			   &ns->tx_size_big);
4481 
4482 	/* GLPRT_MSPDC not supported */
4483 	/* GLPRT_XEC not supported */
4484 
4485 	pf->offset_loaded = true;
4486 
4487 	if (pf->main_vsi)
4488 		ice_update_vsi_stats(pf->main_vsi);
4489 }
4490 
4491 /* Get all statistics of a port */
4492 static int
4493 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
4494 {
4495 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4496 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4497 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
4498 
4499 	/* read the hardware registers to refresh the values in the stats structs */
4500 	ice_read_stats_registers(pf, hw);
4501 
4502 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
4503 			  pf->main_vsi->eth_stats.rx_multicast +
4504 			  pf->main_vsi->eth_stats.rx_broadcast -
4505 			  pf->main_vsi->eth_stats.rx_discards;
4506 	stats->opackets = ns->eth.tx_unicast +
4507 			  ns->eth.tx_multicast +
4508 			  ns->eth.tx_broadcast;
4509 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
4510 	stats->obytes   = ns->eth.tx_bytes;
4511 	stats->oerrors  = ns->eth.tx_errors +
4512 			  pf->main_vsi->eth_stats.tx_errors;
4513 
4514 	/* Rx Errors */
4515 	stats->imissed  = ns->eth.rx_discards +
4516 			  pf->main_vsi->eth_stats.rx_discards;
4517 	stats->ierrors  = ns->crc_errors +
4518 			  ns->rx_undersize +
4519 			  ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
4520 
4521 	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
4522 	PMD_DRV_LOG(DEBUG, "rx_bytes:	%"PRIu64"", ns->eth.rx_bytes);
4523 	PMD_DRV_LOG(DEBUG, "rx_unicast:	%"PRIu64"", ns->eth.rx_unicast);
4524 	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
4525 	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
4526 	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
4527 	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
4528 		    pf->main_vsi->eth_stats.rx_discards);
4529 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
4530 		    ns->eth.rx_unknown_protocol);
4531 	PMD_DRV_LOG(DEBUG, "tx_bytes:	%"PRIu64"", ns->eth.tx_bytes);
4532 	PMD_DRV_LOG(DEBUG, "tx_unicast:	%"PRIu64"", ns->eth.tx_unicast);
4533 	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
4534 	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
4535 	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
4536 	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
4537 		    pf->main_vsi->eth_stats.tx_discards);
4538 	PMD_DRV_LOG(DEBUG, "tx_errors:		%"PRIu64"", ns->eth.tx_errors);
4539 
4540 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:	%"PRIu64"",
4541 		    ns->tx_dropped_link_down);
4542 	PMD_DRV_LOG(DEBUG, "crc_errors:	%"PRIu64"", ns->crc_errors);
4543 	PMD_DRV_LOG(DEBUG, "illegal_bytes:	%"PRIu64"",
4544 		    ns->illegal_bytes);
4545 	PMD_DRV_LOG(DEBUG, "error_bytes:	%"PRIu64"", ns->error_bytes);
4546 	PMD_DRV_LOG(DEBUG, "mac_local_faults:	%"PRIu64"",
4547 		    ns->mac_local_faults);
4548 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:	%"PRIu64"",
4549 		    ns->mac_remote_faults);
4550 	PMD_DRV_LOG(DEBUG, "link_xon_rx:	%"PRIu64"", ns->link_xon_rx);
4551 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:	%"PRIu64"", ns->link_xoff_rx);
4552 	PMD_DRV_LOG(DEBUG, "link_xon_tx:	%"PRIu64"", ns->link_xon_tx);
4553 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:	%"PRIu64"", ns->link_xoff_tx);
4554 	PMD_DRV_LOG(DEBUG, "rx_size_64:		%"PRIu64"", ns->rx_size_64);
4555 	PMD_DRV_LOG(DEBUG, "rx_size_127:	%"PRIu64"", ns->rx_size_127);
4556 	PMD_DRV_LOG(DEBUG, "rx_size_255:	%"PRIu64"", ns->rx_size_255);
4557 	PMD_DRV_LOG(DEBUG, "rx_size_511:	%"PRIu64"", ns->rx_size_511);
4558 	PMD_DRV_LOG(DEBUG, "rx_size_1023:	%"PRIu64"", ns->rx_size_1023);
4559 	PMD_DRV_LOG(DEBUG, "rx_size_1522:	%"PRIu64"", ns->rx_size_1522);
4560 	PMD_DRV_LOG(DEBUG, "rx_size_big:	%"PRIu64"", ns->rx_size_big);
4561 	PMD_DRV_LOG(DEBUG, "rx_undersize:	%"PRIu64"", ns->rx_undersize);
4562 	PMD_DRV_LOG(DEBUG, "rx_fragments:	%"PRIu64"", ns->rx_fragments);
4563 	PMD_DRV_LOG(DEBUG, "rx_oversize:	%"PRIu64"", ns->rx_oversize);
4564 	PMD_DRV_LOG(DEBUG, "rx_jabber:		%"PRIu64"", ns->rx_jabber);
4565 	PMD_DRV_LOG(DEBUG, "tx_size_64:		%"PRIu64"", ns->tx_size_64);
4566 	PMD_DRV_LOG(DEBUG, "tx_size_127:	%"PRIu64"", ns->tx_size_127);
4567 	PMD_DRV_LOG(DEBUG, "tx_size_255:	%"PRIu64"", ns->tx_size_255);
4568 	PMD_DRV_LOG(DEBUG, "tx_size_511:	%"PRIu64"", ns->tx_size_511);
4569 	PMD_DRV_LOG(DEBUG, "tx_size_1023:	%"PRIu64"", ns->tx_size_1023);
4570 	PMD_DRV_LOG(DEBUG, "tx_size_1522:	%"PRIu64"", ns->tx_size_1522);
4571 	PMD_DRV_LOG(DEBUG, "tx_size_big:	%"PRIu64"", ns->tx_size_big);
4572 	PMD_DRV_LOG(DEBUG, "rx_len_errors:	%"PRIu64"", ns->rx_len_errors);
4573 	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
4574 	return 0;
4575 }
4576 
4577 /* Reset the statistics */
4578 static int
4579 ice_stats_reset(struct rte_eth_dev *dev)
4580 {
4581 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4582 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4583 
4584 	/* Mark PF and VSI stats to update the offset, aka "reset" */
4585 	pf->offset_loaded = false;
4586 	if (pf->main_vsi)
4587 		pf->main_vsi->offset_loaded = false;
4588 
4589 	/* read the stats, reading current register values into offset */
4590 	ice_read_stats_registers(pf, hw);
4591 
4592 	return 0;
4593 }
4594 
4595 static uint32_t
4596 ice_xstats_calc_num(void)
4597 {
4598 	uint32_t num;
4599 
4600 	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
4601 
4602 	return num;
4603 }
4604 
4605 static int
4606 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
4607 	       unsigned int n)
4608 {
4609 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4610 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4611 	unsigned int i;
4612 	unsigned int count;
4613 	struct ice_hw_port_stats *hw_stats = &pf->stats;
4614 
4615 	count = ice_xstats_calc_num();
4616 	if (n < count)
4617 		return count;
4618 
4619 	ice_read_stats_registers(pf, hw);
4620 
4621 	if (!xstats)
4622 		return 0;
4623 
4624 	count = 0;
4625 
4626 	/* Get stats from ice_eth_stats struct */
4627 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
4628 		xstats[count].value =
4629 			*(uint64_t *)((char *)&hw_stats->eth +
4630 				      ice_stats_strings[i].offset);
4631 		xstats[count].id = count;
4632 		count++;
4633 	}
4634 
4635 	/* Get individual stats from ice_hw_port struct */
4636 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
4637 		xstats[count].value =
4638 			*(uint64_t *)((char *)hw_stats +
4639 				      ice_hw_port_strings[i].offset);
4640 		xstats[count].id = count;
4641 		count++;
4642 	}
4643 
4644 	return count;
4645 }
4646 
4647 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
4648 				struct rte_eth_xstat_name *xstats_names,
4649 				__rte_unused unsigned int limit)
4650 {
4651 	unsigned int count = 0;
4652 	unsigned int i;
4653 
4654 	if (!xstats_names)
4655 		return ice_xstats_calc_num();
4656 
4657 	/* Note: limit checked in rte_eth_xstats_names() */
4658 
4659 	/* Get stats from ice_eth_stats struct */
4660 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
4661 		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
4662 			sizeof(xstats_names[count].name));
4663 		count++;
4664 	}
4665 
4666 	/* Get individual stats from ice_hw_port struct */
4667 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
4668 		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
4669 			sizeof(xstats_names[count].name));
4670 		count++;
4671 	}
4672 
4673 	return count;
4674 }
4675 
4676 static int
4677 ice_dev_filter_ctrl(struct rte_eth_dev *dev,
4678 		     enum rte_filter_type filter_type,
4679 		     enum rte_filter_op filter_op,
4680 		     void *arg)
4681 {
4682 	int ret = 0;
4683 
4684 	if (!dev)
4685 		return -EINVAL;
4686 
4687 	switch (filter_type) {
4688 	case RTE_ETH_FILTER_GENERIC:
4689 		if (filter_op != RTE_ETH_FILTER_GET)
4690 			return -EINVAL;
4691 		*(const void **)arg = &ice_flow_ops;
4692 		break;
4693 	default:
4694 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4695 					filter_type);
4696 		ret = -EINVAL;
4697 		break;
4698 	}
4699 
4700 	return ret;
4701 }
4702 
4703 /* Add UDP tunneling port */
4704 static int
4705 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
4706 			     struct rte_eth_udp_tunnel *udp_tunnel)
4707 {
4708 	int ret = 0;
4709 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4710 
4711 	if (udp_tunnel == NULL)
4712 		return -EINVAL;
4713 
4714 	switch (udp_tunnel->prot_type) {
4715 	case RTE_TUNNEL_TYPE_VXLAN:
4716 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
4717 		break;
4718 	default:
4719 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4720 		ret = -EINVAL;
4721 		break;
4722 	}
4723 
4724 	return ret;
4725 }
4726 
4727 /* Delete UDP tunneling port */
4728 static int
4729 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
4730 			     struct rte_eth_udp_tunnel *udp_tunnel)
4731 {
4732 	int ret = 0;
4733 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4734 
4735 	if (udp_tunnel == NULL)
4736 		return -EINVAL;
4737 
4738 	switch (udp_tunnel->prot_type) {
4739 	case RTE_TUNNEL_TYPE_VXLAN:
4740 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
4741 		break;
4742 	default:
4743 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4744 		ret = -EINVAL;
4745 		break;
4746 	}
4747 
4748 	return ret;
4749 }
4750 
4751 static int
4752 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
4753 	      struct rte_pci_device *pci_dev)
4754 {
4755 	return rte_eth_dev_pci_generic_probe(pci_dev,
4756 					     sizeof(struct ice_adapter),
4757 					     ice_dev_init);
4758 }
4759 
4760 static int
4761 ice_pci_remove(struct rte_pci_device *pci_dev)
4762 {
4763 	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
4764 }
4765 
4766 static struct rte_pci_driver rte_ice_pmd = {
4767 	.id_table = pci_id_ice_map,
4768 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
4769 	.probe = ice_pci_probe,
4770 	.remove = ice_pci_remove,
4771 };
4772 
4773 /**
4774  * Driver initialization routine.
4775  * Invoked once at EAL init time.
4776  * Register itself as the [Poll Mode] Driver of PCI devices.
4777  */
4778 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
4779 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
4780 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
4781 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
4782 			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp>"
4783 			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
4784 			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
4785 			      ICE_FLOW_MARK_SUPPORT_ARG "=<0|1>");
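
/*
 * Illustrative devargs usage (hypothetical PCI address), assuming the device
 * is passed to EAL via its whitelist option:
 *   -w 0000:18:00.0,proto_xtr=vlan,safe-mode-support=1,flow-mark-support=1
 */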
4786 
4787 RTE_LOG_REGISTER(ice_logtype_init, pmd.net.ice.init, NOTICE);
4788 RTE_LOG_REGISTER(ice_logtype_driver, pmd.net.ice.driver, NOTICE);
4789 #ifdef RTE_LIBRTE_ICE_DEBUG_RX
4790 RTE_LOG_REGISTER(ice_logtype_rx, pmd.net.ice.rx, DEBUG);
4791 #endif
4792 #ifdef RTE_LIBRTE_ICE_DEBUG_TX
4793 RTE_LOG_REGISTER(ice_logtype_tx, pmd.net.ice.tx, DEBUG);
4794 #endif
4795 #ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE
4796 RTE_LOG_REGISTER(ice_logtype_tx_free, pmd.net.ice.tx_free, DEBUG);
4797 #endif
4798