xref: /f-stack/dpdk/drivers/net/ice/ice_ethdev.c (revision ebf5cedb)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 
8 #include <stdio.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <unistd.h>
12 
13 #include "base/ice_sched.h"
14 #include "base/ice_flow.h"
15 #include "base/ice_dcb.h"
16 #include "base/ice_common.h"
17 
18 #include "rte_pmd_ice.h"
19 #include "ice_ethdev.h"
20 #include "ice_rxtx.h"
21 #include "ice_generic_flow.h"
22 
23 /* devargs */
24 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
25 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
26 #define ICE_FLOW_MARK_SUPPORT_ARG	"flow-mark-support"
27 #define ICE_PROTO_XTR_ARG         "proto_xtr"
28 
29 static const char * const ice_valid_args[] = {
30 	ICE_SAFE_MODE_SUPPORT_ARG,
31 	ICE_PIPELINE_MODE_SUPPORT_ARG,
32 	ICE_FLOW_MARK_SUPPORT_ARG,
33 	ICE_PROTO_XTR_ARG,
34 	NULL
35 };
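/*
 * Illustrative devargs usage (editorial example, not part of this file):
 * the options above are passed on the EAL device string, e.g.
 *   -w 0000:18:00.0,safe-mode-support=1,proto_xtr='[(1,2-3):tcp,4:vlan]'
 * The proto_xtr queue/protocol grammar is defined by
 * parse_queue_proto_xtr() and parse_queue_set() below.
 */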
36 
37 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
38 	.name = "ice_dynfield_proto_xtr_metadata",
39 	.size = sizeof(uint32_t),
40 	.align = __alignof__(uint32_t),
41 	.flags = 0,
42 };
43 
44 struct proto_xtr_ol_flag {
45 	const struct rte_mbuf_dynflag param;
46 	uint64_t *ol_flag;
47 	bool required;
48 };
49 
50 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
51 	[PROTO_XTR_VLAN] = {
52 		.param = { .name = "ice_dynflag_proto_xtr_vlan" },
53 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
54 	[PROTO_XTR_IPV4] = {
55 		.param = { .name = "ice_dynflag_proto_xtr_ipv4" },
56 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
57 	[PROTO_XTR_IPV6] = {
58 		.param = { .name = "ice_dynflag_proto_xtr_ipv6" },
59 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
60 	[PROTO_XTR_IPV6_FLOW] = {
61 		.param = { .name = "ice_dynflag_proto_xtr_ipv6_flow" },
62 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
63 	[PROTO_XTR_TCP] = {
64 		.param = { .name = "ice_dynflag_proto_xtr_tcp" },
65 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
66 };
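/*
 * Note (editorial): each entry above whose extraction type is requested via
 * devargs is registered as an mbuf dynamic flag in ice_init_proto_xtr();
 * on success, *ol_flag is set to (1ULL << offset) of the registered flag.
 */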
67 
68 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
69 
70 /* DDP package search path */
71 #define ICE_PKG_FILE_DEFAULT "/lib/firmware/intel/ice/ddp/ice.pkg"
72 #define ICE_PKG_FILE_UPDATES "/lib/firmware/updates/intel/ice/ddp/ice.pkg"
73 #define ICE_PKG_FILE_SEARCH_PATH_DEFAULT "/lib/firmware/intel/ice/ddp/"
74 #define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
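/*
 * Note (editorial): a per-device package named from the PCIe DSN, e.g.
 * (illustrative) "ice-0123456789abcdef.pkg", is searched in the two
 * directories above before falling back to the generic ice.pkg files;
 * see ice_pkg_file_search_path() below.
 */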
75 
76 #define ICE_OS_DEFAULT_PKG_NAME		"ICE OS Default Package"
77 #define ICE_COMMS_PKG_NAME			"ICE COMMS Package"
78 #define ICE_MAX_PKG_FILENAME_SIZE   256
79 #define ICE_MAX_RES_DESC_NUM        1024
80 
81 int ice_logtype_init;
82 int ice_logtype_driver;
83 #ifdef RTE_LIBRTE_ICE_DEBUG_RX
84 int ice_logtype_rx;
85 #endif
86 #ifdef RTE_LIBRTE_ICE_DEBUG_TX
87 int ice_logtype_tx;
88 #endif
89 #ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE
90 int ice_logtype_tx_free;
91 #endif
92 
93 static int ice_dev_configure(struct rte_eth_dev *dev);
94 static int ice_dev_start(struct rte_eth_dev *dev);
95 static void ice_dev_stop(struct rte_eth_dev *dev);
96 static void ice_dev_close(struct rte_eth_dev *dev);
97 static int ice_dev_reset(struct rte_eth_dev *dev);
98 static int ice_dev_info_get(struct rte_eth_dev *dev,
99 			    struct rte_eth_dev_info *dev_info);
100 static int ice_link_update(struct rte_eth_dev *dev,
101 			   int wait_to_complete);
102 static int ice_dev_set_link_up(struct rte_eth_dev *dev);
103 static int ice_dev_set_link_down(struct rte_eth_dev *dev);
104 
105 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
106 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
107 static int ice_rss_reta_update(struct rte_eth_dev *dev,
108 			       struct rte_eth_rss_reta_entry64 *reta_conf,
109 			       uint16_t reta_size);
110 static int ice_rss_reta_query(struct rte_eth_dev *dev,
111 			      struct rte_eth_rss_reta_entry64 *reta_conf,
112 			      uint16_t reta_size);
113 static int ice_rss_hash_update(struct rte_eth_dev *dev,
114 			       struct rte_eth_rss_conf *rss_conf);
115 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
116 				 struct rte_eth_rss_conf *rss_conf);
117 static int ice_promisc_enable(struct rte_eth_dev *dev);
118 static int ice_promisc_disable(struct rte_eth_dev *dev);
119 static int ice_allmulti_enable(struct rte_eth_dev *dev);
120 static int ice_allmulti_disable(struct rte_eth_dev *dev);
121 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
122 			       uint16_t vlan_id,
123 			       int on);
124 static int ice_macaddr_set(struct rte_eth_dev *dev,
125 			   struct rte_ether_addr *mac_addr);
126 static int ice_macaddr_add(struct rte_eth_dev *dev,
127 			   struct rte_ether_addr *mac_addr,
128 			   __rte_unused uint32_t index,
129 			   uint32_t pool);
130 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
131 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
132 				    uint16_t queue_id);
133 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
134 				     uint16_t queue_id);
135 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
136 			      size_t fw_size);
137 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
138 			     uint16_t pvid, int on);
139 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
140 static int ice_get_eeprom(struct rte_eth_dev *dev,
141 			  struct rte_dev_eeprom_info *eeprom);
142 static int ice_stats_get(struct rte_eth_dev *dev,
143 			 struct rte_eth_stats *stats);
144 static int ice_stats_reset(struct rte_eth_dev *dev);
145 static int ice_xstats_get(struct rte_eth_dev *dev,
146 			  struct rte_eth_xstat *xstats, unsigned int n);
147 static int ice_xstats_get_names(struct rte_eth_dev *dev,
148 				struct rte_eth_xstat_name *xstats_names,
149 				unsigned int limit);
150 static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
151 			enum rte_filter_type filter_type,
152 			enum rte_filter_op filter_op,
153 			void *arg);
154 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
155 			struct rte_eth_udp_tunnel *udp_tunnel);
156 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
157 			struct rte_eth_udp_tunnel *udp_tunnel);
158 
159 static const struct rte_pci_id pci_id_ice_map[] = {
160 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
161 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
162 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
163 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
164 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
165 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
166 	{ .vendor_id = 0, /* sentinel */ },
167 };
168 
169 static const struct eth_dev_ops ice_eth_dev_ops = {
170 	.dev_configure                = ice_dev_configure,
171 	.dev_start                    = ice_dev_start,
172 	.dev_stop                     = ice_dev_stop,
173 	.dev_close                    = ice_dev_close,
174 	.dev_reset                    = ice_dev_reset,
175 	.dev_set_link_up              = ice_dev_set_link_up,
176 	.dev_set_link_down            = ice_dev_set_link_down,
177 	.rx_queue_start               = ice_rx_queue_start,
178 	.rx_queue_stop                = ice_rx_queue_stop,
179 	.tx_queue_start               = ice_tx_queue_start,
180 	.tx_queue_stop                = ice_tx_queue_stop,
181 	.rx_queue_setup               = ice_rx_queue_setup,
182 	.rx_queue_release             = ice_rx_queue_release,
183 	.tx_queue_setup               = ice_tx_queue_setup,
184 	.tx_queue_release             = ice_tx_queue_release,
185 	.dev_infos_get                = ice_dev_info_get,
186 	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
187 	.link_update                  = ice_link_update,
188 	.mtu_set                      = ice_mtu_set,
189 	.mac_addr_set                 = ice_macaddr_set,
190 	.mac_addr_add                 = ice_macaddr_add,
191 	.mac_addr_remove              = ice_macaddr_remove,
192 	.vlan_filter_set              = ice_vlan_filter_set,
193 	.vlan_offload_set             = ice_vlan_offload_set,
194 	.reta_update                  = ice_rss_reta_update,
195 	.reta_query                   = ice_rss_reta_query,
196 	.rss_hash_update              = ice_rss_hash_update,
197 	.rss_hash_conf_get            = ice_rss_hash_conf_get,
198 	.promiscuous_enable           = ice_promisc_enable,
199 	.promiscuous_disable          = ice_promisc_disable,
200 	.allmulticast_enable          = ice_allmulti_enable,
201 	.allmulticast_disable         = ice_allmulti_disable,
202 	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
203 	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
204 	.fw_version_get               = ice_fw_version_get,
205 	.vlan_pvid_set                = ice_vlan_pvid_set,
206 	.rxq_info_get                 = ice_rxq_info_get,
207 	.txq_info_get                 = ice_txq_info_get,
208 	.rx_burst_mode_get            = ice_rx_burst_mode_get,
209 	.tx_burst_mode_get            = ice_tx_burst_mode_get,
210 	.get_eeprom_length            = ice_get_eeprom_length,
211 	.get_eeprom                   = ice_get_eeprom,
212 	.rx_queue_count               = ice_rx_queue_count,
213 	.rx_descriptor_status         = ice_rx_descriptor_status,
214 	.tx_descriptor_status         = ice_tx_descriptor_status,
215 	.stats_get                    = ice_stats_get,
216 	.stats_reset                  = ice_stats_reset,
217 	.xstats_get                   = ice_xstats_get,
218 	.xstats_get_names             = ice_xstats_get_names,
219 	.xstats_reset                 = ice_stats_reset,
220 	.filter_ctrl                  = ice_dev_filter_ctrl,
221 	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
222 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
223 };
224 
225 /* store statistics names and their offsets in the stats structure */
226 struct ice_xstats_name_off {
227 	char name[RTE_ETH_XSTATS_NAME_SIZE];
228 	unsigned int offset;
229 };
230 
231 static const struct ice_xstats_name_off ice_stats_strings[] = {
232 	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
233 	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
234 	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
235 	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
236 	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
237 		rx_unknown_protocol)},
238 	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
239 	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
240 	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
241 	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
242 };
243 
244 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
245 		sizeof(ice_stats_strings[0]))
246 
247 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
248 	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
249 		tx_dropped_link_down)},
250 	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
251 	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
252 		illegal_bytes)},
253 	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
254 	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
255 		mac_local_faults)},
256 	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
257 		mac_remote_faults)},
258 	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
259 		rx_len_errors)},
260 	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
261 	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
262 	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
263 	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
264 	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
265 	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
266 		rx_size_127)},
267 	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
268 		rx_size_255)},
269 	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
270 		rx_size_511)},
271 	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
272 		rx_size_1023)},
273 	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
274 		rx_size_1522)},
275 	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
276 		rx_size_big)},
277 	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
278 		rx_undersize)},
279 	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
280 		rx_oversize)},
281 	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
282 		mac_short_pkt_dropped)},
283 	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
284 		rx_fragments)},
285 	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
286 	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
287 	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
288 		tx_size_127)},
289 	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
290 		tx_size_255)},
291 	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
292 		tx_size_511)},
293 	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
294 		tx_size_1023)},
295 	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
296 		tx_size_1522)},
297 	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
298 		tx_size_big)},
299 };
300 
301 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
302 		sizeof(ice_hw_port_strings[0]))
303 
304 static void
305 ice_init_controlq_parameter(struct ice_hw *hw)
306 {
307 	/* fields for adminq */
308 	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
309 	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
310 	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
311 	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
312 
313 	/* fields for mailboxq; DPDK acts as the PF host */
314 	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
315 	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
316 	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
317 	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
318 }
319 
320 static int
321 lookup_proto_xtr_type(const char *xtr_name)
322 {
323 	static struct {
324 		const char *name;
325 		enum proto_xtr_type type;
326 	} xtr_type_map[] = {
327 		{ "vlan",      PROTO_XTR_VLAN      },
328 		{ "ipv4",      PROTO_XTR_IPV4      },
329 		{ "ipv6",      PROTO_XTR_IPV6      },
330 		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
331 		{ "tcp",       PROTO_XTR_TCP       },
332 	};
333 	uint32_t i;
334 
335 	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
336 		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
337 			return xtr_type_map[i].type;
338 	}
339 
340 	return -1;
341 }
342 
343 /*
344  * Parse an element; an element can be a single number/range or a '(' ')' group.
345  * 1) A single number element is just a simple digit, e.g. 9
346  * 2) A single range element is two digits separated by a '-', e.g. 2-6
347  * 3) A group element combines multiple 1) or 2) within '( )', e.g. (0,2-4,6)
348  *    Within a group element, '-' is used as the range separator and
349  *                            ',' separates single numbers.
350  */
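/*
 * Example (editorial): in a proto_xtr devargs value, the element "2-6:tcp"
 * assigns TCP extraction to queues 2..6, and "(0,2-4,6):vlan" assigns VLAN
 * extraction to queues 0, 2, 3, 4 and 6.
 */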
351 static int
352 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
353 {
354 	const char *str = input;
355 	char *end = NULL;
356 	uint32_t min, max;
357 	uint32_t idx;
358 
359 	while (isblank(*str))
360 		str++;
361 
362 	if (!isdigit(*str) && *str != '(')
363 		return -1;
364 
365 	/* process single number or single range of number */
366 	if (*str != '(') {
367 		errno = 0;
368 		idx = strtoul(str, &end, 10);
369 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
370 			return -1;
371 
372 		while (isblank(*end))
373 			end++;
374 
375 		min = idx;
376 		max = idx;
377 
378 		/* process single <number>-<number> */
379 		if (*end == '-') {
380 			end++;
381 			while (isblank(*end))
382 				end++;
383 			if (!isdigit(*end))
384 				return -1;
385 
386 			errno = 0;
387 			idx = strtoul(end, &end, 10);
388 			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
389 				return -1;
390 
391 			max = idx;
392 			while (isblank(*end))
393 				end++;
394 		}
395 
396 		if (*end != ':')
397 			return -1;
398 
399 		for (idx = RTE_MIN(min, max);
400 		     idx <= RTE_MAX(min, max); idx++)
401 			devargs->proto_xtr[idx] = xtr_type;
402 
403 		return 0;
404 	}
405 
406 	/* process set within bracket */
407 	str++;
408 	while (isblank(*str))
409 		str++;
410 	if (*str == '\0')
411 		return -1;
412 
413 	min = ICE_MAX_QUEUE_NUM;
414 	do {
415 		/* advance to the first digit */
416 		while (isblank(*str))
417 			str++;
418 		if (!isdigit(*str))
419 			return -1;
420 
421 		/* get the digit value */
422 		errno = 0;
423 		idx = strtoul(str, &end, 10);
424 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
425 			return -1;
426 
427 		/* advance to the separator '-', ',' or ')' */
428 		while (isblank(*end))
429 			end++;
430 		if (*end == '-') {
431 			if (min == ICE_MAX_QUEUE_NUM)
432 				min = idx;
433 			else /* avoid consecutive '-' */
434 				return -1;
435 		} else if (*end == ',' || *end == ')') {
436 			max = idx;
437 			if (min == ICE_MAX_QUEUE_NUM)
438 				min = idx;
439 
440 			for (idx = RTE_MIN(min, max);
441 			     idx <= RTE_MAX(min, max); idx++)
442 				devargs->proto_xtr[idx] = xtr_type;
443 
444 			min = ICE_MAX_QUEUE_NUM;
445 		} else {
446 			return -1;
447 		}
448 
449 		str = end + 1;
450 	} while (*end != ')' && *end != '\0');
451 
452 	return 0;
453 }
454 
455 static int
456 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
457 {
458 	const char *queue_start;
459 	uint32_t idx;
460 	int xtr_type;
461 	char xtr_name[32];
462 
463 	while (isblank(*queues))
464 		queues++;
465 
466 	if (*queues != '[') {
467 		xtr_type = lookup_proto_xtr_type(queues);
468 		if (xtr_type < 0)
469 			return -1;
470 
471 		devargs->proto_xtr_dflt = xtr_type;
472 
473 		return 0;
474 	}
475 
476 	queues++;
477 	do {
478 		while (isblank(*queues))
479 			queues++;
480 		if (*queues == '\0')
481 			return -1;
482 
483 		queue_start = queues;
484 
485 		/* skip across a complete bracketed group */
486 		if (*queue_start == '(') {
487 			queues += strcspn(queues, ")");
488 			if (*queues != ')')
489 				return -1;
490 		}
491 
492 		/* scan the separator ':' */
493 		queues += strcspn(queues, ":");
494 		if (*queues++ != ':')
495 			return -1;
496 		while (isblank(*queues))
497 			queues++;
498 
499 		for (idx = 0; ; idx++) {
500 			if (isblank(queues[idx]) ||
501 			    queues[idx] == ',' ||
502 			    queues[idx] == ']' ||
503 			    queues[idx] == '\0')
504 				break;
505 
506 			if (idx > sizeof(xtr_name) - 2)
507 				return -1;
508 
509 			xtr_name[idx] = queues[idx];
510 		}
511 		xtr_name[idx] = '\0';
512 		xtr_type = lookup_proto_xtr_type(xtr_name);
513 		if (xtr_type < 0)
514 			return -1;
515 
516 		queues += idx;
517 
518 		while (isblank(*queues) || *queues == ',' || *queues == ']')
519 			queues++;
520 
521 		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
522 			return -1;
523 	} while (*queues != '\0');
524 
525 	return 0;
526 }
527 
528 static int
529 handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
530 		     void *extra_args)
531 {
532 	struct ice_devargs *devargs = extra_args;
533 
534 	if (value == NULL || extra_args == NULL)
535 		return -EINVAL;
536 
537 	if (parse_queue_proto_xtr(value, devargs) < 0) {
538 		PMD_DRV_LOG(ERR,
539 			    "The protocol extraction parameter is wrong : '%s'",
540 			    value);
541 		return -1;
542 	}
543 
544 	return 0;
545 }
546 
547 static bool
548 ice_proto_xtr_support(struct ice_hw *hw)
549 {
550 #define FLX_REG(val, fld, idx) \
551 	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
552 	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
553 	static struct {
554 		uint32_t rxdid;
555 		uint16_t protid_0;
556 		uint16_t protid_1;
557 	} xtr_sets[] = {
558 		{ ICE_RXDID_COMMS_AUX_VLAN, ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O },
559 		{ ICE_RXDID_COMMS_AUX_IPV4, ICE_PROT_IPV4_OF_OR_S,
560 		  ICE_PROT_IPV4_OF_OR_S },
561 		{ ICE_RXDID_COMMS_AUX_IPV6, ICE_PROT_IPV6_OF_OR_S,
562 		  ICE_PROT_IPV6_OF_OR_S },
563 		{ ICE_RXDID_COMMS_AUX_IPV6_FLOW, ICE_PROT_IPV6_OF_OR_S,
564 		  ICE_PROT_IPV6_OF_OR_S },
565 		{ ICE_RXDID_COMMS_AUX_TCP, ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
566 	};
567 	uint32_t i;
568 
569 	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
570 		uint32_t rxdid = xtr_sets[i].rxdid;
571 		uint32_t v;
572 
573 		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
574 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
575 
576 			if (FLX_REG(v, PROT_MDID, 4) != xtr_sets[i].protid_0 ||
577 			    FLX_REG(v, RXDID_OPCODE, 4) != ICE_RX_OPC_EXTRACT)
578 				return false;
579 		}
580 
581 		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
582 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
583 
584 			if (FLX_REG(v, PROT_MDID, 5) != xtr_sets[i].protid_1 ||
585 			    FLX_REG(v, RXDID_OPCODE, 5) != ICE_RX_OPC_EXTRACT)
586 				return false;
587 		}
588 	}
589 
590 	return true;
591 }
592 
593 static int
594 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
595 		  uint32_t num)
596 {
597 	struct pool_entry *entry;
598 
599 	if (!pool || !num)
600 		return -EINVAL;
601 
602 	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
603 	if (!entry) {
604 		PMD_INIT_LOG(ERR,
605 			     "Failed to allocate memory for resource pool");
606 		return -ENOMEM;
607 	}
608 
609 	/* initialize the queue heap */
610 	pool->num_free = num;
611 	pool->num_alloc = 0;
612 	pool->base = base;
613 	LIST_INIT(&pool->alloc_list);
614 	LIST_INIT(&pool->free_list);
615 
616 	/* Initialize element  */
617 	entry->base = 0;
618 	entry->len = num;
619 
620 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
621 	return 0;
622 }
623 
624 static int
625 ice_res_pool_alloc(struct ice_res_pool_info *pool,
626 		   uint16_t num)
627 {
628 	struct pool_entry *entry, *valid_entry;
629 
630 	if (!pool || !num) {
631 		PMD_INIT_LOG(ERR, "Invalid parameter");
632 		return -EINVAL;
633 	}
634 
635 	if (pool->num_free < num) {
636 		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
637 			     num, pool->num_free);
638 		return -ENOMEM;
639 	}
640 
641 	valid_entry = NULL;
642 	/* Look up the free list and find the best-fit entry */
643 	LIST_FOREACH(entry, &pool->free_list, next) {
644 		if (entry->len >= num) {
645 			/* Find best one */
646 			if (entry->len == num) {
647 				valid_entry = entry;
648 				break;
649 			}
650 			if (!valid_entry ||
651 			    valid_entry->len > entry->len)
652 				valid_entry = entry;
653 		}
654 	}
655 
656 	/* No entry found that satisfies the request, return */
657 	if (!valid_entry) {
658 		PMD_INIT_LOG(ERR, "No valid entry found");
659 		return -ENOMEM;
660 	}
661 	/**
662 	 * The entry has exactly the requested number of queues;
663 	 * remove it from the free list.
664 	 */
665 	if (valid_entry->len == num) {
666 		LIST_REMOVE(valid_entry, next);
667 	} else {
668 		/**
669 		 * The entry has more queues than requested; create a new
670 		 * entry for the alloc list and shrink the base/length of
671 		 * the entry remaining in the free list accordingly.
672 		 */
673 		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
674 		if (!entry) {
675 			PMD_INIT_LOG(ERR,
676 				     "Failed to allocate memory for "
677 				     "resource pool");
678 			return -ENOMEM;
679 		}
680 		entry->base = valid_entry->base;
681 		entry->len = num;
682 		valid_entry->base += num;
683 		valid_entry->len -= num;
684 		valid_entry = entry;
685 	}
686 
687 	/* Insert it into alloc list, not sorted */
688 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
689 
690 	pool->num_free -= valid_entry->len;
691 	pool->num_alloc += valid_entry->len;
692 
693 	return valid_entry->base + pool->base;
694 }
695 
696 static void
697 ice_res_pool_destroy(struct ice_res_pool_info *pool)
698 {
699 	struct pool_entry *entry, *next_entry;
700 
701 	if (!pool)
702 		return;
703 
704 	for (entry = LIST_FIRST(&pool->alloc_list);
705 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
706 	     entry = next_entry) {
707 		LIST_REMOVE(entry, next);
708 		rte_free(entry);
709 	}
710 
711 	for (entry = LIST_FIRST(&pool->free_list);
712 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
713 	     entry = next_entry) {
714 		LIST_REMOVE(entry, next);
715 		rte_free(entry);
716 	}
717 
718 	pool->num_free = 0;
719 	pool->num_alloc = 0;
720 	pool->base = 0;
721 	LIST_INIT(&pool->alloc_list);
722 	LIST_INIT(&pool->free_list);
723 }
724 
725 static void
726 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
727 {
728 	/* Set VSI LUT selection */
729 	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
730 			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
731 	/* Set Hash scheme */
732 	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
733 			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
734 	/* enable TC */
735 	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
736 }
737 
738 static enum ice_status
739 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
740 				struct ice_aqc_vsi_props *info,
741 				uint8_t enabled_tcmap)
742 {
743 	uint16_t bsf, qp_idx;
744 
745 	/* Only TC0 is supported for now; multi-TC support needs to be added
746 	 * later. Configure the TC and queue mapping parameters: for each
747 	 * enabled TC, allocate qpnum_per_tc queues to that traffic class.
748 	 */
749 	if (enabled_tcmap != 0x01) {
750 		PMD_INIT_LOG(ERR, "only TC0 is supported");
751 		return -ENOTSUP;
752 	}
753 
754 	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
755 	bsf = rte_bsf32(vsi->nb_qps);
756 	/* Adjust the queue number to the actual number of queues that can be applied */
757 	vsi->nb_qps = 0x1 << bsf;
758 
759 	qp_idx = 0;
760 	/* Set tc and queue mapping with VSI */
761 	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
762 						ICE_AQ_VSI_TC_Q_OFFSET_S) |
763 					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
764 
765 	/* Associate queue number with VSI */
766 	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
767 	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
768 	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
769 	info->valid_sections |=
770 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
771 	/* Set info.ingress_table and info.egress_table
772 	 * for the UP translation table. For now just set it to a 1:1 map
773 	 * by default -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
774 	 */
775 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
776 	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
777 	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
778 	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
779 	return 0;
780 }
781 
782 static int
783 ice_init_mac_address(struct rte_eth_dev *dev)
784 {
785 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
786 
787 	if (!rte_is_unicast_ether_addr
788 		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
789 		PMD_INIT_LOG(ERR, "Invalid MAC address");
790 		return -EINVAL;
791 	}
792 
793 	rte_ether_addr_copy(
794 		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
795 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
796 
797 	dev->data->mac_addrs =
798 		rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0);
799 	if (!dev->data->mac_addrs) {
800 		PMD_INIT_LOG(ERR,
801 			     "Failed to allocate memory to store mac address");
802 		return -ENOMEM;
803 	}
804 	/* store it to dev data */
805 	rte_ether_addr_copy(
806 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
807 		&dev->data->mac_addrs[0]);
808 	return 0;
809 }
810 
811 /* Find a specific MAC filter */
812 static struct ice_mac_filter *
813 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
814 {
815 	struct ice_mac_filter *f;
816 
817 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
818 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
819 			return f;
820 	}
821 
822 	return NULL;
823 }
824 
825 static int
826 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
827 {
828 	struct ice_fltr_list_entry *m_list_itr = NULL;
829 	struct ice_mac_filter *f;
830 	struct LIST_HEAD_TYPE list_head;
831 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
832 	int ret = 0;
833 
834 	/* If it's added and configured, return */
835 	f = ice_find_mac_filter(vsi, mac_addr);
836 	if (f) {
837 		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
838 		return 0;
839 	}
840 
841 	INIT_LIST_HEAD(&list_head);
842 
843 	m_list_itr = (struct ice_fltr_list_entry *)
844 		ice_malloc(hw, sizeof(*m_list_itr));
845 	if (!m_list_itr) {
846 		ret = -ENOMEM;
847 		goto DONE;
848 	}
849 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
850 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
851 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
852 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
853 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
854 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
855 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
856 
857 	LIST_ADD(&m_list_itr->list_entry, &list_head);
858 
859 	/* Add the mac */
860 	ret = ice_add_mac(hw, &list_head);
861 	if (ret != ICE_SUCCESS) {
862 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
863 		ret = -EINVAL;
864 		goto DONE;
865 	}
866 	/* Add the mac addr into mac list */
867 	f = rte_zmalloc(NULL, sizeof(*f), 0);
868 	if (!f) {
869 		PMD_DRV_LOG(ERR, "failed to allocate memory");
870 		ret = -ENOMEM;
871 		goto DONE;
872 	}
873 	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
874 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
875 	vsi->mac_num++;
876 
877 	ret = 0;
878 
879 DONE:
880 	rte_free(m_list_itr);
881 	return ret;
882 }
883 
884 static int
885 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
886 {
887 	struct ice_fltr_list_entry *m_list_itr = NULL;
888 	struct ice_mac_filter *f;
889 	struct LIST_HEAD_TYPE list_head;
890 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
891 	int ret = 0;
892 
893 	/* Can't find it, return an error */
894 	f = ice_find_mac_filter(vsi, mac_addr);
895 	if (!f)
896 		return -EINVAL;
897 
898 	INIT_LIST_HEAD(&list_head);
899 
900 	m_list_itr = (struct ice_fltr_list_entry *)
901 		ice_malloc(hw, sizeof(*m_list_itr));
902 	if (!m_list_itr) {
903 		ret = -ENOMEM;
904 		goto DONE;
905 	}
906 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
907 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
908 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
909 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
910 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
911 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
912 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
913 
914 	LIST_ADD(&m_list_itr->list_entry, &list_head);
915 
916 	/* remove the mac filter */
917 	ret = ice_remove_mac(hw, &list_head);
918 	if (ret != ICE_SUCCESS) {
919 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
920 		ret = -EINVAL;
921 		goto DONE;
922 	}
923 
924 	/* Remove the mac addr from mac list */
925 	TAILQ_REMOVE(&vsi->mac_list, f, next);
926 	rte_free(f);
927 	vsi->mac_num--;
928 
929 	ret = 0;
930 DONE:
931 	rte_free(m_list_itr);
932 	return ret;
933 }
934 
935 /* Find a specific VLAN filter */
936 static struct ice_vlan_filter *
937 ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
938 {
939 	struct ice_vlan_filter *f;
940 
941 	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
942 		if (vlan_id == f->vlan_info.vlan_id)
943 			return f;
944 	}
945 
946 	return NULL;
947 }
948 
949 static int
950 ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
951 {
952 	struct ice_fltr_list_entry *v_list_itr = NULL;
953 	struct ice_vlan_filter *f;
954 	struct LIST_HEAD_TYPE list_head;
955 	struct ice_hw *hw;
956 	int ret = 0;
957 
958 	if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
959 		return -EINVAL;
960 
961 	hw = ICE_VSI_TO_HW(vsi);
962 
963 	/* If it's added and configured, return. */
964 	f = ice_find_vlan_filter(vsi, vlan_id);
965 	if (f) {
966 		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
967 		return 0;
968 	}
969 
970 	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
971 		return 0;
972 
973 	INIT_LIST_HEAD(&list_head);
974 
975 	v_list_itr = (struct ice_fltr_list_entry *)
976 		      ice_malloc(hw, sizeof(*v_list_itr));
977 	if (!v_list_itr) {
978 		ret = -ENOMEM;
979 		goto DONE;
980 	}
981 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
982 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
983 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
984 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
985 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
986 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
987 
988 	LIST_ADD(&v_list_itr->list_entry, &list_head);
989 
990 	/* Add the vlan */
991 	ret = ice_add_vlan(hw, &list_head);
992 	if (ret != ICE_SUCCESS) {
993 		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
994 		ret = -EINVAL;
995 		goto DONE;
996 	}
997 
998 	/* Add vlan into vlan list */
999 	f = rte_zmalloc(NULL, sizeof(*f), 0);
1000 	if (!f) {
1001 		PMD_DRV_LOG(ERR, "failed to allocate memory");
1002 		ret = -ENOMEM;
1003 		goto DONE;
1004 	}
1005 	f->vlan_info.vlan_id = vlan_id;
1006 	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
1007 	vsi->vlan_num++;
1008 
1009 	ret = 0;
1010 
1011 DONE:
1012 	rte_free(v_list_itr);
1013 	return ret;
1014 }
1015 
1016 static int
1017 ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
1018 {
1019 	struct ice_fltr_list_entry *v_list_itr = NULL;
1020 	struct ice_vlan_filter *f;
1021 	struct LIST_HEAD_TYPE list_head;
1022 	struct ice_hw *hw;
1023 	int ret = 0;
1024 
1025 	/**
1026 	 * Vlan 0 is the generic filter for untagged packets
1027 	 * and can't be removed.
1028 	 */
1029 	if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
1030 		return -EINVAL;
1031 
1032 	hw = ICE_VSI_TO_HW(vsi);
1033 
1034 	/* Can't find it, return an error */
1035 	f = ice_find_vlan_filter(vsi, vlan_id);
1036 	if (!f)
1037 		return -EINVAL;
1038 
1039 	INIT_LIST_HEAD(&list_head);
1040 
1041 	v_list_itr = (struct ice_fltr_list_entry *)
1042 		      ice_malloc(hw, sizeof(*v_list_itr));
1043 	if (!v_list_itr) {
1044 		ret = -ENOMEM;
1045 		goto DONE;
1046 	}
1047 
1048 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
1049 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1050 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1051 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1052 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1053 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
1054 
1055 	LIST_ADD(&v_list_itr->list_entry, &list_head);
1056 
1057 	/* remove the vlan filter */
1058 	ret = ice_remove_vlan(hw, &list_head);
1059 	if (ret != ICE_SUCCESS) {
1060 		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
1061 		ret = -EINVAL;
1062 		goto DONE;
1063 	}
1064 
1065 	/* Remove the vlan id from vlan list */
1066 	TAILQ_REMOVE(&vsi->vlan_list, f, next);
1067 	rte_free(f);
1068 	vsi->vlan_num--;
1069 
1070 	ret = 0;
1071 DONE:
1072 	rte_free(v_list_itr);
1073 	return ret;
1074 }
1075 
1076 static int
1077 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
1078 {
1079 	struct ice_mac_filter *m_f;
1080 	struct ice_vlan_filter *v_f;
1081 	int ret = 0;
1082 
1083 	if (!vsi || !vsi->mac_num)
1084 		return -EINVAL;
1085 
1086 	TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
1087 		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
1088 		if (ret != ICE_SUCCESS) {
1089 			ret = -EINVAL;
1090 			goto DONE;
1091 		}
1092 	}
1093 
1094 	if (vsi->vlan_num == 0)
1095 		return 0;
1096 
1097 	TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
1098 		ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
1099 		if (ret != ICE_SUCCESS) {
1100 			ret = -EINVAL;
1101 			goto DONE;
1102 		}
1103 	}
1104 
1105 DONE:
1106 	return ret;
1107 }
1108 
1109 static int
1110 ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
1111 {
1112 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1113 	struct ice_vsi_ctx ctxt;
1114 	uint8_t qinq_flags;
1115 	int ret = 0;
1116 
1117 	/* Check whether it is already on or off */
1118 	if (vsi->info.valid_sections &
1119 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
1120 		if (on) {
1121 			if ((vsi->info.outer_tag_flags &
1122 			     ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
1123 			    ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
1124 				return 0; /* already on */
1125 		} else {
1126 			if (!(vsi->info.outer_tag_flags &
1127 			      ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
1128 				return 0; /* already off */
1129 		}
1130 	}
1131 
1132 	if (on)
1133 		qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
1134 	else
1135 		qinq_flags = 0;
1136 	/* clear global insertion and use per packet insertion */
1137 	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
1138 	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
1139 	vsi->info.outer_tag_flags |= qinq_flags;
1140 	/* use the default outer tag type 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
1141 	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
1142 	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
1143 				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
1144 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1145 	ctxt.info.valid_sections =
1146 			rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
1147 	ctxt.vsi_num = vsi->vsi_id;
1148 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
1149 	if (ret) {
1150 		PMD_DRV_LOG(INFO,
1151 			    "Update VSI failed to %s qinq stripping",
1152 			    on ? "enable" : "disable");
1153 		return -EINVAL;
1154 	}
1155 
1156 	vsi->info.valid_sections |=
1157 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
1158 
1159 	return ret;
1160 }
1161 
1162 static int
1163 ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
1164 {
1165 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1166 	struct ice_vsi_ctx ctxt;
1167 	uint8_t qinq_flags;
1168 	int ret = 0;
1169 
1170 	/* Check whether it is already on or off */
1171 	if (vsi->info.valid_sections &
1172 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
1173 		if (on) {
1174 			if ((vsi->info.outer_tag_flags &
1175 			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
1176 			    ICE_AQ_VSI_OUTER_TAG_COPY)
1177 				return 0; /* already on */
1178 		} else {
1179 			if ((vsi->info.outer_tag_flags &
1180 			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
1181 			    ICE_AQ_VSI_OUTER_TAG_NOTHING)
1182 				return 0; /* already off */
1183 		}
1184 	}
1185 
1186 	if (on)
1187 		qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
1188 	else
1189 		qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
1190 	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
1191 	vsi->info.outer_tag_flags |= qinq_flags;
1192 	/* use the default outer tag type 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
1193 	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
1194 	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
1195 				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
1196 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1197 	ctxt.info.valid_sections =
1198 			rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
1199 	ctxt.vsi_num = vsi->vsi_id;
1200 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
1201 	if (ret) {
1202 		PMD_DRV_LOG(INFO,
1203 			    "Update VSI failed to %s qinq stripping",
1204 			    on ? "enable" : "disable");
1205 		return -EINVAL;
1206 	}
1207 
1208 	vsi->info.valid_sections |=
1209 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
1210 
1211 	return ret;
1212 }
1213 
1214 static int
1215 ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
1216 {
1217 	int ret;
1218 
1219 	ret = ice_vsi_config_qinq_stripping(vsi, on);
1220 	if (ret)
1221 		PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);
1222 
1223 	ret = ice_vsi_config_qinq_insertion(vsi, on);
1224 	if (ret)
1225 		PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
1226 
1227 	return ret;
1228 }
1229 
1230 /* Enable IRQ0 */
1231 static void
1232 ice_pf_enable_irq0(struct ice_hw *hw)
1233 {
1234 	/* reset the registers */
1235 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
1236 	ICE_READ_REG(hw, PFINT_OICR);
1237 
1238 #ifdef ICE_LSE_SPT
1239 	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
1240 		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
1241 				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
1242 
1243 	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
1244 		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
1245 		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
1246 		       PFINT_OICR_CTL_ITR_INDX_M) |
1247 		      PFINT_OICR_CTL_CAUSE_ENA_M);
1248 
1249 	ICE_WRITE_REG(hw, PFINT_FW_CTL,
1250 		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
1251 		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
1252 		       PFINT_FW_CTL_ITR_INDX_M) |
1253 		      PFINT_FW_CTL_CAUSE_ENA_M);
1254 #else
1255 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
1256 #endif
1257 
1258 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1259 		      GLINT_DYN_CTL_INTENA_M |
1260 		      GLINT_DYN_CTL_CLEARPBA_M |
1261 		      GLINT_DYN_CTL_ITR_INDX_M);
1262 
1263 	ice_flush(hw);
1264 }
1265 
1266 /* Disable IRQ0 */
1267 static void
1268 ice_pf_disable_irq0(struct ice_hw *hw)
1269 {
1270 	/* Disable all interrupt types */
1271 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1272 	ice_flush(hw);
1273 }
1274 
1275 #ifdef ICE_LSE_SPT
1276 static void
1277 ice_handle_aq_msg(struct rte_eth_dev *dev)
1278 {
1279 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1280 	struct ice_ctl_q_info *cq = &hw->adminq;
1281 	struct ice_rq_event_info event;
1282 	uint16_t pending, opcode;
1283 	int ret;
1284 
1285 	event.buf_len = ICE_AQ_MAX_BUF_LEN;
1286 	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
1287 	if (!event.msg_buf) {
1288 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
1289 		return;
1290 	}
1291 
1292 	pending = 1;
1293 	while (pending) {
1294 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1295 
1296 		if (ret != ICE_SUCCESS) {
1297 			PMD_DRV_LOG(INFO,
1298 				    "Failed to read msg from AdminQ, "
1299 				    "adminq_err: %u",
1300 				    hw->adminq.sq_last_status);
1301 			break;
1302 		}
1303 		opcode = rte_le_to_cpu_16(event.desc.opcode);
1304 
1305 		switch (opcode) {
1306 		case ice_aqc_opc_get_link_status:
1307 			ret = ice_link_update(dev, 0);
1308 			if (!ret)
1309 				_rte_eth_dev_callback_process
1310 					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1311 			break;
1312 		default:
1313 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1314 				    opcode);
1315 			break;
1316 		}
1317 	}
1318 	rte_free(event.msg_buf);
1319 }
1320 #endif
1321 
1322 /**
1323  * Interrupt handler triggered by the NIC for handling a
1324  * specific interrupt.
1325  *
1326  * @param handle
1327  *  Pointer to interrupt handle.
1328  * @param param
1329  *  The address of the parameter (struct rte_eth_dev *) registered before.
1330  *
1331  * @return
1332  *  void
1333  */
1334 static void
1335 ice_interrupt_handler(void *param)
1336 {
1337 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1338 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1339 	uint32_t oicr;
1340 	uint32_t reg;
1341 	uint8_t pf_num;
1342 	uint8_t event;
1343 	uint16_t queue;
1344 	int ret;
1345 #ifdef ICE_LSE_SPT
1346 	uint32_t int_fw_ctl;
1347 #endif
1348 
1349 	/* Disable interrupt */
1350 	ice_pf_disable_irq0(hw);
1351 
1352 	/* read out interrupt causes */
1353 	oicr = ICE_READ_REG(hw, PFINT_OICR);
1354 #ifdef ICE_LSE_SPT
1355 	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1356 #endif
1357 
1358 	/* No interrupt event indicated */
1359 	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1360 		PMD_DRV_LOG(INFO, "No interrupt event");
1361 		goto done;
1362 	}
1363 
1364 #ifdef ICE_LSE_SPT
1365 	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1366 		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1367 		ice_handle_aq_msg(dev);
1368 	}
1369 #else
1370 	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1371 		PMD_DRV_LOG(INFO, "OICR: link state change event");
1372 		ret = ice_link_update(dev, 0);
1373 		if (!ret)
1374 			_rte_eth_dev_callback_process
1375 				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1376 	}
1377 #endif
1378 
1379 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
1380 		PMD_DRV_LOG(WARNING, "OICR: MDD event");
1381 		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1382 		if (reg & GL_MDET_TX_PQM_VALID_M) {
1383 			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1384 				 GL_MDET_TX_PQM_PF_NUM_S;
1385 			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1386 				GL_MDET_TX_PQM_MAL_TYPE_S;
1387 			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1388 				GL_MDET_TX_PQM_QNUM_S;
1389 
1390 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1391 				    "%d by PQM on TX queue %d PF# %d",
1392 				    event, queue, pf_num);
1393 		}
1394 
1395 		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1396 		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1397 			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1398 				 GL_MDET_TX_TCLAN_PF_NUM_S;
1399 			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1400 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1401 			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1402 				GL_MDET_TX_TCLAN_QNUM_S;
1403 
1404 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1405 				    "%d by TCLAN on TX queue %d PF# %d",
1406 				    event, queue, pf_num);
1407 		}
1408 	}
1409 done:
1410 	/* Enable interrupt */
1411 	ice_pf_enable_irq0(hw);
1412 	rte_intr_ack(dev->intr_handle);
1413 }
1414 
1415 static void
1416 ice_init_proto_xtr(struct rte_eth_dev *dev)
1417 {
1418 	struct ice_adapter *ad =
1419 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1420 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1421 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1422 	const struct proto_xtr_ol_flag *ol_flag;
1423 	bool proto_xtr_enable = false;
1424 	int offset;
1425 	uint16_t i;
1426 
1427 	if (!ice_proto_xtr_support(hw)) {
1428 		PMD_DRV_LOG(NOTICE, "Protocol extraction is not supported");
1429 		return;
1430 	}
1431 
1432 	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1433 	if (unlikely(pf->proto_xtr == NULL)) {
1434 		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1435 		return;
1436 	}
1437 
1438 	for (i = 0; i < pf->lan_nb_qps; i++) {
1439 		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1440 				   ad->devargs.proto_xtr[i] :
1441 				   ad->devargs.proto_xtr_dflt;
1442 
1443 		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1444 			uint8_t type = pf->proto_xtr[i];
1445 
1446 			ice_proto_xtr_ol_flag_params[type].required = true;
1447 			proto_xtr_enable = true;
1448 		}
1449 	}
1450 
1451 	if (likely(!proto_xtr_enable))
1452 		return;
1453 
1454 	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1455 	if (unlikely(offset == -1)) {
1456 		PMD_DRV_LOG(ERR,
1457 			    "Protocol extraction metadata is disabled in mbuf with error %d",
1458 			    -rte_errno);
1459 		return;
1460 	}
1461 
1462 	PMD_DRV_LOG(DEBUG,
1463 		    "Protocol extraction metadata offset in mbuf is : %d",
1464 		    offset);
1465 	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1466 
1467 	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1468 		ol_flag = &ice_proto_xtr_ol_flag_params[i];
1469 
1470 		if (!ol_flag->required)
1471 			continue;
1472 
1473 		offset = rte_mbuf_dynflag_register(&ol_flag->param);
1474 		if (unlikely(offset == -1)) {
1475 			PMD_DRV_LOG(ERR,
1476 				    "Protocol extraction offload '%s' failed to register with error %d",
1477 				    ol_flag->param.name, -rte_errno);
1478 
1479 			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1480 			break;
1481 		}
1482 
1483 		PMD_DRV_LOG(DEBUG,
1484 			    "Protocol extraction offload '%s' offset in mbuf is : %d",
1485 			    ol_flag->param.name, offset);
1486 		*ol_flag->ol_flag = 1ULL << offset;
1487 	}
1488 }
1489 
1490 /*  Initialize SW parameters of PF */
1491 static int
1492 ice_pf_sw_init(struct rte_eth_dev *dev)
1493 {
1494 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1495 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1496 
1497 	pf->lan_nb_qp_max =
1498 		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1499 				  hw->func_caps.common_cap.num_rxq);
1500 
1501 	pf->lan_nb_qps = pf->lan_nb_qp_max;
1502 
1503 	ice_init_proto_xtr(dev);
1504 
1505 	if (hw->func_caps.fd_fltr_guar > 0 ||
1506 	    hw->func_caps.fd_fltr_best_effort > 0) {
1507 		pf->flags |= ICE_FLAG_FDIR;
1508 		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1509 		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1510 	} else {
1511 		pf->fdir_nb_qps = 0;
1512 	}
1513 	pf->fdir_qp_offset = 0;
1514 
1515 	return 0;
1516 }
1517 
1518 struct ice_vsi *
1519 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1520 {
1521 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1522 	struct ice_vsi *vsi = NULL;
1523 	struct ice_vsi_ctx vsi_ctx;
1524 	int ret;
1525 	struct rte_ether_addr broadcast = {
1526 		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1527 	struct rte_ether_addr mac_addr;
1528 	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1529 	uint8_t tc_bitmap = 0x1;
1530 	uint16_t cfg;
1531 
1532 	/* hw->num_lports = 1 in NIC mode */
1533 	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1534 	if (!vsi)
1535 		return NULL;
1536 
1537 	vsi->idx = pf->next_vsi_idx;
1538 	pf->next_vsi_idx++;
1539 	vsi->type = type;
1540 	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1541 	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1542 	vsi->vlan_anti_spoof_on = 0;
1543 	vsi->vlan_filter_on = 1;
1544 	TAILQ_INIT(&vsi->mac_list);
1545 	TAILQ_INIT(&vsi->vlan_list);
1546 
1547 	/* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
1548 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1549 			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
1550 			hw->func_caps.common_cap.rss_table_size;
1551 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1552 
1553 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1554 	switch (type) {
1555 	case ICE_VSI_PF:
1556 		vsi->nb_qps = pf->lan_nb_qps;
1557 		vsi->base_queue = 1;
1558 		ice_vsi_config_default_rss(&vsi_ctx.info);
1559 		vsi_ctx.alloc_from_pool = true;
1560 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1561 		/* switch_id is queried by get_switch_config aq, which is done
1562 		 * by ice_init_hw
1563 		 */
1564 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1565 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1566 		/* Allow all untagged or tagged packets */
1567 		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
1568 		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
1569 		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1570 					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1571 
1572 		/* FDIR */
1573 		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1574 			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1575 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1576 		cfg = ICE_AQ_VSI_FD_ENABLE;
1577 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1578 		vsi_ctx.info.max_fd_fltr_dedicated =
1579 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1580 		vsi_ctx.info.max_fd_fltr_shared =
1581 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1582 
1583 		/* Enable VLAN/UP trip */
1584 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1585 						      &vsi_ctx.info,
1586 						      ICE_DEFAULT_TCMAP);
1587 		if (ret) {
1588 			PMD_INIT_LOG(ERR,
1589 				     "tc queue mapping with vsi failed, "
1590 				     "err = %d",
1591 				     ret);
1592 			goto fail_mem;
1593 		}
1594 
1595 		break;
1596 	case ICE_VSI_CTRL:
1597 		vsi->nb_qps = pf->fdir_nb_qps;
1598 		vsi->base_queue = ICE_FDIR_QUEUE_ID;
1599 		vsi_ctx.alloc_from_pool = true;
1600 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1601 
1602 		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1603 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1604 		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1605 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1606 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1607 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1608 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1609 						      &vsi_ctx.info,
1610 						      ICE_DEFAULT_TCMAP);
1611 		if (ret) {
1612 			PMD_INIT_LOG(ERR,
1613 				     "tc queue mapping with vsi failed, "
1614 				     "err = %d",
1615 				     ret);
1616 			goto fail_mem;
1617 		}
1618 		break;
1619 	default:
1620 		/* for other types of VSI */
1621 		PMD_INIT_LOG(ERR, "other types of VSI not supported");
1622 		goto fail_mem;
1623 	}
1624 
1625 	/* VF has MSIX interrupt in VF range, don't allocate here */
1626 	if (type == ICE_VSI_PF) {
1627 		ret = ice_res_pool_alloc(&pf->msix_pool,
1628 					 RTE_MIN(vsi->nb_qps,
1629 						 RTE_MAX_RXTX_INTR_VEC_ID));
1630 		if (ret < 0) {
1631 			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1632 				     vsi->vsi_id, ret);
1633 		}
1634 		vsi->msix_intr = ret;
1635 		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1636 	} else if (type == ICE_VSI_CTRL) {
1637 		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1638 		if (ret < 0) {
1639 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1640 				    vsi->vsi_id, ret);
1641 		}
1642 		vsi->msix_intr = ret;
1643 		vsi->nb_msix = 1;
1644 	} else {
1645 		vsi->msix_intr = 0;
1646 		vsi->nb_msix = 0;
1647 	}
1648 	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1649 	if (ret != ICE_SUCCESS) {
1650 		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1651 		goto fail_mem;
1652 	}
1653 	/* store VSI information in the SW structure */
1654 	vsi->vsi_id = vsi_ctx.vsi_num;
1655 	vsi->info = vsi_ctx.info;
1656 	pf->vsis_allocated = vsi_ctx.vsis_allocd;
1657 	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1658 
1659 	if (type == ICE_VSI_PF) {
1660 		/* MAC configuration */
1661 		rte_ether_addr_copy((struct rte_ether_addr *)
1662 					hw->port_info->mac.perm_addr,
1663 				    &pf->dev_addr);
1664 
1665 		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1666 		ret = ice_add_mac_filter(vsi, &mac_addr);
1667 		if (ret != ICE_SUCCESS)
1668 			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1669 
1670 		rte_ether_addr_copy(&broadcast, &mac_addr);
1671 		ret = ice_add_mac_filter(vsi, &mac_addr);
1672 		if (ret != ICE_SUCCESS)
1673 			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1674 	}
1675 
1676 	/* At the beginning, only TC0. */
1677 	/* What we need here is the maximum number of TX queues.
1678 	 * Currently vsi->nb_qps holds it.
1679 	 * Correct this if that changes.
1680 	 */
1681 	max_txqs[0] = vsi->nb_qps;
1682 	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1683 			      tc_bitmap, max_txqs);
1684 	if (ret != ICE_SUCCESS)
1685 		PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1686 
1687 	return vsi;
1688 fail_mem:
1689 	rte_free(vsi);
1690 	pf->next_vsi_idx--;
1691 	return NULL;
1692 }
1693 
1694 static int
1695 ice_send_driver_ver(struct ice_hw *hw)
1696 {
1697 	struct ice_driver_ver dv;
1698 
1699 	/* we don't have a driver version, so use 0 as a dummy */
1700 	dv.major_ver = 0;
1701 	dv.minor_ver = 0;
1702 	dv.build_ver = 0;
1703 	dv.subbuild_ver = 0;
1704 	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1705 
1706 	return ice_aq_send_driver_ver(hw, &dv, NULL);
1707 }
1708 
1709 static int
1710 ice_pf_setup(struct ice_pf *pf)
1711 {
1712 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1713 	struct ice_vsi *vsi;
1714 	uint16_t unused;
1715 
1716 	/* Clear all stats counters */
1717 	pf->offset_loaded = FALSE;
1718 	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1719 	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1720 	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1721 	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1722 
1723 	/* force guaranteed filter pool for PF */
1724 	ice_alloc_fd_guar_item(hw, &unused,
1725 			       hw->func_caps.fd_fltr_guar);
1726 	/* force shared filter pool for PF */
1727 	ice_alloc_fd_shrd_item(hw, &unused,
1728 			       hw->func_caps.fd_fltr_best_effort);
1729 
1730 	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1731 	if (!vsi) {
1732 		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1733 		return -EINVAL;
1734 	}
1735 
1736 	pf->main_vsi = vsi;
1737 
1738 	return 0;
1739 }
1740 
1741 /* PCIe configuration space setting */
1742 #define PCI_CFG_SPACE_SIZE          256
1743 #define PCI_CFG_SPACE_EXP_SIZE      4096
1744 #define PCI_EXT_CAP_ID(header)      (int)((header) & 0x0000ffff)
1745 #define PCI_EXT_CAP_NEXT(header)    (((header) >> 20) & 0xffc)
1746 #define PCI_EXT_CAP_ID_DSN          0x03
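/*
 * Illustrative decode (editorial): an extended capability header value of
 * 0x14810003 has capability ID 0x0003 (DSN) and a next-capability pointer
 * of 0x148 per the macros above.
 */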
1747 
1748 static int
1749 ice_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
1750 {
1751 	uint32_t header;
1752 	int ttl;
1753 	int pos = PCI_CFG_SPACE_SIZE;
1754 
1755 	/* minimum 8 bytes per capability */
1756 	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
1757 
1758 	if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
1759 		PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
1760 		return -1;
1761 	}
1762 
1763 	/*
1764 	 * If we have no capabilities, this is indicated by cap ID,
1765 	 * cap version and next pointer all being 0.
1766 	 */
1767 	if (header == 0)
1768 		return 0;
1769 
1770 	while (ttl-- > 0) {
1771 		if (PCI_EXT_CAP_ID(header) == cap)
1772 			return pos;
1773 
1774 		pos = PCI_EXT_CAP_NEXT(header);
1775 
1776 		if (pos < PCI_CFG_SPACE_SIZE)
1777 			break;
1778 
1779 		if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
1780 			PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
1781 			return -1;
1782 		}
1783 	}
1784 
1785 	return 0;
1786 }
1787 
1788 /*
1789  * Extract device serial number from PCIe Configuration Space and
1790  * determine the pkg file path according to the DSN.
1791  */
1792 static int
1793 ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
1794 {
1795 	int pos;
1796 	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1797 	uint32_t dsn_low, dsn_high;
1798 	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1799 
1800 	pos = ice_pci_find_next_ext_capability(pci_dev, PCI_EXT_CAP_ID_DSN);
1801 
1802 	if (pos) {
1803 		rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4);
1804 		rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8);
1805 		snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1806 			 "ice-%08x%08x.pkg", dsn_high, dsn_low);
1807 	} else {
1808 		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
1809 		goto fail_dsn;
1810 	}
1811 
1812 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1813 		ICE_MAX_PKG_FILENAME_SIZE);
1814 	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
1815 		return 0;
1816 
1817 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1818 		ICE_MAX_PKG_FILENAME_SIZE);
1819 	if (!access(strcat(pkg_file, opt_ddp_filename), 0))
1820 		return 0;
1821 
1822 fail_dsn:
1823 	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1824 	if (!access(pkg_file, 0))
1825 		return 0;
1826 	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1827 	return 0;
1828 }
1829 
1830 static enum ice_pkg_type
1831 ice_load_pkg_type(struct ice_hw *hw)
1832 {
1833 	enum ice_pkg_type package_type;
1834 
1835 	/* store the activated package type (OS default or Comms) */
1836 	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1837 		ICE_PKG_NAME_SIZE))
1838 		package_type = ICE_PKG_TYPE_OS_DEFAULT;
1839 	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1840 		ICE_PKG_NAME_SIZE))
1841 		package_type = ICE_PKG_TYPE_COMMS;
1842 	else
1843 		package_type = ICE_PKG_TYPE_UNKNOWN;
1844 
1845 	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s",
1846 		hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1847 		hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1848 		hw->active_pkg_name);
1849 
1850 	return package_type;
1851 }
1852 
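/*
 * Load the DDP package: resolve the file path (DSN-specific name first,
 * then the generic ice.pkg locations), read the whole file into memory,
 * download it to the device with ice_copy_and_init_pkg(), record the
 * resulting package type and initialize the HW parser tables.
 */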
1853 static int ice_load_pkg(struct rte_eth_dev *dev)
1854 {
1855 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1856 	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1857 	int err;
1858 	uint8_t *buf;
1859 	int buf_len;
1860 	FILE *file;
1861 	struct stat fstat;
1862 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1863 	struct ice_adapter *ad =
1864 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1865 
1866 	ice_pkg_file_search_path(pci_dev, pkg_file);
1867 
1868 	file = fopen(pkg_file, "rb");
1869 	if (!file)  {
1870 		PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1871 		return -1;
1872 	}
1873 
1874 	err = stat(pkg_file, &fstat);
1875 	if (err) {
1876 		PMD_INIT_LOG(ERR, "failed to get file stats\n");
1877 		fclose(file);
1878 		return err;
1879 	}
1880 
1881 	buf_len = fstat.st_size;
1882 	buf = rte_malloc(NULL, buf_len, 0);
1883 
1884 	if (!buf) {
1885 		PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1886 				buf_len);
1887 		fclose(file);
1888 		return -1;
1889 	}
1890 
1891 	err = fread(buf, buf_len, 1, file);
1892 	if (err != 1) {
1893 		PMD_INIT_LOG(ERR, "failed to read package data\n");
1894 		fclose(file);
1895 		err = -1;
1896 		goto fail_exit;
1897 	}
1898 
1899 	fclose(file);
1900 
1901 	err = ice_copy_and_init_pkg(hw, buf, buf_len);
1902 	if (err) {
1903 		PMD_INIT_LOG(ERR, "ice_copy_and_init_pkg failed: %d\n", err);
1904 		goto fail_exit;
1905 	}
1906 
1907 	/* store the loaded pkg type info */
1908 	ad->active_pkg_type = ice_load_pkg_type(hw);
1909 
1910 	err = ice_init_hw_tbls(hw);
1911 	if (err) {
1912 		PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1913 		goto fail_init_tbls;
1914 	}
1915 
1916 	return 0;
1917 
1918 fail_init_tbls:
1919 	rte_free(hw->pkg_copy);
1920 fail_exit:
1921 	rte_free(buf);
1922 	return err;
1923 }
1924 
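/*
 * Read PFLAN_RX_QALLOC to learn the absolute index of the first queue
 * assigned to this PF; queue setup later uses it as the base offset.
 */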
1925 static void
1926 ice_base_queue_get(struct ice_pf *pf)
1927 {
1928 	uint32_t reg;
1929 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1930 
1931 	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1932 	if (reg & PFLAN_RX_QALLOC_VALID_M) {
1933 		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1934 	} else {
1935 		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
1936 					" index");
1937 	}
1938 }
1939 
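/*
 * rte_kvargs handler for the boolean devargs ("0" or "1") listed in
 * ice_valid_args, e.g. (assuming the usual EAL PCI devargs syntax and a
 * placeholder PCI address):
 *   -w 0000:18:00.0,safe-mode-support=1,flow-mark-support=1
 */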
1940 static int
1941 parse_bool(const char *key, const char *value, void *args)
1942 {
1943 	int *i = (int *)args;
1944 	char *end;
1945 	int num;
1946 
1947 	num = strtoul(value, &end, 10);
1948 
1949 	if (num != 0 && num != 1) {
1950 		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1951 			"value must be 0 or 1",
1952 			value, key);
1953 		return -1;
1954 	}
1955 
1956 	*i = num;
1957 	return 0;
1958 }
1959 
1960 static int ice_parse_devargs(struct rte_eth_dev *dev)
1961 {
1962 	struct ice_adapter *ad =
1963 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1964 	struct rte_devargs *devargs = dev->device->devargs;
1965 	struct rte_kvargs *kvlist;
1966 	int ret;
1967 
1968 	if (devargs == NULL)
1969 		return 0;
1970 
1971 	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1972 	if (kvlist == NULL) {
1973 		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
1974 		return -EINVAL;
1975 	}
1976 
1977 	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
1978 	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
1979 	       sizeof(ad->devargs.proto_xtr));
1980 
1981 	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
1982 				 &handle_proto_xtr_arg, &ad->devargs);
1983 	if (ret)
1984 		goto bail;
1985 
1986 	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
1987 				 &parse_bool, &ad->devargs.safe_mode_support);
1988 	if (ret)
1989 		goto bail;
1990 
1991 	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
1992 				 &parse_bool, &ad->devargs.pipe_mode_support);
1993 	if (ret)
1994 		goto bail;
1995 
1996 	ret = rte_kvargs_process(kvlist, ICE_FLOW_MARK_SUPPORT_ARG,
1997 				 &parse_bool, &ad->devargs.flow_mark_support);
1998 	if (ret)
1999 		goto bail;
2000 
2001 bail:
2002 	rte_kvargs_free(kvlist);
2003 	return ret;
2004 }
2005 
2006 /* Forward LLDP packets to the default VSI by setting switch rules */
2007 static int
2008 ice_vsi_config_sw_lldp(struct ice_vsi *vsi,  bool on)
2009 {
2010 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2011 	struct ice_fltr_list_entry *s_list_itr = NULL;
2012 	struct LIST_HEAD_TYPE list_head;
2013 	int ret = 0;
2014 
2015 	INIT_LIST_HEAD(&list_head);
2016 
2017 	s_list_itr = (struct ice_fltr_list_entry *)
2018 			ice_malloc(hw, sizeof(*s_list_itr));
2019 	if (!s_list_itr)
2020 		return -ENOMEM;
2021 	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2022 	s_list_itr->fltr_info.vsi_handle = vsi->idx;
2023 	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
2024 			RTE_ETHER_TYPE_LLDP;
2025 	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2026 	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
2027 	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
2028 	LIST_ADD(&s_list_itr->list_entry, &list_head);
2029 	if (on)
2030 		ret = ice_add_eth_mac(hw, &list_head);
2031 	else
2032 		ret = ice_remove_eth_mac(hw, &list_head);
2033 
2034 	rte_free(s_list_itr);
2035 	return ret;
2036 }
2037 
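/*
 * Query up to 'num' allocated resource descriptors of 'res_type' from
 * firmware via the "get allocated resource descriptors" AQ command,
 * starting at 'desc_id', and copy them into 'prof_buf'.
 */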
2038 static enum ice_status
2039 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
2040 		uint16_t num, uint16_t desc_id,
2041 		uint16_t *prof_buf, uint16_t *num_prof)
2042 {
2043 	struct ice_aqc_get_allocd_res_desc_resp *resp_buf;
2044 	int ret;
2045 	uint16_t buf_len;
2046 	bool res_shared = 1;
2047 	struct ice_aq_desc aq_desc;
2048 	struct ice_sq_cd *cd = NULL;
2049 	struct ice_aqc_get_allocd_res_desc *cmd =
2050 			&aq_desc.params.get_res_desc;
2051 
2052 	buf_len = sizeof(resp_buf->elem) * num;
2053 	resp_buf = ice_malloc(hw, buf_len);
2054 	if (!resp_buf)
2055 		return -ENOMEM;
2056 
2057 	ice_fill_dflt_direct_cmd_desc(&aq_desc,
2058 			ice_aqc_opc_get_allocd_res_desc);
2059 
2060 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2061 				ICE_AQC_RES_TYPE_M) | (res_shared ?
2062 				ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2063 	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
2064 
2065 	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
2066 	if (!ret)
2067 		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
2068 	else
2069 		goto exit;
2070 
2071 	ice_memcpy(prof_buf, resp_buf->elem, sizeof(resp_buf->elem) *
2072 			(*num_prof), ICE_NONDMA_TO_NONDMA);
2073 
2074 exit:
2075 	rte_free(resp_buf);
2076 	return ret;
2077 }
2078 static int
2079 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
2080 {
2081 	int ret;
2082 	uint16_t prof_id;
2083 	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
2084 	uint16_t first_desc = 1;
2085 	uint16_t num_prof = 0;
2086 
2087 	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
2088 			first_desc, prof_buf, &num_prof);
2089 	if (ret) {
2090 		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
2091 		return ret;
2092 	}
2093 
2094 	for (prof_id = 0; prof_id < num_prof; prof_id++) {
2095 		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
2096 		if (ret) {
2097 			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
2098 			return ret;
2099 		}
2100 	}
2101 	return 0;
2102 }
2103 
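/*
 * Free any flow director and RSS profile IDs still allocated in the
 * flexible pipeline (e.g. left over from a previous driver instance),
 * so that this initialization starts from a clean state.
 */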
2104 static int
2105 ice_reset_fxp_resource(struct ice_hw *hw)
2106 {
2107 	int ret;
2108 
2109 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
2110 	if (ret) {
2111 		PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
2112 		return ret;
2113 	}
2114 
2115 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
2116 	if (ret) {
2117 		PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
2118 		return ret;
2119 	}
2120 
2121 	return 0;
2122 }
2123 
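/*
 * Main per-port initialization: parse devargs, bring up the admin queue
 * and HW, load the DDP package (or fall back to safe mode), set up the
 * MAC address, MSIX pool and PF VSI, stop FW LLDP and redirect LLDP
 * frames to the PF VSI, then register and enable the misc interrupt.
 */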
2124 static int
2125 ice_dev_init(struct rte_eth_dev *dev)
2126 {
2127 	struct rte_pci_device *pci_dev;
2128 	struct rte_intr_handle *intr_handle;
2129 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2130 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2131 	struct ice_adapter *ad =
2132 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2133 	struct ice_vsi *vsi;
2134 	int ret;
2135 
2136 	dev->dev_ops = &ice_eth_dev_ops;
2137 	dev->rx_pkt_burst = ice_recv_pkts;
2138 	dev->tx_pkt_burst = ice_xmit_pkts;
2139 	dev->tx_pkt_prepare = ice_prep_pkts;
2140 
2141 	/* for secondary processes, we don't initialise any further as primary
2142 	 * has already done this work.
2143 	 */
2144 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2145 		ice_set_rx_function(dev);
2146 		ice_set_tx_function(dev);
2147 		return 0;
2148 	}
2149 
2150 	ice_set_default_ptype_table(dev);
2151 	pci_dev = RTE_DEV_TO_PCI(dev->device);
2152 	intr_handle = &pci_dev->intr_handle;
2153 
2154 	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2155 	pf->adapter->eth_dev = dev;
2156 	pf->dev_data = dev->data;
2157 	hw->back = pf->adapter;
2158 	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2159 	hw->vendor_id = pci_dev->id.vendor_id;
2160 	hw->device_id = pci_dev->id.device_id;
2161 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2162 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2163 	hw->bus.device = pci_dev->addr.devid;
2164 	hw->bus.func = pci_dev->addr.function;
2165 
2166 	ret = ice_parse_devargs(dev);
2167 	if (ret) {
2168 		PMD_INIT_LOG(ERR, "Failed to parse devargs");
2169 		return -EINVAL;
2170 	}
2171 
2172 	ice_init_controlq_parameter(hw);
2173 
2174 	ret = ice_init_hw(hw);
2175 	if (ret) {
2176 		PMD_INIT_LOG(ERR, "Failed to initialize HW");
2177 		return -EINVAL;
2178 	}
2179 
2180 	ret = ice_load_pkg(dev);
2181 	if (ret) {
2182 		if (ad->devargs.safe_mode_support == 0) {
2183 			PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
2184 					"Use safe-mode-support=1 to enter Safe Mode");
2185 			return ret;
2186 		}
2187 
2188 		PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
2189 					"Entering Safe Mode");
2190 		ad->is_safe_mode = 1;
2191 	}
2192 
2193 	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2194 		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2195 		     hw->api_maj_ver, hw->api_min_ver);
2196 
2197 	ice_pf_sw_init(dev);
2198 	ret = ice_init_mac_address(dev);
2199 	if (ret) {
2200 		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2201 		goto err_init_mac;
2202 	}
2203 
2204 	/* Pass the information to the rte_eth_dev_close() that it should also
2205 	 * release the private port resources.
2206 	 */
2207 	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
2208 
2209 	ret = ice_res_pool_init(&pf->msix_pool, 1,
2210 				hw->func_caps.common_cap.num_msix_vectors - 1);
2211 	if (ret) {
2212 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2213 		goto err_msix_pool_init;
2214 	}
2215 
2216 	ret = ice_pf_setup(pf);
2217 	if (ret) {
2218 		PMD_INIT_LOG(ERR, "Failed to setup PF");
2219 		goto err_pf_setup;
2220 	}
2221 
2222 	ret = ice_send_driver_ver(hw);
2223 	if (ret) {
2224 		PMD_INIT_LOG(ERR, "Failed to send driver version");
2225 		goto err_pf_setup;
2226 	}
2227 
2228 	vsi = pf->main_vsi;
2229 
2230 	/* Disable double vlan by default */
2231 	ice_vsi_config_double_vlan(vsi, FALSE);
2232 
2233 	ret = ice_aq_stop_lldp(hw, TRUE, FALSE, NULL);
2234 	if (ret != ICE_SUCCESS)
2235 		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
2236 	ret = ice_init_dcb(hw, TRUE);
2237 	if (ret != ICE_SUCCESS)
2238 		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
2239 	/* Forward LLDP packets to default VSI */
2240 	ret = ice_vsi_config_sw_lldp(vsi, TRUE);
2241 	if (ret != ICE_SUCCESS)
2242 		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
2243 	/* register callback func to eal lib */
2244 	rte_intr_callback_register(intr_handle,
2245 				   ice_interrupt_handler, dev);
2246 
2247 	ice_pf_enable_irq0(hw);
2248 
2249 	/* enable uio intr after callback register */
2250 	rte_intr_enable(intr_handle);
2251 
2252 	/* get the base queue pair index in the device */
2253 	ice_base_queue_get(pf);
2254 
2255 	if (!ad->is_safe_mode) {
2256 		ret = ice_flow_init(ad);
2257 		if (ret) {
2258 			PMD_INIT_LOG(ERR, "Failed to initialize flow");
2259 			return ret;
2260 		}
2261 	}
2262 
2263 	ret = ice_reset_fxp_resource(hw);
2264 	if (ret) {
2265 		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2266 		return ret;
2267 	}
2268 
2269 	return 0;
2270 
2271 err_pf_setup:
2272 	ice_res_pool_destroy(&pf->msix_pool);
2273 err_msix_pool_init:
2274 	rte_free(dev->data->mac_addrs);
2275 	dev->data->mac_addrs = NULL;
2276 err_init_mac:
2277 	ice_sched_cleanup_all(hw);
2278 	rte_free(hw->port_info);
2279 	ice_shutdown_all_ctrlq(hw);
2280 	rte_free(pf->proto_xtr);
2281 
2282 	return ret;
2283 }
2284 
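/*
 * Remove all MAC/VLAN filters from the VSI, free it in firmware via the
 * admin queue, and release the software VSI structure.
 */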
2285 int
2286 ice_release_vsi(struct ice_vsi *vsi)
2287 {
2288 	struct ice_hw *hw;
2289 	struct ice_vsi_ctx vsi_ctx;
2290 	enum ice_status ret;
2291 
2292 	if (!vsi)
2293 		return 0;
2294 
2295 	hw = ICE_VSI_TO_HW(vsi);
2296 
2297 	ice_remove_all_mac_vlan_filters(vsi);
2298 
2299 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2300 
2301 	vsi_ctx.vsi_num = vsi->vsi_id;
2302 	vsi_ctx.info = vsi->info;
2303 	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2304 	if (ret != ICE_SUCCESS) {
2305 		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2306 		rte_free(vsi);
2307 		return -1;
2308 	}
2309 
2310 	rte_free(vsi);
2311 	return 0;
2312 }
2313 
2314 void
2315 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2316 {
2317 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2318 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2319 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2320 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2321 	uint16_t msix_intr, i;
2322 
2323 	/* disable interrupts and also clear all the existing config */
2324 	for (i = 0; i < vsi->nb_qps; i++) {
2325 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2326 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2327 		rte_wmb();
2328 	}
2329 
2330 	if (rte_intr_allow_others(intr_handle))
2331 		/* vfio-pci */
2332 		for (i = 0; i < vsi->nb_msix; i++) {
2333 			msix_intr = vsi->msix_intr + i;
2334 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2335 				      GLINT_DYN_CTL_WB_ON_ITR_M);
2336 		}
2337 	else
2338 		/* igb_uio */
2339 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2340 }
2341 
2342 static void
2343 ice_dev_stop(struct rte_eth_dev *dev)
2344 {
2345 	struct rte_eth_dev_data *data = dev->data;
2346 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2347 	struct ice_vsi *main_vsi = pf->main_vsi;
2348 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2349 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2350 	uint16_t i;
2351 
2352 	/* avoid stopping again */
2353 	if (pf->adapter_stopped)
2354 		return;
2355 
2356 	/* stop and clear all Rx queues */
2357 	for (i = 0; i < data->nb_rx_queues; i++)
2358 		ice_rx_queue_stop(dev, i);
2359 
2360 	/* stop and clear all Tx queues */
2361 	for (i = 0; i < data->nb_tx_queues; i++)
2362 		ice_tx_queue_stop(dev, i);
2363 
2364 	/* disable all queue interrupts */
2365 	ice_vsi_disable_queues_intr(main_vsi);
2366 
2367 	/* Clear all queues and release mbufs */
2368 	ice_clear_queues(dev);
2369 
2370 	if (pf->init_link_up)
2371 		ice_dev_set_link_up(dev);
2372 	else
2373 		ice_dev_set_link_down(dev);
2374 
2375 	/* Clean datapath event and queue/vec mapping */
2376 	rte_intr_efd_disable(intr_handle);
2377 	if (intr_handle->intr_vec) {
2378 		rte_free(intr_handle->intr_vec);
2379 		intr_handle->intr_vec = NULL;
2380 	}
2381 
2382 	pf->adapter_stopped = true;
2383 }
2384 
2385 static void
2386 ice_dev_close(struct rte_eth_dev *dev)
2387 {
2388 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2389 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2390 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2391 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2392 	struct ice_adapter *ad =
2393 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2394 
2395 	/* Since stop will bring the link down, a link event will be
2396 	 * triggered; disable the IRQ first so that deallocation of
2397 	 * port_info and other resources does not crash the interrupt
2398 	 * service thread.
2399 	 */
2400 	ice_pf_disable_irq0(hw);
2401 
2402 	ice_dev_stop(dev);
2403 
2404 	if (!ad->is_safe_mode)
2405 		ice_flow_uninit(ad);
2406 
2407 	/* release all queue resources */
2408 	ice_free_queues(dev);
2409 
2410 	ice_res_pool_destroy(&pf->msix_pool);
2411 	ice_release_vsi(pf->main_vsi);
2412 	ice_sched_cleanup_all(hw);
2413 	ice_free_hw_tbls(hw);
2414 	rte_free(hw->port_info);
2415 	hw->port_info = NULL;
2416 	ice_shutdown_all_ctrlq(hw);
2417 	rte_free(pf->proto_xtr);
2418 	pf->proto_xtr = NULL;
2419 
2420 	dev->dev_ops = NULL;
2421 	dev->rx_pkt_burst = NULL;
2422 	dev->tx_pkt_burst = NULL;
2423 
2424 	rte_free(dev->data->mac_addrs);
2425 	dev->data->mac_addrs = NULL;
2426 
2427 	/* disable uio intr before callback unregister */
2428 	rte_intr_disable(intr_handle);
2429 
2430 	/* unregister callback func from eal lib */
2431 	rte_intr_callback_unregister(intr_handle,
2432 				     ice_interrupt_handler, dev);
2433 }
2434 
2435 static int
2436 ice_dev_uninit(struct rte_eth_dev *dev)
2437 {
2438 	ice_dev_close(dev);
2439 
2440 	return 0;
2441 }
2442 
2443 static int
2444 ice_dev_configure(struct rte_eth_dev *dev)
2445 {
2446 	struct ice_adapter *ad =
2447 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2448 
2449 	/* Initialize to true. If any Rx queue doesn't meet the
2450 	 * bulk allocation or vector Rx preconditions, we will reset it.
2451 	 */
2452 	ad->rx_bulk_alloc_allowed = true;
2453 	ad->tx_simple_allowed = true;
2454 
2455 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
2456 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
2457 
2458 	return 0;
2459 }
2460 
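/*
 * Default RSS setup for the PF VSI: program the hash key (user supplied
 * or random), fill the lookup table round-robin over the Rx queues,
 * select the symmetric Toeplitz scheme and install per-protocol RSS
 * rules. Skipped entirely in safe mode, where RSS is unsupported.
 */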
2461 static int ice_init_rss(struct ice_pf *pf)
2462 {
2463 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2464 	struct ice_vsi *vsi = pf->main_vsi;
2465 	struct rte_eth_dev *dev = pf->adapter->eth_dev;
2466 	struct rte_eth_rss_conf *rss_conf;
2467 	struct ice_aqc_get_set_rss_keys key;
2468 	uint16_t i, nb_q;
2469 	int ret = 0;
2470 	bool is_safe_mode = pf->adapter->is_safe_mode;
2471 	uint32_t reg;
2472 
2473 	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
2474 	nb_q = dev->data->nb_rx_queues;
2475 	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
2476 	vsi->rss_lut_size = pf->hash_lut_size;
2477 
2478 	if (is_safe_mode) {
2479 		PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
2480 		return 0;
2481 	}
2482 
2483 	if (!vsi->rss_key)
2484 		vsi->rss_key = rte_zmalloc(NULL,
2485 					   vsi->rss_key_size, 0);
2486 	if (!vsi->rss_lut)
2487 		vsi->rss_lut = rte_zmalloc(NULL,
2488 					   vsi->rss_lut_size, 0);
2489 
2490 	/* configure RSS key */
2491 	if (!rss_conf->rss_key) {
2492 		/* Calculate the default hash key */
2493 		for (i = 0; i < vsi->rss_key_size; i++)
2494 			vsi->rss_key[i] = (uint8_t)rte_rand();
2495 	} else {
2496 		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
2497 			   RTE_MIN(rss_conf->rss_key_len,
2498 				   vsi->rss_key_size));
2499 	}
2500 	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
2501 	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
2502 	if (ret)
2503 		return -EINVAL;
2504 
2505 	/* init RSS LUT table */
2506 	for (i = 0; i < vsi->rss_lut_size; i++)
2507 		vsi->rss_lut[i] = i % nb_q;
2508 
2509 	ret = ice_aq_set_rss_lut(hw, vsi->idx,
2510 				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
2511 				 vsi->rss_lut, vsi->rss_lut_size);
2512 	if (ret)
2513 		return -EINVAL;
2514 
2515 	/* Enable registers for symmetric_toeplitz function. */
2516 	reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
2517 	reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
2518 		(1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
2519 	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
2520 
2521 	/* configure RSS for IPv4 with input set IPv4 src/dst */
2522 	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
2523 			      ICE_FLOW_SEG_HDR_IPV4, 0);
2524 	if (ret)
2525 		PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", __func__, ret);
2526 
2527 	/* configure RSS for IPv6 with input set IPv6 src/dst */
2528 	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
2529 			      ICE_FLOW_SEG_HDR_IPV6, 0);
2530 	if (ret)
2531 		PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", __func__, ret);
2532 
2533 	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
2534 	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
2535 			      ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6, 0);
2536 	if (ret)
2537 		PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);
2538 
2539 	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
2540 	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
2541 			      ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6, 0);
2542 	if (ret)
2543 		PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);
2544 
2545 	/* configure RSS for sctp6 with input set IPv6 src/dst */
2546 	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
2547 			      ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6, 0);
2548 	if (ret)
2549 		PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
2550 				__func__, ret);
2551 
2552 	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
2553 	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
2554 			      ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4, 0);
2555 	if (ret)
2556 		PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);
2557 
2558 	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
2559 	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
2560 			      ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4, 0);
2561 	if (ret)
2562 		PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);
2563 
2564 	/* configure RSS for sctp4 with input set IP src/dst */
2565 	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
2566 			      ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4, 0);
2567 	if (ret)
2568 		PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
2569 				__func__, ret);
2570 
2571 	/* configure RSS for gtpu with input set TEID */
2572 	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_GTP_U_IPV4_TEID,
2573 				ICE_FLOW_SEG_HDR_GTPU_IP, 0);
2574 	if (ret)
2575 		PMD_DRV_LOG(ERR, "%s GTPU_TEID rss flow fail %d",
2576 				__func__, ret);
2577 
2578 	/**
2579 	 * configure RSS for pppoe/pppod with input set
2580 	 * Source MAC and Session ID
2581 	 */
2582 	ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_PPPOE_SESS_ID_ETH,
2583 				ICE_FLOW_SEG_HDR_PPPOE, 0);
2584 	if (ret)
2585 		PMD_DRV_LOG(ERR, "%s PPPoE/PPPoD_SessionID rss flow fail %d",
2586 				__func__, ret);
2587 
2588 	return 0;
2589 }
2590 
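/*
 * Bind 'nb_queue' consecutive queues starting at 'base_queue' to one
 * MSI-X vector by programming QINT_RQCTL/QINT_TQCTL with ITR index 0,
 * and give that vector a default ITR value.
 */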
2591 static void
2592 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
2593 		       int base_queue, int nb_queue)
2594 {
2595 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2596 	uint32_t val, val_tx;
2597 	int i;
2598 
2599 	for (i = 0; i < nb_queue; i++) {
2600 		/* do the actual binding */
2601 		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
2602 		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
2603 		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
2604 			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
2605 
2606 		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
2607 			    base_queue + i, msix_vect);
2608 		/* set ITR0 value */
2609 		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
2610 		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
2611 		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
2612 	}
2613 }
2614 
2615 void
2616 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
2617 {
2618 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2619 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2620 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2621 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2622 	uint16_t msix_vect = vsi->msix_intr;
2623 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
2624 	uint16_t queue_idx = 0;
2625 	int record = 0;
2626 	int i;
2627 
2628 	/* clear Rx/Tx queue interrupt */
2629 	for (i = 0; i < vsi->nb_used_qps; i++) {
2630 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2631 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2632 	}
2633 
2634 	/* PF bind interrupt */
2635 	if (rte_intr_dp_is_en(intr_handle)) {
2636 		queue_idx = 0;
2637 		record = 1;
2638 	}
2639 
2640 	for (i = 0; i < vsi->nb_used_qps; i++) {
2641 		if (nb_msix <= 1) {
2642 			if (!rte_intr_allow_others(intr_handle))
2643 				msix_vect = ICE_MISC_VEC_ID;
2644 
2645 			/* uio: map all queues to one msix_vect */
2646 			__vsi_queues_bind_intr(vsi, msix_vect,
2647 					       vsi->base_queue + i,
2648 					       vsi->nb_used_qps - i);
2649 
2650 			for (; !!record && i < vsi->nb_used_qps; i++)
2651 				intr_handle->intr_vec[queue_idx + i] =
2652 					msix_vect;
2653 			break;
2654 		}
2655 
2656 		/* vfio 1:1 queue/msix_vect mapping */
2657 		__vsi_queues_bind_intr(vsi, msix_vect,
2658 				       vsi->base_queue + i, 1);
2659 
2660 		if (!!record)
2661 			intr_handle->intr_vec[queue_idx + i] = msix_vect;
2662 
2663 		msix_vect++;
2664 		nb_msix--;
2665 	}
2666 }
2667 
2668 void
2669 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
2670 {
2671 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2672 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2673 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2674 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2675 	uint16_t msix_intr, i;
2676 
2677 	if (rte_intr_allow_others(intr_handle))
2678 		for (i = 0; i < vsi->nb_used_qps; i++) {
2679 			msix_intr = vsi->msix_intr + i;
2680 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2681 				      GLINT_DYN_CTL_INTENA_M |
2682 				      GLINT_DYN_CTL_CLEARPBA_M |
2683 				      GLINT_DYN_CTL_ITR_INDX_M |
2684 				      GLINT_DYN_CTL_WB_ON_ITR_M);
2685 		}
2686 	else
2687 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
2688 			      GLINT_DYN_CTL_INTENA_M |
2689 			      GLINT_DYN_CTL_CLEARPBA_M |
2690 			      GLINT_DYN_CTL_ITR_INDX_M |
2691 			      GLINT_DYN_CTL_WB_ON_ITR_M);
2692 }
2693 
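/*
 * Set up Rx queue interrupts: enable event fds and the intr_vec mapping
 * when per-queue interrupts are requested, bind the queues to MSI-X
 * vectors and enable them.
 */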
2694 static int
2695 ice_rxq_intr_setup(struct rte_eth_dev *dev)
2696 {
2697 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2698 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2699 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2700 	struct ice_vsi *vsi = pf->main_vsi;
2701 	uint32_t intr_vector = 0;
2702 
2703 	rte_intr_disable(intr_handle);
2704 
2705 	/* check and configure queue intr-vector mapping */
2706 	if ((rte_intr_cap_multiple(intr_handle) ||
2707 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
2708 	    dev->data->dev_conf.intr_conf.rxq != 0) {
2709 		intr_vector = dev->data->nb_rx_queues;
2710 		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
2711 			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
2712 				    ICE_MAX_INTR_QUEUE_NUM);
2713 			return -ENOTSUP;
2714 		}
2715 		if (rte_intr_efd_enable(intr_handle, intr_vector))
2716 			return -1;
2717 	}
2718 
2719 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2720 		intr_handle->intr_vec =
2721 		rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
2722 			    0);
2723 		if (!intr_handle->intr_vec) {
2724 			PMD_DRV_LOG(ERR,
2725 				    "Failed to allocate %d rx_queues intr_vec",
2726 				    dev->data->nb_rx_queues);
2727 			return -ENOMEM;
2728 		}
2729 	}
2730 
2731 	/* Map queues with MSIX interrupt */
2732 	vsi->nb_used_qps = dev->data->nb_rx_queues;
2733 	ice_vsi_queues_bind_intr(vsi);
2734 
2735 	/* Enable interrupts for all the queues */
2736 	ice_vsi_enable_queues_intr(vsi);
2737 
2738 	rte_intr_enable(intr_handle);
2739 
2740 	return 0;
2741 }
2742 
2743 static void
2744 ice_get_init_link_status(struct rte_eth_dev *dev)
2745 {
2746 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2747 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2748 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2749 	struct ice_link_status link_status;
2750 	int ret;
2751 
2752 	ret = ice_aq_get_link_info(hw->port_info, enable_lse,
2753 				   &link_status, NULL);
2754 	if (ret != ICE_SUCCESS) {
2755 		PMD_DRV_LOG(ERR, "Failed to get link info");
2756 		pf->init_link_up = false;
2757 		return;
2758 	}
2759 
2760 	if (link_status.link_info & ICE_AQ_LINK_UP)
2761 		pf->init_link_up = true;
2762 }
2763 
2764 static int
2765 ice_dev_start(struct rte_eth_dev *dev)
2766 {
2767 	struct rte_eth_dev_data *data = dev->data;
2768 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2769 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2770 	struct ice_vsi *vsi = pf->main_vsi;
2771 	uint16_t nb_rxq = 0;
2772 	uint16_t nb_txq, i;
2773 	uint16_t max_frame_size;
2774 	int mask, ret;
2775 
2776 	/* program Tx queues' context in hardware */
2777 	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
2778 		ret = ice_tx_queue_start(dev, nb_txq);
2779 		if (ret) {
2780 			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
2781 			goto tx_err;
2782 		}
2783 	}
2784 
2785 	/* program Rx queues' context in hardware */
2786 	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
2787 		ret = ice_rx_queue_start(dev, nb_rxq);
2788 		if (ret) {
2789 			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
2790 			goto rx_err;
2791 		}
2792 	}
2793 
2794 	ret = ice_init_rss(pf);
2795 	if (ret) {
2796 		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
2797 		goto rx_err;
2798 	}
2799 
2800 	ice_set_rx_function(dev);
2801 	ice_set_tx_function(dev);
2802 
2803 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
2804 			ETH_VLAN_EXTEND_MASK;
2805 	ret = ice_vlan_offload_set(dev, mask);
2806 	if (ret) {
2807 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
2808 		goto rx_err;
2809 	}
2810 
2811 	/* enable Rx interrupts and map Rx queues to interrupt vectors */
2812 	if (ice_rxq_intr_setup(dev))
2813 		return -EIO;
2814 
2815 	/* Enable receiving broadcast packets and transmitting packets */
2816 	ret = ice_set_vsi_promisc(hw, vsi->idx,
2817 				  ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
2818 				  ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
2819 				  0);
2820 	if (ret != ICE_SUCCESS)
2821 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2822 
2823 	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
2824 				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
2825 				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
2826 				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
2827 				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
2828 				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
2829 				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
2830 				     NULL);
2831 	if (ret != ICE_SUCCESS)
2832 		PMD_DRV_LOG(WARNING, "Failed to set phy mask");
2833 
2834 	ice_get_init_link_status(dev);
2835 
2836 	ice_dev_set_link_up(dev);
2837 
2838 	/* Call the get_link_info AQ command to enable/disable LSE */
2839 	ice_link_update(dev, 0);
2840 
2841 	pf->adapter_stopped = false;
2842 
2843 	/* Set the max frame size to the default value */
2844 	max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
2845 		pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
2846 		ICE_FRAME_SIZE_MAX;
2847 
2848 	/* Set the max frame size in HW */
2849 	ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
2850 
2851 	return 0;
2852 
2853 	/* stop the already-started queues if we failed to start all of them */
2854 rx_err:
2855 	for (i = 0; i < nb_rxq; i++)
2856 		ice_rx_queue_stop(dev, i);
2857 tx_err:
2858 	for (i = 0; i < nb_txq; i++)
2859 		ice_tx_queue_stop(dev, i);
2860 
2861 	return -EIO;
2862 }
2863 
2864 static int
2865 ice_dev_reset(struct rte_eth_dev *dev)
2866 {
2867 	int ret;
2868 
2869 	if (dev->data->sriov.active)
2870 		return -ENOTSUP;
2871 
2872 	ret = ice_dev_uninit(dev);
2873 	if (ret) {
2874 		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
2875 		return -ENXIO;
2876 	}
2877 
2878 	ret = ice_dev_init(dev);
2879 	if (ret) {
2880 		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
2881 		return -ENXIO;
2882 	}
2883 
2884 	return 0;
2885 }
2886 
2887 static int
2888 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2889 {
2890 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2891 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2892 	struct ice_vsi *vsi = pf->main_vsi;
2893 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
2894 	bool is_safe_mode = pf->adapter->is_safe_mode;
2895 	u64 phy_type_low;
2896 	u64 phy_type_high;
2897 
2898 	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
2899 	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
2900 	dev_info->max_rx_queues = vsi->nb_qps;
2901 	dev_info->max_tx_queues = vsi->nb_qps;
2902 	dev_info->max_mac_addrs = vsi->max_macaddrs;
2903 	dev_info->max_vfs = pci_dev->max_vfs;
2904 	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
2905 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2906 
2907 	dev_info->rx_offload_capa =
2908 		DEV_RX_OFFLOAD_VLAN_STRIP |
2909 		DEV_RX_OFFLOAD_JUMBO_FRAME |
2910 		DEV_RX_OFFLOAD_KEEP_CRC |
2911 		DEV_RX_OFFLOAD_SCATTER |
2912 		DEV_RX_OFFLOAD_VLAN_FILTER;
2913 	dev_info->tx_offload_capa =
2914 		DEV_TX_OFFLOAD_VLAN_INSERT |
2915 		DEV_TX_OFFLOAD_TCP_TSO |
2916 		DEV_TX_OFFLOAD_MULTI_SEGS |
2917 		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
2918 	dev_info->flow_type_rss_offloads = 0;
2919 
2920 	if (!is_safe_mode) {
2921 		dev_info->rx_offload_capa |=
2922 			DEV_RX_OFFLOAD_IPV4_CKSUM |
2923 			DEV_RX_OFFLOAD_UDP_CKSUM |
2924 			DEV_RX_OFFLOAD_TCP_CKSUM |
2925 			DEV_RX_OFFLOAD_QINQ_STRIP |
2926 			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
2927 			DEV_RX_OFFLOAD_VLAN_EXTEND |
2928 			DEV_RX_OFFLOAD_RSS_HASH;
2929 		dev_info->tx_offload_capa |=
2930 			DEV_TX_OFFLOAD_QINQ_INSERT |
2931 			DEV_TX_OFFLOAD_IPV4_CKSUM |
2932 			DEV_TX_OFFLOAD_UDP_CKSUM |
2933 			DEV_TX_OFFLOAD_TCP_CKSUM |
2934 			DEV_TX_OFFLOAD_SCTP_CKSUM |
2935 			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2936 			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
2937 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
2938 	}
2939 
2940 	dev_info->rx_queue_offload_capa = 0;
2941 	dev_info->tx_queue_offload_capa = 0;
2942 
2943 	dev_info->reta_size = pf->hash_lut_size;
2944 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2945 
2946 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
2947 		.rx_thresh = {
2948 			.pthresh = ICE_DEFAULT_RX_PTHRESH,
2949 			.hthresh = ICE_DEFAULT_RX_HTHRESH,
2950 			.wthresh = ICE_DEFAULT_RX_WTHRESH,
2951 		},
2952 		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
2953 		.rx_drop_en = 0,
2954 		.offloads = 0,
2955 	};
2956 
2957 	dev_info->default_txconf = (struct rte_eth_txconf) {
2958 		.tx_thresh = {
2959 			.pthresh = ICE_DEFAULT_TX_PTHRESH,
2960 			.hthresh = ICE_DEFAULT_TX_HTHRESH,
2961 			.wthresh = ICE_DEFAULT_TX_WTHRESH,
2962 		},
2963 		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
2964 		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
2965 		.offloads = 0,
2966 	};
2967 
2968 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2969 		.nb_max = ICE_MAX_RING_DESC,
2970 		.nb_min = ICE_MIN_RING_DESC,
2971 		.nb_align = ICE_ALIGN_RING_DESC,
2972 	};
2973 
2974 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2975 		.nb_max = ICE_MAX_RING_DESC,
2976 		.nb_min = ICE_MIN_RING_DESC,
2977 		.nb_align = ICE_ALIGN_RING_DESC,
2978 	};
2979 
2980 	dev_info->speed_capa = ETH_LINK_SPEED_10M |
2981 			       ETH_LINK_SPEED_100M |
2982 			       ETH_LINK_SPEED_1G |
2983 			       ETH_LINK_SPEED_2_5G |
2984 			       ETH_LINK_SPEED_5G |
2985 			       ETH_LINK_SPEED_10G |
2986 			       ETH_LINK_SPEED_20G |
2987 			       ETH_LINK_SPEED_25G;
2988 
2989 	phy_type_low = hw->port_info->phy.phy_type_low;
2990 	phy_type_high = hw->port_info->phy.phy_type_high;
2991 
2992 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
2993 		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
2994 
2995 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
2996 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
2997 		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
2998 
2999 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3000 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3001 
3002 	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3003 	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3004 	dev_info->default_rxportconf.nb_queues = 1;
3005 	dev_info->default_txportconf.nb_queues = 1;
3006 	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3007 	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3008 
3009 	return 0;
3010 }
3011 
3012 static inline int
3013 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3014 			    struct rte_eth_link *link)
3015 {
3016 	struct rte_eth_link *dst = link;
3017 	struct rte_eth_link *src = &dev->data->dev_link;
3018 
3019 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3020 				*(uint64_t *)src) == 0)
3021 		return -1;
3022 
3023 	return 0;
3024 }
3025 
3026 static inline int
3027 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3028 			     struct rte_eth_link *link)
3029 {
3030 	struct rte_eth_link *dst = &dev->data->dev_link;
3031 	struct rte_eth_link *src = link;
3032 
3033 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3034 				*(uint64_t *)src) == 0)
3035 		return -1;
3036 
3037 	return 0;
3038 }
3039 
3040 static int
3041 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3042 {
3043 #define CHECK_INTERVAL 100  /* 100ms */
3044 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
3045 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3046 	struct ice_link_status link_status;
3047 	struct rte_eth_link link, old;
3048 	int status;
3049 	unsigned int rep_cnt = MAX_REPEAT_TIME;
3050 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3051 
3052 	memset(&link, 0, sizeof(link));
3053 	memset(&old, 0, sizeof(old));
3054 	memset(&link_status, 0, sizeof(link_status));
3055 	ice_atomic_read_link_status(dev, &old);
3056 
3057 	do {
3058 		/* Get link status information from hardware */
3059 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
3060 					      &link_status, NULL);
3061 		if (status != ICE_SUCCESS) {
3062 			link.link_speed = ETH_SPEED_NUM_100M;
3063 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
3064 			PMD_DRV_LOG(ERR, "Failed to get link info");
3065 			goto out;
3066 		}
3067 
3068 		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3069 		if (!wait_to_complete || link.link_status)
3070 			break;
3071 
3072 		rte_delay_ms(CHECK_INTERVAL);
3073 	} while (--rep_cnt);
3074 
3075 	if (!link.link_status)
3076 		goto out;
3077 
3078 	/* Full-duplex operation at all supported speeds */
3079 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
3080 
3081 	/* Parse the link status */
3082 	switch (link_status.link_speed) {
3083 	case ICE_AQ_LINK_SPEED_10MB:
3084 		link.link_speed = ETH_SPEED_NUM_10M;
3085 		break;
3086 	case ICE_AQ_LINK_SPEED_100MB:
3087 		link.link_speed = ETH_SPEED_NUM_100M;
3088 		break;
3089 	case ICE_AQ_LINK_SPEED_1000MB:
3090 		link.link_speed = ETH_SPEED_NUM_1G;
3091 		break;
3092 	case ICE_AQ_LINK_SPEED_2500MB:
3093 		link.link_speed = ETH_SPEED_NUM_2_5G;
3094 		break;
3095 	case ICE_AQ_LINK_SPEED_5GB:
3096 		link.link_speed = ETH_SPEED_NUM_5G;
3097 		break;
3098 	case ICE_AQ_LINK_SPEED_10GB:
3099 		link.link_speed = ETH_SPEED_NUM_10G;
3100 		break;
3101 	case ICE_AQ_LINK_SPEED_20GB:
3102 		link.link_speed = ETH_SPEED_NUM_20G;
3103 		break;
3104 	case ICE_AQ_LINK_SPEED_25GB:
3105 		link.link_speed = ETH_SPEED_NUM_25G;
3106 		break;
3107 	case ICE_AQ_LINK_SPEED_40GB:
3108 		link.link_speed = ETH_SPEED_NUM_40G;
3109 		break;
3110 	case ICE_AQ_LINK_SPEED_50GB:
3111 		link.link_speed = ETH_SPEED_NUM_50G;
3112 		break;
3113 	case ICE_AQ_LINK_SPEED_100GB:
3114 		link.link_speed = ETH_SPEED_NUM_100G;
3115 		break;
3116 	case ICE_AQ_LINK_SPEED_UNKNOWN:
3117 	default:
3118 		PMD_DRV_LOG(ERR, "Unknown link speed");
3119 		link.link_speed = ETH_SPEED_NUM_NONE;
3120 		break;
3121 	}
3122 
3123 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3124 			      ETH_LINK_SPEED_FIXED);
3125 
3126 out:
3127 	ice_atomic_write_link_status(dev, &link);
3128 	if (link.link_status == old.link_status)
3129 		return -1;
3130 
3131 	return 0;
3132 }
3133 
3134 /* Force the physical link state by getting the current PHY capabilities from
3135  * hardware and setting the PHY config based on the determined capabilities. If
3136  * link changes, link event will be triggered because both the Enable Automatic
3137  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3138  */
3139 static enum ice_status
3140 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3141 {
3142 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3143 	struct ice_aqc_get_phy_caps_data *pcaps;
3144 	struct ice_port_info *pi;
3145 	enum ice_status status;
3146 
3147 	if (!hw || !hw->port_info)
3148 		return ICE_ERR_PARAM;
3149 
3150 	pi = hw->port_info;
3151 
3152 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3153 		ice_malloc(hw, sizeof(*pcaps));
3154 	if (!pcaps)
3155 		return ICE_ERR_NO_MEMORY;
3156 
3157 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
3158 				     NULL);
3159 	if (status)
3160 		goto out;
3161 
3162 	/* No change in link */
3163 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3164 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3165 		goto out;
3166 
3167 	cfg.phy_type_low = pcaps->phy_type_low;
3168 	cfg.phy_type_high = pcaps->phy_type_high;
3169 	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3170 	cfg.low_power_ctrl = pcaps->low_power_ctrl;
3171 	cfg.eee_cap = pcaps->eee_cap;
3172 	cfg.eeer_value = pcaps->eeer_value;
3173 	cfg.link_fec_opt = pcaps->link_fec_options;
3174 	if (link_up)
3175 		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3176 	else
3177 		cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3178 
3179 	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3180 
3181 out:
3182 	ice_free(hw, pcaps);
3183 	return status;
3184 }
3185 
3186 static int
3187 ice_dev_set_link_up(struct rte_eth_dev *dev)
3188 {
3189 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3190 
3191 	return ice_force_phys_link_state(hw, true);
3192 }
3193 
3194 static int
3195 ice_dev_set_link_down(struct rte_eth_dev *dev)
3196 {
3197 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3198 
3199 	return ice_force_phys_link_state(hw, false);
3200 }
3201 
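/*
 * MTU update: reject values outside the supported range and changes made
 * while the port is running, then toggle the jumbo-frame offload and
 * max_rx_pkt_len to match the new frame size.
 */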
3202 static int
3203 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3204 {
3205 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3206 	struct rte_eth_dev_data *dev_data = pf->dev_data;
3207 	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3208 
3209 	/* check if mtu is within the allowed range */
3210 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
3211 		return -EINVAL;
3212 
3213 	/* MTU setting is forbidden if the port is started */
3214 	if (dev_data->dev_started) {
3215 		PMD_DRV_LOG(ERR,
3216 			    "port %d must be stopped before configuration",
3217 			    dev_data->port_id);
3218 		return -EBUSY;
3219 	}
3220 
3221 	if (frame_size > RTE_ETHER_MAX_LEN)
3222 		dev_data->dev_conf.rxmode.offloads |=
3223 			DEV_RX_OFFLOAD_JUMBO_FRAME;
3224 	else
3225 		dev_data->dev_conf.rxmode.offloads &=
3226 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
3227 
3228 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3229 
3230 	return 0;
3231 }
3232 
3233 static int ice_macaddr_set(struct rte_eth_dev *dev,
3234 			   struct rte_ether_addr *mac_addr)
3235 {
3236 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3237 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3238 	struct ice_vsi *vsi = pf->main_vsi;
3239 	struct ice_mac_filter *f;
3240 	uint8_t flags = 0;
3241 	int ret;
3242 
3243 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
3244 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
3245 		return -EINVAL;
3246 	}
3247 
3248 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
3249 		if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
3250 			break;
3251 	}
3252 
3253 	if (!f) {
3254 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
3255 		return -EIO;
3256 	}
3257 
3258 	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
3259 	if (ret != ICE_SUCCESS) {
3260 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
3261 		return -EIO;
3262 	}
3263 	ret = ice_add_mac_filter(vsi, mac_addr);
3264 	if (ret != ICE_SUCCESS) {
3265 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
3266 		return -EIO;
3267 	}
3268 	rte_ether_addr_copy(mac_addr, &pf->dev_addr);
3269 
3270 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3271 	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
3272 	if (ret != ICE_SUCCESS)
3273 		PMD_DRV_LOG(ERR, "Failed to set manage mac");
3274 
3275 	return 0;
3276 }
3277 
3278 /* Add a MAC address, and update filters */
3279 static int
3280 ice_macaddr_add(struct rte_eth_dev *dev,
3281 		struct rte_ether_addr *mac_addr,
3282 		__rte_unused uint32_t index,
3283 		__rte_unused uint32_t pool)
3284 {
3285 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3286 	struct ice_vsi *vsi = pf->main_vsi;
3287 	int ret;
3288 
3289 	ret = ice_add_mac_filter(vsi, mac_addr);
3290 	if (ret != ICE_SUCCESS) {
3291 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
3292 		return -EINVAL;
3293 	}
3294 
3295 	return ICE_SUCCESS;
3296 }
3297 
3298 /* Remove a MAC address, and update filters */
3299 static void
3300 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3301 {
3302 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3303 	struct ice_vsi *vsi = pf->main_vsi;
3304 	struct rte_eth_dev_data *data = dev->data;
3305 	struct rte_ether_addr *macaddr;
3306 	int ret;
3307 
3308 	macaddr = &data->mac_addrs[index];
3309 	ret = ice_remove_mac_filter(vsi, macaddr);
3310 	if (ret) {
3311 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
3312 		return;
3313 	}
3314 }
3315 
3316 static int
3317 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3318 {
3319 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3320 	struct ice_vsi *vsi = pf->main_vsi;
3321 	int ret;
3322 
3323 	PMD_INIT_FUNC_TRACE();
3324 
3325 	if (on) {
3326 		ret = ice_add_vlan_filter(vsi, vlan_id);
3327 		if (ret < 0) {
3328 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
3329 			return -EINVAL;
3330 		}
3331 	} else {
3332 		ret = ice_remove_vlan_filter(vsi, vlan_id);
3333 		if (ret < 0) {
3334 			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
3335 			return -EINVAL;
3336 		}
3337 	}
3338 
3339 	return 0;
3340 }
3341 
3342 /* Configure vlan filter on or off */
3343 static int
3344 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
3345 {
3346 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3347 	struct ice_vsi_ctx ctxt;
3348 	uint8_t sec_flags, sw_flags2;
3349 	int ret = 0;
3350 
3351 	sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3352 		    ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
3353 	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3354 
3355 	if (on) {
3356 		vsi->info.sec_flags |= sec_flags;
3357 		vsi->info.sw_flags2 |= sw_flags2;
3358 	} else {
3359 		vsi->info.sec_flags &= ~sec_flags;
3360 		vsi->info.sw_flags2 &= ~sw_flags2;
3361 	}
3362 	vsi->info.sw_id = hw->port_info->sw_id;
3363 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3364 	ctxt.info.valid_sections =
3365 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3366 				 ICE_AQ_VSI_PROP_SECURITY_VALID);
3367 	ctxt.vsi_num = vsi->vsi_id;
3368 
3369 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3370 	if (ret) {
3371 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
3372 			    on ? "enable" : "disable");
3373 		return -EINVAL;
3374 	} else {
3375 		vsi->info.valid_sections |=
3376 			rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3377 					 ICE_AQ_VSI_PROP_SECURITY_VALID);
3378 	}
3379 
3380 	/* Consistent with other drivers: allow untagged packets when VLAN filtering is on */
3381 	if (on)
3382 		ret = ice_add_vlan_filter(vsi, 0);
3383 	else
3384 		ret = ice_remove_vlan_filter(vsi, 0);
3385 
3386 	return 0;
3387 }
3388 
3389 static int
3390 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
3391 {
3392 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3393 	struct ice_vsi_ctx ctxt;
3394 	uint8_t vlan_flags;
3395 	int ret = 0;
3396 
3397 	/* Check whether stripping is already in the requested state */
3398 	if (vsi->info.valid_sections &
3399 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
3400 		if (on) {
3401 			if ((vsi->info.vlan_flags &
3402 			     ICE_AQ_VSI_VLAN_EMOD_M) ==
3403 			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
3404 				return 0; /* already on */
3405 		} else {
3406 			if ((vsi->info.vlan_flags &
3407 			     ICE_AQ_VSI_VLAN_EMOD_M) ==
3408 			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
3409 				return 0; /* already off */
3410 		}
3411 	}
3412 
3413 	if (on)
3414 		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
3415 	else
3416 		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3417 	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
3418 	vsi->info.vlan_flags |= vlan_flags;
3419 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3420 	ctxt.info.valid_sections =
3421 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3422 	ctxt.vsi_num = vsi->vsi_id;
3423 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3424 	if (ret) {
3425 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
3426 			    on ? "enable" : "disable");
3427 		return -EINVAL;
3428 	}
3429 
3430 	vsi->info.valid_sections |=
3431 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3432 
3433 	return ret;
3434 }
3435 
3436 static int
3437 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3438 {
3439 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3440 	struct ice_vsi *vsi = pf->main_vsi;
3441 	struct rte_eth_rxmode *rxmode;
3442 
3443 	rxmode = &dev->data->dev_conf.rxmode;
3444 	if (mask & ETH_VLAN_FILTER_MASK) {
3445 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3446 			ice_vsi_config_vlan_filter(vsi, TRUE);
3447 		else
3448 			ice_vsi_config_vlan_filter(vsi, FALSE);
3449 	}
3450 
3451 	if (mask & ETH_VLAN_STRIP_MASK) {
3452 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3453 			ice_vsi_config_vlan_stripping(vsi, TRUE);
3454 		else
3455 			ice_vsi_config_vlan_stripping(vsi, FALSE);
3456 	}
3457 
3458 	if (mask & ETH_VLAN_EXTEND_MASK) {
3459 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3460 			ice_vsi_config_double_vlan(vsi, TRUE);
3461 		else
3462 			ice_vsi_config_double_vlan(vsi, FALSE);
3463 	}
3464 
3465 	return 0;
3466 }
3467 
3468 static int
3469 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3470 {
3471 	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
3472 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3473 	int ret;
3474 
3475 	if (!lut)
3476 		return -EINVAL;
3477 
3478 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
3479 		ret = ice_aq_get_rss_lut(hw, vsi->idx,
3480 			ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
3481 		if (ret) {
3482 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
3483 			return -EINVAL;
3484 		}
3485 	} else {
3486 		uint64_t *lut_dw = (uint64_t *)lut;
3487 		uint16_t i, lut_size_dw = lut_size / 4;
3488 
3489 		for (i = 0; i < lut_size_dw; i++)
3490 			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
3491 	}
3492 
3493 	return 0;
3494 }
3495 
3496 static int
3497 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
3498 {
3499 	struct ice_pf *pf;
3500 	struct ice_hw *hw;
3501 	int ret;
3502 
3503 	if (!vsi || !lut)
3504 		return -EINVAL;
3505 
3506 	pf = ICE_VSI_TO_PF(vsi);
3507 	hw = ICE_VSI_TO_HW(vsi);
3508 
3509 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
3510 		ret = ice_aq_set_rss_lut(hw, vsi->idx,
3511 			ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut, lut_size);
3512 		if (ret) {
3513 			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
3514 			return -EINVAL;
3515 		}
3516 	} else {
3517 		uint64_t *lut_dw = (uint64_t *)lut;
3518 		uint16_t i, lut_size_dw = lut_size / 4;
3519 
3520 		for (i = 0; i < lut_size_dw; i++)
3521 			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
3522 
3523 		ice_flush(hw);
3524 	}
3525 
3526 	return 0;
3527 }
3528 
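/*
 * Update the RSS redirection table: read the current LUT (the get must
 * use the current LUT size), apply the entries selected by the caller's
 * mask, and write the LUT back with the requested size, which may change
 * pf->hash_lut_size.
 */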
3529 static int
3530 ice_rss_reta_update(struct rte_eth_dev *dev,
3531 		    struct rte_eth_rss_reta_entry64 *reta_conf,
3532 		    uint16_t reta_size)
3533 {
3534 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3535 	uint16_t i, lut_size = pf->hash_lut_size;
3536 	uint16_t idx, shift;
3537 	uint8_t *lut;
3538 	int ret;
3539 
3540 	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
3541 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
3542 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
3543 		PMD_DRV_LOG(ERR,
3544 			    "The size of hash lookup table configured (%d) "
3545 			    "doesn't match what the hardware can "
3546 			    "support (128, 512, 2048)",
3547 			    reta_size);
3548 		return -EINVAL;
3549 	}
3550 
3551 	/* It MUST use the current LUT size to get the RSS lookup table,
3552 	 * otherwise it will fail with error code -100.
3553 	 */
3554 	lut = rte_zmalloc(NULL,  RTE_MAX(reta_size, lut_size), 0);
3555 	if (!lut) {
3556 		PMD_DRV_LOG(ERR, "No memory can be allocated");
3557 		return -ENOMEM;
3558 	}
3559 	ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
3560 	if (ret)
3561 		goto out;
3562 
3563 	for (i = 0; i < reta_size; i++) {
3564 		idx = i / RTE_RETA_GROUP_SIZE;
3565 		shift = i % RTE_RETA_GROUP_SIZE;
3566 		if (reta_conf[idx].mask & (1ULL << shift))
3567 			lut[i] = reta_conf[idx].reta[shift];
3568 	}
3569 	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
3570 	if (ret == 0 && lut_size != reta_size) {
3571 		PMD_DRV_LOG(INFO,
3572 			    "The size of hash lookup table is changed from (%d) to (%d)",
3573 			    lut_size, reta_size);
3574 		pf->hash_lut_size = reta_size;
3575 	}
3576 
3577 out:
3578 	rte_free(lut);
3579 
3580 	return ret;
3581 }
3582 
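/* Query the RSS redirection table; reta_size must match the current LUT size */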
3583 static int
3584 ice_rss_reta_query(struct rte_eth_dev *dev,
3585 		   struct rte_eth_rss_reta_entry64 *reta_conf,
3586 		   uint16_t reta_size)
3587 {
3588 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3589 	uint16_t i, lut_size = pf->hash_lut_size;
3590 	uint16_t idx, shift;
3591 	uint8_t *lut;
3592 	int ret;
3593 
3594 	if (reta_size != lut_size) {
3595 		PMD_DRV_LOG(ERR,
3596 			    "The size of hash lookup table configured (%d) "
3597 			    "doesn't match the size the hardware can "
3598 			    "support (%d)",
3599 			    reta_size, lut_size);
3600 		return -EINVAL;
3601 	}
3602 
3603 	lut = rte_zmalloc(NULL, reta_size, 0);
3604 	if (!lut) {
3605 		PMD_DRV_LOG(ERR, "No memory can be allocated");
3606 		return -ENOMEM;
3607 	}
3608 
3609 	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
3610 	if (ret)
3611 		goto out;
3612 
3613 	for (i = 0; i < reta_size; i++) {
3614 		idx = i / RTE_RETA_GROUP_SIZE;
3615 		shift = i % RTE_RETA_GROUP_SIZE;
3616 		if (reta_conf[idx].mask & (1ULL << shift))
3617 			reta_conf[idx].reta[shift] = lut[i];
3618 	}
3619 
3620 out:
3621 	rte_free(lut);
3622 
3623 	return ret;
3624 }
3625 
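/* Set the RSS hash key of a VSI via admin queue */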
3626 static int
3627 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
3628 {
3629 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3630 	int ret = 0;
3631 
3632 	if (!key || key_len == 0) {
3633 		PMD_DRV_LOG(DEBUG, "No key to be configured");
3634 		return 0;
3635 	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
3636 		   sizeof(uint32_t)) {
3637 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
3638 		return -EINVAL;
3639 	}
3640 
3641 	struct ice_aqc_get_set_rss_keys *key_dw =
3642 		(struct ice_aqc_get_set_rss_keys *)key;
3643 
3644 	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
3645 	if (ret) {
3646 		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
3647 		ret = -EINVAL;
3648 	}
3649 
3650 	return ret;
3651 }
3652 
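/* Get the RSS hash key of a VSI via admin queue */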
3653 static int
3654 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
3655 {
3656 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3657 	int ret;
3658 
3659 	if (!key || !key_len)
3660 		return -EINVAL;
3661 
3662 	ret = ice_aq_get_rss_key
3663 		(hw, vsi->idx,
3664 		 (struct ice_aqc_get_set_rss_keys *)key);
3665 	if (ret) {
3666 		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
3667 		return -EINVAL;
3668 	}
3669 	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3670 
3671 	return 0;
3672 }
3673 
3674 static int
3675 ice_rss_hash_update(struct rte_eth_dev *dev,
3676 		    struct rte_eth_rss_conf *rss_conf)
3677 {
3678 	enum ice_status status = ICE_SUCCESS;
3679 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3680 	struct ice_vsi *vsi = pf->main_vsi;
3681 
3682 	/* set hash key */
3683 	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
3684 	if (status)
3685 		return status;
3686 
3687 	/* TODO: hash enable config, ice_add_rss_cfg */
3688 	return 0;
3689 }
3690 
3691 static int
3692 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
3693 		      struct rte_eth_rss_conf *rss_conf)
3694 {
3695 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3696 	struct ice_vsi *vsi = pf->main_vsi;
3697 
3698 	ice_get_rss_key(vsi, rss_conf->rss_key,
3699 			&rss_conf->rss_key_len);
3700 
3701 	/* TODO: default set to 0 as hf config is not supported now */
3702 	rss_conf->rss_hf = 0;
3703 	return 0;
3704 }
3705 
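/* Enable unicast and multicast promiscuous mode on the main VSI */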
3706 static int
3707 ice_promisc_enable(struct rte_eth_dev *dev)
3708 {
3709 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3710 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3711 	struct ice_vsi *vsi = pf->main_vsi;
3712 	enum ice_status status;
3713 	uint8_t pmask;
3714 	int ret = 0;
3715 
3716 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
3717 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3718 
3719 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
3720 	switch (status) {
3721 	case ICE_ERR_ALREADY_EXISTS:
3722 		PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
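		/* fall-through */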
3723 	case ICE_SUCCESS:
3724 		break;
3725 	default:
3726 		PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
3727 		ret = -EAGAIN;
3728 	}
3729 
3730 	return ret;
3731 }
3732 
3733 static int
3734 ice_promisc_disable(struct rte_eth_dev *dev)
3735 {
3736 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3737 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3738 	struct ice_vsi *vsi = pf->main_vsi;
3739 	enum ice_status status;
3740 	uint8_t pmask;
3741 	int ret = 0;
3742 
3743 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
3744 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3745 
3746 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
3747 	if (status != ICE_SUCCESS) {
3748 		PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
3749 		ret = -EAGAIN;
3750 	}
3751 
3752 	return ret;
3753 }
3754 
3755 static int
3756 ice_allmulti_enable(struct rte_eth_dev *dev)
3757 {
3758 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3759 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3760 	struct ice_vsi *vsi = pf->main_vsi;
3761 	enum ice_status status;
3762 	uint8_t pmask;
3763 	int ret = 0;
3764 
3765 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3766 
3767 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
3768 
3769 	switch (status) {
3770 	case ICE_ERR_ALREADY_EXISTS:
3771 		PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
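		/* fall-through */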
3772 	case ICE_SUCCESS:
3773 		break;
3774 	default:
3775 		PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
3776 		ret = -EAGAIN;
3777 	}
3778 
3779 	return ret;
3780 }
3781 
3782 static int
3783 ice_allmulti_disable(struct rte_eth_dev *dev)
3784 {
3785 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3786 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3787 	struct ice_vsi *vsi = pf->main_vsi;
3788 	enum ice_status status;
3789 	uint8_t pmask;
3790 	int ret = 0;
3791 
3792 	if (dev->data->promiscuous == 1)
3793 		return 0; /* must remain in all_multicast mode */
3794 
3795 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
3796 
3797 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
3798 	if (status != ICE_SUCCESS) {
3799 		PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
3800 		ret = -EAGAIN;
3801 	}
3802 
3803 	return ret;
3804 }
3805 
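/* Enable the MSI-X interrupt vector mapped to an Rx queue */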
3806 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
3807 				    uint16_t queue_id)
3808 {
3809 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3810 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3811 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3812 	uint32_t val;
3813 	uint16_t msix_intr;
3814 
3815 	msix_intr = intr_handle->intr_vec[queue_id];
3816 
3817 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
3818 	      GLINT_DYN_CTL_ITR_INDX_M;
3819 	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
3820 
3821 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
3822 	rte_intr_ack(&pci_dev->intr_handle);
3823 
3824 	return 0;
3825 }
3826 
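/* Mask the interrupt of an Rx queue; descriptor write-back on ITR stays enabled */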
3827 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
3828 				     uint16_t queue_id)
3829 {
3830 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3831 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3832 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3833 	uint16_t msix_intr;
3834 
3835 	msix_intr = intr_handle->intr_vec[queue_id];
3836 
3837 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
3838 
3839 	return 0;
3840 }
3841 
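/* Report NVM version, eetrack and OEM version. If fw_size is too small,
 * return the number of bytes needed, including the trailing '\0'.
 */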
3842 static int
3843 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3844 {
3845 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3846 	u32 full_ver;
3847 	u8 ver, patch;
3848 	u16 build;
3849 	int ret;
3850 
3851 	full_ver = hw->nvm.oem_ver;
3852 	ver = (u8)(full_ver >> 24);
3853 	build = (u16)((full_ver >> 8) & 0xffff);
3854 	patch = (u8)(full_ver & 0xff);
3855 
3856 	ret = snprintf(fw_version, fw_size,
3857 			"%d.%d%d 0x%08x %d.%d.%d",
3858 			((hw->nvm.ver >> 12) & 0xf),
3859 			((hw->nvm.ver >> 4) & 0xff),
3860 			(hw->nvm.ver & 0xf), hw->nvm.eetrack,
3861 			ver, build, patch);
3862 
3863 	/* add the size of '\0' */
3864 	ret += 1;
3865 	if (fw_size < (u32)ret)
3866 		return ret;
3867 	else
3868 		return 0;
3869 }
3870 
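/* Program PVID insertion and VLAN mode in the VSI context and push the
 * change via an update-VSI admin queue command.
 */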
3871 static int
3872 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
3873 {
3874 	struct ice_hw *hw;
3875 	struct ice_vsi_ctx ctxt;
3876 	uint8_t vlan_flags = 0;
3877 	int ret;
3878 
3879 	if (!vsi || !info) {
3880 		PMD_DRV_LOG(ERR, "invalid parameters");
3881 		return -EINVAL;
3882 	}
3883 
3884 	if (info->on) {
3885 		vsi->info.pvid = info->config.pvid;
3886 		/**
3887 		 * If insert pvid is enabled, only tagged pkts are
3888 		 * allowed to be sent out.
3889 		 */
3890 		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
3891 			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
3892 	} else {
3893 		vsi->info.pvid = 0;
3894 		if (info->config.reject.tagged == 0)
3895 			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;
3896 
3897 		if (info->config.reject.untagged == 0)
3898 			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
3899 	}
3900 	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
3901 				  ICE_AQ_VSI_VLAN_MODE_M);
3902 	vsi->info.vlan_flags |= vlan_flags;
3903 	memset(&ctxt, 0, sizeof(ctxt));
3904 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3905 	ctxt.info.valid_sections =
3906 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3907 	ctxt.vsi_num = vsi->vsi_id;
3908 
3909 	hw = ICE_VSI_TO_HW(vsi);
3910 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3911 	if (ret != ICE_SUCCESS) {
3912 		PMD_DRV_LOG(ERR,
3913 			    "update VSI for VLAN insert failed, err %d",
3914 			    ret);
3915 		return -EINVAL;
3916 	}
3917 
3918 	vsi->info.valid_sections |=
3919 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3920 
3921 	return ret;
3922 }
3923 
3924 static int
3925 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3926 {
3927 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3928 	struct ice_vsi *vsi = pf->main_vsi;
3929 	struct rte_eth_dev_data *data = pf->dev_data;
3930 	struct ice_vsi_vlan_pvid_info info;
3931 	int ret;
3932 
3933 	memset(&info, 0, sizeof(info));
3934 	info.on = on;
3935 	if (info.on) {
3936 		info.config.pvid = pvid;
3937 	} else {
3938 		info.config.reject.tagged =
3939 			data->dev_conf.txmode.hw_vlan_reject_tagged;
3940 		info.config.reject.untagged =
3941 			data->dev_conf.txmode.hw_vlan_reject_untagged;
3942 	}
3943 
3944 	ret = ice_vsi_vlan_pvid_set(vsi, &info);
3945 	if (ret < 0) {
3946 		PMD_DRV_LOG(ERR, "Failed to set pvid.");
3947 		return -EINVAL;
3948 	}
3949 
3950 	return 0;
3951 }
3952 
3953 static int
3954 ice_get_eeprom_length(struct rte_eth_dev *dev)
3955 {
3956 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3957 
3958 	/* Convert word count to byte count */
3959 	return hw->nvm.sr_words << 1;
3960 }
3961 
3962 static int
3963 ice_get_eeprom(struct rte_eth_dev *dev,
3964 	       struct rte_dev_eeprom_info *eeprom)
3965 {
3966 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3967 	uint16_t *data = eeprom->data;
3968 	uint16_t first_word, last_word, nwords;
3969 	enum ice_status status = ICE_SUCCESS;
3970 
3971 	first_word = eeprom->offset >> 1;
3972 	last_word = (eeprom->offset + eeprom->length - 1) >> 1;
3973 	nwords = last_word - first_word + 1;
3974 
3975 	if (first_word >= hw->nvm.sr_words ||
3976 	    last_word >= hw->nvm.sr_words) {
3977 		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
3978 		return -EINVAL;
3979 	}
3980 
3981 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3982 
3983 	status = ice_read_sr_buf(hw, first_word, &nwords, data);
3984 	if (status) {
3985 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
3986 		eeprom->length = sizeof(uint16_t) * nwords;
3987 		return -EIO;
3988 	}
3989 
3990 	return 0;
3991 }
3992 
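/* Read a 32-bit statistics register and accumulate the delta since the
 * stored offset, handling counter wrap-around.
 */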
3993 static void
3994 ice_stat_update_32(struct ice_hw *hw,
3995 		   uint32_t reg,
3996 		   bool offset_loaded,
3997 		   uint64_t *offset,
3998 		   uint64_t *stat)
3999 {
4000 	uint64_t new_data;
4001 
4002 	new_data = (uint64_t)ICE_READ_REG(hw, reg);
4003 	if (!offset_loaded)
4004 		*offset = new_data;
4005 
4006 	if (new_data >= *offset)
4007 		*stat = (uint64_t)(new_data - *offset);
4008 	else
4009 		*stat = (uint64_t)((new_data +
4010 				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
4011 				   - *offset);
4012 }
4013 
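/* Read a 40-bit statistics counter split across two registers and
 * accumulate the delta since the stored offset, handling wrap-around.
 */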
4014 static void
4015 ice_stat_update_40(struct ice_hw *hw,
4016 		   uint32_t hireg,
4017 		   uint32_t loreg,
4018 		   bool offset_loaded,
4019 		   uint64_t *offset,
4020 		   uint64_t *stat)
4021 {
4022 	uint64_t new_data;
4023 
4024 	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
4025 	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
4026 		    ICE_32_BIT_WIDTH;
4027 
4028 	if (!offset_loaded)
4029 		*offset = new_data;
4030 
4031 	if (new_data >= *offset)
4032 		*stat = new_data - *offset;
4033 	else
4034 		*stat = (uint64_t)((new_data +
4035 				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
4036 				   *offset);
4037 
4038 	*stat &= ICE_40_BIT_MASK;
4039 }
4040 
4041 /* Get all the statistics of a VSI */
4042 static void
4043 ice_update_vsi_stats(struct ice_vsi *vsi)
4044 {
4045 	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
4046 	struct ice_eth_stats *nes = &vsi->eth_stats;
4047 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4048 	int idx = rte_le_to_cpu_16(vsi->vsi_id);
4049 
4050 	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
4051 			   vsi->offset_loaded, &oes->rx_bytes,
4052 			   &nes->rx_bytes);
4053 	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
4054 			   vsi->offset_loaded, &oes->rx_unicast,
4055 			   &nes->rx_unicast);
4056 	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
4057 			   vsi->offset_loaded, &oes->rx_multicast,
4058 			   &nes->rx_multicast);
4059 	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
4060 			   vsi->offset_loaded, &oes->rx_broadcast,
4061 			   &nes->rx_broadcast);
4062 	/* exclude CRC bytes */
4063 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
4064 			  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
4065 
4066 	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
4067 			   &oes->rx_discards, &nes->rx_discards);
4068 	/* GLV_REPC not supported */
4069 	/* GLV_RMPC not supported */
4070 	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
4071 			   &oes->rx_unknown_protocol,
4072 			   &nes->rx_unknown_protocol);
4073 	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
4074 			   vsi->offset_loaded, &oes->tx_bytes,
4075 			   &nes->tx_bytes);
4076 	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
4077 			   vsi->offset_loaded, &oes->tx_unicast,
4078 			   &nes->tx_unicast);
4079 	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
4080 			   vsi->offset_loaded, &oes->tx_multicast,
4081 			   &nes->tx_multicast);
4082 	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
4083 			   vsi->offset_loaded,  &oes->tx_broadcast,
4084 			   &nes->tx_broadcast);
4085 	/* GLV_TDPC not supported */
4086 	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
4087 			   &oes->tx_errors, &nes->tx_errors);
4088 	vsi->offset_loaded = true;
4089 
4090 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
4091 		    vsi->vsi_id);
4092 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
4093 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
4094 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
4095 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
4096 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
4097 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
4098 		    nes->rx_unknown_protocol);
4099 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
4100 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
4101 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
4102 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
4103 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
4104 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
4105 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
4106 		    vsi->vsi_id);
4107 }
4108 
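/* Read all port-level statistics registers into pf->stats and refresh
 * the main VSI statistics.
 */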
4109 static void
4110 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
4111 {
4112 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
4113 	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
4114 
4115 	/* Get statistics of struct ice_eth_stats */
4116 	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
4117 			   GLPRT_GORCL(hw->port_info->lport),
4118 			   pf->offset_loaded, &os->eth.rx_bytes,
4119 			   &ns->eth.rx_bytes);
4120 	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
4121 			   GLPRT_UPRCL(hw->port_info->lport),
4122 			   pf->offset_loaded, &os->eth.rx_unicast,
4123 			   &ns->eth.rx_unicast);
4124 	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
4125 			   GLPRT_MPRCL(hw->port_info->lport),
4126 			   pf->offset_loaded, &os->eth.rx_multicast,
4127 			   &ns->eth.rx_multicast);
4128 	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
4129 			   GLPRT_BPRCL(hw->port_info->lport),
4130 			   pf->offset_loaded, &os->eth.rx_broadcast,
4131 			   &ns->eth.rx_broadcast);
4132 	ice_stat_update_32(hw, PRTRPB_RDPC,
4133 			   pf->offset_loaded, &os->eth.rx_discards,
4134 			   &ns->eth.rx_discards);
4135 
4136 	/* Workaround: CRC size should not be included in byte statistics,
4137 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
4138 	 * packet.
4139 	 */
4140 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
4141 			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
4142 
4143 	/* GLPRT_REPC not supported */
4144 	/* GLPRT_RMPC not supported */
4145 	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
4146 			   pf->offset_loaded,
4147 			   &os->eth.rx_unknown_protocol,
4148 			   &ns->eth.rx_unknown_protocol);
4149 	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
4150 			   GLPRT_GOTCL(hw->port_info->lport),
4151 			   pf->offset_loaded, &os->eth.tx_bytes,
4152 			   &ns->eth.tx_bytes);
4153 	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
4154 			   GLPRT_UPTCL(hw->port_info->lport),
4155 			   pf->offset_loaded, &os->eth.tx_unicast,
4156 			   &ns->eth.tx_unicast);
4157 	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
4158 			   GLPRT_MPTCL(hw->port_info->lport),
4159 			   pf->offset_loaded, &os->eth.tx_multicast,
4160 			   &ns->eth.tx_multicast);
4161 	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
4162 			   GLPRT_BPTCL(hw->port_info->lport),
4163 			   pf->offset_loaded, &os->eth.tx_broadcast,
4164 			   &ns->eth.tx_broadcast);
4165 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
4166 			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
4167 
4168 	/* GLPRT_TEPC not supported */
4169 
4170 	/* additional port specific stats */
4171 	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
4172 			   pf->offset_loaded, &os->tx_dropped_link_down,
4173 			   &ns->tx_dropped_link_down);
4174 	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
4175 			   pf->offset_loaded, &os->crc_errors,
4176 			   &ns->crc_errors);
4177 	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
4178 			   pf->offset_loaded, &os->illegal_bytes,
4179 			   &ns->illegal_bytes);
4180 	/* GLPRT_ERRBC not supported */
4181 	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
4182 			   pf->offset_loaded, &os->mac_local_faults,
4183 			   &ns->mac_local_faults);
4184 	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
4185 			   pf->offset_loaded, &os->mac_remote_faults,
4186 			   &ns->mac_remote_faults);
4187 
4188 	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
4189 			   pf->offset_loaded, &os->rx_len_errors,
4190 			   &ns->rx_len_errors);
4191 
4192 	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
4193 			   pf->offset_loaded, &os->link_xon_rx,
4194 			   &ns->link_xon_rx);
4195 	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
4196 			   pf->offset_loaded, &os->link_xoff_rx,
4197 			   &ns->link_xoff_rx);
4198 	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
4199 			   pf->offset_loaded, &os->link_xon_tx,
4200 			   &ns->link_xon_tx);
4201 	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
4202 			   pf->offset_loaded, &os->link_xoff_tx,
4203 			   &ns->link_xoff_tx);
4204 	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
4205 			   GLPRT_PRC64L(hw->port_info->lport),
4206 			   pf->offset_loaded, &os->rx_size_64,
4207 			   &ns->rx_size_64);
4208 	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
4209 			   GLPRT_PRC127L(hw->port_info->lport),
4210 			   pf->offset_loaded, &os->rx_size_127,
4211 			   &ns->rx_size_127);
4212 	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
4213 			   GLPRT_PRC255L(hw->port_info->lport),
4214 			   pf->offset_loaded, &os->rx_size_255,
4215 			   &ns->rx_size_255);
4216 	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
4217 			   GLPRT_PRC511L(hw->port_info->lport),
4218 			   pf->offset_loaded, &os->rx_size_511,
4219 			   &ns->rx_size_511);
4220 	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
4221 			   GLPRT_PRC1023L(hw->port_info->lport),
4222 			   pf->offset_loaded, &os->rx_size_1023,
4223 			   &ns->rx_size_1023);
4224 	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
4225 			   GLPRT_PRC1522L(hw->port_info->lport),
4226 			   pf->offset_loaded, &os->rx_size_1522,
4227 			   &ns->rx_size_1522);
4228 	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
4229 			   GLPRT_PRC9522L(hw->port_info->lport),
4230 			   pf->offset_loaded, &os->rx_size_big,
4231 			   &ns->rx_size_big);
4232 	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
4233 			   pf->offset_loaded, &os->rx_undersize,
4234 			   &ns->rx_undersize);
4235 	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
4236 			   pf->offset_loaded, &os->rx_fragments,
4237 			   &ns->rx_fragments);
4238 	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
4239 			   pf->offset_loaded, &os->rx_oversize,
4240 			   &ns->rx_oversize);
4241 	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
4242 			   pf->offset_loaded, &os->rx_jabber,
4243 			   &ns->rx_jabber);
4244 	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
4245 			   GLPRT_PTC64L(hw->port_info->lport),
4246 			   pf->offset_loaded, &os->tx_size_64,
4247 			   &ns->tx_size_64);
4248 	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
4249 			   GLPRT_PTC127L(hw->port_info->lport),
4250 			   pf->offset_loaded, &os->tx_size_127,
4251 			   &ns->tx_size_127);
4252 	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
4253 			   GLPRT_PTC255L(hw->port_info->lport),
4254 			   pf->offset_loaded, &os->tx_size_255,
4255 			   &ns->tx_size_255);
4256 	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
4257 			   GLPRT_PTC511L(hw->port_info->lport),
4258 			   pf->offset_loaded, &os->tx_size_511,
4259 			   &ns->tx_size_511);
4260 	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
4261 			   GLPRT_PTC1023L(hw->port_info->lport),
4262 			   pf->offset_loaded, &os->tx_size_1023,
4263 			   &ns->tx_size_1023);
4264 	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
4265 			   GLPRT_PTC1522L(hw->port_info->lport),
4266 			   pf->offset_loaded, &os->tx_size_1522,
4267 			   &ns->tx_size_1522);
4268 	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
4269 			   GLPRT_PTC9522L(hw->port_info->lport),
4270 			   pf->offset_loaded, &os->tx_size_big,
4271 			   &ns->tx_size_big);
4272 
4273 	/* GLPRT_MSPDC not supported */
4274 	/* GLPRT_XEC not supported */
4275 
4276 	pf->offset_loaded = true;
4277 
4278 	if (pf->main_vsi)
4279 		ice_update_vsi_stats(pf->main_vsi);
4280 }
4281 
4282 /* Get all statistics of a port */
4283 static int
4284 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
4285 {
4286 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4287 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4288 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
4289 
4290 	/* call read registers - updates values, now write them to struct */
4291 	/* Read the HW statistics registers, then derive the ethdev stats from them */
4292 
4293 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
4294 			  pf->main_vsi->eth_stats.rx_multicast +
4295 			  pf->main_vsi->eth_stats.rx_broadcast -
4296 			  pf->main_vsi->eth_stats.rx_discards;
4297 	stats->opackets = ns->eth.tx_unicast +
4298 			  ns->eth.tx_multicast +
4299 			  ns->eth.tx_broadcast;
4300 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
4301 	stats->obytes   = ns->eth.tx_bytes;
4302 	stats->oerrors  = ns->eth.tx_errors +
4303 			  pf->main_vsi->eth_stats.tx_errors;
4304 
4305 	/* Rx Errors */
4306 	stats->imissed  = ns->eth.rx_discards +
4307 			  pf->main_vsi->eth_stats.rx_discards;
4308 	stats->ierrors  = ns->crc_errors +
4309 			  ns->rx_undersize +
4310 			  ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
4311 
4312 	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
4313 	PMD_DRV_LOG(DEBUG, "rx_bytes:	%"PRIu64"", ns->eth.rx_bytes);
4314 	PMD_DRV_LOG(DEBUG, "rx_unicast:	%"PRIu64"", ns->eth.rx_unicast);
4315 	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
4316 	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
4317 	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
4318 	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
4319 		    pf->main_vsi->eth_stats.rx_discards);
4320 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
4321 		    ns->eth.rx_unknown_protocol);
4322 	PMD_DRV_LOG(DEBUG, "tx_bytes:	%"PRIu64"", ns->eth.tx_bytes);
4323 	PMD_DRV_LOG(DEBUG, "tx_unicast:	%"PRIu64"", ns->eth.tx_unicast);
4324 	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
4325 	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
4326 	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
4327 	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
4328 		    pf->main_vsi->eth_stats.tx_discards);
4329 	PMD_DRV_LOG(DEBUG, "tx_errors:		%"PRIu64"", ns->eth.tx_errors);
4330 
4331 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:	%"PRIu64"",
4332 		    ns->tx_dropped_link_down);
4333 	PMD_DRV_LOG(DEBUG, "crc_errors:	%"PRIu64"", ns->crc_errors);
4334 	PMD_DRV_LOG(DEBUG, "illegal_bytes:	%"PRIu64"",
4335 		    ns->illegal_bytes);
4336 	PMD_DRV_LOG(DEBUG, "error_bytes:	%"PRIu64"", ns->error_bytes);
4337 	PMD_DRV_LOG(DEBUG, "mac_local_faults:	%"PRIu64"",
4338 		    ns->mac_local_faults);
4339 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:	%"PRIu64"",
4340 		    ns->mac_remote_faults);
4341 	PMD_DRV_LOG(DEBUG, "link_xon_rx:	%"PRIu64"", ns->link_xon_rx);
4342 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:	%"PRIu64"", ns->link_xoff_rx);
4343 	PMD_DRV_LOG(DEBUG, "link_xon_tx:	%"PRIu64"", ns->link_xon_tx);
4344 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:	%"PRIu64"", ns->link_xoff_tx);
4345 	PMD_DRV_LOG(DEBUG, "rx_size_64:		%"PRIu64"", ns->rx_size_64);
4346 	PMD_DRV_LOG(DEBUG, "rx_size_127:	%"PRIu64"", ns->rx_size_127);
4347 	PMD_DRV_LOG(DEBUG, "rx_size_255:	%"PRIu64"", ns->rx_size_255);
4348 	PMD_DRV_LOG(DEBUG, "rx_size_511:	%"PRIu64"", ns->rx_size_511);
4349 	PMD_DRV_LOG(DEBUG, "rx_size_1023:	%"PRIu64"", ns->rx_size_1023);
4350 	PMD_DRV_LOG(DEBUG, "rx_size_1522:	%"PRIu64"", ns->rx_size_1522);
4351 	PMD_DRV_LOG(DEBUG, "rx_size_big:	%"PRIu64"", ns->rx_size_big);
4352 	PMD_DRV_LOG(DEBUG, "rx_undersize:	%"PRIu64"", ns->rx_undersize);
4353 	PMD_DRV_LOG(DEBUG, "rx_fragments:	%"PRIu64"", ns->rx_fragments);
4354 	PMD_DRV_LOG(DEBUG, "rx_oversize:	%"PRIu64"", ns->rx_oversize);
4355 	PMD_DRV_LOG(DEBUG, "rx_jabber:		%"PRIu64"", ns->rx_jabber);
4356 	PMD_DRV_LOG(DEBUG, "tx_size_64:		%"PRIu64"", ns->tx_size_64);
4357 	PMD_DRV_LOG(DEBUG, "tx_size_127:	%"PRIu64"", ns->tx_size_127);
4358 	PMD_DRV_LOG(DEBUG, "tx_size_255:	%"PRIu64"", ns->tx_size_255);
4359 	PMD_DRV_LOG(DEBUG, "tx_size_511:	%"PRIu64"", ns->tx_size_511);
4360 	PMD_DRV_LOG(DEBUG, "tx_size_1023:	%"PRIu64"", ns->tx_size_1023);
4361 	PMD_DRV_LOG(DEBUG, "tx_size_1522:	%"PRIu64"", ns->tx_size_1522);
4362 	PMD_DRV_LOG(DEBUG, "tx_size_big:	%"PRIu64"", ns->tx_size_big);
4363 	PMD_DRV_LOG(DEBUG, "rx_len_errors:	%"PRIu64"", ns->rx_len_errors);
4364 	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
4365 	return 0;
4366 }
4367 
4368 /* Reset the statistics */
4369 static int
4370 ice_stats_reset(struct rte_eth_dev *dev)
4371 {
4372 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4373 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4374 
4375 	/* Mark PF and VSI stats to update the offset, aka "reset" */
4376 	pf->offset_loaded = false;
4377 	if (pf->main_vsi)
4378 		pf->main_vsi->offset_loaded = false;
4379 
4380 	/* read the stats, reading current register values into offset */
4381 	ice_read_stats_registers(pf, hw);
4382 
4383 	return 0;
4384 }
4385 
4386 static uint32_t
4387 ice_xstats_calc_num(void)
4388 {
4389 	uint32_t num;
4390 
4391 	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
4392 
4393 	return num;
4394 }
4395 
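/* Retrieve extended statistics; return the number of entries required when
 * the supplied array is too small.
 */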
4396 static int
4397 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
4398 	       unsigned int n)
4399 {
4400 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4401 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4402 	unsigned int i;
4403 	unsigned int count;
4404 	struct ice_hw_port_stats *hw_stats = &pf->stats;
4405 
4406 	count = ice_xstats_calc_num();
4407 	if (n < count)
4408 		return count;
4409 
4410 	ice_read_stats_registers(pf, hw);
4411 
4412 	if (!xstats)
4413 		return 0;
4414 
4415 	count = 0;
4416 
4417 	/* Get stats from ice_eth_stats struct */
4418 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
4419 		xstats[count].value =
4420 			*(uint64_t *)((char *)&hw_stats->eth +
4421 				      ice_stats_strings[i].offset);
4422 		xstats[count].id = count;
4423 		count++;
4424 	}
4425 
4426 	/* Get individual stats from ice_hw_port struct */
4427 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
4428 		xstats[count].value =
4429 			*(uint64_t *)((char *)hw_stats +
4430 				      ice_hw_port_strings[i].offset);
4431 		xstats[count].id = count;
4432 		count++;
4433 	}
4434 
4435 	return count;
4436 }
4437 
4438 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
4439 				struct rte_eth_xstat_name *xstats_names,
4440 				__rte_unused unsigned int limit)
4441 {
4442 	unsigned int count = 0;
4443 	unsigned int i;
4444 
4445 	if (!xstats_names)
4446 		return ice_xstats_calc_num();
4447 
4448 	/* Note: limit checked in rte_eth_xstats_get_names() */
4449 
4450 	/* Get stats from ice_eth_stats struct */
4451 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
4452 		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
4453 			sizeof(xstats_names[count].name));
4454 		count++;
4455 	}
4456 
4457 	/* Get individual stats from ice_hw_port struct */
4458 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
4459 		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
4460 			sizeof(xstats_names[count].name));
4461 		count++;
4462 	}
4463 
4464 	return count;
4465 }
4466 
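/* Generic filter control: only RTE_ETH_FILTER_GENERIC is supported, which
 * exposes the rte_flow ops.
 */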
4467 static int
4468 ice_dev_filter_ctrl(struct rte_eth_dev *dev,
4469 		     enum rte_filter_type filter_type,
4470 		     enum rte_filter_op filter_op,
4471 		     void *arg)
4472 {
4473 	int ret = 0;
4474 
4475 	if (!dev)
4476 		return -EINVAL;
4477 
4478 	switch (filter_type) {
4479 	case RTE_ETH_FILTER_GENERIC:
4480 		if (filter_op != RTE_ETH_FILTER_GET)
4481 			return -EINVAL;
4482 		*(const void **)arg = &ice_flow_ops;
4483 		break;
4484 	default:
4485 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4486 					filter_type);
4487 		ret = -EINVAL;
4488 		break;
4489 	}
4490 
4491 	return ret;
4492 }
4493 
4494 /* Add UDP tunneling port */
4495 static int
4496 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
4497 			     struct rte_eth_udp_tunnel *udp_tunnel)
4498 {
4499 	int ret = 0;
4500 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4501 
4502 	if (udp_tunnel == NULL)
4503 		return -EINVAL;
4504 
4505 	switch (udp_tunnel->prot_type) {
4506 	case RTE_TUNNEL_TYPE_VXLAN:
4507 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
4508 		break;
4509 	default:
4510 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4511 		ret = -EINVAL;
4512 		break;
4513 	}
4514 
4515 	return ret;
4516 }
4517 
4518 /* Delete UDP tunneling port */
4519 static int
4520 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
4521 			     struct rte_eth_udp_tunnel *udp_tunnel)
4522 {
4523 	int ret = 0;
4524 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4525 
4526 	if (udp_tunnel == NULL)
4527 		return -EINVAL;
4528 
4529 	switch (udp_tunnel->prot_type) {
4530 	case RTE_TUNNEL_TYPE_VXLAN:
4531 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
4532 		break;
4533 	default:
4534 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4535 		ret = -EINVAL;
4536 		break;
4537 	}
4538 
4539 	return ret;
4540 }
4541 
4542 static int
4543 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
4544 	      struct rte_pci_device *pci_dev)
4545 {
4546 	return rte_eth_dev_pci_generic_probe(pci_dev,
4547 					     sizeof(struct ice_adapter),
4548 					     ice_dev_init);
4549 }
4550 
4551 static int
4552 ice_pci_remove(struct rte_pci_device *pci_dev)
4553 {
4554 	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
4555 }
4556 
4557 static struct rte_pci_driver rte_ice_pmd = {
4558 	.id_table = pci_id_ice_map,
4559 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
4560 	.probe = ice_pci_probe,
4561 	.remove = ice_pci_remove,
4562 };
4563 
4564 /**
4565  * Driver initialization routine.
4566  * Invoked once at EAL init time.
4567  * Register itself as the [Poll Mode] Driver of PCI devices.
4568  */
4569 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
4570 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
4571 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
4572 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
4573 			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp>"
4574 			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
4575 			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
4576 			      ICE_FLOW_MARK_SUPPORT_ARG "=<0|1>");
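
/* Illustrative devargs usage (the PCI address and values below are
 * placeholders only, not defaults), e.g. with testpmd:
 *   testpmd -w 0000:18:00.0,proto_xtr=vlan,safe-mode-support=1 -- -i
 */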
4577 
4578 RTE_INIT(ice_init_log)
4579 {
4580 	ice_logtype_init = rte_log_register("pmd.net.ice.init");
4581 	if (ice_logtype_init >= 0)
4582 		rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
4583 	ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
4584 	if (ice_logtype_driver >= 0)
4585 		rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
4586 
4587 #ifdef RTE_LIBRTE_ICE_DEBUG_RX
4588 	ice_logtype_rx = rte_log_register("pmd.net.ice.rx");
4589 	if (ice_logtype_rx >= 0)
4590 		rte_log_set_level(ice_logtype_rx, RTE_LOG_DEBUG);
4591 #endif
4592 
4593 #ifdef RTE_LIBRTE_ICE_DEBUG_TX
4594 	ice_logtype_tx = rte_log_register("pmd.net.ice.tx");
4595 	if (ice_logtype_tx >= 0)
4596 		rte_log_set_level(ice_logtype_tx, RTE_LOG_DEBUG);
4597 #endif
4598 
4599 #ifdef RTE_LIBRTE_ICE_DEBUG_TX_FREE
4600 	ice_logtype_tx_free = rte_log_register("pmd.net.ice.tx_free");
4601 	if (ice_logtype_tx_free >= 0)
4602 		rte_log_set_level(ice_logtype_tx_free, RTE_LOG_DEBUG);
4603 #endif
4604 }
4605