xref: /dpdk/drivers/net/ice/ice_ethdev.c (revision cf412ff7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <ethdev_pci.h>
7 
8 #include <stdio.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <unistd.h>
12 
13 #include <rte_tailq.h>
14 
15 #include "base/ice_sched.h"
16 #include "base/ice_flow.h"
17 #include "base/ice_dcb.h"
18 #include "base/ice_common.h"
19 
20 #include "rte_pmd_ice.h"
21 #include "ice_ethdev.h"
22 #include "ice_rxtx.h"
23 #include "ice_generic_flow.h"
24 
25 /* devargs */
26 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
27 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
28 #define ICE_PROTO_XTR_ARG         "proto_xtr"
29 
30 static const char * const ice_valid_args[] = {
31 	ICE_SAFE_MODE_SUPPORT_ARG,
32 	ICE_PIPELINE_MODE_SUPPORT_ARG,
33 	ICE_PROTO_XTR_ARG,
34 	NULL
35 };
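
/*
 * Illustrative devargs usage (a sketch only; see the ice PMD documentation
 * for the authoritative syntax). "safe-mode-support" and
 * "pipeline-mode-support" take a boolean 0/1 value, while "proto_xtr"
 * selects per-queue protocol extraction, e.g. (the PCI address below is
 * just an example):
 *   -a 18:00.0,safe-mode-support=1
 *   -a 18:00.0,proto_xtr=[(1,2-3,8-9):tcp,10-13:vlan]
 * See the comment above parse_queue_proto_xtr() for the accepted grammar.
 */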
36 
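/*
 * Dynamic mbuf field carrying the protocol extraction metadata of each
 * received packet; it is registered on demand in ice_init_proto_xtr(), and
 * the dynamic flags defined below indicate which extraction type filled it.
 */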
37 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
38 	.name = "intel_pmd_dynfield_proto_xtr_metadata",
39 	.size = sizeof(uint32_t),
40 	.align = __alignof__(uint32_t),
41 	.flags = 0,
42 };
43 
44 struct proto_xtr_ol_flag {
45 	const struct rte_mbuf_dynflag param;
46 	uint64_t *ol_flag;
47 	bool required;
48 };
49 
50 static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];
51 
52 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
53 	[PROTO_XTR_VLAN] = {
54 		.param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
55 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
56 	[PROTO_XTR_IPV4] = {
57 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
58 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
59 	[PROTO_XTR_IPV6] = {
60 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
61 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
62 	[PROTO_XTR_IPV6_FLOW] = {
63 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
64 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
65 	[PROTO_XTR_TCP] = {
66 		.param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
67 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
68 	[PROTO_XTR_IP_OFFSET] = {
69 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
70 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
71 };
72 
73 #define ICE_OS_DEFAULT_PKG_NAME		"ICE OS Default Package"
74 #define ICE_COMMS_PKG_NAME			"ICE COMMS Package"
75 #define ICE_MAX_RES_DESC_NUM        1024
76 
77 static int ice_dev_configure(struct rte_eth_dev *dev);
78 static int ice_dev_start(struct rte_eth_dev *dev);
79 static int ice_dev_stop(struct rte_eth_dev *dev);
80 static int ice_dev_close(struct rte_eth_dev *dev);
81 static int ice_dev_reset(struct rte_eth_dev *dev);
82 static int ice_dev_info_get(struct rte_eth_dev *dev,
83 			    struct rte_eth_dev_info *dev_info);
84 static int ice_link_update(struct rte_eth_dev *dev,
85 			   int wait_to_complete);
86 static int ice_dev_set_link_up(struct rte_eth_dev *dev);
87 static int ice_dev_set_link_down(struct rte_eth_dev *dev);
88 
89 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
90 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
91 static int ice_rss_reta_update(struct rte_eth_dev *dev,
92 			       struct rte_eth_rss_reta_entry64 *reta_conf,
93 			       uint16_t reta_size);
94 static int ice_rss_reta_query(struct rte_eth_dev *dev,
95 			      struct rte_eth_rss_reta_entry64 *reta_conf,
96 			      uint16_t reta_size);
97 static int ice_rss_hash_update(struct rte_eth_dev *dev,
98 			       struct rte_eth_rss_conf *rss_conf);
99 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
100 				 struct rte_eth_rss_conf *rss_conf);
101 static int ice_promisc_enable(struct rte_eth_dev *dev);
102 static int ice_promisc_disable(struct rte_eth_dev *dev);
103 static int ice_allmulti_enable(struct rte_eth_dev *dev);
104 static int ice_allmulti_disable(struct rte_eth_dev *dev);
105 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
106 			       uint16_t vlan_id,
107 			       int on);
108 static int ice_macaddr_set(struct rte_eth_dev *dev,
109 			   struct rte_ether_addr *mac_addr);
110 static int ice_macaddr_add(struct rte_eth_dev *dev,
111 			   struct rte_ether_addr *mac_addr,
112 			   __rte_unused uint32_t index,
113 			   uint32_t pool);
114 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
115 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
116 				    uint16_t queue_id);
117 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
118 				     uint16_t queue_id);
119 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
120 			      size_t fw_size);
121 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
122 			     uint16_t pvid, int on);
123 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
124 static int ice_get_eeprom(struct rte_eth_dev *dev,
125 			  struct rte_dev_eeprom_info *eeprom);
126 static int ice_stats_get(struct rte_eth_dev *dev,
127 			 struct rte_eth_stats *stats);
128 static int ice_stats_reset(struct rte_eth_dev *dev);
129 static int ice_xstats_get(struct rte_eth_dev *dev,
130 			  struct rte_eth_xstat *xstats, unsigned int n);
131 static int ice_xstats_get_names(struct rte_eth_dev *dev,
132 				struct rte_eth_xstat_name *xstats_names,
133 				unsigned int limit);
134 static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
135 				const struct rte_flow_ops **ops);
136 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
137 			struct rte_eth_udp_tunnel *udp_tunnel);
138 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
139 			struct rte_eth_udp_tunnel *udp_tunnel);
140 
141 static const struct rte_pci_id pci_id_ice_map[] = {
142 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
143 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
144 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
145 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
146 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
147 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
148 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
149 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
150 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
151 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
152 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
153 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) },
154 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) },
155 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
156 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
157 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
158 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
159 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
160 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
161 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
162 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
163 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
164 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
165 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
166 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
167 	{ .vendor_id = 0, /* sentinel */ },
168 };
169 
170 static const struct eth_dev_ops ice_eth_dev_ops = {
171 	.dev_configure                = ice_dev_configure,
172 	.dev_start                    = ice_dev_start,
173 	.dev_stop                     = ice_dev_stop,
174 	.dev_close                    = ice_dev_close,
175 	.dev_reset                    = ice_dev_reset,
176 	.dev_set_link_up              = ice_dev_set_link_up,
177 	.dev_set_link_down            = ice_dev_set_link_down,
178 	.rx_queue_start               = ice_rx_queue_start,
179 	.rx_queue_stop                = ice_rx_queue_stop,
180 	.tx_queue_start               = ice_tx_queue_start,
181 	.tx_queue_stop                = ice_tx_queue_stop,
182 	.rx_queue_setup               = ice_rx_queue_setup,
183 	.rx_queue_release             = ice_rx_queue_release,
184 	.tx_queue_setup               = ice_tx_queue_setup,
185 	.tx_queue_release             = ice_tx_queue_release,
186 	.dev_infos_get                = ice_dev_info_get,
187 	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
188 	.link_update                  = ice_link_update,
189 	.mtu_set                      = ice_mtu_set,
190 	.mac_addr_set                 = ice_macaddr_set,
191 	.mac_addr_add                 = ice_macaddr_add,
192 	.mac_addr_remove              = ice_macaddr_remove,
193 	.vlan_filter_set              = ice_vlan_filter_set,
194 	.vlan_offload_set             = ice_vlan_offload_set,
195 	.reta_update                  = ice_rss_reta_update,
196 	.reta_query                   = ice_rss_reta_query,
197 	.rss_hash_update              = ice_rss_hash_update,
198 	.rss_hash_conf_get            = ice_rss_hash_conf_get,
199 	.promiscuous_enable           = ice_promisc_enable,
200 	.promiscuous_disable          = ice_promisc_disable,
201 	.allmulticast_enable          = ice_allmulti_enable,
202 	.allmulticast_disable         = ice_allmulti_disable,
203 	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
204 	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
205 	.fw_version_get               = ice_fw_version_get,
206 	.vlan_pvid_set                = ice_vlan_pvid_set,
207 	.rxq_info_get                 = ice_rxq_info_get,
208 	.txq_info_get                 = ice_txq_info_get,
209 	.rx_burst_mode_get            = ice_rx_burst_mode_get,
210 	.tx_burst_mode_get            = ice_tx_burst_mode_get,
211 	.get_eeprom_length            = ice_get_eeprom_length,
212 	.get_eeprom                   = ice_get_eeprom,
213 	.stats_get                    = ice_stats_get,
214 	.stats_reset                  = ice_stats_reset,
215 	.xstats_get                   = ice_xstats_get,
216 	.xstats_get_names             = ice_xstats_get_names,
217 	.xstats_reset                 = ice_stats_reset,
218 	.flow_ops_get                 = ice_dev_flow_ops_get,
219 	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
220 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
221 	.tx_done_cleanup              = ice_tx_done_cleanup,
222 	.get_monitor_addr             = ice_get_monitor_addr,
223 };
224 
225 /* Store statistics names and their offsets in the stats structure */
226 struct ice_xstats_name_off {
227 	char name[RTE_ETH_XSTATS_NAME_SIZE];
228 	unsigned int offset;
229 };
230 
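/*
 * The tables below map xstat display names to field offsets inside the
 * driver statistics structures; they back the ice_xstats_get() and
 * ice_xstats_get_names() callbacks declared above.
 */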
231 static const struct ice_xstats_name_off ice_stats_strings[] = {
232 	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
233 	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
234 	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
235 	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
236 	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
237 		rx_unknown_protocol)},
238 	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
239 	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
240 	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
241 	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
242 };
243 
244 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
245 		sizeof(ice_stats_strings[0]))
246 
247 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
248 	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
249 		tx_dropped_link_down)},
250 	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
251 	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
252 		illegal_bytes)},
253 	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
254 	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
255 		mac_local_faults)},
256 	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
257 		mac_remote_faults)},
258 	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
259 		rx_len_errors)},
260 	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
261 	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
262 	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
263 	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
264 	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
265 	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
266 		rx_size_127)},
267 	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
268 		rx_size_255)},
269 	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
270 		rx_size_511)},
271 	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
272 		rx_size_1023)},
273 	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
274 		rx_size_1522)},
275 	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
276 		rx_size_big)},
277 	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
278 		rx_undersize)},
279 	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
280 		rx_oversize)},
281 	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
282 		mac_short_pkt_dropped)},
283 	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
284 		rx_fragments)},
285 	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
286 	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
287 	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
288 		tx_size_127)},
289 	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
290 		tx_size_255)},
291 	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
292 		tx_size_511)},
293 	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
294 		tx_size_1023)},
295 	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
296 		tx_size_1522)},
297 	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
298 		tx_size_big)},
299 };
300 
301 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
302 		sizeof(ice_hw_port_strings[0]))
303 
304 static void
305 ice_init_controlq_parameter(struct ice_hw *hw)
306 {
307 	/* fields for adminq */
308 	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
309 	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
310 	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
311 	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
312 
313 	/* fields for mailboxq; used when DPDK acts as the PF host */
314 	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
315 	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
316 	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
317 	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
318 }
319 
320 static int
321 lookup_proto_xtr_type(const char *xtr_name)
322 {
323 	static struct {
324 		const char *name;
325 		enum proto_xtr_type type;
326 	} xtr_type_map[] = {
327 		{ "vlan",      PROTO_XTR_VLAN      },
328 		{ "ipv4",      PROTO_XTR_IPV4      },
329 		{ "ipv6",      PROTO_XTR_IPV6      },
330 		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
331 		{ "tcp",       PROTO_XTR_TCP       },
332 		{ "ip_offset", PROTO_XTR_IP_OFFSET },
333 	};
334 	uint32_t i;
335 
336 	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
337 		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
338 			return xtr_type_map[i].type;
339 	}
340 
341 	return -1;
342 }
343 
344 /*
345  * Parse an elem; an elem can be a single number/range or a '(' ')' group:
346  * 1) A single number elem is just one value, e.g. 9
347  * 2) A single range elem is two numbers joined by '-', e.g. 2-6
348  * 3) A group elem combines multiple 1) or 2) within '( )', e.g. (0,2-4,6)
349  *    Within a group elem, '-' is used as a range separator and
350  *                         ',' separates single entries.
351  */
352 static int
353 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
354 {
355 	const char *str = input;
356 	char *end = NULL;
357 	uint32_t min, max;
358 	uint32_t idx;
359 
360 	while (isblank(*str))
361 		str++;
362 
363 	if (!isdigit(*str) && *str != '(')
364 		return -1;
365 
366 	/* process a single number or a single range of numbers */
367 	if (*str != '(') {
368 		errno = 0;
369 		idx = strtoul(str, &end, 10);
370 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
371 			return -1;
372 
373 		while (isblank(*end))
374 			end++;
375 
376 		min = idx;
377 		max = idx;
378 
379 		/* process single <number>-<number> */
380 		if (*end == '-') {
381 			end++;
382 			while (isblank(*end))
383 				end++;
384 			if (!isdigit(*end))
385 				return -1;
386 
387 			errno = 0;
388 			idx = strtoul(end, &end, 10);
389 			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
390 				return -1;
391 
392 			max = idx;
393 			while (isblank(*end))
394 				end++;
395 		}
396 
397 		if (*end != ':')
398 			return -1;
399 
400 		for (idx = RTE_MIN(min, max);
401 		     idx <= RTE_MAX(min, max); idx++)
402 			devargs->proto_xtr[idx] = xtr_type;
403 
404 		return 0;
405 	}
406 
407 	/* process set within bracket */
408 	str++;
409 	while (isblank(*str))
410 		str++;
411 	if (*str == '\0')
412 		return -1;
413 
414 	min = ICE_MAX_QUEUE_NUM;
415 	do {
416 		/* advance to the first digit */
417 		while (isblank(*str))
418 			str++;
419 		if (!isdigit(*str))
420 			return -1;
421 
422 		/* get the digit value */
423 		errno = 0;
424 		idx = strtoul(str, &end, 10);
425 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
426 			return -1;
427 
428 		/* advance to a separator: '-', ',' or ')' */
429 		while (isblank(*end))
430 			end++;
431 		if (*end == '-') {
432 			if (min == ICE_MAX_QUEUE_NUM)
433 				min = idx;
434 			else /* avoid continuous '-' */
435 				return -1;
436 		} else if (*end == ',' || *end == ')') {
437 			max = idx;
438 			if (min == ICE_MAX_QUEUE_NUM)
439 				min = idx;
440 
441 			for (idx = RTE_MIN(min, max);
442 			     idx <= RTE_MAX(min, max); idx++)
443 				devargs->proto_xtr[idx] = xtr_type;
444 
445 			min = ICE_MAX_QUEUE_NUM;
446 		} else {
447 			return -1;
448 		}
449 
450 		str = end + 1;
451 	} while (*end != ')' && *end != '\0');
452 
453 	return 0;
454 }
455 
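/*
 * Parse the "proto_xtr" devargs value. Based on the parsing code below, the
 * value is either a bare type name ("vlan", "ipv4", "ipv6", "ipv6_flow",
 * "tcp" or "ip_offset") used as the default for all queues, or a bracketed
 * list of <queue set>:<type> pairs, where <queue set> is an elem as described
 * above parse_queue_set(), e.g. [(1,2-3,8-9):tcp,10-13:vlan] (example only).
 */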
456 static int
457 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
458 {
459 	const char *queue_start;
460 	uint32_t idx;
461 	int xtr_type;
462 	char xtr_name[32];
463 
464 	while (isblank(*queues))
465 		queues++;
466 
467 	if (*queues != '[') {
468 		xtr_type = lookup_proto_xtr_type(queues);
469 		if (xtr_type < 0)
470 			return -1;
471 
472 		devargs->proto_xtr_dflt = xtr_type;
473 
474 		return 0;
475 	}
476 
477 	queues++;
478 	do {
479 		while (isblank(*queues))
480 			queues++;
481 		if (*queues == '\0')
482 			return -1;
483 
484 		queue_start = queues;
485 
486 		/* skip over a complete bracketed group */
487 		if (*queue_start == '(') {
488 			queues += strcspn(queues, ")");
489 			if (*queues != ')')
490 				return -1;
491 		}
492 
493 		/* scan for the separator ':' */
494 		queues += strcspn(queues, ":");
495 		if (*queues++ != ':')
496 			return -1;
497 		while (isblank(*queues))
498 			queues++;
499 
500 		for (idx = 0; ; idx++) {
501 			if (isblank(queues[idx]) ||
502 			    queues[idx] == ',' ||
503 			    queues[idx] == ']' ||
504 			    queues[idx] == '\0')
505 				break;
506 
507 			if (idx > sizeof(xtr_name) - 2)
508 				return -1;
509 
510 			xtr_name[idx] = queues[idx];
511 		}
512 		xtr_name[idx] = '\0';
513 		xtr_type = lookup_proto_xtr_type(xtr_name);
514 		if (xtr_type < 0)
515 			return -1;
516 
517 		queues += idx;
518 
519 		while (isblank(*queues) || *queues == ',' || *queues == ']')
520 			queues++;
521 
522 		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
523 			return -1;
524 	} while (*queues != '\0');
525 
526 	return 0;
527 }
528 
529 static int
530 handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
531 		     void *extra_args)
532 {
533 	struct ice_devargs *devargs = extra_args;
534 
535 	if (value == NULL || extra_args == NULL)
536 		return -EINVAL;
537 
538 	if (parse_queue_proto_xtr(value, devargs) < 0) {
539 		PMD_DRV_LOG(ERR,
540 			    "Invalid protocol extraction parameter: '%s'",
541 			    value);
542 		return -1;
543 	}
544 
545 	return 0;
546 }
547 
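/*
 * Probe which protocol extraction types are actually programmed into the
 * flexible descriptor words: for each RXDID used by proto_xtr, read the
 * GLFLXP_RXDID_FLX_WRD_4/5 registers and check that the expected protocol ID
 * and opcode are present, recording the result in ice_proto_xtr_hw_support[].
 */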
548 static void
549 ice_check_proto_xtr_support(struct ice_hw *hw)
550 {
551 #define FLX_REG(val, fld, idx) \
552 	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
553 	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
554 	static struct {
555 		uint32_t rxdid;
556 		uint8_t opcode;
557 		uint8_t protid_0;
558 		uint8_t protid_1;
559 	} xtr_sets[] = {
560 		[PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
561 				     ICE_RX_OPC_EXTRACT,
562 				     ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
563 		[PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
564 				     ICE_RX_OPC_EXTRACT,
565 				     ICE_PROT_IPV4_OF_OR_S,
566 				     ICE_PROT_IPV4_OF_OR_S },
567 		[PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
568 				     ICE_RX_OPC_EXTRACT,
569 				     ICE_PROT_IPV6_OF_OR_S,
570 				     ICE_PROT_IPV6_OF_OR_S },
571 		[PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
572 					  ICE_RX_OPC_EXTRACT,
573 					  ICE_PROT_IPV6_OF_OR_S,
574 					  ICE_PROT_IPV6_OF_OR_S },
575 		[PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
576 				    ICE_RX_OPC_EXTRACT,
577 				    ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
578 		[PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
579 					  ICE_RX_OPC_PROTID,
580 					  ICE_PROT_IPV4_OF_OR_S,
581 					  ICE_PROT_IPV6_OF_OR_S },
582 	};
583 	uint32_t i;
584 
585 	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
586 		uint32_t rxdid = xtr_sets[i].rxdid;
587 		uint32_t v;
588 
589 		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
590 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
591 
592 			if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
593 			    FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
594 				ice_proto_xtr_hw_support[i] = true;
595 		}
596 
597 		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
598 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
599 
600 			if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
601 			    FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
602 				ice_proto_xtr_hw_support[i] = true;
603 		}
604 	}
605 }
606 
607 static int
608 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
609 		  uint32_t num)
610 {
611 	struct pool_entry *entry;
612 
613 	if (!pool || !num)
614 		return -EINVAL;
615 
616 	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
617 	if (!entry) {
618 		PMD_INIT_LOG(ERR,
619 			     "Failed to allocate memory for resource pool");
620 		return -ENOMEM;
621 	}
622 
623 	/* Initialize the queue heap */
624 	pool->num_free = num;
625 	pool->num_alloc = 0;
626 	pool->base = base;
627 	LIST_INIT(&pool->alloc_list);
628 	LIST_INIT(&pool->free_list);
629 
630 	/* Initialize element  */
631 	entry->base = 0;
632 	entry->len = num;
633 
634 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
635 	return 0;
636 }
637 
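/*
 * Best-fit allocation from the resource pool: scan the free list for the
 * smallest entry that can hold 'num' resources, split it if it is larger
 * than needed, and move the allocated piece to the alloc list. Returns the
 * absolute base index of the allocated range on success, or a negative errno.
 */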
638 static int
639 ice_res_pool_alloc(struct ice_res_pool_info *pool,
640 		   uint16_t num)
641 {
642 	struct pool_entry *entry, *valid_entry;
643 
644 	if (!pool || !num) {
645 		PMD_INIT_LOG(ERR, "Invalid parameter");
646 		return -EINVAL;
647 	}
648 
649 	if (pool->num_free < num) {
650 		PMD_INIT_LOG(ERR, "Not enough resources. requested:%u, available:%u",
651 			     num, pool->num_free);
652 		return -ENOMEM;
653 	}
654 
655 	valid_entry = NULL;
656 	/* Look up the free list and find the best-fit entry */
657 	LIST_FOREACH(entry, &pool->free_list, next) {
658 		if (entry->len >= num) {
659 			/* An exact match is the best fit */
660 			if (entry->len == num) {
661 				valid_entry = entry;
662 				break;
663 			}
664 			if (!valid_entry ||
665 			    valid_entry->len > entry->len)
666 				valid_entry = entry;
667 		}
668 	}
669 
670 	/* No entry found that satisfies the request, return */
671 	if (!valid_entry) {
672 		PMD_INIT_LOG(ERR, "No valid entry found");
673 		return -ENOMEM;
674 	}
675 	/**
676 	 * The entry has exactly the requested number of queues;
677 	 * remove it from the free_list.
678 	 */
679 	if (valid_entry->len == num) {
680 		LIST_REMOVE(valid_entry, next);
681 	} else {
682 		/**
683 		 * The entry has more queues than requested;
684 		 * create a new entry for the alloc_list and shrink the
685 		 * base and length of the entry remaining in the free_list.
686 		 */
687 		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
688 		if (!entry) {
689 			PMD_INIT_LOG(ERR,
690 				     "Failed to allocate memory for "
691 				     "resource pool");
692 			return -ENOMEM;
693 		}
694 		entry->base = valid_entry->base;
695 		entry->len = num;
696 		valid_entry->base += num;
697 		valid_entry->len -= num;
698 		valid_entry = entry;
699 	}
700 
701 	/* Insert it into alloc list, not sorted */
702 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
703 
704 	pool->num_free -= valid_entry->len;
705 	pool->num_alloc += valid_entry->len;
706 
707 	return valid_entry->base + pool->base;
708 }
709 
710 static void
711 ice_res_pool_destroy(struct ice_res_pool_info *pool)
712 {
713 	struct pool_entry *entry, *next_entry;
714 
715 	if (!pool)
716 		return;
717 
718 	for (entry = LIST_FIRST(&pool->alloc_list);
719 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
720 	     entry = next_entry) {
721 		LIST_REMOVE(entry, next);
722 		rte_free(entry);
723 	}
724 
725 	for (entry = LIST_FIRST(&pool->free_list);
726 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
727 	     entry = next_entry) {
728 		LIST_REMOVE(entry, next);
729 		rte_free(entry);
730 	}
731 
732 	pool->num_free = 0;
733 	pool->num_alloc = 0;
734 	pool->base = 0;
735 	LIST_INIT(&pool->alloc_list);
736 	LIST_INIT(&pool->free_list);
737 }
738 
739 static void
740 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
741 {
742 	/* Set VSI LUT selection */
743 	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
744 			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
745 	/* Set Hash scheme */
746 	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
747 			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
748 	/* enable TC */
749 	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
750 }
751 
752 static enum ice_status
753 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
754 				struct ice_aqc_vsi_props *info,
755 				uint8_t enabled_tcmap)
756 {
757 	uint16_t bsf, qp_idx;
758 
759 	/* Only TC0 is supported for now; multi-TC support will be added later.
760 	 * Configure the TC and queue mapping parameters: for each enabled TC,
761 	 * allocate qpnum_per_tc queues to that traffic class.
762 	 */
763 	if (enabled_tcmap != 0x01) {
764 		PMD_INIT_LOG(ERR, "only TC0 is supported");
765 		return -ENOTSUP;
766 	}
767 
768 	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
769 	bsf = rte_bsf32(vsi->nb_qps);
770 	/* Adjust to the actual number of queues that can be applied */
771 	vsi->nb_qps = 0x1 << bsf;
772 
773 	qp_idx = 0;
774 	/* Set tc and queue mapping with VSI */
775 	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
776 						ICE_AQ_VSI_TC_Q_OFFSET_S) |
777 					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
778 
779 	/* Associate queue number with VSI */
780 	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
781 	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
782 	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
783 	info->valid_sections |=
784 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
785 	/* Set info.ingress_table and info.egress_table
786 	 * for the UP translation table. For now, use a 1:1 map by default:
787 	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
788 	 */
789 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
790 	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
791 	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
792 	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
793 	return 0;
794 }
795 
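/*
 * Validate the port's hardware LAN MAC address, mirror it into the permanent
 * address, and allocate dev->data->mac_addrs with room for
 * ICE_NUM_MACADDR_MAX entries, storing the permanent address as entry 0.
 */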
796 static int
797 ice_init_mac_address(struct rte_eth_dev *dev)
798 {
799 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
800 
801 	if (!rte_is_unicast_ether_addr
802 		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
803 		PMD_INIT_LOG(ERR, "Invalid MAC address");
804 		return -EINVAL;
805 	}
806 
807 	rte_ether_addr_copy(
808 		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
809 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
810 
811 	dev->data->mac_addrs =
812 		rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0);
813 	if (!dev->data->mac_addrs) {
814 		PMD_INIT_LOG(ERR,
815 			     "Failed to allocate memory to store mac address");
816 		return -ENOMEM;
817 	}
818 	/* store it in dev data */
819 	rte_ether_addr_copy(
820 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
821 		&dev->data->mac_addrs[0]);
822 	return 0;
823 }
824 
825 /* Find a specific MAC filter */
826 static struct ice_mac_filter *
827 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
828 {
829 	struct ice_mac_filter *f;
830 
831 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
832 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
833 			return f;
834 	}
835 
836 	return NULL;
837 }
838 
839 static int
840 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
841 {
842 	struct ice_fltr_list_entry *m_list_itr = NULL;
843 	struct ice_mac_filter *f;
844 	struct LIST_HEAD_TYPE list_head;
845 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
846 	int ret = 0;
847 
848 	/* If it's added and configured, return */
849 	f = ice_find_mac_filter(vsi, mac_addr);
850 	if (f) {
851 		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
852 		return 0;
853 	}
854 
855 	INIT_LIST_HEAD(&list_head);
856 
857 	m_list_itr = (struct ice_fltr_list_entry *)
858 		ice_malloc(hw, sizeof(*m_list_itr));
859 	if (!m_list_itr) {
860 		ret = -ENOMEM;
861 		goto DONE;
862 	}
863 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
864 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
865 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
866 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
867 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
868 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
869 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
870 
871 	LIST_ADD(&m_list_itr->list_entry, &list_head);
872 
873 	/* Add the mac */
874 	ret = ice_add_mac(hw, &list_head);
875 	if (ret != ICE_SUCCESS) {
876 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
877 		ret = -EINVAL;
878 		goto DONE;
879 	}
880 	/* Add the mac addr into mac list */
881 	f = rte_zmalloc(NULL, sizeof(*f), 0);
882 	if (!f) {
883 		PMD_DRV_LOG(ERR, "failed to allocate memory");
884 		ret = -ENOMEM;
885 		goto DONE;
886 	}
887 	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
888 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
889 	vsi->mac_num++;
890 
891 	ret = 0;
892 
893 DONE:
894 	rte_free(m_list_itr);
895 	return ret;
896 }
897 
898 static int
899 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
900 {
901 	struct ice_fltr_list_entry *m_list_itr = NULL;
902 	struct ice_mac_filter *f;
903 	struct LIST_HEAD_TYPE list_head;
904 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
905 	int ret = 0;
906 
907 	/* Can't find it, return an error */
908 	f = ice_find_mac_filter(vsi, mac_addr);
909 	if (!f)
910 		return -EINVAL;
911 
912 	INIT_LIST_HEAD(&list_head);
913 
914 	m_list_itr = (struct ice_fltr_list_entry *)
915 		ice_malloc(hw, sizeof(*m_list_itr));
916 	if (!m_list_itr) {
917 		ret = -ENOMEM;
918 		goto DONE;
919 	}
920 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
921 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
922 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
923 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
924 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
925 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
926 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
927 
928 	LIST_ADD(&m_list_itr->list_entry, &list_head);
929 
930 	/* remove the mac filter */
931 	ret = ice_remove_mac(hw, &list_head);
932 	if (ret != ICE_SUCCESS) {
933 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
934 		ret = -EINVAL;
935 		goto DONE;
936 	}
937 
938 	/* Remove the mac addr from mac list */
939 	TAILQ_REMOVE(&vsi->mac_list, f, next);
940 	rte_free(f);
941 	vsi->mac_num--;
942 
943 	ret = 0;
944 DONE:
945 	rte_free(m_list_itr);
946 	return ret;
947 }
948 
949 /* Find a specific VLAN filter */
950 static struct ice_vlan_filter *
951 ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
952 {
953 	struct ice_vlan_filter *f;
954 
955 	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
956 		if (vlan->tpid == f->vlan_info.vlan.tpid &&
957 		    vlan->vid == f->vlan_info.vlan.vid)
958 			return f;
959 	}
960 
961 	return NULL;
962 }
963 
964 static int
965 ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
966 {
967 	struct ice_fltr_list_entry *v_list_itr = NULL;
968 	struct ice_vlan_filter *f;
969 	struct LIST_HEAD_TYPE list_head;
970 	struct ice_hw *hw;
971 	int ret = 0;
972 
973 	if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
974 		return -EINVAL;
975 
976 	hw = ICE_VSI_TO_HW(vsi);
977 
978 	/* If it's added and configured, return. */
979 	f = ice_find_vlan_filter(vsi, vlan);
980 	if (f) {
981 		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
982 		return 0;
983 	}
984 
985 	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
986 		return 0;
987 
988 	INIT_LIST_HEAD(&list_head);
989 
990 	v_list_itr = (struct ice_fltr_list_entry *)
991 		      ice_malloc(hw, sizeof(*v_list_itr));
992 	if (!v_list_itr) {
993 		ret = -ENOMEM;
994 		goto DONE;
995 	}
996 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
997 	v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
998 	v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
999 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1000 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1001 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1002 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1003 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
1004 
1005 	LIST_ADD(&v_list_itr->list_entry, &list_head);
1006 
1007 	/* Add the vlan */
1008 	ret = ice_add_vlan(hw, &list_head);
1009 	if (ret != ICE_SUCCESS) {
1010 		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
1011 		ret = -EINVAL;
1012 		goto DONE;
1013 	}
1014 
1015 	/* Add vlan into vlan list */
1016 	f = rte_zmalloc(NULL, sizeof(*f), 0);
1017 	if (!f) {
1018 		PMD_DRV_LOG(ERR, "failed to allocate memory");
1019 		ret = -ENOMEM;
1020 		goto DONE;
1021 	}
1022 	f->vlan_info.vlan.tpid = vlan->tpid;
1023 	f->vlan_info.vlan.vid = vlan->vid;
1024 	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
1025 	vsi->vlan_num++;
1026 
1027 	ret = 0;
1028 
1029 DONE:
1030 	rte_free(v_list_itr);
1031 	return ret;
1032 }
1033 
1034 static int
1035 ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1036 {
1037 	struct ice_fltr_list_entry *v_list_itr = NULL;
1038 	struct ice_vlan_filter *f;
1039 	struct LIST_HEAD_TYPE list_head;
1040 	struct ice_hw *hw;
1041 	int ret = 0;
1042 
1043 	if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
1044 		return -EINVAL;
1045 
1046 	hw = ICE_VSI_TO_HW(vsi);
1047 
1048 	/* Can't find it, return an error */
1049 	f = ice_find_vlan_filter(vsi, vlan);
1050 	if (!f)
1051 		return -EINVAL;
1052 
1053 	INIT_LIST_HEAD(&list_head);
1054 
1055 	v_list_itr = (struct ice_fltr_list_entry *)
1056 		      ice_malloc(hw, sizeof(*v_list_itr));
1057 	if (!v_list_itr) {
1058 		ret = -ENOMEM;
1059 		goto DONE;
1060 	}
1061 
1062 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
1063 	v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1064 	v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1065 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1066 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1067 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1068 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1069 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
1070 
1071 	LIST_ADD(&v_list_itr->list_entry, &list_head);
1072 
1073 	/* remove the vlan filter */
1074 	ret = ice_remove_vlan(hw, &list_head);
1075 	if (ret != ICE_SUCCESS) {
1076 		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
1077 		ret = -EINVAL;
1078 		goto DONE;
1079 	}
1080 
1081 	/* Remove the vlan id from vlan list */
1082 	TAILQ_REMOVE(&vsi->vlan_list, f, next);
1083 	rte_free(f);
1084 	vsi->vlan_num--;
1085 
1086 	ret = 0;
1087 DONE:
1088 	rte_free(v_list_itr);
1089 	return ret;
1090 }
1091 
1092 static int
1093 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
1094 {
1095 	struct ice_mac_filter *m_f;
1096 	struct ice_vlan_filter *v_f;
1097 	void *temp;
1098 	int ret = 0;
1099 
1100 	if (!vsi || !vsi->mac_num)
1101 		return -EINVAL;
1102 
1103 	TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) {
1104 		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
1105 		if (ret != ICE_SUCCESS) {
1106 			ret = -EINVAL;
1107 			goto DONE;
1108 		}
1109 	}
1110 
1111 	if (vsi->vlan_num == 0)
1112 		return 0;
1113 
1114 	TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) {
1115 		ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
1116 		if (ret != ICE_SUCCESS) {
1117 			ret = -EINVAL;
1118 			goto DONE;
1119 		}
1120 	}
1121 
1122 DONE:
1123 	return ret;
1124 }
1125 
1126 /* Enable IRQ0 */
1127 static void
1128 ice_pf_enable_irq0(struct ice_hw *hw)
1129 {
1130 	/* reset the registers */
1131 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
1132 	ICE_READ_REG(hw, PFINT_OICR);
1133 
1134 #ifdef ICE_LSE_SPT
1135 	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
1136 		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
1137 				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
1138 
1139 	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
1140 		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
1141 		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
1142 		       PFINT_OICR_CTL_ITR_INDX_M) |
1143 		      PFINT_OICR_CTL_CAUSE_ENA_M);
1144 
1145 	ICE_WRITE_REG(hw, PFINT_FW_CTL,
1146 		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
1147 		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
1148 		       PFINT_FW_CTL_ITR_INDX_M) |
1149 		      PFINT_FW_CTL_CAUSE_ENA_M);
1150 #else
1151 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
1152 #endif
1153 
1154 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1155 		      GLINT_DYN_CTL_INTENA_M |
1156 		      GLINT_DYN_CTL_CLEARPBA_M |
1157 		      GLINT_DYN_CTL_ITR_INDX_M);
1158 
1159 	ice_flush(hw);
1160 }
1161 
1162 /* Disable IRQ0 */
1163 static void
1164 ice_pf_disable_irq0(struct ice_hw *hw)
1165 {
1166 	/* Disable all interrupt types */
1167 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1168 	ice_flush(hw);
1169 }
1170 
1171 #ifdef ICE_LSE_SPT
1172 static void
1173 ice_handle_aq_msg(struct rte_eth_dev *dev)
1174 {
1175 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1176 	struct ice_ctl_q_info *cq = &hw->adminq;
1177 	struct ice_rq_event_info event;
1178 	uint16_t pending, opcode;
1179 	int ret;
1180 
1181 	event.buf_len = ICE_AQ_MAX_BUF_LEN;
1182 	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
1183 	if (!event.msg_buf) {
1184 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
1185 		return;
1186 	}
1187 
1188 	pending = 1;
1189 	while (pending) {
1190 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1191 
1192 		if (ret != ICE_SUCCESS) {
1193 			PMD_DRV_LOG(INFO,
1194 				    "Failed to read msg from AdminQ, "
1195 				    "adminq_err: %u",
1196 				    hw->adminq.sq_last_status);
1197 			break;
1198 		}
1199 		opcode = rte_le_to_cpu_16(event.desc.opcode);
1200 
1201 		switch (opcode) {
1202 		case ice_aqc_opc_get_link_status:
1203 			ret = ice_link_update(dev, 0);
1204 			if (!ret)
1205 				rte_eth_dev_callback_process
1206 					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1207 			break;
1208 		default:
1209 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1210 				    opcode);
1211 			break;
1212 		}
1213 	}
1214 	rte_free(event.msg_buf);
1215 }
1216 #endif
1217 
1218 /**
1219  * Interrupt handler triggered by the NIC to handle
1220  * a specific interrupt.
1221  *
1222  * @param handle
1223  *  Pointer to interrupt handle.
1224  * @param param
1225  *  The address of the parameter (struct rte_eth_dev *) registered before.
1226  *
1227  * @return
1228  *  void
1229  */
1230 static void
1231 ice_interrupt_handler(void *param)
1232 {
1233 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1234 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1235 	uint32_t oicr;
1236 	uint32_t reg;
1237 	uint8_t pf_num;
1238 	uint8_t event;
1239 	uint16_t queue;
1240 	int ret;
1241 #ifdef ICE_LSE_SPT
1242 	uint32_t int_fw_ctl;
1243 #endif
1244 
1245 	/* Disable interrupt */
1246 	ice_pf_disable_irq0(hw);
1247 
1248 	/* read out interrupt causes */
1249 	oicr = ICE_READ_REG(hw, PFINT_OICR);
1250 #ifdef ICE_LSE_SPT
1251 	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1252 #endif
1253 
1254 	/* No interrupt event indicated */
1255 	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1256 		PMD_DRV_LOG(INFO, "No interrupt event");
1257 		goto done;
1258 	}
1259 
1260 #ifdef ICE_LSE_SPT
1261 	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1262 		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1263 		ice_handle_aq_msg(dev);
1264 	}
1265 #else
1266 	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1267 		PMD_DRV_LOG(INFO, "OICR: link state change event");
1268 		ret = ice_link_update(dev, 0);
1269 		if (!ret)
1270 			rte_eth_dev_callback_process
1271 				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1272 	}
1273 #endif
1274 
1275 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
1276 		PMD_DRV_LOG(WARNING, "OICR: MDD event");
1277 		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1278 		if (reg & GL_MDET_TX_PQM_VALID_M) {
1279 			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1280 				 GL_MDET_TX_PQM_PF_NUM_S;
1281 			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1282 				GL_MDET_TX_PQM_MAL_TYPE_S;
1283 			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1284 				GL_MDET_TX_PQM_QNUM_S;
1285 
1286 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1287 				    "%d by PQM on TX queue %d PF# %d",
1288 				    event, queue, pf_num);
1289 		}
1290 
1291 		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1292 		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1293 			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1294 				 GL_MDET_TX_TCLAN_PF_NUM_S;
1295 			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1296 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1297 			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1298 				GL_MDET_TX_TCLAN_QNUM_S;
1299 
1300 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1301 				    "%d by TCLAN on TX queue %d PF# %d",
1302 				    event, queue, pf_num);
1303 		}
1304 	}
1305 done:
1306 	/* Enable interrupt */
1307 	ice_pf_enable_irq0(hw);
1308 	rte_intr_ack(dev->intr_handle);
1309 }
1310 
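/*
 * Build the per-queue protocol extraction table from the devargs, verify
 * that the hardware supports each requested extraction type, and register
 * the mbuf dynamic field plus the per-type dynamic flags that expose the
 * extracted metadata to applications.
 */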
1311 static void
1312 ice_init_proto_xtr(struct rte_eth_dev *dev)
1313 {
1314 	struct ice_adapter *ad =
1315 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1316 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1317 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1318 	const struct proto_xtr_ol_flag *ol_flag;
1319 	bool proto_xtr_enable = false;
1320 	int offset;
1321 	uint16_t i;
1322 
1323 	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1324 	if (unlikely(pf->proto_xtr == NULL)) {
1325 		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1326 		return;
1327 	}
1328 
1329 	for (i = 0; i < pf->lan_nb_qps; i++) {
1330 		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1331 				   ad->devargs.proto_xtr[i] :
1332 				   ad->devargs.proto_xtr_dflt;
1333 
1334 		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1335 			uint8_t type = pf->proto_xtr[i];
1336 
1337 			ice_proto_xtr_ol_flag_params[type].required = true;
1338 			proto_xtr_enable = true;
1339 		}
1340 	}
1341 
1342 	if (likely(!proto_xtr_enable))
1343 		return;
1344 
1345 	ice_check_proto_xtr_support(hw);
1346 
1347 	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1348 	if (unlikely(offset == -1)) {
1349 		PMD_DRV_LOG(ERR,
1350 			    "Failed to register protocol extraction metadata in mbuf (error %d), feature disabled",
1351 			    -rte_errno);
1352 		return;
1353 	}
1354 
1355 	PMD_DRV_LOG(DEBUG,
1356 		    "Protocol extraction metadata offset in mbuf is %d",
1357 		    offset);
1358 	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1359 
1360 	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1361 		ol_flag = &ice_proto_xtr_ol_flag_params[i];
1362 
1363 		if (!ol_flag->required)
1364 			continue;
1365 
1366 		if (!ice_proto_xtr_hw_support[i]) {
1367 			PMD_DRV_LOG(ERR,
1368 				    "Protocol extraction type %u is not supported in hardware",
1369 				    i);
1370 			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1371 			break;
1372 		}
1373 
1374 		offset = rte_mbuf_dynflag_register(&ol_flag->param);
1375 		if (unlikely(offset == -1)) {
1376 			PMD_DRV_LOG(ERR,
1377 				    "Protocol extraction offload '%s' failed to register with error %d",
1378 				    ol_flag->param.name, -rte_errno);
1379 
1380 			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1381 			break;
1382 		}
1383 
1384 		PMD_DRV_LOG(DEBUG,
1385 			    "Protocol extraction offload '%s' offset in mbuf is %d",
1386 			    ol_flag->param.name, offset);
1387 		*ol_flag->ol_flag = 1ULL << offset;
1388 	}
1389 }
1390 
1391 /* Initialize SW parameters of the PF */
1392 static int
1393 ice_pf_sw_init(struct rte_eth_dev *dev)
1394 {
1395 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1396 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1397 
1398 	pf->lan_nb_qp_max =
1399 		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1400 				  hw->func_caps.common_cap.num_rxq);
1401 
1402 	pf->lan_nb_qps = pf->lan_nb_qp_max;
1403 
1404 	ice_init_proto_xtr(dev);
1405 
1406 	if (hw->func_caps.fd_fltr_guar > 0 ||
1407 	    hw->func_caps.fd_fltr_best_effort > 0) {
1408 		pf->flags |= ICE_FLAG_FDIR;
1409 		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1410 		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1411 	} else {
1412 		pf->fdir_nb_qps = 0;
1413 	}
1414 	pf->fdir_qp_offset = 0;
1415 
1416 	return 0;
1417 }
1418 
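/*
 * Create and configure a VSI of the given type: a PF (data path) VSI with
 * default RSS and FDIR settings, or a control VSI used for flow director
 * programming. MSI-X vectors are taken from the PF pool, the VSI is added
 * via the admin queue, and for the PF VSI the default and broadcast MAC
 * filters are installed before the TX scheduler is configured.
 */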
1419 struct ice_vsi *
1420 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1421 {
1422 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1423 	struct ice_vsi *vsi = NULL;
1424 	struct ice_vsi_ctx vsi_ctx;
1425 	int ret;
1426 	struct rte_ether_addr broadcast = {
1427 		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1428 	struct rte_ether_addr mac_addr;
1429 	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1430 	uint8_t tc_bitmap = 0x1;
1431 	uint16_t cfg;
1432 
1433 	/* hw->num_lports = 1 in NIC mode */
1434 	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1435 	if (!vsi)
1436 		return NULL;
1437 
1438 	vsi->idx = pf->next_vsi_idx;
1439 	pf->next_vsi_idx++;
1440 	vsi->type = type;
1441 	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1442 	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1443 	vsi->vlan_anti_spoof_on = 0;
1444 	vsi->vlan_filter_on = 1;
1445 	TAILQ_INIT(&vsi->mac_list);
1446 	TAILQ_INIT(&vsi->vlan_list);
1447 
1448 	/* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
1449 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1450 			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
1451 			hw->func_caps.common_cap.rss_table_size;
1452 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1453 
1454 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1455 	switch (type) {
1456 	case ICE_VSI_PF:
1457 		vsi->nb_qps = pf->lan_nb_qps;
1458 		vsi->base_queue = 1;
1459 		ice_vsi_config_default_rss(&vsi_ctx.info);
1460 		vsi_ctx.alloc_from_pool = true;
1461 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1462 		/* switch_id is queried by the get_switch_config AQ command,
1463 		 * which is done by ice_init_hw
1464 		 */
1465 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1466 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1467 		/* Allow all untagged or tagged packets */
1468 		vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
1469 		vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
1470 		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1471 					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1472 		if (ice_is_dvm_ena(hw)) {
1473 			vsi_ctx.info.outer_vlan_flags =
1474 				(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
1475 				 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
1476 				ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
1477 			vsi_ctx.info.outer_vlan_flags |=
1478 				(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
1479 				 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
1480 				ICE_AQ_VSI_OUTER_TAG_TYPE_M;
1481 		}
1482 
1483 		/* FDIR */
1484 		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1485 			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1486 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1487 		cfg = ICE_AQ_VSI_FD_ENABLE;
1488 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1489 		vsi_ctx.info.max_fd_fltr_dedicated =
1490 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1491 		vsi_ctx.info.max_fd_fltr_shared =
1492 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1493 
1494 		/* Enable VLAN/UP trip */
1495 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1496 						      &vsi_ctx.info,
1497 						      ICE_DEFAULT_TCMAP);
1498 		if (ret) {
1499 			PMD_INIT_LOG(ERR,
1500 				     "tc queue mapping with vsi failed, "
1501 				     "err = %d",
1502 				     ret);
1503 			goto fail_mem;
1504 		}
1505 
1506 		break;
1507 	case ICE_VSI_CTRL:
1508 		vsi->nb_qps = pf->fdir_nb_qps;
1509 		vsi->base_queue = ICE_FDIR_QUEUE_ID;
1510 		vsi_ctx.alloc_from_pool = true;
1511 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1512 
1513 		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1514 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1515 		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1516 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1517 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1518 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1519 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1520 						      &vsi_ctx.info,
1521 						      ICE_DEFAULT_TCMAP);
1522 		if (ret) {
1523 			PMD_INIT_LOG(ERR,
1524 				     "tc queue mapping with vsi failed, "
1525 				     "err = %d",
1526 				     ret);
1527 			goto fail_mem;
1528 		}
1529 		break;
1530 	default:
1531 		/* for other types of VSI */
1532 		PMD_INIT_LOG(ERR, "other types of VSI not supported");
1533 		goto fail_mem;
1534 	}
1535 
1536 	/* A VF's MSI-X interrupts are in the VF range, so don't allocate them here */
1537 	if (type == ICE_VSI_PF) {
1538 		ret = ice_res_pool_alloc(&pf->msix_pool,
1539 					 RTE_MIN(vsi->nb_qps,
1540 						 RTE_MAX_RXTX_INTR_VEC_ID));
1541 		if (ret < 0) {
1542 			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1543 				     vsi->vsi_id, ret);
1544 		}
1545 		vsi->msix_intr = ret;
1546 		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1547 	} else if (type == ICE_VSI_CTRL) {
1548 		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1549 		if (ret < 0) {
1550 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1551 				    vsi->vsi_id, ret);
1552 		}
1553 		vsi->msix_intr = ret;
1554 		vsi->nb_msix = 1;
1555 	} else {
1556 		vsi->msix_intr = 0;
1557 		vsi->nb_msix = 0;
1558 	}
1559 	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1560 	if (ret != ICE_SUCCESS) {
1561 		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1562 		goto fail_mem;
1563 	}
1564 	/* store VSI information in the SW structure */
1565 	vsi->vsi_id = vsi_ctx.vsi_num;
1566 	vsi->info = vsi_ctx.info;
1567 	pf->vsis_allocated = vsi_ctx.vsis_allocd;
1568 	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1569 
1570 	if (type == ICE_VSI_PF) {
1571 		/* MAC configuration */
1572 		rte_ether_addr_copy((struct rte_ether_addr *)
1573 					hw->port_info->mac.perm_addr,
1574 				    &pf->dev_addr);
1575 
1576 		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1577 		ret = ice_add_mac_filter(vsi, &mac_addr);
1578 		if (ret != ICE_SUCCESS)
1579 			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1580 
1581 		rte_ether_addr_copy(&broadcast, &mac_addr);
1582 		ret = ice_add_mac_filter(vsi, &mac_addr);
1583 		if (ret != ICE_SUCCESS)
1584 			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1585 	}
1586 
1587 	/* At the beginning, only TC0. */
1588 	/* What we need here is the maximum number of TX queues;
1589 	 * currently vsi->nb_qps holds it.
1590 	 * Correct this if that ever changes.
1591 	 */
1592 	max_txqs[0] = vsi->nb_qps;
1593 	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1594 			      tc_bitmap, max_txqs);
1595 	if (ret != ICE_SUCCESS)
1596 		PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1597 
1598 	return vsi;
1599 fail_mem:
1600 	rte_free(vsi);
1601 	pf->next_vsi_idx--;
1602 	return NULL;
1603 }
1604 
1605 static int
1606 ice_send_driver_ver(struct ice_hw *hw)
1607 {
1608 	struct ice_driver_ver dv;
1609 
1610 	/* we don't have a driver version; use 0 as a dummy */
1611 	dv.major_ver = 0;
1612 	dv.minor_ver = 0;
1613 	dv.build_ver = 0;
1614 	dv.subbuild_ver = 0;
1615 	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1616 
1617 	return ice_aq_send_driver_ver(hw, &dv, NULL);
1618 }
1619 
1620 static int
1621 ice_pf_setup(struct ice_pf *pf)
1622 {
1623 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1624 	struct ice_vsi *vsi;
1625 	uint16_t unused;
1626 
1627 	/* Clear all stats counters */
1628 	pf->offset_loaded = false;
1629 	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1630 	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1631 	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1632 	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1633 
1634 	/* force guaranteed filter pool for PF */
1635 	ice_alloc_fd_guar_item(hw, &unused,
1636 			       hw->func_caps.fd_fltr_guar);
1637 	/* force shared filter pool for PF */
1638 	ice_alloc_fd_shrd_item(hw, &unused,
1639 			       hw->func_caps.fd_fltr_best_effort);
1640 
1641 	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1642 	if (!vsi) {
1643 		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1644 		return -EINVAL;
1645 	}
1646 
1647 	pf->main_vsi = vsi;
1648 
1649 	return 0;
1650 }
1651 
1652 /*
1653  * Extract device serial number from PCIe Configuration Space and
1654  * determine the pkg file path according to the DSN.
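 * The DSN-derived name has the form "ice-%08x%08x.pkg" (16 hex digits); it
 * is looked for first under ICE_PKG_FILE_SEARCH_PATH_UPDATES and then under
 * ICE_PKG_FILE_SEARCH_PATH_DEFAULT, falling back to the generic
 * ICE_PKG_FILE_UPDATES / ICE_PKG_FILE_DEFAULT packages when no DSN-specific
 * file is found.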
1655  */
1656 #ifndef RTE_EXEC_ENV_WINDOWS
1657 static int
1658 ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
1659 {
1660 	off_t pos;
1661 	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1662 	uint32_t dsn_low, dsn_high;
1663 	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1664 
1665 	pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
1666 
1667 	if (pos) {
1668 		if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0) {
1669 			PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
1670 			return -1;
1671 		}
1672 		if (rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
1673 			PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
1674 			return -1;
1675 		}
1676 		snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1677 			 "ice-%08x%08x.pkg", dsn_high, dsn_low);
1678 	} else {
1679 		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
1680 		goto fail_dsn;
1681 	}
1682 
1683 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1684 		ICE_MAX_PKG_FILENAME_SIZE);
1685 	if (!ice_access(strcat(pkg_file, opt_ddp_filename), 0))
1686 		return 0;
1687 
1688 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1689 		ICE_MAX_PKG_FILENAME_SIZE);
1690 	if (!ice_access(strcat(pkg_file, opt_ddp_filename), 0))
1691 		return 0;
1692 
1693 fail_dsn:
1694 	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1695 	if (!ice_access(pkg_file, 0))
1696 		return 0;
1697 	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1698 	return 0;
1699 }
1700 #endif
1701 
1702 enum ice_pkg_type
1703 ice_load_pkg_type(struct ice_hw *hw)
1704 {
1705 	enum ice_pkg_type package_type;
1706 
1707 	/* store the activated package type (OS default or Comms) */
1708 	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1709 		ICE_PKG_NAME_SIZE))
1710 		package_type = ICE_PKG_TYPE_OS_DEFAULT;
1711 	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1712 		ICE_PKG_NAME_SIZE))
1713 		package_type = ICE_PKG_TYPE_COMMS;
1714 	else
1715 		package_type = ICE_PKG_TYPE_UNKNOWN;
1716 
1717 	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)",
1718 		hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1719 		hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1720 		hw->active_pkg_name,
1721 		ice_is_dvm_ena(hw) ? "double" : "single");
1722 
1723 	return package_type;
1724 }
1725 
1726 #ifndef RTE_EXEC_ENV_WINDOWS
1727 static int ice_load_pkg(struct rte_eth_dev *dev)
1728 {
1729 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1730 	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1731 	int err;
1732 	uint8_t *buf;
1733 	int buf_len;
1734 	FILE *file;
1735 	struct stat fstat;
1736 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1737 	struct ice_adapter *ad =
1738 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1739 
1740 	err = ice_pkg_file_search_path(pci_dev, pkg_file);
1741 	if (err) {
1742 		PMD_INIT_LOG(ERR, "failed to search file path\n");
1743 		return err;
1744 	}
1745 
1746 	file = fopen(pkg_file, "rb");
1747 	if (!file)  {
1748 		PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1749 		return -1;
1750 	}
1751 
1752 	err = stat(pkg_file, &fstat);
1753 	if (err) {
1754 		PMD_INIT_LOG(ERR, "failed to get file stats\n");
1755 		fclose(file);
1756 		return err;
1757 	}
1758 
1759 	buf_len = fstat.st_size;
1760 	buf = rte_malloc(NULL, buf_len, 0);
1761 
1762 	if (!buf) {
1763 		PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1764 				buf_len);
1765 		fclose(file);
1766 		return -1;
1767 	}
1768 
1769 	err = fread(buf, buf_len, 1, file);
1770 	if (err != 1) {
1771 		PMD_INIT_LOG(ERR, "failed to read package data\n");
1772 		fclose(file);
1773 		err = -1;
1774 		goto fail_exit;
1775 	}
1776 
1777 	fclose(file);
1778 
1779 	err = ice_copy_and_init_pkg(hw, buf, buf_len);
1780 	if (err) {
1781 		PMD_INIT_LOG(ERR, "ice_copy_and_init_pkg failed: %d\n", err);
1782 		goto fail_exit;
1783 	}
1784 
1785 	/* store the loaded pkg type info */
1786 	ad->active_pkg_type = ice_load_pkg_type(hw);
1787 
1788 	err = ice_init_hw_tbls(hw);
1789 	if (err) {
1790 		PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1791 		goto fail_init_tbls;
1792 	}
1793 
1794 	return 0;
1795 
1796 fail_init_tbls:
1797 	rte_free(hw->pkg_copy);
1798 fail_exit:
1799 	rte_free(buf);
1800 	return err;
1801 }
1802 #endif
1803 
1804 static void
1805 ice_base_queue_get(struct ice_pf *pf)
1806 {
1807 	uint32_t reg;
1808 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1809 
1810 	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1811 	if (reg & PFLAN_RX_QALLOC_VALID_M) {
1812 		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1813 	} else {
1814 		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
1815 					" index");
1816 	}
1817 }
1818 
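/*
 * Devargs helper: parse a "key=value" pair whose value must be 0 or 1.
 * Used below for the safe-mode-support and pipeline-mode-support keys,
 * e.g. (illustrative EAL allow-list syntax, device address is hypothetical):
 *     -a 0000:18:00.0,safe-mode-support=1
 */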
1819 static int
1820 parse_bool(const char *key, const char *value, void *args)
1821 {
1822 	int *i = (int *)args;
1823 	char *end;
1824 	int num;
1825 
1826 	num = strtoul(value, &end, 10);
1827 
1828 	if (num != 0 && num != 1) {
1829 		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1830 			"value must be 0 or 1",
1831 			value, key);
1832 		return -1;
1833 	}
1834 
1835 	*i = num;
1836 	return 0;
1837 }
1838 
1839 static int ice_parse_devargs(struct rte_eth_dev *dev)
1840 {
1841 	struct ice_adapter *ad =
1842 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1843 	struct rte_devargs *devargs = dev->device->devargs;
1844 	struct rte_kvargs *kvlist;
1845 	int ret;
1846 
1847 	if (devargs == NULL)
1848 		return 0;
1849 
1850 	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1851 	if (kvlist == NULL) {
1852 		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
1853 		return -EINVAL;
1854 	}
1855 
1856 	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
1857 	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
1858 	       sizeof(ad->devargs.proto_xtr));
1859 
1860 	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
1861 				 &handle_proto_xtr_arg, &ad->devargs);
1862 	if (ret)
1863 		goto bail;
1864 
1865 	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
1866 				 &parse_bool, &ad->devargs.safe_mode_support);
1867 	if (ret)
1868 		goto bail;
1869 
1870 	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
1871 				 &parse_bool, &ad->devargs.pipe_mode_support);
1872 	if (ret)
1873 		goto bail;
1874 
1875 bail:
1876 	rte_kvargs_free(kvlist);
1877 	return ret;
1878 }
1879 
1880 /* Forward LLDP packets to the default VSI by setting switch rules */
1881 static int
1882 ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
1883 {
1884 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1885 	struct ice_fltr_list_entry *s_list_itr = NULL;
1886 	struct LIST_HEAD_TYPE list_head;
1887 	int ret = 0;
1888 
1889 	INIT_LIST_HEAD(&list_head);
1890 
1891 	s_list_itr = (struct ice_fltr_list_entry *)
1892 			ice_malloc(hw, sizeof(*s_list_itr));
1893 	if (!s_list_itr)
1894 		return -ENOMEM;
1895 	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
1896 	s_list_itr->fltr_info.vsi_handle = vsi->idx;
1897 	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
1898 			RTE_ETHER_TYPE_LLDP;
1899 	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1900 	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
1901 	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
1902 	LIST_ADD(&s_list_itr->list_entry, &list_head);
1903 	if (on)
1904 		ret = ice_add_eth_mac(hw, &list_head);
1905 	else
1906 		ret = ice_remove_eth_mac(hw, &list_head);
1907 
1908 	rte_free(s_list_itr);
1909 	return ret;
1910 }
1911 
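/*
 * Query firmware (via the get_allocd_res_desc admin queue command) for the
 * descriptors currently allocated for the given shared resource type. Up to
 * 'num' descriptor IDs starting at 'desc_id' are copied into prof_buf and
 * their count is returned in num_prof.
 */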
1912 static enum ice_status
1913 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
1914 		uint16_t num, uint16_t desc_id,
1915 		uint16_t *prof_buf, uint16_t *num_prof)
1916 {
1917 	struct ice_aqc_res_elem *resp_buf;
1918 	int ret;
1919 	uint16_t buf_len;
1920 	bool res_shared = true;
1921 	struct ice_aq_desc aq_desc;
1922 	struct ice_sq_cd *cd = NULL;
1923 	struct ice_aqc_get_allocd_res_desc *cmd =
1924 			&aq_desc.params.get_res_desc;
1925 
1926 	buf_len = sizeof(*resp_buf) * num;
1927 	resp_buf = ice_malloc(hw, buf_len);
1928 	if (!resp_buf)
1929 		return -ENOMEM;
1930 
1931 	ice_fill_dflt_direct_cmd_desc(&aq_desc,
1932 			ice_aqc_opc_get_allocd_res_desc);
1933 
1934 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
1935 				ICE_AQC_RES_TYPE_M) | (res_shared ?
1936 				ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
1937 	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
1938 
1939 	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
1940 	if (!ret)
1941 		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
1942 	else
1943 		goto exit;
1944 
1945 	ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
1946 			(*num_prof), ICE_NONDMA_TO_NONDMA);
1947 
1948 exit:
1949 	rte_free(resp_buf);
1950 	return ret;
1951 }
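
/*
 * Free every descriptor of the given resource type that is still allocated
 * in hardware, using ice_get_hw_res() above to enumerate them.
 */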
1952 static int
1953 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
1954 {
1955 	int ret;
1956 	uint16_t prof_id;
1957 	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
1958 	uint16_t first_desc = 1;
1959 	uint16_t num_prof = 0;
1960 
1961 	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
1962 			first_desc, prof_buf, &num_prof);
1963 	if (ret) {
1964 		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
1965 		return ret;
1966 	}
1967 
1968 	for (prof_id = 0; prof_id < num_prof; prof_id++) {
1969 		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
1970 		if (ret) {
1971 			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
1972 			return ret;
1973 		}
1974 	}
1975 	return 0;
1976 }
1977 
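/*
 * Release any flow director and RSS profile resources left over in the
 * flexible pipeline (e.g. from a previous driver instance) so that profile
 * allocation starts from a clean state.
 */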
1978 static int
1979 ice_reset_fxp_resource(struct ice_hw *hw)
1980 {
1981 	int ret;
1982 
1983 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
1984 	if (ret) {
1985 		PMD_INIT_LOG(ERR, "Failed to clean up fdir resource");
1986 		return ret;
1987 	}
1988 
1989 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
1990 	if (ret) {
1991 		PMD_INIT_LOG(ERR, "Failed to clean up rss resource");
1992 		return ret;
1993 	}
1994 
1995 	return 0;
1996 }
1997 
1998 static void
1999 ice_rss_ctx_init(struct ice_pf *pf)
2000 {
2001 	memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
2002 }
2003 
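/*
 * Build a bitmap of the flexible Rx descriptor IDs (RXDIDs) this device
 * supports by probing the GLFLXP_RXDID_FLAGS registers. The legacy
 * descriptor ICE_RXDID_LEGACY_1 is always reported as supported.
 */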
2004 static uint64_t
2005 ice_get_supported_rxdid(struct ice_hw *hw)
2006 {
2007 	uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
2008 	uint32_t regval;
2009 	int i;
2010 
2011 	supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
2012 
2013 	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
2014 		regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
2015 		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
2016 			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
2017 			supported_rxdid |= BIT(i);
2018 	}
2019 	return supported_rxdid;
2020 }
2021 
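/*
 * Main PF initialization: register the dev_ops and burst functions, parse
 * devargs, bring up the HW and (optionally) the DDP package, create the main
 * VSI, configure LLDP forwarding and interrupts, and initialize the flow and
 * RSS contexts. Secondary processes only pick up the Rx/Tx burst functions
 * selected by the primary.
 */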
2022 static int
2023 ice_dev_init(struct rte_eth_dev *dev)
2024 {
2025 	struct rte_pci_device *pci_dev;
2026 	struct rte_intr_handle *intr_handle;
2027 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2028 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2029 	struct ice_adapter *ad =
2030 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2031 	struct ice_vsi *vsi;
2032 	int ret;
2033 
2034 	dev->dev_ops = &ice_eth_dev_ops;
2035 	dev->rx_queue_count = ice_rx_queue_count;
2036 	dev->rx_descriptor_status = ice_rx_descriptor_status;
2037 	dev->tx_descriptor_status = ice_tx_descriptor_status;
2038 	dev->rx_pkt_burst = ice_recv_pkts;
2039 	dev->tx_pkt_burst = ice_xmit_pkts;
2040 	dev->tx_pkt_prepare = ice_prep_pkts;
2041 
2042 	/* for secondary processes, we don't initialise any further as primary
2043 	 * has already done this work.
2044 	 */
2045 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2046 		ice_set_rx_function(dev);
2047 		ice_set_tx_function(dev);
2048 		return 0;
2049 	}
2050 
2051 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2052 
2053 	ice_set_default_ptype_table(dev);
2054 	pci_dev = RTE_DEV_TO_PCI(dev->device);
2055 	intr_handle = &pci_dev->intr_handle;
2056 
2057 	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2058 	pf->dev_data = dev->data;
2059 	hw->back = pf->adapter;
2060 	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2061 	hw->vendor_id = pci_dev->id.vendor_id;
2062 	hw->device_id = pci_dev->id.device_id;
2063 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2064 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2065 	hw->bus.device = pci_dev->addr.devid;
2066 	hw->bus.func = pci_dev->addr.function;
2067 
2068 	ret = ice_parse_devargs(dev);
2069 	if (ret) {
2070 		PMD_INIT_LOG(ERR, "Failed to parse devargs");
2071 		return -EINVAL;
2072 	}
2073 
2074 	ice_init_controlq_parameter(hw);
2075 
2076 	ret = ice_init_hw(hw);
2077 	if (ret) {
2078 		PMD_INIT_LOG(ERR, "Failed to initialize HW");
2079 		return -EINVAL;
2080 	}
2081 
2082 #ifndef RTE_EXEC_ENV_WINDOWS
2083 	ret = ice_load_pkg(dev);
2084 	if (ret) {
2085 		if (ad->devargs.safe_mode_support == 0) {
2086 			PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
2087 					"use safe-mode-support=1 to enter Safe Mode");
2088 			return ret;
2089 		}
2090 
2091 		PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
2092 					"entering Safe Mode");
2093 		ad->is_safe_mode = 1;
2094 	}
2095 #endif
2096 
2097 	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2098 		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2099 		     hw->api_maj_ver, hw->api_min_ver);
2100 
2101 	ice_pf_sw_init(dev);
2102 	ret = ice_init_mac_address(dev);
2103 	if (ret) {
2104 		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2105 		goto err_init_mac;
2106 	}
2107 
2108 	ret = ice_res_pool_init(&pf->msix_pool, 1,
2109 				hw->func_caps.common_cap.num_msix_vectors - 1);
2110 	if (ret) {
2111 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2112 		goto err_msix_pool_init;
2113 	}
2114 
2115 	ret = ice_pf_setup(pf);
2116 	if (ret) {
2117 		PMD_INIT_LOG(ERR, "Failed to setup PF");
2118 		goto err_pf_setup;
2119 	}
2120 
2121 	ret = ice_send_driver_ver(hw);
2122 	if (ret) {
2123 		PMD_INIT_LOG(ERR, "Failed to send driver version");
2124 		goto err_pf_setup;
2125 	}
2126 
2127 	vsi = pf->main_vsi;
2128 
2129 	ret = ice_aq_stop_lldp(hw, true, false, NULL);
2130 	if (ret != ICE_SUCCESS)
2131 		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
2132 	ret = ice_init_dcb(hw, true);
2133 	if (ret != ICE_SUCCESS)
2134 		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
2135 	/* Forward LLDP packets to default VSI */
2136 	ret = ice_vsi_config_sw_lldp(vsi, true);
2137 	if (ret != ICE_SUCCESS)
2138 		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
2139 	/* register callback func to eal lib */
2140 	rte_intr_callback_register(intr_handle,
2141 				   ice_interrupt_handler, dev);
2142 
2143 	ice_pf_enable_irq0(hw);
2144 
2145 	/* enable uio intr after callback register */
2146 	rte_intr_enable(intr_handle);
2147 
2148 	/* get the base queue pair index in the device */
2149 	ice_base_queue_get(pf);
2150 
2151 	/* Initialize RSS context for gtpu_eh */
2152 	ice_rss_ctx_init(pf);
2153 
2154 	if (!ad->is_safe_mode) {
2155 		ret = ice_flow_init(ad);
2156 		if (ret) {
2157 			PMD_INIT_LOG(ERR, "Failed to initialize flow");
2158 			return ret;
2159 		}
2160 	}
2161 
2162 	ret = ice_reset_fxp_resource(hw);
2163 	if (ret) {
2164 		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2165 		return ret;
2166 	}
2167 
2168 	pf->supported_rxdid = ice_get_supported_rxdid(hw);
2169 
2170 	return 0;
2171 
2172 err_pf_setup:
2173 	ice_res_pool_destroy(&pf->msix_pool);
2174 err_msix_pool_init:
2175 	rte_free(dev->data->mac_addrs);
2176 	dev->data->mac_addrs = NULL;
2177 err_init_mac:
2178 	ice_sched_cleanup_all(hw);
2179 	rte_free(hw->port_info);
2180 	ice_shutdown_all_ctrlq(hw);
2181 	rte_free(pf->proto_xtr);
2182 
2183 	return ret;
2184 }
2185 
2186 int
2187 ice_release_vsi(struct ice_vsi *vsi)
2188 {
2189 	struct ice_hw *hw;
2190 	struct ice_vsi_ctx vsi_ctx;
2191 	enum ice_status ret;
2192 	int error = 0;
2193 
2194 	if (!vsi)
2195 		return error;
2196 
2197 	hw = ICE_VSI_TO_HW(vsi);
2198 
2199 	ice_remove_all_mac_vlan_filters(vsi);
2200 
2201 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2202 
2203 	vsi_ctx.vsi_num = vsi->vsi_id;
2204 	vsi_ctx.info = vsi->info;
2205 	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2206 	if (ret != ICE_SUCCESS) {
2207 		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2208 		error = -1;
2209 	}
2210 
2211 	rte_free(vsi->rss_lut);
2212 	rte_free(vsi->rss_key);
2213 	rte_free(vsi);
2214 	return error;
2215 }
2216 
2217 void
2218 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2219 {
2220 	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
2221 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2222 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2223 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2224 	uint16_t msix_intr, i;
2225 
2226 	/* disable interrupts and also clear all the existing config */
2227 	for (i = 0; i < vsi->nb_qps; i++) {
2228 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2229 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2230 		rte_wmb();
2231 	}
2232 
2233 	if (rte_intr_allow_others(intr_handle))
2234 		/* vfio-pci */
2235 		for (i = 0; i < vsi->nb_msix; i++) {
2236 			msix_intr = vsi->msix_intr + i;
2237 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2238 				      GLINT_DYN_CTL_WB_ON_ITR_M);
2239 		}
2240 	else
2241 		/* igb_uio */
2242 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2243 }
2244 
2245 static int
2246 ice_dev_stop(struct rte_eth_dev *dev)
2247 {
2248 	struct rte_eth_dev_data *data = dev->data;
2249 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2250 	struct ice_vsi *main_vsi = pf->main_vsi;
2251 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2252 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2253 	uint16_t i;
2254 
2255 	/* avoid stopping again */
2256 	if (pf->adapter_stopped)
2257 		return 0;
2258 
2259 	/* stop and clear all Rx queues */
2260 	for (i = 0; i < data->nb_rx_queues; i++)
2261 		ice_rx_queue_stop(dev, i);
2262 
2263 	/* stop and clear all Tx queues */
2264 	for (i = 0; i < data->nb_tx_queues; i++)
2265 		ice_tx_queue_stop(dev, i);
2266 
2267 	/* disable all queue interrupts */
2268 	ice_vsi_disable_queues_intr(main_vsi);
2269 
2270 	if (pf->init_link_up)
2271 		ice_dev_set_link_up(dev);
2272 	else
2273 		ice_dev_set_link_down(dev);
2274 
2275 	/* Clean datapath event and queue/vec mapping */
2276 	rte_intr_efd_disable(intr_handle);
2277 	if (intr_handle->intr_vec) {
2278 		rte_free(intr_handle->intr_vec);
2279 		intr_handle->intr_vec = NULL;
2280 	}
2281 
2282 	pf->adapter_stopped = true;
2283 	dev->data->dev_started = 0;
2284 
2285 	return 0;
2286 }
2287 
2288 static int
2289 ice_dev_close(struct rte_eth_dev *dev)
2290 {
2291 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2292 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2293 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2294 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2295 	struct ice_adapter *ad =
2296 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2297 	int ret;
2298 
2299 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2300 		return 0;
2301 
2302 	/* Stopping the port brings the link down, which triggers a link
2303 	 * event. Disable the irq first so that deallocating resources
2304 	 * such as port_info does not crash the interrupt service
2305 	 * thread.
2306 	 */
2307 	ice_pf_disable_irq0(hw);
2308 
2309 	ret = ice_dev_stop(dev);
2310 
2311 	if (!ad->is_safe_mode)
2312 		ice_flow_uninit(ad);
2313 
2314 	/* release all queue resource */
2315 	ice_free_queues(dev);
2316 
2317 	ice_res_pool_destroy(&pf->msix_pool);
2318 	ice_release_vsi(pf->main_vsi);
2319 	ice_sched_cleanup_all(hw);
2320 	ice_free_hw_tbls(hw);
2321 	rte_free(hw->port_info);
2322 	hw->port_info = NULL;
2323 	ice_shutdown_all_ctrlq(hw);
2324 	rte_free(pf->proto_xtr);
2325 	pf->proto_xtr = NULL;
2326 
2327 	/* disable uio intr before callback unregister */
2328 	rte_intr_disable(intr_handle);
2329 
2330 	/* unregister callback func from eal lib */
2331 	rte_intr_callback_unregister(intr_handle,
2332 				     ice_interrupt_handler, dev);
2333 
2334 	return ret;
2335 }
2336 
2337 static int
2338 ice_dev_uninit(struct rte_eth_dev *dev)
2339 {
2340 	ice_dev_close(dev);
2341 
2342 	return 0;
2343 }
2344 
2345 static bool
2346 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
2347 {
2348 	return cfg->hash_flds != 0 && cfg->addl_hdrs != 0;
2349 }
2350 
2351 static void
2352 hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
2353 {
2354 	cfg->hash_flds = 0;
2355 	cfg->addl_hdrs = 0;
2356 	cfg->symm = 0;
2357 	cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
2358 }
2359 
2360 static int
2361 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2362 {
2363 	enum ice_status status = ICE_SUCCESS;
2364 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2365 	struct ice_vsi *vsi = pf->main_vsi;
2366 
2367 	if (!is_hash_cfg_valid(cfg))
2368 		return -ENOENT;
2369 
2370 	status = ice_rem_rss_cfg(hw, vsi->idx, cfg);
2371 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2372 		PMD_DRV_LOG(ERR,
2373 			    "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
2374 			    vsi->idx, status);
2375 		return -EBUSY;
2376 	}
2377 
2378 	return 0;
2379 }
2380 
2381 static int
2382 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2383 {
2384 	enum ice_status status = ICE_SUCCESS;
2385 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2386 	struct ice_vsi *vsi = pf->main_vsi;
2387 
2388 	if (!is_hash_cfg_valid(cfg))
2389 		return -ENOENT;
2390 
2391 	status = ice_add_rss_cfg(hw, vsi->idx, cfg);
2392 	if (status) {
2393 		PMD_DRV_LOG(ERR,
2394 			    "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
2395 			    vsi->idx, status);
2396 		return -EBUSY;
2397 	}
2398 
2399 	return 0;
2400 }
2401 
2402 static int
2403 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2404 {
2405 	int ret;
2406 
2407 	ret = ice_hash_moveout(pf, cfg);
2408 	if (ret && (ret != -ENOENT))
2409 		return ret;
2410 
2411 	hash_cfg_reset(cfg);
2412 
2413 	return 0;
2414 }
2415 
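/*
 * Before programming a new GTP-U RSS configuration, contexts that would
 * conflict with it are either removed for good (ice_hash_remove) or
 * temporarily moved out of hardware (ice_hash_moveout); the latter are
 * restored afterwards by ice_add_rss_cfg_post_gtpu().
 */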
2416 static int
2417 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2418 			 u8 ctx_idx)
2419 {
2420 	int ret;
2421 
2422 	switch (ctx_idx) {
2423 	case ICE_HASH_GTPU_CTX_EH_IP:
2424 		ret = ice_hash_remove(pf,
2425 				      &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2426 		if (ret && (ret != -ENOENT))
2427 			return ret;
2428 
2429 		ret = ice_hash_remove(pf,
2430 				      &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2431 		if (ret && (ret != -ENOENT))
2432 			return ret;
2433 
2434 		ret = ice_hash_remove(pf,
2435 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2436 		if (ret && (ret != -ENOENT))
2437 			return ret;
2438 
2439 		ret = ice_hash_remove(pf,
2440 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2441 		if (ret && (ret != -ENOENT))
2442 			return ret;
2443 
2444 		ret = ice_hash_remove(pf,
2445 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2446 		if (ret && (ret != -ENOENT))
2447 			return ret;
2448 
2449 		ret = ice_hash_remove(pf,
2450 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2451 		if (ret && (ret != -ENOENT))
2452 			return ret;
2453 
2454 		ret = ice_hash_remove(pf,
2455 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2456 		if (ret && (ret != -ENOENT))
2457 			return ret;
2458 
2459 		ret = ice_hash_remove(pf,
2460 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2461 		if (ret && (ret != -ENOENT))
2462 			return ret;
2463 
2464 		break;
2465 	case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2466 		ret = ice_hash_remove(pf,
2467 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2468 		if (ret && (ret != -ENOENT))
2469 			return ret;
2470 
2471 		ret = ice_hash_remove(pf,
2472 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2473 		if (ret && (ret != -ENOENT))
2474 			return ret;
2475 
2476 		ret = ice_hash_moveout(pf,
2477 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2478 		if (ret && (ret != -ENOENT))
2479 			return ret;
2480 
2481 		ret = ice_hash_moveout(pf,
2482 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2483 		if (ret && (ret != -ENOENT))
2484 			return ret;
2485 
2486 		ret = ice_hash_moveout(pf,
2487 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2488 		if (ret && (ret != -ENOENT))
2489 			return ret;
2490 
2491 		ret = ice_hash_moveout(pf,
2492 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2493 		if (ret && (ret != -ENOENT))
2494 			return ret;
2495 
2496 		break;
2497 	case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2498 		ret = ice_hash_remove(pf,
2499 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2500 		if (ret && (ret != -ENOENT))
2501 			return ret;
2502 
2503 		ret = ice_hash_remove(pf,
2504 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2505 		if (ret && (ret != -ENOENT))
2506 			return ret;
2507 
2508 		ret = ice_hash_moveout(pf,
2509 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2510 		if (ret && (ret != -ENOENT))
2511 			return ret;
2512 
2513 		ret = ice_hash_moveout(pf,
2514 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2515 		if (ret && (ret != -ENOENT))
2516 			return ret;
2517 
2518 		ret = ice_hash_moveout(pf,
2519 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2520 		if (ret && (ret != -ENOENT))
2521 			return ret;
2522 
2523 		ret = ice_hash_moveout(pf,
2524 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2525 		if (ret && (ret != -ENOENT))
2526 			return ret;
2527 
2528 		break;
2529 	case ICE_HASH_GTPU_CTX_UP_IP:
2530 		ret = ice_hash_remove(pf,
2531 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2532 		if (ret && (ret != -ENOENT))
2533 			return ret;
2534 
2535 		ret = ice_hash_remove(pf,
2536 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2537 		if (ret && (ret != -ENOENT))
2538 			return ret;
2539 
2540 		ret = ice_hash_moveout(pf,
2541 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2542 		if (ret && (ret != -ENOENT))
2543 			return ret;
2544 
2545 		ret = ice_hash_moveout(pf,
2546 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2547 		if (ret && (ret != -ENOENT))
2548 			return ret;
2549 
2550 		ret = ice_hash_moveout(pf,
2551 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2552 		if (ret && (ret != -ENOENT))
2553 			return ret;
2554 
2555 		break;
2556 	case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2557 	case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2558 		ret = ice_hash_moveout(pf,
2559 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2560 		if (ret && (ret != -ENOENT))
2561 			return ret;
2562 
2563 		ret = ice_hash_moveout(pf,
2564 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2565 		if (ret && (ret != -ENOENT))
2566 			return ret;
2567 
2568 		ret = ice_hash_moveout(pf,
2569 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2570 		if (ret && (ret != -ENOENT))
2571 			return ret;
2572 
2573 		break;
2574 	case ICE_HASH_GTPU_CTX_DW_IP:
2575 		ret = ice_hash_remove(pf,
2576 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2577 		if (ret && (ret != -ENOENT))
2578 			return ret;
2579 
2580 		ret = ice_hash_remove(pf,
2581 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2582 		if (ret && (ret != -ENOENT))
2583 			return ret;
2584 
2585 		ret = ice_hash_moveout(pf,
2586 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2587 		if (ret && (ret != -ENOENT))
2588 			return ret;
2589 
2590 		ret = ice_hash_moveout(pf,
2591 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2592 		if (ret && (ret != -ENOENT))
2593 			return ret;
2594 
2595 		ret = ice_hash_moveout(pf,
2596 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2597 		if (ret && (ret != -ENOENT))
2598 			return ret;
2599 
2600 		break;
2601 	case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2602 	case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2603 		ret = ice_hash_moveout(pf,
2604 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2605 		if (ret && (ret != -ENOENT))
2606 			return ret;
2607 
2608 		ret = ice_hash_moveout(pf,
2609 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2610 		if (ret && (ret != -ENOENT))
2611 			return ret;
2612 
2613 		ret = ice_hash_moveout(pf,
2614 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2615 		if (ret && (ret != -ENOENT))
2616 			return ret;
2617 
2618 		break;
2619 	default:
2620 		break;
2621 	}
2622 
2623 	return 0;
2624 }
2625 
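/*
 * Map a packet header bitmap to a GTP-U hash context index as
 * eh_idx * 3 + ip_idx, where eh_idx selects EH(0)/UP(1)/DWN(2) and ip_idx
 * selects plain IP(0)/UDP(1)/TCP(2). For example, GTPU_EH | IPV4 | UDP
 * yields 0 * 3 + 1, i.e. ICE_HASH_GTPU_CTX_EH_IP_UDP, assuming the
 * ICE_HASH_GTPU_CTX_* enum follows the EH/UP/DWN x IP/UDP/TCP ordering
 * used throughout this file.
 */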
2626 static u8 calc_gtpu_ctx_idx(uint32_t hdr)
2627 {
2628 	u8 eh_idx, ip_idx;
2629 
2630 	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
2631 		eh_idx = 0;
2632 	else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
2633 		eh_idx = 1;
2634 	else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
2635 		eh_idx = 2;
2636 	else
2637 		return ICE_HASH_GTPU_CTX_MAX;
2638 
2639 	ip_idx = 0;
2640 	if (hdr & ICE_FLOW_SEG_HDR_UDP)
2641 		ip_idx = 1;
2642 	else if (hdr & ICE_FLOW_SEG_HDR_TCP)
2643 		ip_idx = 2;
2644 
2645 	if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
2646 		return eh_idx * 3 + ip_idx;
2647 	else
2648 		return ICE_HASH_GTPU_CTX_MAX;
2649 }
2650 
2651 static int
2652 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2653 {
2654 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2655 
2656 	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2657 		return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
2658 						gtpu_ctx_idx);
2659 	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2660 		return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
2661 						gtpu_ctx_idx);
2662 
2663 	return 0;
2664 }
2665 
2666 static int
2667 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2668 			  u8 ctx_idx, struct ice_rss_hash_cfg *cfg)
2669 {
2670 	int ret;
2671 
2672 	if (ctx_idx < ICE_HASH_GTPU_CTX_MAX)
2673 		ctx->ctx[ctx_idx] = *cfg;
2674 
2675 	switch (ctx_idx) {
2676 	case ICE_HASH_GTPU_CTX_EH_IP:
2677 		break;
2678 	case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2679 		ret = ice_hash_moveback(pf,
2680 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2681 		if (ret && (ret != -ENOENT))
2682 			return ret;
2683 
2684 		ret = ice_hash_moveback(pf,
2685 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2686 		if (ret && (ret != -ENOENT))
2687 			return ret;
2688 
2689 		ret = ice_hash_moveback(pf,
2690 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2691 		if (ret && (ret != -ENOENT))
2692 			return ret;
2693 
2694 		ret = ice_hash_moveback(pf,
2695 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2696 		if (ret && (ret != -ENOENT))
2697 			return ret;
2698 
2699 		break;
2700 	case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2701 		ret = ice_hash_moveback(pf,
2702 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2703 		if (ret && (ret != -ENOENT))
2704 			return ret;
2705 
2706 		ret = ice_hash_moveback(pf,
2707 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2708 		if (ret && (ret != -ENOENT))
2709 			return ret;
2710 
2711 		ret = ice_hash_moveback(pf,
2712 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2713 		if (ret && (ret != -ENOENT))
2714 			return ret;
2715 
2716 		ret = ice_hash_moveback(pf,
2717 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2718 		if (ret && (ret != -ENOENT))
2719 			return ret;
2720 
2721 		break;
2722 	case ICE_HASH_GTPU_CTX_UP_IP:
2723 	case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2724 	case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2725 	case ICE_HASH_GTPU_CTX_DW_IP:
2726 	case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2727 	case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2728 		ret = ice_hash_moveback(pf,
2729 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2730 		if (ret && (ret != -ENOENT))
2731 			return ret;
2732 
2733 		ret = ice_hash_moveback(pf,
2734 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2735 		if (ret && (ret != -ENOENT))
2736 			return ret;
2737 
2738 		ret = ice_hash_moveback(pf,
2739 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2740 		if (ret && (ret != -ENOENT))
2741 			return ret;
2742 
2743 		break;
2744 	default:
2745 		break;
2746 	}
2747 
2748 	return 0;
2749 }
2750 
2751 static int
2752 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2753 {
2754 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs);
2755 
2756 	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
2757 		return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4,
2758 						 gtpu_ctx_idx, cfg);
2759 	else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
2760 		return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6,
2761 						 gtpu_ctx_idx, cfg);
2762 
2763 	return 0;
2764 }
2765 
2766 static void
2767 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2768 {
2769 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2770 
2771 	if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
2772 		return;
2773 
2774 	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2775 		hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
2776 	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2777 		hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
2778 }
2779 
2780 int
2781 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2782 		     struct ice_rss_hash_cfg *cfg)
2783 {
2784 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2785 	int ret;
2786 
2787 	ret = ice_rem_rss_cfg(hw, vsi_id, cfg);
2788 	if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
2789 		PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
2790 
2791 	ice_rem_rss_cfg_post(pf, cfg->addl_hdrs);
2792 
2793 	return 0;
2794 }
2795 
2796 int
2797 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2798 		     struct ice_rss_hash_cfg *cfg)
2799 {
2800 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2801 	int ret;
2802 
2803 	ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs);
2804 	if (ret)
2805 		PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");
2806 
2807 	ret = ice_add_rss_cfg(hw, vsi_id, cfg);
2808 	if (ret)
2809 		PMD_DRV_LOG(ERR, "add rss cfg failed\n");
2810 
2811 	ret = ice_add_rss_cfg_post(pf, cfg);
2812 	if (ret)
2813 		PMD_DRV_LOG(ERR, "add rss cfg post failed\n");
2814 
2815 	return 0;
2816 }
2817 
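/*
 * Translate the ethdev rss_hf bitmask into individual ice RSS
 * configurations (outer IPv4/IPv6, UDP/TCP/SCTP and their PPPoE variants)
 * and program them on the main VSI. Any previous VSI RSS configuration is
 * removed first.
 */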
2818 static void
2819 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2820 {
2821 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2822 	struct ice_vsi *vsi = pf->main_vsi;
2823 	struct ice_rss_hash_cfg cfg;
2824 	int ret;
2825 
2826 #define ICE_RSS_HF_ALL ( \
2827 	ETH_RSS_IPV4 | \
2828 	ETH_RSS_IPV6 | \
2829 	ETH_RSS_NONFRAG_IPV4_UDP | \
2830 	ETH_RSS_NONFRAG_IPV6_UDP | \
2831 	ETH_RSS_NONFRAG_IPV4_TCP | \
2832 	ETH_RSS_NONFRAG_IPV6_TCP | \
2833 	ETH_RSS_NONFRAG_IPV4_SCTP | \
2834 	ETH_RSS_NONFRAG_IPV6_SCTP)
2835 
2836 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
2837 	if (ret)
2838 		PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
2839 			    __func__, ret);
2840 
2841 	cfg.symm = 0;
2842 	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
2843 	/* Configure RSS for IPv4 with src/dst addr as input set */
2844 	if (rss_hf & ETH_RSS_IPV4) {
2845 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2846 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2847 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2848 		if (ret)
2849 			PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
2850 				    __func__, ret);
2851 	}
2852 
2853 	/* Configure RSS for IPv6 with src/dst addr as input set */
2854 	if (rss_hf & ETH_RSS_IPV6) {
2855 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2856 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2857 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2858 		if (ret)
2859 			PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
2860 				    __func__, ret);
2861 	}
2862 
2863 	/* Configure RSS for udp4 with src/dst addr and port as input set */
2864 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2865 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
2866 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2867 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
2868 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2869 		if (ret)
2870 			PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
2871 				    __func__, ret);
2872 	}
2873 
2874 	/* Configure RSS for udp6 with src/dst addr and port as input set */
2875 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2876 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
2877 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2878 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
2879 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2880 		if (ret)
2881 			PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
2882 				    __func__, ret);
2883 	}
2884 
2885 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
2886 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2887 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
2888 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2889 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
2890 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2891 		if (ret)
2892 			PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
2893 				    __func__, ret);
2894 	}
2895 
2896 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
2897 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2898 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
2899 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2900 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
2901 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2902 		if (ret)
2903 			PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
2904 				    __func__, ret);
2905 	}
2906 
2907 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
2908 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
2909 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
2910 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2911 		cfg.hash_flds = ICE_HASH_SCTP_IPV4;
2912 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2913 		if (ret)
2914 			PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
2915 				    __func__, ret);
2916 	}
2917 
2918 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
2919 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
2920 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
2921 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2922 		cfg.hash_flds = ICE_HASH_SCTP_IPV6;
2923 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2924 		if (ret)
2925 			PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
2926 				    __func__, ret);
2927 	}
2928 
2929 	if (rss_hf & ETH_RSS_IPV4) {
2930 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
2931 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2932 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
2933 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2934 		if (ret)
2935 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
2936 				    __func__, ret);
2937 	}
2938 
2939 	if (rss_hf & ETH_RSS_IPV6) {
2940 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
2941 				ICE_FLOW_SEG_HDR_IPV_OTHER;
2942 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
2943 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2944 		if (ret)
2945 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
2946 				    __func__, ret);
2947 	}
2948 
2949 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
2950 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
2951 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2952 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
2953 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2954 		if (ret)
2955 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
2956 				    __func__, ret);
2957 	}
2958 
2959 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
2960 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
2961 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2962 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
2963 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2964 		if (ret)
2965 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
2966 				    __func__, ret);
2967 	}
2968 
2969 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
2970 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
2971 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2972 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
2973 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2974 		if (ret)
2975 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
2976 				    __func__, ret);
2977 	}
2978 
2979 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
2980 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
2981 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
2982 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
2983 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
2984 		if (ret)
2985 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
2986 				    __func__, ret);
2987 	}
2988 
2989 	pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
2990 }
2991 
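/*
 * Return a default RSS key. The key is generated once from rte_rand() and
 * cached, so every port initialized without a user-supplied key in this
 * process gets the same default key.
 */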
2992 static void
2993 ice_get_default_rss_key(uint8_t *rss_key, uint32_t rss_key_size)
2994 {
2995 	static struct ice_aqc_get_set_rss_keys default_key;
2996 	static bool default_key_done;
2997 	uint8_t *key = (uint8_t *)&default_key;
2998 	size_t i;
2999 
3000 	if (rss_key_size > sizeof(default_key)) {
3001 		PMD_DRV_LOG(WARNING,
3002 			    "requested size %u is larger than default %zu, "
3003 			    "only %zu bytes will be used for the key\n",
3004 			    rss_key_size, sizeof(default_key),
3005 			    sizeof(default_key));
3006 	}
3007 
3008 	if (!default_key_done) {
3009 		/* Calculate the default hash key */
3010 		for (i = 0; i < sizeof(default_key); i++)
3011 			key[i] = (uint8_t)rte_rand();
3012 		default_key_done = true;
3013 	}
3014 	rte_memcpy(rss_key, key, RTE_MIN(rss_key_size, sizeof(default_key)));
3015 }
3016 
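/*
 * Program the RSS key, a round-robin LUT over the configured Rx queues and
 * the symmetric Toeplitz hash scheme, then apply the rss_hf configuration.
 * Skipped (returning 0) when there are no Rx queues or the device is
 * running in safe mode.
 */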
3017 static int ice_init_rss(struct ice_pf *pf)
3018 {
3019 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
3020 	struct ice_vsi *vsi = pf->main_vsi;
3021 	struct rte_eth_dev_data *dev_data = pf->dev_data;
3022 	struct ice_aq_get_set_rss_lut_params lut_params;
3023 	struct rte_eth_rss_conf *rss_conf;
3024 	struct ice_aqc_get_set_rss_keys key;
3025 	uint16_t i, nb_q;
3026 	int ret = 0;
3027 	bool is_safe_mode = pf->adapter->is_safe_mode;
3028 	uint32_t reg;
3029 
3030 	rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
3031 	nb_q = dev_data->nb_rx_queues;
3032 	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3033 	vsi->rss_lut_size = pf->hash_lut_size;
3034 
3035 	if (nb_q == 0) {
3036 		PMD_DRV_LOG(WARNING,
3037 			"RSS is not supported as the number of Rx queues is zero\n");
3038 		return 0;
3039 	}
3040 
3041 	if (is_safe_mode) {
3042 		PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
3043 		return 0;
3044 	}
3045 
3046 	if (!vsi->rss_key) {
3047 		vsi->rss_key = rte_zmalloc(NULL,
3048 					   vsi->rss_key_size, 0);
3049 		if (vsi->rss_key == NULL) {
3050 			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3051 			return -ENOMEM;
3052 		}
3053 	}
3054 	if (!vsi->rss_lut) {
3055 		vsi->rss_lut = rte_zmalloc(NULL,
3056 					   vsi->rss_lut_size, 0);
3057 		if (vsi->rss_lut == NULL) {
3058 			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3059 			rte_free(vsi->rss_key);
3060 			vsi->rss_key = NULL;
3061 			return -ENOMEM;
3062 		}
3063 	}
3064 	/* configure RSS key */
3065 	if (!rss_conf->rss_key)
3066 		ice_get_default_rss_key(vsi->rss_key, vsi->rss_key_size);
3067 	else
3068 		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3069 			   RTE_MIN(rss_conf->rss_key_len,
3070 				   vsi->rss_key_size));
3071 
3072 	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3073 	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3074 	if (ret)
3075 		goto out;
3076 
3077 	/* init RSS LUT table */
3078 	for (i = 0; i < vsi->rss_lut_size; i++)
3079 		vsi->rss_lut[i] = i % nb_q;
3080 
3081 	lut_params.vsi_handle = vsi->idx;
3082 	lut_params.lut_size = vsi->rss_lut_size;
3083 	lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3084 	lut_params.lut = vsi->rss_lut;
3085 	lut_params.global_lut_id = 0;
3086 	ret = ice_aq_set_rss_lut(hw, &lut_params);
3087 	if (ret)
3088 		goto out;
3089 
3090 	/* Enable registers for symmetric_toeplitz function. */
3091 	reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3092 	reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3093 		(1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3094 	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3095 
3096 	/* RSS hash configuration */
3097 	ice_rss_hash_set(pf, rss_conf->rss_hf);
3098 
3099 	return 0;
3100 out:
3101 	rte_free(vsi->rss_key);
3102 	vsi->rss_key = NULL;
3103 	rte_free(vsi->rss_lut);
3104 	vsi->rss_lut = NULL;
3105 	return -EINVAL;
3106 }
3107 
3108 static int
3109 ice_dev_configure(struct rte_eth_dev *dev)
3110 {
3111 	struct ice_adapter *ad =
3112 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3113 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3114 	int ret;
3115 
3116 	/* Initialize to TRUE. If any Rx queue doesn't meet the bulk
3117 	 * allocation or vector Rx preconditions, we will reset it.
3118 	 */
3119 	ad->rx_bulk_alloc_allowed = true;
3120 	ad->tx_simple_allowed = true;
3121 
3122 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
3123 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
3124 
3125 	if (dev->data->nb_rx_queues) {
3126 		ret = ice_init_rss(pf);
3127 		if (ret) {
3128 			PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3129 			return ret;
3130 		}
3131 	}
3132 
3133 	return 0;
3134 }
3135 
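/*
 * Bind 'nb_queue' consecutive Rx/Tx queue pairs starting at 'base_queue' to
 * a single MSI-X vector by programming QINT_RQCTL/QINT_TQCTL, and set a
 * small ITR0 interval for that vector.
 */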
3136 static void
3137 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3138 		       int base_queue, int nb_queue)
3139 {
3140 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3141 	uint32_t val, val_tx;
3142 	int i;
3143 
3144 	for (i = 0; i < nb_queue; i++) {
3145 		/* do actual bind */
3146 		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3147 		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3148 		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3149 			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3150 
3151 		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3152 			    base_queue + i, msix_vect);
3153 		/* set ITR0 value */
3154 		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
3155 		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3156 		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3157 	}
3158 }
3159 
3160 void
3161 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3162 {
3163 	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3164 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3165 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3166 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3167 	uint16_t msix_vect = vsi->msix_intr;
3168 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
3169 	uint16_t queue_idx = 0;
3170 	int record = 0;
3171 	int i;
3172 
3173 	/* clear Rx/Tx queue interrupt */
3174 	for (i = 0; i < vsi->nb_used_qps; i++) {
3175 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3176 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3177 	}
3178 
3179 	/* PF bind interrupt */
3180 	if (rte_intr_dp_is_en(intr_handle)) {
3181 		queue_idx = 0;
3182 		record = 1;
3183 	}
3184 
3185 	for (i = 0; i < vsi->nb_used_qps; i++) {
3186 		if (nb_msix <= 1) {
3187 			if (!rte_intr_allow_others(intr_handle))
3188 				msix_vect = ICE_MISC_VEC_ID;
3189 
3190 			/* uio mapping all queue to one msix_vect */
3191 			__vsi_queues_bind_intr(vsi, msix_vect,
3192 					       vsi->base_queue + i,
3193 					       vsi->nb_used_qps - i);
3194 
3195 			for (; !!record && i < vsi->nb_used_qps; i++)
3196 				intr_handle->intr_vec[queue_idx + i] =
3197 					msix_vect;
3198 			break;
3199 		}
3200 
3201 		/* vfio 1:1 queue/msix_vect mapping */
3202 		__vsi_queues_bind_intr(vsi, msix_vect,
3203 				       vsi->base_queue + i, 1);
3204 
3205 		if (!!record)
3206 			intr_handle->intr_vec[queue_idx + i] = msix_vect;
3207 
3208 		msix_vect++;
3209 		nb_msix--;
3210 	}
3211 }
3212 
3213 void
3214 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3215 {
3216 	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3217 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3218 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3219 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3220 	uint16_t msix_intr, i;
3221 
3222 	if (rte_intr_allow_others(intr_handle))
3223 		for (i = 0; i < vsi->nb_used_qps; i++) {
3224 			msix_intr = vsi->msix_intr + i;
3225 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3226 				      GLINT_DYN_CTL_INTENA_M |
3227 				      GLINT_DYN_CTL_CLEARPBA_M |
3228 				      GLINT_DYN_CTL_ITR_INDX_M |
3229 				      GLINT_DYN_CTL_WB_ON_ITR_M);
3230 		}
3231 	else
3232 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3233 			      GLINT_DYN_CTL_INTENA_M |
3234 			      GLINT_DYN_CTL_CLEARPBA_M |
3235 			      GLINT_DYN_CTL_ITR_INDX_M |
3236 			      GLINT_DYN_CTL_WB_ON_ITR_M);
3237 }
3238 
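/*
 * Set up Rx queue interrupts for the port: allocate event fds and the
 * queue-to-vector map when per-queue interrupts are requested, then bind
 * the queues to MSI-X vectors and enable them.
 */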
3239 static int
3240 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3241 {
3242 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3243 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3244 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3245 	struct ice_vsi *vsi = pf->main_vsi;
3246 	uint32_t intr_vector = 0;
3247 
3248 	rte_intr_disable(intr_handle);
3249 
3250 	/* check and configure queue intr-vector mapping */
3251 	if ((rte_intr_cap_multiple(intr_handle) ||
3252 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
3253 	    dev->data->dev_conf.intr_conf.rxq != 0) {
3254 		intr_vector = dev->data->nb_rx_queues;
3255 		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3256 			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3257 				    ICE_MAX_INTR_QUEUE_NUM);
3258 			return -ENOTSUP;
3259 		}
3260 		if (rte_intr_efd_enable(intr_handle, intr_vector))
3261 			return -1;
3262 	}
3263 
3264 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3265 		intr_handle->intr_vec =
3266 		rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
3267 			    0);
3268 		if (!intr_handle->intr_vec) {
3269 			PMD_DRV_LOG(ERR,
3270 				    "Failed to allocate %d rx_queues intr_vec",
3271 				    dev->data->nb_rx_queues);
3272 			return -ENOMEM;
3273 		}
3274 	}
3275 
3276 	/* Map queues with MSIX interrupt */
3277 	vsi->nb_used_qps = dev->data->nb_rx_queues;
3278 	ice_vsi_queues_bind_intr(vsi);
3279 
3280 	/* Enable interrupts for all the queues */
3281 	ice_vsi_enable_queues_intr(vsi);
3282 
3283 	rte_intr_enable(intr_handle);
3284 
3285 	return 0;
3286 }
3287 
3288 static void
3289 ice_get_init_link_status(struct rte_eth_dev *dev)
3290 {
3291 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3292 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3293 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3294 	struct ice_link_status link_status;
3295 	int ret;
3296 
3297 	ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3298 				   &link_status, NULL);
3299 	if (ret != ICE_SUCCESS) {
3300 		PMD_DRV_LOG(ERR, "Failed to get link info");
3301 		pf->init_link_up = false;
3302 		return;
3303 	}
3304 
3305 	if (link_status.link_info & ICE_AQ_LINK_UP)
3306 		pf->init_link_up = true;
3307 }
3308 
3309 static int
3310 ice_dev_start(struct rte_eth_dev *dev)
3311 {
3312 	struct rte_eth_dev_data *data = dev->data;
3313 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3314 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3315 	struct ice_vsi *vsi = pf->main_vsi;
3316 	uint16_t nb_rxq = 0;
3317 	uint16_t nb_txq, i;
3318 	uint16_t max_frame_size;
3319 	int mask, ret;
3320 
3321 	/* program Tx queues' context in hardware */
3322 	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3323 		ret = ice_tx_queue_start(dev, nb_txq);
3324 		if (ret) {
3325 			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
3326 			goto tx_err;
3327 		}
3328 	}
3329 
3330 	/* program Rx queues' context in hardware*/
3331 	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3332 		ret = ice_rx_queue_start(dev, nb_rxq);
3333 		if (ret) {
3334 			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
3335 			goto rx_err;
3336 		}
3337 	}
3338 
3339 	ice_set_rx_function(dev);
3340 	ice_set_tx_function(dev);
3341 
3342 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
3343 			ETH_VLAN_EXTEND_MASK;
3344 	ret = ice_vlan_offload_set(dev, mask);
3345 	if (ret) {
3346 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3347 		goto rx_err;
3348 	}
3349 
3350 	/* enable Rx interrupt and map Rx queues to interrupt vectors */
3351 	if (ice_rxq_intr_setup(dev))
3352 		return -EIO;
3353 
3354 	/* Enable receiving broadcast packets and transmitting packets */
3355 	ret = ice_set_vsi_promisc(hw, vsi->idx,
3356 				  ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3357 				  ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3358 				  0);
3359 	if (ret != ICE_SUCCESS)
3360 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
3361 
3362 	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3363 				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3364 				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3365 				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3366 				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3367 				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
3368 				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3369 				     NULL);
3370 	if (ret != ICE_SUCCESS)
3371 		PMD_DRV_LOG(WARNING, "Fail to set phy mask");
3372 
3373 	ice_get_init_link_status(dev);
3374 
3375 	ice_dev_set_link_up(dev);
3376 
3377 	/* Call the get_link_info aq command to enable/disable LSE */
3378 	ice_link_update(dev, 0);
3379 
3380 	pf->adapter_stopped = false;
3381 
3382 	/* Set the max frame size to the default value */
3383 	max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
3384 		pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
3385 		ICE_FRAME_SIZE_MAX;
3386 
3387 	/* Program the max frame size into HW */
3388 	ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3389 
3390 	return 0;
3391 
3392 	/* stop the started queues if failed to start all queues */
3393 rx_err:
3394 	for (i = 0; i < nb_rxq; i++)
3395 		ice_rx_queue_stop(dev, i);
3396 tx_err:
3397 	for (i = 0; i < nb_txq; i++)
3398 		ice_tx_queue_stop(dev, i);
3399 
3400 	return -EIO;
3401 }
3402 
3403 static int
3404 ice_dev_reset(struct rte_eth_dev *dev)
3405 {
3406 	int ret;
3407 
3408 	if (dev->data->sriov.active)
3409 		return -ENOTSUP;
3410 
3411 	ret = ice_dev_uninit(dev);
3412 	if (ret) {
3413 		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3414 		return -ENXIO;
3415 	}
3416 
3417 	ret = ice_dev_init(dev);
3418 	if (ret) {
3419 		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3420 		return -ENXIO;
3421 	}
3422 
3423 	return 0;
3424 }
3425 
3426 static int
3427 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3428 {
3429 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3430 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3431 	struct ice_vsi *vsi = pf->main_vsi;
3432 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3433 	bool is_safe_mode = pf->adapter->is_safe_mode;
3434 	u64 phy_type_low;
3435 	u64 phy_type_high;
3436 
3437 	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3438 	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3439 	dev_info->max_rx_queues = vsi->nb_qps;
3440 	dev_info->max_tx_queues = vsi->nb_qps;
3441 	dev_info->max_mac_addrs = vsi->max_macaddrs;
3442 	dev_info->max_vfs = pci_dev->max_vfs;
3443 	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3444 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3445 
3446 	dev_info->rx_offload_capa =
3447 		DEV_RX_OFFLOAD_VLAN_STRIP |
3448 		DEV_RX_OFFLOAD_JUMBO_FRAME |
3449 		DEV_RX_OFFLOAD_KEEP_CRC |
3450 		DEV_RX_OFFLOAD_SCATTER |
3451 		DEV_RX_OFFLOAD_VLAN_FILTER;
3452 	dev_info->tx_offload_capa =
3453 		DEV_TX_OFFLOAD_VLAN_INSERT |
3454 		DEV_TX_OFFLOAD_TCP_TSO |
3455 		DEV_TX_OFFLOAD_MULTI_SEGS |
3456 		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3457 	dev_info->flow_type_rss_offloads = 0;
3458 
3459 	if (!is_safe_mode) {
3460 		dev_info->rx_offload_capa |=
3461 			DEV_RX_OFFLOAD_IPV4_CKSUM |
3462 			DEV_RX_OFFLOAD_UDP_CKSUM |
3463 			DEV_RX_OFFLOAD_TCP_CKSUM |
3464 			DEV_RX_OFFLOAD_QINQ_STRIP |
3465 			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3466 			DEV_RX_OFFLOAD_VLAN_EXTEND |
3467 			DEV_RX_OFFLOAD_RSS_HASH;
3468 		dev_info->tx_offload_capa |=
3469 			DEV_TX_OFFLOAD_QINQ_INSERT |
3470 			DEV_TX_OFFLOAD_IPV4_CKSUM |
3471 			DEV_TX_OFFLOAD_UDP_CKSUM |
3472 			DEV_TX_OFFLOAD_TCP_CKSUM |
3473 			DEV_TX_OFFLOAD_SCTP_CKSUM |
3474 			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3475 			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
3476 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3477 	}
3478 
3479 	dev_info->rx_queue_offload_capa = 0;
3480 	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3481 
3482 	dev_info->reta_size = pf->hash_lut_size;
3483 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3484 
3485 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3486 		.rx_thresh = {
3487 			.pthresh = ICE_DEFAULT_RX_PTHRESH,
3488 			.hthresh = ICE_DEFAULT_RX_HTHRESH,
3489 			.wthresh = ICE_DEFAULT_RX_WTHRESH,
3490 		},
3491 		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3492 		.rx_drop_en = 0,
3493 		.offloads = 0,
3494 	};
3495 
3496 	dev_info->default_txconf = (struct rte_eth_txconf) {
3497 		.tx_thresh = {
3498 			.pthresh = ICE_DEFAULT_TX_PTHRESH,
3499 			.hthresh = ICE_DEFAULT_TX_HTHRESH,
3500 			.wthresh = ICE_DEFAULT_TX_WTHRESH,
3501 		},
3502 		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3503 		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3504 		.offloads = 0,
3505 	};
3506 
3507 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3508 		.nb_max = ICE_MAX_RING_DESC,
3509 		.nb_min = ICE_MIN_RING_DESC,
3510 		.nb_align = ICE_ALIGN_RING_DESC,
3511 	};
3512 
3513 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3514 		.nb_max = ICE_MAX_RING_DESC,
3515 		.nb_min = ICE_MIN_RING_DESC,
3516 		.nb_align = ICE_ALIGN_RING_DESC,
3517 	};
3518 
3519 	dev_info->speed_capa = ETH_LINK_SPEED_10M |
3520 			       ETH_LINK_SPEED_100M |
3521 			       ETH_LINK_SPEED_1G |
3522 			       ETH_LINK_SPEED_2_5G |
3523 			       ETH_LINK_SPEED_5G |
3524 			       ETH_LINK_SPEED_10G |
3525 			       ETH_LINK_SPEED_20G |
3526 			       ETH_LINK_SPEED_25G;
3527 
3528 	phy_type_low = hw->port_info->phy.phy_type_low;
3529 	phy_type_high = hw->port_info->phy.phy_type_high;
3530 
3531 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3532 		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
3533 
3534 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3535 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3536 		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
3537 
3538 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3539 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3540 
3541 	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3542 	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3543 	dev_info->default_rxportconf.nb_queues = 1;
3544 	dev_info->default_txportconf.nb_queues = 1;
3545 	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3546 	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3547 
3548 	return 0;
3549 }
3550 
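/*
 * The link status is read and written as a single 64-bit value with
 * compare-and-swap so that readers never observe a half-updated
 * rte_eth_link structure.
 */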
3551 static inline int
3552 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3553 			    struct rte_eth_link *link)
3554 {
3555 	struct rte_eth_link *dst = link;
3556 	struct rte_eth_link *src = &dev->data->dev_link;
3557 
3558 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3559 				*(uint64_t *)src) == 0)
3560 		return -1;
3561 
3562 	return 0;
3563 }
3564 
3565 static inline int
3566 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3567 			     struct rte_eth_link *link)
3568 {
3569 	struct rte_eth_link *dst = &dev->data->dev_link;
3570 	struct rte_eth_link *src = link;
3571 
3572 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3573 				*(uint64_t *)src) == 0)
3574 		return -1;
3575 
3576 	return 0;
3577 }
3578 
3579 static int
3580 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3581 {
3582 #define CHECK_INTERVAL 100  /* 100ms */
3583 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
3584 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3585 	struct ice_link_status link_status;
3586 	struct rte_eth_link link, old;
3587 	int status;
3588 	unsigned int rep_cnt = MAX_REPEAT_TIME;
3589 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3590 
3591 	memset(&link, 0, sizeof(link));
3592 	memset(&old, 0, sizeof(old));
3593 	memset(&link_status, 0, sizeof(link_status));
3594 	ice_atomic_read_link_status(dev, &old);
3595 
3596 	do {
3597 		/* Get link status information from hardware */
3598 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
3599 					      &link_status, NULL);
3600 		if (status != ICE_SUCCESS) {
3601 			link.link_speed = ETH_SPEED_NUM_100M;
3602 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
3603 			PMD_DRV_LOG(ERR, "Failed to get link info");
3604 			goto out;
3605 		}
3606 
3607 		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3608 		if (!wait_to_complete || link.link_status)
3609 			break;
3610 
3611 		rte_delay_ms(CHECK_INTERVAL);
3612 	} while (--rep_cnt);
3613 
3614 	if (!link.link_status)
3615 		goto out;
3616 
3617 	/* Full-duplex operation at all supported speeds */
3618 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
3619 
3620 	/* Parse the link status */
3621 	switch (link_status.link_speed) {
3622 	case ICE_AQ_LINK_SPEED_10MB:
3623 		link.link_speed = ETH_SPEED_NUM_10M;
3624 		break;
3625 	case ICE_AQ_LINK_SPEED_100MB:
3626 		link.link_speed = ETH_SPEED_NUM_100M;
3627 		break;
3628 	case ICE_AQ_LINK_SPEED_1000MB:
3629 		link.link_speed = ETH_SPEED_NUM_1G;
3630 		break;
3631 	case ICE_AQ_LINK_SPEED_2500MB:
3632 		link.link_speed = ETH_SPEED_NUM_2_5G;
3633 		break;
3634 	case ICE_AQ_LINK_SPEED_5GB:
3635 		link.link_speed = ETH_SPEED_NUM_5G;
3636 		break;
3637 	case ICE_AQ_LINK_SPEED_10GB:
3638 		link.link_speed = ETH_SPEED_NUM_10G;
3639 		break;
3640 	case ICE_AQ_LINK_SPEED_20GB:
3641 		link.link_speed = ETH_SPEED_NUM_20G;
3642 		break;
3643 	case ICE_AQ_LINK_SPEED_25GB:
3644 		link.link_speed = ETH_SPEED_NUM_25G;
3645 		break;
3646 	case ICE_AQ_LINK_SPEED_40GB:
3647 		link.link_speed = ETH_SPEED_NUM_40G;
3648 		break;
3649 	case ICE_AQ_LINK_SPEED_50GB:
3650 		link.link_speed = ETH_SPEED_NUM_50G;
3651 		break;
3652 	case ICE_AQ_LINK_SPEED_100GB:
3653 		link.link_speed = ETH_SPEED_NUM_100G;
3654 		break;
3655 	case ICE_AQ_LINK_SPEED_UNKNOWN:
3656 		PMD_DRV_LOG(ERR, "Unknown link speed");
3657 		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
3658 		break;
3659 	default:
3660 		PMD_DRV_LOG(ERR, "No link speed reported");
3661 		link.link_speed = ETH_SPEED_NUM_NONE;
3662 		break;
3663 	}
3664 
3665 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3666 			      ETH_LINK_SPEED_FIXED);
3667 
3668 out:
3669 	ice_atomic_write_link_status(dev, &link);
3670 	if (link.link_status == old.link_status)
3671 		return -1;
3672 
3673 	return 0;
3674 }
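
/* Illustrative sketch, not part of the driver: ice_link_update() is normally
 * reached through the generic ethdev link API. "port_id" is assumed to be an
 * already probed and started port.
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get(port_id, &link) == 0 && link.link_status)
 *		printf("link up: %u Mbps, %s-duplex\n", link.link_speed,
 *		       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
 *		       "full" : "half");
 *
 * rte_eth_link_get() corresponds to wait_to_complete == 1 (polled for up to
 * 1s here), while rte_eth_link_get_nowait() corresponds to
 * wait_to_complete == 0.
 */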
3675 
3676 /* Force the physical link state by getting the current PHY capabilities from
3677  * hardware and setting the PHY config based on the determined capabilities. If
3678  * link changes, link event will be triggered because both the Enable Automatic
3679  * the link changes, a link event will be triggered because both the Enable Automatic
3680  */
3681 static enum ice_status
3682 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3683 {
3684 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3685 	struct ice_aqc_get_phy_caps_data *pcaps;
3686 	struct ice_port_info *pi;
3687 	enum ice_status status;
3688 
3689 	if (!hw || !hw->port_info)
3690 		return ICE_ERR_PARAM;
3691 
3692 	pi = hw->port_info;
3693 
3694 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3695 		ice_malloc(hw, sizeof(*pcaps));
3696 	if (!pcaps)
3697 		return ICE_ERR_NO_MEMORY;
3698 
3699 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3700 				     pcaps, NULL);
3701 	if (status)
3702 		goto out;
3703 
3704 	/* No change in link */
3705 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3706 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3707 		goto out;
3708 
3709 	cfg.phy_type_low = pcaps->phy_type_low;
3710 	cfg.phy_type_high = pcaps->phy_type_high;
3711 	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3712 	cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3713 	cfg.eee_cap = pcaps->eee_cap;
3714 	cfg.eeer_value = pcaps->eeer_value;
3715 	cfg.link_fec_opt = pcaps->link_fec_options;
3716 	if (link_up)
3717 		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3718 	else
3719 		cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3720 
3721 	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3722 
3723 out:
3724 	ice_free(hw, pcaps);
3725 	return status;
3726 }
3727 
3728 static int
3729 ice_dev_set_link_up(struct rte_eth_dev *dev)
3730 {
3731 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3732 
3733 	return ice_force_phys_link_state(hw, true);
3734 }
3735 
3736 static int
3737 ice_dev_set_link_down(struct rte_eth_dev *dev)
3738 {
3739 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3740 
3741 	return ice_force_phys_link_state(hw, false);
3742 }
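
/* Illustrative sketch, not part of the driver: the two callbacks above are
 * exposed through the ethdev API, so an application can force the PHY down
 * and back up like this ("port_id" is an assumed, valid port):
 *
 *	int ret = rte_eth_dev_set_link_down(port_id);
 *
 *	if (ret == 0)
 *		ret = rte_eth_dev_set_link_up(port_id);
 *
 * Because ICE_AQ_PHY_ENA_AUTO_LINK_UPDT is set in the PHY config above, a
 * link status change event is raised when the state actually flips.
 */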
3743 
3744 static int
3745 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3746 {
3747 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3748 	struct rte_eth_dev_data *dev_data = pf->dev_data;
3749 	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3750 
3751 	/* check if mtu is within the allowed range */
3752 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
3753 		return -EINVAL;
3754 
3755 	/* MTU setting is forbidden when the port is started */
3756 	if (dev_data->dev_started) {
3757 		PMD_DRV_LOG(ERR,
3758 			    "port %d must be stopped before configuration",
3759 			    dev_data->port_id);
3760 		return -EBUSY;
3761 	}
3762 
3763 	if (frame_size > ICE_ETH_MAX_LEN)
3764 		dev_data->dev_conf.rxmode.offloads |=
3765 			DEV_RX_OFFLOAD_JUMBO_FRAME;
3766 	else
3767 		dev_data->dev_conf.rxmode.offloads &=
3768 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
3769 
3770 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3771 
3772 	return 0;
3773 }
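
/* Illustrative sketch, not part of the driver: ice_mtu_set() is the backend
 * of rte_eth_dev_set_mtu(). The port must be stopped first, and the driver
 * derives the maximum frame size as mtu + ICE_ETH_OVERHEAD. "port_id" and
 * the 9000-byte MTU are assumptions for the example.
 *
 *	int ret = rte_eth_dev_stop(port_id);
 *
 *	if (ret == 0)
 *		ret = rte_eth_dev_set_mtu(port_id, 9000);
 *	if (ret == 0)
 *		ret = rte_eth_dev_start(port_id);
 */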
3774 
3775 static int ice_macaddr_set(struct rte_eth_dev *dev,
3776 			   struct rte_ether_addr *mac_addr)
3777 {
3778 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3779 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3780 	struct ice_vsi *vsi = pf->main_vsi;
3781 	struct ice_mac_filter *f;
3782 	uint8_t flags = 0;
3783 	int ret;
3784 
3785 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
3786 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
3787 		return -EINVAL;
3788 	}
3789 
3790 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
3791 		if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
3792 			break;
3793 	}
3794 
3795 	if (!f) {
3796 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
3797 		return -EIO;
3798 	}
3799 
3800 	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
3801 	if (ret != ICE_SUCCESS) {
3802 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
3803 		return -EIO;
3804 	}
3805 	ret = ice_add_mac_filter(vsi, mac_addr);
3806 	if (ret != ICE_SUCCESS) {
3807 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
3808 		return -EIO;
3809 	}
3810 	rte_ether_addr_copy(mac_addr, &pf->dev_addr);
3811 
3812 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
3813 	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
3814 	if (ret != ICE_SUCCESS)
3815 		PMD_DRV_LOG(ERR, "Failed to set manage mac");
3816 
3817 	return 0;
3818 }
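
/* Illustrative sketch, not part of the driver: replacing the default MAC
 * goes through rte_eth_dev_default_mac_addr_set(), which lands in
 * ice_macaddr_set() above. The locally administered address below is an
 * assumption for the example.
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	int ret = rte_eth_dev_default_mac_addr_set(port_id, &mac);
 */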
3819 
3820 /* Add a MAC address, and update filters */
3821 static int
3822 ice_macaddr_add(struct rte_eth_dev *dev,
3823 		struct rte_ether_addr *mac_addr,
3824 		__rte_unused uint32_t index,
3825 		__rte_unused uint32_t pool)
3826 {
3827 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3828 	struct ice_vsi *vsi = pf->main_vsi;
3829 	int ret;
3830 
3831 	ret = ice_add_mac_filter(vsi, mac_addr);
3832 	if (ret != ICE_SUCCESS) {
3833 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
3834 		return -EINVAL;
3835 	}
3836 
3837 	return ICE_SUCCESS;
3838 }
3839 
3840 /* Remove a MAC address, and update filters */
3841 static void
3842 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3843 {
3844 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3845 	struct ice_vsi *vsi = pf->main_vsi;
3846 	struct rte_eth_dev_data *data = dev->data;
3847 	struct rte_ether_addr *macaddr;
3848 	int ret;
3849 
3850 	macaddr = &data->mac_addrs[index];
3851 	ret = ice_remove_mac_filter(vsi, macaddr);
3852 	if (ret) {
3853 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
3854 		return;
3855 	}
3856 }
3857 
3858 static int
3859 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3860 {
3861 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3862 	struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
3863 	struct ice_vsi *vsi = pf->main_vsi;
3864 	int ret;
3865 
3866 	PMD_INIT_FUNC_TRACE();
3867 
3868 	/**
3869 	 * VLAN 0 is the generic filter for untagged packets
3870 	 * and can't be added or removed by the user.
3871 	 */
3872 	if (vlan_id == 0)
3873 		return 0;
3874 
3875 	if (on) {
3876 		ret = ice_add_vlan_filter(vsi, &vlan);
3877 		if (ret < 0) {
3878 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
3879 			return -EINVAL;
3880 		}
3881 	} else {
3882 		ret = ice_remove_vlan_filter(vsi, &vlan);
3883 		if (ret < 0) {
3884 			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
3885 			return -EINVAL;
3886 		}
3887 	}
3888 
3889 	return 0;
3890 }
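
/* Illustrative sketch, not part of the driver: per-VLAN filtering is driven
 * by rte_eth_dev_vlan_filter(), which only takes effect once the
 * DEV_RX_OFFLOAD_VLAN_FILTER offload is enabled (see ice_vlan_offload_set()
 * below). VLAN ID 100 and "port_id" are assumptions for the example.
 *
 *	int ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 *	...
 *	ret = rte_eth_dev_vlan_filter(port_id, 100, 0);
 *
 * The first call adds the filter, the second removes it.
 */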
3891 
3892 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
3893  * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
3894  * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
3895  * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
3896  *
3897  * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
3898  * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
3899  * traffic in SVM, since the VLAN TPID isn't part of filtering.
3900  *
3901  * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
3902  * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
3903  * part of filtering.
3904  */
3905 static int
3906 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
3907 {
3908 	struct ice_vlan vlan;
3909 	int err;
3910 
3911 	vlan = ICE_VLAN(0, 0);
3912 	err = ice_add_vlan_filter(vsi, &vlan);
3913 	if (err) {
3914 		PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
3915 		return err;
3916 	}
3917 
3918 	/* in SVM both VLAN 0 filters are identical */
3919 	if (!ice_is_dvm_ena(&vsi->adapter->hw))
3920 		return 0;
3921 
3922 	vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
3923 	err = ice_add_vlan_filter(vsi, &vlan);
3924 	if (err) {
3925 		PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
3926 		return err;
3927 	}
3928 
3929 	return 0;
3930 }
3931 
3932 /*
3933  * Delete the VLAN 0 filters in the same manner that they were added in
3934  * ice_vsi_add_vlan_zero.
3935  */
3936 static int
3937 ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
3938 {
3939 	struct ice_vlan vlan;
3940 	int err;
3941 
3942 	vlan = ICE_VLAN(0, 0);
3943 	err = ice_remove_vlan_filter(vsi, &vlan);
3944 	if (err) {
3945 		PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0");
3946 		return err;
3947 	}
3948 
3949 	/* in SVM both VLAN 0 filters are identical */
3950 	if (!ice_is_dvm_ena(&vsi->adapter->hw))
3951 		return 0;
3952 
3953 	vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
3954 	err = ice_remove_vlan_filter(vsi, &vlan);
3955 	if (err) {
3956 		PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode");
3957 		return err;
3958 	}
3959 
3960 	return 0;
3961 }
3962 
3963 /* Configure vlan filter on or off */
3964 static int
3965 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
3966 {
3967 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3968 	struct ice_vsi_ctx ctxt;
3969 	uint8_t sw_flags2;
3970 	int ret = 0;
3971 
3972 	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3973 
3974 	if (on)
3975 		vsi->info.sw_flags2 |= sw_flags2;
3976 	else
3977 		vsi->info.sw_flags2 &= ~sw_flags2;
3978 
3979 	vsi->info.sw_id = hw->port_info->sw_id;
3980 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3981 	ctxt.info.valid_sections =
3982 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3983 				 ICE_AQ_VSI_PROP_SECURITY_VALID);
3984 	ctxt.vsi_num = vsi->vsi_id;
3985 
3986 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3987 	if (ret) {
3988 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
3989 			    on ? "enable" : "disable");
3990 		return -EINVAL;
3991 	} else {
3992 		vsi->info.valid_sections |=
3993 			rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
3994 					 ICE_AQ_VSI_PROP_SECURITY_VALID);
3995 	}
3996 
3997 	/* Consistent with other drivers: allow untagged packets when the VLAN filter is on */
3998 	if (on)
3999 		ret = ice_vsi_add_vlan_zero(vsi);
4000 	else
4001 		ret = ice_vsi_del_vlan_zero(vsi);
4002 
4003 	return 0;
4004 }
4005 
4006 /* Manage VLAN stripping for the VSI for Rx */
4007 static int
4008 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4009 {
4010 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4011 	struct ice_vsi_ctx ctxt;
4012 	enum ice_status status;
4013 	int err = 0;
4014 
4015 	/* do not allow modifying VLAN stripping when a port VLAN is configured
4016 	 * on this VSI
4017 	 */
4018 	if (vsi->info.port_based_inner_vlan)
4019 		return 0;
4020 
4021 	memset(&ctxt, 0, sizeof(ctxt));
4022 
4023 	if (ena)
4024 		/* Strip VLAN tag from Rx packet and put it in the desc */
4025 		ctxt.info.inner_vlan_flags =
4026 					ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
4027 	else
4028 		/* Disable stripping. Leave tag in packet */
4029 		ctxt.info.inner_vlan_flags =
4030 					ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4031 
4032 	/* Allow all packets untagged/tagged */
4033 	ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
4034 
4035 	ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4036 
4037 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4038 	if (status) {
4039 		PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping",
4040 			    ena ? "enable" : "disable");
4041 		err = -EIO;
4042 	} else {
4043 		vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags;
4044 	}
4045 
4046 	return err;
4047 }
4048 
4049 static int
4050 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi)
4051 {
4052 	return ice_vsi_manage_vlan_stripping(vsi, true);
4053 }
4054 
4055 static int
4056 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
4057 {
4058 	return ice_vsi_manage_vlan_stripping(vsi, false);
4059 }
4060 
4061 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi)
4062 {
4063 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4064 	struct ice_vsi_ctx ctxt;
4065 	enum ice_status status;
4066 	int err = 0;
4067 
4068 	/* do not allow modifying VLAN stripping when a port VLAN is configured
4069 	 * on this VSI
4070 	 */
4071 	if (vsi->info.port_based_outer_vlan)
4072 		return 0;
4073 
4074 	memset(&ctxt, 0, sizeof(ctxt));
4075 
4076 	ctxt.info.valid_sections =
4077 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4078 	/* clear current outer VLAN strip settings */
4079 	ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4080 		~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
4081 	ctxt.info.outer_vlan_flags |=
4082 		(ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
4083 		 ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
4084 		(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
4085 		 ICE_AQ_VSI_OUTER_TAG_TYPE_S);
4086 
4087 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4088 	if (status) {
4089 		PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping");
4090 		err = -EIO;
4091 	} else {
4092 		vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4093 	}
4094 
4095 	return err;
4096 }
4097 
4098 static int
4099 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
4100 {
4101 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4102 	struct ice_vsi_ctx ctxt;
4103 	enum ice_status status;
4104 	int err = 0;
4105 
4106 	if (vsi->info.port_based_outer_vlan)
4107 		return 0;
4108 
4109 	memset(&ctxt, 0, sizeof(ctxt));
4110 
4111 	ctxt.info.valid_sections =
4112 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4113 	/* clear current outer VLAN strip settings */
4114 	ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4115 		~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
4116 	ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
4117 		ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
4118 
4119 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4120 	if (status) {
4121 		PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping");
4122 		err = -EIO;
4123 	} else {
4124 		vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4125 	}
4126 
4127 	return err;
4128 }
4129 
4130 static int
4131 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena)
4132 {
4133 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4134 	int ret;
4135 
4136 	if (ice_is_dvm_ena(hw)) {
4137 		if (ena)
4138 			ret = ice_vsi_ena_outer_stripping(vsi);
4139 		else
4140 			ret = ice_vsi_dis_outer_stripping(vsi);
4141 	} else {
4142 		if (ena)
4143 			ret = ice_vsi_ena_inner_stripping(vsi);
4144 		else
4145 			ret = ice_vsi_dis_inner_stripping(vsi);
4146 	}
4147 
4148 	return ret;
4149 }
4150 
4151 static int
4152 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4153 {
4154 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4155 	struct ice_vsi *vsi = pf->main_vsi;
4156 	struct rte_eth_rxmode *rxmode;
4157 
4158 	rxmode = &dev->data->dev_conf.rxmode;
4159 	if (mask & ETH_VLAN_FILTER_MASK) {
4160 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4161 			ice_vsi_config_vlan_filter(vsi, true);
4162 		else
4163 			ice_vsi_config_vlan_filter(vsi, false);
4164 	}
4165 
4166 	if (mask & ETH_VLAN_STRIP_MASK) {
4167 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4168 			ice_vsi_config_vlan_stripping(vsi, true);
4169 		else
4170 			ice_vsi_config_vlan_stripping(vsi, false);
4171 	}
4172 
4173 	return 0;
4174 }
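
/* Illustrative sketch, not part of the driver: ice_vlan_offload_set() is
 * reached via rte_eth_dev_set_vlan_offload(), which derives the changed-bits
 * mask from the port's current offload state. "port_id" is assumed valid.
 *
 *	int offload = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	offload |= ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
 *	int ret = rte_eth_dev_set_vlan_offload(port_id, offload);
 *
 * Only the bits that differ from the current configuration are passed to the
 * PMD in the "mask" argument above.
 */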
4175 
4176 static int
4177 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4178 {
4179 	struct ice_aq_get_set_rss_lut_params lut_params;
4180 	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4181 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4182 	int ret;
4183 
4184 	if (!lut)
4185 		return -EINVAL;
4186 
4187 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4188 		lut_params.vsi_handle = vsi->idx;
4189 		lut_params.lut_size = lut_size;
4190 		lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4191 		lut_params.lut = lut;
4192 		lut_params.global_lut_id = 0;
4193 		ret = ice_aq_get_rss_lut(hw, &lut_params);
4194 		if (ret) {
4195 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4196 			return -EINVAL;
4197 		}
4198 	} else {
4199 		uint64_t *lut_dw = (uint64_t *)lut;
4200 		uint16_t i, lut_size_dw = lut_size / 4;
4201 
4202 		for (i = 0; i < lut_size_dw; i++)
4203 			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4204 	}
4205 
4206 	return 0;
4207 }
4208 
4209 static int
4210 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4211 {
4212 	struct ice_aq_get_set_rss_lut_params lut_params;
4213 	struct ice_pf *pf;
4214 	struct ice_hw *hw;
4215 	int ret;
4216 
4217 	if (!vsi || !lut)
4218 		return -EINVAL;
4219 
4220 	pf = ICE_VSI_TO_PF(vsi);
4221 	hw = ICE_VSI_TO_HW(vsi);
4222 
4223 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4224 		lut_params.vsi_handle = vsi->idx;
4225 		lut_params.lut_size = lut_size;
4226 		lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4227 		lut_params.lut = lut;
4228 		lut_params.global_lut_id = 0;
4229 		ret = ice_aq_set_rss_lut(hw, &lut_params);
4230 		if (ret) {
4231 			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4232 			return -EINVAL;
4233 		}
4234 	} else {
4235 		uint64_t *lut_dw = (uint64_t *)lut;
4236 		uint16_t i, lut_size_dw = lut_size / 4;
4237 
4238 		for (i = 0; i < lut_size_dw; i++)
4239 			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4240 
4241 		ice_flush(hw);
4242 	}
4243 
4244 	return 0;
4245 }
4246 
4247 static int
4248 ice_rss_reta_update(struct rte_eth_dev *dev,
4249 		    struct rte_eth_rss_reta_entry64 *reta_conf,
4250 		    uint16_t reta_size)
4251 {
4252 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4253 	uint16_t i, lut_size = pf->hash_lut_size;
4254 	uint16_t idx, shift;
4255 	uint8_t *lut;
4256 	int ret;
4257 
4258 	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4259 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4260 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4261 		PMD_DRV_LOG(ERR,
4262 			    "The size of hash lookup table configured (%d) "
4263 			    "doesn't match the sizes hardware supports "
4264 			    "(128, 512, 2048)",
4265 			    reta_size);
4266 		return -EINVAL;
4267 	}
4268 
4269 	/* The current LUT size MUST be used to get the RSS lookup table,
4270 	 * otherwise it will fail with a -100 error code.
4271 	 */
4272 	lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
4273 	if (!lut) {
4274 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4275 		return -ENOMEM;
4276 	}
4277 	ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
4278 	if (ret)
4279 		goto out;
4280 
4281 	for (i = 0; i < reta_size; i++) {
4282 		idx = i / RTE_RETA_GROUP_SIZE;
4283 		shift = i % RTE_RETA_GROUP_SIZE;
4284 		if (reta_conf[idx].mask & (1ULL << shift))
4285 			lut[i] = reta_conf[idx].reta[shift];
4286 	}
4287 	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4288 	if (ret == 0 && lut_size != reta_size) {
4289 		PMD_DRV_LOG(INFO,
4290 			    "The size of hash lookup table is changed from (%d) to (%d)",
4291 			    lut_size, reta_size);
4292 		pf->hash_lut_size = reta_size;
4293 	}
4294 
4295 out:
4296 	rte_free(lut);
4297 
4298 	return ret;
4299 }
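
/* Illustrative sketch, not part of the driver: an application updates the
 * redirection table through rte_eth_dev_rss_reta_update(). Spreading over
 * four Rx queues and the 512-entry upper bound are assumptions for the
 * example; the real table size is reported in dev_info.reta_size.
 *
 *	struct rte_eth_rss_reta_entry64 reta[512 / RTE_RETA_GROUP_SIZE];
 *	struct rte_eth_dev_info info;
 *	uint16_t i;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < info.reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % 4;
 *	}
 *	int ret = rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
 */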
4300 
4301 static int
4302 ice_rss_reta_query(struct rte_eth_dev *dev,
4303 		   struct rte_eth_rss_reta_entry64 *reta_conf,
4304 		   uint16_t reta_size)
4305 {
4306 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4307 	uint16_t i, lut_size = pf->hash_lut_size;
4308 	uint16_t idx, shift;
4309 	uint8_t *lut;
4310 	int ret;
4311 
4312 	if (reta_size != lut_size) {
4313 		PMD_DRV_LOG(ERR,
4314 			    "The size of hash lookup table configured (%d) "
4315 			    "doesn't match the size hardware supports "
4316 			    "(%d)",
4317 			    reta_size, lut_size);
4318 		return -EINVAL;
4319 	}
4320 
4321 	lut = rte_zmalloc(NULL, reta_size, 0);
4322 	if (!lut) {
4323 		PMD_DRV_LOG(ERR, "Failed to allocate memory");
4324 		return -ENOMEM;
4325 	}
4326 
4327 	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4328 	if (ret)
4329 		goto out;
4330 
4331 	for (i = 0; i < reta_size; i++) {
4332 		idx = i / RTE_RETA_GROUP_SIZE;
4333 		shift = i % RTE_RETA_GROUP_SIZE;
4334 		if (reta_conf[idx].mask & (1ULL << shift))
4335 			reta_conf[idx].reta[shift] = lut[i];
4336 	}
4337 
4338 out:
4339 	rte_free(lut);
4340 
4341 	return ret;
4342 }
4343 
4344 static int
4345 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4346 {
4347 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4348 	int ret = 0;
4349 
4350 	if (!key || key_len == 0) {
4351 		PMD_DRV_LOG(DEBUG, "No key to be configured");
4352 		return 0;
4353 	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4354 		   sizeof(uint32_t)) {
4355 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4356 		return -EINVAL;
4357 	}
4358 
4359 	struct ice_aqc_get_set_rss_keys *key_dw =
4360 		(struct ice_aqc_get_set_rss_keys *)key;
4361 
4362 	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4363 	if (ret) {
4364 		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4365 		ret = -EINVAL;
4366 	}
4367 
4368 	return ret;
4369 }
4370 
4371 static int
4372 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4373 {
4374 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4375 	int ret;
4376 
4377 	if (!key || !key_len)
4378 		return -EINVAL;
4379 
4380 	ret = ice_aq_get_rss_key
4381 		(hw, vsi->idx,
4382 		 (struct ice_aqc_get_set_rss_keys *)key);
4383 	if (ret) {
4384 		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4385 		return -EINVAL;
4386 	}
4387 	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4388 
4389 	return 0;
4390 }
4391 
4392 static int
4393 ice_rss_hash_update(struct rte_eth_dev *dev,
4394 		    struct rte_eth_rss_conf *rss_conf)
4395 {
4396 	enum ice_status status = ICE_SUCCESS;
4397 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4398 	struct ice_vsi *vsi = pf->main_vsi;
4399 
4400 	/* set hash key */
4401 	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4402 	if (status)
4403 		return status;
4404 
4405 	if (rss_conf->rss_hf == 0) {
4406 		pf->rss_hf = 0;
4407 		return 0;
4408 	}
4409 
4410 	/* RSS hash configuration */
4411 	ice_rss_hash_set(pf, rss_conf->rss_hf);
4412 
4413 	return 0;
4414 }
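
/* Illustrative sketch, not part of the driver: the RSS key and hash types
 * are set through rte_eth_dev_rss_hash_update(). The key length must match
 * dev_info.hash_key_size (see the check in ice_set_rss_key() above); the
 * hash-type selection is an assumption for the example, and the application
 * is expected to fill "key" with random bytes before the call.
 *
 *	struct rte_eth_dev_info info;
 *	uint8_t key[64];
 *	struct rte_eth_rss_conf conf;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	conf.rss_key = key;
 *	conf.rss_key_len = info.hash_key_size;
 *	conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
 *		      ETH_RSS_NONFRAG_IPV4_UDP;
 *	int ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 */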
4415 
4416 static int
4417 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
4418 		      struct rte_eth_rss_conf *rss_conf)
4419 {
4420 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4421 	struct ice_vsi *vsi = pf->main_vsi;
4422 
4423 	ice_get_rss_key(vsi, rss_conf->rss_key,
4424 			&rss_conf->rss_key_len);
4425 
4426 	rss_conf->rss_hf = pf->rss_hf;
4427 	return 0;
4428 }
4429 
4430 static int
4431 ice_promisc_enable(struct rte_eth_dev *dev)
4432 {
4433 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4434 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4435 	struct ice_vsi *vsi = pf->main_vsi;
4436 	enum ice_status status;
4437 	uint8_t pmask;
4438 	int ret = 0;
4439 
4440 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4441 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4442 
4443 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4444 	switch (status) {
4445 	case ICE_ERR_ALREADY_EXISTS:
4446 		PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
4447 	case ICE_SUCCESS:
4448 		break;
4449 	default:
4450 		PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
4451 		ret = -EAGAIN;
4452 	}
4453 
4454 	return ret;
4455 }
4456 
4457 static int
4458 ice_promisc_disable(struct rte_eth_dev *dev)
4459 {
4460 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4461 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4462 	struct ice_vsi *vsi = pf->main_vsi;
4463 	enum ice_status status;
4464 	uint8_t pmask;
4465 	int ret = 0;
4466 
4467 	if (dev->data->all_multicast == 1)
4468 		pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX;
4469 	else
4470 		pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4471 			ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4472 
4473 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4474 	if (status != ICE_SUCCESS) {
4475 		PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
4476 		ret = -EAGAIN;
4477 	}
4478 
4479 	return ret;
4480 }
4481 
4482 static int
4483 ice_allmulti_enable(struct rte_eth_dev *dev)
4484 {
4485 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4486 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4487 	struct ice_vsi *vsi = pf->main_vsi;
4488 	enum ice_status status;
4489 	uint8_t pmask;
4490 	int ret = 0;
4491 
4492 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4493 
4494 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4495 
4496 	switch (status) {
4497 	case ICE_ERR_ALREADY_EXISTS:
4498 		PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
4499 	case ICE_SUCCESS:
4500 		break;
4501 	default:
4502 		PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
4503 		ret = -EAGAIN;
4504 	}
4505 
4506 	return ret;
4507 }
4508 
4509 static int
4510 ice_allmulti_disable(struct rte_eth_dev *dev)
4511 {
4512 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4513 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4514 	struct ice_vsi *vsi = pf->main_vsi;
4515 	enum ice_status status;
4516 	uint8_t pmask;
4517 	int ret = 0;
4518 
4519 	if (dev->data->promiscuous == 1)
4520 		return 0; /* must remain in all_multicast mode */
4521 
4522 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4523 
4524 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4525 	if (status != ICE_SUCCESS) {
4526 		PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
4527 		ret = -EAGAIN;
4528 	}
4529 
4530 	return ret;
4531 }
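
/* Illustrative sketch, not part of the driver: the four callbacks above back
 * the generic promiscuous/all-multicast controls, and they interact exactly
 * as the checks above describe: disabling promiscuous mode keeps multicast
 * promiscuity while all-multicast is still on, and all-multicast cannot be
 * cleared while promiscuous mode is active. "port_id" is assumed valid.
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	rte_eth_allmulticast_enable(port_id);
 *	rte_eth_promiscuous_disable(port_id);
 *	rte_eth_allmulticast_disable(port_id);
 */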
4532 
4533 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
4534 				    uint16_t queue_id)
4535 {
4536 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4537 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4538 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4539 	uint32_t val;
4540 	uint16_t msix_intr;
4541 
4542 	msix_intr = intr_handle->intr_vec[queue_id];
4543 
4544 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
4545 	      GLINT_DYN_CTL_ITR_INDX_M;
4546 	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
4547 
4548 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
4549 	rte_intr_ack(&pci_dev->intr_handle);
4550 
4551 	return 0;
4552 }
4553 
4554 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
4555 				     uint16_t queue_id)
4556 {
4557 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4558 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4559 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4560 	uint16_t msix_intr;
4561 
4562 	msix_intr = intr_handle->intr_vec[queue_id];
4563 
4564 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
4565 
4566 	return 0;
4567 }
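
/* Illustrative sketch, not part of the driver: Rx interrupt control maps to
 * the two callbacks above. The port has to be configured with
 * dev_conf.intr_conf.rxq = 1 so an interrupt vector is assigned per Rx
 * queue; "port_id" and "queue_id" are assumptions for the example. Between
 * the two calls the application would typically sleep on the queue's event
 * fd, e.g. via rte_epoll_wait() after rte_eth_dev_rx_intr_ctl_q().
 *
 *	int ret = rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	...
 *	ret = rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */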
4568 
4569 static int
4570 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
4571 {
4572 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4573 	u8 ver, patch;
4574 	u16 build;
4575 	int ret;
4576 
4577 	ver = hw->flash.orom.major;
4578 	patch = hw->flash.orom.patch;
4579 	build = hw->flash.orom.build;
4580 
4581 	ret = snprintf(fw_version, fw_size,
4582 			"%x.%02x 0x%08x %d.%d.%d",
4583 			hw->flash.nvm.major,
4584 			hw->flash.nvm.minor,
4585 			hw->flash.nvm.eetrack,
4586 			ver, build, patch);
4587 	if (ret < 0)
4588 		return -EINVAL;
4589 
4590 	/* add the size of '\0' */
4591 	ret += 1;
4592 	if (fw_size < (size_t)ret)
4593 		return ret;
4594 	else
4595 		return 0;
4596 }
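
/* Illustrative sketch, not part of the driver: ice_fw_version_get() returns
 * 0 when the string fits, or the required buffer size (including the
 * terminating '\0') when it does not, and rte_eth_dev_fw_version_get()
 * forwards that value. The 64-byte buffer is an assumption for the example.
 *
 *	char fw[64];
 *	int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *
 *	if (ret == 0)
 *		printf("FW: %s\n", fw);
 *	else if (ret > 0)
 *		printf("need a %d byte buffer\n", ret);
 */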
4597 
4598 static int
4599 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
4600 {
4601 	struct ice_hw *hw;
4602 	struct ice_vsi_ctx ctxt;
4603 	uint8_t vlan_flags = 0;
4604 	int ret;
4605 
4606 	if (!vsi || !info) {
4607 		PMD_DRV_LOG(ERR, "invalid parameters");
4608 		return -EINVAL;
4609 	}
4610 
4611 	if (info->on) {
4612 		vsi->info.port_based_inner_vlan = info->config.pvid;
4613 		/**
4614 		 * If insert pvid is enabled, only tagged pkts are
4615 		 * allowed to be sent out.
4616 		 */
4617 		vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4618 			     ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4619 	} else {
4620 		vsi->info.port_based_inner_vlan = 0;
4621 		if (info->config.reject.tagged == 0)
4622 			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED;
4623 
4624 		if (info->config.reject.untagged == 0)
4625 			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4626 	}
4627 	vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4628 				  ICE_AQ_VSI_INNER_VLAN_EMODE_M);
4629 	vsi->info.inner_vlan_flags |= vlan_flags;
4630 	memset(&ctxt, 0, sizeof(ctxt));
4631 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4632 	ctxt.info.valid_sections =
4633 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4634 	ctxt.vsi_num = vsi->vsi_id;
4635 
4636 	hw = ICE_VSI_TO_HW(vsi);
4637 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4638 	if (ret != ICE_SUCCESS) {
4639 		PMD_DRV_LOG(ERR,
4640 			    "update VSI for VLAN insert failed, err %d",
4641 			    ret);
4642 		return -EINVAL;
4643 	}
4644 
4645 	vsi->info.valid_sections |=
4646 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4647 
4648 	return ret;
4649 }
4650 
4651 static int
4652 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4653 {
4654 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4655 	struct ice_vsi *vsi = pf->main_vsi;
4656 	struct rte_eth_dev_data *data = pf->dev_data;
4657 	struct ice_vsi_vlan_pvid_info info;
4658 	int ret;
4659 
4660 	memset(&info, 0, sizeof(info));
4661 	info.on = on;
4662 	if (info.on) {
4663 		info.config.pvid = pvid;
4664 	} else {
4665 		info.config.reject.tagged =
4666 			data->dev_conf.txmode.hw_vlan_reject_tagged;
4667 		info.config.reject.untagged =
4668 			data->dev_conf.txmode.hw_vlan_reject_untagged;
4669 	}
4670 
4671 	ret = ice_vsi_vlan_pvid_set(vsi, &info);
4672 	if (ret < 0) {
4673 		PMD_DRV_LOG(ERR, "Failed to set pvid.");
4674 		return -EINVAL;
4675 	}
4676 
4677 	return 0;
4678 }
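
/* Illustrative sketch, not part of the driver: the port VLAN (PVID) is set
 * through rte_eth_dev_set_vlan_pvid(). As the comment in
 * ice_vsi_vlan_pvid_set() notes, enabling PVID insertion restricts Tx to
 * tagged packets. PVID 100 and "port_id" are assumptions for the example.
 *
 *	int ret = rte_eth_dev_set_vlan_pvid(port_id, 100, 1);
 *	...
 *	ret = rte_eth_dev_set_vlan_pvid(port_id, 0, 0);
 */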
4679 
4680 static int
4681 ice_get_eeprom_length(struct rte_eth_dev *dev)
4682 {
4683 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4684 
4685 	return hw->flash.flash_size;
4686 }
4687 
4688 static int
4689 ice_get_eeprom(struct rte_eth_dev *dev,
4690 	       struct rte_dev_eeprom_info *eeprom)
4691 {
4692 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4693 	enum ice_status status = ICE_SUCCESS;
4694 	uint8_t *data = eeprom->data;
4695 
4696 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4697 
4698 	status = ice_acquire_nvm(hw, ICE_RES_READ);
4699 	if (status) {
4700 		PMD_DRV_LOG(ERR, "acquire nvm failed.");
4701 		return -EIO;
4702 	}
4703 
4704 	status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
4705 				   data, false);
4706 
4707 	ice_release_nvm(hw);
4708 
4709 	if (status) {
4710 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
4711 		return -EIO;
4712 	}
4713 
4714 	return 0;
4715 }
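
/* Illustrative sketch, not part of the driver: the flash contents exposed by
 * the two callbacks above are read with the generic EEPROM API. Allocation
 * and error handling are omitted for brevity; "port_id" is assumed valid.
 *
 *	struct rte_dev_eeprom_info info = { 0 };
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	info.data = malloc(len);
 *	info.offset = 0;
 *	info.length = len;
 *	int ret = rte_eth_dev_get_eeprom(port_id, &info);
 */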
4716 
4717 static void
4718 ice_stat_update_32(struct ice_hw *hw,
4719 		   uint32_t reg,
4720 		   bool offset_loaded,
4721 		   uint64_t *offset,
4722 		   uint64_t *stat)
4723 {
4724 	uint64_t new_data;
4725 
4726 	new_data = (uint64_t)ICE_READ_REG(hw, reg);
4727 	if (!offset_loaded)
4728 		*offset = new_data;
4729 
4730 	if (new_data >= *offset)
4731 		*stat = (uint64_t)(new_data - *offset);
4732 	else
4733 		*stat = (uint64_t)((new_data +
4734 				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
4735 				   - *offset);
4736 }
4737 
4738 static void
4739 ice_stat_update_40(struct ice_hw *hw,
4740 		   uint32_t hireg,
4741 		   uint32_t loreg,
4742 		   bool offset_loaded,
4743 		   uint64_t *offset,
4744 		   uint64_t *stat)
4745 {
4746 	uint64_t new_data;
4747 
4748 	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
4749 	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
4750 		    ICE_32_BIT_WIDTH;
4751 
4752 	if (!offset_loaded)
4753 		*offset = new_data;
4754 
4755 	if (new_data >= *offset)
4756 		*stat = new_data - *offset;
4757 	else
4758 		*stat = (uint64_t)((new_data +
4759 				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
4760 				   *offset);
4761 
4762 	*stat &= ICE_40_BIT_MASK;
4763 }
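
/* Worked example of the rollover handling above (values are hypothetical):
 * with a previous snapshot *offset = 0xFFFFFFFF00 and a new 40-bit reading
 * new_data = 0x40, new_data < *offset, so the hardware counter wrapped and
 *
 *	*stat = new_data + 2^40 - *offset
 *	      = 0x40 + 0x10000000000 - 0xFFFFFFFF00
 *	      = 0x140 (320)
 *
 * i.e. 256 increments up to the wrap plus 64 after it. The final mask keeps
 * the result within 40 bits.
 */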
4764 
4765 /* Get all the statistics of a VSI */
4766 static void
4767 ice_update_vsi_stats(struct ice_vsi *vsi)
4768 {
4769 	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
4770 	struct ice_eth_stats *nes = &vsi->eth_stats;
4771 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4772 	int idx = rte_le_to_cpu_16(vsi->vsi_id);
4773 
4774 	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
4775 			   vsi->offset_loaded, &oes->rx_bytes,
4776 			   &nes->rx_bytes);
4777 	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
4778 			   vsi->offset_loaded, &oes->rx_unicast,
4779 			   &nes->rx_unicast);
4780 	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
4781 			   vsi->offset_loaded, &oes->rx_multicast,
4782 			   &nes->rx_multicast);
4783 	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
4784 			   vsi->offset_loaded, &oes->rx_broadcast,
4785 			   &nes->rx_broadcast);
4786 	/* extend rx_bytes to 64 bits in software when the 40-bit HW counter wraps */
4787 	if (vsi->offset_loaded) {
4788 		if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
4789 			nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4790 		nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
4791 	}
4792 	vsi->old_rx_bytes = nes->rx_bytes;
4793 	/* exclude CRC bytes */
4794 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
4795 			  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
4796 
4797 	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
4798 			   &oes->rx_discards, &nes->rx_discards);
4799 	/* GLV_REPC not supported */
4800 	/* GLV_RMPC not supported */
4801 	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
4802 			   &oes->rx_unknown_protocol,
4803 			   &nes->rx_unknown_protocol);
4804 	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
4805 			   vsi->offset_loaded, &oes->tx_bytes,
4806 			   &nes->tx_bytes);
4807 	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
4808 			   vsi->offset_loaded, &oes->tx_unicast,
4809 			   &nes->tx_unicast);
4810 	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
4811 			   vsi->offset_loaded, &oes->tx_multicast,
4812 			   &nes->tx_multicast);
4813 	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
4814 			   vsi->offset_loaded,  &oes->tx_broadcast,
4815 			   &nes->tx_broadcast);
4816 	/* GLV_TDPC not supported */
4817 	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
4818 			   &oes->tx_errors, &nes->tx_errors);
4819 	/* extend tx_bytes to 64 bits in software when the 40-bit HW counter wraps */
4820 	if (vsi->offset_loaded) {
4821 		if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
4822 			nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4823 		nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
4824 	}
4825 	vsi->old_tx_bytes = nes->tx_bytes;
4826 	vsi->offset_loaded = true;
4827 
4828 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
4829 		    vsi->vsi_id);
4830 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
4831 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
4832 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
4833 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
4834 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
4835 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
4836 		    nes->rx_unknown_protocol);
4837 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
4838 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
4839 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
4840 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
4841 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
4842 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
4843 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
4844 		    vsi->vsi_id);
4845 }
4846 
4847 static void
4848 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
4849 {
4850 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
4851 	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
4852 
4853 	/* Get statistics of struct ice_eth_stats */
4854 	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
4855 			   GLPRT_GORCL(hw->port_info->lport),
4856 			   pf->offset_loaded, &os->eth.rx_bytes,
4857 			   &ns->eth.rx_bytes);
4858 	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
4859 			   GLPRT_UPRCL(hw->port_info->lport),
4860 			   pf->offset_loaded, &os->eth.rx_unicast,
4861 			   &ns->eth.rx_unicast);
4862 	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
4863 			   GLPRT_MPRCL(hw->port_info->lport),
4864 			   pf->offset_loaded, &os->eth.rx_multicast,
4865 			   &ns->eth.rx_multicast);
4866 	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
4867 			   GLPRT_BPRCL(hw->port_info->lport),
4868 			   pf->offset_loaded, &os->eth.rx_broadcast,
4869 			   &ns->eth.rx_broadcast);
4870 	ice_stat_update_32(hw, PRTRPB_RDPC,
4871 			   pf->offset_loaded, &os->eth.rx_discards,
4872 			   &ns->eth.rx_discards);
4873 	/* extend rx_bytes to 64 bits in software when the 40-bit HW counter wraps */
4874 	if (pf->offset_loaded) {
4875 		if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
4876 			ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4877 		ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
4878 	}
4879 	pf->old_rx_bytes = ns->eth.rx_bytes;
4880 
4881 	/* Workaround: CRC size should not be included in byte statistics,
4882 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
4883 	 * packet.
4884 	 */
4885 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
4886 			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
4887 
4888 	/* GLPRT_REPC not supported */
4889 	/* GLPRT_RMPC not supported */
4890 	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
4891 			   pf->offset_loaded,
4892 			   &os->eth.rx_unknown_protocol,
4893 			   &ns->eth.rx_unknown_protocol);
4894 	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
4895 			   GLPRT_GOTCL(hw->port_info->lport),
4896 			   pf->offset_loaded, &os->eth.tx_bytes,
4897 			   &ns->eth.tx_bytes);
4898 	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
4899 			   GLPRT_UPTCL(hw->port_info->lport),
4900 			   pf->offset_loaded, &os->eth.tx_unicast,
4901 			   &ns->eth.tx_unicast);
4902 	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
4903 			   GLPRT_MPTCL(hw->port_info->lport),
4904 			   pf->offset_loaded, &os->eth.tx_multicast,
4905 			   &ns->eth.tx_multicast);
4906 	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
4907 			   GLPRT_BPTCL(hw->port_info->lport),
4908 			   pf->offset_loaded, &os->eth.tx_broadcast,
4909 			   &ns->eth.tx_broadcast);
4910 	/* extend tx_bytes to 64 bits in software when the 40-bit HW counter wraps */
4911 	if (pf->offset_loaded) {
4912 		if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
4913 			ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
4914 		ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
4915 	}
4916 	pf->old_tx_bytes = ns->eth.tx_bytes;
4917 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
4918 			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
4919 
4920 	/* GLPRT_TEPC not supported */
4921 
4922 	/* additional port specific stats */
4923 	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
4924 			   pf->offset_loaded, &os->tx_dropped_link_down,
4925 			   &ns->tx_dropped_link_down);
4926 	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
4927 			   pf->offset_loaded, &os->crc_errors,
4928 			   &ns->crc_errors);
4929 	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
4930 			   pf->offset_loaded, &os->illegal_bytes,
4931 			   &ns->illegal_bytes);
4932 	/* GLPRT_ERRBC not supported */
4933 	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
4934 			   pf->offset_loaded, &os->mac_local_faults,
4935 			   &ns->mac_local_faults);
4936 	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
4937 			   pf->offset_loaded, &os->mac_remote_faults,
4938 			   &ns->mac_remote_faults);
4939 
4940 	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
4941 			   pf->offset_loaded, &os->rx_len_errors,
4942 			   &ns->rx_len_errors);
4943 
4944 	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
4945 			   pf->offset_loaded, &os->link_xon_rx,
4946 			   &ns->link_xon_rx);
4947 	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
4948 			   pf->offset_loaded, &os->link_xoff_rx,
4949 			   &ns->link_xoff_rx);
4950 	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
4951 			   pf->offset_loaded, &os->link_xon_tx,
4952 			   &ns->link_xon_tx);
4953 	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
4954 			   pf->offset_loaded, &os->link_xoff_tx,
4955 			   &ns->link_xoff_tx);
4956 	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
4957 			   GLPRT_PRC64L(hw->port_info->lport),
4958 			   pf->offset_loaded, &os->rx_size_64,
4959 			   &ns->rx_size_64);
4960 	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
4961 			   GLPRT_PRC127L(hw->port_info->lport),
4962 			   pf->offset_loaded, &os->rx_size_127,
4963 			   &ns->rx_size_127);
4964 	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
4965 			   GLPRT_PRC255L(hw->port_info->lport),
4966 			   pf->offset_loaded, &os->rx_size_255,
4967 			   &ns->rx_size_255);
4968 	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
4969 			   GLPRT_PRC511L(hw->port_info->lport),
4970 			   pf->offset_loaded, &os->rx_size_511,
4971 			   &ns->rx_size_511);
4972 	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
4973 			   GLPRT_PRC1023L(hw->port_info->lport),
4974 			   pf->offset_loaded, &os->rx_size_1023,
4975 			   &ns->rx_size_1023);
4976 	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
4977 			   GLPRT_PRC1522L(hw->port_info->lport),
4978 			   pf->offset_loaded, &os->rx_size_1522,
4979 			   &ns->rx_size_1522);
4980 	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
4981 			   GLPRT_PRC9522L(hw->port_info->lport),
4982 			   pf->offset_loaded, &os->rx_size_big,
4983 			   &ns->rx_size_big);
4984 	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
4985 			   pf->offset_loaded, &os->rx_undersize,
4986 			   &ns->rx_undersize);
4987 	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
4988 			   pf->offset_loaded, &os->rx_fragments,
4989 			   &ns->rx_fragments);
4990 	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
4991 			   pf->offset_loaded, &os->rx_oversize,
4992 			   &ns->rx_oversize);
4993 	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
4994 			   pf->offset_loaded, &os->rx_jabber,
4995 			   &ns->rx_jabber);
4996 	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
4997 			   GLPRT_PTC64L(hw->port_info->lport),
4998 			   pf->offset_loaded, &os->tx_size_64,
4999 			   &ns->tx_size_64);
5000 	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
5001 			   GLPRT_PTC127L(hw->port_info->lport),
5002 			   pf->offset_loaded, &os->tx_size_127,
5003 			   &ns->tx_size_127);
5004 	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
5005 			   GLPRT_PTC255L(hw->port_info->lport),
5006 			   pf->offset_loaded, &os->tx_size_255,
5007 			   &ns->tx_size_255);
5008 	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
5009 			   GLPRT_PTC511L(hw->port_info->lport),
5010 			   pf->offset_loaded, &os->tx_size_511,
5011 			   &ns->tx_size_511);
5012 	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
5013 			   GLPRT_PTC1023L(hw->port_info->lport),
5014 			   pf->offset_loaded, &os->tx_size_1023,
5015 			   &ns->tx_size_1023);
5016 	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
5017 			   GLPRT_PTC1522L(hw->port_info->lport),
5018 			   pf->offset_loaded, &os->tx_size_1522,
5019 			   &ns->tx_size_1522);
5020 	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
5021 			   GLPRT_PTC9522L(hw->port_info->lport),
5022 			   pf->offset_loaded, &os->tx_size_big,
5023 			   &ns->tx_size_big);
5024 
5025 	/* GLPRT_MSPDC not supported */
5026 	/* GLPRT_XEC not supported */
5027 
5028 	pf->offset_loaded = true;
5029 
5030 	if (pf->main_vsi)
5031 		ice_update_vsi_stats(pf->main_vsi);
5032 }
5033 
5034 /* Get all statistics of a port */
5035 static int
5036 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
5037 {
5038 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5039 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5040 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
5041 
5042 	/* call read registers - updates values, now write them to struct */
5043 	ice_read_stats_registers(pf, hw);
5044 
5045 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
5046 			  pf->main_vsi->eth_stats.rx_multicast +
5047 			  pf->main_vsi->eth_stats.rx_broadcast -
5048 			  pf->main_vsi->eth_stats.rx_discards;
5049 	stats->opackets = ns->eth.tx_unicast +
5050 			  ns->eth.tx_multicast +
5051 			  ns->eth.tx_broadcast;
5052 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
5053 	stats->obytes   = ns->eth.tx_bytes;
5054 	stats->oerrors  = ns->eth.tx_errors +
5055 			  pf->main_vsi->eth_stats.tx_errors;
5056 
5057 	/* Rx Errors */
5058 	stats->imissed  = ns->eth.rx_discards +
5059 			  pf->main_vsi->eth_stats.rx_discards;
5060 	stats->ierrors  = ns->crc_errors +
5061 			  ns->rx_undersize +
5062 			  ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
5063 
5064 	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
5065 	PMD_DRV_LOG(DEBUG, "rx_bytes:	%"PRIu64"", ns->eth.rx_bytes);
5066 	PMD_DRV_LOG(DEBUG, "rx_unicast:	%"PRIu64"", ns->eth.rx_unicast);
5067 	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
5068 	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
5069 	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
5070 	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
5071 		    pf->main_vsi->eth_stats.rx_discards);
5072 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
5073 		    ns->eth.rx_unknown_protocol);
5074 	PMD_DRV_LOG(DEBUG, "tx_bytes:	%"PRIu64"", ns->eth.tx_bytes);
5075 	PMD_DRV_LOG(DEBUG, "tx_unicast:	%"PRIu64"", ns->eth.tx_unicast);
5076 	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
5077 	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
5078 	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
5079 	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
5080 		    pf->main_vsi->eth_stats.tx_discards);
5081 	PMD_DRV_LOG(DEBUG, "tx_errors:		%"PRIu64"", ns->eth.tx_errors);
5082 
5083 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:	%"PRIu64"",
5084 		    ns->tx_dropped_link_down);
5085 	PMD_DRV_LOG(DEBUG, "crc_errors:	%"PRIu64"", ns->crc_errors);
5086 	PMD_DRV_LOG(DEBUG, "illegal_bytes:	%"PRIu64"",
5087 		    ns->illegal_bytes);
5088 	PMD_DRV_LOG(DEBUG, "error_bytes:	%"PRIu64"", ns->error_bytes);
5089 	PMD_DRV_LOG(DEBUG, "mac_local_faults:	%"PRIu64"",
5090 		    ns->mac_local_faults);
5091 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:	%"PRIu64"",
5092 		    ns->mac_remote_faults);
5093 	PMD_DRV_LOG(DEBUG, "link_xon_rx:	%"PRIu64"", ns->link_xon_rx);
5094 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:	%"PRIu64"", ns->link_xoff_rx);
5095 	PMD_DRV_LOG(DEBUG, "link_xon_tx:	%"PRIu64"", ns->link_xon_tx);
5096 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:	%"PRIu64"", ns->link_xoff_tx);
5097 	PMD_DRV_LOG(DEBUG, "rx_size_64:		%"PRIu64"", ns->rx_size_64);
5098 	PMD_DRV_LOG(DEBUG, "rx_size_127:	%"PRIu64"", ns->rx_size_127);
5099 	PMD_DRV_LOG(DEBUG, "rx_size_255:	%"PRIu64"", ns->rx_size_255);
5100 	PMD_DRV_LOG(DEBUG, "rx_size_511:	%"PRIu64"", ns->rx_size_511);
5101 	PMD_DRV_LOG(DEBUG, "rx_size_1023:	%"PRIu64"", ns->rx_size_1023);
5102 	PMD_DRV_LOG(DEBUG, "rx_size_1522:	%"PRIu64"", ns->rx_size_1522);
5103 	PMD_DRV_LOG(DEBUG, "rx_size_big:	%"PRIu64"", ns->rx_size_big);
5104 	PMD_DRV_LOG(DEBUG, "rx_undersize:	%"PRIu64"", ns->rx_undersize);
5105 	PMD_DRV_LOG(DEBUG, "rx_fragments:	%"PRIu64"", ns->rx_fragments);
5106 	PMD_DRV_LOG(DEBUG, "rx_oversize:	%"PRIu64"", ns->rx_oversize);
5107 	PMD_DRV_LOG(DEBUG, "rx_jabber:		%"PRIu64"", ns->rx_jabber);
5108 	PMD_DRV_LOG(DEBUG, "tx_size_64:		%"PRIu64"", ns->tx_size_64);
5109 	PMD_DRV_LOG(DEBUG, "tx_size_127:	%"PRIu64"", ns->tx_size_127);
5110 	PMD_DRV_LOG(DEBUG, "tx_size_255:	%"PRIu64"", ns->tx_size_255);
5111 	PMD_DRV_LOG(DEBUG, "tx_size_511:	%"PRIu64"", ns->tx_size_511);
5112 	PMD_DRV_LOG(DEBUG, "tx_size_1023:	%"PRIu64"", ns->tx_size_1023);
5113 	PMD_DRV_LOG(DEBUG, "tx_size_1522:	%"PRIu64"", ns->tx_size_1522);
5114 	PMD_DRV_LOG(DEBUG, "tx_size_big:	%"PRIu64"", ns->tx_size_big);
5115 	PMD_DRV_LOG(DEBUG, "rx_len_errors:	%"PRIu64"", ns->rx_len_errors);
5116 	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
5117 	return 0;
5118 }
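
/* Illustrative sketch, not part of the driver: the aggregated counters built
 * above are consumed through the generic stats API. "port_id" is assumed
 * valid.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx %"PRIu64" pkts / %"PRIu64" bytes, missed %"PRIu64"\n",
 *		       stats.ipackets, stats.ibytes, stats.imissed);
 *	rte_eth_stats_reset(port_id);
 */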
5119 
5120 /* Reset the statistics */
5121 static int
5122 ice_stats_reset(struct rte_eth_dev *dev)
5123 {
5124 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5125 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5126 
5127 	/* Mark PF and VSI stats to update the offset, aka "reset" */
5128 	pf->offset_loaded = false;
5129 	if (pf->main_vsi)
5130 		pf->main_vsi->offset_loaded = false;
5131 
5132 	/* read the stats, reading current register values into offset */
5133 	ice_read_stats_registers(pf, hw);
5134 
5135 	return 0;
5136 }
5137 
5138 static uint32_t
5139 ice_xstats_calc_num(void)
5140 {
5141 	uint32_t num;
5142 
5143 	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
5144 
5145 	return num;
5146 }
5147 
5148 static int
5149 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
5150 	       unsigned int n)
5151 {
5152 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5153 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5154 	unsigned int i;
5155 	unsigned int count;
5156 	struct ice_hw_port_stats *hw_stats = &pf->stats;
5157 
5158 	count = ice_xstats_calc_num();
5159 	if (n < count)
5160 		return count;
5161 
5162 	ice_read_stats_registers(pf, hw);
5163 
5164 	if (!xstats)
5165 		return 0;
5166 
5167 	count = 0;
5168 
5169 	/* Get stats from ice_eth_stats struct */
5170 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5171 		xstats[count].value =
5172 			*(uint64_t *)((char *)&hw_stats->eth +
5173 				      ice_stats_strings[i].offset);
5174 		xstats[count].id = count;
5175 		count++;
5176 	}
5177 
5178 	/* Get individual stats from ice_hw_port struct */
5179 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5180 		xstats[count].value =
5181 			*(uint64_t *)((char *)hw_stats +
5182 				      ice_hw_port_strings[i].offset);
5183 		xstats[count].id = count;
5184 		count++;
5185 	}
5186 
5187 	return count;
5188 }
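
/* Illustrative sketch, not part of the driver: the extended statistics use
 * the usual two-pass pattern, first querying the count, then the names and
 * values. Allocation error handling is omitted for brevity.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, xs, n);
 *	for (i = 0; i < n; i++)
 *		printf("%s: %"PRIu64"\n", names[i].name, xs[i].value);
 */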
5189 
5190 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
5191 				struct rte_eth_xstat_name *xstats_names,
5192 				__rte_unused unsigned int limit)
5193 {
5194 	unsigned int count = 0;
5195 	unsigned int i;
5196 
5197 	if (!xstats_names)
5198 		return ice_xstats_calc_num();
5199 
5200 	/* Note: limit checked in rte_eth_xstats_names() */
5201 
5202 	/* Get stats from ice_eth_stats struct */
5203 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5204 		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
5205 			sizeof(xstats_names[count].name));
5206 		count++;
5207 	}
5208 
5209 	/* Get individual stats from ice_hw_port struct */
5210 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5211 		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
5212 			sizeof(xstats_names[count].name));
5213 		count++;
5214 	}
5215 
5216 	return count;
5217 }
5218 
5219 static int
5220 ice_dev_flow_ops_get(struct rte_eth_dev *dev,
5221 		     const struct rte_flow_ops **ops)
5222 {
5223 	if (!dev)
5224 		return -EINVAL;
5225 
5226 	*ops = &ice_flow_ops;
5227 	return 0;
5228 }
5229 
5230 /* Add UDP tunneling port */
5231 static int
5232 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
5233 			     struct rte_eth_udp_tunnel *udp_tunnel)
5234 {
5235 	int ret = 0;
5236 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5237 
5238 	if (udp_tunnel == NULL)
5239 		return -EINVAL;
5240 
5241 	switch (udp_tunnel->prot_type) {
5242 	case RTE_TUNNEL_TYPE_VXLAN:
5243 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
5244 		break;
5245 	default:
5246 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5247 		ret = -EINVAL;
5248 		break;
5249 	}
5250 
5251 	return ret;
5252 }
5253 
5254 /* Delete UDP tunneling port */
5255 static int
5256 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
5257 			     struct rte_eth_udp_tunnel *udp_tunnel)
5258 {
5259 	int ret = 0;
5260 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5261 
5262 	if (udp_tunnel == NULL)
5263 		return -EINVAL;
5264 
5265 	switch (udp_tunnel->prot_type) {
5266 	case RTE_TUNNEL_TYPE_VXLAN:
5267 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
5268 		break;
5269 	default:
5270 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5271 		ret = -EINVAL;
5272 		break;
5273 	}
5274 
5275 	return ret;
5276 }
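
/* Illustrative sketch, not part of the driver: only VXLAN ports are accepted
 * by the two callbacks above. The standard VXLAN port 4789 is used here as
 * an assumption for the example.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	int ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *	...
 *	ret = rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
 */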
5277 
5278 static int
5279 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
5280 	      struct rte_pci_device *pci_dev)
5281 {
5282 	return rte_eth_dev_pci_generic_probe(pci_dev,
5283 					     sizeof(struct ice_adapter),
5284 					     ice_dev_init);
5285 }
5286 
5287 static int
5288 ice_pci_remove(struct rte_pci_device *pci_dev)
5289 {
5290 	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
5291 }
5292 
5293 static struct rte_pci_driver rte_ice_pmd = {
5294 	.id_table = pci_id_ice_map,
5295 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
5296 	.probe = ice_pci_probe,
5297 	.remove = ice_pci_remove,
5298 };
5299 
5300 /**
5301  * Driver initialization routine.
5302  * Invoked once at EAL init time.
5303  * Register itself as the [Poll Mode] Driver of PCI devices.
5304  */
5305 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
5306 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
5307 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
5308 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
5309 			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
5310 			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
5311 			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>");
5312 
5313 RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE);
5314 RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE);
5315 #ifdef RTE_ETHDEV_DEBUG_RX
5316 RTE_LOG_REGISTER_SUFFIX(ice_logtype_rx, rx, DEBUG);
5317 #endif
5318 #ifdef RTE_ETHDEV_DEBUG_TX
5319 RTE_LOG_REGISTER_SUFFIX(ice_logtype_tx, tx, DEBUG);
5320 #endif
5321