xref: /dpdk/drivers/net/ice/ice_ethdev.c (revision 49c19c94)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <ethdev_pci.h>
7 
8 #include <stdio.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <unistd.h>
12 
13 #include <rte_tailq.h>
14 
15 #include "eal_firmware.h"
16 
17 #include "base/ice_sched.h"
18 #include "base/ice_flow.h"
19 #include "base/ice_dcb.h"
20 #include "base/ice_common.h"
21 #include "base/ice_ptp_hw.h"
22 
23 #include "rte_pmd_ice.h"
24 #include "ice_ethdev.h"
25 #include "ice_rxtx.h"
26 #include "ice_generic_flow.h"
27 
28 /* devargs */
29 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
30 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
31 #define ICE_PROTO_XTR_ARG         "proto_xtr"
32 #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
33 #define ICE_ONE_PPS_OUT_ARG       "pps_out"
34 #define ICE_RX_LOW_LATENCY_ARG    "rx_low_latency"
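/*
 * Illustrative devargs usage (the PCI address is a placeholder and bracketed
 * values may need shell quoting); see doc/guides/nics/ice.rst for the
 * authoritative syntax:
 *   -a 0000:18:00.0,safe-mode-support=1
 *   -a 0000:18:00.0,proto_xtr=[(1,2-3):tcp,8-9:vlan]
 *   -a 0000:18:00.0,hw_debug_mask=0x80
 *   -a 0000:18:00.0,rx_low_latency=1
 */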
35 
36 #define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
37 
38 uint64_t ice_timestamp_dynflag;
39 int ice_timestamp_dynfield_offset = -1;
40 
41 static const char * const ice_valid_args[] = {
42 	ICE_SAFE_MODE_SUPPORT_ARG,
43 	ICE_PIPELINE_MODE_SUPPORT_ARG,
44 	ICE_PROTO_XTR_ARG,
45 	ICE_HW_DEBUG_MASK_ARG,
46 	ICE_ONE_PPS_OUT_ARG,
47 	ICE_RX_LOW_LATENCY_ARG,
48 	NULL
49 };
50 
51 #define PPS_OUT_DELAY_NS  1
52 
53 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
54 	.name = "intel_pmd_dynfield_proto_xtr_metadata",
55 	.size = sizeof(uint32_t),
56 	.align = __alignof__(uint32_t),
57 	.flags = 0,
58 };
59 
60 struct proto_xtr_ol_flag {
61 	const struct rte_mbuf_dynflag param;
62 	uint64_t *ol_flag;
63 	bool required;
64 };
65 
66 static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];
67 
68 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
69 	[PROTO_XTR_VLAN] = {
70 		.param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
71 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
72 	[PROTO_XTR_IPV4] = {
73 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
74 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
75 	[PROTO_XTR_IPV6] = {
76 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
77 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
78 	[PROTO_XTR_IPV6_FLOW] = {
79 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
80 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
81 	[PROTO_XTR_TCP] = {
82 		.param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
83 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
84 	[PROTO_XTR_IP_OFFSET] = {
85 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
86 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
87 };
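/*
 * Each entry above names the dynflag that ice_init_proto_xtr() registers on
 * demand, plus the mask variable that the Rx path sets in mbuf ol_flags when
 * the corresponding extraction metadata in the dynfield is valid.
 */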
88 
89 #define ICE_OS_DEFAULT_PKG_NAME		"ICE OS Default Package"
90 #define ICE_COMMS_PKG_NAME			"ICE COMMS Package"
91 #define ICE_MAX_RES_DESC_NUM        1024
92 
93 static int ice_dev_configure(struct rte_eth_dev *dev);
94 static int ice_dev_start(struct rte_eth_dev *dev);
95 static int ice_dev_stop(struct rte_eth_dev *dev);
96 static int ice_dev_close(struct rte_eth_dev *dev);
97 static int ice_dev_reset(struct rte_eth_dev *dev);
98 static int ice_dev_info_get(struct rte_eth_dev *dev,
99 			    struct rte_eth_dev_info *dev_info);
100 static int ice_link_update(struct rte_eth_dev *dev,
101 			   int wait_to_complete);
102 static int ice_dev_set_link_up(struct rte_eth_dev *dev);
103 static int ice_dev_set_link_down(struct rte_eth_dev *dev);
104 
105 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
106 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
107 static int ice_rss_reta_update(struct rte_eth_dev *dev,
108 			       struct rte_eth_rss_reta_entry64 *reta_conf,
109 			       uint16_t reta_size);
110 static int ice_rss_reta_query(struct rte_eth_dev *dev,
111 			      struct rte_eth_rss_reta_entry64 *reta_conf,
112 			      uint16_t reta_size);
113 static int ice_rss_hash_update(struct rte_eth_dev *dev,
114 			       struct rte_eth_rss_conf *rss_conf);
115 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
116 				 struct rte_eth_rss_conf *rss_conf);
117 static int ice_promisc_enable(struct rte_eth_dev *dev);
118 static int ice_promisc_disable(struct rte_eth_dev *dev);
119 static int ice_allmulti_enable(struct rte_eth_dev *dev);
120 static int ice_allmulti_disable(struct rte_eth_dev *dev);
121 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
122 			       uint16_t vlan_id,
123 			       int on);
124 static int ice_macaddr_set(struct rte_eth_dev *dev,
125 			   struct rte_ether_addr *mac_addr);
126 static int ice_macaddr_add(struct rte_eth_dev *dev,
127 			   struct rte_ether_addr *mac_addr,
128 			   __rte_unused uint32_t index,
129 			   uint32_t pool);
130 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
131 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
132 				    uint16_t queue_id);
133 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
134 				     uint16_t queue_id);
135 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
136 			      size_t fw_size);
137 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
138 			     uint16_t pvid, int on);
139 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
140 static int ice_get_eeprom(struct rte_eth_dev *dev,
141 			  struct rte_dev_eeprom_info *eeprom);
142 static int ice_stats_get(struct rte_eth_dev *dev,
143 			 struct rte_eth_stats *stats);
144 static int ice_stats_reset(struct rte_eth_dev *dev);
145 static int ice_xstats_get(struct rte_eth_dev *dev,
146 			  struct rte_eth_xstat *xstats, unsigned int n);
147 static int ice_xstats_get_names(struct rte_eth_dev *dev,
148 				struct rte_eth_xstat_name *xstats_names,
149 				unsigned int limit);
150 static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
151 				const struct rte_flow_ops **ops);
152 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
153 			struct rte_eth_udp_tunnel *udp_tunnel);
154 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
155 			struct rte_eth_udp_tunnel *udp_tunnel);
156 static int ice_timesync_enable(struct rte_eth_dev *dev);
157 static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
158 					  struct timespec *timestamp,
159 					  uint32_t flags);
160 static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
161 					  struct timespec *timestamp);
162 static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
163 static int ice_timesync_read_time(struct rte_eth_dev *dev,
164 				  struct timespec *timestamp);
165 static int ice_timesync_write_time(struct rte_eth_dev *dev,
166 				   const struct timespec *timestamp);
167 static int ice_timesync_disable(struct rte_eth_dev *dev);
168 
169 static const struct rte_pci_id pci_id_ice_map[] = {
170 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
171 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
172 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
173 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
174 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
175 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
176 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
177 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
178 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
179 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
180 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
181 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) },
182 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) },
183 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
184 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
185 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
186 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
187 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
188 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
189 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
190 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
191 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
192 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
193 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
194 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
195 	{ .vendor_id = 0, /* sentinel */ },
196 };
197 
198 static const struct eth_dev_ops ice_eth_dev_ops = {
199 	.dev_configure                = ice_dev_configure,
200 	.dev_start                    = ice_dev_start,
201 	.dev_stop                     = ice_dev_stop,
202 	.dev_close                    = ice_dev_close,
203 	.dev_reset                    = ice_dev_reset,
204 	.dev_set_link_up              = ice_dev_set_link_up,
205 	.dev_set_link_down            = ice_dev_set_link_down,
206 	.rx_queue_start               = ice_rx_queue_start,
207 	.rx_queue_stop                = ice_rx_queue_stop,
208 	.tx_queue_start               = ice_tx_queue_start,
209 	.tx_queue_stop                = ice_tx_queue_stop,
210 	.rx_queue_setup               = ice_rx_queue_setup,
211 	.rx_queue_release             = ice_dev_rx_queue_release,
212 	.tx_queue_setup               = ice_tx_queue_setup,
213 	.tx_queue_release             = ice_dev_tx_queue_release,
214 	.dev_infos_get                = ice_dev_info_get,
215 	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
216 	.link_update                  = ice_link_update,
217 	.mtu_set                      = ice_mtu_set,
218 	.mac_addr_set                 = ice_macaddr_set,
219 	.mac_addr_add                 = ice_macaddr_add,
220 	.mac_addr_remove              = ice_macaddr_remove,
221 	.vlan_filter_set              = ice_vlan_filter_set,
222 	.vlan_offload_set             = ice_vlan_offload_set,
223 	.reta_update                  = ice_rss_reta_update,
224 	.reta_query                   = ice_rss_reta_query,
225 	.rss_hash_update              = ice_rss_hash_update,
226 	.rss_hash_conf_get            = ice_rss_hash_conf_get,
227 	.promiscuous_enable           = ice_promisc_enable,
228 	.promiscuous_disable          = ice_promisc_disable,
229 	.allmulticast_enable          = ice_allmulti_enable,
230 	.allmulticast_disable         = ice_allmulti_disable,
231 	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
232 	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
233 	.fw_version_get               = ice_fw_version_get,
234 	.vlan_pvid_set                = ice_vlan_pvid_set,
235 	.rxq_info_get                 = ice_rxq_info_get,
236 	.txq_info_get                 = ice_txq_info_get,
237 	.rx_burst_mode_get            = ice_rx_burst_mode_get,
238 	.tx_burst_mode_get            = ice_tx_burst_mode_get,
239 	.get_eeprom_length            = ice_get_eeprom_length,
240 	.get_eeprom                   = ice_get_eeprom,
241 	.stats_get                    = ice_stats_get,
242 	.stats_reset                  = ice_stats_reset,
243 	.xstats_get                   = ice_xstats_get,
244 	.xstats_get_names             = ice_xstats_get_names,
245 	.xstats_reset                 = ice_stats_reset,
246 	.flow_ops_get                 = ice_dev_flow_ops_get,
247 	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
248 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
249 	.tx_done_cleanup              = ice_tx_done_cleanup,
250 	.get_monitor_addr             = ice_get_monitor_addr,
251 	.timesync_enable              = ice_timesync_enable,
252 	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
253 	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
254 	.timesync_adjust_time         = ice_timesync_adjust_time,
255 	.timesync_read_time           = ice_timesync_read_time,
256 	.timesync_write_time          = ice_timesync_write_time,
257 	.timesync_disable             = ice_timesync_disable,
258 };
259 
260 /* store statistics names and their offsets in the stats structure */
261 struct ice_xstats_name_off {
262 	char name[RTE_ETH_XSTATS_NAME_SIZE];
263 	unsigned int offset;
264 };
265 
266 static const struct ice_xstats_name_off ice_stats_strings[] = {
267 	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
268 	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
269 	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
270 	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
271 	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
272 		rx_unknown_protocol)},
273 	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
274 	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
275 	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
276 	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
277 };
278 
279 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
280 		sizeof(ice_stats_strings[0]))
281 
282 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
283 	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
284 		tx_dropped_link_down)},
285 	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
286 	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
287 		illegal_bytes)},
288 	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
289 	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
290 		mac_local_faults)},
291 	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
292 		mac_remote_faults)},
293 	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
294 		rx_len_errors)},
295 	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
296 	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
297 	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
298 	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
299 	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
300 	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
301 		rx_size_127)},
302 	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
303 		rx_size_255)},
304 	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
305 		rx_size_511)},
306 	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
307 		rx_size_1023)},
308 	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
309 		rx_size_1522)},
310 	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
311 		rx_size_big)},
312 	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
313 		rx_undersize)},
314 	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
315 		rx_oversize)},
316 	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
317 		mac_short_pkt_dropped)},
318 	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
319 		rx_fragments)},
320 	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
321 	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
322 	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
323 		tx_size_127)},
324 	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
325 		tx_size_255)},
326 	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
327 		tx_size_511)},
328 	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
329 		tx_size_1023)},
330 	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
331 		tx_size_1522)},
332 	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
333 		tx_size_big)},
334 };
335 
336 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
337 		sizeof(ice_hw_port_strings[0]))
338 
339 static void
340 ice_init_controlq_parameter(struct ice_hw *hw)
341 {
342 	/* fields for adminq */
343 	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
344 	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
345 	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
346 	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
347 
348 	/* fields for mailboxq, DPDK is used as the PF host */
349 	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
350 	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
351 	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
352 	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
353 }
354 
355 static int
356 lookup_proto_xtr_type(const char *xtr_name)
357 {
358 	static struct {
359 		const char *name;
360 		enum proto_xtr_type type;
361 	} xtr_type_map[] = {
362 		{ "vlan",      PROTO_XTR_VLAN      },
363 		{ "ipv4",      PROTO_XTR_IPV4      },
364 		{ "ipv6",      PROTO_XTR_IPV6      },
365 		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
366 		{ "tcp",       PROTO_XTR_TCP       },
367 		{ "ip_offset", PROTO_XTR_IP_OFFSET },
368 	};
369 	uint32_t i;
370 
371 	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
372 		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
373 			return xtr_type_map[i].type;
374 	}
375 
376 	return -1;
377 }
378 
379 /*
380  * Parse an element; the element can be a single number/range or a '(' ')' group.
381  * 1) A single number element is just a digit, e.g. 9
382  * 2) A single range element is two digits separated by '-', e.g. 2-6
383  * 3) A group element combines multiple 1) or 2) within '( )', e.g. (0,2-4,6)
384  *    Within a group element, '-' is used as a range separator and
385  *                            ',' separates single numbers.
386  */
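/*
 * For example (derived from the parser below), the queue-set string
 * "(0,2-4,6):tcp" assigns PROTO_XTR_TCP to devargs->proto_xtr[] entries
 * 0, 2, 3, 4 and 6.
 */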
387 static int
388 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
389 {
390 	const char *str = input;
391 	char *end = NULL;
392 	uint32_t min, max;
393 	uint32_t idx;
394 
395 	while (isblank(*str))
396 		str++;
397 
398 	if (!isdigit(*str) && *str != '(')
399 		return -1;
400 
401 	/* process a single number or a single range of numbers */
402 	if (*str != '(') {
403 		errno = 0;
404 		idx = strtoul(str, &end, 10);
405 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
406 			return -1;
407 
408 		while (isblank(*end))
409 			end++;
410 
411 		min = idx;
412 		max = idx;
413 
414 		/* process single <number>-<number> */
415 		if (*end == '-') {
416 			end++;
417 			while (isblank(*end))
418 				end++;
419 			if (!isdigit(*end))
420 				return -1;
421 
422 			errno = 0;
423 			idx = strtoul(end, &end, 10);
424 			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
425 				return -1;
426 
427 			max = idx;
428 			while (isblank(*end))
429 				end++;
430 		}
431 
432 		if (*end != ':')
433 			return -1;
434 
435 		for (idx = RTE_MIN(min, max);
436 		     idx <= RTE_MAX(min, max); idx++)
437 			devargs->proto_xtr[idx] = xtr_type;
438 
439 		return 0;
440 	}
441 
442 	/* process the set within brackets */
443 	str++;
444 	while (isblank(*str))
445 		str++;
446 	if (*str == '\0')
447 		return -1;
448 
449 	min = ICE_MAX_QUEUE_NUM;
450 	do {
451 		/* advance to the first digit */
452 		while (isblank(*str))
453 			str++;
454 		if (!isdigit(*str))
455 			return -1;
456 
457 		/* get the digit value */
458 		errno = 0;
459 		idx = strtoul(str, &end, 10);
460 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
461 			return -1;
462 
463 		/* advance to a separator: '-', ',' or ')' */
464 		while (isblank(*end))
465 			end++;
466 		if (*end == '-') {
467 			if (min == ICE_MAX_QUEUE_NUM)
468 				min = idx;
469 			else /* avoid continuous '-' */
470 				return -1;
471 		} else if (*end == ',' || *end == ')') {
472 			max = idx;
473 			if (min == ICE_MAX_QUEUE_NUM)
474 				min = idx;
475 
476 			for (idx = RTE_MIN(min, max);
477 			     idx <= RTE_MAX(min, max); idx++)
478 				devargs->proto_xtr[idx] = xtr_type;
479 
480 			min = ICE_MAX_QUEUE_NUM;
481 		} else {
482 			return -1;
483 		}
484 
485 		str = end + 1;
486 	} while (*end != ')' && *end != '\0');
487 
488 	return 0;
489 }
490 
491 static int
492 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
493 {
494 	const char *queue_start;
495 	uint32_t idx;
496 	int xtr_type;
497 	char xtr_name[32];
498 
499 	while (isblank(*queues))
500 		queues++;
501 
502 	if (*queues != '[') {
503 		xtr_type = lookup_proto_xtr_type(queues);
504 		if (xtr_type < 0)
505 			return -1;
506 
507 		devargs->proto_xtr_dflt = xtr_type;
508 
509 		return 0;
510 	}
511 
512 	queues++;
513 	do {
514 		while (isblank(*queues))
515 			queues++;
516 		if (*queues == '\0')
517 			return -1;
518 
519 		queue_start = queues;
520 
521 		/* skip over a complete bracketed group */
522 		if (*queue_start == '(') {
523 			queues += strcspn(queues, ")");
524 			if (*queues != ')')
525 				return -1;
526 		}
527 
528 		/* scan the separator ':' */
529 		queues += strcspn(queues, ":");
530 		if (*queues++ != ':')
531 			return -1;
532 		while (isblank(*queues))
533 			queues++;
534 
535 		for (idx = 0; ; idx++) {
536 			if (isblank(queues[idx]) ||
537 			    queues[idx] == ',' ||
538 			    queues[idx] == ']' ||
539 			    queues[idx] == '\0')
540 				break;
541 
542 			if (idx > sizeof(xtr_name) - 2)
543 				return -1;
544 
545 			xtr_name[idx] = queues[idx];
546 		}
547 		xtr_name[idx] = '\0';
548 		xtr_type = lookup_proto_xtr_type(xtr_name);
549 		if (xtr_type < 0)
550 			return -1;
551 
552 		queues += idx;
553 
554 		while (isblank(*queues) || *queues == ',' || *queues == ']')
555 			queues++;
556 
557 		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
558 			return -1;
559 	} while (*queues != '\0');
560 
561 	return 0;
562 }
563 
564 static int
565 handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
566 		     void *extra_args)
567 {
568 	struct ice_devargs *devargs = extra_args;
569 
570 	if (value == NULL || extra_args == NULL)
571 		return -EINVAL;
572 
573 	if (parse_queue_proto_xtr(value, devargs) < 0) {
574 		PMD_DRV_LOG(ERR,
575 			    "The protocol extraction parameter is invalid: '%s'",
576 			    value);
577 		return -1;
578 	}
579 
580 	return 0;
581 }
582 
583 static void
584 ice_check_proto_xtr_support(struct ice_hw *hw)
585 {
586 #define FLX_REG(val, fld, idx) \
587 	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
588 	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
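	/*
	 * FLX_REG(val, fld, idx) extracts field 'fld' of flexible descriptor
	 * word 'idx' from the register value 'val' using the corresponding
	 * GLFLXP_RXDID_FLX_WRD_* mask and shift.
	 */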
589 	static struct {
590 		uint32_t rxdid;
591 		uint8_t opcode;
592 		uint8_t protid_0;
593 		uint8_t protid_1;
594 	} xtr_sets[] = {
595 		[PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
596 				     ICE_RX_OPC_EXTRACT,
597 				     ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
598 		[PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
599 				     ICE_RX_OPC_EXTRACT,
600 				     ICE_PROT_IPV4_OF_OR_S,
601 				     ICE_PROT_IPV4_OF_OR_S },
602 		[PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
603 				     ICE_RX_OPC_EXTRACT,
604 				     ICE_PROT_IPV6_OF_OR_S,
605 				     ICE_PROT_IPV6_OF_OR_S },
606 		[PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
607 					  ICE_RX_OPC_EXTRACT,
608 					  ICE_PROT_IPV6_OF_OR_S,
609 					  ICE_PROT_IPV6_OF_OR_S },
610 		[PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
611 				    ICE_RX_OPC_EXTRACT,
612 				    ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
613 		[PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
614 					  ICE_RX_OPC_PROTID,
615 					  ICE_PROT_IPV4_OF_OR_S,
616 					  ICE_PROT_IPV6_OF_OR_S },
617 	};
618 	uint32_t i;
619 
620 	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
621 		uint32_t rxdid = xtr_sets[i].rxdid;
622 		uint32_t v;
623 
624 		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
625 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
626 
627 			if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
628 			    FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
629 				ice_proto_xtr_hw_support[i] = true;
630 		}
631 
632 		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
633 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
634 
635 			if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
636 			    FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
637 				ice_proto_xtr_hw_support[i] = true;
638 		}
639 	}
640 }
641 
642 static int
643 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
644 		  uint32_t num)
645 {
646 	struct pool_entry *entry;
647 
648 	if (!pool || !num)
649 		return -EINVAL;
650 
651 	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
652 	if (!entry) {
653 		PMD_INIT_LOG(ERR,
654 			     "Failed to allocate memory for resource pool");
655 		return -ENOMEM;
656 	}
657 
658 	/* initialize the queue heap */
659 	pool->num_free = num;
660 	pool->num_alloc = 0;
661 	pool->base = base;
662 	LIST_INIT(&pool->alloc_list);
663 	LIST_INIT(&pool->free_list);
664 
665 	/* Initialize element  */
666 	entry->base = 0;
667 	entry->len = num;
668 
669 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
670 	return 0;
671 }
672 
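/*
 * Best-fit allocation from the pool's free list (sketch of the behaviour
 * below): an exact-length entry is moved to the alloc list as-is; a larger
 * entry is split, e.g. a free entry of length 8 serving a request of 3
 * leaves a free remainder of length 5 starting 3 entries higher.
 */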
673 static int
674 ice_res_pool_alloc(struct ice_res_pool_info *pool,
675 		   uint16_t num)
676 {
677 	struct pool_entry *entry, *valid_entry;
678 
679 	if (!pool || !num) {
680 		PMD_INIT_LOG(ERR, "Invalid parameter");
681 		return -EINVAL;
682 	}
683 
684 	if (pool->num_free < num) {
685 		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
686 			     num, pool->num_free);
687 		return -ENOMEM;
688 	}
689 
690 	valid_entry = NULL;
691 	/* Look up the free list and find the best-fit entry */
692 	LIST_FOREACH(entry, &pool->free_list, next) {
693 		if (entry->len >= num) {
694 			/* Find best one */
695 			if (entry->len == num) {
696 				valid_entry = entry;
697 				break;
698 			}
699 			if (!valid_entry ||
700 			    valid_entry->len > entry->len)
701 				valid_entry = entry;
702 		}
703 	}
704 
705 	/* No entry found to satisfy the request, return */
706 	if (!valid_entry) {
707 		PMD_INIT_LOG(ERR, "No valid entry found");
708 		return -ENOMEM;
709 	}
710 	/**
711 	 * The entry has exactly the requested number of queues,
712 	 * remove it from the free_list.
713 	 */
714 	if (valid_entry->len == num) {
715 		LIST_REMOVE(valid_entry, next);
716 	} else {
717 		/**
718 		 * The entry has more queues than requested,
719 		 * create a new entry for the alloc_list and subtract the
720 		 * allocated base and length from the free_list entry.
721 		 */
722 		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
723 		if (!entry) {
724 			PMD_INIT_LOG(ERR,
725 				     "Failed to allocate memory for "
726 				     "resource pool");
727 			return -ENOMEM;
728 		}
729 		entry->base = valid_entry->base;
730 		entry->len = num;
731 		valid_entry->base += num;
732 		valid_entry->len -= num;
733 		valid_entry = entry;
734 	}
735 
736 	/* Insert it into alloc list, not sorted */
737 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
738 
739 	pool->num_free -= valid_entry->len;
740 	pool->num_alloc += valid_entry->len;
741 
742 	return valid_entry->base + pool->base;
743 }
744 
745 static void
746 ice_res_pool_destroy(struct ice_res_pool_info *pool)
747 {
748 	struct pool_entry *entry, *next_entry;
749 
750 	if (!pool)
751 		return;
752 
753 	for (entry = LIST_FIRST(&pool->alloc_list);
754 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
755 	     entry = next_entry) {
756 		LIST_REMOVE(entry, next);
757 		rte_free(entry);
758 	}
759 
760 	for (entry = LIST_FIRST(&pool->free_list);
761 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
762 	     entry = next_entry) {
763 		LIST_REMOVE(entry, next);
764 		rte_free(entry);
765 	}
766 
767 	pool->num_free = 0;
768 	pool->num_alloc = 0;
769 	pool->base = 0;
770 	LIST_INIT(&pool->alloc_list);
771 	LIST_INIT(&pool->free_list);
772 }
773 
774 static void
775 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
776 {
777 	/* Set VSI LUT selection */
778 	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
779 			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
780 	/* Set Hash scheme */
781 	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
782 			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
783 	/* enable TC */
784 	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
785 }
786 
787 static enum ice_status
788 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
789 				struct ice_aqc_vsi_props *info,
790 				uint8_t enabled_tcmap)
791 {
792 	uint16_t bsf, qp_idx;
793 
794 	/* default tc 0 now. Multi-TC support needs to be added later.
795 	 * Configure TC and queue mapping parameters, for enabled TC,
796 	 * allocate qpnum_per_tc queues to this traffic class.
797 	 */
798 	if (enabled_tcmap != 0x01) {
799 		PMD_INIT_LOG(ERR, "only TC0 is supported");
800 		return -ENOTSUP;
801 	}
802 
803 	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
804 	bsf = rte_bsf32(vsi->nb_qps);
805 	/* Adjust the queue number to actual queues that can be applied */
806 	vsi->nb_qps = 0x1 << bsf;
807 
808 	qp_idx = 0;
809 	/* Set tc and queue mapping with VSI */
810 	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
811 						ICE_AQ_VSI_TC_Q_OFFSET_S) |
812 					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
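	/*
	 * Example: with nb_qps = 16, bsf = 4, so tc_mapping[0] advertises a
	 * queue offset of 0 and 2^4 = 16 queues for TC0.
	 */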
813 
814 	/* Associate queue number with VSI */
815 	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
816 	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
817 	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
818 	info->valid_sections |=
819 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
820 	/* Set the info.ingress_table and info.egress_table
821 	 * for UP translate table. Now just set it to 1:1 map by default
822 	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
823 	 */
824 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
825 	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
826 	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
827 	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
828 	return 0;
829 }
830 
831 static int
832 ice_init_mac_address(struct rte_eth_dev *dev)
833 {
834 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
835 
836 	if (!rte_is_unicast_ether_addr
837 		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
838 		PMD_INIT_LOG(ERR, "Invalid MAC address");
839 		return -EINVAL;
840 	}
841 
842 	rte_ether_addr_copy(
843 		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
844 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
845 
846 	dev->data->mac_addrs =
847 		rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0);
848 	if (!dev->data->mac_addrs) {
849 		PMD_INIT_LOG(ERR,
850 			     "Failed to allocate memory to store mac address");
851 		return -ENOMEM;
852 	}
853 	/* store it to dev data */
854 	rte_ether_addr_copy(
855 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
856 		&dev->data->mac_addrs[0]);
857 	return 0;
858 }
859 
860 /* Find a specific MAC filter */
861 static struct ice_mac_filter *
862 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
863 {
864 	struct ice_mac_filter *f;
865 
866 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
867 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
868 			return f;
869 	}
870 
871 	return NULL;
872 }
873 
874 static int
875 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
876 {
877 	struct ice_fltr_list_entry *m_list_itr = NULL;
878 	struct ice_mac_filter *f;
879 	struct LIST_HEAD_TYPE list_head;
880 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
881 	int ret = 0;
882 
883 	/* If it's added and configured, return */
884 	f = ice_find_mac_filter(vsi, mac_addr);
885 	if (f) {
886 		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
887 		return 0;
888 	}
889 
890 	INIT_LIST_HEAD(&list_head);
891 
892 	m_list_itr = (struct ice_fltr_list_entry *)
893 		ice_malloc(hw, sizeof(*m_list_itr));
894 	if (!m_list_itr) {
895 		ret = -ENOMEM;
896 		goto DONE;
897 	}
898 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
899 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
900 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
901 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
902 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
903 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
904 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
905 
906 	LIST_ADD(&m_list_itr->list_entry, &list_head);
907 
908 	/* Add the mac */
909 	ret = ice_add_mac(hw, &list_head);
910 	if (ret != ICE_SUCCESS) {
911 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
912 		ret = -EINVAL;
913 		goto DONE;
914 	}
915 	/* Add the mac addr into mac list */
916 	f = rte_zmalloc(NULL, sizeof(*f), 0);
917 	if (!f) {
918 		PMD_DRV_LOG(ERR, "failed to allocate memory");
919 		ret = -ENOMEM;
920 		goto DONE;
921 	}
922 	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
923 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
924 	vsi->mac_num++;
925 
926 	ret = 0;
927 
928 DONE:
929 	rte_free(m_list_itr);
930 	return ret;
931 }
932 
933 static int
934 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
935 {
936 	struct ice_fltr_list_entry *m_list_itr = NULL;
937 	struct ice_mac_filter *f;
938 	struct LIST_HEAD_TYPE list_head;
939 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
940 	int ret = 0;
941 
942 	/* Can't find it, return an error */
943 	f = ice_find_mac_filter(vsi, mac_addr);
944 	if (!f)
945 		return -EINVAL;
946 
947 	INIT_LIST_HEAD(&list_head);
948 
949 	m_list_itr = (struct ice_fltr_list_entry *)
950 		ice_malloc(hw, sizeof(*m_list_itr));
951 	if (!m_list_itr) {
952 		ret = -ENOMEM;
953 		goto DONE;
954 	}
955 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
956 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
957 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
958 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
959 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
960 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
961 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
962 
963 	LIST_ADD(&m_list_itr->list_entry, &list_head);
964 
965 	/* remove the mac filter */
966 	ret = ice_remove_mac(hw, &list_head);
967 	if (ret != ICE_SUCCESS) {
968 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
969 		ret = -EINVAL;
970 		goto DONE;
971 	}
972 
973 	/* Remove the mac addr from mac list */
974 	TAILQ_REMOVE(&vsi->mac_list, f, next);
975 	rte_free(f);
976 	vsi->mac_num--;
977 
978 	ret = 0;
979 DONE:
980 	rte_free(m_list_itr);
981 	return ret;
982 }
983 
984 /* Find a specific VLAN filter */
985 static struct ice_vlan_filter *
986 ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
987 {
988 	struct ice_vlan_filter *f;
989 
990 	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
991 		if (vlan->tpid == f->vlan_info.vlan.tpid &&
992 		    vlan->vid == f->vlan_info.vlan.vid)
993 			return f;
994 	}
995 
996 	return NULL;
997 }
998 
999 static int
1000 ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1001 {
1002 	struct ice_fltr_list_entry *v_list_itr = NULL;
1003 	struct ice_vlan_filter *f;
1004 	struct LIST_HEAD_TYPE list_head;
1005 	struct ice_hw *hw;
1006 	int ret = 0;
1007 
1008 	if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
1009 		return -EINVAL;
1010 
1011 	hw = ICE_VSI_TO_HW(vsi);
1012 
1013 	/* If it's added and configured, return. */
1014 	f = ice_find_vlan_filter(vsi, vlan);
1015 	if (f) {
1016 		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
1017 		return 0;
1018 	}
1019 
1020 	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
1021 		return 0;
1022 
1023 	INIT_LIST_HEAD(&list_head);
1024 
1025 	v_list_itr = (struct ice_fltr_list_entry *)
1026 		      ice_malloc(hw, sizeof(*v_list_itr));
1027 	if (!v_list_itr) {
1028 		ret = -ENOMEM;
1029 		goto DONE;
1030 	}
1031 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
1032 	v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1033 	v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1034 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1035 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1036 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1037 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1038 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
1039 
1040 	LIST_ADD(&v_list_itr->list_entry, &list_head);
1041 
1042 	/* Add the vlan */
1043 	ret = ice_add_vlan(hw, &list_head);
1044 	if (ret != ICE_SUCCESS) {
1045 		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
1046 		ret = -EINVAL;
1047 		goto DONE;
1048 	}
1049 
1050 	/* Add vlan into vlan list */
1051 	f = rte_zmalloc(NULL, sizeof(*f), 0);
1052 	if (!f) {
1053 		PMD_DRV_LOG(ERR, "failed to allocate memory");
1054 		ret = -ENOMEM;
1055 		goto DONE;
1056 	}
1057 	f->vlan_info.vlan.tpid = vlan->tpid;
1058 	f->vlan_info.vlan.vid = vlan->vid;
1059 	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
1060 	vsi->vlan_num++;
1061 
1062 	ret = 0;
1063 
1064 DONE:
1065 	rte_free(v_list_itr);
1066 	return ret;
1067 }
1068 
1069 static int
1070 ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1071 {
1072 	struct ice_fltr_list_entry *v_list_itr = NULL;
1073 	struct ice_vlan_filter *f;
1074 	struct LIST_HEAD_TYPE list_head;
1075 	struct ice_hw *hw;
1076 	int ret = 0;
1077 
1078 	if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
1079 		return -EINVAL;
1080 
1081 	hw = ICE_VSI_TO_HW(vsi);
1082 
1083 	/* Can't find it, return an error */
1084 	f = ice_find_vlan_filter(vsi, vlan);
1085 	if (!f)
1086 		return -EINVAL;
1087 
1088 	INIT_LIST_HEAD(&list_head);
1089 
1090 	v_list_itr = (struct ice_fltr_list_entry *)
1091 		      ice_malloc(hw, sizeof(*v_list_itr));
1092 	if (!v_list_itr) {
1093 		ret = -ENOMEM;
1094 		goto DONE;
1095 	}
1096 
1097 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
1098 	v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1099 	v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1100 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1101 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1102 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1103 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1104 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
1105 
1106 	LIST_ADD(&v_list_itr->list_entry, &list_head);
1107 
1108 	/* remove the vlan filter */
1109 	ret = ice_remove_vlan(hw, &list_head);
1110 	if (ret != ICE_SUCCESS) {
1111 		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
1112 		ret = -EINVAL;
1113 		goto DONE;
1114 	}
1115 
1116 	/* Remove the vlan id from vlan list */
1117 	TAILQ_REMOVE(&vsi->vlan_list, f, next);
1118 	rte_free(f);
1119 	vsi->vlan_num--;
1120 
1121 	ret = 0;
1122 DONE:
1123 	rte_free(v_list_itr);
1124 	return ret;
1125 }
1126 
1127 static int
1128 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
1129 {
1130 	struct ice_mac_filter *m_f;
1131 	struct ice_vlan_filter *v_f;
1132 	void *temp;
1133 	int ret = 0;
1134 
1135 	if (!vsi || !vsi->mac_num)
1136 		return -EINVAL;
1137 
1138 	RTE_TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) {
1139 		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
1140 		if (ret != ICE_SUCCESS) {
1141 			ret = -EINVAL;
1142 			goto DONE;
1143 		}
1144 	}
1145 
1146 	if (vsi->vlan_num == 0)
1147 		return 0;
1148 
1149 	RTE_TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) {
1150 		ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
1151 		if (ret != ICE_SUCCESS) {
1152 			ret = -EINVAL;
1153 			goto DONE;
1154 		}
1155 	}
1156 
1157 DONE:
1158 	return ret;
1159 }
1160 
1161 /* Enable IRQ0 */
1162 static void
1163 ice_pf_enable_irq0(struct ice_hw *hw)
1164 {
1165 	/* reset the registers */
1166 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
1167 	ICE_READ_REG(hw, PFINT_OICR);
1168 
1169 #ifdef ICE_LSE_SPT
1170 	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
1171 		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
1172 				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
1173 
1174 	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
1175 		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
1176 		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
1177 		       PFINT_OICR_CTL_ITR_INDX_M) |
1178 		      PFINT_OICR_CTL_CAUSE_ENA_M);
1179 
1180 	ICE_WRITE_REG(hw, PFINT_FW_CTL,
1181 		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
1182 		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
1183 		       PFINT_FW_CTL_ITR_INDX_M) |
1184 		      PFINT_FW_CTL_CAUSE_ENA_M);
1185 #else
1186 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
1187 #endif
1188 
1189 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1190 		      GLINT_DYN_CTL_INTENA_M |
1191 		      GLINT_DYN_CTL_CLEARPBA_M |
1192 		      GLINT_DYN_CTL_ITR_INDX_M);
1193 
1194 	ice_flush(hw);
1195 }
1196 
1197 /* Disable IRQ0 */
1198 static void
1199 ice_pf_disable_irq0(struct ice_hw *hw)
1200 {
1201 	/* Disable all interrupt types */
1202 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1203 	ice_flush(hw);
1204 }
1205 
1206 #ifdef ICE_LSE_SPT
1207 static void
1208 ice_handle_aq_msg(struct rte_eth_dev *dev)
1209 {
1210 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1211 	struct ice_ctl_q_info *cq = &hw->adminq;
1212 	struct ice_rq_event_info event;
1213 	uint16_t pending, opcode;
1214 	int ret;
1215 
1216 	event.buf_len = ICE_AQ_MAX_BUF_LEN;
1217 	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
1218 	if (!event.msg_buf) {
1219 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
1220 		return;
1221 	}
1222 
1223 	pending = 1;
1224 	while (pending) {
1225 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1226 
1227 		if (ret != ICE_SUCCESS) {
1228 			PMD_DRV_LOG(INFO,
1229 				    "Failed to read msg from AdminQ, "
1230 				    "adminq_err: %u",
1231 				    hw->adminq.sq_last_status);
1232 			break;
1233 		}
1234 		opcode = rte_le_to_cpu_16(event.desc.opcode);
1235 
1236 		switch (opcode) {
1237 		case ice_aqc_opc_get_link_status:
1238 			ret = ice_link_update(dev, 0);
1239 			if (!ret)
1240 				rte_eth_dev_callback_process
1241 					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1242 			break;
1243 		default:
1244 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1245 				    opcode);
1246 			break;
1247 		}
1248 	}
1249 	rte_free(event.msg_buf);
1250 }
1251 #endif
1252 
1253 /**
1254  * Interrupt handler triggered by the NIC to handle
1255  * specific interrupt causes.
1256  *
1257  * @param handle
1258  *  Pointer to interrupt handle.
1259  * @param param
1260  *  The address of the parameter (struct rte_eth_dev *) registered before.
1261  *
1262  * @return
1263  *  void
1264  */
1265 static void
1266 ice_interrupt_handler(void *param)
1267 {
1268 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1269 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1270 	uint32_t oicr;
1271 	uint32_t reg;
1272 	uint8_t pf_num;
1273 	uint8_t event;
1274 	uint16_t queue;
1275 	int ret;
1276 #ifdef ICE_LSE_SPT
1277 	uint32_t int_fw_ctl;
1278 #endif
1279 
1280 	/* Disable interrupt */
1281 	ice_pf_disable_irq0(hw);
1282 
1283 	/* read out interrupt causes */
1284 	oicr = ICE_READ_REG(hw, PFINT_OICR);
1285 #ifdef ICE_LSE_SPT
1286 	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1287 #endif
1288 
1289 	/* No interrupt event indicated */
1290 	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1291 		PMD_DRV_LOG(INFO, "No interrupt event");
1292 		goto done;
1293 	}
1294 
1295 #ifdef ICE_LSE_SPT
1296 	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1297 		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1298 		ice_handle_aq_msg(dev);
1299 	}
1300 #else
1301 	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1302 		PMD_DRV_LOG(INFO, "OICR: link state change event");
1303 		ret = ice_link_update(dev, 0);
1304 		if (!ret)
1305 			rte_eth_dev_callback_process
1306 				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1307 	}
1308 #endif
1309 
1310 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
1311 		PMD_DRV_LOG(WARNING, "OICR: MDD event");
1312 		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1313 		if (reg & GL_MDET_TX_PQM_VALID_M) {
1314 			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1315 				 GL_MDET_TX_PQM_PF_NUM_S;
1316 			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1317 				GL_MDET_TX_PQM_MAL_TYPE_S;
1318 			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1319 				GL_MDET_TX_PQM_QNUM_S;
1320 
1321 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1322 				    "%d by PQM on TX queue %d PF# %d",
1323 				    event, queue, pf_num);
1324 		}
1325 
1326 		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1327 		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1328 			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1329 				 GL_MDET_TX_TCLAN_PF_NUM_S;
1330 			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1331 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1332 			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1333 				GL_MDET_TX_TCLAN_QNUM_S;
1334 
1335 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1336 				    "%d by TCLAN on TX queue %d PF# %d",
1337 				    event, queue, pf_num);
1338 		}
1339 	}
1340 done:
1341 	/* Enable interrupt */
1342 	ice_pf_enable_irq0(hw);
1343 	rte_intr_ack(dev->intr_handle);
1344 }
1345 
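/*
 * Set up the per-queue protocol extraction table from devargs and, when any
 * queue requests extraction, register the metadata dynfield and the per-type
 * dynflags. Applications can look up the same field/flag names (see
 * rte_pmd_ice.h) to read the extracted metadata from received mbufs.
 */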
1346 static void
1347 ice_init_proto_xtr(struct rte_eth_dev *dev)
1348 {
1349 	struct ice_adapter *ad =
1350 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1351 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1352 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1353 	const struct proto_xtr_ol_flag *ol_flag;
1354 	bool proto_xtr_enable = false;
1355 	int offset;
1356 	uint16_t i;
1357 
1358 	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1359 	if (unlikely(pf->proto_xtr == NULL)) {
1360 		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1361 		return;
1362 	}
1363 
1364 	for (i = 0; i < pf->lan_nb_qps; i++) {
1365 		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1366 				   ad->devargs.proto_xtr[i] :
1367 				   ad->devargs.proto_xtr_dflt;
1368 
1369 		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1370 			uint8_t type = pf->proto_xtr[i];
1371 
1372 			ice_proto_xtr_ol_flag_params[type].required = true;
1373 			proto_xtr_enable = true;
1374 		}
1375 	}
1376 
1377 	if (likely(!proto_xtr_enable))
1378 		return;
1379 
1380 	ice_check_proto_xtr_support(hw);
1381 
1382 	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1383 	if (unlikely(offset == -1)) {
1384 		PMD_DRV_LOG(ERR,
1385 			    "Protocol extraction metadata is disabled in mbuf with error %d",
1386 			    -rte_errno);
1387 		return;
1388 	}
1389 
1390 	PMD_DRV_LOG(DEBUG,
1391 		    "Protocol extraction metadata offset in mbuf is: %d",
1392 		    offset);
1393 	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1394 
1395 	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1396 		ol_flag = &ice_proto_xtr_ol_flag_params[i];
1397 
1398 		if (!ol_flag->required)
1399 			continue;
1400 
1401 		if (!ice_proto_xtr_hw_support[i]) {
1402 			PMD_DRV_LOG(ERR,
1403 				    "Protocol extraction type %u is not supported in hardware",
1404 				    i);
1405 			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1406 			break;
1407 		}
1408 
1409 		offset = rte_mbuf_dynflag_register(&ol_flag->param);
1410 		if (unlikely(offset == -1)) {
1411 			PMD_DRV_LOG(ERR,
1412 				    "Protocol extraction offload '%s' failed to register with error %d",
1413 				    ol_flag->param.name, -rte_errno);
1414 
1415 			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1416 			break;
1417 		}
1418 
1419 		PMD_DRV_LOG(DEBUG,
1420 			    "Protocol extraction offload '%s' offset in mbuf is: %d",
1421 			    ol_flag->param.name, offset);
1422 		*ol_flag->ol_flag = 1ULL << offset;
1423 	}
1424 }
1425 
1426 /* Initialize SW parameters of the PF */
1427 static int
1428 ice_pf_sw_init(struct rte_eth_dev *dev)
1429 {
1430 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1431 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1432 
1433 	pf->lan_nb_qp_max =
1434 		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1435 				  hw->func_caps.common_cap.num_rxq);
1436 
1437 	pf->lan_nb_qps = pf->lan_nb_qp_max;
1438 
1439 	ice_init_proto_xtr(dev);
1440 
1441 	if (hw->func_caps.fd_fltr_guar > 0 ||
1442 	    hw->func_caps.fd_fltr_best_effort > 0) {
1443 		pf->flags |= ICE_FLAG_FDIR;
1444 		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1445 		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1446 	} else {
1447 		pf->fdir_nb_qps = 0;
1448 	}
1449 	pf->fdir_qp_offset = 0;
1450 
1451 	return 0;
1452 }
1453 
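/*
 * Allocate and configure a VSI of the given type: fill the VSI context
 * (RSS, VLAN and FDIR sections plus the TC/queue mapping), reserve MSI-X
 * vectors, create the VSI through the admin queue and, for the PF VSI,
 * install the default unicast and broadcast MAC filters.
 */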
1454 struct ice_vsi *
1455 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1456 {
1457 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1458 	struct ice_vsi *vsi = NULL;
1459 	struct ice_vsi_ctx vsi_ctx;
1460 	int ret;
1461 	struct rte_ether_addr broadcast = {
1462 		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1463 	struct rte_ether_addr mac_addr;
1464 	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1465 	uint8_t tc_bitmap = 0x1;
1466 	uint16_t cfg;
1467 
1468 	/* hw->num_lports = 1 in NIC mode */
1469 	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1470 	if (!vsi)
1471 		return NULL;
1472 
1473 	vsi->idx = pf->next_vsi_idx;
1474 	pf->next_vsi_idx++;
1475 	vsi->type = type;
1476 	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1477 	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1478 	vsi->vlan_anti_spoof_on = 0;
1479 	vsi->vlan_filter_on = 1;
1480 	TAILQ_INIT(&vsi->mac_list);
1481 	TAILQ_INIT(&vsi->vlan_list);
1482 
1483 	/* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
1484 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1485 			ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
1486 			hw->func_caps.common_cap.rss_table_size;
1487 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1488 
1489 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1490 	switch (type) {
1491 	case ICE_VSI_PF:
1492 		vsi->nb_qps = pf->lan_nb_qps;
1493 		vsi->base_queue = 1;
1494 		ice_vsi_config_default_rss(&vsi_ctx.info);
1495 		vsi_ctx.alloc_from_pool = true;
1496 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1497 		/* switch_id is queried by get_switch_config aq, which is done
1498 		 * by ice_init_hw
1499 		 */
1500 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1501 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1502 		/* Allow all untagged or tagged packets */
1503 		vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
1504 		vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
1505 		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1506 					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1507 		if (ice_is_dvm_ena(hw)) {
1508 			vsi_ctx.info.outer_vlan_flags =
1509 				(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
1510 				 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
1511 				ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
1512 			vsi_ctx.info.outer_vlan_flags |=
1513 				(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
1514 				 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
1515 				ICE_AQ_VSI_OUTER_TAG_TYPE_M;
1516 		}
1517 
1518 		/* FDIR */
1519 		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1520 			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1521 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1522 		cfg = ICE_AQ_VSI_FD_ENABLE;
1523 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1524 		vsi_ctx.info.max_fd_fltr_dedicated =
1525 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1526 		vsi_ctx.info.max_fd_fltr_shared =
1527 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1528 
1529 		/* Enable VLAN/UP trip */
1530 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1531 						      &vsi_ctx.info,
1532 						      ICE_DEFAULT_TCMAP);
1533 		if (ret) {
1534 			PMD_INIT_LOG(ERR,
1535 				     "tc queue mapping with vsi failed, "
1536 				     "err = %d",
1537 				     ret);
1538 			goto fail_mem;
1539 		}
1540 
1541 		break;
1542 	case ICE_VSI_CTRL:
1543 		vsi->nb_qps = pf->fdir_nb_qps;
1544 		vsi->base_queue = ICE_FDIR_QUEUE_ID;
1545 		vsi_ctx.alloc_from_pool = true;
1546 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1547 
1548 		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1549 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1550 		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1551 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1552 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1553 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1554 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1555 						      &vsi_ctx.info,
1556 						      ICE_DEFAULT_TCMAP);
1557 		if (ret) {
1558 			PMD_INIT_LOG(ERR,
1559 				     "tc queue mapping with vsi failed, "
1560 				     "err = %d",
1561 				     ret);
1562 			goto fail_mem;
1563 		}
1564 		break;
1565 	default:
1566 		/* for other types of VSI */
1567 		PMD_INIT_LOG(ERR, "other types of VSI not supported");
1568 		goto fail_mem;
1569 	}
1570 
1571 	/* VF has MSIX interrupt in VF range, don't allocate here */
1572 	if (type == ICE_VSI_PF) {
1573 		ret = ice_res_pool_alloc(&pf->msix_pool,
1574 					 RTE_MIN(vsi->nb_qps,
1575 						 RTE_MAX_RXTX_INTR_VEC_ID));
1576 		if (ret < 0) {
1577 			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1578 				     vsi->vsi_id, ret);
1579 		}
1580 		vsi->msix_intr = ret;
1581 		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1582 	} else if (type == ICE_VSI_CTRL) {
1583 		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1584 		if (ret < 0) {
1585 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1586 				    vsi->vsi_id, ret);
1587 		}
1588 		vsi->msix_intr = ret;
1589 		vsi->nb_msix = 1;
1590 	} else {
1591 		vsi->msix_intr = 0;
1592 		vsi->nb_msix = 0;
1593 	}
1594 	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1595 	if (ret != ICE_SUCCESS) {
1596 		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1597 		goto fail_mem;
1598 	}
1599 	/* store VSI information in the SW structure */
1600 	vsi->vsi_id = vsi_ctx.vsi_num;
1601 	vsi->info = vsi_ctx.info;
1602 	pf->vsis_allocated = vsi_ctx.vsis_allocd;
1603 	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1604 
1605 	if (type == ICE_VSI_PF) {
1606 		/* MAC configuration */
1607 		rte_ether_addr_copy((struct rte_ether_addr *)
1608 					hw->port_info->mac.perm_addr,
1609 				    &pf->dev_addr);
1610 
1611 		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1612 		ret = ice_add_mac_filter(vsi, &mac_addr);
1613 		if (ret != ICE_SUCCESS)
1614 			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1615 
1616 		rte_ether_addr_copy(&broadcast, &mac_addr);
1617 		ret = ice_add_mac_filter(vsi, &mac_addr);
1618 		if (ret != ICE_SUCCESS)
1619 			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1620 	}
1621 
1622 	/* At the beginning, only TC0. */
1623 	/* What we need here is the maximum number of TX queues.
1624 	 * Currently vsi->nb_qps holds that value.
1625 	 * Correct this if that ever changes.
1626 	 */
1627 	max_txqs[0] = vsi->nb_qps;
1628 	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1629 			      tc_bitmap, max_txqs);
1630 	if (ret != ICE_SUCCESS)
1631 		PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1632 
1633 	return vsi;
1634 fail_mem:
1635 	rte_free(vsi);
1636 	pf->next_vsi_idx--;
1637 	return NULL;
1638 }
1639 
1640 static int
1641 ice_send_driver_ver(struct ice_hw *hw)
1642 {
1643 	struct ice_driver_ver dv;
1644 
1645 	/* we don't have a driver version, use 0 as a dummy */
1646 	dv.major_ver = 0;
1647 	dv.minor_ver = 0;
1648 	dv.build_ver = 0;
1649 	dv.subbuild_ver = 0;
1650 	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1651 
1652 	return ice_aq_send_driver_ver(hw, &dv, NULL);
1653 }
1654 
1655 static int
1656 ice_pf_setup(struct ice_pf *pf)
1657 {
1658 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1659 	struct ice_vsi *vsi;
1660 	uint16_t unused;
1661 
1662 	/* Clear all stats counters */
1663 	pf->offset_loaded = false;
1664 	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1665 	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1666 	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1667 	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1668 
1669 	/* force guaranteed filter pool for PF */
1670 	ice_alloc_fd_guar_item(hw, &unused,
1671 			       hw->func_caps.fd_fltr_guar);
1672 	/* force shared filter pool for PF */
1673 	ice_alloc_fd_shrd_item(hw, &unused,
1674 			       hw->func_caps.fd_fltr_best_effort);
1675 
1676 	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1677 	if (!vsi) {
1678 		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1679 		return -EINVAL;
1680 	}
1681 
1682 	pf->main_vsi = vsi;
1683 
1684 	return 0;
1685 }
1686 
1687 static enum ice_pkg_type
1688 ice_load_pkg_type(struct ice_hw *hw)
1689 {
1690 	enum ice_pkg_type package_type;
1691 
1692 	/* store the activated package type (OS default or Comms) */
1693 	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1694 		ICE_PKG_NAME_SIZE))
1695 		package_type = ICE_PKG_TYPE_OS_DEFAULT;
1696 	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1697 		ICE_PKG_NAME_SIZE))
1698 		package_type = ICE_PKG_TYPE_COMMS;
1699 	else
1700 		package_type = ICE_PKG_TYPE_UNKNOWN;
1701 
1702 	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)",
1703 		hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1704 		hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1705 		hw->active_pkg_name,
1706 		ice_is_dvm_ena(hw) ? "double" : "single");
1707 
1708 	return package_type;
1709 }
1710 
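/*
 * DDP package search order implemented below: a device-specific
 * "ice-<DSN>.pkg" in the updates path and then in the default path (when the
 * device serial number is available), followed by the generic package in the
 * updates and default locations.
 */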
1711 int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn)
1712 {
1713 	struct ice_hw *hw = &adapter->hw;
1714 	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1715 	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1716 	void *buf;
1717 	size_t bufsz;
1718 	int err;
1719 
1720 	if (!use_dsn)
1721 		goto no_dsn;
1722 
1723 	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1724 	snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1725 		"ice-%016" PRIx64 ".pkg", dsn);
1726 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1727 		ICE_MAX_PKG_FILENAME_SIZE);
1728 	strcat(pkg_file, opt_ddp_filename);
1729 	if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1730 		goto load_fw;
1731 
1732 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1733 		ICE_MAX_PKG_FILENAME_SIZE);
1734 	strcat(pkg_file, opt_ddp_filename);
1735 	if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1736 		goto load_fw;
1737 
1738 no_dsn:
1739 	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1740 	if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1741 		goto load_fw;
1742 
1743 	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1744 	if (rte_firmware_read(pkg_file, &buf, &bufsz) < 0) {
1745 		PMD_INIT_LOG(ERR, "failed to find a DDP package file\n");
1746 		return -1;
1747 	}
1748 
1749 load_fw:
1750 	PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_file);
1751 
1752 	err = ice_copy_and_init_pkg(hw, buf, bufsz);
1753 	if (err) {
1754 		PMD_INIT_LOG(ERR, "ice_copy_and_init_pkg failed: %d\n", err);
1755 		goto out;
1756 	}
1757 
1758 	/* store the loaded pkg type info */
1759 	adapter->active_pkg_type = ice_load_pkg_type(hw);
1760 
1761 out:
1762 	free(buf);
1763 	return err;
1764 }
1765 
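/* Read the index of the first Rx queue allocated to this PF from the
 * PFLAN_RX_QALLOC register.
 */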
1766 static void
1767 ice_base_queue_get(struct ice_pf *pf)
1768 {
1769 	uint32_t reg;
1770 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1771 
1772 	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1773 	if (reg & PFLAN_RX_QALLOC_VALID_M) {
1774 		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1775 	} else {
1776 		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
1777 					" index");
1778 	}
1779 }
1780 
1781 static int
1782 parse_bool(const char *key, const char *value, void *args)
1783 {
1784 	int *i = (int *)args;
1785 	char *end;
1786 	int num;
1787 
1788 	num = strtoul(value, &end, 10);
1789 
1790 	if (num != 0 && num != 1) {
1791 		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1792 			"value must be 0 or 1",
1793 			value, key);
1794 		return -1;
1795 	}
1796 
1797 	*i = num;
1798 	return 0;
1799 }
1800 
1801 static int
1802 parse_u64(const char *key, const char *value, void *args)
1803 {
1804 	u64 *num = (u64 *)args;
1805 	u64 tmp;
1806 
1807 	errno = 0;
1808 	tmp = strtoull(value, NULL, 16);
1809 	if (errno) {
1810 		PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u64",
1811 			    key, value);
1812 		return -1;
1813 	}
1814 
1815 	*num = tmp;
1816 
1817 	return 0;
1818 }
1819 
1820 static int
1821 lookup_pps_type(const char *pps_name)
1822 {
1823 	static struct {
1824 		const char *name;
1825 		enum pps_type type;
1826 	} pps_type_map[] = {
1827 		{ "pin",  PPS_PIN  },
1828 	};
1829 
1830 	uint32_t i;
1831 
1832 	for (i = 0; i < RTE_DIM(pps_type_map); i++) {
1833 		if (strcmp(pps_name, pps_type_map[i].name) == 0)
1834 			return pps_type_map[i].type;
1835 	}
1836 
1837 	return -1;
1838 }
1839 
1840 static int
1841 parse_pin_set(const char *input, int pps_type, struct ice_devargs *devargs)
1842 {
1843 	const char *str = input;
1844 	char *end = NULL;
1845 	uint32_t idx;
1846 
1847 	while (isblank(*str))
1848 		str++;
1849 
1850 	if (!isdigit(*str))
1851 		return -1;
1852 
1853 	if (pps_type == PPS_PIN) {
1854 		idx = strtoul(str, &end, 10);
1855 		if (end == NULL || idx >= ICE_MAX_PIN_NUM)
1856 			return -1;
1857 
1858 		devargs->pin_idx = idx;
1859 		devargs->pps_out_ena = 1;
1860 	}
1861 
1862 	while (isblank(*end))
1863 		end++;
1864 
1865 	if (*end != ']')
1866 		return -1;
1867 
1868 	return 0;
1869 }
1870 
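/* Parse the "pps_out" devargs value. The expected form is "[pin:<n>]",
 * where <n> selects the GPIO pin (0..ICE_MAX_PIN_NUM - 1) used for the
 * 1PPS output.
 */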
1871 static int
1872 parse_pps_out_parameter(const char *pins, struct ice_devargs *devargs)
1873 {
1874 	const char *pin_start;
1875 	uint32_t idx;
1876 	int pps_type;
1877 	char pps_name[32];
1878 
1879 	while (isblank(*pins))
1880 		pins++;
1881 
1882 	pins++;
1883 	while (isblank(*pins))
1884 		pins++;
1885 	if (*pins == '\0')
1886 		return -1;
1887 
1888 	for (idx = 0; ; idx++) {
1889 		if (isblank(pins[idx]) ||
1890 		    pins[idx] == ':' ||
1891 		    pins[idx] == '\0')
1892 			break;
1893 
1894 		pps_name[idx] = pins[idx];
1895 	}
1896 	pps_name[idx] = '\0';
1897 	pps_type = lookup_pps_type(pps_name);
1898 	if (pps_type < 0)
1899 		return -1;
1900 
1901 	pins += idx;
1902 
1903 	pins += strcspn(pins, ":");
1904 	if (*pins++ != ':')
1905 		return -1;
1906 	while (isblank(*pins))
1907 		pins++;
1908 
1909 	pin_start = pins;
1910 
1911 	while (isblank(*pins))
1912 		pins++;
1913 
1914 	if (parse_pin_set(pin_start, pps_type, devargs) < 0)
1915 		return -1;
1916 
1917 	return 0;
1918 }
1919 
1920 static int
1921 handle_pps_out_arg(__rte_unused const char *key, const char *value,
1922 		   void *extra_args)
1923 {
1924 	struct ice_devargs *devargs = extra_args;
1925 
1926 	if (value == NULL || extra_args == NULL)
1927 		return -EINVAL;
1928 
1929 	if (parse_pps_out_parameter(value, devargs) < 0) {
1930 		PMD_DRV_LOG(ERR,
1931 			    "The GPIO pin parameter is wrong : '%s'",
1932 			    value);
1933 		return -1;
1934 	}
1935 
1936 	return 0;
1937 }
1938 
1939 static int ice_parse_devargs(struct rte_eth_dev *dev)
1940 {
1941 	struct ice_adapter *ad =
1942 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1943 	struct rte_devargs *devargs = dev->device->devargs;
1944 	struct rte_kvargs *kvlist;
1945 	int ret;
1946 
1947 	if (devargs == NULL)
1948 		return 0;
1949 
1950 	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1951 	if (kvlist == NULL) {
1952 		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
1953 		return -EINVAL;
1954 	}
1955 
1956 	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
1957 	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
1958 	       sizeof(ad->devargs.proto_xtr));
1959 
1960 	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
1961 				 &handle_proto_xtr_arg, &ad->devargs);
1962 	if (ret)
1963 		goto bail;
1964 
1965 	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
1966 				 &parse_bool, &ad->devargs.safe_mode_support);
1967 	if (ret)
1968 		goto bail;
1969 
1970 	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
1971 				 &parse_bool, &ad->devargs.pipe_mode_support);
1972 	if (ret)
1973 		goto bail;
1974 
1975 	ret = rte_kvargs_process(kvlist, ICE_HW_DEBUG_MASK_ARG,
1976 				 &parse_u64, &ad->hw.debug_mask);
1977 	if (ret)
1978 		goto bail;
1979 
1980 	ret = rte_kvargs_process(kvlist, ICE_ONE_PPS_OUT_ARG,
1981 				 &handle_pps_out_arg, &ad->devargs);
1982 	if (ret)
1983 		goto bail;
1984 
1985 	ret = rte_kvargs_process(kvlist, ICE_RX_LOW_LATENCY_ARG,
1986 				 &parse_bool, &ad->devargs.rx_low_latency);
1987 
1988 bail:
1989 	rte_kvargs_free(kvlist);
1990 	return ret;
1991 }
1992 
1993 /* Forward LLDP packets to the default VSI by setting switch rules */
1994 static int
1995 ice_vsi_config_sw_lldp(struct ice_vsi *vsi, bool on)
1996 {
1997 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1998 	struct ice_fltr_list_entry *s_list_itr = NULL;
1999 	struct LIST_HEAD_TYPE list_head;
2000 	int ret = 0;
2001 
2002 	INIT_LIST_HEAD(&list_head);
2003 
2004 	s_list_itr = (struct ice_fltr_list_entry *)
2005 			ice_malloc(hw, sizeof(*s_list_itr));
2006 	if (!s_list_itr)
2007 		return -ENOMEM;
2008 	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2009 	s_list_itr->fltr_info.vsi_handle = vsi->idx;
2010 	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
2011 			RTE_ETHER_TYPE_LLDP;
2012 	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2013 	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
2014 	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
2015 	LIST_ADD(&s_list_itr->list_entry, &list_head);
2016 	if (on)
2017 		ret = ice_add_eth_mac(hw, &list_head);
2018 	else
2019 		ret = ice_remove_eth_mac(hw, &list_head);
2020 
2021 	rte_free(s_list_itr);
2022 	return ret;
2023 }
2024 
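/* Query the firmware for the descriptors currently allocated for the given
 * (shared) resource type, starting at desc_id. Up to num descriptors are
 * copied into prof_buf and *num_prof is set to the number returned.
 */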
2025 static enum ice_status
2026 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
2027 		uint16_t num, uint16_t desc_id,
2028 		uint16_t *prof_buf, uint16_t *num_prof)
2029 {
2030 	struct ice_aqc_res_elem *resp_buf;
2031 	int ret;
2032 	uint16_t buf_len;
2033 	bool res_shared = true;
2034 	struct ice_aq_desc aq_desc;
2035 	struct ice_sq_cd *cd = NULL;
2036 	struct ice_aqc_get_allocd_res_desc *cmd =
2037 			&aq_desc.params.get_res_desc;
2038 
2039 	buf_len = sizeof(*resp_buf) * num;
2040 	resp_buf = ice_malloc(hw, buf_len);
2041 	if (!resp_buf)
2042 		return -ENOMEM;
2043 
2044 	ice_fill_dflt_direct_cmd_desc(&aq_desc,
2045 			ice_aqc_opc_get_allocd_res_desc);
2046 
2047 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2048 				ICE_AQC_RES_TYPE_M) | (res_shared ?
2049 				ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2050 	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
2051 
2052 	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
2053 	if (!ret)
2054 		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
2055 	else
2056 		goto exit;
2057 
2058 	ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
2059 			(*num_prof), ICE_NONDMA_TO_NONDMA);
2060 
2061 exit:
2062 	rte_free(resp_buf);
2063 	return ret;
2064 }
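
/* Free every allocated descriptor of the given resource type. Used by
 * ice_reset_fxp_resource() to reclaim the FDIR and RSS profile builder
 * profile IDs.
 */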
2065 static int
2066 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
2067 {
2068 	int ret;
2069 	uint16_t prof_id;
2070 	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
2071 	uint16_t first_desc = 1;
2072 	uint16_t num_prof = 0;
2073 
2074 	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
2075 			first_desc, prof_buf, &num_prof);
2076 	if (ret) {
2077 		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
2078 		return ret;
2079 	}
2080 
2081 	for (prof_id = 0; prof_id < num_prof; prof_id++) {
2082 		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
2083 		if (ret) {
2084 			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
2085 			return ret;
2086 		}
2087 	}
2088 	return 0;
2089 }
2090 
2091 static int
2092 ice_reset_fxp_resource(struct ice_hw *hw)
2093 {
2094 	int ret;
2095 
2096 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
2097 	if (ret) {
2098 		PMD_INIT_LOG(ERR, "Failed to clean up FDIR resource");
2099 		return ret;
2100 	}
2101 
2102 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
2103 	if (ret) {
2104 		PMD_INIT_LOG(ERR, "Failed to clean up RSS resource");
2105 		return ret;
2106 	}
2107 
2108 	return 0;
2109 }
2110 
2111 static void
2112 ice_rss_ctx_init(struct ice_pf *pf)
2113 {
2114 	memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
2115 }
2116 
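/* Build a bitmap of the Rx descriptor IDs (RXDIDs) supported by the device.
 * Legacy descriptor 1 is always included; a flexible descriptor is included
 * when its GLFLXP_RXDID_FLAGS register reports a valid flex flag.
 */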
2117 static uint64_t
2118 ice_get_supported_rxdid(struct ice_hw *hw)
2119 {
2120 	uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
2121 	uint32_t regval;
2122 	int i;
2123 
2124 	supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
2125 
2126 	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
2127 		regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
2128 		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
2129 			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
2130 			supported_rxdid |= BIT(i);
2131 	}
2132 	return supported_rxdid;
2133 }
2134 
2135 static int
2136 ice_dev_init(struct rte_eth_dev *dev)
2137 {
2138 	struct rte_pci_device *pci_dev;
2139 	struct rte_intr_handle *intr_handle;
2140 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2141 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2142 	struct ice_adapter *ad =
2143 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2144 	struct ice_vsi *vsi;
2145 	int ret;
2146 #ifndef RTE_EXEC_ENV_WINDOWS
2147 	off_t pos;
2148 	uint32_t dsn_low, dsn_high;
2149 	uint64_t dsn;
2150 	bool use_dsn;
2151 #endif
2152 
2153 	dev->dev_ops = &ice_eth_dev_ops;
2154 	dev->rx_queue_count = ice_rx_queue_count;
2155 	dev->rx_descriptor_status = ice_rx_descriptor_status;
2156 	dev->tx_descriptor_status = ice_tx_descriptor_status;
2157 	dev->rx_pkt_burst = ice_recv_pkts;
2158 	dev->tx_pkt_burst = ice_xmit_pkts;
2159 	dev->tx_pkt_prepare = ice_prep_pkts;
2160 
2161 	/* for secondary processes, we don't initialise any further as primary
2162 	 * has already done this work.
2163 	 */
2164 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2165 		ice_set_rx_function(dev);
2166 		ice_set_tx_function(dev);
2167 		return 0;
2168 	}
2169 
2170 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2171 
2172 	ice_set_default_ptype_table(dev);
2173 	pci_dev = RTE_DEV_TO_PCI(dev->device);
2174 	intr_handle = &pci_dev->intr_handle;
2175 
2176 	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2177 	pf->dev_data = dev->data;
2178 	hw->back = pf->adapter;
2179 	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2180 	hw->vendor_id = pci_dev->id.vendor_id;
2181 	hw->device_id = pci_dev->id.device_id;
2182 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2183 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2184 	hw->bus.device = pci_dev->addr.devid;
2185 	hw->bus.func = pci_dev->addr.function;
2186 
2187 	ret = ice_parse_devargs(dev);
2188 	if (ret) {
2189 		PMD_INIT_LOG(ERR, "Failed to parse devargs");
2190 		return -EINVAL;
2191 	}
2192 
2193 	ice_init_controlq_parameter(hw);
2194 
2195 	ret = ice_init_hw(hw);
2196 	if (ret) {
2197 		PMD_INIT_LOG(ERR, "Failed to initialize HW");
2198 		return -EINVAL;
2199 	}
2200 
2201 #ifndef RTE_EXEC_ENV_WINDOWS
2202 	use_dsn = false;
2203 	dsn = 0;
2204 	pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
2205 	if (pos) {
2206 		if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0 ||
2207 				rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
2208 			PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
2209 		} else {
2210 			use_dsn = true;
2211 			dsn = (uint64_t)dsn_high << 32 | dsn_low;
2212 		}
2213 	} else {
2214 		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
2215 	}
2216 
2217 	ret = ice_load_pkg(pf->adapter, use_dsn, dsn);
2218 	if (ret == 0) {
2219 		ret = ice_init_hw_tbls(hw);
2220 		if (ret) {
2221 			PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", ret);
2222 			rte_free(hw->pkg_copy);
2223 		}
2224 	}
2225 
2226 	if (ret) {
2227 		if (ad->devargs.safe_mode_support == 0) {
2228 			PMD_INIT_LOG(ERR, "Failed to load the DDP package, "
2229 					"use safe-mode-support=1 to enter Safe Mode");
2230 			goto err_init_fw;
2231 		}
2232 
2233 		PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
2234 					"entering Safe Mode");
2235 		ad->is_safe_mode = 1;
2236 	}
2237 #endif
2238 
2239 	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2240 		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2241 		     hw->api_maj_ver, hw->api_min_ver);
2242 
2243 	ice_pf_sw_init(dev);
2244 	ret = ice_init_mac_address(dev);
2245 	if (ret) {
2246 		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2247 		goto err_init_mac;
2248 	}
2249 
2250 	ret = ice_res_pool_init(&pf->msix_pool, 1,
2251 				hw->func_caps.common_cap.num_msix_vectors - 1);
2252 	if (ret) {
2253 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2254 		goto err_msix_pool_init;
2255 	}
2256 
2257 	ret = ice_pf_setup(pf);
2258 	if (ret) {
2259 		PMD_INIT_LOG(ERR, "Failed to setup PF");
2260 		goto err_pf_setup;
2261 	}
2262 
2263 	ret = ice_send_driver_ver(hw);
2264 	if (ret) {
2265 		PMD_INIT_LOG(ERR, "Failed to send driver version");
2266 		goto err_pf_setup;
2267 	}
2268 
2269 	vsi = pf->main_vsi;
2270 
2271 	ret = ice_aq_stop_lldp(hw, true, false, NULL);
2272 	if (ret != ICE_SUCCESS)
2273 		PMD_INIT_LOG(DEBUG, "LLDP has already been stopped\n");
2274 	ret = ice_init_dcb(hw, true);
2275 	if (ret != ICE_SUCCESS)
2276 		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
2277 	/* Forward LLDP packets to default VSI */
2278 	ret = ice_vsi_config_sw_lldp(vsi, true);
2279 	if (ret != ICE_SUCCESS)
2280 		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
2281 	/* register callback func to eal lib */
2282 	rte_intr_callback_register(intr_handle,
2283 				   ice_interrupt_handler, dev);
2284 
2285 	ice_pf_enable_irq0(hw);
2286 
2287 	/* enable uio intr after callback register */
2288 	rte_intr_enable(intr_handle);
2289 
2290 	/* get the base queue pair index in the device */
2291 	ice_base_queue_get(pf);
2292 
2293 	/* Initialize RSS context for gtpu_eh */
2294 	ice_rss_ctx_init(pf);
2295 
2296 	if (!ad->is_safe_mode) {
2297 		ret = ice_flow_init(ad);
2298 		if (ret) {
2299 			PMD_INIT_LOG(ERR, "Failed to initialize flow");
2300 			goto err_flow_init;
2301 		}
2302 	}
2303 
2304 	ret = ice_reset_fxp_resource(hw);
2305 	if (ret) {
2306 		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2307 		goto err_flow_init;
2308 	}
2309 
2310 	pf->supported_rxdid = ice_get_supported_rxdid(hw);
2311 
2312 	return 0;
2313 
2314 err_flow_init:
2315 	ice_flow_uninit(ad);
2316 	rte_intr_disable(intr_handle);
2317 	ice_pf_disable_irq0(hw);
2318 	rte_intr_callback_unregister(intr_handle,
2319 				     ice_interrupt_handler, dev);
2320 err_pf_setup:
2321 	ice_res_pool_destroy(&pf->msix_pool);
2322 err_msix_pool_init:
2323 	rte_free(dev->data->mac_addrs);
2324 	dev->data->mac_addrs = NULL;
2325 err_init_mac:
2326 	rte_free(pf->proto_xtr);
2327 #ifndef RTE_EXEC_ENV_WINDOWS
2328 err_init_fw:
2329 #endif
2330 	ice_deinit_hw(hw);
2331 
2332 	return ret;
2333 }
2334 
2335 int
2336 ice_release_vsi(struct ice_vsi *vsi)
2337 {
2338 	struct ice_hw *hw;
2339 	struct ice_vsi_ctx vsi_ctx;
2340 	enum ice_status ret;
2341 	int error = 0;
2342 
2343 	if (!vsi)
2344 		return error;
2345 
2346 	hw = ICE_VSI_TO_HW(vsi);
2347 
2348 	ice_remove_all_mac_vlan_filters(vsi);
2349 
2350 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2351 
2352 	vsi_ctx.vsi_num = vsi->vsi_id;
2353 	vsi_ctx.info = vsi->info;
2354 	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2355 	if (ret != ICE_SUCCESS) {
2356 		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2357 		error = -1;
2358 	}
2359 
2360 	rte_free(vsi->rss_lut);
2361 	rte_free(vsi->rss_key);
2362 	rte_free(vsi);
2363 	return error;
2364 }
2365 
2366 void
2367 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2368 {
2369 	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
2370 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2371 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2372 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2373 	uint16_t msix_intr, i;
2374 
2375 	/* disable interrupts and also clear all the existing config */
2376 	for (i = 0; i < vsi->nb_qps; i++) {
2377 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2378 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2379 		rte_wmb();
2380 	}
2381 
2382 	if (rte_intr_allow_others(intr_handle))
2383 		/* vfio-pci */
2384 		for (i = 0; i < vsi->nb_msix; i++) {
2385 			msix_intr = vsi->msix_intr + i;
2386 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2387 				      GLINT_DYN_CTL_WB_ON_ITR_M);
2388 		}
2389 	else
2390 		/* igb_uio */
2391 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2392 }
2393 
2394 static int
2395 ice_dev_stop(struct rte_eth_dev *dev)
2396 {
2397 	struct rte_eth_dev_data *data = dev->data;
2398 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2399 	struct ice_vsi *main_vsi = pf->main_vsi;
2400 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2401 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2402 	uint16_t i;
2403 
2404 	/* avoid stopping again */
2405 	if (pf->adapter_stopped)
2406 		return 0;
2407 
2408 	/* stop and clear all Rx queues */
2409 	for (i = 0; i < data->nb_rx_queues; i++)
2410 		ice_rx_queue_stop(dev, i);
2411 
2412 	/* stop and clear all Tx queues */
2413 	for (i = 0; i < data->nb_tx_queues; i++)
2414 		ice_tx_queue_stop(dev, i);
2415 
2416 	/* disable all queue interrupts */
2417 	ice_vsi_disable_queues_intr(main_vsi);
2418 
2419 	if (pf->init_link_up)
2420 		ice_dev_set_link_up(dev);
2421 	else
2422 		ice_dev_set_link_down(dev);
2423 
2424 	/* Clean datapath event and queue/vec mapping */
2425 	rte_intr_efd_disable(intr_handle);
2426 	if (intr_handle->intr_vec) {
2427 		rte_free(intr_handle->intr_vec);
2428 		intr_handle->intr_vec = NULL;
2429 	}
2430 
2431 	pf->adapter_stopped = true;
2432 	dev->data->dev_started = 0;
2433 
2434 	return 0;
2435 }
2436 
2437 static int
2438 ice_dev_close(struct rte_eth_dev *dev)
2439 {
2440 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2441 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2442 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2443 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2444 	struct ice_adapter *ad =
2445 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2446 	int ret;
2447 	uint32_t val;
2448 	uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
2449 	uint32_t pin_idx = ad->devargs.pin_idx;
2450 
2451 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2452 		return 0;
2453 
2454 	/* Since stopping the port brings the link down, a link event will be
2455 	 * triggered. Disable IRQ0 first so that the deallocation of resources
2456 	 * such as port_info does not crash the interrupt service
2457 	 * thread.
2458 	 */
2459 	ice_pf_disable_irq0(hw);
2460 
2461 	ret = ice_dev_stop(dev);
2462 
2463 	if (!ad->is_safe_mode)
2464 		ice_flow_uninit(ad);
2465 
2466 	/* release all queue resource */
2467 	ice_free_queues(dev);
2468 
2469 	ice_res_pool_destroy(&pf->msix_pool);
2470 	ice_release_vsi(pf->main_vsi);
2471 	ice_sched_cleanup_all(hw);
2472 	ice_free_hw_tbls(hw);
2473 	rte_free(hw->port_info);
2474 	hw->port_info = NULL;
2475 	ice_shutdown_all_ctrlq(hw);
2476 	rte_free(pf->proto_xtr);
2477 	pf->proto_xtr = NULL;
2478 
2479 	if (ad->devargs.pps_out_ena) {
2480 		ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0);
2481 		ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0);
2482 		ICE_WRITE_REG(hw, GLTSYN_TGT_L(pin_idx, timer), 0);
2483 		ICE_WRITE_REG(hw, GLTSYN_TGT_H(pin_idx, timer), 0);
2484 
2485 		val = GLGEN_GPIO_CTL_PIN_DIR_M;
2486 		ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(pin_idx), val);
2487 	}
2488 
2489 	/* disable uio intr before callback unregister */
2490 	rte_intr_disable(intr_handle);
2491 
2492 	/* unregister callback func from eal lib */
2493 	rte_intr_callback_unregister(intr_handle,
2494 				     ice_interrupt_handler, dev);
2495 
2496 	return ret;
2497 }
2498 
2499 static int
2500 ice_dev_uninit(struct rte_eth_dev *dev)
2501 {
2502 	ice_dev_close(dev);
2503 
2504 	return 0;
2505 }
2506 
2507 static bool
2508 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
2509 {
2510 	return cfg->hash_flds != 0 && cfg->addl_hdrs != 0;
2511 }
2512 
2513 static void
2514 hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
2515 {
2516 	cfg->hash_flds = 0;
2517 	cfg->addl_hdrs = 0;
2518 	cfg->symm = 0;
2519 	cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
2520 }
2521 
2522 static int
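/* Helpers for juggling RSS configurations while GTPU hash contexts change:
 * "moveout" removes a configuration from hardware but keeps it cached in the
 * context, "moveback" re-programs a cached configuration, and "remove" drops
 * it from hardware and clears the cached copy as well.
 */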
2523 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2524 {
2525 	enum ice_status status = ICE_SUCCESS;
2526 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2527 	struct ice_vsi *vsi = pf->main_vsi;
2528 
2529 	if (!is_hash_cfg_valid(cfg))
2530 		return -ENOENT;
2531 
2532 	status = ice_rem_rss_cfg(hw, vsi->idx, cfg);
2533 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2534 		PMD_DRV_LOG(ERR,
2535 			    "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
2536 			    vsi->idx, status);
2537 		return -EBUSY;
2538 	}
2539 
2540 	return 0;
2541 }
2542 
2543 static int
2544 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2545 {
2546 	enum ice_status status = ICE_SUCCESS;
2547 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2548 	struct ice_vsi *vsi = pf->main_vsi;
2549 
2550 	if (!is_hash_cfg_valid(cfg))
2551 		return -ENOENT;
2552 
2553 	status = ice_add_rss_cfg(hw, vsi->idx, cfg);
2554 	if (status) {
2555 		PMD_DRV_LOG(ERR,
2556 			    "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
2557 			    vsi->idx, status);
2558 		return -EBUSY;
2559 	}
2560 
2561 	return 0;
2562 }
2563 
2564 static int
2565 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2566 {
2567 	int ret;
2568 
2569 	ret = ice_hash_moveout(pf, cfg);
2570 	if (ret && (ret != -ENOENT))
2571 		return ret;
2572 
2573 	hash_cfg_reset(cfg);
2574 
2575 	return 0;
2576 }
2577 
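/* Before a new GTPU RSS rule for the context ctx_idx is installed, remove
 * the cached rules that would conflict with it and temporarily move out the
 * ones that only overlap, so they can be restored afterwards by
 * ice_add_rss_cfg_post_gtpu().
 */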
2578 static int
2579 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2580 			 u8 ctx_idx)
2581 {
2582 	int ret;
2583 
2584 	switch (ctx_idx) {
2585 	case ICE_HASH_GTPU_CTX_EH_IP:
2586 		ret = ice_hash_remove(pf,
2587 				      &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2588 		if (ret && (ret != -ENOENT))
2589 			return ret;
2590 
2591 		ret = ice_hash_remove(pf,
2592 				      &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2593 		if (ret && (ret != -ENOENT))
2594 			return ret;
2595 
2596 		ret = ice_hash_remove(pf,
2597 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2598 		if (ret && (ret != -ENOENT))
2599 			return ret;
2600 
2601 		ret = ice_hash_remove(pf,
2602 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2603 		if (ret && (ret != -ENOENT))
2604 			return ret;
2605 
2606 		ret = ice_hash_remove(pf,
2607 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2608 		if (ret && (ret != -ENOENT))
2609 			return ret;
2610 
2611 		ret = ice_hash_remove(pf,
2612 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2613 		if (ret && (ret != -ENOENT))
2614 			return ret;
2615 
2616 		ret = ice_hash_remove(pf,
2617 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2618 		if (ret && (ret != -ENOENT))
2619 			return ret;
2620 
2621 		ret = ice_hash_remove(pf,
2622 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2623 		if (ret && (ret != -ENOENT))
2624 			return ret;
2625 
2626 		break;
2627 	case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2628 		ret = ice_hash_remove(pf,
2629 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2630 		if (ret && (ret != -ENOENT))
2631 			return ret;
2632 
2633 		ret = ice_hash_remove(pf,
2634 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2635 		if (ret && (ret != -ENOENT))
2636 			return ret;
2637 
2638 		ret = ice_hash_moveout(pf,
2639 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2640 		if (ret && (ret != -ENOENT))
2641 			return ret;
2642 
2643 		ret = ice_hash_moveout(pf,
2644 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2645 		if (ret && (ret != -ENOENT))
2646 			return ret;
2647 
2648 		ret = ice_hash_moveout(pf,
2649 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2650 		if (ret && (ret != -ENOENT))
2651 			return ret;
2652 
2653 		ret = ice_hash_moveout(pf,
2654 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2655 		if (ret && (ret != -ENOENT))
2656 			return ret;
2657 
2658 		break;
2659 	case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2660 		ret = ice_hash_remove(pf,
2661 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2662 		if (ret && (ret != -ENOENT))
2663 			return ret;
2664 
2665 		ret = ice_hash_remove(pf,
2666 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2667 		if (ret && (ret != -ENOENT))
2668 			return ret;
2669 
2670 		ret = ice_hash_moveout(pf,
2671 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2672 		if (ret && (ret != -ENOENT))
2673 			return ret;
2674 
2675 		ret = ice_hash_moveout(pf,
2676 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2677 		if (ret && (ret != -ENOENT))
2678 			return ret;
2679 
2680 		ret = ice_hash_moveout(pf,
2681 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2682 		if (ret && (ret != -ENOENT))
2683 			return ret;
2684 
2685 		ret = ice_hash_moveout(pf,
2686 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2687 		if (ret && (ret != -ENOENT))
2688 			return ret;
2689 
2690 		break;
2691 	case ICE_HASH_GTPU_CTX_UP_IP:
2692 		ret = ice_hash_remove(pf,
2693 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2694 		if (ret && (ret != -ENOENT))
2695 			return ret;
2696 
2697 		ret = ice_hash_remove(pf,
2698 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2699 		if (ret && (ret != -ENOENT))
2700 			return ret;
2701 
2702 		ret = ice_hash_moveout(pf,
2703 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2704 		if (ret && (ret != -ENOENT))
2705 			return ret;
2706 
2707 		ret = ice_hash_moveout(pf,
2708 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2709 		if (ret && (ret != -ENOENT))
2710 			return ret;
2711 
2712 		ret = ice_hash_moveout(pf,
2713 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2714 		if (ret && (ret != -ENOENT))
2715 			return ret;
2716 
2717 		break;
2718 	case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2719 	case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2720 		ret = ice_hash_moveout(pf,
2721 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2722 		if (ret && (ret != -ENOENT))
2723 			return ret;
2724 
2725 		ret = ice_hash_moveout(pf,
2726 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2727 		if (ret && (ret != -ENOENT))
2728 			return ret;
2729 
2730 		ret = ice_hash_moveout(pf,
2731 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2732 		if (ret && (ret != -ENOENT))
2733 			return ret;
2734 
2735 		break;
2736 	case ICE_HASH_GTPU_CTX_DW_IP:
2737 		ret = ice_hash_remove(pf,
2738 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2739 		if (ret && (ret != -ENOENT))
2740 			return ret;
2741 
2742 		ret = ice_hash_remove(pf,
2743 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2744 		if (ret && (ret != -ENOENT))
2745 			return ret;
2746 
2747 		ret = ice_hash_moveout(pf,
2748 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2749 		if (ret && (ret != -ENOENT))
2750 			return ret;
2751 
2752 		ret = ice_hash_moveout(pf,
2753 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2754 		if (ret && (ret != -ENOENT))
2755 			return ret;
2756 
2757 		ret = ice_hash_moveout(pf,
2758 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2759 		if (ret && (ret != -ENOENT))
2760 			return ret;
2761 
2762 		break;
2763 	case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2764 	case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2765 		ret = ice_hash_moveout(pf,
2766 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2767 		if (ret && (ret != -ENOENT))
2768 			return ret;
2769 
2770 		ret = ice_hash_moveout(pf,
2771 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2772 		if (ret && (ret != -ENOENT))
2773 			return ret;
2774 
2775 		ret = ice_hash_moveout(pf,
2776 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2777 		if (ret && (ret != -ENOENT))
2778 			return ret;
2779 
2780 		break;
2781 	default:
2782 		break;
2783 	}
2784 
2785 	return 0;
2786 }
2787 
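/* Derive the GTPU hash context index from a flow segment header bitmap: the
 * PDU type (EH/uplink/downlink) selects a group of three slots and the L4
 * type (other/UDP/TCP) selects the slot within the group. Non-GTPU or
 * non-IP headers map to ICE_HASH_GTPU_CTX_MAX.
 */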
2788 static u8 calc_gtpu_ctx_idx(uint32_t hdr)
2789 {
2790 	u8 eh_idx, ip_idx;
2791 
2792 	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
2793 		eh_idx = 0;
2794 	else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
2795 		eh_idx = 1;
2796 	else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
2797 		eh_idx = 2;
2798 	else
2799 		return ICE_HASH_GTPU_CTX_MAX;
2800 
2801 	ip_idx = 0;
2802 	if (hdr & ICE_FLOW_SEG_HDR_UDP)
2803 		ip_idx = 1;
2804 	else if (hdr & ICE_FLOW_SEG_HDR_TCP)
2805 		ip_idx = 2;
2806 
2807 	if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
2808 		return eh_idx * 3 + ip_idx;
2809 	else
2810 		return ICE_HASH_GTPU_CTX_MAX;
2811 }
2812 
2813 static int
2814 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2815 {
2816 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2817 
2818 	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2819 		return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
2820 						gtpu_ctx_idx);
2821 	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2822 		return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
2823 						gtpu_ctx_idx);
2824 
2825 	return 0;
2826 }
2827 
2828 static int
2829 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2830 			  u8 ctx_idx, struct ice_rss_hash_cfg *cfg)
2831 {
2832 	int ret;
2833 
2834 	if (ctx_idx < ICE_HASH_GTPU_CTX_MAX)
2835 		ctx->ctx[ctx_idx] = *cfg;
2836 
2837 	switch (ctx_idx) {
2838 	case ICE_HASH_GTPU_CTX_EH_IP:
2839 		break;
2840 	case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2841 		ret = ice_hash_moveback(pf,
2842 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2843 		if (ret && (ret != -ENOENT))
2844 			return ret;
2845 
2846 		ret = ice_hash_moveback(pf,
2847 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2848 		if (ret && (ret != -ENOENT))
2849 			return ret;
2850 
2851 		ret = ice_hash_moveback(pf,
2852 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2853 		if (ret && (ret != -ENOENT))
2854 			return ret;
2855 
2856 		ret = ice_hash_moveback(pf,
2857 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2858 		if (ret && (ret != -ENOENT))
2859 			return ret;
2860 
2861 		break;
2862 	case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2863 		ret = ice_hash_moveback(pf,
2864 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2865 		if (ret && (ret != -ENOENT))
2866 			return ret;
2867 
2868 		ret = ice_hash_moveback(pf,
2869 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2870 		if (ret && (ret != -ENOENT))
2871 			return ret;
2872 
2873 		ret = ice_hash_moveback(pf,
2874 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2875 		if (ret && (ret != -ENOENT))
2876 			return ret;
2877 
2878 		ret = ice_hash_moveback(pf,
2879 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2880 		if (ret && (ret != -ENOENT))
2881 			return ret;
2882 
2883 		break;
2884 	case ICE_HASH_GTPU_CTX_UP_IP:
2885 	case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2886 	case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2887 	case ICE_HASH_GTPU_CTX_DW_IP:
2888 	case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2889 	case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2890 		ret = ice_hash_moveback(pf,
2891 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2892 		if (ret && (ret != -ENOENT))
2893 			return ret;
2894 
2895 		ret = ice_hash_moveback(pf,
2896 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2897 		if (ret && (ret != -ENOENT))
2898 			return ret;
2899 
2900 		ret = ice_hash_moveback(pf,
2901 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2902 		if (ret && (ret != -ENOENT))
2903 			return ret;
2904 
2905 		break;
2906 	default:
2907 		break;
2908 	}
2909 
2910 	return 0;
2911 }
2912 
2913 static int
2914 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2915 {
2916 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs);
2917 
2918 	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
2919 		return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4,
2920 						 gtpu_ctx_idx, cfg);
2921 	else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
2922 		return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6,
2923 						 gtpu_ctx_idx, cfg);
2924 
2925 	return 0;
2926 }
2927 
2928 static void
2929 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2930 {
2931 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2932 
2933 	if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
2934 		return;
2935 
2936 	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2937 		hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
2938 	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2939 		hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
2940 }
2941 
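/* Exported wrappers around ice_rem_rss_cfg()/ice_add_rss_cfg(): they keep
 * the PF GTPU hash context in sync with what is programmed in hardware and
 * always return 0, only logging errors.
 */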
2942 int
2943 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2944 		     struct ice_rss_hash_cfg *cfg)
2945 {
2946 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2947 	int ret;
2948 
2949 	ret = ice_rem_rss_cfg(hw, vsi_id, cfg);
2950 	if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
2951 		PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
2952 
2953 	ice_rem_rss_cfg_post(pf, cfg->addl_hdrs);
2954 
2955 	return 0;
2956 }
2957 
2958 int
2959 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2960 		     struct ice_rss_hash_cfg *cfg)
2961 {
2962 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2963 	int ret;
2964 
2965 	ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs);
2966 	if (ret)
2967 		PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");
2968 
2969 	ret = ice_add_rss_cfg(hw, vsi_id, cfg);
2970 	if (ret)
2971 		PMD_DRV_LOG(ERR, "add rss cfg failed\n");
2972 
2973 	ret = ice_add_rss_cfg_post(pf, cfg);
2974 	if (ret)
2975 		PMD_DRV_LOG(ERR, "add rss cfg post failed\n");
2976 
2977 	return 0;
2978 }
2979 
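/* Translate the rss_hf bits requested by the application into RSS hash
 * configurations (plain IPv4/IPv6, UDP/TCP/SCTP and the PPPoE variants) and
 * program them. pf->rss_hf records which of the supported bits were enabled.
 */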
2980 static void
2981 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2982 {
2983 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2984 	struct ice_vsi *vsi = pf->main_vsi;
2985 	struct ice_rss_hash_cfg cfg;
2986 	int ret;
2987 
2988 #define ICE_RSS_HF_ALL ( \
2989 	ETH_RSS_IPV4 | \
2990 	ETH_RSS_IPV6 | \
2991 	ETH_RSS_NONFRAG_IPV4_UDP | \
2992 	ETH_RSS_NONFRAG_IPV6_UDP | \
2993 	ETH_RSS_NONFRAG_IPV4_TCP | \
2994 	ETH_RSS_NONFRAG_IPV6_TCP | \
2995 	ETH_RSS_NONFRAG_IPV4_SCTP | \
2996 	ETH_RSS_NONFRAG_IPV6_SCTP)
2997 
2998 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
2999 	if (ret)
3000 		PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
3001 			    __func__, ret);
3002 
3003 	cfg.symm = 0;
3004 	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3005 	/* Configure RSS for IPv4 with src/dst addr as input set */
3006 	if (rss_hf & ETH_RSS_IPV4) {
3007 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3008 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
3009 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3010 		if (ret)
3011 			PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
3012 				    __func__, ret);
3013 	}
3014 
3015 	/* Configure RSS for IPv6 with src/dst addr as input set */
3016 	if (rss_hf & ETH_RSS_IPV6) {
3017 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3018 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
3019 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3020 		if (ret)
3021 			PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
3022 				    __func__, ret);
3023 	}
3024 
3025 	/* Configure RSS for udp4 with src/dst addr and port as input set */
3026 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
3027 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
3028 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3029 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
3030 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3031 		if (ret)
3032 			PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
3033 				    __func__, ret);
3034 	}
3035 
3036 	/* Configure RSS for udp6 with src/dst addr and port as input set */
3037 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
3038 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
3039 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3040 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
3041 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3042 		if (ret)
3043 			PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
3044 				    __func__, ret);
3045 	}
3046 
3047 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
3048 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
3049 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
3050 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3051 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
3052 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3053 		if (ret)
3054 			PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
3055 				    __func__, ret);
3056 	}
3057 
3058 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
3059 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
3060 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
3061 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3062 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
3063 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3064 		if (ret)
3065 			PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
3066 				    __func__, ret);
3067 	}
3068 
3069 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
3070 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
3071 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
3072 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3073 		cfg.hash_flds = ICE_HASH_SCTP_IPV4;
3074 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3075 		if (ret)
3076 			PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
3077 				    __func__, ret);
3078 	}
3079 
3080 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
3081 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
3082 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
3083 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3084 		cfg.hash_flds = ICE_HASH_SCTP_IPV6;
3085 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3086 		if (ret)
3087 			PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
3088 				    __func__, ret);
3089 	}
3090 
3091 	if (rss_hf & ETH_RSS_IPV4) {
3092 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
3093 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3094 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
3095 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3096 		if (ret)
3097 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
3098 				    __func__, ret);
3099 	}
3100 
3101 	if (rss_hf & ETH_RSS_IPV6) {
3102 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
3103 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3104 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
3105 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3106 		if (ret)
3107 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
3108 				    __func__, ret);
3109 	}
3110 
3111 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
3112 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3113 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3114 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
3115 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3116 		if (ret)
3117 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
3118 				    __func__, ret);
3119 	}
3120 
3121 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
3122 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3123 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3124 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
3125 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3126 		if (ret)
3127 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
3128 				    __func__, ret);
3129 	}
3130 
3131 	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
3132 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3133 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3134 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
3135 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3136 		if (ret)
3137 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
3138 				    __func__, ret);
3139 	}
3140 
3141 	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
3142 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3143 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3144 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
3145 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3146 		if (ret)
3147 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
3148 				    __func__, ret);
3149 	}
3150 
3151 	pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
3152 }
3153 
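/* Provide a default RSS key when the application did not supply one. The
 * key is generated from random bytes once and then reused, so every port
 * falling back to the default gets the same key.
 */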
3154 static void
3155 ice_get_default_rss_key(uint8_t *rss_key, uint32_t rss_key_size)
3156 {
3157 	static struct ice_aqc_get_set_rss_keys default_key;
3158 	static bool default_key_done;
3159 	uint8_t *key = (uint8_t *)&default_key;
3160 	size_t i;
3161 
3162 	if (rss_key_size > sizeof(default_key)) {
3163 		PMD_DRV_LOG(WARNING,
3164 			    "requested size %u is larger than default %zu, "
3165 			    "only %zu bytes are returned for the key\n",
3166 			    rss_key_size, sizeof(default_key),
3167 			    sizeof(default_key));
3168 	}
3169 
3170 	if (!default_key_done) {
3171 		/* Calculate the default hash key */
3172 		for (i = 0; i < sizeof(default_key); i++)
3173 			key[i] = (uint8_t)rte_rand();
3174 		default_key_done = true;
3175 	}
3176 	rte_memcpy(rss_key, key, RTE_MIN(rss_key_size, sizeof(default_key)));
3177 }
3178 
3179 static int ice_init_rss(struct ice_pf *pf)
3180 {
3181 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
3182 	struct ice_vsi *vsi = pf->main_vsi;
3183 	struct rte_eth_dev_data *dev_data = pf->dev_data;
3184 	struct ice_aq_get_set_rss_lut_params lut_params;
3185 	struct rte_eth_rss_conf *rss_conf;
3186 	struct ice_aqc_get_set_rss_keys key;
3187 	uint16_t i, nb_q;
3188 	int ret = 0;
3189 	bool is_safe_mode = pf->adapter->is_safe_mode;
3190 	uint32_t reg;
3191 
3192 	rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
3193 	nb_q = dev_data->nb_rx_queues;
3194 	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3195 	vsi->rss_lut_size = pf->hash_lut_size;
3196 
3197 	if (nb_q == 0) {
3198 		PMD_DRV_LOG(WARNING,
3199 			"RSS is not supported as the number of Rx queues is zero\n");
3200 		return 0;
3201 	}
3202 
3203 	if (is_safe_mode) {
3204 		PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
3205 		return 0;
3206 	}
3207 
3208 	if (!vsi->rss_key) {
3209 		vsi->rss_key = rte_zmalloc(NULL,
3210 					   vsi->rss_key_size, 0);
3211 		if (vsi->rss_key == NULL) {
3212 			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3213 			return -ENOMEM;
3214 		}
3215 	}
3216 	if (!vsi->rss_lut) {
3217 		vsi->rss_lut = rte_zmalloc(NULL,
3218 					   vsi->rss_lut_size, 0);
3219 		if (vsi->rss_lut == NULL) {
3220 			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_lut");
3221 			rte_free(vsi->rss_key);
3222 			vsi->rss_key = NULL;
3223 			return -ENOMEM;
3224 		}
3225 	}
3226 	/* configure RSS key */
3227 	if (!rss_conf->rss_key)
3228 		ice_get_default_rss_key(vsi->rss_key, vsi->rss_key_size);
3229 	else
3230 		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3231 			   RTE_MIN(rss_conf->rss_key_len,
3232 				   vsi->rss_key_size));
3233 
3234 	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3235 	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3236 	if (ret)
3237 		goto out;
3238 
3239 	/* init RSS LUT table */
3240 	for (i = 0; i < vsi->rss_lut_size; i++)
3241 		vsi->rss_lut[i] = i % nb_q;
3242 
3243 	lut_params.vsi_handle = vsi->idx;
3244 	lut_params.lut_size = vsi->rss_lut_size;
3245 	lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3246 	lut_params.lut = vsi->rss_lut;
3247 	lut_params.global_lut_id = 0;
3248 	ret = ice_aq_set_rss_lut(hw, &lut_params);
3249 	if (ret)
3250 		goto out;
3251 
3252 	/* Enable registers for symmetric_toeplitz function. */
3253 	reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3254 	reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3255 		(1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3256 	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3257 
3258 	/* RSS hash configuration */
3259 	ice_rss_hash_set(pf, rss_conf->rss_hf);
3260 
3261 	return 0;
3262 out:
3263 	rte_free(vsi->rss_key);
3264 	vsi->rss_key = NULL;
3265 	rte_free(vsi->rss_lut);
3266 	vsi->rss_lut = NULL;
3267 	return -EINVAL;
3268 }
3269 
3270 static int
3271 ice_dev_configure(struct rte_eth_dev *dev)
3272 {
3273 	struct ice_adapter *ad =
3274 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3275 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3276 	int ret;
3277 
3278 	/* Initialize to TRUE. If any Rx queue doesn't meet the bulk
3279 	 * allocation or vector Rx preconditions, it will be reset.
3280 	 */
3281 	ad->rx_bulk_alloc_allowed = true;
3282 	ad->tx_simple_allowed = true;
3283 
3284 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
3285 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
3286 
3287 	if (dev->data->nb_rx_queues) {
3288 		ret = ice_init_rss(pf);
3289 		if (ret) {
3290 			PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3291 			return ret;
3292 		}
3293 	}
3294 
3295 	return 0;
3296 }
3297 
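/* Bind nb_queue Rx/Tx queue pairs, starting at base_queue, to the MSI-X
 * vector msix_vect and program ITR0, using a lower throttling interval when
 * the rx_low_latency devarg is set.
 */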
3298 static void
3299 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3300 		       int base_queue, int nb_queue)
3301 {
3302 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3303 	uint32_t val, val_tx;
3304 	int rx_low_latency, i;
3305 
3306 	rx_low_latency = vsi->adapter->devargs.rx_low_latency;
3307 	for (i = 0; i < nb_queue; i++) {
3308 		/* do the actual binding */
3309 		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3310 		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3311 		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3312 			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3313 
3314 		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3315 			    base_queue + i, msix_vect);
3316 
3317 		/* set ITR0 value */
3318 		if (rx_low_latency) {
3319 			/**
3320 			 * Empirical configuration for optimal real-time
3321 			 * latency: reduce interrupt throttling to 2us
3322 			 */
3323 			ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x1);
3324 			ICE_WRITE_REG(hw, QRX_ITR(base_queue + i),
3325 				      QRX_ITR_NO_EXPR_M);
3326 		} else {
3327 			ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
3328 			ICE_WRITE_REG(hw, QRX_ITR(base_queue + i), 0);
3329 		}
3330 
3331 		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3332 		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3333 	}
3334 }
3335 
3336 void
3337 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3338 {
3339 	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3340 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3341 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3342 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3343 	uint16_t msix_vect = vsi->msix_intr;
3344 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
3345 	uint16_t queue_idx = 0;
3346 	int record = 0;
3347 	int i;
3348 
3349 	/* clear Rx/Tx queue interrupt */
3350 	for (i = 0; i < vsi->nb_used_qps; i++) {
3351 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3352 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3353 	}
3354 
3355 	/* PF bind interrupt */
3356 	if (rte_intr_dp_is_en(intr_handle)) {
3357 		queue_idx = 0;
3358 		record = 1;
3359 	}
3360 
3361 	for (i = 0; i < vsi->nb_used_qps; i++) {
3362 		if (nb_msix <= 1) {
3363 			if (!rte_intr_allow_others(intr_handle))
3364 				msix_vect = ICE_MISC_VEC_ID;
3365 
3366 			/* uio: map all queues to one msix_vect */
3367 			__vsi_queues_bind_intr(vsi, msix_vect,
3368 					       vsi->base_queue + i,
3369 					       vsi->nb_used_qps - i);
3370 
3371 			for (; !!record && i < vsi->nb_used_qps; i++)
3372 				intr_handle->intr_vec[queue_idx + i] =
3373 					msix_vect;
3374 			break;
3375 		}
3376 
3377 		/* vfio 1:1 queue/msix_vect mapping */
3378 		__vsi_queues_bind_intr(vsi, msix_vect,
3379 				       vsi->base_queue + i, 1);
3380 
3381 		if (!!record)
3382 			intr_handle->intr_vec[queue_idx + i] = msix_vect;
3383 
3384 		msix_vect++;
3385 		nb_msix--;
3386 	}
3387 }
3388 
3389 void
3390 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3391 {
3392 	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3393 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3394 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3395 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3396 	uint16_t msix_intr, i;
3397 
3398 	if (rte_intr_allow_others(intr_handle))
3399 		for (i = 0; i < vsi->nb_used_qps; i++) {
3400 			msix_intr = vsi->msix_intr + i;
3401 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3402 				      GLINT_DYN_CTL_INTENA_M |
3403 				      GLINT_DYN_CTL_CLEARPBA_M |
3404 				      GLINT_DYN_CTL_ITR_INDX_M |
3405 				      GLINT_DYN_CTL_WB_ON_ITR_M);
3406 		}
3407 	else
3408 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3409 			      GLINT_DYN_CTL_INTENA_M |
3410 			      GLINT_DYN_CTL_CLEARPBA_M |
3411 			      GLINT_DYN_CTL_ITR_INDX_M |
3412 			      GLINT_DYN_CTL_WB_ON_ITR_M);
3413 }
3414 
3415 static int
3416 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3417 {
3418 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3419 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3420 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3421 	struct ice_vsi *vsi = pf->main_vsi;
3422 	uint32_t intr_vector = 0;
3423 
3424 	rte_intr_disable(intr_handle);
3425 
3426 	/* check and configure queue intr-vector mapping */
3427 	if ((rte_intr_cap_multiple(intr_handle) ||
3428 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
3429 	    dev->data->dev_conf.intr_conf.rxq != 0) {
3430 		intr_vector = dev->data->nb_rx_queues;
3431 		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3432 			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3433 				    ICE_MAX_INTR_QUEUE_NUM);
3434 			return -ENOTSUP;
3435 		}
3436 		if (rte_intr_efd_enable(intr_handle, intr_vector))
3437 			return -1;
3438 	}
3439 
3440 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3441 		intr_handle->intr_vec =
3442 		rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
3443 			    0);
3444 		if (!intr_handle->intr_vec) {
3445 			PMD_DRV_LOG(ERR,
3446 				    "Failed to allocate %d rx_queues intr_vec",
3447 				    dev->data->nb_rx_queues);
3448 			return -ENOMEM;
3449 		}
3450 	}
3451 
3452 	/* Map queues with MSIX interrupt */
3453 	vsi->nb_used_qps = dev->data->nb_rx_queues;
3454 	ice_vsi_queues_bind_intr(vsi);
3455 
3456 	/* Enable interrupts for all the queues */
3457 	ice_vsi_enable_queues_intr(vsi);
3458 
3459 	rte_intr_enable(intr_handle);
3460 
3461 	return 0;
3462 }
3463 
3464 static void
3465 ice_get_init_link_status(struct rte_eth_dev *dev)
3466 {
3467 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3468 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3469 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3470 	struct ice_link_status link_status;
3471 	int ret;
3472 
3473 	ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3474 				   &link_status, NULL);
3475 	if (ret != ICE_SUCCESS) {
3476 		PMD_DRV_LOG(ERR, "Failed to get link info");
3477 		pf->init_link_up = false;
3478 		return;
3479 	}
3480 
3481 	if (link_status.link_info & ICE_AQ_LINK_UP)
3482 		pf->init_link_up = true;
3483 }
3484 
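/* Configure the 1PPS output: read the current PHC time, round up to the next
 * full second (minus PPS_OUT_DELAY_NS), program a half-second clock-out
 * period and the target time, and route the signal to GPIO pin idx via
 * GLGEN_GPIO_CTL.
 */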
3485 static int
3486 ice_pps_out_cfg(struct ice_hw *hw, int idx, int timer)
3487 {
3488 	uint64_t current_time, start_time;
3489 	uint32_t hi, lo, lo2, func, val;
3490 
3491 	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3492 	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer));
3493 	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3494 
3495 	if (lo2 < lo) {
3496 		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3497 		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer));
3498 	}
3499 
3500 	current_time = ((uint64_t)hi << 32) | lo;
3501 
3502 	start_time = (current_time + NSEC_PER_SEC) /
3503 			NSEC_PER_SEC * NSEC_PER_SEC;
3504 	start_time = start_time - PPS_OUT_DELAY_NS;
3505 
3506 	func = 8 + idx + timer * 4;
3507 	val = GLGEN_GPIO_CTL_PIN_DIR_M |
3508 		((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
3509 		GLGEN_GPIO_CTL_PIN_FUNC_M);
3510 
3511 	/* Write clkout with half of period value */
3512 	ICE_WRITE_REG(hw, GLTSYN_CLKO(idx, timer), NSEC_PER_SEC / 2);
3513 
3514 	/* Write TARGET time register */
3515 	ICE_WRITE_REG(hw, GLTSYN_TGT_L(idx, timer), start_time & 0xffffffff);
3516 	ICE_WRITE_REG(hw, GLTSYN_TGT_H(idx, timer), start_time >> 32);
3517 
3518 	/* Write AUX_OUT register */
3519 	ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(idx, timer),
3520 		      GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M);
3521 
3522 	/* Write GPIO CTL register */
3523 	ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(idx), val);
3524 
3525 	return 0;
3526 }
3527 
3528 static int
3529 ice_dev_start(struct rte_eth_dev *dev)
3530 {
3531 	struct rte_eth_dev_data *data = dev->data;
3532 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3533 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3534 	struct ice_vsi *vsi = pf->main_vsi;
3535 	struct ice_adapter *ad =
3536 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3537 	uint16_t nb_rxq = 0;
3538 	uint16_t nb_txq, i;
3539 	uint16_t max_frame_size;
3540 	int mask, ret;
3541 	uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
3542 	uint32_t pin_idx = ad->devargs.pin_idx;
3543 
3544 	/* program Tx queues' context in hardware */
3545 	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3546 		ret = ice_tx_queue_start(dev, nb_txq);
3547 		if (ret) {
3548 			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
3549 			goto tx_err;
3550 		}
3551 	}
3552 
3553 	/* program Rx queues' context in hardware */
3554 	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3555 		ret = ice_rx_queue_start(dev, nb_rxq);
3556 		if (ret) {
3557 			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
3558 			goto rx_err;
3559 		}
3560 	}
3561 
3562 	ice_set_rx_function(dev);
3563 	ice_set_tx_function(dev);
3564 
3565 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
3566 			ETH_VLAN_EXTEND_MASK;
3567 	ret = ice_vlan_offload_set(dev, mask);
3568 	if (ret) {
3569 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3570 		goto rx_err;
3571 	}
3572 
3573 	/* enable Rx interrupts and map Rx queues to interrupt vectors */
3574 	if (ice_rxq_intr_setup(dev))
3575 		return -EIO;
3576 
3577 	/* Enable receiving broadcast packets and transmitting packets */
3578 	ret = ice_set_vsi_promisc(hw, vsi->idx,
3579 				  ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3580 				  ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3581 				  0);
3582 	if (ret != ICE_SUCCESS)
3583 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
3584 
3585 	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3586 				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3587 				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3588 				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3589 				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3590 				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
3591 				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3592 				     NULL);
3593 	if (ret != ICE_SUCCESS)
3594 		PMD_DRV_LOG(WARNING, "Failed to set PHY event mask");
3595 
3596 	ice_get_init_link_status(dev);
3597 
3598 	ice_dev_set_link_up(dev);
3599 
3600 	/* Call the get_link_info AQ command to enable/disable LSE */
3601 	ice_link_update(dev, 0);
3602 
3603 	pf->adapter_stopped = false;
3604 
3605 	/* Use the configured max Rx packet length, or the default if unset */
3606 	max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
3607 		pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
3608 		ICE_FRAME_SIZE_MAX;
3609 
3610 	/* Program the max frame size into hardware */
3611 	ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3612 
3613 	if (ad->devargs.pps_out_ena) {
3614 		ret = ice_pps_out_cfg(hw, pin_idx, timer);
3615 		if (ret) {
3616 			PMD_DRV_LOG(ERR, "Failed to configure 1PPS output");
3617 			goto rx_err;
3618 		}
3619 	}
3620 
3621 	return 0;
3622 
3623 	/* roll back: stop any queues that were already started */
3624 rx_err:
3625 	for (i = 0; i < nb_rxq; i++)
3626 		ice_rx_queue_stop(dev, i);
3627 tx_err:
3628 	for (i = 0; i < nb_txq; i++)
3629 		ice_tx_queue_stop(dev, i);
3630 
3631 	return -EIO;
3632 }
3633 
3634 static int
3635 ice_dev_reset(struct rte_eth_dev *dev)
3636 {
3637 	int ret;
3638 
3639 	if (dev->data->sriov.active)
3640 		return -ENOTSUP;
3641 
3642 	ret = ice_dev_uninit(dev);
3643 	if (ret) {
3644 		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3645 		return -ENXIO;
3646 	}
3647 
3648 	ret = ice_dev_init(dev);
3649 	if (ret) {
3650 		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3651 		return -ENXIO;
3652 	}
3653 
3654 	return 0;
3655 }
3656 
3657 static int
3658 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3659 {
3660 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3661 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3662 	struct ice_vsi *vsi = pf->main_vsi;
3663 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3664 	bool is_safe_mode = pf->adapter->is_safe_mode;
3665 	u64 phy_type_low;
3666 	u64 phy_type_high;
3667 
3668 	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3669 	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3670 	dev_info->max_rx_queues = vsi->nb_qps;
3671 	dev_info->max_tx_queues = vsi->nb_qps;
3672 	dev_info->max_mac_addrs = vsi->max_macaddrs;
3673 	dev_info->max_vfs = pci_dev->max_vfs;
3674 	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3675 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3676 
3677 	dev_info->rx_offload_capa =
3678 		DEV_RX_OFFLOAD_VLAN_STRIP |
3679 		DEV_RX_OFFLOAD_JUMBO_FRAME |
3680 		DEV_RX_OFFLOAD_KEEP_CRC |
3681 		DEV_RX_OFFLOAD_SCATTER |
3682 		DEV_RX_OFFLOAD_VLAN_FILTER;
3683 	dev_info->tx_offload_capa =
3684 		DEV_TX_OFFLOAD_VLAN_INSERT |
3685 		DEV_TX_OFFLOAD_TCP_TSO |
3686 		DEV_TX_OFFLOAD_MULTI_SEGS |
3687 		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3688 	dev_info->flow_type_rss_offloads = 0;
3689 
3690 	if (!is_safe_mode) {
3691 		dev_info->rx_offload_capa |=
3692 			DEV_RX_OFFLOAD_IPV4_CKSUM |
3693 			DEV_RX_OFFLOAD_UDP_CKSUM |
3694 			DEV_RX_OFFLOAD_TCP_CKSUM |
3695 			DEV_RX_OFFLOAD_QINQ_STRIP |
3696 			DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3697 			DEV_RX_OFFLOAD_VLAN_EXTEND |
3698 			DEV_RX_OFFLOAD_RSS_HASH |
3699 			DEV_RX_OFFLOAD_TIMESTAMP;
3700 		dev_info->tx_offload_capa |=
3701 			DEV_TX_OFFLOAD_QINQ_INSERT |
3702 			DEV_TX_OFFLOAD_IPV4_CKSUM |
3703 			DEV_TX_OFFLOAD_UDP_CKSUM |
3704 			DEV_TX_OFFLOAD_TCP_CKSUM |
3705 			DEV_TX_OFFLOAD_SCTP_CKSUM |
3706 			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3707 			DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
3708 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3709 	}
3710 
3711 	dev_info->rx_queue_offload_capa = 0;
3712 	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3713 
3714 	dev_info->reta_size = pf->hash_lut_size;
3715 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3716 
3717 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3718 		.rx_thresh = {
3719 			.pthresh = ICE_DEFAULT_RX_PTHRESH,
3720 			.hthresh = ICE_DEFAULT_RX_HTHRESH,
3721 			.wthresh = ICE_DEFAULT_RX_WTHRESH,
3722 		},
3723 		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3724 		.rx_drop_en = 0,
3725 		.offloads = 0,
3726 	};
3727 
3728 	dev_info->default_txconf = (struct rte_eth_txconf) {
3729 		.tx_thresh = {
3730 			.pthresh = ICE_DEFAULT_TX_PTHRESH,
3731 			.hthresh = ICE_DEFAULT_TX_HTHRESH,
3732 			.wthresh = ICE_DEFAULT_TX_WTHRESH,
3733 		},
3734 		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3735 		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3736 		.offloads = 0,
3737 	};
3738 
3739 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3740 		.nb_max = ICE_MAX_RING_DESC,
3741 		.nb_min = ICE_MIN_RING_DESC,
3742 		.nb_align = ICE_ALIGN_RING_DESC,
3743 	};
3744 
3745 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3746 		.nb_max = ICE_MAX_RING_DESC,
3747 		.nb_min = ICE_MIN_RING_DESC,
3748 		.nb_align = ICE_ALIGN_RING_DESC,
3749 	};
3750 
3751 	dev_info->speed_capa = ETH_LINK_SPEED_10M |
3752 			       ETH_LINK_SPEED_100M |
3753 			       ETH_LINK_SPEED_1G |
3754 			       ETH_LINK_SPEED_2_5G |
3755 			       ETH_LINK_SPEED_5G |
3756 			       ETH_LINK_SPEED_10G |
3757 			       ETH_LINK_SPEED_20G |
3758 			       ETH_LINK_SPEED_25G;
3759 
3760 	phy_type_low = hw->port_info->phy.phy_type_low;
3761 	phy_type_high = hw->port_info->phy.phy_type_high;
3762 
3763 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3764 		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
3765 
3766 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3767 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3768 		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
3769 
3770 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3771 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3772 
3773 	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3774 	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3775 	dev_info->default_rxportconf.nb_queues = 1;
3776 	dev_info->default_txportconf.nb_queues = 1;
3777 	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3778 	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3779 
3780 	return 0;
3781 }
3782 
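/* struct rte_eth_link fits in 64 bits, so a single 64-bit compare-and-set is
 * enough to read or publish the whole link status atomically with respect to
 * concurrent readers and writers.
 */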
3783 static inline int
3784 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3785 			    struct rte_eth_link *link)
3786 {
3787 	struct rte_eth_link *dst = link;
3788 	struct rte_eth_link *src = &dev->data->dev_link;
3789 
3790 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3791 				*(uint64_t *)src) == 0)
3792 		return -1;
3793 
3794 	return 0;
3795 }
3796 
3797 static inline int
3798 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3799 			     struct rte_eth_link *link)
3800 {
3801 	struct rte_eth_link *dst = &dev->data->dev_link;
3802 	struct rte_eth_link *src = link;
3803 
3804 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3805 				*(uint64_t *)src) == 0)
3806 		return -1;
3807 
3808 	return 0;
3809 }
3810 
3811 static int
3812 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3813 {
3814 #define CHECK_INTERVAL 100  /* 100ms */
3815 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
3816 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3817 	struct ice_link_status link_status;
3818 	struct rte_eth_link link, old;
3819 	int status;
3820 	unsigned int rep_cnt = MAX_REPEAT_TIME;
3821 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3822 
3823 	memset(&link, 0, sizeof(link));
3824 	memset(&old, 0, sizeof(old));
3825 	memset(&link_status, 0, sizeof(link_status));
3826 	ice_atomic_read_link_status(dev, &old);
3827 
3828 	do {
3829 		/* Get link status information from hardware */
3830 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
3831 					      &link_status, NULL);
3832 		if (status != ICE_SUCCESS) {
3833 			link.link_speed = ETH_SPEED_NUM_100M;
3834 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
3835 			PMD_DRV_LOG(ERR, "Failed to get link info");
3836 			goto out;
3837 		}
3838 
3839 		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3840 		if (!wait_to_complete || link.link_status)
3841 			break;
3842 
3843 		rte_delay_ms(CHECK_INTERVAL);
3844 	} while (--rep_cnt);
3845 
3846 	if (!link.link_status)
3847 		goto out;
3848 
3849 	/* Full-duplex operation at all supported speeds */
3850 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
3851 
3852 	/* Parse the link status */
3853 	switch (link_status.link_speed) {
3854 	case ICE_AQ_LINK_SPEED_10MB:
3855 		link.link_speed = ETH_SPEED_NUM_10M;
3856 		break;
3857 	case ICE_AQ_LINK_SPEED_100MB:
3858 		link.link_speed = ETH_SPEED_NUM_100M;
3859 		break;
3860 	case ICE_AQ_LINK_SPEED_1000MB:
3861 		link.link_speed = ETH_SPEED_NUM_1G;
3862 		break;
3863 	case ICE_AQ_LINK_SPEED_2500MB:
3864 		link.link_speed = ETH_SPEED_NUM_2_5G;
3865 		break;
3866 	case ICE_AQ_LINK_SPEED_5GB:
3867 		link.link_speed = ETH_SPEED_NUM_5G;
3868 		break;
3869 	case ICE_AQ_LINK_SPEED_10GB:
3870 		link.link_speed = ETH_SPEED_NUM_10G;
3871 		break;
3872 	case ICE_AQ_LINK_SPEED_20GB:
3873 		link.link_speed = ETH_SPEED_NUM_20G;
3874 		break;
3875 	case ICE_AQ_LINK_SPEED_25GB:
3876 		link.link_speed = ETH_SPEED_NUM_25G;
3877 		break;
3878 	case ICE_AQ_LINK_SPEED_40GB:
3879 		link.link_speed = ETH_SPEED_NUM_40G;
3880 		break;
3881 	case ICE_AQ_LINK_SPEED_50GB:
3882 		link.link_speed = ETH_SPEED_NUM_50G;
3883 		break;
3884 	case ICE_AQ_LINK_SPEED_100GB:
3885 		link.link_speed = ETH_SPEED_NUM_100G;
3886 		break;
3887 	case ICE_AQ_LINK_SPEED_UNKNOWN:
3888 		PMD_DRV_LOG(ERR, "Unknown link speed");
3889 		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
3890 		break;
3891 	default:
3892 		PMD_DRV_LOG(ERR, "Unrecognized link speed");
3893 		link.link_speed = ETH_SPEED_NUM_NONE;
3894 		break;
3895 	}
3896 
3897 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3898 			      ETH_LINK_SPEED_FIXED);
3899 
3900 out:
3901 	ice_atomic_write_link_status(dev, &link);
3902 	if (link.link_status == old.link_status)
3903 		return -1;
3904 
3905 	return 0;
3906 }
3907 
3908 /* Force the physical link state by getting the current PHY capabilities from
3909  * hardware and setting the PHY config based on the determined capabilities. If
3910  * link changes, link event will be triggered because both the Enable Automatic
3911  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3912  */
3913 static enum ice_status
3914 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3915 {
3916 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3917 	struct ice_aqc_get_phy_caps_data *pcaps;
3918 	struct ice_port_info *pi;
3919 	enum ice_status status;
3920 
3921 	if (!hw || !hw->port_info)
3922 		return ICE_ERR_PARAM;
3923 
3924 	pi = hw->port_info;
3925 
3926 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3927 		ice_malloc(hw, sizeof(*pcaps));
3928 	if (!pcaps)
3929 		return ICE_ERR_NO_MEMORY;
3930 
3931 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3932 				     pcaps, NULL);
3933 	if (status)
3934 		goto out;
3935 
3936 	/* No change in link */
3937 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3938 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3939 		goto out;
3940 
3941 	cfg.phy_type_low = pcaps->phy_type_low;
3942 	cfg.phy_type_high = pcaps->phy_type_high;
3943 	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3944 	cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3945 	cfg.eee_cap = pcaps->eee_cap;
3946 	cfg.eeer_value = pcaps->eeer_value;
3947 	cfg.link_fec_opt = pcaps->link_fec_options;
3948 	if (link_up)
3949 		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3950 	else
3951 		cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3952 
3953 	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3954 
3955 out:
3956 	ice_free(hw, pcaps);
3957 	return status;
3958 }
3959 
3960 static int
3961 ice_dev_set_link_up(struct rte_eth_dev *dev)
3962 {
3963 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3964 
3965 	return ice_force_phys_link_state(hw, true);
3966 }
3967 
3968 static int
3969 ice_dev_set_link_down(struct rte_eth_dev *dev)
3970 {
3971 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3972 
3973 	return ice_force_phys_link_state(hw, false);
3974 }
3975 
3976 static int
3977 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3978 {
3979 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3980 	struct rte_eth_dev_data *dev_data = pf->dev_data;
3981 	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
3982 
3983 	/* check if mtu is within the allowed range */
3984 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
3985 		return -EINVAL;
3986 
3987 	/* MTU cannot be changed while the port is started */
3988 	if (dev_data->dev_started) {
3989 		PMD_DRV_LOG(ERR,
3990 			    "port %d must be stopped before configuration",
3991 			    dev_data->port_id);
3992 		return -EBUSY;
3993 	}
3994 
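	/* keep the JUMBO_FRAME offload flag in sync with the new frame size */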
3995 	if (frame_size > ICE_ETH_MAX_LEN)
3996 		dev_data->dev_conf.rxmode.offloads |=
3997 			DEV_RX_OFFLOAD_JUMBO_FRAME;
3998 	else
3999 		dev_data->dev_conf.rxmode.offloads &=
4000 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
4001 
4002 	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4003 
4004 	return 0;
4005 }
4006 
4007 static int ice_macaddr_set(struct rte_eth_dev *dev,
4008 			   struct rte_ether_addr *mac_addr)
4009 {
4010 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4011 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4012 	struct ice_vsi *vsi = pf->main_vsi;
4013 	struct ice_mac_filter *f;
4014 	uint8_t flags = 0;
4015 	int ret;
4016 
4017 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
4018 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
4019 		return -EINVAL;
4020 	}
4021 
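	/* locate the current default MAC filter so it can be replaced */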
4022 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
4023 		if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
4024 			break;
4025 	}
4026 
4027 	if (!f) {
4028 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
4029 		return -EIO;
4030 	}
4031 
4032 	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
4033 	if (ret != ICE_SUCCESS) {
4034 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
4035 		return -EIO;
4036 	}
4037 	ret = ice_add_mac_filter(vsi, mac_addr);
4038 	if (ret != ICE_SUCCESS) {
4039 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
4040 		return -EIO;
4041 	}
4042 	rte_ether_addr_copy(mac_addr, &pf->dev_addr);
4043 
4044 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
4045 	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
4046 	if (ret != ICE_SUCCESS)
4047 		PMD_DRV_LOG(ERR, "Failed to set manage mac");
4048 
4049 	return 0;
4050 }
4051 
4052 /* Add a MAC address, and update filters */
4053 static int
4054 ice_macaddr_add(struct rte_eth_dev *dev,
4055 		struct rte_ether_addr *mac_addr,
4056 		__rte_unused uint32_t index,
4057 		__rte_unused uint32_t pool)
4058 {
4059 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4060 	struct ice_vsi *vsi = pf->main_vsi;
4061 	int ret;
4062 
4063 	ret = ice_add_mac_filter(vsi, mac_addr);
4064 	if (ret != ICE_SUCCESS) {
4065 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
4066 		return -EINVAL;
4067 	}
4068 
4069 	return ICE_SUCCESS;
4070 }
4071 
4072 /* Remove a MAC address, and update filters */
4073 static void
4074 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4075 {
4076 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4077 	struct ice_vsi *vsi = pf->main_vsi;
4078 	struct rte_eth_dev_data *data = dev->data;
4079 	struct rte_ether_addr *macaddr;
4080 	int ret;
4081 
4082 	macaddr = &data->mac_addrs[index];
4083 	ret = ice_remove_mac_filter(vsi, macaddr);
4084 	if (ret) {
4085 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
4086 		return;
4087 	}
4088 }
4089 
4090 static int
4091 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
4092 {
4093 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4094 	struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
4095 	struct ice_vsi *vsi = pf->main_vsi;
4096 	int ret;
4097 
4098 	PMD_INIT_FUNC_TRACE();
4099 
4100 	/**
4101 	 * VLAN 0 is the generic filter for untagged packets
4102 	 * and cannot be added or removed by the user.
4103 	 */
4104 	if (vlan_id == 0)
4105 		return 0;
4106 
4107 	if (on) {
4108 		ret = ice_add_vlan_filter(vsi, &vlan);
4109 		if (ret < 0) {
4110 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
4111 			return -EINVAL;
4112 		}
4113 	} else {
4114 		ret = ice_remove_vlan_filter(vsi, &vlan);
4115 		if (ret < 0) {
4116 			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
4117 			return -EINVAL;
4118 		}
4119 	}
4120 
4121 	return 0;
4122 }
4123 
4124 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
4125  * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
4126  * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
4127  * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
4128  *
4129  * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
4130  * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
4131  * traffic in SVM, since the VLAN TPID isn't part of filtering.
4132  *
4133  * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
4134  * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
4135  * part of filtering.
4136  */
4137 static int
4138 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
4139 {
4140 	struct ice_vlan vlan;
4141 	int err;
4142 
4143 	vlan = ICE_VLAN(0, 0);
4144 	err = ice_add_vlan_filter(vsi, &vlan);
4145 	if (err) {
4146 		PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
4147 		return err;
4148 	}
4149 
4150 	/* in SVM both VLAN 0 filters are identical */
4151 	if (!ice_is_dvm_ena(&vsi->adapter->hw))
4152 		return 0;
4153 
4154 	vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4155 	err = ice_add_vlan_filter(vsi, &vlan);
4156 	if (err) {
4157 		PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
4158 		return err;
4159 	}
4160 
4161 	return 0;
4162 }
4163 
4164 /*
4165  * Delete the VLAN 0 filters in the same manner that they were added in
4166  * ice_vsi_add_vlan_zero.
4167  */
4168 static int
4169 ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
4170 {
4171 	struct ice_vlan vlan;
4172 	int err;
4173 
4174 	vlan = ICE_VLAN(0, 0);
4175 	err = ice_remove_vlan_filter(vsi, &vlan);
4176 	if (err) {
4177 		PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0");
4178 		return err;
4179 	}
4180 
4181 	/* in SVM both VLAN 0 filters are identical */
4182 	if (!ice_is_dvm_ena(&vsi->adapter->hw))
4183 		return 0;
4184 
4185 	vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4186 	err = ice_remove_vlan_filter(vsi, &vlan);
4187 	if (err) {
4188 		PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode");
4189 		return err;
4190 	}
4191 
4192 	return 0;
4193 }
4194 
4195 /* Configure vlan filter on or off */
4196 static int
4197 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
4198 {
4199 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4200 	struct ice_vsi_ctx ctxt;
4201 	uint8_t sw_flags2;
4202 	int ret = 0;
4203 
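	/* VLAN pruning drops Rx packets whose VLAN ID has no filter on the VSI;
	 * toggling it is how the VLAN filter offload is switched on and off.
	 */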
4204 	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4205 
4206 	if (on)
4207 		vsi->info.sw_flags2 |= sw_flags2;
4208 	else
4209 		vsi->info.sw_flags2 &= ~sw_flags2;
4210 
4211 	vsi->info.sw_id = hw->port_info->sw_id;
4212 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4213 	ctxt.info.valid_sections =
4214 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4215 				 ICE_AQ_VSI_PROP_SECURITY_VALID);
4216 	ctxt.vsi_num = vsi->vsi_id;
4217 
4218 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4219 	if (ret) {
4220 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
4221 			    on ? "enable" : "disable");
4222 		return -EINVAL;
4223 	} else {
4224 		vsi->info.valid_sections |=
4225 			rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4226 					 ICE_AQ_VSI_PROP_SECURITY_VALID);
4227 	}
4228 
4229 	/* match other drivers: allow untagged packets when the VLAN filter is on */
4230 	if (on)
4231 		ret = ice_vsi_add_vlan_zero(vsi);
4232 	else
4233 		ret = ice_vsi_del_vlan_zero(vsi);
4234 
4235 	return ret;
4236 }
4237 
4238 /* Manage VLAN stripping for the VSI for Rx */
4239 static int
4240 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4241 {
4242 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4243 	struct ice_vsi_ctx ctxt;
4244 	enum ice_status status;
4245 	int err = 0;
4246 
4247 	/* do not allow modifying VLAN stripping when a port VLAN is configured
4248 	 * on this VSI
4249 	 */
4250 	if (vsi->info.port_based_inner_vlan)
4251 		return 0;
4252 
4253 	memset(&ctxt, 0, sizeof(ctxt));
4254 
4255 	if (ena)
4256 		/* Strip VLAN tag from Rx packet and put it in the desc */
4257 		ctxt.info.inner_vlan_flags =
4258 					ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
4259 	else
4260 		/* Disable stripping. Leave tag in packet */
4261 		ctxt.info.inner_vlan_flags =
4262 					ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4263 
4264 	/* Allow all packets untagged/tagged */
4265 	ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
4266 
4267 	ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4268 
4269 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4270 	if (status) {
4271 		PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping",
4272 			    ena ? "enable" : "disable");
4273 		err = -EIO;
4274 	} else {
4275 		vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags;
4276 	}
4277 
4278 	return err;
4279 }
4280 
4281 static int
4282 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi)
4283 {
4284 	return ice_vsi_manage_vlan_stripping(vsi, true);
4285 }
4286 
4287 static int
4288 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
4289 {
4290 	return ice_vsi_manage_vlan_stripping(vsi, false);
4291 }
4292 
4293 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi)
4294 {
4295 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4296 	struct ice_vsi_ctx ctxt;
4297 	enum ice_status status;
4298 	int err = 0;
4299 
4300 	/* do not allow modifying VLAN stripping when a port VLAN is configured
4301 	 * on this VSI
4302 	 */
4303 	if (vsi->info.port_based_outer_vlan)
4304 		return 0;
4305 
4306 	memset(&ctxt, 0, sizeof(ctxt));
4307 
4308 	ctxt.info.valid_sections =
4309 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4310 	/* clear current outer VLAN strip settings */
4311 	ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4312 		~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
4313 	ctxt.info.outer_vlan_flags |=
4314 		(ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
4315 		 ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
4316 		(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
4317 		 ICE_AQ_VSI_OUTER_TAG_TYPE_S);
4318 
4319 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4320 	if (status) {
4321 		PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping");
4322 		err = -EIO;
4323 	} else {
4324 		vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4325 	}
4326 
4327 	return err;
4328 }
4329 
4330 static int
4331 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
4332 {
4333 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4334 	struct ice_vsi_ctx ctxt;
4335 	enum ice_status status;
4336 	int err = 0;
4337 
4338 	if (vsi->info.port_based_outer_vlan)
4339 		return 0;
4340 
4341 	memset(&ctxt, 0, sizeof(ctxt));
4342 
4343 	ctxt.info.valid_sections =
4344 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4345 	/* clear current outer VLAN strip settings */
4346 	ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4347 		~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
4348 	ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
4349 		ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
4350 
4351 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4352 	if (status) {
4353 		PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping");
4354 		err = -EIO;
4355 	} else {
4356 		vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4357 	}
4358 
4359 	return err;
4360 }
4361 
4362 static int
4363 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena)
4364 {
4365 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4366 	int ret;
4367 
4368 	if (ice_is_dvm_ena(hw)) {
4369 		if (ena)
4370 			ret = ice_vsi_ena_outer_stripping(vsi);
4371 		else
4372 			ret = ice_vsi_dis_outer_stripping(vsi);
4373 	} else {
4374 		if (ena)
4375 			ret = ice_vsi_ena_inner_stripping(vsi);
4376 		else
4377 			ret = ice_vsi_dis_inner_stripping(vsi);
4378 	}
4379 
4380 	return ret;
4381 }
4382 
4383 static int
4384 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4385 {
4386 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4387 	struct ice_vsi *vsi = pf->main_vsi;
4388 	struct rte_eth_rxmode *rxmode;
4389 
4390 	rxmode = &dev->data->dev_conf.rxmode;
4391 	if (mask & ETH_VLAN_FILTER_MASK) {
4392 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
4393 			ice_vsi_config_vlan_filter(vsi, true);
4394 		else
4395 			ice_vsi_config_vlan_filter(vsi, false);
4396 	}
4397 
4398 	if (mask & ETH_VLAN_STRIP_MASK) {
4399 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4400 			ice_vsi_config_vlan_stripping(vsi, true);
4401 		else
4402 			ice_vsi_config_vlan_stripping(vsi, false);
4403 	}
4404 
4405 	return 0;
4406 }
4407 
4408 static int
4409 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4410 {
4411 	struct ice_aq_get_set_rss_lut_params lut_params;
4412 	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4413 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4414 	int ret;
4415 
4416 	if (!lut)
4417 		return -EINVAL;
4418 
4419 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4420 		lut_params.vsi_handle = vsi->idx;
4421 		lut_params.lut_size = lut_size;
4422 		lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4423 		lut_params.lut = lut;
4424 		lut_params.global_lut_id = 0;
4425 		ret = ice_aq_get_rss_lut(hw, &lut_params);
4426 		if (ret) {
4427 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4428 			return -EINVAL;
4429 		}
4430 	} else {
4431 		uint64_t *lut_dw = (uint64_t *)lut;
4432 		uint16_t i, lut_size_dw = lut_size / 4;
4433 
4434 		for (i = 0; i < lut_size_dw; i++)
4435 			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4436 	}
4437 
4438 	return 0;
4439 }
4440 
4441 static int
4442 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4443 {
4444 	struct ice_aq_get_set_rss_lut_params lut_params;
4445 	struct ice_pf *pf;
4446 	struct ice_hw *hw;
4447 	int ret;
4448 
4449 	if (!vsi || !lut)
4450 		return -EINVAL;
4451 
4452 	pf = ICE_VSI_TO_PF(vsi);
4453 	hw = ICE_VSI_TO_HW(vsi);
4454 
4455 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4456 		lut_params.vsi_handle = vsi->idx;
4457 		lut_params.lut_size = lut_size;
4458 		lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4459 		lut_params.lut = lut;
4460 		lut_params.global_lut_id = 0;
4461 		ret = ice_aq_set_rss_lut(hw, &lut_params);
4462 		if (ret) {
4463 			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4464 			return -EINVAL;
4465 		}
4466 	} else {
4467 		uint64_t *lut_dw = (uint64_t *)lut;
4468 		uint16_t i, lut_size_dw = lut_size / 4;
4469 
4470 		for (i = 0; i < lut_size_dw; i++)
4471 			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4472 
4473 		ice_flush(hw);
4474 	}
4475 
4476 	return 0;
4477 }
4478 
4479 static int
4480 ice_rss_reta_update(struct rte_eth_dev *dev,
4481 		    struct rte_eth_rss_reta_entry64 *reta_conf,
4482 		    uint16_t reta_size)
4483 {
4484 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4485 	uint16_t i, lut_size = pf->hash_lut_size;
4486 	uint16_t idx, shift;
4487 	uint8_t *lut;
4488 	int ret;
4489 
4490 	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4491 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4492 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4493 		PMD_DRV_LOG(ERR,
4494 			    "The size of the configured hash lookup table (%d) "
4495 			    "doesn't match a size the hardware "
4496 			    "supports (128, 512, 2048)",
4497 			    reta_size);
4498 		return -EINVAL;
4499 	}
4500 
4501 	/* The current LUT size MUST be used when reading the RSS lookup table,
4502 	 * otherwise the request fails with error code -100.
4503 	 */
4504 	lut = rte_zmalloc(NULL,  RTE_MAX(reta_size, lut_size), 0);
4505 	if (!lut) {
4506 		PMD_DRV_LOG(ERR, "Failed to allocate memory for the lookup table");
4507 		return -ENOMEM;
4508 	}
4509 	ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
4510 	if (ret)
4511 		goto out;
4512 
4513 	for (i = 0; i < reta_size; i++) {
4514 		idx = i / RTE_RETA_GROUP_SIZE;
4515 		shift = i % RTE_RETA_GROUP_SIZE;
4516 		if (reta_conf[idx].mask & (1ULL << shift))
4517 			lut[i] = reta_conf[idx].reta[shift];
4518 	}
4519 	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4520 	if (ret == 0 && lut_size != reta_size) {
4521 		PMD_DRV_LOG(INFO,
4522 			    "The size of hash lookup table is changed from (%d) to (%d)",
4523 			    lut_size, reta_size);
4524 		pf->hash_lut_size = reta_size;
4525 	}
4526 
4527 out:
4528 	rte_free(lut);
4529 
4530 	return ret;
4531 }
4532 
4533 static int
4534 ice_rss_reta_query(struct rte_eth_dev *dev,
4535 		   struct rte_eth_rss_reta_entry64 *reta_conf,
4536 		   uint16_t reta_size)
4537 {
4538 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4539 	uint16_t i, lut_size = pf->hash_lut_size;
4540 	uint16_t idx, shift;
4541 	uint8_t *lut;
4542 	int ret;
4543 
4544 	if (reta_size != lut_size) {
4545 		PMD_DRV_LOG(ERR,
4546 			    "The size of the configured hash lookup table (%d) "
4547 			    "doesn't match the size the hardware "
4548 			    "supports (%d)",
4549 			    reta_size, lut_size);
4550 		return -EINVAL;
4551 	}
4552 
4553 	lut = rte_zmalloc(NULL, reta_size, 0);
4554 	if (!lut) {
4555 		PMD_DRV_LOG(ERR, "Failed to allocate memory for the lookup table");
4556 		return -ENOMEM;
4557 	}
4558 
4559 	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4560 	if (ret)
4561 		goto out;
4562 
4563 	for (i = 0; i < reta_size; i++) {
4564 		idx = i / RTE_RETA_GROUP_SIZE;
4565 		shift = i % RTE_RETA_GROUP_SIZE;
4566 		if (reta_conf[idx].mask & (1ULL << shift))
4567 			reta_conf[idx].reta[shift] = lut[i];
4568 	}
4569 
4570 out:
4571 	rte_free(lut);
4572 
4573 	return ret;
4574 }
4575 
4576 static int
4577 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4578 {
4579 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4580 	int ret = 0;
4581 
4582 	if (!key || key_len == 0) {
4583 		PMD_DRV_LOG(DEBUG, "No key to be configured");
4584 		return 0;
4585 	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4586 		   sizeof(uint32_t)) {
4587 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4588 		return -EINVAL;
4589 	}
4590 
4591 	struct ice_aqc_get_set_rss_keys *key_dw =
4592 		(struct ice_aqc_get_set_rss_keys *)key;
4593 
4594 	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4595 	if (ret) {
4596 		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4597 		ret = -EINVAL;
4598 	}
4599 
4600 	return ret;
4601 }
4602 
4603 static int
4604 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4605 {
4606 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4607 	int ret;
4608 
4609 	if (!key || !key_len)
4610 		return -EINVAL;
4611 
4612 	ret = ice_aq_get_rss_key
4613 		(hw, vsi->idx,
4614 		 (struct ice_aqc_get_set_rss_keys *)key);
4615 	if (ret) {
4616 		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4617 		return -EINVAL;
4618 	}
4619 	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4620 
4621 	return 0;
4622 }
4623 
4624 static int
4625 ice_rss_hash_update(struct rte_eth_dev *dev,
4626 		    struct rte_eth_rss_conf *rss_conf)
4627 {
4628 	enum ice_status status = ICE_SUCCESS;
4629 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4630 	struct ice_vsi *vsi = pf->main_vsi;
4631 
4632 	/* set hash key */
4633 	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4634 	if (status)
4635 		return status;
4636 
4637 	if (rss_conf->rss_hf == 0) {
4638 		pf->rss_hf = 0;
4639 		return 0;
4640 	}
4641 
4642 	/* RSS hash configuration */
4643 	ice_rss_hash_set(pf, rss_conf->rss_hf);
4644 
4645 	return 0;
4646 }
4647 
4648 static int
4649 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
4650 		      struct rte_eth_rss_conf *rss_conf)
4651 {
4652 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4653 	struct ice_vsi *vsi = pf->main_vsi;
4654 
4655 	ice_get_rss_key(vsi, rss_conf->rss_key,
4656 			&rss_conf->rss_key_len);
4657 
4658 	rss_conf->rss_hf = pf->rss_hf;
4659 	return 0;
4660 }
4661 
4662 static int
4663 ice_promisc_enable(struct rte_eth_dev *dev)
4664 {
4665 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4666 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4667 	struct ice_vsi *vsi = pf->main_vsi;
4668 	enum ice_status status;
4669 	uint8_t pmask;
4670 	int ret = 0;
4671 
4672 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4673 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4674 
4675 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4676 	switch (status) {
4677 	case ICE_ERR_ALREADY_EXISTS:
4678 		PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
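		/* fall-through: already enabled is treated as success */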
4679 	case ICE_SUCCESS:
4680 		break;
4681 	default:
4682 		PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
4683 		ret = -EAGAIN;
4684 	}
4685 
4686 	return ret;
4687 }
4688 
4689 static int
4690 ice_promisc_disable(struct rte_eth_dev *dev)
4691 {
4692 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4693 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4694 	struct ice_vsi *vsi = pf->main_vsi;
4695 	enum ice_status status;
4696 	uint8_t pmask;
4697 	int ret = 0;
4698 
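	/* If allmulti is still enabled, leave multicast promiscuous on and
	 * clear only the unicast bits.
	 */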
4699 	if (dev->data->all_multicast == 1)
4700 		pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX;
4701 	else
4702 		pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4703 			ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4704 
4705 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4706 	if (status != ICE_SUCCESS) {
4707 		PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
4708 		ret = -EAGAIN;
4709 	}
4710 
4711 	return ret;
4712 }
4713 
4714 static int
4715 ice_allmulti_enable(struct rte_eth_dev *dev)
4716 {
4717 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4718 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4719 	struct ice_vsi *vsi = pf->main_vsi;
4720 	enum ice_status status;
4721 	uint8_t pmask;
4722 	int ret = 0;
4723 
4724 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4725 
4726 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4727 
4728 	switch (status) {
4729 	case ICE_ERR_ALREADY_EXISTS:
4730 		PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
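		/* fall-through: already enabled is treated as success */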
4731 	case ICE_SUCCESS:
4732 		break;
4733 	default:
4734 		PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
4735 		ret = -EAGAIN;
4736 	}
4737 
4738 	return ret;
4739 }
4740 
4741 static int
4742 ice_allmulti_disable(struct rte_eth_dev *dev)
4743 {
4744 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4745 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4746 	struct ice_vsi *vsi = pf->main_vsi;
4747 	enum ice_status status;
4748 	uint8_t pmask;
4749 	int ret = 0;
4750 
4751 	if (dev->data->promiscuous == 1)
4752 		return 0; /* must remain in all_multicast mode */
4753 
4754 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4755 
4756 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4757 	if (status != ICE_SUCCESS) {
4758 		PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
4759 		ret = -EAGAIN;
4760 	}
4761 
4762 	return ret;
4763 }
4764 
4765 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
4766 				    uint16_t queue_id)
4767 {
4768 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4769 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4770 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4771 	uint32_t val;
4772 	uint16_t msix_intr;
4773 
4774 	msix_intr = intr_handle->intr_vec[queue_id];
4775 
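	/* Re-arm the vector: set INTENA, clear any pending PBA bit and select
	 * the no-ITR index, while clearing WB_ON_ITR since interrupt mode, not
	 * polling, is in use.
	 */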
4776 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
4777 	      GLINT_DYN_CTL_ITR_INDX_M;
4778 	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
4779 
4780 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
4781 	rte_intr_ack(&pci_dev->intr_handle);
4782 
4783 	return 0;
4784 }
4785 
4786 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
4787 				     uint16_t queue_id)
4788 {
4789 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4790 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4791 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4792 	uint16_t msix_intr;
4793 
4794 	msix_intr = intr_handle->intr_vec[queue_id];
4795 
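	/* Leave INTENA clear so the vector stops firing, but keep descriptor
	 * write-back enabled (WB_ON_ITR) so polling still sees completions.
	 */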
4796 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
4797 
4798 	return 0;
4799 }
4800 
4801 static int
4802 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
4803 {
4804 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4805 	u8 ver, patch;
4806 	u16 build;
4807 	int ret;
4808 
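	/* Compose "<NVM major>.<minor> 0x<eetrack> <OROM major>.<build>.<patch>"
	 * from the flash version information read at init time.
	 */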
4809 	ver = hw->flash.orom.major;
4810 	patch = hw->flash.orom.patch;
4811 	build = hw->flash.orom.build;
4812 
4813 	ret = snprintf(fw_version, fw_size,
4814 			"%x.%02x 0x%08x %d.%d.%d",
4815 			hw->flash.nvm.major,
4816 			hw->flash.nvm.minor,
4817 			hw->flash.nvm.eetrack,
4818 			ver, build, patch);
4819 	if (ret < 0)
4820 		return -EINVAL;
4821 
4822 	/* add the size of '\0' */
4823 	ret += 1;
4824 	if (fw_size < (size_t)ret)
4825 		return ret;
4826 	else
4827 		return 0;
4828 }
4829 
4830 static int
4831 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
4832 {
4833 	struct ice_hw *hw;
4834 	struct ice_vsi_ctx ctxt;
4835 	uint8_t vlan_flags = 0;
4836 	int ret;
4837 
4838 	if (!vsi || !info) {
4839 		PMD_DRV_LOG(ERR, "invalid parameters");
4840 		return -EINVAL;
4841 	}
4842 
4843 	if (info->on) {
4844 		vsi->info.port_based_inner_vlan = info->config.pvid;
4845 		/**
4846 		 * If PVID insertion is enabled, only tagged packets are
4847 		 * allowed to be sent out.
4848 		 */
4849 		vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4850 			     ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4851 	} else {
4852 		vsi->info.port_based_inner_vlan = 0;
4853 		if (info->config.reject.tagged == 0)
4854 			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED;
4855 
4856 		if (info->config.reject.untagged == 0)
4857 			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4858 	}
4859 	vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4860 				  ICE_AQ_VSI_INNER_VLAN_EMODE_M);
4861 	vsi->info.inner_vlan_flags |= vlan_flags;
4862 	memset(&ctxt, 0, sizeof(ctxt));
4863 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4864 	ctxt.info.valid_sections =
4865 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4866 	ctxt.vsi_num = vsi->vsi_id;
4867 
4868 	hw = ICE_VSI_TO_HW(vsi);
4869 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4870 	if (ret != ICE_SUCCESS) {
4871 		PMD_DRV_LOG(ERR,
4872 			    "update VSI for VLAN insert failed, err %d",
4873 			    ret);
4874 		return -EINVAL;
4875 	}
4876 
4877 	vsi->info.valid_sections |=
4878 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4879 
4880 	return ret;
4881 }
4882 
4883 static int
4884 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4885 {
4886 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4887 	struct ice_vsi *vsi = pf->main_vsi;
4888 	struct rte_eth_dev_data *data = pf->dev_data;
4889 	struct ice_vsi_vlan_pvid_info info;
4890 	int ret;
4891 
4892 	memset(&info, 0, sizeof(info));
4893 	info.on = on;
4894 	if (info.on) {
4895 		info.config.pvid = pvid;
4896 	} else {
4897 		info.config.reject.tagged =
4898 			data->dev_conf.txmode.hw_vlan_reject_tagged;
4899 		info.config.reject.untagged =
4900 			data->dev_conf.txmode.hw_vlan_reject_untagged;
4901 	}
4902 
4903 	ret = ice_vsi_vlan_pvid_set(vsi, &info);
4904 	if (ret < 0) {
4905 		PMD_DRV_LOG(ERR, "Failed to set pvid.");
4906 		return -EINVAL;
4907 	}
4908 
4909 	return 0;
4910 }
4911 
4912 static int
4913 ice_get_eeprom_length(struct rte_eth_dev *dev)
4914 {
4915 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4916 
4917 	return hw->flash.flash_size;
4918 }
4919 
4920 static int
4921 ice_get_eeprom(struct rte_eth_dev *dev,
4922 	       struct rte_dev_eeprom_info *eeprom)
4923 {
4924 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4925 	enum ice_status status = ICE_SUCCESS;
4926 	uint8_t *data = eeprom->data;
4927 
4928 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4929 
4930 	status = ice_acquire_nvm(hw, ICE_RES_READ);
4931 	if (status) {
4932 		PMD_DRV_LOG(ERR, "acquire nvm failed.");
4933 		return -EIO;
4934 	}
4935 
4936 	status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
4937 				   data, false);
4938 
4939 	ice_release_nvm(hw);
4940 
4941 	if (status) {
4942 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
4943 		return -EIO;
4944 	}
4945 
4946 	return 0;
4947 }
4948 
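/* Hardware statistics counters are free-running and cannot be zeroed, so the
 * driver keeps a baseline in *offset and reports the delta, allowing for one
 * wrap of the 32-bit counter.
 */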
4949 static void
4950 ice_stat_update_32(struct ice_hw *hw,
4951 		   uint32_t reg,
4952 		   bool offset_loaded,
4953 		   uint64_t *offset,
4954 		   uint64_t *stat)
4955 {
4956 	uint64_t new_data;
4957 
4958 	new_data = (uint64_t)ICE_READ_REG(hw, reg);
4959 	if (!offset_loaded)
4960 		*offset = new_data;
4961 
4962 	if (new_data >= *offset)
4963 		*stat = (uint64_t)(new_data - *offset);
4964 	else
4965 		*stat = (uint64_t)((new_data +
4966 				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
4967 				   - *offset);
4968 }
4969 
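/* Same idea for the split 40-bit counters: the low 32 bits and the 8 valid
 * high bits are combined before computing the delta against *offset.
 */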
4970 static void
4971 ice_stat_update_40(struct ice_hw *hw,
4972 		   uint32_t hireg,
4973 		   uint32_t loreg,
4974 		   bool offset_loaded,
4975 		   uint64_t *offset,
4976 		   uint64_t *stat)
4977 {
4978 	uint64_t new_data;
4979 
4980 	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
4981 	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
4982 		    ICE_32_BIT_WIDTH;
4983 
4984 	if (!offset_loaded)
4985 		*offset = new_data;
4986 
4987 	if (new_data >= *offset)
4988 		*stat = new_data - *offset;
4989 	else
4990 		*stat = (uint64_t)((new_data +
4991 				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
4992 				   *offset);
4993 
4994 	*stat &= ICE_40_BIT_MASK;
4995 }
4996 
4997 /* Get all the statistics of a VSI */
4998 static void
4999 ice_update_vsi_stats(struct ice_vsi *vsi)
5000 {
5001 	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
5002 	struct ice_eth_stats *nes = &vsi->eth_stats;
5003 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
5004 	int idx = rte_le_to_cpu_16(vsi->vsi_id);
5005 
5006 	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
5007 			   vsi->offset_loaded, &oes->rx_bytes,
5008 			   &nes->rx_bytes);
5009 	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
5010 			   vsi->offset_loaded, &oes->rx_unicast,
5011 			   &nes->rx_unicast);
5012 	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
5013 			   vsi->offset_loaded, &oes->rx_multicast,
5014 			   &nes->rx_multicast);
5015 	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
5016 			   vsi->offset_loaded, &oes->rx_broadcast,
5017 			   &nes->rx_broadcast);
5018 	/* account for a wrap of the 40-bit hardware rx_bytes counter */
5019 	if (vsi->offset_loaded) {
5020 		if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
5021 			nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5022 		nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
5023 	}
5024 	vsi->old_rx_bytes = nes->rx_bytes;
5025 	/* exclude CRC bytes */
5026 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
5027 			  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
5028 
5029 	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
5030 			   &oes->rx_discards, &nes->rx_discards);
5031 	/* GLV_REPC not supported */
5032 	/* GLV_RMPC not supported */
5033 	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
5034 			   &oes->rx_unknown_protocol,
5035 			   &nes->rx_unknown_protocol);
5036 	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
5037 			   vsi->offset_loaded, &oes->tx_bytes,
5038 			   &nes->tx_bytes);
5039 	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
5040 			   vsi->offset_loaded, &oes->tx_unicast,
5041 			   &nes->tx_unicast);
5042 	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
5043 			   vsi->offset_loaded, &oes->tx_multicast,
5044 			   &nes->tx_multicast);
5045 	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
5046 			   vsi->offset_loaded,  &oes->tx_broadcast,
5047 			   &nes->tx_broadcast);
5048 	/* GLV_TDPC not supported */
5049 	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
5050 			   &oes->tx_errors, &nes->tx_errors);
5051 	/* account for a wrap of the 40-bit hardware tx_bytes counter */
5052 	if (vsi->offset_loaded) {
5053 		if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
5054 			nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5055 		nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
5056 	}
5057 	vsi->old_tx_bytes = nes->tx_bytes;
5058 	vsi->offset_loaded = true;
5059 
5060 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
5061 		    vsi->vsi_id);
5062 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
5063 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
5064 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
5065 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
5066 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
5067 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
5068 		    nes->rx_unknown_protocol);
5069 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
5070 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
5071 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
5072 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
5073 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
5074 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
5075 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
5076 		    vsi->vsi_id);
5077 }
5078 
5079 static void
5080 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
5081 {
5082 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
5083 	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
5084 
5085 	/* Get statistics of struct ice_eth_stats */
5086 	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
5087 			   GLPRT_GORCL(hw->port_info->lport),
5088 			   pf->offset_loaded, &os->eth.rx_bytes,
5089 			   &ns->eth.rx_bytes);
5090 	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
5091 			   GLPRT_UPRCL(hw->port_info->lport),
5092 			   pf->offset_loaded, &os->eth.rx_unicast,
5093 			   &ns->eth.rx_unicast);
5094 	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
5095 			   GLPRT_MPRCL(hw->port_info->lport),
5096 			   pf->offset_loaded, &os->eth.rx_multicast,
5097 			   &ns->eth.rx_multicast);
5098 	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
5099 			   GLPRT_BPRCL(hw->port_info->lport),
5100 			   pf->offset_loaded, &os->eth.rx_broadcast,
5101 			   &ns->eth.rx_broadcast);
5102 	ice_stat_update_32(hw, PRTRPB_RDPC,
5103 			   pf->offset_loaded, &os->eth.rx_discards,
5104 			   &ns->eth.rx_discards);
5105 	/* account for a wrap of the 40-bit hardware rx_bytes counter */
5106 	if (pf->offset_loaded) {
5107 		if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
5108 			ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5109 		ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
5110 	}
5111 	pf->old_rx_bytes = ns->eth.rx_bytes;
5112 
5113 	/* Workaround: CRC size should not be included in byte statistics,
5114 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
5115 	 * packet.
5116 	 */
5117 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
5118 			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
5119 
5120 	/* GLPRT_REPC not supported */
5121 	/* GLPRT_RMPC not supported */
5122 	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
5123 			   pf->offset_loaded,
5124 			   &os->eth.rx_unknown_protocol,
5125 			   &ns->eth.rx_unknown_protocol);
5126 	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
5127 			   GLPRT_GOTCL(hw->port_info->lport),
5128 			   pf->offset_loaded, &os->eth.tx_bytes,
5129 			   &ns->eth.tx_bytes);
5130 	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
5131 			   GLPRT_UPTCL(hw->port_info->lport),
5132 			   pf->offset_loaded, &os->eth.tx_unicast,
5133 			   &ns->eth.tx_unicast);
5134 	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
5135 			   GLPRT_MPTCL(hw->port_info->lport),
5136 			   pf->offset_loaded, &os->eth.tx_multicast,
5137 			   &ns->eth.tx_multicast);
5138 	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
5139 			   GLPRT_BPTCL(hw->port_info->lport),
5140 			   pf->offset_loaded, &os->eth.tx_broadcast,
5141 			   &ns->eth.tx_broadcast);
5142 	/* account for a wrap of the 40-bit hardware tx_bytes counter */
5143 	if (pf->offset_loaded) {
5144 		if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
5145 			ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5146 		ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
5147 	}
5148 	pf->old_tx_bytes = ns->eth.tx_bytes;
5149 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
5150 			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
5151 
5152 	/* GLPRT_TEPC not supported */
5153 
5154 	/* additional port specific stats */
5155 	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
5156 			   pf->offset_loaded, &os->tx_dropped_link_down,
5157 			   &ns->tx_dropped_link_down);
5158 	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
5159 			   pf->offset_loaded, &os->crc_errors,
5160 			   &ns->crc_errors);
5161 	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
5162 			   pf->offset_loaded, &os->illegal_bytes,
5163 			   &ns->illegal_bytes);
5164 	/* GLPRT_ERRBC not supported */
5165 	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
5166 			   pf->offset_loaded, &os->mac_local_faults,
5167 			   &ns->mac_local_faults);
5168 	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
5169 			   pf->offset_loaded, &os->mac_remote_faults,
5170 			   &ns->mac_remote_faults);
5171 
5172 	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
5173 			   pf->offset_loaded, &os->rx_len_errors,
5174 			   &ns->rx_len_errors);
5175 
5176 	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
5177 			   pf->offset_loaded, &os->link_xon_rx,
5178 			   &ns->link_xon_rx);
5179 	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
5180 			   pf->offset_loaded, &os->link_xoff_rx,
5181 			   &ns->link_xoff_rx);
5182 	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
5183 			   pf->offset_loaded, &os->link_xon_tx,
5184 			   &ns->link_xon_tx);
5185 	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
5186 			   pf->offset_loaded, &os->link_xoff_tx,
5187 			   &ns->link_xoff_tx);
5188 	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
5189 			   GLPRT_PRC64L(hw->port_info->lport),
5190 			   pf->offset_loaded, &os->rx_size_64,
5191 			   &ns->rx_size_64);
5192 	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
5193 			   GLPRT_PRC127L(hw->port_info->lport),
5194 			   pf->offset_loaded, &os->rx_size_127,
5195 			   &ns->rx_size_127);
5196 	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
5197 			   GLPRT_PRC255L(hw->port_info->lport),
5198 			   pf->offset_loaded, &os->rx_size_255,
5199 			   &ns->rx_size_255);
5200 	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
5201 			   GLPRT_PRC511L(hw->port_info->lport),
5202 			   pf->offset_loaded, &os->rx_size_511,
5203 			   &ns->rx_size_511);
5204 	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
5205 			   GLPRT_PRC1023L(hw->port_info->lport),
5206 			   pf->offset_loaded, &os->rx_size_1023,
5207 			   &ns->rx_size_1023);
5208 	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
5209 			   GLPRT_PRC1522L(hw->port_info->lport),
5210 			   pf->offset_loaded, &os->rx_size_1522,
5211 			   &ns->rx_size_1522);
5212 	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
5213 			   GLPRT_PRC9522L(hw->port_info->lport),
5214 			   pf->offset_loaded, &os->rx_size_big,
5215 			   &ns->rx_size_big);
5216 	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
5217 			   pf->offset_loaded, &os->rx_undersize,
5218 			   &ns->rx_undersize);
5219 	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
5220 			   pf->offset_loaded, &os->rx_fragments,
5221 			   &ns->rx_fragments);
5222 	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
5223 			   pf->offset_loaded, &os->rx_oversize,
5224 			   &ns->rx_oversize);
5225 	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
5226 			   pf->offset_loaded, &os->rx_jabber,
5227 			   &ns->rx_jabber);
5228 	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
5229 			   GLPRT_PTC64L(hw->port_info->lport),
5230 			   pf->offset_loaded, &os->tx_size_64,
5231 			   &ns->tx_size_64);
5232 	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
5233 			   GLPRT_PTC127L(hw->port_info->lport),
5234 			   pf->offset_loaded, &os->tx_size_127,
5235 			   &ns->tx_size_127);
5236 	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
5237 			   GLPRT_PTC255L(hw->port_info->lport),
5238 			   pf->offset_loaded, &os->tx_size_255,
5239 			   &ns->tx_size_255);
5240 	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
5241 			   GLPRT_PTC511L(hw->port_info->lport),
5242 			   pf->offset_loaded, &os->tx_size_511,
5243 			   &ns->tx_size_511);
5244 	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
5245 			   GLPRT_PTC1023L(hw->port_info->lport),
5246 			   pf->offset_loaded, &os->tx_size_1023,
5247 			   &ns->tx_size_1023);
5248 	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
5249 			   GLPRT_PTC1522L(hw->port_info->lport),
5250 			   pf->offset_loaded, &os->tx_size_1522,
5251 			   &ns->tx_size_1522);
5252 	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
5253 			   GLPRT_PTC9522L(hw->port_info->lport),
5254 			   pf->offset_loaded, &os->tx_size_big,
5255 			   &ns->tx_size_big);
5256 
5257 	/* GLPRT_MSPDC not supported */
5258 	/* GLPRT_XEC not supported */
5259 
5260 	pf->offset_loaded = true;
5261 
5262 	if (pf->main_vsi)
5263 		ice_update_vsi_stats(pf->main_vsi);
5264 }
5265 
5266 /* Get all statistics of a port */
5267 static int
5268 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
5269 {
5270 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5271 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5272 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
5273 
5274 	/* call read registers - updates values, now write them to struct */
5275 	ice_read_stats_registers(pf, hw);
5276 
5277 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
5278 			  pf->main_vsi->eth_stats.rx_multicast +
5279 			  pf->main_vsi->eth_stats.rx_broadcast -
5280 			  pf->main_vsi->eth_stats.rx_discards;
5281 	stats->opackets = ns->eth.tx_unicast +
5282 			  ns->eth.tx_multicast +
5283 			  ns->eth.tx_broadcast;
5284 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
5285 	stats->obytes   = ns->eth.tx_bytes;
5286 	stats->oerrors  = ns->eth.tx_errors +
5287 			  pf->main_vsi->eth_stats.tx_errors;
5288 
5289 	/* Rx Errors */
5290 	stats->imissed  = ns->eth.rx_discards +
5291 			  pf->main_vsi->eth_stats.rx_discards;
5292 	stats->ierrors  = ns->crc_errors +
5293 			  ns->rx_undersize +
5294 			  ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
5295 
5296 	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
5297 	PMD_DRV_LOG(DEBUG, "rx_bytes:	%"PRIu64"", ns->eth.rx_bytes);
5298 	PMD_DRV_LOG(DEBUG, "rx_unicast:	%"PRIu64"", ns->eth.rx_unicast);
5299 	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
5300 	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
5301 	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
5302 	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
5303 		    pf->main_vsi->eth_stats.rx_discards);
5304 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
5305 		    ns->eth.rx_unknown_protocol);
5306 	PMD_DRV_LOG(DEBUG, "tx_bytes:	%"PRIu64"", ns->eth.tx_bytes);
5307 	PMD_DRV_LOG(DEBUG, "tx_unicast:	%"PRIu64"", ns->eth.tx_unicast);
5308 	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
5309 	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
5310 	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
5311 	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
5312 		    pf->main_vsi->eth_stats.tx_discards);
5313 	PMD_DRV_LOG(DEBUG, "tx_errors:		%"PRIu64"", ns->eth.tx_errors);
5314 
5315 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:	%"PRIu64"",
5316 		    ns->tx_dropped_link_down);
5317 	PMD_DRV_LOG(DEBUG, "crc_errors:	%"PRIu64"", ns->crc_errors);
5318 	PMD_DRV_LOG(DEBUG, "illegal_bytes:	%"PRIu64"",
5319 		    ns->illegal_bytes);
5320 	PMD_DRV_LOG(DEBUG, "error_bytes:	%"PRIu64"", ns->error_bytes);
5321 	PMD_DRV_LOG(DEBUG, "mac_local_faults:	%"PRIu64"",
5322 		    ns->mac_local_faults);
5323 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:	%"PRIu64"",
5324 		    ns->mac_remote_faults);
5325 	PMD_DRV_LOG(DEBUG, "link_xon_rx:	%"PRIu64"", ns->link_xon_rx);
5326 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:	%"PRIu64"", ns->link_xoff_rx);
5327 	PMD_DRV_LOG(DEBUG, "link_xon_tx:	%"PRIu64"", ns->link_xon_tx);
5328 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:	%"PRIu64"", ns->link_xoff_tx);
5329 	PMD_DRV_LOG(DEBUG, "rx_size_64:		%"PRIu64"", ns->rx_size_64);
5330 	PMD_DRV_LOG(DEBUG, "rx_size_127:	%"PRIu64"", ns->rx_size_127);
5331 	PMD_DRV_LOG(DEBUG, "rx_size_255:	%"PRIu64"", ns->rx_size_255);
5332 	PMD_DRV_LOG(DEBUG, "rx_size_511:	%"PRIu64"", ns->rx_size_511);
5333 	PMD_DRV_LOG(DEBUG, "rx_size_1023:	%"PRIu64"", ns->rx_size_1023);
5334 	PMD_DRV_LOG(DEBUG, "rx_size_1522:	%"PRIu64"", ns->rx_size_1522);
5335 	PMD_DRV_LOG(DEBUG, "rx_size_big:	%"PRIu64"", ns->rx_size_big);
5336 	PMD_DRV_LOG(DEBUG, "rx_undersize:	%"PRIu64"", ns->rx_undersize);
5337 	PMD_DRV_LOG(DEBUG, "rx_fragments:	%"PRIu64"", ns->rx_fragments);
5338 	PMD_DRV_LOG(DEBUG, "rx_oversize:	%"PRIu64"", ns->rx_oversize);
5339 	PMD_DRV_LOG(DEBUG, "rx_jabber:		%"PRIu64"", ns->rx_jabber);
5340 	PMD_DRV_LOG(DEBUG, "tx_size_64:		%"PRIu64"", ns->tx_size_64);
5341 	PMD_DRV_LOG(DEBUG, "tx_size_127:	%"PRIu64"", ns->tx_size_127);
5342 	PMD_DRV_LOG(DEBUG, "tx_size_255:	%"PRIu64"", ns->tx_size_255);
5343 	PMD_DRV_LOG(DEBUG, "tx_size_511:	%"PRIu64"", ns->tx_size_511);
5344 	PMD_DRV_LOG(DEBUG, "tx_size_1023:	%"PRIu64"", ns->tx_size_1023);
5345 	PMD_DRV_LOG(DEBUG, "tx_size_1522:	%"PRIu64"", ns->tx_size_1522);
5346 	PMD_DRV_LOG(DEBUG, "tx_size_big:	%"PRIu64"", ns->tx_size_big);
5347 	PMD_DRV_LOG(DEBUG, "rx_len_errors:	%"PRIu64"", ns->rx_len_errors);
5348 	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
5349 	return 0;
5350 }
5351 
5352 /* Reset the statistics */
5353 static int
5354 ice_stats_reset(struct rte_eth_dev *dev)
5355 {
5356 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5357 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5358 
5359 	/* Mark PF and VSI stats so the next read updates their offsets, i.e. "resets" them */
5360 	pf->offset_loaded = false;
5361 	if (pf->main_vsi)
5362 		pf->main_vsi->offset_loaded = false;
5363 
5364 	/* Read the stats so the current register values become the new offsets */
5365 	ice_read_stats_registers(pf, hw);
5366 
5367 	return 0;
5368 }
5369 
5370 static uint32_t
5371 ice_xstats_calc_num(void)
5372 {
5373 	uint32_t num;
5374 
5375 	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
5376 
5377 	return num;
5378 }
5379 
5380 static int
5381 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
5382 	       unsigned int n)
5383 {
5384 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5385 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5386 	unsigned int i;
5387 	unsigned int count;
5388 	struct ice_hw_port_stats *hw_stats = &pf->stats;
5389 
5390 	count = ice_xstats_calc_num();
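	/* Per the ethdev xstats convention, a caller-provided array that is
	 * too small gets back the required number of entries instead.
	 */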
5391 	if (n < count)
5392 		return count;
5393 
5394 	ice_read_stats_registers(pf, hw);
5395 
5396 	if (!xstats)
5397 		return 0;
5398 
5399 	count = 0;
5400 
5401 	/* Get stats from ice_eth_stats struct */
5402 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5403 		xstats[count].value =
5404 			*(uint64_t *)((char *)&hw_stats->eth +
5405 				      ice_stats_strings[i].offset);
5406 		xstats[count].id = count;
5407 		count++;
5408 	}
5409 
5410 	/* Get individual stats from the ice_hw_port_stats struct */
5411 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5412 		xstats[count].value =
5413 			*(uint64_t *)((char *)hw_stats +
5414 				      ice_hw_port_strings[i].offset);
5415 		xstats[count].id = count;
5416 		count++;
5417 	}
5418 
5419 	return count;
5420 }
5421 
5422 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
5423 				struct rte_eth_xstat_name *xstats_names,
5424 				__rte_unused unsigned int limit)
5425 {
5426 	unsigned int count = 0;
5427 	unsigned int i;
5428 
5429 	if (!xstats_names)
5430 		return ice_xstats_calc_num();
5431 
5432 	/* Note: limit is checked in rte_eth_xstats_get_names() */
5433 
5434 	/* Get stats from ice_eth_stats struct */
5435 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5436 		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
5437 			sizeof(xstats_names[count].name));
5438 		count++;
5439 	}
5440 
5441 	/* Get individual stats from the ice_hw_port_stats struct */
5442 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5443 		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
5444 			sizeof(xstats_names[count].name));
5445 		count++;
5446 	}
5447 
5448 	return count;
5449 }
5450 
5451 static int
5452 ice_dev_flow_ops_get(struct rte_eth_dev *dev,
5453 		     const struct rte_flow_ops **ops)
5454 {
5455 	if (!dev)
5456 		return -EINVAL;
5457 
5458 	*ops = &ice_flow_ops;
5459 	return 0;
5460 }
5461 
5462 /* Add UDP tunneling port */
5463 static int
5464 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
5465 			     struct rte_eth_udp_tunnel *udp_tunnel)
5466 {
5467 	int ret = 0;
5468 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5469 
5470 	if (udp_tunnel == NULL)
5471 		return -EINVAL;
5472 
5473 	switch (udp_tunnel->prot_type) {
5474 	case RTE_TUNNEL_TYPE_VXLAN:
5475 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
5476 		break;
5477 	default:
5478 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5479 		ret = -EINVAL;
5480 		break;
5481 	}
5482 
5483 	return ret;
5484 }
5485 
5486 /* Delete UDP tunneling port */
5487 static int
5488 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
5489 			     struct rte_eth_udp_tunnel *udp_tunnel)
5490 {
5491 	int ret = 0;
5492 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5493 
5494 	if (udp_tunnel == NULL)
5495 		return -EINVAL;
5496 
5497 	switch (udp_tunnel->prot_type) {
5498 	case RTE_TUNNEL_TYPE_VXLAN:
5499 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
5500 		break;
5501 	default:
5502 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5503 		ret = -EINVAL;
5504 		break;
5505 	}
5506 
5507 	return ret;
5508 }
5509 
5510 static int
5511 ice_timesync_enable(struct rte_eth_dev *dev)
5512 {
5513 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5514 	struct ice_adapter *ad =
5515 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5516 	int ret;
5517 
5518 	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
5519 	    DEV_RX_OFFLOAD_TIMESTAMP)) {
5520 		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
5521 		return -1;
5522 	}
5523 
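	/* Only the function that owns the source timer initializes the PHC
	 * and programs its increment value.
	 */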
5524 	if (hw->func_caps.ts_func_info.src_tmr_owned) {
5525 		ret = ice_ptp_init_phc(hw);
5526 		if (ret) {
5527 			PMD_DRV_LOG(ERR, "Failed to initialize PHC");
5528 			return -1;
5529 		}
5530 
5531 		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
5532 		if (ret) {
5533 			PMD_DRV_LOG(ERR,
5534 				"Failed to write PHC increment time value");
5535 			return -1;
5536 		}
5537 	}
5538 
5539 	/* Initialize cycle counters for system time/RX/TX timestamp */
5540 	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
5541 	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5542 	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5543 
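	/* With cc_shift and nsec_mask left at zero, raw counter values are
	 * treated directly as nanoseconds (assuming the device timer already
	 * counts in ns).
	 */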
5544 	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
5545 	ad->systime_tc.cc_shift = 0;
5546 	ad->systime_tc.nsec_mask = 0;
5547 
5548 	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
5549 	ad->rx_tstamp_tc.cc_shift = 0;
5550 	ad->rx_tstamp_tc.nsec_mask = 0;
5551 
5552 	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
5553 	ad->tx_tstamp_tc.cc_shift = 0;
5554 	ad->tx_tstamp_tc.nsec_mask = 0;
5555 
5556 	ad->ptp_ena = 1;
5557 
5558 	return 0;
5559 }
5560 
5561 static int
5562 ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
5563 			       struct timespec *timestamp, uint32_t flags)
5564 {
5565 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5566 	struct ice_adapter *ad =
5567 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5568 	struct ice_rx_queue *rxq;
5569 	uint32_t ts_high;
5570 	uint64_t ts_ns, ns;
5571 
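	/* The ethdev 'flags' argument is used here as the index of the Rx
	 * queue whose latched timestamp is read.
	 */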
5572 	rxq = dev->data->rx_queues[flags];
5573 
5574 	ts_high = rxq->time_high;
5575 	ts_ns = ice_tstamp_convert_32b_64b(hw, ts_high);
5576 	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
5577 	*timestamp = rte_ns_to_timespec(ns);
5578 
5579 	return 0;
5580 }
5581 
5582 static int
5583 ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
5584 			       struct timespec *timestamp)
5585 {
5586 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5587 	struct ice_adapter *ad =
5588 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5589 	uint8_t lport;
5590 	uint64_t ts_ns, ns, tstamp;
5591 	const uint64_t mask = 0xFFFFFFFF;
5592 	int ret;
5593 
5594 	lport = hw->port_info->lport;
5595 
5596 	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
5597 	if (ret) {
5598 		PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
5599 		return -1;
5600 	}
5601 
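	/* Assuming the 40-bit PHY timestamp layout where the low 8 bits hold
	 * sub-nanosecond resolution, drop those bits and extend the remaining
	 * 32-bit value to a 64-bit nanosecond timestamp.
	 */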
5602 	ts_ns = ice_tstamp_convert_32b_64b(hw, (tstamp >> 8) & mask);
5603 	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
5604 	*timestamp = rte_ns_to_timespec(ns);
5605 
5606 	return 0;
5607 }
5608 
5609 static int
5610 ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
5611 {
5612 	struct ice_adapter *ad =
5613 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5614 
5615 	ad->systime_tc.nsec += delta;
5616 	ad->rx_tstamp_tc.nsec += delta;
5617 	ad->tx_tstamp_tc.nsec += delta;
5618 
5619 	return 0;
5620 }
5621 
5622 static int
5623 ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
5624 {
5625 	struct ice_adapter *ad =
5626 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5627 	uint64_t ns;
5628 
5629 	ns = rte_timespec_to_ns(ts);
5630 
5631 	ad->systime_tc.nsec = ns;
5632 	ad->rx_tstamp_tc.nsec = ns;
5633 	ad->tx_tstamp_tc.nsec = ns;
5634 
5635 	return 0;
5636 }
5637 
5638 static int
5639 ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
5640 {
5641 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5642 	struct ice_adapter *ad =
5643 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5644 	uint32_t hi, lo, lo2;
5645 	uint64_t time, ns;
5646 
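	/* Read low, then high, then low again; if the second low read went
	 * backwards, the counter wrapped between reads, so re-read both
	 * halves for a consistent snapshot.
	 */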
5647 	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
5648 	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
5649 	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
5650 
5651 	if (lo2 < lo) {
5652 		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
5653 		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
5654 	}
5655 
5656 	time = ((uint64_t)hi << 32) | lo;
5657 	ns = rte_timecounter_update(&ad->systime_tc, time);
5658 	*ts = rte_ns_to_timespec(ns);
5659 
5660 	return 0;
5661 }
5662 
5663 static int
5664 ice_timesync_disable(struct rte_eth_dev *dev)
5665 {
5666 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5667 	struct ice_adapter *ad =
5668 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5669 	uint64_t val;
5670 	uint8_t lport;
5671 
5672 	lport = hw->port_info->lport;
5673 
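	/* Clear any pending PHY timestamp for this port, then disable the
	 * source timer and zero its increment value.
	 */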
5674 	ice_clear_phy_tstamp(hw, lport, 0);
5675 
5676 	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
5677 	val &= ~GLTSYN_ENA_TSYN_ENA_M;
5678 	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
5679 
5680 	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
5681 	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
5682 
5683 	ad->ptp_ena = 0;
5684 
5685 	return 0;
5686 }
5687 
5688 static int
5689 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
5690 	      struct rte_pci_device *pci_dev)
5691 {
5692 	return rte_eth_dev_pci_generic_probe(pci_dev,
5693 					     sizeof(struct ice_adapter),
5694 					     ice_dev_init);
5695 }
5696 
5697 static int
5698 ice_pci_remove(struct rte_pci_device *pci_dev)
5699 {
5700 	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
5701 }
5702 
5703 static struct rte_pci_driver rte_ice_pmd = {
5704 	.id_table = pci_id_ice_map,
5705 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
5706 	.probe = ice_pci_probe,
5707 	.remove = ice_pci_remove,
5708 };
5709 
5710 /**
5711  * Driver initialization routine.
5712  * Invoked once at EAL init time.
5713  * Registers itself as the Poll Mode Driver for ice PCI devices.
5714  */
5715 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
5716 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
5717 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
5718 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
5719 			      ICE_HW_DEBUG_MASK_ARG "=0xXXX"
5720 			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
5721 			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
5722 			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
5723 			      ICE_RX_LOW_LATENCY_ARG "=<0|1>");
5724 
5725 RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE);
5726 RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE);
5727 #ifdef RTE_ETHDEV_DEBUG_RX
5728 RTE_LOG_REGISTER_SUFFIX(ice_logtype_rx, rx, DEBUG);
5729 #endif
5730 #ifdef RTE_ETHDEV_DEBUG_TX
5731 RTE_LOG_REGISTER_SUFFIX(ice_logtype_tx, tx, DEBUG);
5732 #endif
5733