xref: /dpdk/drivers/net/ice/ice_ethdev.c (revision 29fd052d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <ethdev_pci.h>
7 
8 #include <stdio.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <unistd.h>
12 
13 #include <rte_tailq.h>
14 
15 #include "eal_firmware.h"
16 
17 #include "base/ice_sched.h"
18 #include "base/ice_flow.h"
19 #include "base/ice_dcb.h"
20 #include "base/ice_common.h"
21 #include "base/ice_ptp_hw.h"
22 
23 #include "rte_pmd_ice.h"
24 #include "ice_ethdev.h"
25 #include "ice_rxtx.h"
26 #include "ice_generic_flow.h"
27 
28 /* devargs */
29 #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
30 #define ICE_PIPELINE_MODE_SUPPORT_ARG  "pipeline-mode-support"
31 #define ICE_PROTO_XTR_ARG         "proto_xtr"
32 #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
33 #define ICE_ONE_PPS_OUT_ARG       "pps_out"
34 #define ICE_RX_LOW_LATENCY_ARG    "rx_low_latency"
35 
36 #define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
37 
38 uint64_t ice_timestamp_dynflag;
39 int ice_timestamp_dynfield_offset = -1;
40 
41 static const char * const ice_valid_args[] = {
42 	ICE_SAFE_MODE_SUPPORT_ARG,
43 	ICE_PIPELINE_MODE_SUPPORT_ARG,
44 	ICE_PROTO_XTR_ARG,
45 	ICE_HW_DEBUG_MASK_ARG,
46 	ICE_ONE_PPS_OUT_ARG,
47 	ICE_RX_LOW_LATENCY_ARG,
48 	NULL
49 };
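/*
 * Illustrative devargs usage (the PCI address is hypothetical): the keys above
 * are passed per device on the EAL allow-list, for example
 *   -a 0000:18:00.0,safe-mode-support=1,rx_low_latency=1
 * Boolean keys are parsed by parse_bool() below and must be 0 or 1;
 * the proto_xtr queue-set grammar is documented above parse_queue_set().
 */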
50 
51 #define PPS_OUT_DELAY_NS  1
52 
53 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
54 	.name = "intel_pmd_dynfield_proto_xtr_metadata",
55 	.size = sizeof(uint32_t),
56 	.align = __alignof__(uint32_t),
57 	.flags = 0,
58 };
59 
60 struct proto_xtr_ol_flag {
61 	const struct rte_mbuf_dynflag param;
62 	uint64_t *ol_flag;
63 	bool required;
64 };
65 
66 static bool ice_proto_xtr_hw_support[PROTO_XTR_MAX];
67 
68 static struct proto_xtr_ol_flag ice_proto_xtr_ol_flag_params[] = {
69 	[PROTO_XTR_VLAN] = {
70 		.param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
71 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_vlan_mask },
72 	[PROTO_XTR_IPV4] = {
73 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
74 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv4_mask },
75 	[PROTO_XTR_IPV6] = {
76 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
77 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_mask },
78 	[PROTO_XTR_IPV6_FLOW] = {
79 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
80 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask },
81 	[PROTO_XTR_TCP] = {
82 		.param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
83 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_tcp_mask },
84 	[PROTO_XTR_IP_OFFSET] = {
85 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
86 		.ol_flag = &rte_net_ice_dynflag_proto_xtr_ip_offset_mask },
87 };
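/*
 * Consumer-side sketch (an application built on this PMD, not driver code;
 * shown only to illustrate what the dynfield/dynflag registrations above
 * provide).  After a port is started with proto_xtr enabled, the extracted
 * metadata and its validity flag can be looked up by name through the generic
 * mbuf dynfield/dynflag API:
 *
 *	int xtr_off = rte_mbuf_dynfield_lookup(
 *			"intel_pmd_dynfield_proto_xtr_metadata", NULL);
 *	int vlan_bit = rte_mbuf_dynflag_lookup(
 *			"intel_pmd_dynflag_proto_xtr_vlan", NULL);
 *
 *	// per received mbuf 'm', assuming both lookups succeeded:
 *	if (m->ol_flags & RTE_BIT64(vlan_bit)) {
 *		uint32_t meta = *RTE_MBUF_DYNFIELD(m, xtr_off, uint32_t *);
 *		// 'meta' holds the data extracted by the flexible Rx descriptor
 *	}
 *
 * See also the helpers exported in rte_pmd_ice.h.
 */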
88 
89 #define ICE_OS_DEFAULT_PKG_NAME		"ICE OS Default Package"
90 #define ICE_COMMS_PKG_NAME			"ICE COMMS Package"
91 #define ICE_MAX_RES_DESC_NUM        1024
92 
93 static int ice_dev_configure(struct rte_eth_dev *dev);
94 static int ice_dev_start(struct rte_eth_dev *dev);
95 static int ice_dev_stop(struct rte_eth_dev *dev);
96 static int ice_dev_close(struct rte_eth_dev *dev);
97 static int ice_dev_reset(struct rte_eth_dev *dev);
98 static int ice_dev_info_get(struct rte_eth_dev *dev,
99 			    struct rte_eth_dev_info *dev_info);
100 static int ice_link_update(struct rte_eth_dev *dev,
101 			   int wait_to_complete);
102 static int ice_dev_set_link_up(struct rte_eth_dev *dev);
103 static int ice_dev_set_link_down(struct rte_eth_dev *dev);
104 
105 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
106 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
107 static int ice_rss_reta_update(struct rte_eth_dev *dev,
108 			       struct rte_eth_rss_reta_entry64 *reta_conf,
109 			       uint16_t reta_size);
110 static int ice_rss_reta_query(struct rte_eth_dev *dev,
111 			      struct rte_eth_rss_reta_entry64 *reta_conf,
112 			      uint16_t reta_size);
113 static int ice_rss_hash_update(struct rte_eth_dev *dev,
114 			       struct rte_eth_rss_conf *rss_conf);
115 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
116 				 struct rte_eth_rss_conf *rss_conf);
117 static int ice_promisc_enable(struct rte_eth_dev *dev);
118 static int ice_promisc_disable(struct rte_eth_dev *dev);
119 static int ice_allmulti_enable(struct rte_eth_dev *dev);
120 static int ice_allmulti_disable(struct rte_eth_dev *dev);
121 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
122 			       uint16_t vlan_id,
123 			       int on);
124 static int ice_macaddr_set(struct rte_eth_dev *dev,
125 			   struct rte_ether_addr *mac_addr);
126 static int ice_macaddr_add(struct rte_eth_dev *dev,
127 			   struct rte_ether_addr *mac_addr,
128 			   __rte_unused uint32_t index,
129 			   uint32_t pool);
130 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
131 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
132 				    uint16_t queue_id);
133 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
134 				     uint16_t queue_id);
135 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
136 			      size_t fw_size);
137 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
138 			     uint16_t pvid, int on);
139 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
140 static int ice_get_eeprom(struct rte_eth_dev *dev,
141 			  struct rte_dev_eeprom_info *eeprom);
142 static int ice_get_module_info(struct rte_eth_dev *dev,
143 			       struct rte_eth_dev_module_info *modinfo);
144 static int ice_get_module_eeprom(struct rte_eth_dev *dev,
145 				 struct rte_dev_eeprom_info *info);
146 static int ice_stats_get(struct rte_eth_dev *dev,
147 			 struct rte_eth_stats *stats);
148 static int ice_stats_reset(struct rte_eth_dev *dev);
149 static int ice_xstats_get(struct rte_eth_dev *dev,
150 			  struct rte_eth_xstat *xstats, unsigned int n);
151 static int ice_xstats_get_names(struct rte_eth_dev *dev,
152 				struct rte_eth_xstat_name *xstats_names,
153 				unsigned int limit);
154 static int ice_dev_flow_ops_get(struct rte_eth_dev *dev,
155 				const struct rte_flow_ops **ops);
156 static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
157 			struct rte_eth_udp_tunnel *udp_tunnel);
158 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
159 			struct rte_eth_udp_tunnel *udp_tunnel);
160 static int ice_timesync_enable(struct rte_eth_dev *dev);
161 static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
162 					  struct timespec *timestamp,
163 					  uint32_t flags);
164 static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
165 					  struct timespec *timestamp);
166 static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
167 static int ice_timesync_read_time(struct rte_eth_dev *dev,
168 				  struct timespec *timestamp);
169 static int ice_timesync_write_time(struct rte_eth_dev *dev,
170 				   const struct timespec *timestamp);
171 static int ice_timesync_disable(struct rte_eth_dev *dev);
172 
173 static const struct rte_pci_id pci_id_ice_map[] = {
174 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
175 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
176 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
177 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
178 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
179 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
180 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
181 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
182 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE) },
183 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP) },
184 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP) },
185 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE) },
186 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP) },
187 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
188 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
189 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
190 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
191 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
192 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
193 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
194 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
195 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
196 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
197 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
198 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
199 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E824S) },
200 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_BACKPLANE) },
201 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_QSFP) },
202 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_SFP) },
203 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_1GBE) },
204 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825X) },
205 	{ .vendor_id = 0, /* sentinel */ },
206 };
207 
208 static const struct eth_dev_ops ice_eth_dev_ops = {
209 	.dev_configure                = ice_dev_configure,
210 	.dev_start                    = ice_dev_start,
211 	.dev_stop                     = ice_dev_stop,
212 	.dev_close                    = ice_dev_close,
213 	.dev_reset                    = ice_dev_reset,
214 	.dev_set_link_up              = ice_dev_set_link_up,
215 	.dev_set_link_down            = ice_dev_set_link_down,
216 	.rx_queue_start               = ice_rx_queue_start,
217 	.rx_queue_stop                = ice_rx_queue_stop,
218 	.tx_queue_start               = ice_tx_queue_start,
219 	.tx_queue_stop                = ice_tx_queue_stop,
220 	.rx_queue_setup               = ice_rx_queue_setup,
221 	.rx_queue_release             = ice_dev_rx_queue_release,
222 	.tx_queue_setup               = ice_tx_queue_setup,
223 	.tx_queue_release             = ice_dev_tx_queue_release,
224 	.dev_infos_get                = ice_dev_info_get,
225 	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
226 	.link_update                  = ice_link_update,
227 	.mtu_set                      = ice_mtu_set,
228 	.mac_addr_set                 = ice_macaddr_set,
229 	.mac_addr_add                 = ice_macaddr_add,
230 	.mac_addr_remove              = ice_macaddr_remove,
231 	.vlan_filter_set              = ice_vlan_filter_set,
232 	.vlan_offload_set             = ice_vlan_offload_set,
233 	.reta_update                  = ice_rss_reta_update,
234 	.reta_query                   = ice_rss_reta_query,
235 	.rss_hash_update              = ice_rss_hash_update,
236 	.rss_hash_conf_get            = ice_rss_hash_conf_get,
237 	.promiscuous_enable           = ice_promisc_enable,
238 	.promiscuous_disable          = ice_promisc_disable,
239 	.allmulticast_enable          = ice_allmulti_enable,
240 	.allmulticast_disable         = ice_allmulti_disable,
241 	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
242 	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
243 	.fw_version_get               = ice_fw_version_get,
244 	.vlan_pvid_set                = ice_vlan_pvid_set,
245 	.rxq_info_get                 = ice_rxq_info_get,
246 	.txq_info_get                 = ice_txq_info_get,
247 	.rx_burst_mode_get            = ice_rx_burst_mode_get,
248 	.tx_burst_mode_get            = ice_tx_burst_mode_get,
249 	.get_eeprom_length            = ice_get_eeprom_length,
250 	.get_eeprom                   = ice_get_eeprom,
251 	.get_module_info              = ice_get_module_info,
252 	.get_module_eeprom            = ice_get_module_eeprom,
253 	.stats_get                    = ice_stats_get,
254 	.stats_reset                  = ice_stats_reset,
255 	.xstats_get                   = ice_xstats_get,
256 	.xstats_get_names             = ice_xstats_get_names,
257 	.xstats_reset                 = ice_stats_reset,
258 	.flow_ops_get                 = ice_dev_flow_ops_get,
259 	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
260 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
261 	.tx_done_cleanup              = ice_tx_done_cleanup,
262 	.get_monitor_addr             = ice_get_monitor_addr,
263 	.timesync_enable              = ice_timesync_enable,
264 	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
265 	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
266 	.timesync_adjust_time         = ice_timesync_adjust_time,
267 	.timesync_read_time           = ice_timesync_read_time,
268 	.timesync_write_time          = ice_timesync_write_time,
269 	.timesync_disable             = ice_timesync_disable,
270 };
271 
272 /* store statistics names and their offsets in the stats structure */
273 struct ice_xstats_name_off {
274 	char name[RTE_ETH_XSTATS_NAME_SIZE];
275 	unsigned int offset;
276 };
277 
278 static const struct ice_xstats_name_off ice_stats_strings[] = {
279 	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
280 	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
281 	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
282 	{"rx_dropped_packets", offsetof(struct ice_eth_stats, rx_discards)},
283 	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
284 		rx_unknown_protocol)},
285 	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
286 	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
287 	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
288 	{"tx_dropped_packets", offsetof(struct ice_eth_stats, tx_discards)},
289 };
290 
291 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
292 		sizeof(ice_stats_strings[0]))
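/*
 * Sketch of how the name/offset tables are consumed (illustrative only; the
 * real logic lives in ice_xstats_get() later in this file, and 'es' stands
 * for any populated struct ice_eth_stats).  Each counter is read by adding
 * the recorded byte offset to the base of the stats structure:
 *
 *	unsigned int i;
 *	uint64_t val;
 *
 *	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
 *		val = *(const uint64_t *)((const char *)es +
 *					  ice_stats_strings[i].offset);
 *		// 'val' pairs with ice_stats_strings[i].name
 *	}
 */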
293 
294 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
295 	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
296 		tx_dropped_link_down)},
297 	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
298 	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
299 		illegal_bytes)},
300 	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
301 	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
302 		mac_local_faults)},
303 	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
304 		mac_remote_faults)},
305 	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
306 		rx_len_errors)},
307 	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
308 	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
309 	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
310 	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
311 	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
312 	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
313 		rx_size_127)},
314 	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
315 		rx_size_255)},
316 	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
317 		rx_size_511)},
318 	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
319 		rx_size_1023)},
320 	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
321 		rx_size_1522)},
322 	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
323 		rx_size_big)},
324 	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
325 		rx_undersize)},
326 	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
327 		rx_oversize)},
328 	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
329 		mac_short_pkt_dropped)},
330 	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
331 		rx_fragments)},
332 	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
333 	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
334 	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
335 		tx_size_127)},
336 	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
337 		tx_size_255)},
338 	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
339 		tx_size_511)},
340 	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
341 		tx_size_1023)},
342 	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
343 		tx_size_1522)},
344 	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
345 		tx_size_big)},
346 };
347 
348 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
349 		sizeof(ice_hw_port_strings[0]))
350 
351 static void
352 ice_init_controlq_parameter(struct ice_hw *hw)
353 {
354 	/* fields for adminq */
355 	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
356 	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
357 	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
358 	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
359 
360 	/* fields for mailboxq, where DPDK acts as the PF host */
361 	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
362 	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
363 	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
364 	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
365 
366 	/* fields for sideband queue */
367 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
368 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
369 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
370 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
371 
372 }
373 
374 static int
375 lookup_proto_xtr_type(const char *xtr_name)
376 {
377 	static struct {
378 		const char *name;
379 		enum proto_xtr_type type;
380 	} xtr_type_map[] = {
381 		{ "vlan",      PROTO_XTR_VLAN      },
382 		{ "ipv4",      PROTO_XTR_IPV4      },
383 		{ "ipv6",      PROTO_XTR_IPV6      },
384 		{ "ipv6_flow", PROTO_XTR_IPV6_FLOW },
385 		{ "tcp",       PROTO_XTR_TCP       },
386 		{ "ip_offset", PROTO_XTR_IP_OFFSET },
387 	};
388 	uint32_t i;
389 
390 	for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
391 		if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
392 			return xtr_type_map[i].type;
393 	}
394 
395 	return -1;
396 }
397 
398 /*
399  * Parse an element; the element can be a single number, a range, or a '( )' group
400  * 1) A single number element is just a plain digit, e.g. 9
401  * 2) A single range element is two digits separated by a '-', e.g. 2-6
402  * 3) A group element combines multiple 1) or 2) within '( )', e.g. (0,2-4,6)
403  *    Within a group element, '-' is used as the range separator and
404  *                            ',' separates single numbers.
405  */
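/*
 * Examples of proto_xtr devargs values accepted by parse_queue_proto_xtr()
 * and parse_queue_set() below (queue IDs are arbitrary illustrations):
 *   proto_xtr=vlan                     - default type applied to every queue
 *   proto_xtr=[2:ipv4]                 - a single queue
 *   proto_xtr=[4-6:tcp]                - a queue range
 *   proto_xtr=[(0,2-4,6):tcp,8:ipv6]   - a group plus a single queue
 */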
406 static int
407 parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
408 {
409 	const char *str = input;
410 	char *end = NULL;
411 	uint32_t min, max;
412 	uint32_t idx;
413 
414 	while (isblank(*str))
415 		str++;
416 
417 	if (!isdigit(*str) && *str != '(')
418 		return -1;
419 
420 	/* process a single number or a single range of numbers */
421 	if (*str != '(') {
422 		errno = 0;
423 		idx = strtoul(str, &end, 10);
424 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
425 			return -1;
426 
427 		while (isblank(*end))
428 			end++;
429 
430 		min = idx;
431 		max = idx;
432 
433 		/* process single <number>-<number> */
434 		if (*end == '-') {
435 			end++;
436 			while (isblank(*end))
437 				end++;
438 			if (!isdigit(*end))
439 				return -1;
440 
441 			errno = 0;
442 			idx = strtoul(end, &end, 10);
443 			if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
444 				return -1;
445 
446 			max = idx;
447 			while (isblank(*end))
448 				end++;
449 		}
450 
451 		if (*end != ':')
452 			return -1;
453 
454 		for (idx = RTE_MIN(min, max);
455 		     idx <= RTE_MAX(min, max); idx++)
456 			devargs->proto_xtr[idx] = xtr_type;
457 
458 		return 0;
459 	}
460 
461 	/* process set within bracket */
462 	str++;
463 	while (isblank(*str))
464 		str++;
465 	if (*str == '\0')
466 		return -1;
467 
468 	min = ICE_MAX_QUEUE_NUM;
469 	do {
470 		/* advance to the first digit */
471 		while (isblank(*str))
472 			str++;
473 		if (!isdigit(*str))
474 			return -1;
475 
476 		/* get the digit value */
477 		errno = 0;
478 		idx = strtoul(str, &end, 10);
479 		if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
480 			return -1;
481 
482 		/* advance to a separator: '-', ',' or ')' */
483 		while (isblank(*end))
484 			end++;
485 		if (*end == '-') {
486 			if (min == ICE_MAX_QUEUE_NUM)
487 				min = idx;
488 			else /* avoid continuous '-' */
489 				return -1;
490 		} else if (*end == ',' || *end == ')') {
491 			max = idx;
492 			if (min == ICE_MAX_QUEUE_NUM)
493 				min = idx;
494 
495 			for (idx = RTE_MIN(min, max);
496 			     idx <= RTE_MAX(min, max); idx++)
497 				devargs->proto_xtr[idx] = xtr_type;
498 
499 			min = ICE_MAX_QUEUE_NUM;
500 		} else {
501 			return -1;
502 		}
503 
504 		str = end + 1;
505 	} while (*end != ')' && *end != '\0');
506 
507 	return 0;
508 }
509 
510 static int
511 parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
512 {
513 	const char *queue_start;
514 	uint32_t idx;
515 	int xtr_type;
516 	char xtr_name[32];
517 
518 	while (isblank(*queues))
519 		queues++;
520 
521 	if (*queues != '[') {
522 		xtr_type = lookup_proto_xtr_type(queues);
523 		if (xtr_type < 0)
524 			return -1;
525 
526 		devargs->proto_xtr_dflt = xtr_type;
527 
528 		return 0;
529 	}
530 
531 	queues++;
532 	do {
533 		while (isblank(*queues))
534 			queues++;
535 		if (*queues == '\0')
536 			return -1;
537 
538 		queue_start = queues;
539 
540 		/* go across a complete bracket */
541 		if (*queue_start == '(') {
542 			queues += strcspn(queues, ")");
543 			if (*queues != ')')
544 				return -1;
545 		}
546 
547 		/* scan the separator ':' */
548 		queues += strcspn(queues, ":");
549 		if (*queues++ != ':')
550 			return -1;
551 		while (isblank(*queues))
552 			queues++;
553 
554 		for (idx = 0; ; idx++) {
555 			if (isblank(queues[idx]) ||
556 			    queues[idx] == ',' ||
557 			    queues[idx] == ']' ||
558 			    queues[idx] == '\0')
559 				break;
560 
561 			if (idx > sizeof(xtr_name) - 2)
562 				return -1;
563 
564 			xtr_name[idx] = queues[idx];
565 		}
566 		xtr_name[idx] = '\0';
567 		xtr_type = lookup_proto_xtr_type(xtr_name);
568 		if (xtr_type < 0)
569 			return -1;
570 
571 		queues += idx;
572 
573 		while (isblank(*queues) || *queues == ',' || *queues == ']')
574 			queues++;
575 
576 		if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
577 			return -1;
578 	} while (*queues != '\0');
579 
580 	return 0;
581 }
582 
583 static int
584 handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
585 		     void *extra_args)
586 {
587 	struct ice_devargs *devargs = extra_args;
588 
589 	if (value == NULL || extra_args == NULL)
590 		return -EINVAL;
591 
592 	if (parse_queue_proto_xtr(value, devargs) < 0) {
593 		PMD_DRV_LOG(ERR,
594 			    "The protocol extraction parameter is wrong: '%s'",
595 			    value);
596 		return -1;
597 	}
598 
599 	return 0;
600 }
601 
602 static void
603 ice_check_proto_xtr_support(struct ice_hw *hw)
604 {
605 #define FLX_REG(val, fld, idx) \
606 	(((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
607 	 GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
608 	static struct {
609 		uint32_t rxdid;
610 		uint8_t opcode;
611 		uint8_t protid_0;
612 		uint8_t protid_1;
613 	} xtr_sets[] = {
614 		[PROTO_XTR_VLAN] = { ICE_RXDID_COMMS_AUX_VLAN,
615 				     ICE_RX_OPC_EXTRACT,
616 				     ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O},
617 		[PROTO_XTR_IPV4] = { ICE_RXDID_COMMS_AUX_IPV4,
618 				     ICE_RX_OPC_EXTRACT,
619 				     ICE_PROT_IPV4_OF_OR_S,
620 				     ICE_PROT_IPV4_OF_OR_S },
621 		[PROTO_XTR_IPV6] = { ICE_RXDID_COMMS_AUX_IPV6,
622 				     ICE_RX_OPC_EXTRACT,
623 				     ICE_PROT_IPV6_OF_OR_S,
624 				     ICE_PROT_IPV6_OF_OR_S },
625 		[PROTO_XTR_IPV6_FLOW] = { ICE_RXDID_COMMS_AUX_IPV6_FLOW,
626 					  ICE_RX_OPC_EXTRACT,
627 					  ICE_PROT_IPV6_OF_OR_S,
628 					  ICE_PROT_IPV6_OF_OR_S },
629 		[PROTO_XTR_TCP] = { ICE_RXDID_COMMS_AUX_TCP,
630 				    ICE_RX_OPC_EXTRACT,
631 				    ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
632 		[PROTO_XTR_IP_OFFSET] = { ICE_RXDID_COMMS_AUX_IP_OFFSET,
633 					  ICE_RX_OPC_PROTID,
634 					  ICE_PROT_IPV4_OF_OR_S,
635 					  ICE_PROT_IPV6_OF_OR_S },
636 	};
637 	uint32_t i;
638 
639 	for (i = 0; i < RTE_DIM(xtr_sets); i++) {
640 		uint32_t rxdid = xtr_sets[i].rxdid;
641 		uint32_t v;
642 
643 		if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
644 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
645 
646 			if (FLX_REG(v, PROT_MDID, 4) == xtr_sets[i].protid_0 &&
647 			    FLX_REG(v, RXDID_OPCODE, 4) == xtr_sets[i].opcode)
648 				ice_proto_xtr_hw_support[i] = true;
649 		}
650 
651 		if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
652 			v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
653 
654 			if (FLX_REG(v, PROT_MDID, 5) == xtr_sets[i].protid_1 &&
655 			    FLX_REG(v, RXDID_OPCODE, 5) == xtr_sets[i].opcode)
656 				ice_proto_xtr_hw_support[i] = true;
657 		}
658 	}
659 }
660 
661 static int
662 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
663 		  uint32_t num)
664 {
665 	struct pool_entry *entry;
666 
667 	if (!pool || !num)
668 		return -EINVAL;
669 
670 	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
671 	if (!entry) {
672 		PMD_INIT_LOG(ERR,
673 			     "Failed to allocate memory for resource pool");
674 		return -ENOMEM;
675 	}
676 
677 	/* initialize the queue heap */
678 	pool->num_free = num;
679 	pool->num_alloc = 0;
680 	pool->base = base;
681 	LIST_INIT(&pool->alloc_list);
682 	LIST_INIT(&pool->free_list);
683 
684 	/* Initialize element  */
685 	entry->base = 0;
686 	entry->len = num;
687 
688 	LIST_INSERT_HEAD(&pool->free_list, entry, next);
689 	return 0;
690 }
691 
692 static int
693 ice_res_pool_alloc(struct ice_res_pool_info *pool,
694 		   uint16_t num)
695 {
696 	struct pool_entry *entry, *valid_entry;
697 
698 	if (!pool || !num) {
699 		PMD_INIT_LOG(ERR, "Invalid parameter");
700 		return -EINVAL;
701 	}
702 
703 	if (pool->num_free < num) {
704 		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
705 			     num, pool->num_free);
706 		return -ENOMEM;
707 	}
708 
709 	valid_entry = NULL;
710 	/* Look up the free list and find the best-fit entry */
711 	LIST_FOREACH(entry, &pool->free_list, next) {
712 		if (entry->len >= num) {
713 			/* Find best one */
714 			if (entry->len == num) {
715 				valid_entry = entry;
716 				break;
717 			}
718 			if (!valid_entry ||
719 			    valid_entry->len > entry->len)
720 				valid_entry = entry;
721 		}
722 	}
723 
724 	/* No entry found to satisfy the request, return an error */
725 	if (!valid_entry) {
726 		PMD_INIT_LOG(ERR, "No valid entry found");
727 		return -ENOMEM;
728 	}
729 	/**
730 	 * The entry has exactly the requested number of queues;
731 	 * remove it from the free_list.
732 	 */
733 	if (valid_entry->len == num) {
734 		LIST_REMOVE(valid_entry, next);
735 	} else {
736 		/**
737 		 * The entry has more queues than requested;
738 		 * create a new entry for the alloc_list and shrink the
739 		 * remaining free_list entry (advance its base, reduce its length).
740 		 */
741 		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
742 		if (!entry) {
743 			PMD_INIT_LOG(ERR,
744 				     "Failed to allocate memory for "
745 				     "resource pool");
746 			return -ENOMEM;
747 		}
748 		entry->base = valid_entry->base;
749 		entry->len = num;
750 		valid_entry->base += num;
751 		valid_entry->len -= num;
752 		valid_entry = entry;
753 	}
754 
755 	/* Insert it into alloc list, not sorted */
756 	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
757 
758 	pool->num_free -= valid_entry->len;
759 	pool->num_alloc += valid_entry->len;
760 
761 	return valid_entry->base + pool->base;
762 }
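/*
 * Worked example (illustrative; base and sizes are arbitrary): after
 * ice_res_pool_init(pool, 0, 16) the free list holds one entry {base 0,
 * len 16}.  ice_res_pool_alloc(pool, 4) carves {base 0, len 4} into the
 * alloc list and returns 0; a second ice_res_pool_alloc(pool, 4) returns 4,
 * leaving {base 8, len 8} free.  The return value is the absolute index
 * (entry base + pool->base); a negative value indicates failure.
 */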
763 
764 static void
765 ice_res_pool_destroy(struct ice_res_pool_info *pool)
766 {
767 	struct pool_entry *entry, *next_entry;
768 
769 	if (!pool)
770 		return;
771 
772 	for (entry = LIST_FIRST(&pool->alloc_list);
773 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
774 	     entry = next_entry) {
775 		LIST_REMOVE(entry, next);
776 		rte_free(entry);
777 	}
778 
779 	for (entry = LIST_FIRST(&pool->free_list);
780 	     entry && (next_entry = LIST_NEXT(entry, next), 1);
781 	     entry = next_entry) {
782 		LIST_REMOVE(entry, next);
783 		rte_free(entry);
784 	}
785 
786 	pool->num_free = 0;
787 	pool->num_alloc = 0;
788 	pool->base = 0;
789 	LIST_INIT(&pool->alloc_list);
790 	LIST_INIT(&pool->free_list);
791 }
792 
793 static void
794 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
795 {
796 	/* Set VSI LUT selection */
797 	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
798 			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
799 	/* Set Hash scheme */
800 	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
801 			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
802 	/* enable TC */
803 	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
804 }
805 
806 static enum ice_status
807 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
808 				struct ice_aqc_vsi_props *info,
809 				uint8_t enabled_tcmap)
810 {
811 	uint16_t bsf, qp_idx;
812 
813 	/* Only TC0 is supported for now; multi-TC support needs to be added later.
814 	 * Configure the TC and queue mapping parameters: for each enabled TC,
815 	 * allocate qpnum_per_tc queues to that traffic class.
816 	 */
817 	if (enabled_tcmap != 0x01) {
818 		PMD_INIT_LOG(ERR, "only TC0 is supported");
819 		return -ENOTSUP;
820 	}
821 
822 	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
823 	bsf = rte_bsf32(vsi->nb_qps);
824 	/* Adjust the queue number to actual queues that can be applied */
825 	vsi->nb_qps = 0x1 << bsf;
826 
827 	qp_idx = 0;
828 	/* Set tc and queue mapping with VSI */
829 	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
830 						ICE_AQ_VSI_TC_Q_OFFSET_S) |
831 					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
832 
833 	/* Associate queue number with VSI */
834 	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
835 	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
836 	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
837 	info->valid_sections |=
838 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
839 	/* Set the info.ingress_table and info.egress_table
840 	 * for UP translate table. Now just set it to 1:1 map by default
841 	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
842 	 */
843 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
844 	info->ingress_table  = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
845 	info->egress_table   = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
846 	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
847 	return 0;
848 }
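/*
 * Worked example (illustrative): with vsi->nb_qps == 8, bsf is 3, so
 * tc_mapping[0] encodes "queue offset 0, 2^3 = 8 queues" as
 * (0 << ICE_AQ_VSI_TC_Q_OFFSET_S) | (3 << ICE_AQ_VSI_TC_Q_NUM_S), while
 * q_mapping[] records the contiguous range starting at vsi->base_queue.
 */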
849 
850 static int
851 ice_init_mac_address(struct rte_eth_dev *dev)
852 {
853 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
854 
855 	if (!rte_is_unicast_ether_addr
856 		((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
857 		PMD_INIT_LOG(ERR, "Invalid MAC address");
858 		return -EINVAL;
859 	}
860 
861 	rte_ether_addr_copy(
862 		(struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
863 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
864 
865 	dev->data->mac_addrs =
866 		rte_zmalloc(NULL, sizeof(struct rte_ether_addr) * ICE_NUM_MACADDR_MAX, 0);
867 	if (!dev->data->mac_addrs) {
868 		PMD_INIT_LOG(ERR,
869 			     "Failed to allocate memory to store mac address");
870 		return -ENOMEM;
871 	}
872 	/* store it to dev data */
873 	rte_ether_addr_copy(
874 		(struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
875 		&dev->data->mac_addrs[0]);
876 	return 0;
877 }
878 
879 /* Find a specific MAC filter */
880 static struct ice_mac_filter *
881 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
882 {
883 	struct ice_mac_filter *f;
884 
885 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
886 		if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
887 			return f;
888 	}
889 
890 	return NULL;
891 }
892 
893 static int
894 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
895 {
896 	struct ice_fltr_list_entry *m_list_itr = NULL;
897 	struct ice_mac_filter *f;
898 	struct LIST_HEAD_TYPE list_head;
899 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
900 	int ret = 0;
901 
902 	/* If it's added and configured, return */
903 	f = ice_find_mac_filter(vsi, mac_addr);
904 	if (f) {
905 		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
906 		return 0;
907 	}
908 
909 	INIT_LIST_HEAD(&list_head);
910 
911 	m_list_itr = (struct ice_fltr_list_entry *)
912 		ice_malloc(hw, sizeof(*m_list_itr));
913 	if (!m_list_itr) {
914 		ret = -ENOMEM;
915 		goto DONE;
916 	}
917 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
918 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
919 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
920 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
921 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
922 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
923 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
924 
925 	LIST_ADD(&m_list_itr->list_entry, &list_head);
926 
927 	/* Add the mac */
928 	ret = ice_add_mac(hw, &list_head);
929 	if (ret != ICE_SUCCESS) {
930 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
931 		ret = -EINVAL;
932 		goto DONE;
933 	}
934 	/* Add the mac addr into mac list */
935 	f = rte_zmalloc(NULL, sizeof(*f), 0);
936 	if (!f) {
937 		PMD_DRV_LOG(ERR, "failed to allocate memory");
938 		ret = -ENOMEM;
939 		goto DONE;
940 	}
941 	rte_ether_addr_copy(mac_addr, &f->mac_info.mac_addr);
942 	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
943 	vsi->mac_num++;
944 
945 	ret = 0;
946 
947 DONE:
948 	rte_free(m_list_itr);
949 	return ret;
950 }
951 
952 static int
953 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
954 {
955 	struct ice_fltr_list_entry *m_list_itr = NULL;
956 	struct ice_mac_filter *f;
957 	struct LIST_HEAD_TYPE list_head;
958 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
959 	int ret = 0;
960 
961 	/* Can't find it, return an error */
962 	f = ice_find_mac_filter(vsi, mac_addr);
963 	if (!f)
964 		return -EINVAL;
965 
966 	INIT_LIST_HEAD(&list_head);
967 
968 	m_list_itr = (struct ice_fltr_list_entry *)
969 		ice_malloc(hw, sizeof(*m_list_itr));
970 	if (!m_list_itr) {
971 		ret = -ENOMEM;
972 		goto DONE;
973 	}
974 	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
975 		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
976 	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
977 	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
978 	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
979 	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
980 	m_list_itr->fltr_info.vsi_handle = vsi->idx;
981 
982 	LIST_ADD(&m_list_itr->list_entry, &list_head);
983 
984 	/* remove the mac filter */
985 	ret = ice_remove_mac(hw, &list_head);
986 	if (ret != ICE_SUCCESS) {
987 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
988 		ret = -EINVAL;
989 		goto DONE;
990 	}
991 
992 	/* Remove the mac addr from mac list */
993 	TAILQ_REMOVE(&vsi->mac_list, f, next);
994 	rte_free(f);
995 	vsi->mac_num--;
996 
997 	ret = 0;
998 DONE:
999 	rte_free(m_list_itr);
1000 	return ret;
1001 }
1002 
1003 /* Find a specific VLAN filter */
1004 static struct ice_vlan_filter *
1005 ice_find_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1006 {
1007 	struct ice_vlan_filter *f;
1008 
1009 	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
1010 		if (vlan->tpid == f->vlan_info.vlan.tpid &&
1011 		    vlan->vid == f->vlan_info.vlan.vid)
1012 			return f;
1013 	}
1014 
1015 	return NULL;
1016 }
1017 
1018 static int
1019 ice_add_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1020 {
1021 	struct ice_fltr_list_entry *v_list_itr = NULL;
1022 	struct ice_vlan_filter *f;
1023 	struct LIST_HEAD_TYPE list_head;
1024 	struct ice_hw *hw;
1025 	int ret = 0;
1026 
1027 	if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
1028 		return -EINVAL;
1029 
1030 	hw = ICE_VSI_TO_HW(vsi);
1031 
1032 	/* If it's added and configured, return. */
1033 	f = ice_find_vlan_filter(vsi, vlan);
1034 	if (f) {
1035 		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
1036 		return 0;
1037 	}
1038 
1039 	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
1040 		return 0;
1041 
1042 	INIT_LIST_HEAD(&list_head);
1043 
1044 	v_list_itr = (struct ice_fltr_list_entry *)
1045 		      ice_malloc(hw, sizeof(*v_list_itr));
1046 	if (!v_list_itr) {
1047 		ret = -ENOMEM;
1048 		goto DONE;
1049 	}
1050 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
1051 	v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1052 	v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1053 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1054 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1055 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1056 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1057 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
1058 
1059 	LIST_ADD(&v_list_itr->list_entry, &list_head);
1060 
1061 	/* Add the vlan */
1062 	ret = ice_add_vlan(hw, &list_head);
1063 	if (ret != ICE_SUCCESS) {
1064 		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
1065 		ret = -EINVAL;
1066 		goto DONE;
1067 	}
1068 
1069 	/* Add vlan into vlan list */
1070 	f = rte_zmalloc(NULL, sizeof(*f), 0);
1071 	if (!f) {
1072 		PMD_DRV_LOG(ERR, "failed to allocate memory");
1073 		ret = -ENOMEM;
1074 		goto DONE;
1075 	}
1076 	f->vlan_info.vlan.tpid = vlan->tpid;
1077 	f->vlan_info.vlan.vid = vlan->vid;
1078 	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
1079 	vsi->vlan_num++;
1080 
1081 	ret = 0;
1082 
1083 DONE:
1084 	rte_free(v_list_itr);
1085 	return ret;
1086 }
1087 
1088 static int
1089 ice_remove_vlan_filter(struct ice_vsi *vsi, struct ice_vlan *vlan)
1090 {
1091 	struct ice_fltr_list_entry *v_list_itr = NULL;
1092 	struct ice_vlan_filter *f;
1093 	struct LIST_HEAD_TYPE list_head;
1094 	struct ice_hw *hw;
1095 	int ret = 0;
1096 
1097 	if (!vsi || vlan->vid > RTE_ETHER_MAX_VLAN_ID)
1098 		return -EINVAL;
1099 
1100 	hw = ICE_VSI_TO_HW(vsi);
1101 
1102 	/* Can't find it, return an error */
1103 	f = ice_find_vlan_filter(vsi, vlan);
1104 	if (!f)
1105 		return -EINVAL;
1106 
1107 	INIT_LIST_HEAD(&list_head);
1108 
1109 	v_list_itr = (struct ice_fltr_list_entry *)
1110 		      ice_malloc(hw, sizeof(*v_list_itr));
1111 	if (!v_list_itr) {
1112 		ret = -ENOMEM;
1113 		goto DONE;
1114 	}
1115 
1116 	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan->vid;
1117 	v_list_itr->fltr_info.l_data.vlan.tpid = vlan->tpid;
1118 	v_list_itr->fltr_info.l_data.vlan.tpid_valid = true;
1119 	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
1120 	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1121 	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1122 	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
1123 	v_list_itr->fltr_info.vsi_handle = vsi->idx;
1124 
1125 	LIST_ADD(&v_list_itr->list_entry, &list_head);
1126 
1127 	/* remove the vlan filter */
1128 	ret = ice_remove_vlan(hw, &list_head);
1129 	if (ret != ICE_SUCCESS) {
1130 		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
1131 		ret = -EINVAL;
1132 		goto DONE;
1133 	}
1134 
1135 	/* Remove the vlan id from vlan list */
1136 	TAILQ_REMOVE(&vsi->vlan_list, f, next);
1137 	rte_free(f);
1138 	vsi->vlan_num--;
1139 
1140 	ret = 0;
1141 DONE:
1142 	rte_free(v_list_itr);
1143 	return ret;
1144 }
1145 
1146 static int
1147 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
1148 {
1149 	struct ice_mac_filter *m_f;
1150 	struct ice_vlan_filter *v_f;
1151 	void *temp;
1152 	int ret = 0;
1153 
1154 	if (!vsi || !vsi->mac_num)
1155 		return -EINVAL;
1156 
1157 	RTE_TAILQ_FOREACH_SAFE(m_f, &vsi->mac_list, next, temp) {
1158 		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
1159 		if (ret != ICE_SUCCESS) {
1160 			ret = -EINVAL;
1161 			goto DONE;
1162 		}
1163 	}
1164 
1165 	if (vsi->vlan_num == 0)
1166 		return 0;
1167 
1168 	RTE_TAILQ_FOREACH_SAFE(v_f, &vsi->vlan_list, next, temp) {
1169 		ret = ice_remove_vlan_filter(vsi, &v_f->vlan_info.vlan);
1170 		if (ret != ICE_SUCCESS) {
1171 			ret = -EINVAL;
1172 			goto DONE;
1173 		}
1174 	}
1175 
1176 DONE:
1177 	return ret;
1178 }
1179 
1180 /* Enable IRQ0 */
1181 static void
1182 ice_pf_enable_irq0(struct ice_hw *hw)
1183 {
1184 	/* reset the registers */
1185 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
1186 	ICE_READ_REG(hw, PFINT_OICR);
1187 
1188 #ifdef ICE_LSE_SPT
1189 	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
1190 		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
1191 				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
1192 
1193 	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
1194 		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
1195 		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
1196 		       PFINT_OICR_CTL_ITR_INDX_M) |
1197 		      PFINT_OICR_CTL_CAUSE_ENA_M);
1198 
1199 	ICE_WRITE_REG(hw, PFINT_FW_CTL,
1200 		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
1201 		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
1202 		       PFINT_FW_CTL_ITR_INDX_M) |
1203 		      PFINT_FW_CTL_CAUSE_ENA_M);
1204 #else
1205 	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
1206 #endif
1207 
1208 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1209 		      GLINT_DYN_CTL_INTENA_M |
1210 		      GLINT_DYN_CTL_CLEARPBA_M |
1211 		      GLINT_DYN_CTL_ITR_INDX_M);
1212 
1213 	ice_flush(hw);
1214 }
1215 
1216 /* Disable IRQ0 */
1217 static void
1218 ice_pf_disable_irq0(struct ice_hw *hw)
1219 {
1220 	/* Disable all interrupt types */
1221 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1222 	ice_flush(hw);
1223 }
1224 
1225 #ifdef ICE_LSE_SPT
1226 static void
1227 ice_handle_aq_msg(struct rte_eth_dev *dev)
1228 {
1229 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1230 	struct ice_ctl_q_info *cq = &hw->adminq;
1231 	struct ice_rq_event_info event;
1232 	uint16_t pending, opcode;
1233 	int ret;
1234 
1235 	event.buf_len = ICE_AQ_MAX_BUF_LEN;
1236 	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
1237 	if (!event.msg_buf) {
1238 		PMD_DRV_LOG(ERR, "Failed to allocate mem");
1239 		return;
1240 	}
1241 
1242 	pending = 1;
1243 	while (pending) {
1244 		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1245 
1246 		if (ret != ICE_SUCCESS) {
1247 			PMD_DRV_LOG(INFO,
1248 				    "Failed to read msg from AdminQ, "
1249 				    "adminq_err: %u",
1250 				    hw->adminq.sq_last_status);
1251 			break;
1252 		}
1253 		opcode = rte_le_to_cpu_16(event.desc.opcode);
1254 
1255 		switch (opcode) {
1256 		case ice_aqc_opc_get_link_status:
1257 			ret = ice_link_update(dev, 0);
1258 			if (!ret)
1259 				rte_eth_dev_callback_process
1260 					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1261 			break;
1262 		default:
1263 			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1264 				    opcode);
1265 			break;
1266 		}
1267 	}
1268 	rte_free(event.msg_buf);
1269 }
1270 #endif
1271 
1272 /**
1273  * Interrupt handler triggered by NIC for handling
1274  * specific interrupt.
1275  *
1276  * @param handle
1277  *  Pointer to interrupt handle.
1278  * @param param
1279  *  The address of parameter (struct rte_eth_dev *) registered before.
1280  *
1281  * @return
1282  *  void
1283  */
1284 static void
1285 ice_interrupt_handler(void *param)
1286 {
1287 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1288 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1289 	uint32_t oicr;
1290 	uint32_t reg;
1291 	uint8_t pf_num;
1292 	uint8_t event;
1293 	uint16_t queue;
1294 	int ret;
1295 #ifdef ICE_LSE_SPT
1296 	uint32_t int_fw_ctl;
1297 #endif
1298 
1299 	/* Disable interrupt */
1300 	ice_pf_disable_irq0(hw);
1301 
1302 	/* read out interrupt causes */
1303 	oicr = ICE_READ_REG(hw, PFINT_OICR);
1304 #ifdef ICE_LSE_SPT
1305 	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1306 #endif
1307 
1308 	/* No interrupt event indicated */
1309 	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1310 		PMD_DRV_LOG(INFO, "No interrupt event");
1311 		goto done;
1312 	}
1313 
1314 #ifdef ICE_LSE_SPT
1315 	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1316 		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1317 		ice_handle_aq_msg(dev);
1318 	}
1319 #else
1320 	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1321 		PMD_DRV_LOG(INFO, "OICR: link state change event");
1322 		ret = ice_link_update(dev, 0);
1323 		if (!ret)
1324 			rte_eth_dev_callback_process
1325 				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1326 	}
1327 #endif
1328 
1329 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
1330 		PMD_DRV_LOG(WARNING, "OICR: MDD event");
1331 		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1332 		if (reg & GL_MDET_TX_PQM_VALID_M) {
1333 			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1334 				 GL_MDET_TX_PQM_PF_NUM_S;
1335 			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1336 				GL_MDET_TX_PQM_MAL_TYPE_S;
1337 			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1338 				GL_MDET_TX_PQM_QNUM_S;
1339 
1340 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1341 				    "%d by PQM on TX queue %d PF# %d",
1342 				    event, queue, pf_num);
1343 		}
1344 
1345 		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1346 		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1347 			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1348 				 GL_MDET_TX_TCLAN_PF_NUM_S;
1349 			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1350 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1351 			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1352 				GL_MDET_TX_TCLAN_QNUM_S;
1353 
1354 			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1355 				    "%d by TCLAN on TX queue %d PF# %d",
1356 				    event, queue, pf_num);
1357 		}
1358 	}
1359 done:
1360 	/* Enable interrupt */
1361 	ice_pf_enable_irq0(hw);
1362 	rte_intr_ack(dev->intr_handle);
1363 }
1364 
1365 static void
1366 ice_init_proto_xtr(struct rte_eth_dev *dev)
1367 {
1368 	struct ice_adapter *ad =
1369 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1370 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1371 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1372 	const struct proto_xtr_ol_flag *ol_flag;
1373 	bool proto_xtr_enable = false;
1374 	int offset;
1375 	uint16_t i;
1376 
1377 	pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
1378 	if (unlikely(pf->proto_xtr == NULL)) {
1379 		PMD_DRV_LOG(ERR, "No memory for setting up protocol extraction table");
1380 		return;
1381 	}
1382 
1383 	for (i = 0; i < pf->lan_nb_qps; i++) {
1384 		pf->proto_xtr[i] = ad->devargs.proto_xtr[i] != PROTO_XTR_NONE ?
1385 				   ad->devargs.proto_xtr[i] :
1386 				   ad->devargs.proto_xtr_dflt;
1387 
1388 		if (pf->proto_xtr[i] != PROTO_XTR_NONE) {
1389 			uint8_t type = pf->proto_xtr[i];
1390 
1391 			ice_proto_xtr_ol_flag_params[type].required = true;
1392 			proto_xtr_enable = true;
1393 		}
1394 	}
1395 
1396 	if (likely(!proto_xtr_enable))
1397 		return;
1398 
1399 	ice_check_proto_xtr_support(hw);
1400 
1401 	offset = rte_mbuf_dynfield_register(&ice_proto_xtr_metadata_param);
1402 	if (unlikely(offset == -1)) {
1403 		PMD_DRV_LOG(ERR,
1404 			    "Protocol extraction metadata is disabled in mbuf with error %d",
1405 			    -rte_errno);
1406 		return;
1407 	}
1408 
1409 	PMD_DRV_LOG(DEBUG,
1410 		    "Protocol extraction metadata offset in mbuf is : %d",
1411 		    offset);
1412 	rte_net_ice_dynfield_proto_xtr_metadata_offs = offset;
1413 
1414 	for (i = 0; i < RTE_DIM(ice_proto_xtr_ol_flag_params); i++) {
1415 		ol_flag = &ice_proto_xtr_ol_flag_params[i];
1416 
1417 		if (!ol_flag->required)
1418 			continue;
1419 
1420 		if (!ice_proto_xtr_hw_support[i]) {
1421 			PMD_DRV_LOG(ERR,
1422 				    "Protocol extraction type %u is not supported in hardware",
1423 				    i);
1424 			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1425 			break;
1426 		}
1427 
1428 		offset = rte_mbuf_dynflag_register(&ol_flag->param);
1429 		if (unlikely(offset == -1)) {
1430 			PMD_DRV_LOG(ERR,
1431 				    "Protocol extraction offload '%s' failed to register with error %d",
1432 				    ol_flag->param.name, -rte_errno);
1433 
1434 			rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
1435 			break;
1436 		}
1437 
1438 		PMD_DRV_LOG(DEBUG,
1439 			    "Protocol extraction offload '%s' offset in mbuf is : %d",
1440 			    ol_flag->param.name, offset);
1441 		*ol_flag->ol_flag = 1ULL << offset;
1442 	}
1443 }
1444 
1445 /*  Initialize SW parameters of PF */
1446 static int
1447 ice_pf_sw_init(struct rte_eth_dev *dev)
1448 {
1449 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1450 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1451 
1452 	pf->lan_nb_qp_max =
1453 		(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1454 				  hw->func_caps.common_cap.num_rxq);
1455 
1456 	pf->lan_nb_qps = pf->lan_nb_qp_max;
1457 
1458 	ice_init_proto_xtr(dev);
1459 
1460 	if (hw->func_caps.fd_fltr_guar > 0 ||
1461 	    hw->func_caps.fd_fltr_best_effort > 0) {
1462 		pf->flags |= ICE_FLAG_FDIR;
1463 		pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR;
1464 		pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps;
1465 	} else {
1466 		pf->fdir_nb_qps = 0;
1467 	}
1468 	pf->fdir_qp_offset = 0;
1469 
1470 	return 0;
1471 }
1472 
1473 struct ice_vsi *
1474 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1475 {
1476 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1477 	struct ice_vsi *vsi = NULL;
1478 	struct ice_vsi_ctx vsi_ctx;
1479 	int ret;
1480 	struct rte_ether_addr broadcast = {
1481 		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1482 	struct rte_ether_addr mac_addr;
1483 	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1484 	uint8_t tc_bitmap = 0x1;
1485 	uint16_t cfg;
1486 
1487 	/* hw->num_lports = 1 in NIC mode */
1488 	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1489 	if (!vsi)
1490 		return NULL;
1491 
1492 	vsi->idx = pf->next_vsi_idx;
1493 	pf->next_vsi_idx++;
1494 	vsi->type = type;
1495 	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1496 	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1497 	vsi->vlan_anti_spoof_on = 0;
1498 	vsi->vlan_filter_on = 1;
1499 	TAILQ_INIT(&vsi->mac_list);
1500 	TAILQ_INIT(&vsi->vlan_list);
1501 
1502 	/* Keep in sync with the RTE_ETH_RSS_RETA_SIZE_x maximum value definition */
1503 	pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1504 			RTE_ETH_RSS_RETA_SIZE_512 ? RTE_ETH_RSS_RETA_SIZE_512 :
1505 			hw->func_caps.common_cap.rss_table_size;
1506 	pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1507 
1508 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1509 	switch (type) {
1510 	case ICE_VSI_PF:
1511 		vsi->nb_qps = pf->lan_nb_qps;
1512 		vsi->base_queue = 1;
1513 		ice_vsi_config_default_rss(&vsi_ctx.info);
1514 		vsi_ctx.alloc_from_pool = true;
1515 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1516 		/* switch_id is queried by get_switch_config aq, which is done
1517 		 * by ice_init_hw
1518 		 */
1519 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1520 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1521 		/* Allow all untagged or tagged packets */
1522 		vsi_ctx.info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
1523 		vsi_ctx.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
1524 		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1525 					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1526 		if (ice_is_dvm_ena(hw)) {
1527 			vsi_ctx.info.outer_vlan_flags =
1528 				(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
1529 				 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
1530 				ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
1531 			vsi_ctx.info.outer_vlan_flags |=
1532 				(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
1533 				 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
1534 				ICE_AQ_VSI_OUTER_TAG_TYPE_M;
1535 		}
1536 
1537 		/* FDIR */
1538 		cfg = ICE_AQ_VSI_PROP_SECURITY_VALID |
1539 			ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1540 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1541 		cfg = ICE_AQ_VSI_FD_ENABLE;
1542 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1543 		vsi_ctx.info.max_fd_fltr_dedicated =
1544 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar);
1545 		vsi_ctx.info.max_fd_fltr_shared =
1546 			rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort);
1547 
1548 		/* Enable VLAN/UP trip */
1549 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1550 						      &vsi_ctx.info,
1551 						      ICE_DEFAULT_TCMAP);
1552 		if (ret) {
1553 			PMD_INIT_LOG(ERR,
1554 				     "tc queue mapping with vsi failed, "
1555 				     "err = %d",
1556 				     ret);
1557 			goto fail_mem;
1558 		}
1559 
1560 		break;
1561 	case ICE_VSI_CTRL:
1562 		vsi->nb_qps = pf->fdir_nb_qps;
1563 		vsi->base_queue = ICE_FDIR_QUEUE_ID;
1564 		vsi_ctx.alloc_from_pool = true;
1565 		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1566 
1567 		cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1568 		vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg);
1569 		cfg = ICE_AQ_VSI_FD_PROG_ENABLE;
1570 		vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg);
1571 		vsi_ctx.info.sw_id = hw->port_info->sw_id;
1572 		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1573 		ret = ice_vsi_config_tc_queue_mapping(vsi,
1574 						      &vsi_ctx.info,
1575 						      ICE_DEFAULT_TCMAP);
1576 		if (ret) {
1577 			PMD_INIT_LOG(ERR,
1578 				     "tc queue mapping with vsi failed, "
1579 				     "err = %d",
1580 				     ret);
1581 			goto fail_mem;
1582 		}
1583 		break;
1584 	default:
1585 		/* for other types of VSI */
1586 		PMD_INIT_LOG(ERR, "other types of VSI not supported");
1587 		goto fail_mem;
1588 	}
1589 
1590 	/* VF has MSIX interrupt in VF range, don't allocate here */
1591 	if (type == ICE_VSI_PF) {
1592 		ret = ice_res_pool_alloc(&pf->msix_pool,
1593 					 RTE_MIN(vsi->nb_qps,
1594 						 RTE_MAX_RXTX_INTR_VEC_ID));
1595 		if (ret < 0) {
1596 			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1597 				     vsi->vsi_id, ret);
1598 		}
1599 		vsi->msix_intr = ret;
1600 		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1601 	} else if (type == ICE_VSI_CTRL) {
1602 		ret = ice_res_pool_alloc(&pf->msix_pool, 1);
1603 		if (ret < 0) {
1604 			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d",
1605 				    vsi->vsi_id, ret);
1606 		}
1607 		vsi->msix_intr = ret;
1608 		vsi->nb_msix = 1;
1609 	} else {
1610 		vsi->msix_intr = 0;
1611 		vsi->nb_msix = 0;
1612 	}
1613 	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1614 	if (ret != ICE_SUCCESS) {
1615 		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1616 		goto fail_mem;
1617 	}
1618 	/* store VSI information in the SW structure */
1619 	vsi->vsi_id = vsi_ctx.vsi_num;
1620 	vsi->info = vsi_ctx.info;
1621 	pf->vsis_allocated = vsi_ctx.vsis_allocd;
1622 	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1623 
1624 	if (type == ICE_VSI_PF) {
1625 		/* MAC configuration */
1626 		rte_ether_addr_copy((struct rte_ether_addr *)
1627 					hw->port_info->mac.perm_addr,
1628 				    &pf->dev_addr);
1629 
1630 		rte_ether_addr_copy(&pf->dev_addr, &mac_addr);
1631 		ret = ice_add_mac_filter(vsi, &mac_addr);
1632 		if (ret != ICE_SUCCESS)
1633 			PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1634 
1635 		rte_ether_addr_copy(&broadcast, &mac_addr);
1636 		ret = ice_add_mac_filter(vsi, &mac_addr);
1637 		if (ret != ICE_SUCCESS)
1638 			PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1639 	}
1640 
1641 	/* At the beginning, only TC0 is used. */
1642 	/* What we need here is the maximum number of TX queues;
1643 	 * currently vsi->nb_qps holds that value.
1644 	 * Correct this if that ever changes.
1645 	 */
1646 	max_txqs[0] = vsi->nb_qps;
1647 	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1648 			      tc_bitmap, max_txqs);
1649 	if (ret != ICE_SUCCESS)
1650 		PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1651 
1652 	return vsi;
1653 fail_mem:
1654 	rte_free(vsi);
1655 	pf->next_vsi_idx--;
1656 	return NULL;
1657 }
1658 
1659 static int
1660 ice_send_driver_ver(struct ice_hw *hw)
1661 {
1662 	struct ice_driver_ver dv;
1663 
1664 	/* we don't have a driver version; use 0 as a dummy */
1665 	dv.major_ver = 0;
1666 	dv.minor_ver = 0;
1667 	dv.build_ver = 0;
1668 	dv.subbuild_ver = 0;
1669 	strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1670 
1671 	return ice_aq_send_driver_ver(hw, &dv, NULL);
1672 }
1673 
1674 static int
1675 ice_pf_setup(struct ice_pf *pf)
1676 {
1677 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1678 	struct ice_vsi *vsi;
1679 	uint16_t unused;
1680 
1681 	/* Clear all stats counters */
1682 	pf->offset_loaded = false;
1683 	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1684 	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1685 	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1686 	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1687 
1688 	/* force guaranteed filter pool for PF */
1689 	ice_alloc_fd_guar_item(hw, &unused,
1690 			       hw->func_caps.fd_fltr_guar);
1691 	/* force shared filter pool for PF */
1692 	ice_alloc_fd_shrd_item(hw, &unused,
1693 			       hw->func_caps.fd_fltr_best_effort);
1694 
1695 	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1696 	if (!vsi) {
1697 		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1698 		return -EINVAL;
1699 	}
1700 
1701 	pf->main_vsi = vsi;
1702 
1703 	return 0;
1704 }
1705 
1706 static enum ice_pkg_type
1707 ice_load_pkg_type(struct ice_hw *hw)
1708 {
1709 	enum ice_pkg_type package_type;
1710 
1711 	/* store the activated package type (OS default or Comms) */
1712 	if (!strncmp((char *)hw->active_pkg_name, ICE_OS_DEFAULT_PKG_NAME,
1713 		ICE_PKG_NAME_SIZE))
1714 		package_type = ICE_PKG_TYPE_OS_DEFAULT;
1715 	else if (!strncmp((char *)hw->active_pkg_name, ICE_COMMS_PKG_NAME,
1716 		ICE_PKG_NAME_SIZE))
1717 		package_type = ICE_PKG_TYPE_COMMS;
1718 	else
1719 		package_type = ICE_PKG_TYPE_UNKNOWN;
1720 
1721 	PMD_INIT_LOG(NOTICE, "Active package is: %d.%d.%d.%d, %s (%s VLAN mode)",
1722 		hw->active_pkg_ver.major, hw->active_pkg_ver.minor,
1723 		hw->active_pkg_ver.update, hw->active_pkg_ver.draft,
1724 		hw->active_pkg_name,
1725 		ice_is_dvm_ena(hw) ? "double" : "single");
1726 
1727 	return package_type;
1728 }
1729 
1730 int ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn)
1731 {
1732 	struct ice_hw *hw = &adapter->hw;
1733 	char pkg_file[ICE_MAX_PKG_FILENAME_SIZE];
1734 	char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
1735 	void *buf;
1736 	size_t bufsz;
1737 	int err;
1738 
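	/* DDP package search order: first a DSN-specific file
	 * ("ice-<dsn>.pkg") under the updates path, then under the default
	 * path, then the generic package under the updates path, and
	 * finally the generic default package.
	 */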
1739 	if (!use_dsn)
1740 		goto no_dsn;
1741 
1742 	memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
1743 	snprintf(opt_ddp_filename, ICE_MAX_PKG_FILENAME_SIZE,
1744 		"ice-%016" PRIx64 ".pkg", dsn);
1745 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_UPDATES,
1746 		ICE_MAX_PKG_FILENAME_SIZE);
1747 	strcat(pkg_file, opt_ddp_filename);
1748 	if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1749 		goto load_fw;
1750 
1751 	strncpy(pkg_file, ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
1752 		ICE_MAX_PKG_FILENAME_SIZE);
1753 	strcat(pkg_file, opt_ddp_filename);
1754 	if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1755 		goto load_fw;
1756 
1757 no_dsn:
1758 	strncpy(pkg_file, ICE_PKG_FILE_UPDATES, ICE_MAX_PKG_FILENAME_SIZE);
1759 	if (rte_firmware_read(pkg_file, &buf, &bufsz) == 0)
1760 		goto load_fw;
1761 
1762 	strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE);
1763 	if (rte_firmware_read(pkg_file, &buf, &bufsz) < 0) {
1764 		PMD_INIT_LOG(ERR, "failed to search file path\n");
1765 		return -1;
1766 	}
1767 
1768 load_fw:
1769 	PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_file);
1770 
1771 	err = ice_copy_and_init_pkg(hw, buf, bufsz);
1772 	if (err) {
1773 		PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
1774 		goto out;
1775 	}
1776 
1777 	/* store the loaded pkg type info */
1778 	adapter->active_pkg_type = ice_load_pkg_type(hw);
1779 
1780 out:
1781 	free(buf);
1782 	return err;
1783 }
1784 
1785 static void
1786 ice_base_queue_get(struct ice_pf *pf)
1787 {
1788 	uint32_t reg;
1789 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
1790 
1791 	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
1792 	if (reg & PFLAN_RX_QALLOC_VALID_M) {
1793 		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
1794 	} else {
1795 		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
1796 					" index");
1797 	}
1798 }
1799 
1800 static int
1801 parse_bool(const char *key, const char *value, void *args)
1802 {
1803 	int *i = (int *)args;
1804 	char *end;
1805 	int num;
1806 
1807 	num = strtoul(value, &end, 10);
1808 
1809 	if (num != 0 && num != 1) {
1810 		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
1811 			"value must be 0 or 1",
1812 			value, key);
1813 		return -1;
1814 	}
1815 
1816 	*i = num;
1817 	return 0;
1818 }
1819 
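/* Parse a devarg value into a u64.  Note the value is interpreted as a
 * hexadecimal number (base 16), matching its use for hw_debug_mask.
 */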
1820 static int
1821 parse_u64(const char *key, const char *value, void *args)
1822 {
1823 	u64 *num = (u64 *)args;
1824 	u64 tmp;
1825 
1826 	errno = 0;
1827 	tmp = strtoull(value, NULL, 16);
1828 	if (errno) {
1829 		PMD_DRV_LOG(WARNING, "%s: \"%s\" is not a valid u64",
1830 			    key, value);
1831 		return -1;
1832 	}
1833 
1834 	*num = tmp;
1835 
1836 	return 0;
1837 }
1838 
1839 static int
1840 lookup_pps_type(const char *pps_name)
1841 {
1842 	static struct {
1843 		const char *name;
1844 		enum pps_type type;
1845 	} pps_type_map[] = {
1846 		{ "pin",  PPS_PIN  },
1847 	};
1848 
1849 	uint32_t i;
1850 
1851 	for (i = 0; i < RTE_DIM(pps_type_map); i++) {
1852 		if (strcmp(pps_name, pps_type_map[i].name) == 0)
1853 			return pps_type_map[i].type;
1854 	}
1855 
1856 	return -1;
1857 }
1858 
1859 static int
1860 parse_pin_set(const char *input, int pps_type, struct ice_devargs *devargs)
1861 {
1862 	const char *str = input;
1863 	char *end = NULL;
1864 	uint32_t idx;
1865 
1866 	while (isblank(*str))
1867 		str++;
1868 
1869 	if (!isdigit(*str))
1870 		return -1;
1871 
1872 	if (pps_type == PPS_PIN) {
1873 		idx = strtoul(str, &end, 10);
1874 		if (end == NULL || idx >= ICE_MAX_PIN_NUM)
1875 			return -1;
1876 		while (isblank(*end))
1877 			end++;
1878 		if (*end != ']')
1879 			return -1;
1880 
1881 		devargs->pin_idx = idx;
1882 		devargs->pps_out_ena = 1;
1883 
1884 		return 0;
1885 	}
1886 
1887 	return -1;
1888 }
1889 
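/* Parse the value of the pps_out devarg.  Based on the parsing below, the
 * expected form is "[pin:<idx>]" with <idx> below ICE_MAX_PIN_NUM, e.g. a
 * devarg such as pps_out='[pin:0]' (illustrative example only; the exact
 * quoting depends on how the devargs string is passed to EAL).
 */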
1890 static int
1891 parse_pps_out_parameter(const char *pins, struct ice_devargs *devargs)
1892 {
1893 	const char *pin_start;
1894 	uint32_t idx;
1895 	int pps_type;
1896 	char pps_name[32];
1897 
1898 	while (isblank(*pins))
1899 		pins++;
1900 
1901 	pins++;
1902 	while (isblank(*pins))
1903 		pins++;
1904 	if (*pins == '\0')
1905 		return -1;
1906 
1907 	for (idx = 0; idx < sizeof(pps_name) - 1; idx++) {
1908 		if (isblank(pins[idx]) ||
1909 		    pins[idx] == ':' ||
1910 		    pins[idx] == '\0')
1911 			break;
1912 
1913 		pps_name[idx] = pins[idx];
1914 	}
1915 	pps_name[idx] = '\0';
1916 	pps_type = lookup_pps_type(pps_name);
1917 	if (pps_type < 0)
1918 		return -1;
1919 
1920 	pins += idx;
1921 
1922 	pins += strcspn(pins, ":");
1923 	if (*pins++ != ':')
1924 		return -1;
1925 	while (isblank(*pins))
1926 		pins++;
1927 
1928 	pin_start = pins;
1929 
1930 	while (isblank(*pins))
1931 		pins++;
1932 
1933 	if (parse_pin_set(pin_start, pps_type, devargs) < 0)
1934 		return -1;
1935 
1936 	return 0;
1937 }
1938 
1939 static int
1940 handle_pps_out_arg(__rte_unused const char *key, const char *value,
1941 		   void *extra_args)
1942 {
1943 	struct ice_devargs *devargs = extra_args;
1944 
1945 	if (value == NULL || extra_args == NULL)
1946 		return -EINVAL;
1947 
1948 	if (parse_pps_out_parameter(value, devargs) < 0) {
1949 		PMD_DRV_LOG(ERR,
1950 			    "The GPIO pin parameter is wrong : '%s'",
1951 			    value);
1952 		return -1;
1953 	}
1954 
1955 	return 0;
1956 }
1957 
1958 static int ice_parse_devargs(struct rte_eth_dev *dev)
1959 {
1960 	struct ice_adapter *ad =
1961 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1962 	struct rte_devargs *devargs = dev->device->devargs;
1963 	struct rte_kvargs *kvlist;
1964 	int ret;
1965 
1966 	if (devargs == NULL)
1967 		return 0;
1968 
1969 	kvlist = rte_kvargs_parse(devargs->args, ice_valid_args);
1970 	if (kvlist == NULL) {
1971 		PMD_INIT_LOG(ERR, "Invalid kvargs key\n");
1972 		return -EINVAL;
1973 	}
1974 
1975 	ad->devargs.proto_xtr_dflt = PROTO_XTR_NONE;
1976 	memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
1977 	       sizeof(ad->devargs.proto_xtr));
1978 
1979 	ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
1980 				 &handle_proto_xtr_arg, &ad->devargs);
1981 	if (ret)
1982 		goto bail;
1983 
1984 	ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
1985 				 &parse_bool, &ad->devargs.safe_mode_support);
1986 	if (ret)
1987 		goto bail;
1988 
1989 	ret = rte_kvargs_process(kvlist, ICE_PIPELINE_MODE_SUPPORT_ARG,
1990 				 &parse_bool, &ad->devargs.pipe_mode_support);
1991 	if (ret)
1992 		goto bail;
1993 
1994 	ret = rte_kvargs_process(kvlist, ICE_HW_DEBUG_MASK_ARG,
1995 				 &parse_u64, &ad->hw.debug_mask);
1996 	if (ret)
1997 		goto bail;
1998 
1999 	ret = rte_kvargs_process(kvlist, ICE_ONE_PPS_OUT_ARG,
2000 				 &handle_pps_out_arg, &ad->devargs);
2001 	if (ret)
2002 		goto bail;
2003 
2004 	ret = rte_kvargs_process(kvlist, ICE_RX_LOW_LATENCY_ARG,
2005 				 &parse_bool, &ad->devargs.rx_low_latency);
2006 
2007 bail:
2008 	rte_kvargs_free(kvlist);
2009 	return ret;
2010 }
2011 
2012 /* Forward LLDP packets to the default VSI by setting switch rules */
2013 static int
2014 ice_vsi_config_sw_lldp(struct ice_vsi *vsi,  bool on)
2015 {
2016 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2017 	struct ice_fltr_list_entry *s_list_itr = NULL;
2018 	struct LIST_HEAD_TYPE list_head;
2019 	int ret = 0;
2020 
2021 	INIT_LIST_HEAD(&list_head);
2022 
2023 	s_list_itr = (struct ice_fltr_list_entry *)
2024 			ice_malloc(hw, sizeof(*s_list_itr));
2025 	if (!s_list_itr)
2026 		return -ENOMEM;
2027 	s_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2028 	s_list_itr->fltr_info.vsi_handle = vsi->idx;
2029 	s_list_itr->fltr_info.l_data.ethertype_mac.ethertype =
2030 			RTE_ETHER_TYPE_LLDP;
2031 	s_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2032 	s_list_itr->fltr_info.flag = ICE_FLTR_RX;
2033 	s_list_itr->fltr_info.src_id = ICE_SRC_ID_LPORT;
2034 	LIST_ADD(&s_list_itr->list_entry, &list_head);
2035 	if (on)
2036 		ret = ice_add_eth_mac(hw, &list_head);
2037 	else
2038 		ret = ice_remove_eth_mac(hw, &list_head);
2039 
2040 	rte_free(s_list_itr);
2041 	return ret;
2042 }
2043 
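/* Query the descriptors currently allocated for a resource type through the
 * get_allocd_res_desc admin queue command; up to 'num' descriptor IDs are
 * copied into prof_buf and the actual count is returned in num_prof.
 */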
2044 static enum ice_status
2045 ice_get_hw_res(struct ice_hw *hw, uint16_t res_type,
2046 		uint16_t num, uint16_t desc_id,
2047 		uint16_t *prof_buf, uint16_t *num_prof)
2048 {
2049 	struct ice_aqc_res_elem *resp_buf;
2050 	int ret;
2051 	uint16_t buf_len;
2052 	bool res_shared = true;
2053 	struct ice_aq_desc aq_desc;
2054 	struct ice_sq_cd *cd = NULL;
2055 	struct ice_aqc_get_allocd_res_desc *cmd =
2056 			&aq_desc.params.get_res_desc;
2057 
2058 	buf_len = sizeof(*resp_buf) * num;
2059 	resp_buf = ice_malloc(hw, buf_len);
2060 	if (!resp_buf)
2061 		return -ENOMEM;
2062 
2063 	ice_fill_dflt_direct_cmd_desc(&aq_desc,
2064 			ice_aqc_opc_get_allocd_res_desc);
2065 
2066 	cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2067 				ICE_AQC_RES_TYPE_M) | (res_shared ?
2068 				ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2069 	cmd->ops.cmd.first_desc = CPU_TO_LE16(desc_id);
2070 
2071 	ret = ice_aq_send_cmd(hw, &aq_desc, resp_buf, buf_len, cd);
2072 	if (!ret)
2073 		*num_prof = LE16_TO_CPU(cmd->ops.resp.num_desc);
2074 	else
2075 		goto exit;
2076 
2077 	ice_memcpy(prof_buf, resp_buf, sizeof(*resp_buf) *
2078 			(*num_prof), ICE_NONDMA_TO_NONDMA);
2079 
2080 exit:
2081 	rte_free(resp_buf);
2082 	return ret;
2083 }
2084 static int
2085 ice_cleanup_resource(struct ice_hw *hw, uint16_t res_type)
2086 {
2087 	int ret;
2088 	uint16_t prof_id;
2089 	uint16_t prof_buf[ICE_MAX_RES_DESC_NUM];
2090 	uint16_t first_desc = 1;
2091 	uint16_t num_prof = 0;
2092 
2093 	ret = ice_get_hw_res(hw, res_type, ICE_MAX_RES_DESC_NUM,
2094 			first_desc, prof_buf, &num_prof);
2095 	if (ret) {
2096 		PMD_INIT_LOG(ERR, "Failed to get fxp resource");
2097 		return ret;
2098 	}
2099 
2100 	for (prof_id = 0; prof_id < num_prof; prof_id++) {
2101 		ret = ice_free_hw_res(hw, res_type, 1, &prof_buf[prof_id]);
2102 		if (ret) {
2103 			PMD_INIT_LOG(ERR, "Failed to free fxp resource");
2104 			return ret;
2105 		}
2106 	}
2107 	return 0;
2108 }
2109 
2110 static int
2111 ice_reset_fxp_resource(struct ice_hw *hw)
2112 {
2113 	int ret;
2114 
2115 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID);
2116 	if (ret) {
2117 		PMD_INIT_LOG(ERR, "Failed to clearup fdir resource");
2118 		return ret;
2119 	}
2120 
2121 	ret = ice_cleanup_resource(hw, ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID);
2122 	if (ret) {
2123 		PMD_INIT_LOG(ERR, "Failed to clearup rss resource");
2124 		return ret;
2125 	}
2126 
2127 	return 0;
2128 }
2129 
2130 static void
2131 ice_rss_ctx_init(struct ice_pf *pf)
2132 {
2133 	memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
2134 }
2135 
2136 static uint64_t
2137 ice_get_supported_rxdid(struct ice_hw *hw)
2138 {
2139 	uint64_t supported_rxdid = 0; /* bitmap for supported RXDID */
2140 	uint32_t regval;
2141 	int i;
2142 
2143 	supported_rxdid |= BIT(ICE_RXDID_LEGACY_1);
2144 
2145 	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
2146 		regval = ICE_READ_REG(hw, GLFLXP_RXDID_FLAGS(i, 0));
2147 		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
2148 			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
2149 			supported_rxdid |= BIT(i);
2150 	}
2151 	return supported_rxdid;
2152 }
2153 
2154 static int
2155 ice_dev_init(struct rte_eth_dev *dev)
2156 {
2157 	struct rte_pci_device *pci_dev;
2158 	struct rte_intr_handle *intr_handle;
2159 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2160 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2161 	struct ice_adapter *ad =
2162 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2163 	struct ice_vsi *vsi;
2164 	int ret;
2165 #ifndef RTE_EXEC_ENV_WINDOWS
2166 	off_t pos;
2167 	uint32_t dsn_low, dsn_high;
2168 	uint64_t dsn;
2169 	bool use_dsn;
2170 #endif
2171 
2172 	dev->dev_ops = &ice_eth_dev_ops;
2173 	dev->rx_queue_count = ice_rx_queue_count;
2174 	dev->rx_descriptor_status = ice_rx_descriptor_status;
2175 	dev->tx_descriptor_status = ice_tx_descriptor_status;
2176 	dev->rx_pkt_burst = ice_recv_pkts;
2177 	dev->tx_pkt_burst = ice_xmit_pkts;
2178 	dev->tx_pkt_prepare = ice_prep_pkts;
2179 
2180 	/* for secondary processes, we don't initialise any further as primary
2181 	 * has already done this work.
2182 	 */
2183 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2184 		ice_set_rx_function(dev);
2185 		ice_set_tx_function(dev);
2186 		return 0;
2187 	}
2188 
2189 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2190 
2191 	ice_set_default_ptype_table(dev);
2192 	pci_dev = RTE_DEV_TO_PCI(dev->device);
2193 	intr_handle = pci_dev->intr_handle;
2194 
2195 	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2196 	pf->dev_data = dev->data;
2197 	hw->back = pf->adapter;
2198 	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
2199 	hw->vendor_id = pci_dev->id.vendor_id;
2200 	hw->device_id = pci_dev->id.device_id;
2201 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2202 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2203 	hw->bus.device = pci_dev->addr.devid;
2204 	hw->bus.func = pci_dev->addr.function;
2205 
2206 	ret = ice_parse_devargs(dev);
2207 	if (ret) {
2208 		PMD_INIT_LOG(ERR, "Failed to parse devargs");
2209 		return -EINVAL;
2210 	}
2211 
2212 	ice_init_controlq_parameter(hw);
2213 
2214 	ret = ice_init_hw(hw);
2215 	if (ret) {
2216 		PMD_INIT_LOG(ERR, "Failed to initialize HW");
2217 		return -EINVAL;
2218 	}
2219 
2220 #ifndef RTE_EXEC_ENV_WINDOWS
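	/* Read the PCIe Device Serial Number (when present) so that
	 * ice_load_pkg() can look for a device-specific DDP package first.
	 */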
2221 	use_dsn = false;
2222 	dsn = 0;
2223 	pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
2224 	if (pos) {
2225 		if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0 ||
2226 				rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) {
2227 			PMD_INIT_LOG(ERR, "Failed to read pci config space\n");
2228 		} else {
2229 			use_dsn = true;
2230 			dsn = (uint64_t)dsn_high << 32 | dsn_low;
2231 		}
2232 	} else {
2233 		PMD_INIT_LOG(ERR, "Failed to read device serial number\n");
2234 	}
2235 
2236 	ret = ice_load_pkg(pf->adapter, use_dsn, dsn);
2237 	if (ret == 0) {
2238 		ret = ice_init_hw_tbls(hw);
2239 		if (ret) {
2240 			PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", ret);
2241 			rte_free(hw->pkg_copy);
2242 		}
2243 	}
2244 
2245 	if (ret) {
2246 		if (ad->devargs.safe_mode_support == 0) {
2247 			PMD_INIT_LOG(ERR, "Failed to load the DDP package,"
2248 					"Use safe-mode-support=1 to enter Safe Mode");
2249 			goto err_init_fw;
2250 		}
2251 
2252 		PMD_INIT_LOG(WARNING, "Failed to load the DDP package,"
2253 					"Entering Safe Mode");
2254 		ad->is_safe_mode = 1;
2255 	}
2256 #endif
2257 
2258 	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
2259 		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2260 		     hw->api_maj_ver, hw->api_min_ver);
2261 
2262 	ice_pf_sw_init(dev);
2263 	ret = ice_init_mac_address(dev);
2264 	if (ret) {
2265 		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
2266 		goto err_init_mac;
2267 	}
2268 
2269 	ret = ice_res_pool_init(&pf->msix_pool, 1,
2270 				hw->func_caps.common_cap.num_msix_vectors - 1);
2271 	if (ret) {
2272 		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
2273 		goto err_msix_pool_init;
2274 	}
2275 
2276 	ret = ice_pf_setup(pf);
2277 	if (ret) {
2278 		PMD_INIT_LOG(ERR, "Failed to setup PF");
2279 		goto err_pf_setup;
2280 	}
2281 
2282 	ret = ice_send_driver_ver(hw);
2283 	if (ret) {
2284 		PMD_INIT_LOG(ERR, "Failed to send driver version");
2285 		goto err_pf_setup;
2286 	}
2287 
2288 	vsi = pf->main_vsi;
2289 
2290 	ret = ice_aq_stop_lldp(hw, true, false, NULL);
2291 	if (ret != ICE_SUCCESS)
2292 		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
2293 	ret = ice_init_dcb(hw, true);
2294 	if (ret != ICE_SUCCESS)
2295 		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
2296 	/* Forward LLDP packets to default VSI */
2297 	ret = ice_vsi_config_sw_lldp(vsi, true);
2298 	if (ret != ICE_SUCCESS)
2299 		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
2300 	/* register callback func to eal lib */
2301 	rte_intr_callback_register(intr_handle,
2302 				   ice_interrupt_handler, dev);
2303 
2304 	ice_pf_enable_irq0(hw);
2305 
2306 	/* enable uio intr after callback register */
2307 	rte_intr_enable(intr_handle);
2308 
2309 	/* get the base queue pair index in the device */
2310 	ice_base_queue_get(pf);
2311 
2312 	/* Initialize RSS context for gtpu_eh */
2313 	ice_rss_ctx_init(pf);
2314 
2315 	if (!ad->is_safe_mode) {
2316 		ret = ice_flow_init(ad);
2317 		if (ret) {
2318 			PMD_INIT_LOG(ERR, "Failed to initialize flow");
2319 			goto err_flow_init;
2320 		}
2321 	}
2322 
2323 	ret = ice_reset_fxp_resource(hw);
2324 	if (ret) {
2325 		PMD_INIT_LOG(ERR, "Failed to reset fxp resource");
2326 		goto err_flow_init;
2327 	}
2328 
2329 	pf->supported_rxdid = ice_get_supported_rxdid(hw);
2330 
2331 	return 0;
2332 
2333 err_flow_init:
2334 	ice_flow_uninit(ad);
2335 	rte_intr_disable(intr_handle);
2336 	ice_pf_disable_irq0(hw);
2337 	rte_intr_callback_unregister(intr_handle,
2338 				     ice_interrupt_handler, dev);
2339 err_pf_setup:
2340 	ice_res_pool_destroy(&pf->msix_pool);
2341 err_msix_pool_init:
2342 	rte_free(dev->data->mac_addrs);
2343 	dev->data->mac_addrs = NULL;
2344 err_init_mac:
2345 	rte_free(pf->proto_xtr);
2346 #ifndef RTE_EXEC_ENV_WINDOWS
2347 err_init_fw:
2348 #endif
2349 	ice_deinit_hw(hw);
2350 
2351 	return ret;
2352 }
2353 
2354 int
2355 ice_release_vsi(struct ice_vsi *vsi)
2356 {
2357 	struct ice_hw *hw;
2358 	struct ice_vsi_ctx vsi_ctx;
2359 	enum ice_status ret;
2360 	int error = 0;
2361 
2362 	if (!vsi)
2363 		return error;
2364 
2365 	hw = ICE_VSI_TO_HW(vsi);
2366 
2367 	ice_remove_all_mac_vlan_filters(vsi);
2368 
2369 	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
2370 
2371 	vsi_ctx.vsi_num = vsi->vsi_id;
2372 	vsi_ctx.info = vsi->info;
2373 	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
2374 	if (ret != ICE_SUCCESS) {
2375 		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
2376 		error = -1;
2377 	}
2378 
2379 	rte_free(vsi->rss_lut);
2380 	rte_free(vsi->rss_key);
2381 	rte_free(vsi);
2382 	return error;
2383 }
2384 
2385 void
2386 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
2387 {
2388 	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
2389 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2390 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2391 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2392 	uint16_t msix_intr, i;
2393 
2394 	/* disable interrupts and also clear all the existing config */
2395 	for (i = 0; i < vsi->nb_qps; i++) {
2396 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
2397 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
2398 		rte_wmb();
2399 	}
2400 
2401 	if (rte_intr_allow_others(intr_handle))
2402 		/* vfio-pci */
2403 		for (i = 0; i < vsi->nb_msix; i++) {
2404 			msix_intr = vsi->msix_intr + i;
2405 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
2406 				      GLINT_DYN_CTL_WB_ON_ITR_M);
2407 		}
2408 	else
2409 		/* igb_uio */
2410 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
2411 }
2412 
2413 static int
2414 ice_dev_stop(struct rte_eth_dev *dev)
2415 {
2416 	struct rte_eth_dev_data *data = dev->data;
2417 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2418 	struct ice_vsi *main_vsi = pf->main_vsi;
2419 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2420 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2421 	uint16_t i;
2422 
2423 	/* avoid stopping again */
2424 	if (pf->adapter_stopped)
2425 		return 0;
2426 
2427 	/* stop and clear all Rx queues */
2428 	for (i = 0; i < data->nb_rx_queues; i++)
2429 		ice_rx_queue_stop(dev, i);
2430 
2431 	/* stop and clear all Tx queues */
2432 	for (i = 0; i < data->nb_tx_queues; i++)
2433 		ice_tx_queue_stop(dev, i);
2434 
2435 	/* disable all queue interrupts */
2436 	ice_vsi_disable_queues_intr(main_vsi);
2437 
2438 	if (pf->init_link_up)
2439 		ice_dev_set_link_up(dev);
2440 	else
2441 		ice_dev_set_link_down(dev);
2442 
2443 	/* Clean datapath event and queue/vec mapping */
2444 	rte_intr_efd_disable(intr_handle);
2445 	rte_intr_vec_list_free(intr_handle);
2446 
2447 	pf->adapter_stopped = true;
2448 	dev->data->dev_started = 0;
2449 
2450 	return 0;
2451 }
2452 
2453 static int
2454 ice_dev_close(struct rte_eth_dev *dev)
2455 {
2456 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2457 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2458 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2459 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
2460 	struct ice_adapter *ad =
2461 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2462 	int ret;
2463 	uint32_t val;
2464 	uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
2465 	uint32_t pin_idx = ad->devargs.pin_idx;
2466 
2467 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2468 		return 0;
2469 
2470 	/* Since stop will bring the link down, a link event will be
2471 	 * triggered; disable the IRQ first so that deallocation of
2472 	 * port_info and other resources cannot crash the interrupt
2473 	 * service thread.
2474 	 */
2475 	ice_pf_disable_irq0(hw);
2476 
2477 	ret = ice_dev_stop(dev);
2478 
2479 	if (!ad->is_safe_mode)
2480 		ice_flow_uninit(ad);
2481 
2482 	/* release all queue resource */
2483 	ice_free_queues(dev);
2484 
2485 	ice_res_pool_destroy(&pf->msix_pool);
2486 	ice_release_vsi(pf->main_vsi);
2487 	ice_sched_cleanup_all(hw);
2488 	ice_free_hw_tbls(hw);
2489 	rte_free(hw->port_info);
2490 	hw->port_info = NULL;
2491 	ice_shutdown_all_ctrlq(hw);
2492 	rte_free(pf->proto_xtr);
2493 	pf->proto_xtr = NULL;
2494 
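	/* Disable the 1PPS GPIO output configured at start time. */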
2495 	if (ad->devargs.pps_out_ena) {
2496 		ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(pin_idx, timer), 0);
2497 		ICE_WRITE_REG(hw, GLTSYN_CLKO(pin_idx, timer), 0);
2498 		ICE_WRITE_REG(hw, GLTSYN_TGT_L(pin_idx, timer), 0);
2499 		ICE_WRITE_REG(hw, GLTSYN_TGT_H(pin_idx, timer), 0);
2500 
2501 		val = GLGEN_GPIO_CTL_PIN_DIR_M;
2502 		ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(pin_idx), val);
2503 	}
2504 
2505 	/* disable uio intr before callback unregister */
2506 	rte_intr_disable(intr_handle);
2507 
2508 	/* unregister callback func from eal lib */
2509 	rte_intr_callback_unregister(intr_handle,
2510 				     ice_interrupt_handler, dev);
2511 
2512 	return ret;
2513 }
2514 
2515 static int
2516 ice_dev_uninit(struct rte_eth_dev *dev)
2517 {
2518 	ice_dev_close(dev);
2519 
2520 	return 0;
2521 }
2522 
2523 static bool
2524 is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
2525 {
2526 	return cfg->hash_flds != 0 && cfg->addl_hdrs != 0;
2527 }
2528 
2529 static void
2530 hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
2531 {
2532 	cfg->hash_flds = 0;
2533 	cfg->addl_hdrs = 0;
2534 	cfg->symm = 0;
2535 	cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
2536 }
2537 
2538 static int
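/* Remove an RSS configuration from hardware while keeping its software
 * context intact, so that ice_hash_moveback() can reinstall it later.
 */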
2539 ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2540 {
2541 	enum ice_status status = ICE_SUCCESS;
2542 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2543 	struct ice_vsi *vsi = pf->main_vsi;
2544 
2545 	if (!is_hash_cfg_valid(cfg))
2546 		return -ENOENT;
2547 
2548 	status = ice_rem_rss_cfg(hw, vsi->idx, cfg);
2549 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2550 		PMD_DRV_LOG(ERR,
2551 			    "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
2552 			    vsi->idx, status);
2553 		return -EBUSY;
2554 	}
2555 
2556 	return 0;
2557 }
2558 
2559 static int
2560 ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2561 {
2562 	enum ice_status status = ICE_SUCCESS;
2563 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2564 	struct ice_vsi *vsi = pf->main_vsi;
2565 
2566 	if (!is_hash_cfg_valid(cfg))
2567 		return -ENOENT;
2568 
2569 	status = ice_add_rss_cfg(hw, vsi->idx, cfg);
2570 	if (status) {
2571 		PMD_DRV_LOG(ERR,
2572 			    "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
2573 			    vsi->idx, status);
2574 		return -EBUSY;
2575 	}
2576 
2577 	return 0;
2578 }
2579 
2580 static int
2581 ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2582 {
2583 	int ret;
2584 
2585 	ret = ice_hash_moveout(pf, cfg);
2586 	if (ret && (ret != -ENOENT))
2587 		return ret;
2588 
2589 	hash_cfg_reset(cfg);
2590 
2591 	return 0;
2592 }
2593 
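/* Before installing a new GTPU RSS configuration, remove or temporarily
 * move out the existing contexts it conflicts with; the moved-out
 * configurations are reinstalled afterwards by ice_add_rss_cfg_post_gtpu().
 */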
2594 static int
2595 ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2596 			 u8 ctx_idx)
2597 {
2598 	int ret;
2599 
2600 	switch (ctx_idx) {
2601 	case ICE_HASH_GTPU_CTX_EH_IP:
2602 		ret = ice_hash_remove(pf,
2603 				      &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2604 		if (ret && (ret != -ENOENT))
2605 			return ret;
2606 
2607 		ret = ice_hash_remove(pf,
2608 				      &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2609 		if (ret && (ret != -ENOENT))
2610 			return ret;
2611 
2612 		ret = ice_hash_remove(pf,
2613 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2614 		if (ret && (ret != -ENOENT))
2615 			return ret;
2616 
2617 		ret = ice_hash_remove(pf,
2618 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2619 		if (ret && (ret != -ENOENT))
2620 			return ret;
2621 
2622 		ret = ice_hash_remove(pf,
2623 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2624 		if (ret && (ret != -ENOENT))
2625 			return ret;
2626 
2627 		ret = ice_hash_remove(pf,
2628 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2629 		if (ret && (ret != -ENOENT))
2630 			return ret;
2631 
2632 		ret = ice_hash_remove(pf,
2633 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2634 		if (ret && (ret != -ENOENT))
2635 			return ret;
2636 
2637 		ret = ice_hash_remove(pf,
2638 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2639 		if (ret && (ret != -ENOENT))
2640 			return ret;
2641 
2642 		break;
2643 	case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2644 		ret = ice_hash_remove(pf,
2645 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2646 		if (ret && (ret != -ENOENT))
2647 			return ret;
2648 
2649 		ret = ice_hash_remove(pf,
2650 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2651 		if (ret && (ret != -ENOENT))
2652 			return ret;
2653 
2654 		ret = ice_hash_moveout(pf,
2655 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2656 		if (ret && (ret != -ENOENT))
2657 			return ret;
2658 
2659 		ret = ice_hash_moveout(pf,
2660 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2661 		if (ret && (ret != -ENOENT))
2662 			return ret;
2663 
2664 		ret = ice_hash_moveout(pf,
2665 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2666 		if (ret && (ret != -ENOENT))
2667 			return ret;
2668 
2669 		ret = ice_hash_moveout(pf,
2670 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2671 		if (ret && (ret != -ENOENT))
2672 			return ret;
2673 
2674 		break;
2675 	case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2676 		ret = ice_hash_remove(pf,
2677 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2678 		if (ret && (ret != -ENOENT))
2679 			return ret;
2680 
2681 		ret = ice_hash_remove(pf,
2682 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2683 		if (ret && (ret != -ENOENT))
2684 			return ret;
2685 
2686 		ret = ice_hash_moveout(pf,
2687 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2688 		if (ret && (ret != -ENOENT))
2689 			return ret;
2690 
2691 		ret = ice_hash_moveout(pf,
2692 				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2693 		if (ret && (ret != -ENOENT))
2694 			return ret;
2695 
2696 		ret = ice_hash_moveout(pf,
2697 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2698 		if (ret && (ret != -ENOENT))
2699 			return ret;
2700 
2701 		ret = ice_hash_moveout(pf,
2702 				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2703 		if (ret && (ret != -ENOENT))
2704 			return ret;
2705 
2706 		break;
2707 	case ICE_HASH_GTPU_CTX_UP_IP:
2708 		ret = ice_hash_remove(pf,
2709 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2710 		if (ret && (ret != -ENOENT))
2711 			return ret;
2712 
2713 		ret = ice_hash_remove(pf,
2714 				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2715 		if (ret && (ret != -ENOENT))
2716 			return ret;
2717 
2718 		ret = ice_hash_moveout(pf,
2719 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2720 		if (ret && (ret != -ENOENT))
2721 			return ret;
2722 
2723 		ret = ice_hash_moveout(pf,
2724 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2725 		if (ret && (ret != -ENOENT))
2726 			return ret;
2727 
2728 		ret = ice_hash_moveout(pf,
2729 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2730 		if (ret && (ret != -ENOENT))
2731 			return ret;
2732 
2733 		break;
2734 	case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2735 	case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2736 		ret = ice_hash_moveout(pf,
2737 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2738 		if (ret && (ret != -ENOENT))
2739 			return ret;
2740 
2741 		ret = ice_hash_moveout(pf,
2742 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2743 		if (ret && (ret != -ENOENT))
2744 			return ret;
2745 
2746 		ret = ice_hash_moveout(pf,
2747 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2748 		if (ret && (ret != -ENOENT))
2749 			return ret;
2750 
2751 		break;
2752 	case ICE_HASH_GTPU_CTX_DW_IP:
2753 		ret = ice_hash_remove(pf,
2754 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2755 		if (ret && (ret != -ENOENT))
2756 			return ret;
2757 
2758 		ret = ice_hash_remove(pf,
2759 				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2760 		if (ret && (ret != -ENOENT))
2761 			return ret;
2762 
2763 		ret = ice_hash_moveout(pf,
2764 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2765 		if (ret && (ret != -ENOENT))
2766 			return ret;
2767 
2768 		ret = ice_hash_moveout(pf,
2769 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2770 		if (ret && (ret != -ENOENT))
2771 			return ret;
2772 
2773 		ret = ice_hash_moveout(pf,
2774 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2775 		if (ret && (ret != -ENOENT))
2776 			return ret;
2777 
2778 		break;
2779 	case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2780 	case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2781 		ret = ice_hash_moveout(pf,
2782 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2783 		if (ret && (ret != -ENOENT))
2784 			return ret;
2785 
2786 		ret = ice_hash_moveout(pf,
2787 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2788 		if (ret && (ret != -ENOENT))
2789 			return ret;
2790 
2791 		ret = ice_hash_moveout(pf,
2792 				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2793 		if (ret && (ret != -ENOENT))
2794 			return ret;
2795 
2796 		break;
2797 	default:
2798 		break;
2799 	}
2800 
2801 	return 0;
2802 }
2803 
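/* Map a GTPU header combination to its hash context index: eh_idx selects
 * EH/UP/DWN (0/1/2) and ip_idx selects other/UDP/TCP (0/1/2), so the index
 * is eh_idx * 3 + ip_idx.  Combinations without a GTPU header or without an
 * IPv4/IPv6 header map to ICE_HASH_GTPU_CTX_MAX.
 */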
2804 static u8 calc_gtpu_ctx_idx(uint32_t hdr)
2805 {
2806 	u8 eh_idx, ip_idx;
2807 
2808 	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
2809 		eh_idx = 0;
2810 	else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
2811 		eh_idx = 1;
2812 	else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
2813 		eh_idx = 2;
2814 	else
2815 		return ICE_HASH_GTPU_CTX_MAX;
2816 
2817 	ip_idx = 0;
2818 	if (hdr & ICE_FLOW_SEG_HDR_UDP)
2819 		ip_idx = 1;
2820 	else if (hdr & ICE_FLOW_SEG_HDR_TCP)
2821 		ip_idx = 2;
2822 
2823 	if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
2824 		return eh_idx * 3 + ip_idx;
2825 	else
2826 		return ICE_HASH_GTPU_CTX_MAX;
2827 }
2828 
2829 static int
2830 ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
2831 {
2832 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2833 
2834 	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2835 		return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
2836 						gtpu_ctx_idx);
2837 	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2838 		return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
2839 						gtpu_ctx_idx);
2840 
2841 	return 0;
2842 }
2843 
2844 static int
2845 ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
2846 			  u8 ctx_idx, struct ice_rss_hash_cfg *cfg)
2847 {
2848 	int ret;
2849 
2850 	if (ctx_idx < ICE_HASH_GTPU_CTX_MAX)
2851 		ctx->ctx[ctx_idx] = *cfg;
2852 
2853 	switch (ctx_idx) {
2854 	case ICE_HASH_GTPU_CTX_EH_IP:
2855 		break;
2856 	case ICE_HASH_GTPU_CTX_EH_IP_UDP:
2857 		ret = ice_hash_moveback(pf,
2858 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2859 		if (ret && (ret != -ENOENT))
2860 			return ret;
2861 
2862 		ret = ice_hash_moveback(pf,
2863 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
2864 		if (ret && (ret != -ENOENT))
2865 			return ret;
2866 
2867 		ret = ice_hash_moveback(pf,
2868 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2869 		if (ret && (ret != -ENOENT))
2870 			return ret;
2871 
2872 		ret = ice_hash_moveback(pf,
2873 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
2874 		if (ret && (ret != -ENOENT))
2875 			return ret;
2876 
2877 		break;
2878 	case ICE_HASH_GTPU_CTX_EH_IP_TCP:
2879 		ret = ice_hash_moveback(pf,
2880 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
2881 		if (ret && (ret != -ENOENT))
2882 			return ret;
2883 
2884 		ret = ice_hash_moveback(pf,
2885 					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
2886 		if (ret && (ret != -ENOENT))
2887 			return ret;
2888 
2889 		ret = ice_hash_moveback(pf,
2890 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
2891 		if (ret && (ret != -ENOENT))
2892 			return ret;
2893 
2894 		ret = ice_hash_moveback(pf,
2895 					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
2896 		if (ret && (ret != -ENOENT))
2897 			return ret;
2898 
2899 		break;
2900 	case ICE_HASH_GTPU_CTX_UP_IP:
2901 	case ICE_HASH_GTPU_CTX_UP_IP_UDP:
2902 	case ICE_HASH_GTPU_CTX_UP_IP_TCP:
2903 	case ICE_HASH_GTPU_CTX_DW_IP:
2904 	case ICE_HASH_GTPU_CTX_DW_IP_UDP:
2905 	case ICE_HASH_GTPU_CTX_DW_IP_TCP:
2906 		ret = ice_hash_moveback(pf,
2907 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
2908 		if (ret && (ret != -ENOENT))
2909 			return ret;
2910 
2911 		ret = ice_hash_moveback(pf,
2912 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
2913 		if (ret && (ret != -ENOENT))
2914 			return ret;
2915 
2916 		ret = ice_hash_moveback(pf,
2917 					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
2918 		if (ret && (ret != -ENOENT))
2919 			return ret;
2920 
2921 		break;
2922 	default:
2923 		break;
2924 	}
2925 
2926 	return 0;
2927 }
2928 
2929 static int
2930 ice_add_rss_cfg_post(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
2931 {
2932 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(cfg->addl_hdrs);
2933 
2934 	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
2935 		return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4,
2936 						 gtpu_ctx_idx, cfg);
2937 	else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
2938 		return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6,
2939 						 gtpu_ctx_idx, cfg);
2940 
2941 	return 0;
2942 }
2943 
2944 static void
2945 ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
2946 {
2947 	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
2948 
2949 	if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
2950 		return;
2951 
2952 	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
2953 		hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
2954 	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
2955 		hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
2956 }
2957 
2958 int
2959 ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2960 		     struct ice_rss_hash_cfg *cfg)
2961 {
2962 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2963 	int ret;
2964 
2965 	ret = ice_rem_rss_cfg(hw, vsi_id, cfg);
2966 	if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
2967 		PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
2968 
2969 	ice_rem_rss_cfg_post(pf, cfg->addl_hdrs);
2970 
2971 	return 0;
2972 }
2973 
2974 int
2975 ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
2976 		     struct ice_rss_hash_cfg *cfg)
2977 {
2978 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
2979 	int ret;
2980 
2981 	ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs);
2982 	if (ret)
2983 		PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");
2984 
2985 	ret = ice_add_rss_cfg(hw, vsi_id, cfg);
2986 	if (ret)
2987 		PMD_DRV_LOG(ERR, "add rss cfg failed\n");
2988 
2989 	ret = ice_add_rss_cfg_post(pf, cfg);
2990 	if (ret)
2991 		PMD_DRV_LOG(ERR, "add rss cfg post failed\n");
2992 
2993 	return 0;
2994 }
2995 
2996 static void
2997 ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
2998 {
2999 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
3000 	struct ice_vsi *vsi = pf->main_vsi;
3001 	struct ice_rss_hash_cfg cfg;
3002 	int ret;
3003 
3004 #define ICE_RSS_HF_ALL ( \
3005 	RTE_ETH_RSS_IPV4 | \
3006 	RTE_ETH_RSS_IPV6 | \
3007 	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
3008 	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
3009 	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
3010 	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
3011 	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
3012 	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
3013 
3014 	ret = ice_rem_vsi_rss_cfg(hw, vsi->idx);
3015 	if (ret)
3016 		PMD_DRV_LOG(ERR, "%s Remove rss vsi fail %d",
3017 			    __func__, ret);
3018 
3019 	cfg.symm = 0;
3020 	cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
3021 	/* Configure RSS for IPv4 with src/dst addr as input set */
3022 	if (rss_hf & RTE_ETH_RSS_IPV4) {
3023 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3024 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
3025 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3026 		if (ret)
3027 			PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
3028 				    __func__, ret);
3029 	}
3030 
3031 	/* Configure RSS for IPv6 with src/dst addr as input set */
3032 	if (rss_hf & RTE_ETH_RSS_IPV6) {
3033 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3034 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
3035 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3036 		if (ret)
3037 			PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
3038 				    __func__, ret);
3039 	}
3040 
3041 	/* Configure RSS for udp4 with src/dst addr and port as input set */
3042 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
3043 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 |
3044 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3045 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
3046 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3047 		if (ret)
3048 			PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
3049 				    __func__, ret);
3050 	}
3051 
3052 	/* Configure RSS for udp6 with src/dst addr and port as input set */
3053 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
3054 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 |
3055 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3056 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
3057 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3058 		if (ret)
3059 			PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
3060 				    __func__, ret);
3061 	}
3062 
3063 	/* Configure RSS for tcp4 with src/dst addr and port as input set */
3064 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
3065 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 |
3066 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3067 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
3068 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3069 		if (ret)
3070 			PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
3071 				    __func__, ret);
3072 	}
3073 
3074 	/* Configure RSS for tcp6 with src/dst addr and port as input set */
3075 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
3076 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 |
3077 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3078 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
3079 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3080 		if (ret)
3081 			PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
3082 				    __func__, ret);
3083 	}
3084 
3085 	/* Configure RSS for sctp4 with src/dst addr and port as input set */
3086 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) {
3087 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 |
3088 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3089 		cfg.hash_flds = ICE_HASH_SCTP_IPV4;
3090 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3091 		if (ret)
3092 			PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
3093 				    __func__, ret);
3094 	}
3095 
3096 	/* Configure RSS for sctp6 with src/dst addr and port as input set */
3097 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) {
3098 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 |
3099 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3100 		cfg.hash_flds = ICE_HASH_SCTP_IPV6;
3101 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3102 		if (ret)
3103 			PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
3104 				    __func__, ret);
3105 	}
3106 
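	/* Repeat the IPv4/IPv6 and UDP/TCP configurations for
	 * PPPoE-encapsulated traffic.
	 */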
3107 	if (rss_hf & RTE_ETH_RSS_IPV4) {
3108 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 |
3109 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3110 		cfg.hash_flds = ICE_FLOW_HASH_IPV4;
3111 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3112 		if (ret)
3113 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
3114 				    __func__, ret);
3115 	}
3116 
3117 	if (rss_hf & RTE_ETH_RSS_IPV6) {
3118 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 |
3119 				ICE_FLOW_SEG_HDR_IPV_OTHER;
3120 		cfg.hash_flds = ICE_FLOW_HASH_IPV6;
3121 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3122 		if (ret)
3123 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
3124 				    __func__, ret);
3125 	}
3126 
3127 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
3128 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3129 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3130 		cfg.hash_flds = ICE_HASH_UDP_IPV4;
3131 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3132 		if (ret)
3133 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
3134 				    __func__, ret);
3135 	}
3136 
3137 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) {
3138 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_UDP |
3139 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3140 		cfg.hash_flds = ICE_HASH_UDP_IPV6;
3141 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3142 		if (ret)
3143 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
3144 				    __func__, ret);
3145 	}
3146 
3147 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
3148 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3149 				ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3150 		cfg.hash_flds = ICE_HASH_TCP_IPV4;
3151 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3152 		if (ret)
3153 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
3154 				    __func__, ret);
3155 	}
3156 
3157 	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) {
3158 		cfg.addl_hdrs = ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_TCP |
3159 				ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER;
3160 		cfg.hash_flds = ICE_HASH_TCP_IPV6;
3161 		ret = ice_add_rss_cfg_wrap(pf, vsi->idx, &cfg);
3162 		if (ret)
3163 			PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
3164 				    __func__, ret);
3165 	}
3166 
3167 	pf->rss_hf = rss_hf & ICE_RSS_HF_ALL;
3168 }
3169 
3170 static void
3171 ice_get_default_rss_key(uint8_t *rss_key, uint32_t rss_key_size)
3172 {
3173 	static struct ice_aqc_get_set_rss_keys default_key;
3174 	static bool default_key_done;
3175 	uint8_t *key = (uint8_t *)&default_key;
3176 	size_t i;
3177 
3178 	if (rss_key_size > sizeof(default_key)) {
3179 		PMD_DRV_LOG(WARNING,
3180 			    "requested size %u is larger than default %zu, "
3181 			    "only %zu bytes are gotten for key\n",
3182 			    rss_key_size, sizeof(default_key),
3183 			    sizeof(default_key));
3184 	}
3185 
3186 	if (!default_key_done) {
3187 		/* Generate a random default hash key (only once; reused afterwards) */
3188 		for (i = 0; i < sizeof(default_key); i++)
3189 			key[i] = (uint8_t)rte_rand();
3190 		default_key_done = true;
3191 	}
3192 	rte_memcpy(rss_key, key, RTE_MIN(rss_key_size, sizeof(default_key)));
3193 }
3194 
3195 static int ice_init_rss(struct ice_pf *pf)
3196 {
3197 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
3198 	struct ice_vsi *vsi = pf->main_vsi;
3199 	struct rte_eth_dev_data *dev_data = pf->dev_data;
3200 	struct ice_aq_get_set_rss_lut_params lut_params;
3201 	struct rte_eth_rss_conf *rss_conf;
3202 	struct ice_aqc_get_set_rss_keys key;
3203 	uint16_t i, nb_q;
3204 	int ret = 0;
3205 	bool is_safe_mode = pf->adapter->is_safe_mode;
3206 	uint32_t reg;
3207 
3208 	rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
3209 	nb_q = dev_data->nb_rx_queues;
3210 	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
3211 	vsi->rss_lut_size = pf->hash_lut_size;
3212 
3213 	if (nb_q == 0) {
3214 		PMD_DRV_LOG(WARNING,
3215 			"RSS is not supported as rx queues number is zero\n");
3216 		return 0;
3217 	}
3218 
3219 	if (is_safe_mode) {
3220 		PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
3221 		return 0;
3222 	}
3223 
3224 	if (!vsi->rss_key) {
3225 		vsi->rss_key = rte_zmalloc(NULL,
3226 					   vsi->rss_key_size, 0);
3227 		if (vsi->rss_key == NULL) {
3228 			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3229 			return -ENOMEM;
3230 		}
3231 	}
3232 	if (!vsi->rss_lut) {
3233 		vsi->rss_lut = rte_zmalloc(NULL,
3234 					   vsi->rss_lut_size, 0);
3235 		if (vsi->rss_lut == NULL) {
3236 			PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
3237 			rte_free(vsi->rss_key);
3238 			vsi->rss_key = NULL;
3239 			return -ENOMEM;
3240 		}
3241 	}
3242 	/* configure RSS key */
3243 	if (!rss_conf->rss_key)
3244 		ice_get_default_rss_key(vsi->rss_key, vsi->rss_key_size);
3245 	else
3246 		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
3247 			   RTE_MIN(rss_conf->rss_key_len,
3248 				   vsi->rss_key_size));
3249 
3250 	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
3251 	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
3252 	if (ret)
3253 		goto out;
3254 
3255 	/* init RSS LUT table */
3256 	for (i = 0; i < vsi->rss_lut_size; i++)
3257 		vsi->rss_lut[i] = i % nb_q;
3258 
3259 	lut_params.vsi_handle = vsi->idx;
3260 	lut_params.lut_size = vsi->rss_lut_size;
3261 	lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
3262 	lut_params.lut = vsi->rss_lut;
3263 	lut_params.global_lut_id = 0;
3264 	ret = ice_aq_set_rss_lut(hw, &lut_params);
3265 	if (ret)
3266 		goto out;
3267 
3268 	/* Enable registers for symmetric_toeplitz function. */
3269 	reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
3270 	reg = (reg & (~VSIQF_HASH_CTL_HASH_SCHEME_M)) |
3271 		(1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
3272 	ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
3273 
3274 	/* RSS hash configuration */
3275 	ice_rss_hash_set(pf, rss_conf->rss_hf);
3276 
3277 	return 0;
3278 out:
3279 	rte_free(vsi->rss_key);
3280 	vsi->rss_key = NULL;
3281 	rte_free(vsi->rss_lut);
3282 	vsi->rss_lut = NULL;
3283 	return -EINVAL;
3284 }
3285 
3286 static int
3287 ice_dev_configure(struct rte_eth_dev *dev)
3288 {
3289 	struct ice_adapter *ad =
3290 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3291 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3292 	int ret;
3293 
3294 	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
3295 	 * bulk allocation or vector Rx preconditions, it will be reset.
3296 	 */
3297 	ad->rx_bulk_alloc_allowed = true;
3298 	ad->tx_simple_allowed = true;
3299 
3300 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
3301 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
3302 
3303 	if (dev->data->nb_rx_queues) {
3304 		ret = ice_init_rss(pf);
3305 		if (ret) {
3306 			PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
3307 			return ret;
3308 		}
3309 	}
3310 
3311 	return 0;
3312 }
3313 
3314 static void
3315 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
3316 		       int base_queue, int nb_queue)
3317 {
3318 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3319 	uint32_t val, val_tx;
3320 	int rx_low_latency, i;
3321 
3322 	rx_low_latency = vsi->adapter->devargs.rx_low_latency;
3323 	for (i = 0; i < nb_queue; i++) {
3324 		/* do the actual binding */
3325 		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
3326 		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
3327 		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
3328 			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
3329 
3330 		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
3331 			    base_queue + i, msix_vect);
3332 
3333 		/* set ITR0 value */
3334 		if (rx_low_latency) {
3335 			/**
3336 			 * Empirical configuration for optimal real-time
3337 			 * latency: reduce interrupt throttling to 2us
3338 			 */
3339 			ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x1);
3340 			ICE_WRITE_REG(hw, QRX_ITR(base_queue + i),
3341 				      QRX_ITR_NO_EXPR_M);
3342 		} else {
3343 			ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x2);
3344 			ICE_WRITE_REG(hw, QRX_ITR(base_queue + i), 0);
3345 		}
3346 
3347 		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
3348 		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
3349 	}
3350 }
3351 
3352 void
3353 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
3354 {
3355 	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3356 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3357 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3358 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3359 	uint16_t msix_vect = vsi->msix_intr;
3360 	uint16_t nb_msix = RTE_MIN(vsi->nb_msix,
3361 				   rte_intr_nb_efd_get(intr_handle));
3362 	uint16_t queue_idx = 0;
3363 	int record = 0;
3364 	int i;
3365 
3366 	/* clear Rx/Tx queue interrupt */
3367 	for (i = 0; i < vsi->nb_used_qps; i++) {
3368 		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
3369 		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
3370 	}
3371 
3372 	/* PF bind interrupt */
3373 	if (rte_intr_dp_is_en(intr_handle)) {
3374 		queue_idx = 0;
3375 		record = 1;
3376 	}
3377 
3378 	for (i = 0; i < vsi->nb_used_qps; i++) {
3379 		if (nb_msix <= 1) {
3380 			if (!rte_intr_allow_others(intr_handle))
3381 				msix_vect = ICE_MISC_VEC_ID;
3382 
3383 			/* uio: map all queues to one msix_vect */
3384 			__vsi_queues_bind_intr(vsi, msix_vect,
3385 					       vsi->base_queue + i,
3386 					       vsi->nb_used_qps - i);
3387 
3388 			for (; !!record && i < vsi->nb_used_qps; i++)
3389 				rte_intr_vec_list_index_set(intr_handle,
3390 						queue_idx + i, msix_vect);
3391 
3392 			break;
3393 		}
3394 
3395 		/* vfio 1:1 queue/msix_vect mapping */
3396 		__vsi_queues_bind_intr(vsi, msix_vect,
3397 				       vsi->base_queue + i, 1);
3398 
3399 		if (!!record)
3400 			rte_intr_vec_list_index_set(intr_handle,
3401 							   queue_idx + i,
3402 							   msix_vect);
3403 
3404 		msix_vect++;
3405 		nb_msix--;
3406 	}
3407 }
3408 
3409 void
3410 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
3411 {
3412 	struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
3413 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3414 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3415 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3416 	uint16_t msix_intr, i;
3417 
3418 	if (rte_intr_allow_others(intr_handle))
3419 		for (i = 0; i < vsi->nb_used_qps; i++) {
3420 			msix_intr = vsi->msix_intr + i;
3421 			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
3422 				      GLINT_DYN_CTL_INTENA_M |
3423 				      GLINT_DYN_CTL_CLEARPBA_M |
3424 				      GLINT_DYN_CTL_ITR_INDX_M |
3425 				      GLINT_DYN_CTL_WB_ON_ITR_M);
3426 		}
3427 	else
3428 		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
3429 			      GLINT_DYN_CTL_INTENA_M |
3430 			      GLINT_DYN_CTL_CLEARPBA_M |
3431 			      GLINT_DYN_CTL_ITR_INDX_M |
3432 			      GLINT_DYN_CTL_WB_ON_ITR_M);
3433 }
3434 
3435 static int
3436 ice_rxq_intr_setup(struct rte_eth_dev *dev)
3437 {
3438 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3439 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
3440 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3441 	struct ice_vsi *vsi = pf->main_vsi;
3442 	uint32_t intr_vector = 0;
3443 
3444 	rte_intr_disable(intr_handle);
3445 
3446 	/* check and configure queue intr-vector mapping */
3447 	if ((rte_intr_cap_multiple(intr_handle) ||
3448 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
3449 	    dev->data->dev_conf.intr_conf.rxq != 0) {
3450 		intr_vector = dev->data->nb_rx_queues;
3451 		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
3452 			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
3453 				    ICE_MAX_INTR_QUEUE_NUM);
3454 			return -ENOTSUP;
3455 		}
3456 		if (rte_intr_efd_enable(intr_handle, intr_vector))
3457 			return -1;
3458 	}
3459 
3460 	if (rte_intr_dp_is_en(intr_handle)) {
3461 		if (rte_intr_vec_list_alloc(intr_handle, NULL,
3462 						   dev->data->nb_rx_queues)) {
3463 			PMD_DRV_LOG(ERR,
3464 				    "Failed to allocate %d rx_queues intr_vec",
3465 				    dev->data->nb_rx_queues);
3466 			return -ENOMEM;
3467 		}
3468 	}
3469 
3470 	/* Map queues with MSIX interrupt */
3471 	vsi->nb_used_qps = dev->data->nb_rx_queues;
3472 	ice_vsi_queues_bind_intr(vsi);
3473 
3474 	/* Enable interrupts for all the queues */
3475 	ice_vsi_enable_queues_intr(vsi);
3476 
3477 	rte_intr_enable(intr_handle);
3478 
3479 	return 0;
3480 }
3481 
3482 static void
3483 ice_get_init_link_status(struct rte_eth_dev *dev)
3484 {
3485 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3486 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3487 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3488 	struct ice_link_status link_status;
3489 	int ret;
3490 
3491 	ret = ice_aq_get_link_info(hw->port_info, enable_lse,
3492 				   &link_status, NULL);
3493 	if (ret != ICE_SUCCESS) {
3494 		PMD_DRV_LOG(ERR, "Failed to get link info");
3495 		pf->init_link_up = false;
3496 		return;
3497 	}
3498 
3499 	if (link_status.link_info & ICE_AQ_LINK_UP)
3500 		pf->init_link_up = true;
3501 }
3502 
3503 static int
3504 ice_pps_out_cfg(struct ice_hw *hw, int idx, int timer)
3505 {
3506 	uint64_t current_time, start_time;
3507 	uint32_t hi, lo, lo2, func, val;
3508 
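	/* Read the time registers twice; if the low word wrapped between the
	 * two reads, read again so hi/lo form a consistent 64-bit value.
	 */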
3509 	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3510 	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer));
3511 	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3512 
3513 	if (lo2 < lo) {
3514 		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(timer));
3515 		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(timer));
3516 	}
3517 
3518 	current_time = ((uint64_t)hi << 32) | lo;
3519 
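	/* Start the output at the next whole-second boundary, adjusted back
	 * by PPS_OUT_DELAY_NS.
	 */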
3520 	start_time = (current_time + NSEC_PER_SEC) /
3521 			NSEC_PER_SEC * NSEC_PER_SEC;
3522 	start_time = start_time - PPS_OUT_DELAY_NS;
3523 
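	/* Select the GPIO pin function for the 1PPS output of this
	 * timer/pin combination.
	 */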
3524 	func = 8 + idx + timer * 4;
3525 	val = GLGEN_GPIO_CTL_PIN_DIR_M |
3526 		((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
3527 		GLGEN_GPIO_CTL_PIN_FUNC_M);
3528 
3529 	/* Write clkout with half of period value */
3530 	ICE_WRITE_REG(hw, GLTSYN_CLKO(idx, timer), NSEC_PER_SEC / 2);
3531 
3532 	/* Write TARGET time register */
3533 	ICE_WRITE_REG(hw, GLTSYN_TGT_L(idx, timer), start_time & 0xffffffff);
3534 	ICE_WRITE_REG(hw, GLTSYN_TGT_H(idx, timer), start_time >> 32);
3535 
3536 	/* Write AUX_OUT register */
3537 	ICE_WRITE_REG(hw, GLTSYN_AUX_OUT(idx, timer),
3538 		      GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M);
3539 
3540 	/* Write GPIO CTL register */
3541 	ICE_WRITE_REG(hw, GLGEN_GPIO_CTL(idx), val);
3542 
3543 	return 0;
3544 }
3545 
3546 static int
3547 ice_dev_start(struct rte_eth_dev *dev)
3548 {
3549 	struct rte_eth_dev_data *data = dev->data;
3550 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3551 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3552 	struct ice_vsi *vsi = pf->main_vsi;
3553 	struct ice_adapter *ad =
3554 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3555 	uint16_t nb_rxq = 0;
3556 	uint16_t nb_txq, i;
3557 	uint16_t max_frame_size;
3558 	int mask, ret;
3559 	uint8_t timer = hw->func_caps.ts_func_info.tmr_index_owned;
3560 	uint32_t pin_idx = ad->devargs.pin_idx;
3561 
3562 	/* program Tx queues' context in hardware */
3563 	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
3564 		ret = ice_tx_queue_start(dev, nb_txq);
3565 		if (ret) {
3566 			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
3567 			goto tx_err;
3568 		}
3569 	}
3570 
3571 	/* program Rx queues' context in hardware*/
3572 	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
3573 		ret = ice_rx_queue_start(dev, nb_rxq);
3574 		if (ret) {
3575 			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
3576 			goto rx_err;
3577 		}
3578 	}
3579 
3580 	ice_set_rx_function(dev);
3581 	ice_set_tx_function(dev);
3582 
3583 	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
3584 			RTE_ETH_VLAN_EXTEND_MASK;
3585 	ret = ice_vlan_offload_set(dev, mask);
3586 	if (ret) {
3587 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
3588 		goto rx_err;
3589 	}
3590 
3591 	/* Enable Rx interrupts and map Rx queues to interrupt vectors */
3592 	if (ice_rxq_intr_setup(dev))
3593 		return -EIO;
3594 
3595 	/* Allow broadcast Rx and unicast/multicast/broadcast Tx on the VSI */
3596 	ret = ice_set_vsi_promisc(hw, vsi->idx,
3597 				  ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
3598 				  ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
3599 				  0);
3600 	if (ret != ICE_SUCCESS)
3601 		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
3602 
3603 	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
3604 				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
3605 				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
3606 				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
3607 				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
3608 				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
3609 				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
3610 				     NULL);
3611 	if (ret != ICE_SUCCESS)
3612 		PMD_DRV_LOG(WARNING, "Fail to set phy mask");
3613 
3614 	ice_get_init_link_status(dev);
3615 
3616 	ice_dev_set_link_up(dev);
3617 
3618 	/* Call get_link_info aq command to enable/disable LSE */
3619 	ice_link_update(dev, 1);
3620 
3621 	pf->adapter_stopped = false;
3622 
3623 	/* Derive the max frame size from the MTU, or use the default */
3624 	max_frame_size = pf->dev_data->mtu ?
3625 		pf->dev_data->mtu + ICE_ETH_OVERHEAD :
3626 		ICE_FRAME_SIZE_MAX;
3627 
3628 	/* Program the max frame size into hardware */
3629 	ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
3630 
3631 	if (ad->devargs.pps_out_ena) {
3632 		ret = ice_pps_out_cfg(hw, pin_idx, timer);
3633 		if (ret) {
3634 			PMD_DRV_LOG(ERR, "Fail to configure 1pps out");
3635 			goto rx_err;
3636 		}
3637 	}
3638 
3639 	return 0;
3640 
3641 	/* stop any queues already started if starting all queues failed */
3642 rx_err:
3643 	for (i = 0; i < nb_rxq; i++)
3644 		ice_rx_queue_stop(dev, i);
3645 tx_err:
3646 	for (i = 0; i < nb_txq; i++)
3647 		ice_tx_queue_stop(dev, i);
3648 
3649 	return -EIO;
3650 }
3651 
3652 static int
3653 ice_dev_reset(struct rte_eth_dev *dev)
3654 {
3655 	int ret;
3656 
3657 	if (dev->data->sriov.active)
3658 		return -ENOTSUP;
3659 
3660 	ret = ice_dev_uninit(dev);
3661 	if (ret) {
3662 		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
3663 		return -ENXIO;
3664 	}
3665 
3666 	ret = ice_dev_init(dev);
3667 	if (ret) {
3668 		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
3669 		return -ENXIO;
3670 	}
3671 
3672 	return 0;
3673 }
3674 
3675 static int
3676 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3677 {
3678 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3679 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3680 	struct ice_vsi *vsi = pf->main_vsi;
3681 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
3682 	bool is_safe_mode = pf->adapter->is_safe_mode;
3683 	u64 phy_type_low;
3684 	u64 phy_type_high;
3685 
3686 	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
3687 	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
3688 	dev_info->max_rx_queues = vsi->nb_qps;
3689 	dev_info->max_tx_queues = vsi->nb_qps;
3690 	dev_info->max_mac_addrs = vsi->max_macaddrs;
3691 	dev_info->max_vfs = pci_dev->max_vfs;
3692 	dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
3693 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3694 
3695 	dev_info->rx_offload_capa =
3696 		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
3697 		RTE_ETH_RX_OFFLOAD_KEEP_CRC |
3698 		RTE_ETH_RX_OFFLOAD_SCATTER |
3699 		RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3700 	dev_info->tx_offload_capa =
3701 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
3702 		RTE_ETH_TX_OFFLOAD_TCP_TSO |
3703 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
3704 		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
3705 	dev_info->flow_type_rss_offloads = 0;
3706 
3707 	if (!is_safe_mode) {
3708 		dev_info->rx_offload_capa |=
3709 			RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
3710 			RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
3711 			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
3712 			RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
3713 			RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3714 			RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
3715 			RTE_ETH_RX_OFFLOAD_RSS_HASH |
3716 			RTE_ETH_RX_OFFLOAD_TIMESTAMP;
3717 		dev_info->tx_offload_capa |=
3718 			RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
3719 			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
3720 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
3721 			RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
3722 			RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
3723 			RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3724 			RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
3725 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
3726 	}
3727 
3728 	dev_info->rx_queue_offload_capa = 0;
3729 	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
3730 
3731 	dev_info->reta_size = pf->hash_lut_size;
3732 	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
3733 
3734 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3735 		.rx_thresh = {
3736 			.pthresh = ICE_DEFAULT_RX_PTHRESH,
3737 			.hthresh = ICE_DEFAULT_RX_HTHRESH,
3738 			.wthresh = ICE_DEFAULT_RX_WTHRESH,
3739 		},
3740 		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
3741 		.rx_drop_en = 0,
3742 		.offloads = 0,
3743 	};
3744 
3745 	dev_info->default_txconf = (struct rte_eth_txconf) {
3746 		.tx_thresh = {
3747 			.pthresh = ICE_DEFAULT_TX_PTHRESH,
3748 			.hthresh = ICE_DEFAULT_TX_HTHRESH,
3749 			.wthresh = ICE_DEFAULT_TX_WTHRESH,
3750 		},
3751 		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
3752 		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
3753 		.offloads = 0,
3754 	};
3755 
3756 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3757 		.nb_max = ICE_MAX_RING_DESC,
3758 		.nb_min = ICE_MIN_RING_DESC,
3759 		.nb_align = ICE_ALIGN_RING_DESC,
3760 	};
3761 
3762 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3763 		.nb_max = ICE_MAX_RING_DESC,
3764 		.nb_min = ICE_MIN_RING_DESC,
3765 		.nb_align = ICE_ALIGN_RING_DESC,
3766 	};
3767 
3768 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M |
3769 			       RTE_ETH_LINK_SPEED_100M |
3770 			       RTE_ETH_LINK_SPEED_1G |
3771 			       RTE_ETH_LINK_SPEED_2_5G |
3772 			       RTE_ETH_LINK_SPEED_5G |
3773 			       RTE_ETH_LINK_SPEED_10G |
3774 			       RTE_ETH_LINK_SPEED_20G |
3775 			       RTE_ETH_LINK_SPEED_25G;
3776 
3777 	phy_type_low = hw->port_info->phy.phy_type_low;
3778 	phy_type_high = hw->port_info->phy.phy_type_high;
3779 
3780 	if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
3781 		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_50G;
3782 
3783 	if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
3784 			ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
3785 		dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;
3786 
3787 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3788 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3789 
3790 	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
3791 	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
3792 	dev_info->default_rxportconf.nb_queues = 1;
3793 	dev_info->default_txportconf.nb_queues = 1;
3794 	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
3795 	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
3796 
3797 	return 0;
3798 }
3799 
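/* struct rte_eth_link fits in 64 bits, so a single 64-bit compare-and-set is
 * enough to read or update dev->data->dev_link consistently without a lock.
 */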
3800 static inline int
3801 ice_atomic_read_link_status(struct rte_eth_dev *dev,
3802 			    struct rte_eth_link *link)
3803 {
3804 	struct rte_eth_link *dst = link;
3805 	struct rte_eth_link *src = &dev->data->dev_link;
3806 
3807 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3808 				*(uint64_t *)src) == 0)
3809 		return -1;
3810 
3811 	return 0;
3812 }
3813 
3814 static inline int
3815 ice_atomic_write_link_status(struct rte_eth_dev *dev,
3816 			     struct rte_eth_link *link)
3817 {
3818 	struct rte_eth_link *dst = &dev->data->dev_link;
3819 	struct rte_eth_link *src = link;
3820 
3821 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
3822 				*(uint64_t *)src) == 0)
3823 		return -1;
3824 
3825 	return 0;
3826 }
3827 
3828 static int
3829 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
3830 {
3831 #define CHECK_INTERVAL 100  /* 100ms */
3832 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
3833 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3834 	struct ice_link_status link_status;
3835 	struct rte_eth_link link, old;
3836 	int status;
3837 	unsigned int rep_cnt = MAX_REPEAT_TIME;
3838 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
3839 
3840 	memset(&link, 0, sizeof(link));
3841 	memset(&old, 0, sizeof(old));
3842 	memset(&link_status, 0, sizeof(link_status));
3843 	ice_atomic_read_link_status(dev, &old);
3844 
3845 	do {
3846 		/* Get link status information from hardware */
3847 		status = ice_aq_get_link_info(hw->port_info, enable_lse,
3848 					      &link_status, NULL);
3849 		if (status != ICE_SUCCESS) {
3850 			link.link_speed = RTE_ETH_SPEED_NUM_100M;
3851 			link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3852 			PMD_DRV_LOG(ERR, "Failed to get link info");
3853 			goto out;
3854 		}
3855 
3856 		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
3857 		if (!wait_to_complete || link.link_status)
3858 			break;
3859 
3860 		rte_delay_ms(CHECK_INTERVAL);
3861 	} while (--rep_cnt);
3862 
3863 	if (!link.link_status)
3864 		goto out;
3865 
3866 	/* Full-duplex operation at all supported speeds */
3867 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3868 
3869 	/* Parse the link status */
3870 	switch (link_status.link_speed) {
3871 	case ICE_AQ_LINK_SPEED_10MB:
3872 		link.link_speed = RTE_ETH_SPEED_NUM_10M;
3873 		break;
3874 	case ICE_AQ_LINK_SPEED_100MB:
3875 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
3876 		break;
3877 	case ICE_AQ_LINK_SPEED_1000MB:
3878 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
3879 		break;
3880 	case ICE_AQ_LINK_SPEED_2500MB:
3881 		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
3882 		break;
3883 	case ICE_AQ_LINK_SPEED_5GB:
3884 		link.link_speed = RTE_ETH_SPEED_NUM_5G;
3885 		break;
3886 	case ICE_AQ_LINK_SPEED_10GB:
3887 		link.link_speed = RTE_ETH_SPEED_NUM_10G;
3888 		break;
3889 	case ICE_AQ_LINK_SPEED_20GB:
3890 		link.link_speed = RTE_ETH_SPEED_NUM_20G;
3891 		break;
3892 	case ICE_AQ_LINK_SPEED_25GB:
3893 		link.link_speed = RTE_ETH_SPEED_NUM_25G;
3894 		break;
3895 	case ICE_AQ_LINK_SPEED_40GB:
3896 		link.link_speed = RTE_ETH_SPEED_NUM_40G;
3897 		break;
3898 	case ICE_AQ_LINK_SPEED_50GB:
3899 		link.link_speed = RTE_ETH_SPEED_NUM_50G;
3900 		break;
3901 	case ICE_AQ_LINK_SPEED_100GB:
3902 		link.link_speed = RTE_ETH_SPEED_NUM_100G;
3903 		break;
3904 	case ICE_AQ_LINK_SPEED_UNKNOWN:
3905 		PMD_DRV_LOG(ERR, "Unknown link speed");
3906 		link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
3907 		break;
3908 	default:
3909 		PMD_DRV_LOG(ERR, "None link speed");
3910 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
3911 		break;
3912 	}
3913 
3914 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
3915 			      RTE_ETH_LINK_SPEED_FIXED);
3916 
3917 out:
3918 	ice_atomic_write_link_status(dev, &link);
3919 	if (link.link_status == old.link_status)
3920 		return -1;
3921 
3922 	return 0;
3923 }
3924 
3925 /* Force the physical link state by getting the current PHY capabilities from
3926  * hardware and setting the PHY config based on the determined capabilities. If
3927  * the link changes, a link event is triggered because both the Enable Automatic
3928  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
3929  */
3930 static enum ice_status
3931 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
3932 {
3933 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3934 	struct ice_aqc_get_phy_caps_data *pcaps;
3935 	struct ice_port_info *pi;
3936 	enum ice_status status;
3937 
3938 	if (!hw || !hw->port_info)
3939 		return ICE_ERR_PARAM;
3940 
3941 	pi = hw->port_info;
3942 
3943 	pcaps = (struct ice_aqc_get_phy_caps_data *)
3944 		ice_malloc(hw, sizeof(*pcaps));
3945 	if (!pcaps)
3946 		return ICE_ERR_NO_MEMORY;
3947 
3948 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3949 				     pcaps, NULL);
3950 	if (status)
3951 		goto out;
3952 
3953 	/* No change in link */
3954 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
3955 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
3956 		goto out;
3957 
3958 	cfg.phy_type_low = pcaps->phy_type_low;
3959 	cfg.phy_type_high = pcaps->phy_type_high;
3960 	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3961 	cfg.low_power_ctrl_an = pcaps->low_power_ctrl_an;
3962 	cfg.eee_cap = pcaps->eee_cap;
3963 	cfg.eeer_value = pcaps->eeer_value;
3964 	cfg.link_fec_opt = pcaps->link_fec_options;
3965 	if (link_up)
3966 		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
3967 	else
3968 		cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
3969 
3970 	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3971 
3972 out:
3973 	ice_free(hw, pcaps);
3974 	return status;
3975 }
3976 
3977 static int
3978 ice_dev_set_link_up(struct rte_eth_dev *dev)
3979 {
3980 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3981 
3982 	return ice_force_phys_link_state(hw, true);
3983 }
3984 
3985 static int
3986 ice_dev_set_link_down(struct rte_eth_dev *dev)
3987 {
3988 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3989 
3990 	return ice_force_phys_link_state(hw, false);
3991 }
3992 
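/* The requested MTU is stored in dev->data->mtu by the ethdev layer once this
 * callback succeeds; it is applied to hardware on the next ice_dev_start()
 * via ice_aq_set_mac_cfg().
 */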
3993 static int
3994 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
3995 {
3996 	/* Changing the MTU is forbidden while the port is started */
3997 	if (dev->data->dev_started != 0) {
3998 		PMD_DRV_LOG(ERR,
3999 			    "port %d must be stopped before configuration",
4000 			    dev->data->port_id);
4001 		return -EBUSY;
4002 	}
4003 
4004 	return 0;
4005 }
4006 
4007 static int ice_macaddr_set(struct rte_eth_dev *dev,
4008 			   struct rte_ether_addr *mac_addr)
4009 {
4010 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4011 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4012 	struct ice_vsi *vsi = pf->main_vsi;
4013 	struct ice_mac_filter *f;
4014 	uint8_t flags = 0;
4015 	int ret;
4016 
4017 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
4018 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
4019 		return -EINVAL;
4020 	}
4021 
4022 	TAILQ_FOREACH(f, &vsi->mac_list, next) {
4023 		if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
4024 			break;
4025 	}
4026 
4027 	if (!f) {
4028 		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
4029 		return -EIO;
4030 	}
4031 
4032 	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
4033 	if (ret != ICE_SUCCESS) {
4034 		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
4035 		return -EIO;
4036 	}
4037 	ret = ice_add_mac_filter(vsi, mac_addr);
4038 	if (ret != ICE_SUCCESS) {
4039 		PMD_DRV_LOG(ERR, "Failed to add mac filter");
4040 		return -EIO;
4041 	}
4042 	rte_ether_addr_copy(mac_addr, &pf->dev_addr);
4043 
4044 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
4045 	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
4046 	if (ret != ICE_SUCCESS)
4047 		PMD_DRV_LOG(ERR, "Failed to set manage mac");
4048 
4049 	return 0;
4050 }
4051 
4052 /* Add a MAC address, and update filters */
4053 static int
4054 ice_macaddr_add(struct rte_eth_dev *dev,
4055 		struct rte_ether_addr *mac_addr,
4056 		__rte_unused uint32_t index,
4057 		__rte_unused uint32_t pool)
4058 {
4059 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4060 	struct ice_vsi *vsi = pf->main_vsi;
4061 	int ret;
4062 
4063 	ret = ice_add_mac_filter(vsi, mac_addr);
4064 	if (ret != ICE_SUCCESS) {
4065 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
4066 		return -EINVAL;
4067 	}
4068 
4069 	return ICE_SUCCESS;
4070 }
4071 
4072 /* Remove a MAC address, and update filters */
4073 static void
4074 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
4075 {
4076 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4077 	struct ice_vsi *vsi = pf->main_vsi;
4078 	struct rte_eth_dev_data *data = dev->data;
4079 	struct rte_ether_addr *macaddr;
4080 	int ret;
4081 
4082 	macaddr = &data->mac_addrs[index];
4083 	ret = ice_remove_mac_filter(vsi, macaddr);
4084 	if (ret) {
4085 		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
4086 		return;
4087 	}
4088 }
4089 
4090 static int
4091 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
4092 {
4093 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4094 	struct ice_vlan vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, vlan_id);
4095 	struct ice_vsi *vsi = pf->main_vsi;
4096 	int ret;
4097 
4098 	PMD_INIT_FUNC_TRACE();
4099 
4100 	/**
4101 	 * VLAN 0 is the generic filter for untagged packets
4102 	 * and can't be added or removed by the user.
4103 	 */
4104 	if (vlan_id == 0)
4105 		return 0;
4106 
4107 	if (on) {
4108 		ret = ice_add_vlan_filter(vsi, &vlan);
4109 		if (ret < 0) {
4110 			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
4111 			return -EINVAL;
4112 		}
4113 	} else {
4114 		ret = ice_remove_vlan_filter(vsi, &vlan);
4115 		if (ret < 0) {
4116 			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
4117 			return -EINVAL;
4118 		}
4119 	}
4120 
4121 	return 0;
4122 }
4123 
4124 /* In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
4125  * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
4126  * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
4127  * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
4128  *
4129  * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
4130  * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
4131  * traffic in SVM, since the VLAN TPID isn't part of filtering.
4132  *
4133  * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
4134  * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
4135  * part of filtering.
4136  */
4137 static int
4138 ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
4139 {
4140 	struct ice_vlan vlan;
4141 	int err;
4142 
4143 	vlan = ICE_VLAN(0, 0);
4144 	err = ice_add_vlan_filter(vsi, &vlan);
4145 	if (err) {
4146 		PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0");
4147 		return err;
4148 	}
4149 
4150 	/* in SVM both VLAN 0 filters are identical */
4151 	if (!ice_is_dvm_ena(&vsi->adapter->hw))
4152 		return 0;
4153 
4154 	vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4155 	err = ice_add_vlan_filter(vsi, &vlan);
4156 	if (err) {
4157 		PMD_DRV_LOG(DEBUG, "Failed to add VLAN ID 0 in double VLAN mode");
4158 		return err;
4159 	}
4160 
4161 	return 0;
4162 }
4163 
4164 /*
4165  * Delete the VLAN 0 filters in the same manner that they were added in
4166  * ice_vsi_add_vlan_zero.
4167  */
4168 static int
4169 ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
4170 {
4171 	struct ice_vlan vlan;
4172 	int err;
4173 
4174 	vlan = ICE_VLAN(0, 0);
4175 	err = ice_remove_vlan_filter(vsi, &vlan);
4176 	if (err) {
4177 		PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0");
4178 		return err;
4179 	}
4180 
4181 	/* in SVM both VLAN 0 filters are identical */
4182 	if (!ice_is_dvm_ena(&vsi->adapter->hw))
4183 		return 0;
4184 
4185 	vlan = ICE_VLAN(RTE_ETHER_TYPE_VLAN, 0);
4186 	err = ice_remove_vlan_filter(vsi, &vlan);
4187 	if (err) {
4188 		PMD_DRV_LOG(DEBUG, "Failed to remove VLAN ID 0 in double VLAN mode");
4189 		return err;
4190 	}
4191 
4192 	return 0;
4193 }
4194 
4195 /* Enable or disable VLAN filtering (Rx VLAN pruning) on the VSI */
4196 static int
4197 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
4198 {
4199 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4200 	struct ice_vsi_ctx ctxt;
4201 	uint8_t sw_flags2;
4202 	int ret = 0;
4203 
4204 	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4205 
4206 	if (on)
4207 		vsi->info.sw_flags2 |= sw_flags2;
4208 	else
4209 		vsi->info.sw_flags2 &= ~sw_flags2;
4210 
4211 	vsi->info.sw_id = hw->port_info->sw_id;
4212 	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4213 	ctxt.info.valid_sections =
4214 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4215 				 ICE_AQ_VSI_PROP_SECURITY_VALID);
4216 	ctxt.vsi_num = vsi->vsi_id;
4217 
4218 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4219 	if (ret) {
4220 		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
4221 			    on ? "enable" : "disable");
4222 		return -EINVAL;
4223 	} else {
4224 		vsi->info.valid_sections |=
4225 			rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
4226 					 ICE_AQ_VSI_PROP_SECURITY_VALID);
4227 	}
4228 
4229 	/* Like other drivers, allow untagged packets when the VLAN filter is on */
4230 	if (on)
4231 		ret = ice_vsi_add_vlan_zero(vsi);
4232 	else
4233 		ret = ice_vsi_del_vlan_zero(vsi);
4234 
4235 	return 0;
4236 }
4237 
4238 /* Enable or disable Rx VLAN stripping for the VSI */
4239 static int
4240 ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
4241 {
4242 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4243 	struct ice_vsi_ctx ctxt;
4244 	enum ice_status status;
4245 	int err = 0;
4246 
4247 	/* do not allow modifying VLAN stripping when a port VLAN is configured
4248 	 * on this VSI
4249 	 */
4250 	if (vsi->info.port_based_inner_vlan)
4251 		return 0;
4252 
4253 	memset(&ctxt, 0, sizeof(ctxt));
4254 
4255 	if (ena)
4256 		/* Strip VLAN tag from Rx packet and put it in the desc */
4257 		ctxt.info.inner_vlan_flags =
4258 					ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
4259 	else
4260 		/* Disable stripping. Leave tag in packet */
4261 		ctxt.info.inner_vlan_flags =
4262 					ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4263 
4264 	/* Allow all packets untagged/tagged */
4265 	ctxt.info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
4266 
4267 	ctxt.info.valid_sections = rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4268 
4269 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4270 	if (status) {
4271 		PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan stripping",
4272 			    ena ? "enable" : "disable");
4273 		err = -EIO;
4274 	} else {
4275 		vsi->info.inner_vlan_flags = ctxt.info.inner_vlan_flags;
4276 	}
4277 
4278 	return err;
4279 }
4280 
4281 static int
4282 ice_vsi_ena_inner_stripping(struct ice_vsi *vsi)
4283 {
4284 	return ice_vsi_manage_vlan_stripping(vsi, true);
4285 }
4286 
4287 static int
4288 ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
4289 {
4290 	return ice_vsi_manage_vlan_stripping(vsi, false);
4291 }
4292 
4293 static int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi)
4294 {
4295 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4296 	struct ice_vsi_ctx ctxt;
4297 	enum ice_status status;
4298 	int err = 0;
4299 
4300 	/* do not allow modifying VLAN stripping when a port VLAN is configured
4301 	 * on this VSI
4302 	 */
4303 	if (vsi->info.port_based_outer_vlan)
4304 		return 0;
4305 
4306 	memset(&ctxt, 0, sizeof(ctxt));
4307 
4308 	ctxt.info.valid_sections =
4309 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4310 	/* clear current outer VLAN strip settings */
4311 	ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4312 		~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
4313 	ctxt.info.outer_vlan_flags |=
4314 		(ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
4315 		 ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
4316 		(ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
4317 		 ICE_AQ_VSI_OUTER_TAG_TYPE_S);
4318 
4319 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4320 	if (status) {
4321 		PMD_DRV_LOG(ERR, "Update VSI failed to enable outer VLAN stripping");
4322 		err = -EIO;
4323 	} else {
4324 		vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4325 	}
4326 
4327 	return err;
4328 }
4329 
4330 static int
4331 ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
4332 {
4333 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4334 	struct ice_vsi_ctx ctxt;
4335 	enum ice_status status;
4336 	int err = 0;
4337 
4338 	if (vsi->info.port_based_outer_vlan)
4339 		return 0;
4340 
4341 	memset(&ctxt, 0, sizeof(ctxt));
4342 
4343 	ctxt.info.valid_sections =
4344 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
4345 	/* clear current outer VLAN strip settings */
4346 	ctxt.info.outer_vlan_flags = vsi->info.outer_vlan_flags &
4347 		~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
4348 	ctxt.info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
4349 		ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
4350 
4351 	status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4352 	if (status) {
4353 		PMD_DRV_LOG(ERR, "Update VSI failed to disable outer VLAN stripping");
4354 		err = -EIO;
4355 	} else {
4356 		vsi->info.outer_vlan_flags = ctxt.info.outer_vlan_flags;
4357 	}
4358 
4359 	return err;
4360 }
4361 
4362 static int
4363 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool ena)
4364 {
4365 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4366 	int ret;
4367 
4368 	if (ice_is_dvm_ena(hw)) {
4369 		if (ena)
4370 			ret = ice_vsi_ena_outer_stripping(vsi);
4371 		else
4372 			ret = ice_vsi_dis_outer_stripping(vsi);
4373 	} else {
4374 		if (ena)
4375 			ret = ice_vsi_ena_inner_stripping(vsi);
4376 		else
4377 			ret = ice_vsi_dis_inner_stripping(vsi);
4378 	}
4379 
4380 	return ret;
4381 }
4382 
4383 static int
4384 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
4385 {
4386 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4387 	struct ice_vsi *vsi = pf->main_vsi;
4388 	struct rte_eth_rxmode *rxmode;
4389 
4390 	rxmode = &dev->data->dev_conf.rxmode;
4391 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
4392 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
4393 			ice_vsi_config_vlan_filter(vsi, true);
4394 		else
4395 			ice_vsi_config_vlan_filter(vsi, false);
4396 	}
4397 
4398 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
4399 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
4400 			ice_vsi_config_vlan_stripping(vsi, true);
4401 		else
4402 			ice_vsi_config_vlan_stripping(vsi, false);
4403 	}
4404 
4405 	return 0;
4406 }
4407 
4408 static int
4409 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4410 {
4411 	struct ice_aq_get_set_rss_lut_params lut_params;
4412 	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
4413 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4414 	int ret;
4415 
4416 	if (!lut)
4417 		return -EINVAL;
4418 
4419 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4420 		lut_params.vsi_handle = vsi->idx;
4421 		lut_params.lut_size = lut_size;
4422 		lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4423 		lut_params.lut = lut;
4424 		lut_params.global_lut_id = 0;
4425 		ret = ice_aq_get_rss_lut(hw, &lut_params);
4426 		if (ret) {
4427 			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4428 			return -EINVAL;
4429 		}
4430 	} else {
4431 		uint32_t *lut_dw = (uint32_t *)lut;
4432 		uint16_t i, lut_size_dw = lut_size / 4;
4433 
4434 		for (i = 0; i < lut_size_dw; i++)
4435 			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
4436 	}
4437 
4438 	return 0;
4439 }
4440 
4441 static int
4442 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4443 {
4444 	struct ice_aq_get_set_rss_lut_params lut_params;
4445 	struct ice_pf *pf;
4446 	struct ice_hw *hw;
4447 	int ret;
4448 
4449 	if (!vsi || !lut)
4450 		return -EINVAL;
4451 
4452 	pf = ICE_VSI_TO_PF(vsi);
4453 	hw = ICE_VSI_TO_HW(vsi);
4454 
4455 	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
4456 		lut_params.vsi_handle = vsi->idx;
4457 		lut_params.lut_size = lut_size;
4458 		lut_params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
4459 		lut_params.lut = lut;
4460 		lut_params.global_lut_id = 0;
4461 		ret = ice_aq_set_rss_lut(hw, &lut_params);
4462 		if (ret) {
4463 			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4464 			return -EINVAL;
4465 		}
4466 	} else {
4467 		uint32_t *lut_dw = (uint32_t *)lut;
4468 		uint16_t i, lut_size_dw = lut_size / 4;
4469 
4470 		for (i = 0; i < lut_size_dw; i++)
4471 			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
4472 
4473 		ice_flush(hw);
4474 	}
4475 
4476 	return 0;
4477 }
4478 
4479 static int
4480 ice_rss_reta_update(struct rte_eth_dev *dev,
4481 		    struct rte_eth_rss_reta_entry64 *reta_conf,
4482 		    uint16_t reta_size)
4483 {
4484 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4485 	uint16_t i, lut_size = pf->hash_lut_size;
4486 	uint16_t idx, shift;
4487 	uint8_t *lut;
4488 	int ret;
4489 
4490 	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
4491 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
4492 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
4493 		PMD_DRV_LOG(ERR,
4494 			    "The size of hash lookup table configured (%d)"
4495 			    "doesn't match the number hardware can "
4496 			    "supported (128, 512, 2048)",
4497 			    reta_size);
4498 		return -EINVAL;
4499 	}
4500 
4501 	/* The current LUT size MUST be used to get the RSS lookup table,
4502 	 * otherwise the call will fail with a -100 error code.
4503 	 */
4504 	lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
4505 	if (!lut) {
4506 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4507 		return -ENOMEM;
4508 	}
4509 	ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
4510 	if (ret)
4511 		goto out;
4512 
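	/* reta_conf is an array of 64-entry groups: LUT entry i is described by
	 * group i / RTE_ETH_RETA_GROUP_SIZE, slot i % RTE_ETH_RETA_GROUP_SIZE,
	 * and is only updated when the corresponding mask bit is set.
	 */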
4513 	for (i = 0; i < reta_size; i++) {
4514 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4515 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4516 		if (reta_conf[idx].mask & (1ULL << shift))
4517 			lut[i] = reta_conf[idx].reta[shift];
4518 	}
4519 	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
4520 	if (ret == 0 && lut_size != reta_size) {
4521 		PMD_DRV_LOG(INFO,
4522 			    "The size of hash lookup table is changed from (%d) to (%d)",
4523 			    lut_size, reta_size);
4524 		pf->hash_lut_size = reta_size;
4525 	}
4526 
4527 out:
4528 	rte_free(lut);
4529 
4530 	return ret;
4531 }
4532 
4533 static int
4534 ice_rss_reta_query(struct rte_eth_dev *dev,
4535 		   struct rte_eth_rss_reta_entry64 *reta_conf,
4536 		   uint16_t reta_size)
4537 {
4538 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4539 	uint16_t i, lut_size = pf->hash_lut_size;
4540 	uint16_t idx, shift;
4541 	uint8_t *lut;
4542 	int ret;
4543 
4544 	if (reta_size != lut_size) {
4545 		PMD_DRV_LOG(ERR,
4546 			    "The size of hash lookup table configured (%d)"
4547 			    "doesn't match the number hardware can "
4548 			    "supported (%d)",
4549 			    reta_size, lut_size);
4550 		return -EINVAL;
4551 	}
4552 
4553 	lut = rte_zmalloc(NULL, reta_size, 0);
4554 	if (!lut) {
4555 		PMD_DRV_LOG(ERR, "No memory can be allocated");
4556 		return -ENOMEM;
4557 	}
4558 
4559 	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
4560 	if (ret)
4561 		goto out;
4562 
4563 	for (i = 0; i < reta_size; i++) {
4564 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4565 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4566 		if (reta_conf[idx].mask & (1ULL << shift))
4567 			reta_conf[idx].reta[shift] = lut[i];
4568 	}
4569 
4570 out:
4571 	rte_free(lut);
4572 
4573 	return ret;
4574 }
4575 
4576 static int
4577 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
4578 {
4579 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4580 	int ret = 0;
4581 
4582 	if (!key || key_len == 0) {
4583 		PMD_DRV_LOG(DEBUG, "No key to be configured");
4584 		return 0;
4585 	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
4586 		   sizeof(uint32_t)) {
4587 		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
4588 		return -EINVAL;
4589 	}
4590 
4591 	struct ice_aqc_get_set_rss_keys *key_dw =
4592 		(struct ice_aqc_get_set_rss_keys *)key;
4593 
4594 	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
4595 	if (ret) {
4596 		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
4597 		ret = -EINVAL;
4598 	}
4599 
4600 	return ret;
4601 }
4602 
4603 static int
4604 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
4605 {
4606 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
4607 	int ret;
4608 
4609 	if (!key || !key_len)
4610 		return -EINVAL;
4611 
4612 	ret = ice_aq_get_rss_key
4613 		(hw, vsi->idx,
4614 		 (struct ice_aqc_get_set_rss_keys *)key);
4615 	if (ret) {
4616 		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
4617 		return -EINVAL;
4618 	}
4619 	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
4620 
4621 	return 0;
4622 }
4623 
4624 static int
4625 ice_rss_hash_update(struct rte_eth_dev *dev,
4626 		    struct rte_eth_rss_conf *rss_conf)
4627 {
4628 	enum ice_status status = ICE_SUCCESS;
4629 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4630 	struct ice_vsi *vsi = pf->main_vsi;
4631 
4632 	/* set hash key */
4633 	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
4634 	if (status)
4635 		return status;
4636 
4637 	if (rss_conf->rss_hf == 0) {
4638 		pf->rss_hf = 0;
4639 		return 0;
4640 	}
4641 
4642 	/* RSS hash configuration */
4643 	ice_rss_hash_set(pf, rss_conf->rss_hf);
4644 
4645 	return 0;
4646 }
4647 
4648 static int
4649 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
4650 		      struct rte_eth_rss_conf *rss_conf)
4651 {
4652 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4653 	struct ice_vsi *vsi = pf->main_vsi;
4654 
4655 	ice_get_rss_key(vsi, rss_conf->rss_key,
4656 			&rss_conf->rss_key_len);
4657 
4658 	rss_conf->rss_hf = pf->rss_hf;
4659 	return 0;
4660 }
4661 
4662 static int
4663 ice_promisc_enable(struct rte_eth_dev *dev)
4664 {
4665 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4666 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4667 	struct ice_vsi *vsi = pf->main_vsi;
4668 	enum ice_status status;
4669 	uint8_t pmask;
4670 	int ret = 0;
4671 
4672 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4673 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4674 
4675 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4676 	switch (status) {
4677 	case ICE_ERR_ALREADY_EXISTS:
4678 		PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
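		/* fall-through: already enabled is not an error */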
4679 	case ICE_SUCCESS:
4680 		break;
4681 	default:
4682 		PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
4683 		ret = -EAGAIN;
4684 	}
4685 
4686 	return ret;
4687 }
4688 
4689 static int
4690 ice_promisc_disable(struct rte_eth_dev *dev)
4691 {
4692 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4693 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4694 	struct ice_vsi *vsi = pf->main_vsi;
4695 	enum ice_status status;
4696 	uint8_t pmask;
4697 	int ret = 0;
4698 
4699 	if (dev->data->all_multicast == 1)
4700 		pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX;
4701 	else
4702 		pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
4703 			ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4704 
4705 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4706 	if (status != ICE_SUCCESS) {
4707 		PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
4708 		ret = -EAGAIN;
4709 	}
4710 
4711 	return ret;
4712 }
4713 
4714 static int
4715 ice_allmulti_enable(struct rte_eth_dev *dev)
4716 {
4717 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4718 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4719 	struct ice_vsi *vsi = pf->main_vsi;
4720 	enum ice_status status;
4721 	uint8_t pmask;
4722 	int ret = 0;
4723 
4724 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4725 
4726 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
4727 
4728 	switch (status) {
4729 	case ICE_ERR_ALREADY_EXISTS:
4730 		PMD_DRV_LOG(DEBUG, "Allmulti has already been enabled");
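		/* fall-through: already enabled is not an error */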
4731 	case ICE_SUCCESS:
4732 		break;
4733 	default:
4734 		PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
4735 		ret = -EAGAIN;
4736 	}
4737 
4738 	return ret;
4739 }
4740 
4741 static int
4742 ice_allmulti_disable(struct rte_eth_dev *dev)
4743 {
4744 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4745 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4746 	struct ice_vsi *vsi = pf->main_vsi;
4747 	enum ice_status status;
4748 	uint8_t pmask;
4749 	int ret = 0;
4750 
4751 	if (dev->data->promiscuous == 1)
4752 		return 0; /* must remain in all_multicast mode */
4753 
4754 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
4755 
4756 	status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
4757 	if (status != ICE_SUCCESS) {
4758 		PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
4759 		ret = -EAGAIN;
4760 	}
4761 
4762 	return ret;
4763 }
4764 
4765 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
4766 				    uint16_t queue_id)
4767 {
4768 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4769 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
4770 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4771 	uint32_t val;
4772 	uint16_t msix_intr;
4773 
4774 	msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
4775 
4776 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
4777 	      GLINT_DYN_CTL_ITR_INDX_M;
4778 	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
4779 
4780 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
4781 	rte_intr_ack(pci_dev->intr_handle);
4782 
4783 	return 0;
4784 }
4785 
4786 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
4787 				     uint16_t queue_id)
4788 {
4789 	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
4790 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
4791 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4792 	uint16_t msix_intr;
4793 
4794 	msix_intr = rte_intr_vec_list_index_get(intr_handle, queue_id);
4795 
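	/* Set only write-back on ITR so completed descriptors keep being
	 * written back while the queue interrupt stays masked (INTENA not set).
	 */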
4796 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
4797 
4798 	return 0;
4799 }
4800 
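/* Report the firmware version as
 * "<NVM major>.<NVM minor> 0x<eetrack> <OROM major>.<OROM build>.<OROM patch>".
 */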
4801 static int
4802 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
4803 {
4804 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4805 	u8 ver, patch;
4806 	u16 build;
4807 	int ret;
4808 
4809 	ver = hw->flash.orom.major;
4810 	patch = hw->flash.orom.patch;
4811 	build = hw->flash.orom.build;
4812 
4813 	ret = snprintf(fw_version, fw_size,
4814 			"%x.%02x 0x%08x %d.%d.%d",
4815 			hw->flash.nvm.major,
4816 			hw->flash.nvm.minor,
4817 			hw->flash.nvm.eetrack,
4818 			ver, build, patch);
4819 	if (ret < 0)
4820 		return -EINVAL;
4821 
4822 	/* add the size of '\0' */
4823 	ret += 1;
4824 	if (fw_size < (size_t)ret)
4825 		return ret;
4826 	else
4827 		return 0;
4828 }
4829 
4830 static int
4831 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
4832 {
4833 	struct ice_hw *hw;
4834 	struct ice_vsi_ctx ctxt;
4835 	uint8_t vlan_flags = 0;
4836 	int ret;
4837 
4838 	if (!vsi || !info) {
4839 		PMD_DRV_LOG(ERR, "invalid parameters");
4840 		return -EINVAL;
4841 	}
4842 
4843 	if (info->on) {
4844 		vsi->info.port_based_inner_vlan = info->config.pvid;
4845 		/**
4846 		 * If insert pvid is enabled, only tagged pkts are
4847 		 * allowed to be sent out.
4848 		 */
4849 		vlan_flags = ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4850 			     ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4851 	} else {
4852 		vsi->info.port_based_inner_vlan = 0;
4853 		if (info->config.reject.tagged == 0)
4854 			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED;
4855 
4856 		if (info->config.reject.untagged == 0)
4857 			vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED;
4858 	}
4859 	vsi->info.inner_vlan_flags &= ~(ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
4860 				  ICE_AQ_VSI_INNER_VLAN_EMODE_M);
4861 	vsi->info.inner_vlan_flags |= vlan_flags;
4862 	memset(&ctxt, 0, sizeof(ctxt));
4863 	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4864 	ctxt.info.valid_sections =
4865 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4866 	ctxt.vsi_num = vsi->vsi_id;
4867 
4868 	hw = ICE_VSI_TO_HW(vsi);
4869 	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
4870 	if (ret != ICE_SUCCESS) {
4871 		PMD_DRV_LOG(ERR,
4872 			    "update VSI for VLAN insert failed, err %d",
4873 			    ret);
4874 		return -EINVAL;
4875 	}
4876 
4877 	vsi->info.valid_sections |=
4878 		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
4879 
4880 	return ret;
4881 }
4882 
4883 static int
4884 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
4885 {
4886 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4887 	struct ice_vsi *vsi = pf->main_vsi;
4888 	struct rte_eth_dev_data *data = pf->dev_data;
4889 	struct ice_vsi_vlan_pvid_info info;
4890 	int ret;
4891 
4892 	memset(&info, 0, sizeof(info));
4893 	info.on = on;
4894 	if (info.on) {
4895 		info.config.pvid = pvid;
4896 	} else {
4897 		info.config.reject.tagged =
4898 			data->dev_conf.txmode.hw_vlan_reject_tagged;
4899 		info.config.reject.untagged =
4900 			data->dev_conf.txmode.hw_vlan_reject_untagged;
4901 	}
4902 
4903 	ret = ice_vsi_vlan_pvid_set(vsi, &info);
4904 	if (ret < 0) {
4905 		PMD_DRV_LOG(ERR, "Failed to set pvid.");
4906 		return -EINVAL;
4907 	}
4908 
4909 	return 0;
4910 }
4911 
4912 static int
4913 ice_get_eeprom_length(struct rte_eth_dev *dev)
4914 {
4915 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4916 
4917 	return hw->flash.flash_size;
4918 }
4919 
4920 static int
4921 ice_get_eeprom(struct rte_eth_dev *dev,
4922 	       struct rte_dev_eeprom_info *eeprom)
4923 {
4924 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4925 	enum ice_status status = ICE_SUCCESS;
4926 	uint8_t *data = eeprom->data;
4927 
4928 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4929 
4930 	status = ice_acquire_nvm(hw, ICE_RES_READ);
4931 	if (status) {
4932 		PMD_DRV_LOG(ERR, "acquire nvm failed.");
4933 		return -EIO;
4934 	}
4935 
4936 	status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->length,
4937 				   data, false);
4938 
4939 	ice_release_nvm(hw);
4940 
4941 	if (status) {
4942 		PMD_DRV_LOG(ERR, "EEPROM read failed.");
4943 		return -EIO;
4944 	}
4945 
4946 	return 0;
4947 }
4948 
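/* Identify the plugged module by reading byte 0 of its EEPROM and report the
 * matching SFF specification type and EEPROM length (SFP: SFF-8079/8472,
 * QSFP/QSFP28: SFF-8436/8636).
 */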
4949 static int
4950 ice_get_module_info(struct rte_eth_dev *dev,
4951 		    struct rte_eth_dev_module_info *modinfo)
4952 {
4953 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4954 	enum ice_status status;
4955 	u8 sff8472_comp = 0;
4956 	u8 sff8472_swap = 0;
4957 	u8 sff8636_rev = 0;
4958 	u8 value = 0;
4959 
4960 	status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
4961 				   0, &value, 1, 0, NULL);
4962 	if (status)
4963 		return -EIO;
4964 
4965 	switch (value) {
4966 	case ICE_MODULE_TYPE_SFP:
4967 		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4968 					   ICE_MODULE_SFF_8472_COMP, 0x00, 0,
4969 					   &sff8472_comp, 1, 0, NULL);
4970 		if (status)
4971 			return -EIO;
4972 		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4973 					   ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
4974 					   &sff8472_swap, 1, 0, NULL);
4975 		if (status)
4976 			return -EIO;
4977 
4978 		if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
4979 			modinfo->type = ICE_MODULE_SFF_8079;
4980 			modinfo->eeprom_len = ICE_MODULE_SFF_8079_LEN;
4981 		} else if (sff8472_comp &&
4982 			   (sff8472_swap & ICE_MODULE_SFF_DIAG_CAPAB)) {
4983 			modinfo->type = ICE_MODULE_SFF_8472;
4984 			modinfo->eeprom_len = ICE_MODULE_SFF_8472_LEN;
4985 		} else {
4986 			modinfo->type = ICE_MODULE_SFF_8079;
4987 			modinfo->eeprom_len = ICE_MODULE_SFF_8079_LEN;
4988 		}
4989 		break;
4990 	case ICE_MODULE_TYPE_QSFP_PLUS:
4991 	case ICE_MODULE_TYPE_QSFP28:
4992 		status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
4993 					   ICE_MODULE_REVISION_ADDR, 0x00, 0,
4994 					   &sff8636_rev, 1, 0, NULL);
4995 		if (status)
4996 			return -EIO;
4997 		/* Check revision compliance */
4998 		if (sff8636_rev > 0x02) {
4999 			/* Module is SFF-8636 compliant */
5000 			modinfo->type = ICE_MODULE_SFF_8636;
5001 			modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
5002 		} else {
5003 			modinfo->type = ICE_MODULE_SFF_8436;
5004 			modinfo->eeprom_len = ICE_MODULE_QSFP_MAX_LEN;
5005 		}
5006 		break;
5007 	default:
5008 		PMD_DRV_LOG(WARNING, "SFF Module Type not recognized.\n");
5009 		return -EINVAL;
5010 	}
5011 	return 0;
5012 }
5013 
5014 static int
5015 ice_get_module_eeprom(struct rte_eth_dev *dev,
5016 		      struct rte_dev_eeprom_info *info)
5017 {
5018 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5019 #define SFF_READ_BLOCK_SIZE 8
5020 #define I2C_BUSY_TRY_TIMES 4
5021 #define I2C_USLEEP_MIN_TIME 1500
5022 #define I2C_USLEEP_MAX_TIME 2500
5023 	uint8_t value[SFF_READ_BLOCK_SIZE] = {0};
5024 	uint8_t addr = ICE_I2C_EEPROM_DEV_ADDR;
5025 	uint8_t *data = NULL;
5026 	enum ice_status status;
5027 	bool is_sfp = false;
5028 	uint32_t i, j;
5029 	uint32_t offset = 0;
5030 	uint8_t page = 0;
5031 
5032 	if (!info || !info->length || !info->data)
5033 		return -EINVAL;
5034 
5035 	status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
5036 				   NULL);
5037 	if (status)
5038 		return -EIO;
5039 
5040 	if (value[0] == ICE_MODULE_TYPE_SFP)
5041 		is_sfp = true;
5042 
5043 	data = info->data;
5044 	memset(data, 0, info->length);
5045 	for (i = 0; i < info->length; i += SFF_READ_BLOCK_SIZE) {
5046 		offset = i + info->offset;
5047 		page = 0;
5048 
5049 		/* Check if we need to access the other memory page */
5050 		if (is_sfp) {
5051 			if (offset >= ICE_MODULE_SFF_8079_LEN) {
5052 				offset -= ICE_MODULE_SFF_8079_LEN;
5053 				addr = ICE_I2C_EEPROM_DEV_ADDR2;
5054 			}
5055 		} else {
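			/* QSFP EEPROM: offsets 0..255 address page 0; every
			 * further 128-byte step selects the upper half
			 * (bytes 128..255) of the next page.
			 */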
5056 			while (offset >= ICE_MODULE_SFF_8436_LEN) {
5057 				/* Compute memory page number and offset. */
5058 				offset -= ICE_MODULE_SFF_8436_LEN / 2;
5059 				page++;
5060 			}
5061 		}
5062 
5063 		/* Bit 2 of eeprom address 0x02 declares upper
5064 		 * pages are disabled on QSFP modules.
5065 		 * SFP modules only ever use page 0.
5066 		 */
5067 		if (page == 0 || !(data[0x2] & 0x4)) {
5068 			/* If i2c bus is busy due to slow page change or
5069 			 * link management access, call can fail.
5070 			 * This is normal. So we retry this a few times.
5071 			 */
5072 			for (j = 0; j < I2C_BUSY_TRY_TIMES; j++) {
5073 				status = ice_aq_sff_eeprom(hw, 0, addr, offset,
5074 							   page, !is_sfp, value,
5075 							   SFF_READ_BLOCK_SIZE,
5076 							   0, NULL);
5077 				PMD_DRV_LOG(DEBUG, "SFF %02X %02X %02X %X = "
5078 					"%02X%02X%02X%02X."
5079 					"%02X%02X%02X%02X (%X)\n",
5080 					addr, offset, page, is_sfp,
5081 					value[0], value[1],
5082 					value[2], value[3],
5083 					value[4], value[5],
5084 					value[6], value[7],
5085 					status);
5086 				if (status) {
5087 					usleep_range(I2C_USLEEP_MIN_TIME,
5088 						     I2C_USLEEP_MAX_TIME);
5089 					memset(value, 0, SFF_READ_BLOCK_SIZE);
5090 					continue;
5091 				}
5092 				break;
5093 			}
5094 
5095 			/* Make sure we have enough room for the new block */
5096 			if ((i + SFF_READ_BLOCK_SIZE) < info->length)
5097 				memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
5098 		}
5099 	}
5100 
5101 	return 0;
5102 }
5103 
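/* The statistics registers are free-running hardware counters that cannot be
 * cleared, so the helpers below keep a snapshot ("offset") of each counter
 * and report the delta, accounting for 32-bit/40-bit wraparound.
 */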
5104 static void
5105 ice_stat_update_32(struct ice_hw *hw,
5106 		   uint32_t reg,
5107 		   bool offset_loaded,
5108 		   uint64_t *offset,
5109 		   uint64_t *stat)
5110 {
5111 	uint64_t new_data;
5112 
5113 	new_data = (uint64_t)ICE_READ_REG(hw, reg);
5114 	if (!offset_loaded)
5115 		*offset = new_data;
5116 
5117 	if (new_data >= *offset)
5118 		*stat = (uint64_t)(new_data - *offset);
5119 	else
5120 		*stat = (uint64_t)((new_data +
5121 				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
5122 				   - *offset);
5123 }
5124 
5125 static void
5126 ice_stat_update_40(struct ice_hw *hw,
5127 		   uint32_t hireg,
5128 		   uint32_t loreg,
5129 		   bool offset_loaded,
5130 		   uint64_t *offset,
5131 		   uint64_t *stat)
5132 {
5133 	uint64_t new_data;
5134 
5135 	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
5136 	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
5137 		    ICE_32_BIT_WIDTH;
5138 
5139 	if (!offset_loaded)
5140 		*offset = new_data;
5141 
5142 	if (new_data >= *offset)
5143 		*stat = new_data - *offset;
5144 	else
5145 		*stat = (uint64_t)((new_data +
5146 				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
5147 				   *offset);
5148 
5149 	*stat &= ICE_40_BIT_MASK;
5150 }
5151 
5152 /* Get all the statistics of a VSI */
5153 static void
5154 ice_update_vsi_stats(struct ice_vsi *vsi)
5155 {
5156 	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
5157 	struct ice_eth_stats *nes = &vsi->eth_stats;
5158 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
5159 	int idx = rte_le_to_cpu_16(vsi->vsi_id);
5160 
5161 	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
5162 			   vsi->offset_loaded, &oes->rx_bytes,
5163 			   &nes->rx_bytes);
5164 	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
5165 			   vsi->offset_loaded, &oes->rx_unicast,
5166 			   &nes->rx_unicast);
5167 	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
5168 			   vsi->offset_loaded, &oes->rx_multicast,
5169 			   &nes->rx_multicast);
5170 	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
5171 			   vsi->offset_loaded, &oes->rx_broadcast,
5172 			   &nes->rx_broadcast);
5173 	/* extend the 40-bit rx_bytes counter in software when it wraps around */
5174 	if (vsi->offset_loaded) {
5175 		if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
5176 			nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5177 		nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
5178 	}
5179 	vsi->old_rx_bytes = nes->rx_bytes;
5180 	/* exclude CRC bytes */
5181 	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
5182 			  nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
5183 
5184 	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
5185 			   &oes->rx_discards, &nes->rx_discards);
5186 	/* GLV_REPC not supported */
5187 	/* GLV_RMPC not supported */
5188 	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
5189 			   &oes->rx_unknown_protocol,
5190 			   &nes->rx_unknown_protocol);
5191 	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
5192 			   vsi->offset_loaded, &oes->tx_bytes,
5193 			   &nes->tx_bytes);
5194 	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
5195 			   vsi->offset_loaded, &oes->tx_unicast,
5196 			   &nes->tx_unicast);
5197 	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
5198 			   vsi->offset_loaded, &oes->tx_multicast,
5199 			   &nes->tx_multicast);
5200 	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
5201 			   vsi->offset_loaded,  &oes->tx_broadcast,
5202 			   &nes->tx_broadcast);
5203 	/* GLV_TDPC not supported */
5204 	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
5205 			   &oes->tx_errors, &nes->tx_errors);
5206 	/* extend the 40-bit tx_bytes counter in software when it wraps around */
5207 	if (vsi->offset_loaded) {
5208 		if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
5209 			nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5210 		nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
5211 	}
5212 	vsi->old_tx_bytes = nes->tx_bytes;
5213 	vsi->offset_loaded = true;
5214 
5215 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
5216 		    vsi->vsi_id);
5217 	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
5218 	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
5219 	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
5220 	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
5221 	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
5222 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
5223 		    nes->rx_unknown_protocol);
5224 	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
5225 	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
5226 	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
5227 	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
5228 	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
5229 	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
5230 	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
5231 		    vsi->vsi_id);
5232 }
5233 
5234 static void
5235 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
5236 {
5237 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
5238 	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
5239 
5240 	/* Get statistics of struct ice_eth_stats */
5241 	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
5242 			   GLPRT_GORCL(hw->port_info->lport),
5243 			   pf->offset_loaded, &os->eth.rx_bytes,
5244 			   &ns->eth.rx_bytes);
5245 	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
5246 			   GLPRT_UPRCL(hw->port_info->lport),
5247 			   pf->offset_loaded, &os->eth.rx_unicast,
5248 			   &ns->eth.rx_unicast);
5249 	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
5250 			   GLPRT_MPRCL(hw->port_info->lport),
5251 			   pf->offset_loaded, &os->eth.rx_multicast,
5252 			   &ns->eth.rx_multicast);
5253 	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
5254 			   GLPRT_BPRCL(hw->port_info->lport),
5255 			   pf->offset_loaded, &os->eth.rx_broadcast,
5256 			   &ns->eth.rx_broadcast);
5257 	ice_stat_update_32(hw, PRTRPB_RDPC,
5258 			   pf->offset_loaded, &os->eth.rx_discards,
5259 			   &ns->eth.rx_discards);
5260 	/* extend the 40-bit rx_bytes counter in software when it wraps around */
5261 	if (pf->offset_loaded) {
5262 		if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
5263 			ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5264 		ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
5265 	}
5266 	pf->old_rx_bytes = ns->eth.rx_bytes;
5267 
5268 	/* Workaround: CRC size should not be included in byte statistics,
5269 	 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
5270 	 * packet.
5271 	 */
5272 	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
5273 			     ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
5274 
5275 	/* GLPRT_REPC not supported */
5276 	/* GLPRT_RMPC not supported */
5277 	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
5278 			   pf->offset_loaded,
5279 			   &os->eth.rx_unknown_protocol,
5280 			   &ns->eth.rx_unknown_protocol);
5281 	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
5282 			   GLPRT_GOTCL(hw->port_info->lport),
5283 			   pf->offset_loaded, &os->eth.tx_bytes,
5284 			   &ns->eth.tx_bytes);
5285 	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
5286 			   GLPRT_UPTCL(hw->port_info->lport),
5287 			   pf->offset_loaded, &os->eth.tx_unicast,
5288 			   &ns->eth.tx_unicast);
5289 	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
5290 			   GLPRT_MPTCL(hw->port_info->lport),
5291 			   pf->offset_loaded, &os->eth.tx_multicast,
5292 			   &ns->eth.tx_multicast);
5293 	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
5294 			   GLPRT_BPTCL(hw->port_info->lport),
5295 			   pf->offset_loaded, &os->eth.tx_broadcast,
5296 			   &ns->eth.tx_broadcast);
5297 	/* extend the 40-bit tx_bytes counter in software when it wraps around */
5298 	if (pf->offset_loaded) {
5299 		if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
5300 			ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
5301 		ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
5302 	}
5303 	pf->old_tx_bytes = ns->eth.tx_bytes;
5304 	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
5305 			     ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
5306 
5307 	/* GLPRT_TEPC not supported */
5308 
5309 	/* additional port specific stats */
5310 	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
5311 			   pf->offset_loaded, &os->tx_dropped_link_down,
5312 			   &ns->tx_dropped_link_down);
5313 	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
5314 			   pf->offset_loaded, &os->crc_errors,
5315 			   &ns->crc_errors);
5316 	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
5317 			   pf->offset_loaded, &os->illegal_bytes,
5318 			   &ns->illegal_bytes);
5319 	/* GLPRT_ERRBC not supported */
5320 	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
5321 			   pf->offset_loaded, &os->mac_local_faults,
5322 			   &ns->mac_local_faults);
5323 	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
5324 			   pf->offset_loaded, &os->mac_remote_faults,
5325 			   &ns->mac_remote_faults);
5326 
5327 	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
5328 			   pf->offset_loaded, &os->rx_len_errors,
5329 			   &ns->rx_len_errors);
5330 
5331 	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
5332 			   pf->offset_loaded, &os->link_xon_rx,
5333 			   &ns->link_xon_rx);
5334 	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
5335 			   pf->offset_loaded, &os->link_xoff_rx,
5336 			   &ns->link_xoff_rx);
5337 	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
5338 			   pf->offset_loaded, &os->link_xon_tx,
5339 			   &ns->link_xon_tx);
5340 	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
5341 			   pf->offset_loaded, &os->link_xoff_tx,
5342 			   &ns->link_xoff_tx);
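	/* The packet-size histogram counters below are 40-bit values split
	 * across an H/L register pair, hence ice_stat_update_40().
	 */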
5343 	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
5344 			   GLPRT_PRC64L(hw->port_info->lport),
5345 			   pf->offset_loaded, &os->rx_size_64,
5346 			   &ns->rx_size_64);
5347 	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
5348 			   GLPRT_PRC127L(hw->port_info->lport),
5349 			   pf->offset_loaded, &os->rx_size_127,
5350 			   &ns->rx_size_127);
5351 	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
5352 			   GLPRT_PRC255L(hw->port_info->lport),
5353 			   pf->offset_loaded, &os->rx_size_255,
5354 			   &ns->rx_size_255);
5355 	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
5356 			   GLPRT_PRC511L(hw->port_info->lport),
5357 			   pf->offset_loaded, &os->rx_size_511,
5358 			   &ns->rx_size_511);
5359 	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
5360 			   GLPRT_PRC1023L(hw->port_info->lport),
5361 			   pf->offset_loaded, &os->rx_size_1023,
5362 			   &ns->rx_size_1023);
5363 	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
5364 			   GLPRT_PRC1522L(hw->port_info->lport),
5365 			   pf->offset_loaded, &os->rx_size_1522,
5366 			   &ns->rx_size_1522);
5367 	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
5368 			   GLPRT_PRC9522L(hw->port_info->lport),
5369 			   pf->offset_loaded, &os->rx_size_big,
5370 			   &ns->rx_size_big);
5371 	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
5372 			   pf->offset_loaded, &os->rx_undersize,
5373 			   &ns->rx_undersize);
5374 	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
5375 			   pf->offset_loaded, &os->rx_fragments,
5376 			   &ns->rx_fragments);
5377 	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
5378 			   pf->offset_loaded, &os->rx_oversize,
5379 			   &ns->rx_oversize);
5380 	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
5381 			   pf->offset_loaded, &os->rx_jabber,
5382 			   &ns->rx_jabber);
5383 	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
5384 			   GLPRT_PTC64L(hw->port_info->lport),
5385 			   pf->offset_loaded, &os->tx_size_64,
5386 			   &ns->tx_size_64);
5387 	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
5388 			   GLPRT_PTC127L(hw->port_info->lport),
5389 			   pf->offset_loaded, &os->tx_size_127,
5390 			   &ns->tx_size_127);
5391 	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
5392 			   GLPRT_PTC255L(hw->port_info->lport),
5393 			   pf->offset_loaded, &os->tx_size_255,
5394 			   &ns->tx_size_255);
5395 	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
5396 			   GLPRT_PTC511L(hw->port_info->lport),
5397 			   pf->offset_loaded, &os->tx_size_511,
5398 			   &ns->tx_size_511);
5399 	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
5400 			   GLPRT_PTC1023L(hw->port_info->lport),
5401 			   pf->offset_loaded, &os->tx_size_1023,
5402 			   &ns->tx_size_1023);
5403 	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
5404 			   GLPRT_PTC1522L(hw->port_info->lport),
5405 			   pf->offset_loaded, &os->tx_size_1522,
5406 			   &ns->tx_size_1522);
5407 	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
5408 			   GLPRT_PTC9522L(hw->port_info->lport),
5409 			   pf->offset_loaded, &os->tx_size_big,
5410 			   &ns->tx_size_big);
5411 
5412 	/* GLPRT_MSPDC not supported */
5413 	/* GLPRT_XEC not supported */
5414 
5415 	pf->offset_loaded = true;
5416 
5417 	if (pf->main_vsi)
5418 		ice_update_vsi_stats(pf->main_vsi);
5419 }
5420 
5421 /* Get all statistics of a port */
5422 static int
5423 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
5424 {
5425 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5426 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5427 	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
5428 
5429 	/* Read the hardware counter registers and refresh the stats structures */
5430 	ice_read_stats_registers(pf, hw);
5431 
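	/* Derive the basic ethdev counters: Rx packet/byte counts come from the
	 * main VSI stats (discarded packets are excluded from ipackets and
	 * reported through imissed below), Tx counts from the port-level stats.
	 */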
5432 	stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
5433 			  pf->main_vsi->eth_stats.rx_multicast +
5434 			  pf->main_vsi->eth_stats.rx_broadcast -
5435 			  pf->main_vsi->eth_stats.rx_discards;
5436 	stats->opackets = ns->eth.tx_unicast +
5437 			  ns->eth.tx_multicast +
5438 			  ns->eth.tx_broadcast;
5439 	stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
5440 	stats->obytes   = ns->eth.tx_bytes;
5441 	stats->oerrors  = ns->eth.tx_errors +
5442 			  pf->main_vsi->eth_stats.tx_errors;
5443 
5444 	/* Rx Errors */
5445 	stats->imissed  = ns->eth.rx_discards +
5446 			  pf->main_vsi->eth_stats.rx_discards;
5447 	stats->ierrors  = ns->crc_errors +
5448 			  ns->rx_undersize +
5449 			  ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
5450 
5451 	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
5452 	PMD_DRV_LOG(DEBUG, "rx_bytes:	%"PRIu64"", ns->eth.rx_bytes);
5453 	PMD_DRV_LOG(DEBUG, "rx_unicast:	%"PRIu64"", ns->eth.rx_unicast);
5454 	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
5455 	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
5456 	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
5457 	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
5458 		    pf->main_vsi->eth_stats.rx_discards);
5459 	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol:  %"PRIu64"",
5460 		    ns->eth.rx_unknown_protocol);
5461 	PMD_DRV_LOG(DEBUG, "tx_bytes:	%"PRIu64"", ns->eth.tx_bytes);
5462 	PMD_DRV_LOG(DEBUG, "tx_unicast:	%"PRIu64"", ns->eth.tx_unicast);
5463 	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
5464 	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
5465 	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
5466 	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
5467 		    pf->main_vsi->eth_stats.tx_discards);
5468 	PMD_DRV_LOG(DEBUG, "tx_errors:		%"PRIu64"", ns->eth.tx_errors);
5469 
5470 	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:	%"PRIu64"",
5471 		    ns->tx_dropped_link_down);
5472 	PMD_DRV_LOG(DEBUG, "crc_errors:	%"PRIu64"", ns->crc_errors);
5473 	PMD_DRV_LOG(DEBUG, "illegal_bytes:	%"PRIu64"",
5474 		    ns->illegal_bytes);
5475 	PMD_DRV_LOG(DEBUG, "error_bytes:	%"PRIu64"", ns->error_bytes);
5476 	PMD_DRV_LOG(DEBUG, "mac_local_faults:	%"PRIu64"",
5477 		    ns->mac_local_faults);
5478 	PMD_DRV_LOG(DEBUG, "mac_remote_faults:	%"PRIu64"",
5479 		    ns->mac_remote_faults);
5480 	PMD_DRV_LOG(DEBUG, "link_xon_rx:	%"PRIu64"", ns->link_xon_rx);
5481 	PMD_DRV_LOG(DEBUG, "link_xoff_rx:	%"PRIu64"", ns->link_xoff_rx);
5482 	PMD_DRV_LOG(DEBUG, "link_xon_tx:	%"PRIu64"", ns->link_xon_tx);
5483 	PMD_DRV_LOG(DEBUG, "link_xoff_tx:	%"PRIu64"", ns->link_xoff_tx);
5484 	PMD_DRV_LOG(DEBUG, "rx_size_64:		%"PRIu64"", ns->rx_size_64);
5485 	PMD_DRV_LOG(DEBUG, "rx_size_127:	%"PRIu64"", ns->rx_size_127);
5486 	PMD_DRV_LOG(DEBUG, "rx_size_255:	%"PRIu64"", ns->rx_size_255);
5487 	PMD_DRV_LOG(DEBUG, "rx_size_511:	%"PRIu64"", ns->rx_size_511);
5488 	PMD_DRV_LOG(DEBUG, "rx_size_1023:	%"PRIu64"", ns->rx_size_1023);
5489 	PMD_DRV_LOG(DEBUG, "rx_size_1522:	%"PRIu64"", ns->rx_size_1522);
5490 	PMD_DRV_LOG(DEBUG, "rx_size_big:	%"PRIu64"", ns->rx_size_big);
5491 	PMD_DRV_LOG(DEBUG, "rx_undersize:	%"PRIu64"", ns->rx_undersize);
5492 	PMD_DRV_LOG(DEBUG, "rx_fragments:	%"PRIu64"", ns->rx_fragments);
5493 	PMD_DRV_LOG(DEBUG, "rx_oversize:	%"PRIu64"", ns->rx_oversize);
5494 	PMD_DRV_LOG(DEBUG, "rx_jabber:		%"PRIu64"", ns->rx_jabber);
5495 	PMD_DRV_LOG(DEBUG, "tx_size_64:		%"PRIu64"", ns->tx_size_64);
5496 	PMD_DRV_LOG(DEBUG, "tx_size_127:	%"PRIu64"", ns->tx_size_127);
5497 	PMD_DRV_LOG(DEBUG, "tx_size_255:	%"PRIu64"", ns->tx_size_255);
5498 	PMD_DRV_LOG(DEBUG, "tx_size_511:	%"PRIu64"", ns->tx_size_511);
5499 	PMD_DRV_LOG(DEBUG, "tx_size_1023:	%"PRIu64"", ns->tx_size_1023);
5500 	PMD_DRV_LOG(DEBUG, "tx_size_1522:	%"PRIu64"", ns->tx_size_1522);
5501 	PMD_DRV_LOG(DEBUG, "tx_size_big:	%"PRIu64"", ns->tx_size_big);
5502 	PMD_DRV_LOG(DEBUG, "rx_len_errors:	%"PRIu64"", ns->rx_len_errors);
5503 	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
5504 	return 0;
5505 }
5506 
5507 /* Reset the statistics */
5508 static int
5509 ice_stats_reset(struct rte_eth_dev *dev)
5510 {
5511 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5512 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5513 
5514 	/* Mark PF and VSI stats to update the offset, aka "reset" */
5515 	pf->offset_loaded = false;
5516 	if (pf->main_vsi)
5517 		pf->main_vsi->offset_loaded = false;
5518 
5519 	/* Read the stats again so the current register values become the new offsets */
5520 	ice_read_stats_registers(pf, hw);
5521 
5522 	return 0;
5523 }
5524 
5525 static uint32_t
5526 ice_xstats_calc_num(void)
5527 {
5528 	uint32_t num;
5529 
5530 	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
5531 
5532 	return num;
5533 }
5534 
5535 static int
5536 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
5537 	       unsigned int n)
5538 {
5539 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5540 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5541 	unsigned int i;
5542 	unsigned int count;
5543 	struct ice_hw_port_stats *hw_stats = &pf->stats;
5544 
5545 	count = ice_xstats_calc_num();
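	/* Per the ethdev xstats contract, report the required number of entries
	 * when the caller's array is too small.
	 */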
5546 	if (n < count)
5547 		return count;
5548 
5549 	ice_read_stats_registers(pf, hw);
5550 
5551 	if (!xstats)
5552 		return 0;
5553 
5554 	count = 0;
5555 
5556 	/* Get stats from ice_eth_stats struct */
5557 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5558 		xstats[count].value =
5559 			*(uint64_t *)((char *)&hw_stats->eth +
5560 				      ice_stats_strings[i].offset);
5561 		xstats[count].id = count;
5562 		count++;
5563 	}
5564 
5565 	/* Get individual stats from ice_hw_port struct */
5566 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5567 		xstats[count].value =
5568 			*(uint64_t *)((char *)hw_stats +
5569 				      ice_hw_port_strings[i].offset);
5570 		xstats[count].id = count;
5571 		count++;
5572 	}
5573 
5574 	return count;
5575 }
5576 
5577 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
5578 				struct rte_eth_xstat_name *xstats_names,
5579 				__rte_unused unsigned int limit)
5580 {
5581 	unsigned int count = 0;
5582 	unsigned int i;
5583 
5584 	if (!xstats_names)
5585 		return ice_xstats_calc_num();
5586 
5587 	/* Note: limit checked in rte_eth_xstats_get_names() */
5588 
5589 	/* Get stats from ice_eth_stats struct */
5590 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
5591 		strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
5592 			sizeof(xstats_names[count].name));
5593 		count++;
5594 	}
5595 
5596 	/* Get individual stats from ice_hw_port struct */
5597 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
5598 		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
5599 			sizeof(xstats_names[count].name));
5600 		count++;
5601 	}
5602 
5603 	return count;
5604 }
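
/*
 * Illustrative usage from an application (not part of the driver): query the
 * number of xstats first, then fetch names and values. Port id 0 is only a
 * placeholder here.
 *
 *   int n = rte_eth_xstats_get_names(0, NULL, 0);
 *   struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *   struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *   rte_eth_xstats_get_names(0, names, n);
 *   rte_eth_xstats_get(0, vals, n);
 */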
5605 
5606 static int
5607 ice_dev_flow_ops_get(struct rte_eth_dev *dev,
5608 		     const struct rte_flow_ops **ops)
5609 {
5610 	if (!dev)
5611 		return -EINVAL;
5612 
5613 	*ops = &ice_flow_ops;
5614 	return 0;
5615 }
5616 
5617 /* Add UDP tunneling port */
5618 static int
5619 ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
5620 			     struct rte_eth_udp_tunnel *udp_tunnel)
5621 {
5622 	int ret = 0;
5623 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5624 
5625 	if (udp_tunnel == NULL)
5626 		return -EINVAL;
5627 
5628 	switch (udp_tunnel->prot_type) {
5629 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
5630 		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
5631 		break;
5632 	default:
5633 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5634 		ret = -EINVAL;
5635 		break;
5636 	}
5637 
5638 	return ret;
5639 }
5640 
5641 /* Delete UDP tunneling port */
5642 static int
5643 ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
5644 			     struct rte_eth_udp_tunnel *udp_tunnel)
5645 {
5646 	int ret = 0;
5647 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5648 
5649 	if (udp_tunnel == NULL)
5650 		return -EINVAL;
5651 
5652 	switch (udp_tunnel->prot_type) {
5653 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
5654 		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
5655 		break;
5656 	default:
5657 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5658 		ret = -EINVAL;
5659 		break;
5660 	}
5661 
5662 	return ret;
5663 }
5664 
5665 static int
5666 ice_timesync_enable(struct rte_eth_dev *dev)
5667 {
5668 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5669 	struct ice_adapter *ad =
5670 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5671 	int ret;
5672 
5673 	if (dev->data->dev_started && !(dev->data->dev_conf.rxmode.offloads &
5674 	    RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
5675 		PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
5676 		return -1;
5677 	}
5678 
5679 	if (hw->func_caps.ts_func_info.src_tmr_owned) {
5680 		ret = ice_ptp_init_phc(hw);
5681 		if (ret) {
5682 			PMD_DRV_LOG(ERR, "Failed to initialize PHC");
5683 			return -1;
5684 		}
5685 
5686 		ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
5687 		if (ret) {
5688 			PMD_DRV_LOG(ERR,
5689 				"Failed to write PHC increment time value");
5690 			return -1;
5691 		}
5692 	}
5693 
5694 	/* Initialize cycle counters for system time/RX/TX timestamp */
5695 	memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
5696 	memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5697 	memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5698 
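	/* With cc_shift = 0 and no nsec mask, the timecounters treat the raw
	 * hardware counter values directly as nanoseconds.
	 */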
5699 	ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
5700 	ad->systime_tc.cc_shift = 0;
5701 	ad->systime_tc.nsec_mask = 0;
5702 
5703 	ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
5704 	ad->rx_tstamp_tc.cc_shift = 0;
5705 	ad->rx_tstamp_tc.nsec_mask = 0;
5706 
5707 	ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
5708 	ad->tx_tstamp_tc.cc_shift = 0;
5709 	ad->tx_tstamp_tc.nsec_mask = 0;
5710 
5711 	ad->ptp_ena = 1;
5712 
5713 	return 0;
5714 }
5715 
5716 static int
5717 ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
5718 			       struct timespec *timestamp, uint32_t flags)
5719 {
5720 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5721 	struct ice_adapter *ad =
5722 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5723 	struct ice_rx_queue *rxq;
5724 	uint32_t ts_high;
5725 	uint64_t ts_ns, ns;
5726 
5727 	rxq = dev->data->rx_queues[flags];
5728 
5729 	ts_high = rxq->time_high;
5730 	ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, ts_high);
5731 	ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
5732 	*timestamp = rte_ns_to_timespec(ns);
5733 
5734 	return 0;
5735 }
5736 
5737 static int
5738 ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
5739 			       struct timespec *timestamp)
5740 {
5741 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5742 	struct ice_adapter *ad =
5743 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5744 	uint8_t lport;
5745 	uint64_t ts_ns, ns, tstamp;
5746 	const uint64_t mask = 0xFFFFFFFF;
5747 	int ret;
5748 
5749 	lport = hw->port_info->lport;
5750 
5751 	ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
5752 	if (ret) {
5753 		PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
5754 		return -1;
5755 	}
5756 
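	/* Keep the upper 32 bits of the 40-bit PHY timestamp and extend them
	 * to a full 64-bit nanosecond value.
	 */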
5757 	ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, (tstamp >> 8) & mask);
5758 	ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
5759 	*timestamp = rte_ns_to_timespec(ns);
5760 
5761 	return 0;
5762 }
5763 
5764 static int
5765 ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
5766 {
5767 	struct ice_adapter *ad =
5768 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5769 
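	/* The delta is applied in software to the timecounters only; the
	 * hardware PHC itself is left untouched.
	 */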
5770 	ad->systime_tc.nsec += delta;
5771 	ad->rx_tstamp_tc.nsec += delta;
5772 	ad->tx_tstamp_tc.nsec += delta;
5773 
5774 	return 0;
5775 }
5776 
5777 static int
5778 ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
5779 {
5780 	struct ice_adapter *ad =
5781 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5782 	uint64_t ns;
5783 
5784 	ns = rte_timespec_to_ns(ts);
5785 
5786 	ad->systime_tc.nsec = ns;
5787 	ad->rx_tstamp_tc.nsec = ns;
5788 	ad->tx_tstamp_tc.nsec = ns;
5789 
5790 	return 0;
5791 }
5792 
5793 static int
5794 ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
5795 {
5796 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5797 	struct ice_adapter *ad =
5798 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5799 	uint32_t hi, lo, lo2;
5800 	uint64_t time, ns;
5801 
5802 	lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
5803 	hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
5804 	lo2 = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
5805 
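	/* If the low register wrapped between the two reads, the hi/lo pair is
	 * inconsistent; read the pair again.
	 */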
5806 	if (lo2 < lo) {
5807 		lo = ICE_READ_REG(hw, GLTSYN_TIME_L(0));
5808 		hi = ICE_READ_REG(hw, GLTSYN_TIME_H(0));
5809 	}
5810 
5811 	time = ((uint64_t)hi << 32) | lo;
5812 	ns = rte_timecounter_update(&ad->systime_tc, time);
5813 	*ts = rte_ns_to_timespec(ns);
5814 
5815 	return 0;
5816 }
5817 
5818 static int
5819 ice_timesync_disable(struct rte_eth_dev *dev)
5820 {
5821 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5822 	struct ice_adapter *ad =
5823 			ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
5824 	uint64_t val;
5825 	uint8_t lport;
5826 
5827 	lport = hw->port_info->lport;
5828 
5829 	ice_clear_phy_tstamp(hw, lport, 0);
5830 
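	/* Disable timer 0 and zero its increment value so the PHC stops
	 * advancing.
	 */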
5831 	val = ICE_READ_REG(hw, GLTSYN_ENA(0));
5832 	val &= ~GLTSYN_ENA_TSYN_ENA_M;
5833 	ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
5834 
5835 	ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
5836 	ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
5837 
5838 	ad->ptp_ena = 0;
5839 
5840 	return 0;
5841 }
5842 
5843 static int
5844 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
5845 	      struct rte_pci_device *pci_dev)
5846 {
5847 	return rte_eth_dev_pci_generic_probe(pci_dev,
5848 					     sizeof(struct ice_adapter),
5849 					     ice_dev_init);
5850 }
5851 
5852 static int
5853 ice_pci_remove(struct rte_pci_device *pci_dev)
5854 {
5855 	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
5856 }
5857 
5858 static struct rte_pci_driver rte_ice_pmd = {
5859 	.id_table = pci_id_ice_map,
5860 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
5861 	.probe = ice_pci_probe,
5862 	.remove = ice_pci_remove,
5863 };
5864 
5865 /**
5866  * Driver initialization routine.
5867  * Invoked once at EAL init time.
5868  * Registers itself as the Poll Mode Driver of PCI devices.
5869  */
5870 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
5871 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
5872 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
5873 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
5874 			      ICE_HW_DEBUG_MASK_ARG "=0xXXX"
5875 			      ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset>"
5876 			      ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>"
5877 			      ICE_PIPELINE_MODE_SUPPORT_ARG "=<0|1>"
5878 			      ICE_RX_LOW_LATENCY_ARG "=<0|1>");
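
/*
 * Illustrative devargs usage (the PCI address below is a placeholder):
 *   dpdk-testpmd -a 0000:18:00.0,proto_xtr=vlan,rx_low_latency=1 -- -i
 */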
5879 
5880 RTE_LOG_REGISTER_SUFFIX(ice_logtype_init, init, NOTICE);
5881 RTE_LOG_REGISTER_SUFFIX(ice_logtype_driver, driver, NOTICE);
5882 #ifdef RTE_ETHDEV_DEBUG_RX
5883 RTE_LOG_REGISTER_SUFFIX(ice_logtype_rx, rx, DEBUG);
5884 #endif
5885 #ifdef RTE_ETHDEV_DEBUG_TX
5886 RTE_LOG_REGISTER_SUFFIX(ice_logtype_tx, tx, DEBUG);
5887 #endif
5888