xref: /dpdk/drivers/net/txgbe/txgbe_ethdev.c (revision 3e3f736e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5 
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <rte_common.h>
11 #include <ethdev_pci.h>
12 
13 #include <rte_interrupts.h>
14 #include <rte_log.h>
15 #include <rte_debug.h>
16 #include <rte_pci.h>
17 #include <rte_memory.h>
18 #include <rte_eal.h>
19 #include <rte_alarm.h>
20 #include <rte_kvargs.h>
21 
22 #include "txgbe_logs.h"
23 #include "base/txgbe.h"
24 #include "txgbe_ethdev.h"
25 #include "txgbe_rxtx.h"
26 #include "txgbe_regs_group.h"
27 
28 static const struct reg_info txgbe_regs_general[] = {
29 	{TXGBE_RST, 1, 1, "TXGBE_RST"},
30 	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
31 	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
32 	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
33 	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
34 	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
35 	{0, 0, 0, ""}
36 };
37 
38 static const struct reg_info txgbe_regs_nvm[] = {
39 	{0, 0, 0, ""}
40 };
41 
42 static const struct reg_info txgbe_regs_interrupt[] = {
43 	{0, 0, 0, ""}
44 };
45 
46 static const struct reg_info txgbe_regs_fctl_others[] = {
47 	{0, 0, 0, ""}
48 };
49 
50 static const struct reg_info txgbe_regs_rxdma[] = {
51 	{0, 0, 0, ""}
52 };
53 
54 static const struct reg_info txgbe_regs_rx[] = {
55 	{0, 0, 0, ""}
56 };
57 
58 static struct reg_info txgbe_regs_tx[] = {
59 	{0, 0, 0, ""}
60 };
61 
62 static const struct reg_info txgbe_regs_wakeup[] = {
63 	{0, 0, 0, ""}
64 };
65 
66 static const struct reg_info txgbe_regs_dcb[] = {
67 	{0, 0, 0, ""}
68 };
69 
70 static const struct reg_info txgbe_regs_mac[] = {
71 	{0, 0, 0, ""}
72 };
73 
74 static const struct reg_info txgbe_regs_diagnostic[] = {
75 	{0, 0, 0, ""},
76 };
77 
78 /* PF registers */
79 static const struct reg_info *txgbe_regs_others[] = {
80 				txgbe_regs_general,
81 				txgbe_regs_nvm,
82 				txgbe_regs_interrupt,
83 				txgbe_regs_fctl_others,
84 				txgbe_regs_rxdma,
85 				txgbe_regs_rx,
86 				txgbe_regs_tx,
87 				txgbe_regs_wakeup,
88 				txgbe_regs_dcb,
89 				txgbe_regs_mac,
90 				txgbe_regs_diagnostic,
91 				NULL};
92 
93 static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
94 static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
95 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
96 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
97 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
98 static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
99 static int txgbe_dev_close(struct rte_eth_dev *dev);
100 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
101 				int wait_to_complete);
102 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
103 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
104 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
105 					uint16_t queue);
106 
107 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
108 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
109 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
110 static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
111 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
112 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
113 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
114 				      struct rte_intr_handle *handle);
115 static void txgbe_dev_interrupt_handler(void *param);
116 static void txgbe_dev_interrupt_delayed_handler(void *param);
117 static void txgbe_configure_msix(struct rte_eth_dev *dev);
118 
119 static int txgbe_filter_restore(struct rte_eth_dev *dev);
120 static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
121 
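/*
 * Per-queue hardware VLAN-strip bitmap helpers: queue q is tracked in
 * bitmap word q / (bits per entry) at bit position q % (bits per entry),
 * where the entry width is derived from sizeof(h->bitmap[0]) * NBBY.
 */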
122 #define TXGBE_SET_HWSTRIP(h, q) do {\
123 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
124 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
125 		(h)->bitmap[idx] |= 1 << bit;\
126 	} while (0)
127 
128 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
129 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
130 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
131 		(h)->bitmap[idx] &= ~(1 << bit);\
132 	} while (0)
133 
134 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
135 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
136 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
137 		(r) = (h)->bitmap[idx] >> bit & 1;\
138 	} while (0)
139 
140 /*
141  * The set of PCI devices this driver supports
142  */
143 static const struct rte_pci_id pci_id_txgbe_map[] = {
144 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_SP1000) },
145 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820) },
146 	{ .vendor_id = 0, /* sentinel */ },
147 };
148 
149 static const struct rte_eth_desc_lim rx_desc_lim = {
150 	.nb_max = TXGBE_RING_DESC_MAX,
151 	.nb_min = TXGBE_RING_DESC_MIN,
152 	.nb_align = TXGBE_RXD_ALIGN,
153 };
154 
155 static const struct rte_eth_desc_lim tx_desc_lim = {
156 	.nb_max = TXGBE_RING_DESC_MAX,
157 	.nb_min = TXGBE_RING_DESC_MIN,
158 	.nb_align = TXGBE_TXD_ALIGN,
159 	.nb_seg_max = TXGBE_TX_MAX_SEG,
160 	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
161 };
162 
163 static const struct eth_dev_ops txgbe_eth_dev_ops;
164 
165 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
166 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
167 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
168 	/* MNG RxTx */
169 	HW_XSTAT(mng_bmc2host_packets),
170 	HW_XSTAT(mng_host2bmc_packets),
171 	/* Basic RxTx */
172 	HW_XSTAT(rx_packets),
173 	HW_XSTAT(tx_packets),
174 	HW_XSTAT(rx_bytes),
175 	HW_XSTAT(tx_bytes),
176 	HW_XSTAT(rx_total_bytes),
177 	HW_XSTAT(rx_total_packets),
178 	HW_XSTAT(tx_total_packets),
179 	HW_XSTAT(rx_total_missed_packets),
180 	HW_XSTAT(rx_broadcast_packets),
181 	HW_XSTAT(rx_multicast_packets),
182 	HW_XSTAT(rx_management_packets),
183 	HW_XSTAT(tx_management_packets),
184 	HW_XSTAT(rx_management_dropped),
185 
186 	/* Basic Error */
187 	HW_XSTAT(rx_crc_errors),
188 	HW_XSTAT(rx_illegal_byte_errors),
189 	HW_XSTAT(rx_error_bytes),
190 	HW_XSTAT(rx_mac_short_packet_dropped),
191 	HW_XSTAT(rx_length_errors),
192 	HW_XSTAT(rx_undersize_errors),
193 	HW_XSTAT(rx_fragment_errors),
194 	HW_XSTAT(rx_oversize_errors),
195 	HW_XSTAT(rx_jabber_errors),
196 	HW_XSTAT(rx_l3_l4_xsum_error),
197 	HW_XSTAT(mac_local_errors),
198 	HW_XSTAT(mac_remote_errors),
199 
200 	/* Flow Director */
201 	HW_XSTAT(flow_director_added_filters),
202 	HW_XSTAT(flow_director_removed_filters),
203 	HW_XSTAT(flow_director_filter_add_errors),
204 	HW_XSTAT(flow_director_filter_remove_errors),
205 	HW_XSTAT(flow_director_matched_filters),
206 	HW_XSTAT(flow_director_missed_filters),
207 
208 	/* FCoE */
209 	HW_XSTAT(rx_fcoe_crc_errors),
210 	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
211 	HW_XSTAT(rx_fcoe_dropped),
212 	HW_XSTAT(rx_fcoe_packets),
213 	HW_XSTAT(tx_fcoe_packets),
214 	HW_XSTAT(rx_fcoe_bytes),
215 	HW_XSTAT(tx_fcoe_bytes),
216 	HW_XSTAT(rx_fcoe_no_ddp),
217 	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
218 
219 	/* MACSEC */
220 	HW_XSTAT(tx_macsec_pkts_untagged),
221 	HW_XSTAT(tx_macsec_pkts_encrypted),
222 	HW_XSTAT(tx_macsec_pkts_protected),
223 	HW_XSTAT(tx_macsec_octets_encrypted),
224 	HW_XSTAT(tx_macsec_octets_protected),
225 	HW_XSTAT(rx_macsec_pkts_untagged),
226 	HW_XSTAT(rx_macsec_pkts_badtag),
227 	HW_XSTAT(rx_macsec_pkts_nosci),
228 	HW_XSTAT(rx_macsec_pkts_unknownsci),
229 	HW_XSTAT(rx_macsec_octets_decrypted),
230 	HW_XSTAT(rx_macsec_octets_validated),
231 	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
232 	HW_XSTAT(rx_macsec_sc_pkts_delayed),
233 	HW_XSTAT(rx_macsec_sc_pkts_late),
234 	HW_XSTAT(rx_macsec_sa_pkts_ok),
235 	HW_XSTAT(rx_macsec_sa_pkts_invalid),
236 	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
237 	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
238 	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
239 
240 	/* MAC RxTx */
241 	HW_XSTAT(rx_size_64_packets),
242 	HW_XSTAT(rx_size_65_to_127_packets),
243 	HW_XSTAT(rx_size_128_to_255_packets),
244 	HW_XSTAT(rx_size_256_to_511_packets),
245 	HW_XSTAT(rx_size_512_to_1023_packets),
246 	HW_XSTAT(rx_size_1024_to_max_packets),
247 	HW_XSTAT(tx_size_64_packets),
248 	HW_XSTAT(tx_size_65_to_127_packets),
249 	HW_XSTAT(tx_size_128_to_255_packets),
250 	HW_XSTAT(tx_size_256_to_511_packets),
251 	HW_XSTAT(tx_size_512_to_1023_packets),
252 	HW_XSTAT(tx_size_1024_to_max_packets),
253 
254 	/* Flow Control */
255 	HW_XSTAT(tx_xon_packets),
256 	HW_XSTAT(rx_xon_packets),
257 	HW_XSTAT(tx_xoff_packets),
258 	HW_XSTAT(rx_xoff_packets),
259 
260 	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
261 	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
262 	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
263 	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
264 };
265 
266 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
267 			   sizeof(rte_txgbe_stats_strings[0]))
268 
269 /* Per-priority statistics */
270 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
271 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
272 	UP_XSTAT(rx_up_packets),
273 	UP_XSTAT(tx_up_packets),
274 	UP_XSTAT(rx_up_bytes),
275 	UP_XSTAT(tx_up_bytes),
276 	UP_XSTAT(rx_up_drop_packets),
277 
278 	UP_XSTAT(tx_up_xon_packets),
279 	UP_XSTAT(rx_up_xon_packets),
280 	UP_XSTAT(tx_up_xoff_packets),
281 	UP_XSTAT(rx_up_xoff_packets),
282 	UP_XSTAT(rx_up_dropped),
283 	UP_XSTAT(rx_up_mbuf_alloc_errors),
284 	UP_XSTAT(tx_up_xon2off_packets),
285 };
286 
287 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
288 			   sizeof(rte_txgbe_up_strings[0]))
289 
290 /* Per-queue statistics */
291 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
292 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
293 	QP_XSTAT(rx_qp_packets),
294 	QP_XSTAT(tx_qp_packets),
295 	QP_XSTAT(rx_qp_bytes),
296 	QP_XSTAT(tx_qp_bytes),
297 	QP_XSTAT(rx_qp_mc_packets),
298 };
299 
300 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
301 			   sizeof(rte_txgbe_qp_strings[0]))
302 
303 static inline int
304 txgbe_is_sfp(struct txgbe_hw *hw)
305 {
306 	switch (hw->phy.type) {
307 	case txgbe_phy_sfp_avago:
308 	case txgbe_phy_sfp_ftl:
309 	case txgbe_phy_sfp_intel:
310 	case txgbe_phy_sfp_unknown:
311 	case txgbe_phy_sfp_tyco_passive:
312 	case txgbe_phy_sfp_unknown_passive:
313 		return 1;
314 	default:
315 		return 0;
316 	}
317 }
318 
319 static inline int32_t
320 txgbe_pf_reset_hw(struct txgbe_hw *hw)
321 {
322 	uint32_t ctrl_ext;
323 	int32_t status;
324 
325 	status = hw->mac.reset_hw(hw);
326 
327 	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
328 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
329 	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
330 	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
331 	txgbe_flush(hw);
332 
333 	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
334 		status = 0;
335 	return status;
336 }
337 
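/*
 * Interrupt gating sketch (assuming the usual IMC-clears/IMS-sets mask
 * semantics): enabling writes the saved misc mask to IENMISC and clears the
 * queue interrupt masks via IMC; disabling writes ~BIT_MASK32 to IENMISC and
 * sets the queue masks via IMS.
 */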
338 static inline void
339 txgbe_enable_intr(struct rte_eth_dev *dev)
340 {
341 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
342 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
343 
344 	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
345 	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
346 	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
347 	txgbe_flush(hw);
348 }
349 
350 static void
351 txgbe_disable_intr(struct txgbe_hw *hw)
352 {
353 	PMD_INIT_FUNC_TRACE();
354 
355 	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
356 	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
357 	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
358 	txgbe_flush(hw);
359 }
360 
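/*
 * Queue statistics mapping: each RQSMR/TQSM register packs
 * NB_QMAP_FIELDS_PER_QSM_REG fields of QSM_REG_NB_BITS_PER_QMAP_FIELD bits,
 * so queue_id selects register n = queue_id / fields-per-reg and the field
 * at offset queue_id % fields-per-reg within that register.
 */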
361 static int
362 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
363 				  uint16_t queue_id,
364 				  uint8_t stat_idx,
365 				  uint8_t is_rx)
366 {
367 	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
368 	struct txgbe_stat_mappings *stat_mappings =
369 		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
370 	uint32_t qsmr_mask = 0;
371 	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
372 	uint32_t q_map;
373 	uint8_t n, offset;
374 
375 	if (hw->mac.type != txgbe_mac_raptor)
376 		return -ENOSYS;
377 
378 	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
379 		return -EIO;
380 
381 	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
382 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
383 		     queue_id, stat_idx);
384 
385 	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
386 	if (n >= TXGBE_NB_STAT_MAPPING) {
387 		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
388 		return -EIO;
389 	}
390 	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
391 
392 	/* Now clear any previous stat_idx set */
393 	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
394 	if (!is_rx)
395 		stat_mappings->tqsm[n] &= ~clearing_mask;
396 	else
397 		stat_mappings->rqsm[n] &= ~clearing_mask;
398 
399 	q_map = (uint32_t)stat_idx;
400 	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
401 	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
402 	if (!is_rx)
403 		stat_mappings->tqsm[n] |= qsmr_mask;
404 	else
405 		stat_mappings->rqsm[n] |= qsmr_mask;
406 
407 	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
408 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
409 		     queue_id, stat_idx);
410 	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
411 		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
412 	return 0;
413 }
414 
415 static void
416 txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
417 {
418 	int i;
419 	u8 bwgp;
420 	struct txgbe_dcb_tc_config *tc;
421 
422 	UNREFERENCED_PARAMETER(hw);
423 
424 	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
425 	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
426 	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
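	/*
	 * 100 / TXGBE_DCB_TC_MAX leaves a remainder, so odd TCs get bwgp + 1
	 * below (e.g. with 8 TCs: 12, 13, 12, 13, ... which sums back to 100%).
	 */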
427 	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
428 		tc = &dcb_config->tc_config[i];
429 		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
430 		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
431 		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
432 		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
433 		tc->pfc = txgbe_dcb_pfc_disabled;
434 	}
435 
436 	/* Initialize default user priority to TC mapping, UPx -> TC0 */
437 	tc = &dcb_config->tc_config[0];
438 	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
439 	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
440 	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
441 		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
442 		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
443 	}
444 	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
445 	dcb_config->pfc_mode_enable = false;
446 	dcb_config->vt_mode = true;
447 	dcb_config->round_robin_enable = false;
448 	/* support all DCB capabilities */
449 	dcb_config->support.capabilities = 0xFF;
450 }
451 
452 /*
453  * Ensure that all locks are released before first NVM or PHY access
454  */
455 static void
456 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
457 {
458 	uint16_t mask;
459 
460 	/*
461 	 * These locks are trickier since they are shared by all ports; but
462 	 * swfw_sync retries for long enough (1s) that, if the lock cannot
463 	 * be taken, it is almost certainly because the semaphore was left
464 	 * improperly locked.
465 	 */
466 	mask = TXGBE_MNGSEM_SWPHY |
467 	       TXGBE_MNGSEM_SWMBX |
468 	       TXGBE_MNGSEM_SWFLASH;
469 	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
470 		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
471 
472 	hw->mac.release_swfw_sync(hw, mask);
473 }
474 
475 static int
476 txgbe_handle_devarg(__rte_unused const char *key, const char *value,
477 		  void *extra_args)
478 {
479 	uint16_t *n = extra_args;
480 
481 	if (value == NULL || extra_args == NULL)
482 		return -EINVAL;
483 
484 	*n = (uint16_t)strtoul(value, NULL, 10);
485 	if (*n == USHRT_MAX && errno == ERANGE)
486 		return -1;
487 
488 	return 0;
489 }
490 
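/*
 * Parse the optional backplane/PHY tuning devargs. A minimal usage sketch,
 * assuming the key names declared in txgbe_valid_arguments, would be a device
 * string such as "0000:01:00.0,auto_neg=1,poll=0,ffe_set=0"; keys that are
 * not given keep the defaults assigned below.
 */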
491 static void
492 txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
493 {
494 	struct rte_kvargs *kvlist;
495 	u16 auto_neg = 1;
496 	u16 poll = 0;
497 	u16 present = 1;
498 	u16 sgmii = 0;
499 	u16 ffe_set = 0;
500 	u16 ffe_main = 27;
501 	u16 ffe_pre = 8;
502 	u16 ffe_post = 44;
503 
504 	if (devargs == NULL)
505 		goto null;
506 
507 	kvlist = rte_kvargs_parse(devargs->args, txgbe_valid_arguments);
508 	if (kvlist == NULL)
509 		goto null;
510 
511 	rte_kvargs_process(kvlist, TXGBE_DEVARG_BP_AUTO,
512 			   &txgbe_handle_devarg, &auto_neg);
513 	rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_POLL,
514 			   &txgbe_handle_devarg, &poll);
515 	rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_PRESENT,
516 			   &txgbe_handle_devarg, &present);
517 	rte_kvargs_process(kvlist, TXGBE_DEVARG_KX_SGMII,
518 			   &txgbe_handle_devarg, &sgmii);
519 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_SET,
520 			   &txgbe_handle_devarg, &ffe_set);
521 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_MAIN,
522 			   &txgbe_handle_devarg, &ffe_main);
523 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_PRE,
524 			   &txgbe_handle_devarg, &ffe_pre);
525 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_POST,
526 			   &txgbe_handle_devarg, &ffe_post);
527 	rte_kvargs_free(kvlist);
528 
529 null:
530 	hw->devarg.auto_neg = auto_neg;
531 	hw->devarg.poll = poll;
532 	hw->devarg.present = present;
533 	hw->devarg.sgmii = sgmii;
534 	hw->phy.ffe_set = ffe_set;
535 	hw->phy.ffe_main = ffe_main;
536 	hw->phy.ffe_pre = ffe_pre;
537 	hw->phy.ffe_post = ffe_post;
538 }
539 
540 static int
541 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
542 {
543 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
544 	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
545 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
546 	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
547 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
548 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
549 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
550 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
551 	const struct rte_memzone *mz;
552 	uint32_t ctrl_ext;
553 	uint16_t csum;
554 	int err, i, ret;
555 
556 	PMD_INIT_FUNC_TRACE();
557 
558 	eth_dev->dev_ops = &txgbe_eth_dev_ops;
559 	eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
560 	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
561 	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
562 	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
563 	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
564 	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
565 
566 	/*
567 	 * For secondary processes, we don't initialise any further as primary
568 	 * has already done this work. Only check we don't need a different
569 	 * RX and TX function.
570 	 */
571 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
572 		struct txgbe_tx_queue *txq;
573 		/* The TX function in the primary process was set by the last
574 		 * initialized queue; TX queues may not be initialized yet.
575 		 */
576 		if (eth_dev->data->tx_queues) {
577 			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
578 			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
579 			txgbe_set_tx_function(eth_dev, txq);
580 		} else {
581 			/* Use default TX function if we get here */
582 			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
583 				     "Using default TX function.");
584 		}
585 
586 		txgbe_set_rx_function(eth_dev);
587 
588 		return 0;
589 	}
590 
591 	rte_eth_copy_pci_info(eth_dev, pci_dev);
592 
593 	/* Vendor and Device ID need to be set before init of shared code */
594 	hw->device_id = pci_dev->id.device_id;
595 	hw->vendor_id = pci_dev->id.vendor_id;
596 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
597 	hw->allow_unsupported_sfp = 1;
598 
599 	/* Reserve memory for interrupt status block */
600 	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
601 		16, TXGBE_ALIGN, SOCKET_ID_ANY);
602 	if (mz == NULL)
603 		return -ENOMEM;
604 
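	/*
	 * Record both the bus (DMA) and virtual addresses of the interrupt
	 * status block so the device can later DMA interrupt status into it.
	 */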
605 	hw->isb_dma = TMZ_PADDR(mz);
606 	hw->isb_mem = TMZ_VADDR(mz);
607 
608 	txgbe_parse_devargs(hw, pci_dev->device.devargs);
609 	/* Initialize the shared code (base driver) */
610 	err = txgbe_init_shared_code(hw);
611 	if (err != 0) {
612 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
613 		return -EIO;
614 	}
615 
616 	/* Unlock any pending hardware semaphore */
617 	txgbe_swfw_lock_reset(hw);
618 
619 #ifdef RTE_LIB_SECURITY
620 	/* Initialize security_ctx only for primary process */
621 	if (txgbe_ipsec_ctx_create(eth_dev))
622 		return -ENOMEM;
623 #endif
624 
625 	/* Initialize DCB configuration */
626 	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
627 	txgbe_dcb_init(hw, dcb_config);
628 
629 	/* Get Hardware Flow Control setting */
630 	hw->fc.requested_mode = txgbe_fc_full;
631 	hw->fc.current_mode = txgbe_fc_full;
632 	hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
633 	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
634 		hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
635 		hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
636 	}
637 	hw->fc.send_xon = 1;
638 
639 	err = hw->rom.init_params(hw);
640 	if (err != 0) {
641 		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
642 		return -EIO;
643 	}
644 
645 	/* Make sure we have a good EEPROM before we read from it */
646 	err = hw->rom.validate_checksum(hw, &csum);
647 	if (err != 0) {
648 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
649 		return -EIO;
650 	}
651 
652 	err = hw->mac.init_hw(hw);
653 
654 	/*
655 	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
656 	 * is called too soon after the kernel driver unbinding/binding occurs.
657 	 * The failure occurs in txgbe_identify_phy() for all devices,
658 	 * but for non-copper devices, txgbe_identify_sfp_module() is
659 	 * also called. See txgbe_identify_phy(). The reason for the
660 	 * failure is not known, and only occurs when virtualisation features
661 	 * are disabled in the BIOS. A delay of 200ms was found to be enough by
662 	 * trial-and-error, and is doubled to be safe.
663 	 */
664 	if (err && hw->phy.media_type == txgbe_media_type_copper) {
665 		rte_delay_ms(200);
666 		err = hw->mac.init_hw(hw);
667 	}
668 
669 	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
670 		err = 0;
671 
672 	if (err == TXGBE_ERR_EEPROM_VERSION) {
673 		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
674 			     "LOM.  Please be aware there may be issues associated "
675 			     "with your hardware.");
676 		PMD_INIT_LOG(ERR, "If you are experiencing problems "
677 			     "please contact your hardware representative "
678 			     "who provided you with this hardware.");
679 	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
680 		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
681 	}
682 	if (err) {
683 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
684 		return -EIO;
685 	}
686 
687 	/* Reset the hw statistics */
688 	txgbe_dev_stats_reset(eth_dev);
689 
690 	/* disable interrupt */
691 	txgbe_disable_intr(hw);
692 
693 	/* Allocate memory for storing MAC addresses */
694 	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
695 					       hw->mac.num_rar_entries, 0);
696 	if (eth_dev->data->mac_addrs == NULL) {
697 		PMD_INIT_LOG(ERR,
698 			     "Failed to allocate %u bytes needed to store "
699 			     "MAC addresses",
700 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
701 		return -ENOMEM;
702 	}
703 
704 	/* Copy the permanent MAC address */
705 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
706 			&eth_dev->data->mac_addrs[0]);
707 
708 	/* Allocate memory for storing hash filter MAC addresses */
709 	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
710 			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
711 	if (eth_dev->data->hash_mac_addrs == NULL) {
712 		PMD_INIT_LOG(ERR,
713 			     "Failed to allocate %d bytes needed to store MAC addresses",
714 			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
715 		return -ENOMEM;
716 	}
717 
718 	/* initialize the vfta */
719 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
720 
721 	/* initialize the hw strip bitmap */
722 	memset(hwstrip, 0, sizeof(*hwstrip));
723 
724 	/* initialize PF if max_vfs not zero */
725 	ret = txgbe_pf_host_init(eth_dev);
726 	if (ret) {
727 		rte_free(eth_dev->data->mac_addrs);
728 		eth_dev->data->mac_addrs = NULL;
729 		rte_free(eth_dev->data->hash_mac_addrs);
730 		eth_dev->data->hash_mac_addrs = NULL;
731 		return ret;
732 	}
733 
734 	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
735 	/* let hardware know driver is loaded */
736 	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
737 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
738 	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
739 	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
740 	txgbe_flush(hw);
741 
742 	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
743 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
744 			     (int)hw->mac.type, (int)hw->phy.type,
745 			     (int)hw->phy.sfp_type);
746 	else
747 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
748 			     (int)hw->mac.type, (int)hw->phy.type);
749 
750 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
751 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
752 		     pci_dev->id.device_id);
753 
754 	rte_intr_callback_register(intr_handle,
755 				   txgbe_dev_interrupt_handler, eth_dev);
756 
757 	/* enable uio/vfio intr/eventfd mapping */
758 	rte_intr_enable(intr_handle);
759 
760 	/* enable support intr */
761 	txgbe_enable_intr(eth_dev);
762 
763 	/* initialize filter info */
764 	memset(filter_info, 0,
765 	       sizeof(struct txgbe_filter_info));
766 
767 	/* initialize 5tuple filter list */
768 	TAILQ_INIT(&filter_info->fivetuple_list);
769 
770 	/* initialize flow director filter list & hash */
771 	txgbe_fdir_filter_init(eth_dev);
772 
773 	/* initialize l2 tunnel filter list & hash */
774 	txgbe_l2_tn_filter_init(eth_dev);
775 
776 	/* initialize flow filter lists */
777 	txgbe_filterlist_init();
778 
779 	/* initialize bandwidth configuration info */
780 	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
781 
782 	/* initialize Traffic Manager configuration */
783 	txgbe_tm_conf_init(eth_dev);
784 
785 	return 0;
786 }
787 
788 static int
789 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
790 {
791 	PMD_INIT_FUNC_TRACE();
792 
793 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
794 		return 0;
795 
796 	txgbe_dev_close(eth_dev);
797 
798 	return 0;
799 }
800 
801 static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
802 {
803 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
804 	struct txgbe_5tuple_filter *p_5tuple;
805 
806 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
807 		TAILQ_REMOVE(&filter_info->fivetuple_list,
808 			     p_5tuple,
809 			     entries);
810 		rte_free(p_5tuple);
811 	}
812 	memset(filter_info->fivetuple_mask, 0,
813 	       sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
814 
815 	return 0;
816 }
817 
818 static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
819 {
820 	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
821 	struct txgbe_fdir_filter *fdir_filter;
822 
823 	if (fdir_info->hash_map)
824 		rte_free(fdir_info->hash_map);
825 	if (fdir_info->hash_handle)
826 		rte_hash_free(fdir_info->hash_handle);
827 
828 	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
829 		TAILQ_REMOVE(&fdir_info->fdir_list,
830 			     fdir_filter,
831 			     entries);
832 		rte_free(fdir_filter);
833 	}
834 
835 	return 0;
836 }
837 
838 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
839 {
840 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
841 	struct txgbe_l2_tn_filter *l2_tn_filter;
842 
843 	if (l2_tn_info->hash_map)
844 		rte_free(l2_tn_info->hash_map);
845 	if (l2_tn_info->hash_handle)
846 		rte_hash_free(l2_tn_info->hash_handle);
847 
848 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
849 		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
850 			     l2_tn_filter,
851 			     entries);
852 		rte_free(l2_tn_filter);
853 	}
854 
855 	return 0;
856 }
857 
858 static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
859 {
860 	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
861 	char fdir_hash_name[RTE_HASH_NAMESIZE];
862 	struct rte_hash_parameters fdir_hash_params = {
863 		.name = fdir_hash_name,
864 		.entries = TXGBE_MAX_FDIR_FILTER_NUM,
865 		.key_len = sizeof(struct txgbe_atr_input),
866 		.hash_func = rte_hash_crc,
867 		.hash_func_init_val = 0,
868 		.socket_id = rte_socket_id(),
869 	};
870 
871 	TAILQ_INIT(&fdir_info->fdir_list);
872 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
873 		 "fdir_%s", TDEV_NAME(eth_dev));
874 	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
875 	if (!fdir_info->hash_handle) {
876 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
877 		return -EINVAL;
878 	}
879 	fdir_info->hash_map = rte_zmalloc("txgbe",
880 					  sizeof(struct txgbe_fdir_filter *) *
881 					  TXGBE_MAX_FDIR_FILTER_NUM,
882 					  0);
883 	if (!fdir_info->hash_map) {
884 		PMD_INIT_LOG(ERR,
885 			     "Failed to allocate memory for fdir hash map!");
886 		return -ENOMEM;
887 	}
888 	fdir_info->mask_added = FALSE;
889 
890 	return 0;
891 }
892 
893 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
894 {
895 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
896 	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
897 	struct rte_hash_parameters l2_tn_hash_params = {
898 		.name = l2_tn_hash_name,
899 		.entries = TXGBE_MAX_L2_TN_FILTER_NUM,
900 		.key_len = sizeof(struct txgbe_l2_tn_key),
901 		.hash_func = rte_hash_crc,
902 		.hash_func_init_val = 0,
903 		.socket_id = rte_socket_id(),
904 	};
905 
906 	TAILQ_INIT(&l2_tn_info->l2_tn_list);
907 	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
908 		 "l2_tn_%s", TDEV_NAME(eth_dev));
909 	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
910 	if (!l2_tn_info->hash_handle) {
911 		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
912 		return -EINVAL;
913 	}
914 	l2_tn_info->hash_map = rte_zmalloc("txgbe",
915 				   sizeof(struct txgbe_l2_tn_filter *) *
916 				   TXGBE_MAX_L2_TN_FILTER_NUM,
917 				   0);
918 	if (!l2_tn_info->hash_map) {
919 		PMD_INIT_LOG(ERR,
920 			"Failed to allocate memory for L2 TN hash map!");
921 		return -ENOMEM;
922 	}
923 	l2_tn_info->e_tag_en = FALSE;
924 	l2_tn_info->e_tag_fwd_en = FALSE;
925 	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
926 
927 	return 0;
928 }
929 
930 static int
931 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
932 		struct rte_pci_device *pci_dev)
933 {
934 	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
935 			sizeof(struct txgbe_adapter),
936 			eth_dev_pci_specific_init, pci_dev,
937 			eth_txgbe_dev_init, NULL);
938 }
939 
940 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
941 {
942 	struct rte_eth_dev *ethdev;
943 
944 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
945 	if (!ethdev)
946 		return 0;
947 
948 	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
949 }
950 
951 static struct rte_pci_driver rte_txgbe_pmd = {
952 	.id_table = pci_id_txgbe_map,
953 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
954 		     RTE_PCI_DRV_INTR_LSC,
955 	.probe = eth_txgbe_pci_probe,
956 	.remove = eth_txgbe_pci_remove,
957 };
958 
959 static int
960 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
961 {
962 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
963 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
964 	uint32_t vfta;
965 	uint32_t vid_idx;
966 	uint32_t vid_bit;
967 
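	/* The VFTA is an array of 32-bit words: bits 11:5 of the VLAN ID pick
	 * the word (vid_idx) and bits 4:0 pick the bit within it (vid_bit).
	 */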
968 	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
969 	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
970 	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
971 	if (on)
972 		vfta |= vid_bit;
973 	else
974 		vfta &= ~vid_bit;
975 	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
976 
977 	/* update local VFTA copy */
978 	shadow_vfta->vfta[vid_idx] = vfta;
979 
980 	return 0;
981 }
982 
983 static void
984 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
985 {
986 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
987 	struct txgbe_rx_queue *rxq;
988 	bool restart;
989 	uint32_t rxcfg, rxbal, rxbah;
990 
991 	if (on)
992 		txgbe_vlan_hw_strip_enable(dev, queue);
993 	else
994 		txgbe_vlan_hw_strip_disable(dev, queue);
995 
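	/* The per-ring VLAN-strip bit can only be changed safely while the
	 * ring is disabled, so save the ring base/config registers and, if
	 * the ring was enabled, stop it, rewrite them and start it again.
	 */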
996 	rxq = dev->data->rx_queues[queue];
997 	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
998 	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
999 	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
1000 	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
1001 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
1002 			!(rxcfg & TXGBE_RXCFG_VLAN);
1003 		rxcfg |= TXGBE_RXCFG_VLAN;
1004 	} else {
1005 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
1006 			(rxcfg & TXGBE_RXCFG_VLAN);
1007 		rxcfg &= ~TXGBE_RXCFG_VLAN;
1008 	}
1009 	rxcfg &= ~TXGBE_RXCFG_ENA;
1010 
1011 	if (restart) {
1012 		/* set vlan strip for ring */
1013 		txgbe_dev_rx_queue_stop(dev, queue);
1014 		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
1015 		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
1016 		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
1017 		txgbe_dev_rx_queue_start(dev, queue);
1018 	}
1019 }
1020 
1021 static int
1022 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1023 		    enum rte_vlan_type vlan_type,
1024 		    uint16_t tpid)
1025 {
1026 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1027 	int ret = 0;
1028 	uint32_t portctrl, vlan_ext, qinq;
1029 
1030 	portctrl = rd32(hw, TXGBE_PORTCTL);
1031 
1032 	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
1033 	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
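	/* vlan_ext reflects extended (double) VLAN mode; QinQ is only
	 * meaningful when extended VLAN is already enabled in PORTCTL.
	 */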
1034 	switch (vlan_type) {
1035 	case ETH_VLAN_TYPE_INNER:
1036 		if (vlan_ext) {
1037 			wr32m(hw, TXGBE_VLANCTL,
1038 				TXGBE_VLANCTL_TPID_MASK,
1039 				TXGBE_VLANCTL_TPID(tpid));
1040 			wr32m(hw, TXGBE_DMATXCTRL,
1041 				TXGBE_DMATXCTRL_TPID_MASK,
1042 				TXGBE_DMATXCTRL_TPID(tpid));
1043 		} else {
1044 			ret = -ENOTSUP;
1045 			PMD_DRV_LOG(ERR, "Inner type is not supported"
1046 				    " by single VLAN");
1047 		}
1048 
1049 		if (qinq) {
1050 			wr32m(hw, TXGBE_TAGTPID(0),
1051 				TXGBE_TAGTPID_LSB_MASK,
1052 				TXGBE_TAGTPID_LSB(tpid));
1053 		}
1054 		break;
1055 	case ETH_VLAN_TYPE_OUTER:
1056 		if (vlan_ext) {
1057 			/* Only the high 16 bits are valid */
1058 			wr32m(hw, TXGBE_EXTAG,
1059 				TXGBE_EXTAG_VLAN_MASK,
1060 				TXGBE_EXTAG_VLAN(tpid));
1061 		} else {
1062 			wr32m(hw, TXGBE_VLANCTL,
1063 				TXGBE_VLANCTL_TPID_MASK,
1064 				TXGBE_VLANCTL_TPID(tpid));
1065 			wr32m(hw, TXGBE_DMATXCTRL,
1066 				TXGBE_DMATXCTRL_TPID_MASK,
1067 				TXGBE_DMATXCTRL_TPID(tpid));
1068 		}
1069 
1070 		if (qinq) {
1071 			wr32m(hw, TXGBE_TAGTPID(0),
1072 				TXGBE_TAGTPID_MSB_MASK,
1073 				TXGBE_TAGTPID_MSB(tpid));
1074 		}
1075 		break;
1076 	default:
1077 		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1078 		return -EINVAL;
1079 	}
1080 
1081 	return ret;
1082 }
1083 
1084 void
1085 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1086 {
1087 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1088 	uint32_t vlnctrl;
1089 
1090 	PMD_INIT_FUNC_TRACE();
1091 
1092 	/* Filter Table Disable */
1093 	vlnctrl = rd32(hw, TXGBE_VLANCTL);
1094 	vlnctrl &= ~TXGBE_VLANCTL_VFE;
1095 	wr32(hw, TXGBE_VLANCTL, vlnctrl);
1096 }
1097 
1098 void
1099 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1100 {
1101 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1102 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
1103 	uint32_t vlnctrl;
1104 	uint16_t i;
1105 
1106 	PMD_INIT_FUNC_TRACE();
1107 
1108 	/* Filter Table Enable */
1109 	vlnctrl = rd32(hw, TXGBE_VLANCTL);
1110 	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
1111 	vlnctrl |= TXGBE_VLANCTL_VFE;
1112 	wr32(hw, TXGBE_VLANCTL, vlnctrl);
1113 
1114 	/* write whatever is in local vfta copy */
1115 	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
1116 		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
1117 }
1118 
1119 void
1120 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1121 {
1122 	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
1123 	struct txgbe_rx_queue *rxq;
1124 
1125 	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
1126 		return;
1127 
1128 	if (on)
1129 		TXGBE_SET_HWSTRIP(hwstrip, queue);
1130 	else
1131 		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1132 
1133 	if (queue >= dev->data->nb_rx_queues)
1134 		return;
1135 
1136 	rxq = dev->data->rx_queues[queue];
1137 
1138 	if (on) {
1139 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1140 		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1141 	} else {
1142 		rxq->vlan_flags = PKT_RX_VLAN;
1143 		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1144 	}
1145 }
1146 
1147 static void
1148 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1149 {
1150 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1151 	uint32_t ctrl;
1152 
1153 	PMD_INIT_FUNC_TRACE();
1154 
1155 	ctrl = rd32(hw, TXGBE_RXCFG(queue));
1156 	ctrl &= ~TXGBE_RXCFG_VLAN;
1157 	wr32(hw, TXGBE_RXCFG(queue), ctrl);
1158 
1159 	/* record this setting in the per-queue HW strip bitmap */
1160 	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1161 }
1162 
1163 static void
1164 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1165 {
1166 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1167 	uint32_t ctrl;
1168 
1169 	PMD_INIT_FUNC_TRACE();
1170 
1171 	ctrl = rd32(hw, TXGBE_RXCFG(queue));
1172 	ctrl |= TXGBE_RXCFG_VLAN;
1173 	wr32(hw, TXGBE_RXCFG(queue), ctrl);
1174 
1175 	/* record this setting in the per-queue HW strip bitmap */
1176 	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1177 }
1178 
1179 static void
1180 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1181 {
1182 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1183 	uint32_t ctrl;
1184 
1185 	PMD_INIT_FUNC_TRACE();
1186 
1187 	ctrl = rd32(hw, TXGBE_PORTCTL);
1188 	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
1189 	wr32(hw, TXGBE_PORTCTL, ctrl);
1190 }
1191 
1192 static void
1193 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1194 {
1195 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1196 	uint32_t ctrl;
1197 
1198 	PMD_INIT_FUNC_TRACE();
1199 
1200 	ctrl  = rd32(hw, TXGBE_PORTCTL);
1201 	ctrl |= TXGBE_PORTCTL_VLANEXT;
1202 	wr32(hw, TXGBE_PORTCTL, ctrl);
1203 }
1204 
1205 static void
1206 txgbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
1207 {
1208 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1209 	uint32_t ctrl;
1210 
1211 	PMD_INIT_FUNC_TRACE();
1212 
1213 	ctrl = rd32(hw, TXGBE_PORTCTL);
1214 	ctrl &= ~TXGBE_PORTCTL_QINQ;
1215 	wr32(hw, TXGBE_PORTCTL, ctrl);
1216 }
1217 
1218 static void
1219 txgbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
1220 {
1221 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1222 	uint32_t ctrl;
1223 
1224 	PMD_INIT_FUNC_TRACE();
1225 
1226 	ctrl  = rd32(hw, TXGBE_PORTCTL);
1227 	ctrl |= TXGBE_PORTCTL_QINQ | TXGBE_PORTCTL_VLANEXT;
1228 	wr32(hw, TXGBE_PORTCTL, ctrl);
1229 }
1230 
1231 void
1232 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
1233 {
1234 	struct txgbe_rx_queue *rxq;
1235 	uint16_t i;
1236 
1237 	PMD_INIT_FUNC_TRACE();
1238 
1239 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1240 		rxq = dev->data->rx_queues[i];
1241 
1242 		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1243 			txgbe_vlan_strip_queue_set(dev, i, 1);
1244 		else
1245 			txgbe_vlan_strip_queue_set(dev, i, 0);
1246 	}
1247 }
1248 
1249 void
1250 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
1251 {
1252 	uint16_t i;
1253 	struct rte_eth_rxmode *rxmode;
1254 	struct txgbe_rx_queue *rxq;
1255 
1256 	if (mask & ETH_VLAN_STRIP_MASK) {
1257 		rxmode = &dev->data->dev_conf.rxmode;
1258 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1259 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
1260 				rxq = dev->data->rx_queues[i];
1261 				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1262 			}
1263 		else
1264 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
1265 				rxq = dev->data->rx_queues[i];
1266 				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1267 			}
1268 	}
1269 }
1270 
1271 static int
1272 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
1273 {
1274 	struct rte_eth_rxmode *rxmode;
1275 	rxmode = &dev->data->dev_conf.rxmode;
1276 
1277 	if (mask & ETH_VLAN_STRIP_MASK)
1278 		txgbe_vlan_hw_strip_config(dev);
1279 
1280 	if (mask & ETH_VLAN_FILTER_MASK) {
1281 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1282 			txgbe_vlan_hw_filter_enable(dev);
1283 		else
1284 			txgbe_vlan_hw_filter_disable(dev);
1285 	}
1286 
1287 	if (mask & ETH_VLAN_EXTEND_MASK) {
1288 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
1289 			txgbe_vlan_hw_extend_enable(dev);
1290 		else
1291 			txgbe_vlan_hw_extend_disable(dev);
1292 	}
1293 
1294 	if (mask & ETH_QINQ_STRIP_MASK) {
1295 		if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
1296 			txgbe_qinq_hw_strip_enable(dev);
1297 		else
1298 			txgbe_qinq_hw_strip_disable(dev);
1299 	}
1300 
1301 	return 0;
1302 }
1303 
1304 static int
1305 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1306 {
1307 	txgbe_config_vlan_strip_on_all_queues(dev, mask);
1308 
1309 	txgbe_vlan_offload_config(dev, mask);
1310 
1311 	return 0;
1312 }
1313 
1314 static void
1315 txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1316 {
1317 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1318 	/* VLANCTL: enable vlan filtering and allow all vlan tags through */
1319 	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
1320 
1321 	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
1322 	wr32(hw, TXGBE_VLANCTL, vlanctrl);
1323 }
1324 
1325 static int
1326 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1327 {
1328 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1329 
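	/* Choose the SR-IOV pool layout from the per-VF RSS queue count:
	 * 1 or 2 queues map to 64 pools, 4 queues map to 32 pools; each pool
	 * then gets TXGBE_MAX_RX_QUEUE_NUM / pools queues (computed below).
	 */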
1330 	switch (nb_rx_q) {
1331 	case 1:
1332 	case 2:
1333 		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
1334 		break;
1335 	case 4:
1336 		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
1337 		break;
1338 	default:
1339 		return -EINVAL;
1340 	}
1341 
1342 	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
1343 		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1344 	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
1345 		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1346 	return 0;
1347 }
1348 
1349 static int
1350 txgbe_check_mq_mode(struct rte_eth_dev *dev)
1351 {
1352 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1353 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
1354 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
1355 
1356 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1357 		/* check multi-queue mode */
1358 		switch (dev_conf->rxmode.mq_mode) {
1359 		case ETH_MQ_RX_VMDQ_DCB:
1360 			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
1361 			break;
1362 		case ETH_MQ_RX_VMDQ_DCB_RSS:
1363 			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
1364 			PMD_INIT_LOG(ERR, "SRIOV active,"
1365 					" unsupported mq_mode rx %d.",
1366 					dev_conf->rxmode.mq_mode);
1367 			return -EINVAL;
1368 		case ETH_MQ_RX_RSS:
1369 		case ETH_MQ_RX_VMDQ_RSS:
1370 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1371 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1372 				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1373 					PMD_INIT_LOG(ERR, "SRIOV is active,"
1374 						" invalid queue number"
1375 						" for VMDQ RSS, allowed"
1376 						" value are 1, 2 or 4.");
1377 					return -EINVAL;
1378 				}
1379 			break;
1380 		case ETH_MQ_RX_VMDQ_ONLY:
1381 		case ETH_MQ_RX_NONE:
1382 			/* if no mq mode is configured, use the default scheme */
1383 			dev->data->dev_conf.rxmode.mq_mode =
1384 				ETH_MQ_RX_VMDQ_ONLY;
1385 			break;
1386 		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1387 			/* SRIOV only works in VMDq enable mode */
1388 			PMD_INIT_LOG(ERR, "SRIOV is active,"
1389 					" wrong mq_mode rx %d.",
1390 					dev_conf->rxmode.mq_mode);
1391 			return -EINVAL;
1392 		}
1393 
1394 		switch (dev_conf->txmode.mq_mode) {
1395 		case ETH_MQ_TX_VMDQ_DCB:
1396 			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1397 			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1398 			break;
1399 		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1400 			dev->data->dev_conf.txmode.mq_mode =
1401 				ETH_MQ_TX_VMDQ_ONLY;
1402 			break;
1403 		}
1404 
1405 		/* check valid queue number */
1406 		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1407 		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1408 			PMD_INIT_LOG(ERR, "SRIOV is active,"
1409 					" nb_rx_q=%d nb_tx_q=%d queue number"
1410 					" must be less than or equal to %d.",
1411 					nb_rx_q, nb_tx_q,
1412 					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1413 			return -EINVAL;
1414 		}
1415 	} else {
1416 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1417 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1418 					  " not supported.");
1419 			return -EINVAL;
1420 		}
1421 		/* check configuration for vmdq+dcb mode */
1422 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1423 			const struct rte_eth_vmdq_dcb_conf *conf;
1424 
1425 			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1426 				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1427 						TXGBE_VMDQ_DCB_NB_QUEUES);
1428 				return -EINVAL;
1429 			}
1430 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1431 			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1432 			       conf->nb_queue_pools == ETH_32_POOLS)) {
1433 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1434 						" nb_queue_pools must be %d or %d.",
1435 						ETH_16_POOLS, ETH_32_POOLS);
1436 				return -EINVAL;
1437 			}
1438 		}
1439 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1440 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
1441 
1442 			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1443 				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1444 						 TXGBE_VMDQ_DCB_NB_QUEUES);
1445 				return -EINVAL;
1446 			}
1447 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1448 			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1449 			       conf->nb_queue_pools == ETH_32_POOLS)) {
1450 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1451 						" nb_queue_pools != %d and"
1452 						" nb_queue_pools != %d.",
1453 						ETH_16_POOLS, ETH_32_POOLS);
1454 				return -EINVAL;
1455 			}
1456 		}
1457 
1458 		/* For DCB mode check our configuration before we go further */
1459 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1460 			const struct rte_eth_dcb_rx_conf *conf;
1461 
1462 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1463 			if (!(conf->nb_tcs == ETH_4_TCS ||
1464 			       conf->nb_tcs == ETH_8_TCS)) {
1465 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1466 						" and nb_tcs != %d.",
1467 						ETH_4_TCS, ETH_8_TCS);
1468 				return -EINVAL;
1469 			}
1470 		}
1471 
1472 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1473 			const struct rte_eth_dcb_tx_conf *conf;
1474 
1475 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1476 			if (!(conf->nb_tcs == ETH_4_TCS ||
1477 			       conf->nb_tcs == ETH_8_TCS)) {
1478 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1479 						" and nb_tcs != %d.",
1480 						ETH_4_TCS, ETH_8_TCS);
1481 				return -EINVAL;
1482 			}
1483 		}
1484 	}
1485 	return 0;
1486 }
1487 
1488 static int
1489 txgbe_dev_configure(struct rte_eth_dev *dev)
1490 {
1491 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1492 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1493 	int ret;
1494 
1495 	PMD_INIT_FUNC_TRACE();
1496 
1497 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1498 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1499 
1500 	/* multiple queue mode checking */
1501 	ret  = txgbe_check_mq_mode(dev);
1502 	if (ret != 0) {
1503 		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1504 			    ret);
1505 		return ret;
1506 	}
1507 
1508 	/* set flag to update link status after init */
1509 	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1510 
1511 	/*
1512 	 * Initialize to TRUE. If any Rx queue fails to meet the bulk
1513 	 * allocation Rx preconditions, it will be reset.
1514 	 */
1515 	adapter->rx_bulk_alloc_allowed = true;
1516 
1517 	return 0;
1518 }
1519 
1520 static void
1521 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1522 {
1523 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1524 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1525 	uint32_t gpie;
1526 
1527 	gpie = rd32(hw, TXGBE_GPIOINTEN);
1528 	gpie |= TXGBE_GPIOBIT_6;
1529 	wr32(hw, TXGBE_GPIOINTEN, gpie);
1530 	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1531 	intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
1532 }
1533 
1534 int
1535 txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
1536 			uint16_t tx_rate, uint64_t q_msk)
1537 {
1538 	struct txgbe_hw *hw;
1539 	struct txgbe_vf_info *vfinfo;
1540 	struct rte_eth_link link;
1541 	uint8_t  nb_q_per_pool;
1542 	uint32_t queue_stride;
1543 	uint32_t queue_idx, idx = 0, vf_idx;
1544 	uint32_t queue_end;
1545 	uint16_t total_rate = 0;
1546 	struct rte_pci_device *pci_dev;
1547 	int ret;
1548 
1549 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1550 	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
1551 	if (ret < 0)
1552 		return ret;
1553 
1554 	if (vf >= pci_dev->max_vfs)
1555 		return -EINVAL;
1556 
1557 	if (tx_rate > link.link_speed)
1558 		return -EINVAL;
1559 
1560 	if (q_msk == 0)
1561 		return 0;
1562 
1563 	hw = TXGBE_DEV_HW(dev);
1564 	vfinfo = *(TXGBE_DEV_VFDATA(dev));
1565 	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1566 	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1567 	queue_idx = vf * queue_stride;
1568 	queue_end = queue_idx + nb_q_per_pool - 1;
1569 	if (queue_end >= hw->mac.max_tx_queues)
1570 		return -EINVAL;
1571 
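	/* Sum the rates already committed to the other VFs' queues; this VF's
	 * new rate is added below and the total is checked against the
	 * current link speed before programming the hardware.
	 */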
1572 	if (vfinfo) {
1573 		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
1574 			if (vf_idx == vf)
1575 				continue;
1576 			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
1577 				idx++)
1578 				total_rate += vfinfo[vf_idx].tx_rate[idx];
1579 		}
1580 	} else {
1581 		return -EINVAL;
1582 	}
1583 
1584 	/* Store tx_rate for this vf. */
1585 	for (idx = 0; idx < nb_q_per_pool; idx++) {
1586 		if (((uint64_t)0x1 << idx) & q_msk) {
1587 			if (vfinfo[vf].tx_rate[idx] != tx_rate)
1588 				vfinfo[vf].tx_rate[idx] = tx_rate;
1589 			total_rate += tx_rate;
1590 		}
1591 	}
1592 
1593 	if (total_rate > dev->data->dev_link.link_speed) {
1594 		/* Reset the stored TX rate of the VF if it would exceed the
1595 		 * link speed.
1596 		 */
1597 		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
1598 		return -EINVAL;
1599 	}
1600 
1601 	/* Set ARBTXRATE for each queue/pool of this VF */
1602 	for (; queue_idx <= queue_end; queue_idx++) {
1603 		if (0x1 & q_msk)
1604 			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
1605 		q_msk = q_msk >> 1;
1606 	}
1607 
1608 	return 0;
1609 }
1610 
1611 /*
1612  * Configure device link speed and setup link.
1613  * It returns 0 on success.
1614  */
1615 static int
1616 txgbe_dev_start(struct rte_eth_dev *dev)
1617 {
1618 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1619 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1620 	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1621 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1622 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1623 	uint32_t intr_vector = 0;
1624 	int err;
1625 	bool link_up = false, negotiate = 0;
1626 	uint32_t speed = 0;
1627 	uint32_t allowed_speeds = 0;
1628 	int mask = 0;
1629 	int status;
1630 	uint16_t vf, idx;
1631 	uint32_t *link_speeds;
1632 	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
1633 
1634 	PMD_INIT_FUNC_TRACE();
1635 
1636 	/* TXGBE devices don't support:
1637 	 *    - half duplex (checked afterwards for valid speeds)
1638 	 *    - fixed speed: TODO implement
1639 	 */
1640 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1641 		PMD_INIT_LOG(ERR,
1642 		"Invalid link_speeds for port %u, fixed speed not supported",
1643 				dev->data->port_id);
1644 		return -EINVAL;
1645 	}
1646 
1647 	/* Stop the link setup handler before resetting the HW. */
1648 	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1649 
1650 	/* disable uio/vfio intr/eventfd mapping */
1651 	rte_intr_disable(intr_handle);
1652 
1653 	/* stop adapter */
1654 	hw->adapter_stopped = 0;
1655 	txgbe_stop_hw(hw);
1656 
1657 	/* reinitialize adapter
1658 	 * this calls reset and start
1659 	 */
1660 	hw->nb_rx_queues = dev->data->nb_rx_queues;
1661 	hw->nb_tx_queues = dev->data->nb_tx_queues;
1662 	status = txgbe_pf_reset_hw(hw);
1663 	if (status != 0)
1664 		return -1;
1665 	hw->mac.start_hw(hw);
1666 	hw->mac.get_link_status = true;
1667 	hw->dev_start = true;
1668 
1669 	/* configure PF module if SRIOV enabled */
1670 	txgbe_pf_host_configure(dev);
1671 
1672 	txgbe_dev_phy_intr_setup(dev);
1673 
1674 	/* check and configure queue intr-vector mapping */
1675 	if ((rte_intr_cap_multiple(intr_handle) ||
1676 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
1677 	    dev->data->dev_conf.intr_conf.rxq != 0) {
1678 		intr_vector = dev->data->nb_rx_queues;
1679 		if (rte_intr_efd_enable(intr_handle, intr_vector))
1680 			return -1;
1681 	}
1682 
1683 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1684 		intr_handle->intr_vec =
1685 			rte_zmalloc("intr_vec",
1686 				    dev->data->nb_rx_queues * sizeof(int), 0);
1687 		if (intr_handle->intr_vec == NULL) {
1688 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1689 				     " intr_vec", dev->data->nb_rx_queues);
1690 			return -ENOMEM;
1691 		}
1692 	}
1693 
1694 	/* configure MSI-X for sleeping until an Rx interrupt arrives */
1695 	txgbe_configure_msix(dev);
1696 
1697 	/* initialize transmission unit */
1698 	txgbe_dev_tx_init(dev);
1699 
1700 	/* This can fail when allocating mbufs for descriptor rings */
1701 	err = txgbe_dev_rx_init(dev);
1702 	if (err) {
1703 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1704 		goto error;
1705 	}
1706 
1707 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1708 		ETH_VLAN_EXTEND_MASK;
1709 	err = txgbe_vlan_offload_config(dev, mask);
1710 	if (err) {
1711 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1712 		goto error;
1713 	}
1714 
1715 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1716 		/* Enable vlan filtering for VMDq */
1717 		txgbe_vmdq_vlan_hw_filter_enable(dev);
1718 	}
1719 
1720 	/* Configure DCB hw */
1721 	txgbe_configure_pb(dev);
1722 	txgbe_configure_port(dev);
1723 	txgbe_configure_dcb(dev);
1724 
1725 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1726 		err = txgbe_fdir_configure(dev);
1727 		if (err)
1728 			goto error;
1729 	}
1730 
1731 	/* Restore vf rate limit */
1732 	if (vfinfo != NULL) {
1733 		for (vf = 0; vf < pci_dev->max_vfs; vf++)
1734 			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
1735 				if (vfinfo[vf].tx_rate[idx] != 0)
1736 					txgbe_set_vf_rate_limit(dev, vf,
1737 						vfinfo[vf].tx_rate[idx],
1738 						1 << idx);
1739 	}
1740 
1741 	err = txgbe_dev_rxtx_start(dev);
1742 	if (err < 0) {
1743 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1744 		goto error;
1745 	}
1746 
1747 	/* Skip link setup if loopback mode is enabled. */
1748 	if (hw->mac.type == txgbe_mac_raptor &&
1749 	    dev->data->dev_conf.lpbk_mode)
1750 		goto skip_link_setup;
1751 
1752 	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1753 		err = hw->mac.setup_sfp(hw);
1754 		if (err)
1755 			goto error;
1756 	}
1757 
1758 	if (hw->phy.media_type == txgbe_media_type_copper) {
1759 		/* Turn on the copper */
1760 		hw->phy.set_phy_power(hw, true);
1761 	} else {
1762 		/* Turn on the laser */
1763 		hw->mac.enable_tx_laser(hw);
1764 	}
1765 
1766 	if ((hw->subsystem_device_id & 0xFF) != TXGBE_DEV_ID_KR_KX_KX4)
1767 		err = hw->mac.check_link(hw, &speed, &link_up, 0);
1768 	if (err)
1769 		goto error;
1770 	dev->data->dev_link.link_status = link_up;
1771 
1772 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1773 	if (err)
1774 		goto error;
1775 
1776 	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1777 			ETH_LINK_SPEED_10G;
1778 
1779 	link_speeds = &dev->data->dev_conf.link_speeds;
1780 	if (*link_speeds & ~allowed_speeds) {
1781 		PMD_INIT_LOG(ERR, "Invalid link setting");
1782 		goto error;
1783 	}
1784 
1785 	speed = 0x0;
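	/* Translate the requested ETH_LINK_SPEED_* flags into the device's
	 * TXGBE_LINK_SPEED_* advertisement mask; AUTONEG advertises
	 * 100M/1G/10G full duplex.
	 */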
1786 	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1787 		speed = (TXGBE_LINK_SPEED_100M_FULL |
1788 			 TXGBE_LINK_SPEED_1GB_FULL |
1789 			 TXGBE_LINK_SPEED_10GB_FULL);
1790 	} else {
1791 		if (*link_speeds & ETH_LINK_SPEED_10G)
1792 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
1793 		if (*link_speeds & ETH_LINK_SPEED_5G)
1794 			speed |= TXGBE_LINK_SPEED_5GB_FULL;
1795 		if (*link_speeds & ETH_LINK_SPEED_2_5G)
1796 			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1797 		if (*link_speeds & ETH_LINK_SPEED_1G)
1798 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
1799 		if (*link_speeds & ETH_LINK_SPEED_100M)
1800 			speed |= TXGBE_LINK_SPEED_100M_FULL;
1801 	}
1802 
1803 	err = hw->mac.setup_link(hw, speed, link_up);
1804 	if (err)
1805 		goto error;
1806 
1807 skip_link_setup:
1808 
1809 	if (rte_intr_allow_others(intr_handle)) {
1810 		txgbe_dev_misc_interrupt_setup(dev);
1811 		/* check if lsc interrupt is enabled */
1812 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1813 			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1814 		else
1815 			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1816 		txgbe_dev_macsec_interrupt_setup(dev);
1817 		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1818 	} else {
1819 		rte_intr_callback_unregister(intr_handle,
1820 					     txgbe_dev_interrupt_handler, dev);
1821 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1822 			PMD_INIT_LOG(INFO, "lsc won't be enabled because"
1823 				     " there is no intr multiplex");
1824 	}
1825 
1826 	/* check if rxq interrupt is enabled */
1827 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1828 	    rte_intr_dp_is_en(intr_handle))
1829 		txgbe_dev_rxq_interrupt_setup(dev);
1830 
1831 	/* enable uio/vfio intr/eventfd mapping */
1832 	rte_intr_enable(intr_handle);
1833 
1834 	/* re-enable the interrupts that were enabled before the hw reset */
1835 	txgbe_enable_intr(dev);
1836 	txgbe_l2_tunnel_conf(dev);
1837 	txgbe_filter_restore(dev);
1838 
1839 	if (tm_conf->root && !tm_conf->committed)
1840 		PMD_DRV_LOG(WARNING,
1841 			    "please call hierarchy_commit() "
1842 			    "before starting the port");
1843 
1844 	/*
1845 	 * Update link status right before return, because it may
1846 	 * start link configuration process in a separate thread.
1847 	 */
1848 	txgbe_dev_link_update(dev, 0);
1849 
1850 	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1851 
1852 	txgbe_read_stats_registers(hw, hw_stats);
1853 	hw->offset_loaded = 1;
1854 
1855 	return 0;
1856 
1857 error:
1858 	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1859 	txgbe_dev_clear_queues(dev);
1860 	return -EIO;
1861 }
1862 
1863 /*
1864  * Stop device: disable rx and tx functions to allow for reconfiguring.
1865  */
1866 static int
1867 txgbe_dev_stop(struct rte_eth_dev *dev)
1868 {
1869 	struct rte_eth_link link;
1870 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1871 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1872 	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1873 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1874 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1875 	int vf;
1876 	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
1877 
1878 	if (hw->adapter_stopped)
1879 		return 0;
1880 
1881 	PMD_INIT_FUNC_TRACE();
1882 
1883 	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1884 
1885 	/* disable interrupts */
1886 	txgbe_disable_intr(hw);
1887 
1888 	/* reset the NIC */
1889 	txgbe_pf_reset_hw(hw);
1890 	hw->adapter_stopped = 0;
1891 
1892 	/* stop adapter */
1893 	txgbe_stop_hw(hw);
1894 
1895 	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1896 		vfinfo[vf].clear_to_send = false;
1897 
1898 	if (hw->phy.media_type == txgbe_media_type_copper) {
1899 		/* Turn off the copper */
1900 		hw->phy.set_phy_power(hw, false);
1901 	} else {
1902 		/* Turn off the laser */
1903 		hw->mac.disable_tx_laser(hw);
1904 	}
1905 
1906 	txgbe_dev_clear_queues(dev);
1907 
1908 	/* Clear stored conf */
1909 	dev->data->scattered_rx = 0;
1910 	dev->data->lro = 0;
1911 
1912 	/* Clear recorded link status */
1913 	memset(&link, 0, sizeof(link));
1914 	rte_eth_linkstatus_set(dev, &link);
1915 
1916 	if (!rte_intr_allow_others(intr_handle))
1917 		/* resume to the default handler */
1918 		rte_intr_callback_register(intr_handle,
1919 					   txgbe_dev_interrupt_handler,
1920 					   (void *)dev);
1921 
1922 	/* Clean datapath event and queue/vec mapping */
1923 	rte_intr_efd_disable(intr_handle);
1924 	if (intr_handle->intr_vec != NULL) {
1925 		rte_free(intr_handle->intr_vec);
1926 		intr_handle->intr_vec = NULL;
1927 	}
1928 
1929 	/* reset hierarchy commit */
1930 	tm_conf->committed = false;
1931 
1932 	adapter->rss_reta_updated = 0;
1933 	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1934 
1935 	hw->adapter_stopped = true;
1936 	dev->data->dev_started = 0;
1937 	hw->dev_start = false;
1938 
1939 	return 0;
1940 }
1941 
1942 /*
1943  * Set device link up: enable tx.
1944  */
1945 static int
1946 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1947 {
1948 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1949 
1950 	if (hw->phy.media_type == txgbe_media_type_copper) {
1951 		/* Turn on the copper */
1952 		hw->phy.set_phy_power(hw, true);
1953 	} else {
1954 		/* Turn on the laser */
1955 		hw->mac.enable_tx_laser(hw);
1956 		txgbe_dev_link_update(dev, 0);
1957 	}
1958 
1959 	return 0;
1960 }
1961 
1962 /*
1963  * Set device link down: disable tx.
1964  */
1965 static int
1966 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1967 {
1968 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1969 
1970 	if (hw->phy.media_type == txgbe_media_type_copper) {
1971 		/* Turn off the copper */
1972 		hw->phy.set_phy_power(hw, false);
1973 	} else {
1974 		/* Turn off the laser */
1975 		hw->mac.disable_tx_laser(hw);
1976 		txgbe_dev_link_update(dev, 0);
1977 	}
1978 
1979 	return 0;
1980 }
1981 
1982 /*
1983  * Reset and stop device.
1984  */
1985 static int
1986 txgbe_dev_close(struct rte_eth_dev *dev)
1987 {
1988 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1989 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1990 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1991 	int retries = 0;
1992 	int ret;
1993 
1994 	PMD_INIT_FUNC_TRACE();
1995 
1996 	txgbe_pf_reset_hw(hw);
1997 
1998 	ret = txgbe_dev_stop(dev);
1999 
2000 	txgbe_dev_free_queues(dev);
2001 
2002 	/* reprogram the RAR[0] in case the user changed it. */
2003 	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
2004 
2005 	/* Unlock any pending hardware semaphore */
2006 	txgbe_swfw_lock_reset(hw);
2007 
2008 	/* disable uio intr before callback unregister */
2009 	rte_intr_disable(intr_handle);
2010 
2011 	do {
2012 		ret = rte_intr_callback_unregister(intr_handle,
2013 				txgbe_dev_interrupt_handler, dev);
2014 		if (ret >= 0 || ret == -ENOENT) {
2015 			break;
2016 		} else if (ret != -EAGAIN) {
2017 			PMD_INIT_LOG(ERR,
2018 				"intr callback unregister failed: %d",
2019 				ret);
2020 		}
2021 		rte_delay_ms(100);
2022 	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));
2023 
2024 	/* cancel the delayed handler before removing the device */
2025 	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
2026 
2027 	/* uninitialize PF if max_vfs is not zero */
2028 	txgbe_pf_host_uninit(dev);
2029 
2030 	rte_free(dev->data->mac_addrs);
2031 	dev->data->mac_addrs = NULL;
2032 
2033 	rte_free(dev->data->hash_mac_addrs);
2034 	dev->data->hash_mac_addrs = NULL;
2035 
2036 	/* remove all the fdir filters & hash */
2037 	txgbe_fdir_filter_uninit(dev);
2038 
2039 	/* remove all the L2 tunnel filters & hash */
2040 	txgbe_l2_tn_filter_uninit(dev);
2041 
2042 	/* Remove all ntuple filters of the device */
2043 	txgbe_ntuple_filter_uninit(dev);
2044 
2045 	/* clear all the filters list */
2046 	txgbe_filterlist_flush();
2047 
2048 	/* Remove all Traffic Manager configuration */
2049 	txgbe_tm_conf_uninit(dev);
2050 
2051 #ifdef RTE_LIB_SECURITY
2052 	rte_free(dev->security_ctx);
2053 #endif
2054 
2055 	return ret;
2056 }
2057 
2058 /*
2059  * Reset PF device.
2060  */
2061 static int
2062 txgbe_dev_reset(struct rte_eth_dev *dev)
2063 {
2064 	int ret;
2065 
2066 	/* When a DPDK PMD PF begins to reset the PF port, it should notify all
2067 	 * its VFs so that they stay aligned with it. The detailed notification
2068 	 * mechanism is PMD specific. For the txgbe PF it is rather complex, so
2069 	 * to avoid unexpected behavior in the VFs, resetting a PF with SR-IOV
2070 	 * activated is currently not supported. It might be supported later.
2071 	 */
2072 	if (dev->data->sriov.active)
2073 		return -ENOTSUP;
2074 
2075 	ret = eth_txgbe_dev_uninit(dev);
2076 	if (ret)
2077 		return ret;
2078 
2079 	ret = eth_txgbe_dev_init(dev, NULL);
2080 
2081 	return ret;
2082 }
2083 
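/*
 * Helpers for reading the per-queue-pair hardware counters: read the
 * free-running register, compensate for a wrap-around when the new value is
 * smaller than the previous one, latch the baseline on the first read after
 * a reset (offset_loaded == 0), and report the delta since that baseline,
 * masked to the counter width.
 */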
2084 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
2085 	{                                                       \
2086 		uint32_t current_counter = rd32(hw, reg);       \
2087 		if (current_counter < last_counter)             \
2088 			current_counter += 0x100000000LL;       \
2089 		if (!hw->offset_loaded)                         \
2090 			last_counter = current_counter;         \
2091 		counter = current_counter - last_counter;       \
2092 		counter &= 0xFFFFFFFFLL;                        \
2093 	}
2094 
2095 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2096 	{                                                                \
2097 		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
2098 		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
2099 		uint64_t current_counter = (current_counter_msb << 32) | \
2100 			current_counter_lsb;                             \
2101 		if (current_counter < last_counter)                      \
2102 			current_counter += 0x1000000000LL;               \
2103 		if (!hw->offset_loaded)                                  \
2104 			last_counter = current_counter;                  \
2105 		counter = current_counter - last_counter;                \
2106 		counter &= 0xFFFFFFFFFLL;                                \
2107 	}
2108 
2109 void
2110 txgbe_read_stats_registers(struct txgbe_hw *hw,
2111 			   struct txgbe_hw_stats *hw_stats)
2112 {
2113 	unsigned int i;
2114 
2115 	/* QP Stats */
2116 	for (i = 0; i < hw->nb_rx_queues; i++) {
2117 		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
2118 			hw->qp_last[i].rx_qp_packets,
2119 			hw_stats->qp[i].rx_qp_packets);
2120 		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
2121 			hw->qp_last[i].rx_qp_bytes,
2122 			hw_stats->qp[i].rx_qp_bytes);
2123 		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
2124 			hw->qp_last[i].rx_qp_mc_packets,
2125 			hw_stats->qp[i].rx_qp_mc_packets);
2126 	}
2127 
2128 	for (i = 0; i < hw->nb_tx_queues; i++) {
2129 		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
2130 			hw->qp_last[i].tx_qp_packets,
2131 			hw_stats->qp[i].tx_qp_packets);
2132 		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
2133 			hw->qp_last[i].tx_qp_bytes,
2134 			hw_stats->qp[i].tx_qp_bytes);
2135 	}
2136 	/* PB Stats */
2137 	for (i = 0; i < TXGBE_MAX_UP; i++) {
2138 		hw_stats->up[i].rx_up_xon_packets +=
2139 				rd32(hw, TXGBE_PBRXUPXON(i));
2140 		hw_stats->up[i].rx_up_xoff_packets +=
2141 				rd32(hw, TXGBE_PBRXUPXOFF(i));
2142 		hw_stats->up[i].tx_up_xon_packets +=
2143 				rd32(hw, TXGBE_PBTXUPXON(i));
2144 		hw_stats->up[i].tx_up_xoff_packets +=
2145 				rd32(hw, TXGBE_PBTXUPXOFF(i));
2146 		hw_stats->up[i].tx_up_xon2off_packets +=
2147 				rd32(hw, TXGBE_PBTXUPOFF(i));
2148 		hw_stats->up[i].rx_up_dropped +=
2149 				rd32(hw, TXGBE_PBRXMISS(i));
2150 	}
2151 	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
2152 	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
2153 	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
2154 	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
2155 
2156 	/* DMA Stats */
2157 	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
2158 	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
2159 
2160 	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
2161 	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
2162 	hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP);
2163 	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
2164 
2165 	/* MAC Stats */
2166 	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
2167 	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
2168 	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
2169 
2170 	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
2171 	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
2172 	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
2173 
2174 	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
2175 	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
2176 
2177 	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
2178 	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
2179 	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
2180 	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
2181 	hw_stats->rx_size_512_to_1023_packets +=
2182 			rd64(hw, TXGBE_MACRX512TO1023L);
2183 	hw_stats->rx_size_1024_to_max_packets +=
2184 			rd64(hw, TXGBE_MACRX1024TOMAXL);
2185 	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
2186 	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
2187 	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
2188 	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
2189 	hw_stats->tx_size_512_to_1023_packets +=
2190 			rd64(hw, TXGBE_MACTX512TO1023L);
2191 	hw_stats->tx_size_1024_to_max_packets +=
2192 			rd64(hw, TXGBE_MACTX1024TOMAXL);
2193 
2194 	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
2195 	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
2196 	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
2197 
2198 	/* MNG Stats */
2199 	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
2200 	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
2201 	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
2202 	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
2203 
2204 	/* FCoE Stats */
2205 	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
2206 	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
2207 	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
2208 	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
2209 	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
2210 	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
2211 	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
2212 
2213 	/* Flow Director Stats */
2214 	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
2215 	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
2216 	hw_stats->flow_director_added_filters +=
2217 		TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
2218 	hw_stats->flow_director_removed_filters +=
2219 		TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
2220 	hw_stats->flow_director_filter_add_errors +=
2221 		TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
2222 	hw_stats->flow_director_filter_remove_errors +=
2223 		TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
2224 
2225 	/* MACsec Stats */
2226 	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
2227 	hw_stats->tx_macsec_pkts_encrypted +=
2228 			rd32(hw, TXGBE_LSECTX_ENCPKT);
2229 	hw_stats->tx_macsec_pkts_protected +=
2230 			rd32(hw, TXGBE_LSECTX_PROTPKT);
2231 	hw_stats->tx_macsec_octets_encrypted +=
2232 			rd32(hw, TXGBE_LSECTX_ENCOCT);
2233 	hw_stats->tx_macsec_octets_protected +=
2234 			rd32(hw, TXGBE_LSECTX_PROTOCT);
2235 	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
2236 	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
2237 	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
2238 	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
2239 	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
2240 	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
2241 	hw_stats->rx_macsec_sc_pkts_unchecked +=
2242 			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
2243 	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
2244 	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
2245 	for (i = 0; i < 2; i++) {
2246 		hw_stats->rx_macsec_sa_pkts_ok +=
2247 			rd32(hw, TXGBE_LSECRX_OKPKT(i));
2248 		hw_stats->rx_macsec_sa_pkts_invalid +=
2249 			rd32(hw, TXGBE_LSECRX_INVPKT(i));
2250 		hw_stats->rx_macsec_sa_pkts_notvalid +=
2251 			rd32(hw, TXGBE_LSECRX_BADPKT(i));
2252 	}
2253 	hw_stats->rx_macsec_sa_pkts_unusedsa +=
2254 			rd32(hw, TXGBE_LSECRX_INVSAPKT);
2255 	hw_stats->rx_macsec_sa_pkts_notusingsa +=
2256 			rd32(hw, TXGBE_LSECRX_BADSAPKT);
2257 
2258 	hw_stats->rx_total_missed_packets = 0;
2259 	for (i = 0; i < TXGBE_MAX_UP; i++) {
2260 		hw_stats->rx_total_missed_packets +=
2261 			hw_stats->up[i].rx_up_dropped;
2262 	}
2263 }
2264 
2265 static int
2266 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2267 {
2268 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2269 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2270 	struct txgbe_stat_mappings *stat_mappings =
2271 			TXGBE_DEV_STAT_MAPPINGS(dev);
2272 	uint32_t i, j;
2273 
2274 	txgbe_read_stats_registers(hw, hw_stats);
2275 
2276 	if (stats == NULL)
2277 		return -EINVAL;
2278 
2279 	/* Fill out the rte_eth_stats statistics structure */
2280 	stats->ipackets = hw_stats->rx_packets;
2281 	stats->ibytes = hw_stats->rx_bytes;
2282 	stats->opackets = hw_stats->tx_packets;
2283 	stats->obytes = hw_stats->tx_bytes;
2284 
2285 	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
2286 	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
2287 	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
2288 	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
2289 	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
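	/*
	 * Each 32-bit RQSM/TQSM mapping register packs
	 * NB_QMAP_FIELDS_PER_QSM_REG 8-bit fields; the field for queue i
	 * gives the per-queue stats counter it was mapped to, folded into
	 * the RTE_ETHDEV_QUEUE_STAT_CNTRS range.
	 */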
2290 	for (i = 0; i < TXGBE_MAX_QP; i++) {
2291 		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2292 		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
2293 		uint32_t q_map;
2294 
2295 		q_map = (stat_mappings->rqsm[n] >> offset)
2296 				& QMAP_FIELD_RESERVED_BITS_MASK;
2297 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2298 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2299 		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2300 		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2301 
2302 		q_map = (stat_mappings->tqsm[n] >> offset)
2303 				& QMAP_FIELD_RESERVED_BITS_MASK;
2304 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2305 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2306 		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2307 		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2308 	}
2309 
2310 	/* Rx Errors */
2311 	stats->imissed  = hw_stats->rx_total_missed_packets +
2312 			  hw_stats->rx_dma_drop;
2313 	stats->ierrors  = hw_stats->rx_crc_errors +
2314 			  hw_stats->rx_mac_short_packet_dropped +
2315 			  hw_stats->rx_length_errors +
2316 			  hw_stats->rx_undersize_errors +
2317 			  hw_stats->rx_oversize_errors +
2318 			  hw_stats->rx_drop_packets +
2319 			  hw_stats->rx_illegal_byte_errors +
2320 			  hw_stats->rx_error_bytes +
2321 			  hw_stats->rx_fragment_errors +
2322 			  hw_stats->rx_fcoe_crc_errors +
2323 			  hw_stats->rx_fcoe_mbuf_allocation_errors;
2324 
2325 	/* Tx Errors */
2326 	stats->oerrors  = 0;
2327 	return 0;
2328 }
2329 
2330 static int
2331 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2332 {
2333 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2334 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2335 
2336 	/* HW registers are cleared on read */
2337 	hw->offset_loaded = 0;
2338 	txgbe_dev_stats_get(dev, NULL);
2339 	hw->offset_loaded = 1;
2340 
2341 	/* Reset software totals */
2342 	memset(hw_stats, 0, sizeof(*hw_stats));
2343 
2344 	return 0;
2345 }
2346 
2347 /* This function calculates the number of xstats based on the current config */
2348 static unsigned
2349 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2350 {
2351 	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2352 	return TXGBE_NB_HW_STATS +
2353 	       TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2354 	       TXGBE_NB_QP_STATS * nb_queues;
2355 }
2356 
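/*
 * xstats ids are laid out as TXGBE_NB_HW_STATS device-wide stats, followed
 * by TXGBE_NB_UP_STATS entries for each of the TXGBE_MAX_UP priorities,
 * followed by TXGBE_NB_QP_STATS entries per queue pair. The helpers below
 * translate an id into a display name or a byte offset into
 * struct txgbe_hw_stats.
 */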
2357 static inline int
2358 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2359 {
2360 	int nb, st;
2361 
2362 	/* Extended stats from txgbe_hw_stats */
2363 	if (id < TXGBE_NB_HW_STATS) {
2364 		snprintf(name, size, "[hw]%s",
2365 			rte_txgbe_stats_strings[id].name);
2366 		return 0;
2367 	}
2368 	id -= TXGBE_NB_HW_STATS;
2369 
2370 	/* Priority Stats */
2371 	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2372 		nb = id / TXGBE_NB_UP_STATS;
2373 		st = id % TXGBE_NB_UP_STATS;
2374 		snprintf(name, size, "[p%u]%s", nb,
2375 			rte_txgbe_up_strings[st].name);
2376 		return 0;
2377 	}
2378 	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2379 
2380 	/* Queue Stats */
2381 	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2382 		nb = id / TXGBE_NB_QP_STATS;
2383 		st = id % TXGBE_NB_QP_STATS;
2384 		snprintf(name, size, "[q%u]%s", nb,
2385 			rte_txgbe_qp_strings[st].name);
2386 		return 0;
2387 	}
2388 	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2389 
2390 	return -(int)(id + 1);
2391 }
2392 
2393 static inline int
2394 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2395 {
2396 	int nb, st;
2397 
2398 	/* Extended stats from txgbe_hw_stats */
2399 	if (id < TXGBE_NB_HW_STATS) {
2400 		*offset = rte_txgbe_stats_strings[id].offset;
2401 		return 0;
2402 	}
2403 	id -= TXGBE_NB_HW_STATS;
2404 
2405 	/* Priority Stats */
2406 	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2407 		nb = id / TXGBE_NB_UP_STATS;
2408 		st = id % TXGBE_NB_UP_STATS;
2409 		*offset = rte_txgbe_up_strings[st].offset +
2410 			nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2411 		return 0;
2412 	}
2413 	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2414 
2415 	/* Queue Stats */
2416 	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2417 		nb = id / TXGBE_NB_QP_STATS;
2418 		st = id % TXGBE_NB_QP_STATS;
2419 		*offset = rte_txgbe_qp_strings[st].offset +
2420 			nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2421 		return 0;
2422 	}
2423 
2424 	return -1;
2425 }
2426 
2427 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2428 	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2429 {
2430 	unsigned int i, count;
2431 
2432 	count = txgbe_xstats_calc_num(dev);
2433 	if (xstats_names == NULL)
2434 		return count;
2435 
2436 	/* Note: limit >= cnt_stats checked upstream
2437 	 * in rte_eth_xstats_names()
2438 	 */
2439 	limit = min(limit, count);
2440 
2441 	/* Extended stats from txgbe_hw_stats */
2442 	for (i = 0; i < limit; i++) {
2443 		if (txgbe_get_name_by_id(i, xstats_names[i].name,
2444 			sizeof(xstats_names[i].name))) {
2445 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2446 			break;
2447 		}
2448 	}
2449 
2450 	return i;
2451 }
2452 
2453 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2454 	struct rte_eth_xstat_name *xstats_names,
2455 	const uint64_t *ids,
2456 	unsigned int limit)
2457 {
2458 	unsigned int i;
2459 
2460 	if (ids == NULL)
2461 		return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2462 
2463 	for (i = 0; i < limit; i++) {
2464 		if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2465 				sizeof(xstats_names[i].name))) {
2466 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2467 			return -1;
2468 		}
2469 	}
2470 
2471 	return i;
2472 }
2473 
2474 static int
2475 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2476 					 unsigned int limit)
2477 {
2478 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2479 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2480 	unsigned int i, count;
2481 
2482 	txgbe_read_stats_registers(hw, hw_stats);
2483 
2484 	/* If this is a reset, xstats is NULL, and we have already cleared the
2485 	 * registers by reading them.
2486 	 */
2487 	count = txgbe_xstats_calc_num(dev);
2488 	if (xstats == NULL)
2489 		return count;
2490 
2491 	limit = min(limit, txgbe_xstats_calc_num(dev));
2492 
2493 	/* Extended stats from txgbe_hw_stats */
2494 	for (i = 0; i < limit; i++) {
2495 		uint32_t offset = 0;
2496 
2497 		if (txgbe_get_offset_by_id(i, &offset)) {
2498 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2499 			break;
2500 		}
2501 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2502 		xstats[i].id = i;
2503 	}
2504 
2505 	return i;
2506 }
2507 
2508 static int
2509 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2510 					 unsigned int limit)
2511 {
2512 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2513 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2514 	unsigned int i, count;
2515 
2516 	txgbe_read_stats_registers(hw, hw_stats);
2517 
2518 	/* If this is a reset, values is NULL, and we have already cleared the
2519 	 * registers by reading them.
2520 	 */
2521 	count = txgbe_xstats_calc_num(dev);
2522 	if (values == NULL)
2523 		return count;
2524 
2525 	limit = min(limit, txgbe_xstats_calc_num(dev));
2526 
2527 	/* Extended stats from txgbe_hw_stats */
2528 	for (i = 0; i < limit; i++) {
2529 		uint32_t offset;
2530 
2531 		if (txgbe_get_offset_by_id(i, &offset)) {
2532 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2533 			break;
2534 		}
2535 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2536 	}
2537 
2538 	return i;
2539 }
2540 
2541 static int
2542 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2543 		uint64_t *values, unsigned int limit)
2544 {
2545 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2546 	unsigned int i;
2547 
2548 	if (ids == NULL)
2549 		return txgbe_dev_xstats_get_(dev, values, limit);
2550 
2551 	for (i = 0; i < limit; i++) {
2552 		uint32_t offset;
2553 
2554 		if (txgbe_get_offset_by_id(ids[i], &offset)) {
2555 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2556 			break;
2557 		}
2558 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2559 	}
2560 
2561 	return i;
2562 }
2563 
2564 static int
2565 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2566 {
2567 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2568 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2569 
2570 	/* HW registers are cleared on read */
2571 	hw->offset_loaded = 0;
2572 	txgbe_read_stats_registers(hw, hw_stats);
2573 	hw->offset_loaded = 1;
2574 
2575 	/* Reset software totals */
2576 	memset(hw_stats, 0, sizeof(*hw_stats));
2577 
2578 	return 0;
2579 }
2580 
2581 static int
2582 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2583 {
2584 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2585 	u32 etrack_id;
2586 	int ret;
2587 
2588 	hw->phy.get_fw_version(hw, &etrack_id);
2589 
2590 	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2591 	if (ret < 0)
2592 		return -EINVAL;
2593 
2594 	ret += 1; /* add the size of '\0' */
2595 	if (fw_size < (size_t)ret)
2596 		return ret;
2597 	else
2598 		return 0;
2599 }
2600 
2601 static int
2602 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2603 {
2604 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2605 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2606 
2607 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2608 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2609 	dev_info->min_rx_bufsize = 1024;
2610 	dev_info->max_rx_pktlen = 15872;
2611 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2612 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2613 	dev_info->max_vfs = pci_dev->max_vfs;
2614 	dev_info->max_vmdq_pools = ETH_64_POOLS;
2615 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2616 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2617 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2618 				     dev_info->rx_queue_offload_capa);
2619 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2620 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2621 
2622 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
2623 		.rx_thresh = {
2624 			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2625 			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2626 			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2627 		},
2628 		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2629 		.rx_drop_en = 0,
2630 		.offloads = 0,
2631 	};
2632 
2633 	dev_info->default_txconf = (struct rte_eth_txconf) {
2634 		.tx_thresh = {
2635 			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2636 			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2637 			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2638 		},
2639 		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2640 		.offloads = 0,
2641 	};
2642 
2643 	dev_info->rx_desc_lim = rx_desc_lim;
2644 	dev_info->tx_desc_lim = tx_desc_lim;
2645 
2646 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2647 	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2648 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2649 
2650 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2651 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2652 
2653 	/* Driver-preferred Rx/Tx parameters */
2654 	dev_info->default_rxportconf.burst_size = 32;
2655 	dev_info->default_txportconf.burst_size = 32;
2656 	dev_info->default_rxportconf.nb_queues = 1;
2657 	dev_info->default_txportconf.nb_queues = 1;
2658 	dev_info->default_rxportconf.ring_size = 256;
2659 	dev_info->default_txportconf.ring_size = 256;
2660 
2661 	return 0;
2662 }
2663 
2664 const uint32_t *
2665 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2666 {
2667 	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2668 	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2669 	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2670 	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2671 		return txgbe_get_supported_ptypes();
2672 
2673 	return NULL;
2674 }
2675 
2676 void
2677 txgbe_dev_setup_link_alarm_handler(void *param)
2678 {
2679 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2680 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2681 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2682 	u32 speed;
2683 	bool autoneg = false;
2684 
2685 	speed = hw->phy.autoneg_advertised;
2686 	if (!speed)
2687 		hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2688 
2689 	hw->mac.setup_link(hw, speed, true);
2690 
2691 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2692 }
2693 
2694 /* return 0 if the link status changed, -1 if it did not change */
2695 int
2696 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2697 			    int wait_to_complete)
2698 {
2699 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2700 	struct rte_eth_link link;
2701 	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2702 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2703 	bool link_up;
2704 	int err;
2705 	int wait = 1;
2706 
2707 	memset(&link, 0, sizeof(link));
2708 	link.link_status = ETH_LINK_DOWN;
2709 	link.link_speed = ETH_SPEED_NUM_NONE;
2710 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
2711 	link.link_autoneg = ETH_LINK_AUTONEG;
2712 
2713 	hw->mac.get_link_status = true;
2714 
2715 	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2716 		return rte_eth_linkstatus_set(dev, &link);
2717 
2718 	/* skip the wait if the caller did not request it or the lsc interrupt is enabled */
2719 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2720 		wait = 0;
2721 
2722 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2723 
2724 	if (err != 0) {
2725 		link.link_speed = ETH_SPEED_NUM_100M;
2726 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
2727 		return rte_eth_linkstatus_set(dev, &link);
2728 	}
2729 
2730 	if (link_up == 0) {
2731 		if ((hw->subsystem_device_id & 0xFF) ==
2732 				TXGBE_DEV_ID_KR_KX_KX4) {
2733 			hw->mac.bp_down_event(hw);
2734 		} else if (hw->phy.media_type == txgbe_media_type_fiber) {
2735 			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2736 			rte_eal_alarm_set(10,
2737 				txgbe_dev_setup_link_alarm_handler, dev);
2738 		}
2739 		return rte_eth_linkstatus_set(dev, &link);
2740 	} else if (!hw->dev_start) {
2741 		return rte_eth_linkstatus_set(dev, &link);
2742 	}
2743 
2744 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2745 	link.link_status = ETH_LINK_UP;
2746 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
2747 
2748 	switch (link_speed) {
2749 	default:
2750 	case TXGBE_LINK_SPEED_UNKNOWN:
2751 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
2752 		link.link_speed = ETH_SPEED_NUM_100M;
2753 		break;
2754 
2755 	case TXGBE_LINK_SPEED_100M_FULL:
2756 		link.link_speed = ETH_SPEED_NUM_100M;
2757 		break;
2758 
2759 	case TXGBE_LINK_SPEED_1GB_FULL:
2760 		link.link_speed = ETH_SPEED_NUM_1G;
2761 		break;
2762 
2763 	case TXGBE_LINK_SPEED_2_5GB_FULL:
2764 		link.link_speed = ETH_SPEED_NUM_2_5G;
2765 		break;
2766 
2767 	case TXGBE_LINK_SPEED_5GB_FULL:
2768 		link.link_speed = ETH_SPEED_NUM_5G;
2769 		break;
2770 
2771 	case TXGBE_LINK_SPEED_10GB_FULL:
2772 		link.link_speed = ETH_SPEED_NUM_10G;
2773 		break;
2774 	}
2775 
2776 	return rte_eth_linkstatus_set(dev, &link);
2777 }
2778 
2779 static int
2780 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2781 {
2782 	return txgbe_dev_link_update_share(dev, wait_to_complete);
2783 }
2784 
2785 static int
2786 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2787 {
2788 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2789 	uint32_t fctrl;
2790 
2791 	fctrl = rd32(hw, TXGBE_PSRCTL);
2792 	fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2793 	wr32(hw, TXGBE_PSRCTL, fctrl);
2794 
2795 	return 0;
2796 }
2797 
2798 static int
2799 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2800 {
2801 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2802 	uint32_t fctrl;
2803 
2804 	fctrl = rd32(hw, TXGBE_PSRCTL);
2805 	fctrl &= (~TXGBE_PSRCTL_UCP);
2806 	if (dev->data->all_multicast == 1)
2807 		fctrl |= TXGBE_PSRCTL_MCP;
2808 	else
2809 		fctrl &= (~TXGBE_PSRCTL_MCP);
2810 	wr32(hw, TXGBE_PSRCTL, fctrl);
2811 
2812 	return 0;
2813 }
2814 
2815 static int
2816 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2817 {
2818 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2819 	uint32_t fctrl;
2820 
2821 	fctrl = rd32(hw, TXGBE_PSRCTL);
2822 	fctrl |= TXGBE_PSRCTL_MCP;
2823 	wr32(hw, TXGBE_PSRCTL, fctrl);
2824 
2825 	return 0;
2826 }
2827 
2828 static int
2829 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2830 {
2831 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2832 	uint32_t fctrl;
2833 
2834 	if (dev->data->promiscuous == 1)
2835 		return 0; /* must remain in all_multicast mode */
2836 
2837 	fctrl = rd32(hw, TXGBE_PSRCTL);
2838 	fctrl &= (~TXGBE_PSRCTL_MCP);
2839 	wr32(hw, TXGBE_PSRCTL, fctrl);
2840 
2841 	return 0;
2842 }
2843 
2844 /**
2845  * It clears the interrupt causes and enables the interrupt.
2846  * It will be called only once during NIC initialization.
2847  *
2848  * @param dev
2849  *  Pointer to struct rte_eth_dev.
2850  * @param on
2851  *  Enable or Disable.
2852  *
2853  * @return
2854  *  - On success, zero.
2855  *  - On failure, a negative value.
2856  */
2857 static int
2858 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2859 {
2860 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2861 
2862 	txgbe_dev_link_status_print(dev);
2863 	if (on)
2864 		intr->mask_misc |= TXGBE_ICRMISC_LSC;
2865 	else
2866 		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2867 
2868 	return 0;
2869 }
2870 
2871 static int
2872 txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2873 {
2874 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2875 	u64 mask;
2876 
2877 	mask = TXGBE_ICR_MASK;
2878 	mask &= (1ULL << TXGBE_MISC_VEC_ID);
2879 	intr->mask |= mask;
2880 	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
2881 	intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
2882 	return 0;
2883 }
2884 
2885 /**
2886  * It clears the interrupt causes and enables the interrupt.
2887  * It will be called only once during NIC initialization.
2888  *
2889  * @param dev
2890  *  Pointer to struct rte_eth_dev.
2891  *
2892  * @return
2893  *  - On success, zero.
2894  *  - On failure, a negative value.
2895  */
2896 static int
2897 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2898 {
2899 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2900 	u64 mask;
2901 
2902 	mask = TXGBE_ICR_MASK;
2903 	mask &= ~((1ULL << TXGBE_RX_VEC_START) - 1);
2904 	intr->mask |= mask;
2905 
2906 	return 0;
2907 }
2908 
2909 /**
2910  * It clears the interrupt causes and enables the interrupt.
2911  * It will be called only once during NIC initialization.
2912  *
2913  * @param dev
2914  *  Pointer to struct rte_eth_dev.
2915  *
2916  * @return
2917  *  - On success, zero.
2918  *  - On failure, a negative value.
2919  */
2920 static int
2921 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2922 {
2923 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2924 
2925 	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2926 
2927 	return 0;
2928 }
2929 
2930 /*
2931  * It reads the ICR and sets the flag (TXGBE_ICRMISC_LSC) for link_update.
2932  *
2933  * @param dev
2934  *  Pointer to struct rte_eth_dev.
2935  *
2936  * @return
2937  *  - On success, zero.
2938  *  - On failure, a negative value.
2939  */
2940 static int
2941 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2942 {
2943 	uint32_t eicr;
2944 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2945 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2946 
2947 	/* clear all cause masks */
2948 	txgbe_disable_intr(hw);
2949 
2950 	/* read-on-clear nic registers here */
2951 	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2952 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2953 
2954 	intr->flags = 0;
2955 
2956 	/* set flag for async link update */
2957 	if (eicr & TXGBE_ICRMISC_LSC)
2958 		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2959 
2960 	if (eicr & TXGBE_ICRMISC_ANDONE)
2961 		intr->flags |= TXGBE_FLAG_NEED_AN_CONFIG;
2962 
2963 	if (eicr & TXGBE_ICRMISC_VFMBX)
2964 		intr->flags |= TXGBE_FLAG_MAILBOX;
2965 
2966 	if (eicr & TXGBE_ICRMISC_LNKSEC)
2967 		intr->flags |= TXGBE_FLAG_MACSEC;
2968 
2969 	if (eicr & TXGBE_ICRMISC_GPIO)
2970 		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2971 
2972 	return 0;
2973 }
2974 
2975 /**
2976  * It gets and then prints the link status.
2977  *
2978  * @param dev
2979  *  Pointer to struct rte_eth_dev.
2980  *
2981  * @return
2982  *  void
2984  */
2985 static void
2986 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2987 {
2988 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2989 	struct rte_eth_link link;
2990 
2991 	rte_eth_linkstatus_get(dev, &link);
2992 
2993 	if (link.link_status) {
2994 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2995 					(int)(dev->data->port_id),
2996 					(unsigned int)link.link_speed,
2997 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2998 					"full-duplex" : "half-duplex");
2999 	} else {
3000 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
3001 				(int)(dev->data->port_id));
3002 	}
3003 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
3004 				pci_dev->addr.domain,
3005 				pci_dev->addr.bus,
3006 				pci_dev->addr.devid,
3007 				pci_dev->addr.function);
3008 }
3009 
3010 /*
3011  * It executes link_update after an interrupt has occurred.
3012  *
3013  * @param dev
3014  *  Pointer to struct rte_eth_dev.
3015  *
3016  * @return
3017  *  - On success, zero.
3018  *  - On failure, a negative value.
3019  */
3020 static int
3021 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
3022 			   struct rte_intr_handle *intr_handle)
3023 {
3024 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
3025 	int64_t timeout;
3026 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3027 
3028 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
3029 
3030 	if (intr->flags & TXGBE_FLAG_MAILBOX) {
3031 		txgbe_pf_mbx_process(dev);
3032 		intr->flags &= ~TXGBE_FLAG_MAILBOX;
3033 	}
3034 
3035 	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
3036 		hw->phy.handle_lasi(hw);
3037 		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
3038 	}
3039 
3040 	if (intr->flags & TXGBE_FLAG_NEED_AN_CONFIG) {
3041 		if (hw->devarg.auto_neg == 1 && hw->devarg.poll == 0) {
3042 			hw->mac.kr_handle(hw);
3043 			intr->flags &= ~TXGBE_FLAG_NEED_AN_CONFIG;
3044 		}
3045 	}
3046 
3047 	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
3048 		struct rte_eth_link link;
3049 
3050 		/* get the link status before link update, for later prediction */
3051 		rte_eth_linkstatus_get(dev, &link);
3052 
3053 		txgbe_dev_link_update(dev, 0);
3054 
3055 		/* link is likely to come up */
3056 		if (!link.link_status)
3057 			/* handle it 1 sec later, waiting for it to become stable */
3058 			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
3059 		/* link is likely to go down */
3060 		else if ((hw->subsystem_device_id & 0xFF) ==
3061 				TXGBE_DEV_ID_KR_KX_KX4 &&
3062 				hw->devarg.auto_neg == 1)
3063 			/* handle it 2 sec later for backplane AN73 */
3064 			timeout = 2000;
3065 		else
3066 			/* handle it 4 sec later, waiting for it to become stable */
3067 			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
3068 
3069 		txgbe_dev_link_status_print(dev);
3070 		if (rte_eal_alarm_set(timeout * 1000,
3071 				      txgbe_dev_interrupt_delayed_handler,
3072 				      (void *)dev) < 0) {
3073 			PMD_DRV_LOG(ERR, "Error setting alarm");
3074 		} else {
3075 			/* only disable lsc interrupt */
3076 			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
3077 
3078 			intr->mask_orig = intr->mask;
3079 			/* only disable all misc interrupts */
3080 			intr->mask &= ~(1ULL << TXGBE_MISC_VEC_ID);
3081 		}
3082 	}
3083 
3084 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
3085 	txgbe_enable_intr(dev);
3086 	rte_intr_enable(intr_handle);
3087 
3088 	return 0;
3089 }
3090 
3091 /**
3092  * Interrupt handler registered as an alarm callback for delayed handling of
3093  * a specific interrupt, waiting for the NIC state to become stable. As the
3094  * txgbe interrupt state is not stable right after the link goes down,
3095  * it needs to wait 4 seconds to get a stable status.
3096  *
3097  * @param handle
3098  *  Pointer to interrupt handle.
3099  * @param param
3100  *  The address of parameter (struct rte_eth_dev *) registered before.
3101  *
3102  * @return
3103  *  void
3104  */
3105 static void
3106 txgbe_dev_interrupt_delayed_handler(void *param)
3107 {
3108 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3109 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3110 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3111 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
3112 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3113 	uint32_t eicr;
3114 
3115 	txgbe_disable_intr(hw);
3116 
3117 	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
3118 	if (eicr & TXGBE_ICRMISC_VFMBX)
3119 		txgbe_pf_mbx_process(dev);
3120 
3121 	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
3122 		hw->phy.handle_lasi(hw);
3123 		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
3124 	}
3125 
3126 	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
3127 		txgbe_dev_link_update(dev, 0);
3128 		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
3129 		txgbe_dev_link_status_print(dev);
3130 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
3131 					      NULL);
3132 	}
3133 
3134 	if (intr->flags & TXGBE_FLAG_MACSEC) {
3135 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
3136 					      NULL);
3137 		intr->flags &= ~TXGBE_FLAG_MACSEC;
3138 	}
3139 
3140 	/* restore original mask */
3141 	intr->mask_misc |= TXGBE_ICRMISC_LSC;
3142 
3143 	intr->mask = intr->mask_orig;
3144 	intr->mask_orig = 0;
3145 
3146 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3147 	txgbe_enable_intr(dev);
3148 	rte_intr_enable(intr_handle);
3149 }
3150 
3151 /**
3152  * Interrupt handler triggered by the NIC for handling
3153  * a specific interrupt.
3154  *
3155  * @param handle
3156  *  Pointer to interrupt handle.
3157  * @param param
3158  *  The address of parameter (struct rte_eth_dev *) registered before.
3159  *
3160  * @return
3161  *  void
3162  */
3163 static void
3164 txgbe_dev_interrupt_handler(void *param)
3165 {
3166 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3167 
3168 	txgbe_dev_interrupt_get_status(dev);
3169 	txgbe_dev_interrupt_action(dev, dev->intr_handle);
3170 }
3171 
3172 static int
3173 txgbe_dev_led_on(struct rte_eth_dev *dev)
3174 {
3175 	struct txgbe_hw *hw;
3176 
3177 	hw = TXGBE_DEV_HW(dev);
3178 	return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
3179 }
3180 
3181 static int
3182 txgbe_dev_led_off(struct rte_eth_dev *dev)
3183 {
3184 	struct txgbe_hw *hw;
3185 
3186 	hw = TXGBE_DEV_HW(dev);
3187 	return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
3188 }
3189 
3190 static int
3191 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3192 {
3193 	struct txgbe_hw *hw;
3194 	uint32_t mflcn_reg;
3195 	uint32_t fccfg_reg;
3196 	int rx_pause;
3197 	int tx_pause;
3198 
3199 	hw = TXGBE_DEV_HW(dev);
3200 
3201 	fc_conf->pause_time = hw->fc.pause_time;
3202 	fc_conf->high_water = hw->fc.high_water[0];
3203 	fc_conf->low_water = hw->fc.low_water[0];
3204 	fc_conf->send_xon = hw->fc.send_xon;
3205 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3206 
3207 	/*
3208 	 * Return rx_pause status according to actual setting of
3209 	 * RXFCCFG register.
3210 	 */
3211 	mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
3212 	if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
3213 		rx_pause = 1;
3214 	else
3215 		rx_pause = 0;
3216 
3217 	/*
3218 	 * Return tx_pause status according to actual setting of
3219 	 * TXFCCFG register.
3220 	 */
3221 	fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
3222 	if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
3223 		tx_pause = 1;
3224 	else
3225 		tx_pause = 0;
3226 
3227 	if (rx_pause && tx_pause)
3228 		fc_conf->mode = RTE_FC_FULL;
3229 	else if (rx_pause)
3230 		fc_conf->mode = RTE_FC_RX_PAUSE;
3231 	else if (tx_pause)
3232 		fc_conf->mode = RTE_FC_TX_PAUSE;
3233 	else
3234 		fc_conf->mode = RTE_FC_NONE;
3235 
3236 	return 0;
3237 }
3238 
3239 static int
3240 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3241 {
3242 	struct txgbe_hw *hw;
3243 	int err;
3244 	uint32_t rx_buf_size;
3245 	uint32_t max_high_water;
3246 	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3247 		txgbe_fc_none,
3248 		txgbe_fc_rx_pause,
3249 		txgbe_fc_tx_pause,
3250 		txgbe_fc_full
3251 	};
3252 
3253 	PMD_INIT_FUNC_TRACE();
3254 
3255 	hw = TXGBE_DEV_HW(dev);
3256 	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
3257 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3258 
3259 	/*
3260 	 * Reserve at least one Ethernet frame for the watermark;
3261 	 * high_water/low_water are in kilobytes for txgbe.
3262 	 */
3263 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3264 	if (fc_conf->high_water > max_high_water ||
3265 	    fc_conf->high_water < fc_conf->low_water) {
3266 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3267 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3268 		return -EINVAL;
3269 	}
3270 
3271 	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
3272 	hw->fc.pause_time     = fc_conf->pause_time;
3273 	hw->fc.high_water[0]  = fc_conf->high_water;
3274 	hw->fc.low_water[0]   = fc_conf->low_water;
3275 	hw->fc.send_xon       = fc_conf->send_xon;
3276 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3277 
3278 	err = txgbe_fc_enable(hw);
3279 
3280 	/* Not negotiated is not an error case */
3281 	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
3282 		wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
3283 		      (fc_conf->mac_ctrl_frame_fwd
3284 		       ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
3285 		txgbe_flush(hw);
3286 
3287 		return 0;
3288 	}
3289 
3290 	PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
3291 	return -EIO;
3292 }
3293 
3294 static int
3295 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3296 		struct rte_eth_pfc_conf *pfc_conf)
3297 {
3298 	int err;
3299 	uint32_t rx_buf_size;
3300 	uint32_t max_high_water;
3301 	uint8_t tc_num;
3302 	uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
3303 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3304 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3305 
3306 	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3307 		txgbe_fc_none,
3308 		txgbe_fc_rx_pause,
3309 		txgbe_fc_tx_pause,
3310 		txgbe_fc_full
3311 	};
3312 
3313 	PMD_INIT_FUNC_TRACE();
3314 
3315 	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3316 	tc_num = map[pfc_conf->priority];
3317 	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
3318 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3319 	/*
3320 	 * Reserve at least one Ethernet frame for the watermark;
3321 	 * high_water/low_water are in kilobytes for txgbe.
3322 	 */
3323 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3324 	if (pfc_conf->fc.high_water > max_high_water ||
3325 	    pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3326 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3327 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3328 		return -EINVAL;
3329 	}
3330 
3331 	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3332 	hw->fc.pause_time = pfc_conf->fc.pause_time;
3333 	hw->fc.send_xon = pfc_conf->fc.send_xon;
3334 	hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3335 	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3336 
3337 	err = txgbe_dcb_pfc_enable(hw, tc_num);
3338 
3339 	/* Not negotiated is not an error case */
3340 	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3341 		return 0;
3342 
3343 	PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3344 	return -EIO;
3345 }
3346 
3347 int
3348 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3349 			  struct rte_eth_rss_reta_entry64 *reta_conf,
3350 			  uint16_t reta_size)
3351 {
3352 	uint8_t i, j, mask;
3353 	uint32_t reta;
3354 	uint16_t idx, shift;
3355 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3356 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3357 
3358 	PMD_INIT_FUNC_TRACE();
3359 
3360 	if (!txgbe_rss_update_sp(hw->mac.type)) {
3361 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3362 			"NIC.");
3363 		return -ENOTSUP;
3364 	}
3365 
3366 	if (reta_size != ETH_RSS_RETA_SIZE_128) {
3367 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3368 			"(%d) doesn't match the number hardware can supported "
3369 			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3370 		return -EINVAL;
3371 	}
3372 
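	/*
	 * The redirection table is handled four entries at a time: each
	 * 32-bit RSSTBL register holds four 8-bit queue indices, and the
	 * 4-bit mask extracted from reta_conf selects which of them to
	 * rewrite.
	 */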
3373 	for (i = 0; i < reta_size; i += 4) {
3374 		idx = i / RTE_RETA_GROUP_SIZE;
3375 		shift = i % RTE_RETA_GROUP_SIZE;
3376 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3377 		if (!mask)
3378 			continue;
3379 
3380 		reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
3381 		for (j = 0; j < 4; j++) {
3382 			if (RS8(mask, j, 0x1)) {
3383 				reta  &= ~(MS32(8 * j, 0xFF));
3384 				reta |= LS32(reta_conf[idx].reta[shift + j],
3385 						8 * j, 0xFF);
3386 			}
3387 		}
3388 		wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3389 	}
3390 	adapter->rss_reta_updated = 1;
3391 
3392 	return 0;
3393 }
3394 
3395 int
3396 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3397 			 struct rte_eth_rss_reta_entry64 *reta_conf,
3398 			 uint16_t reta_size)
3399 {
3400 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3401 	uint8_t i, j, mask;
3402 	uint32_t reta;
3403 	uint16_t idx, shift;
3404 
3405 	PMD_INIT_FUNC_TRACE();
3406 
3407 	if (reta_size != ETH_RSS_RETA_SIZE_128) {
3408 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3409 			"(%d) doesn't match the number hardware can supported "
3410 			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3411 		return -EINVAL;
3412 	}
3413 
3414 	for (i = 0; i < reta_size; i += 4) {
3415 		idx = i / RTE_RETA_GROUP_SIZE;
3416 		shift = i % RTE_RETA_GROUP_SIZE;
3417 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3418 		if (!mask)
3419 			continue;
3420 
3421 		reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
3422 		for (j = 0; j < 4; j++) {
3423 			if (RS8(mask, j, 0x1))
3424 				reta_conf[idx].reta[shift + j] =
3425 					(uint16_t)RS32(reta, 8 * j, 0xFF);
3426 		}
3427 	}
3428 
3429 	return 0;
3430 }
3431 
3432 static int
3433 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3434 				uint32_t index, uint32_t pool)
3435 {
3436 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3437 	uint32_t enable_addr = 1;
3438 
3439 	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3440 			     pool, enable_addr);
3441 }
3442 
3443 static void
3444 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3445 {
3446 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3447 
3448 	txgbe_clear_rar(hw, index);
3449 }
3450 
3451 static int
3452 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3453 {
3454 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3455 
3456 	txgbe_remove_rar(dev, 0);
3457 	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3458 
3459 	return 0;
3460 }
3461 
3462 static int
3463 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3464 {
3465 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3466 	struct rte_eth_dev_info dev_info;
3467 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3468 	struct rte_eth_dev_data *dev_data = dev->data;
3469 	int ret;
3470 
3471 	ret = txgbe_dev_info_get(dev, &dev_info);
3472 	if (ret != 0)
3473 		return ret;
3474 
3475 	/* check that mtu is within the allowed range */
3476 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
3477 		return -EINVAL;
3478 
3479 	/* If the device is started, refuse an mtu that requires scattered
3480 	 * packet support when this feature has not been enabled before.
3481 	 */
3482 	if (dev_data->dev_started && !dev_data->scattered_rx &&
3483 	    (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3484 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3485 		PMD_INIT_LOG(ERR, "Stop port first.");
3486 		return -EINVAL;
3487 	}
3488 
3489 	/* update max frame size */
3490 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3491 
3492 	if (hw->mode)
3493 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3494 			TXGBE_FRAME_SIZE_MAX);
3495 	else
3496 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3497 			TXGBE_FRMSZ_MAX(frame_size));
3498 
3499 	return 0;
3500 }
3501 
3502 static uint32_t
3503 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3504 {
3505 	uint32_t vector = 0;
3506 
3507 	switch (hw->mac.mc_filter_type) {
3508 	case 0:   /* use bits [47:36] of the address */
3509 		vector = ((uc_addr->addr_bytes[4] >> 4) |
3510 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
3511 		break;
3512 	case 1:   /* use bits [46:35] of the address */
3513 		vector = ((uc_addr->addr_bytes[4] >> 3) |
3514 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
3515 		break;
3516 	case 2:   /* use bits [45:34] of the address */
3517 		vector = ((uc_addr->addr_bytes[4] >> 2) |
3518 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
3519 		break;
3520 	case 3:   /* use bits [43:32] of the address */
3521 		vector = ((uc_addr->addr_bytes[4]) |
3522 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
3523 		break;
3524 	default:  /* Invalid mc_filter_type */
3525 		break;
3526 	}
3527 
3528 	/* vector can only be 12 bits wide or the boundary will be exceeded */
3529 	vector &= 0xFFF;
3530 	return vector;
3531 }
3532 
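/*
 * Set or clear one bit of the unicast hash table (UTA): the 12-bit vector
 * from txgbe_uta_vector() selects one of 128 32-bit UCADDRTBL registers
 * (bits 11:5) and a bit within it (bits 4:0). A shadow copy and an in-use
 * counter are kept so hash filtering can be switched off when the table
 * empties.
 */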
3533 static int
3534 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3535 			struct rte_ether_addr *mac_addr, uint8_t on)
3536 {
3537 	uint32_t vector;
3538 	uint32_t uta_idx;
3539 	uint32_t reg_val;
3540 	uint32_t uta_mask;
3541 	uint32_t psrctl;
3542 
3543 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3544 	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3545 
3546 	/* The UTA table only exists on pf hardware */
3547 	if (hw->mac.type < txgbe_mac_raptor)
3548 		return -ENOTSUP;
3549 
3550 	vector = txgbe_uta_vector(hw, mac_addr);
3551 	uta_idx = (vector >> 5) & 0x7F;
3552 	uta_mask = 0x1UL << (vector & 0x1F);
3553 
3554 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3555 		return 0;
3556 
3557 	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3558 	if (on) {
3559 		uta_info->uta_in_use++;
3560 		reg_val |= uta_mask;
3561 		uta_info->uta_shadow[uta_idx] |= uta_mask;
3562 	} else {
3563 		uta_info->uta_in_use--;
3564 		reg_val &= ~uta_mask;
3565 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3566 	}
3567 
3568 	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3569 
3570 	psrctl = rd32(hw, TXGBE_PSRCTL);
3571 	if (uta_info->uta_in_use > 0)
3572 		psrctl |= TXGBE_PSRCTL_UCHFENA;
3573 	else
3574 		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3575 
3576 	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3577 	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3578 	wr32(hw, TXGBE_PSRCTL, psrctl);
3579 
3580 	return 0;
3581 }
3582 
3583 static int
3584 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3585 {
3586 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3587 	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3588 	uint32_t psrctl;
3589 	int i;
3590 
3591 	/* The UTA table only exists on PF hardware */
3592 	if (hw->mac.type < txgbe_mac_raptor)
3593 		return -ENOTSUP;
3594 
3595 	if (on) {
3596 		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3597 			uta_info->uta_shadow[i] = ~0;
3598 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3599 		}
3600 	} else {
3601 		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3602 			uta_info->uta_shadow[i] = 0;
3603 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
3604 		}
3605 	}
3606 
3607 	psrctl = rd32(hw, TXGBE_PSRCTL);
3608 	if (on)
3609 		psrctl |= TXGBE_PSRCTL_UCHFENA;
3610 	else
3611 		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3612 
3613 	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3614 	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3615 	wr32(hw, TXGBE_PSRCTL, psrctl);
3616 
3617 	return 0;
3618 }
3619 
3620 uint32_t
3621 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3622 {
3623 	uint32_t new_val = orig_val;
3624 
3625 	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3626 		new_val |= TXGBE_POOLETHCTL_UTA;
3627 	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3628 		new_val |= TXGBE_POOLETHCTL_MCHA;
3629 	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3630 		new_val |= TXGBE_POOLETHCTL_UCHA;
3631 	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3632 		new_val |= TXGBE_POOLETHCTL_BCA;
3633 	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3634 		new_val |= TXGBE_POOLETHCTL_MCP;
3635 
3636 	return new_val;
3637 }
3638 
3639 static int
3640 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3641 {
3642 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3643 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3644 	uint32_t mask;
3645 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3646 
3647 	if (queue_id < 32) {
3648 		mask = rd32(hw, TXGBE_IMS(0));
3649 		mask &= (1 << queue_id);
3650 		wr32(hw, TXGBE_IMS(0), mask);
3651 	} else if (queue_id < 64) {
3652 		mask = rd32(hw, TXGBE_IMS(1));
3653 		mask &= (1 << (queue_id - 32));
3654 		wr32(hw, TXGBE_IMS(1), mask);
3655 	}
3656 	rte_intr_enable(intr_handle);
3657 
3658 	return 0;
3659 }
3660 
3661 static int
3662 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3663 {
3664 	uint32_t mask;
3665 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3666 
3667 	if (queue_id < 32) {
3668 		mask = rd32(hw, TXGBE_IMS(0));
3669 		mask &= ~(1 << queue_id);
3670 		wr32(hw, TXGBE_IMS(0), mask);
3671 	} else if (queue_id < 64) {
3672 		mask = rd32(hw, TXGBE_IMS(1));
3673 		mask &= ~(1 << (queue_id - 32));
3674 		wr32(hw, TXGBE_IMS(1), mask);
3675 	}
3676 
3677 	return 0;
3678 }
3679 
3680 /**
3681  * set the IVAR registers, mapping interrupt causes to vectors
3682  * @param hw
3683  *  pointer to txgbe_hw struct
3684  * @param direction
3685  *  0 for Rx, 1 for Tx, -1 for other causes
3686  * @param queue
3687  *  queue to map the corresponding interrupt to
3688  * @param msix_vector
3689  *  the vector to map to the corresponding queue
3690  */
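/*
 * IVAR layout used below: each 32-bit IVAR register covers a queue pair and
 * holds four 8-bit vector fields at byte offset 16 * (queue & 1) +
 * 8 * direction (Rx of the even queue in bits [7:0], Tx of the even queue in
 * bits [15:8], then the odd queue in the upper half). Miscellaneous causes
 * are programmed through the separate IVARMISC register instead.
 */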
3691 void
3692 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3693 		   uint8_t queue, uint8_t msix_vector)
3694 {
3695 	uint32_t tmp, idx;
3696 
3697 	if (direction == -1) {
3698 		/* other causes */
3699 		msix_vector |= TXGBE_IVARMISC_VLD;
3700 		idx = 0;
3701 		tmp = rd32(hw, TXGBE_IVARMISC);
3702 		tmp &= ~(0xFF << idx);
3703 		tmp |= (msix_vector << idx);
3704 		wr32(hw, TXGBE_IVARMISC, tmp);
3705 	} else {
3706 		/* rx or tx causes */
3707 		/* Workaround for ICR lost */
3708 		idx = ((16 * (queue & 1)) + (8 * direction));
3709 		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3710 		tmp &= ~(0xFF << idx);
3711 		tmp |= (msix_vector << idx);
3712 		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
3713 	}
3714 }
3715 
3716 /**
3717  * Sets up the hardware to properly generate MSI-X interrupts
3718  * @param dev
3719  *  pointer to the rte_eth_dev structure
3720  */
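/*
 * Vector assignment: when rte_intr_allow_others() is true, vector
 * TXGBE_MISC_VEC_ID stays reserved for link/misc causes and Rx queue vectors
 * start at TXGBE_RX_VEC_START. Rx queues are then mapped 1:1 onto the
 * available event fds; once the last data vector is reached, it is shared by
 * all remaining queues.
 */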
3721 static void
3722 txgbe_configure_msix(struct rte_eth_dev *dev)
3723 {
3724 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3725 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3726 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3727 	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3728 	uint32_t vec = TXGBE_MISC_VEC_ID;
3729 	uint32_t gpie;
3730 
3731 	/* Won't configure the MSI-X register if no mapping is done
3732 	 * between intr vector and event fd,
3733 	 * but if MSI-X has been enabled already, need to configure
3734 	 * auto clean, auto mask and throttling.
3735 	 */
3736 	gpie = rd32(hw, TXGBE_GPIE);
3737 	if (!rte_intr_dp_is_en(intr_handle) &&
3738 	    !(gpie & TXGBE_GPIE_MSIX))
3739 		return;
3740 
3741 	if (rte_intr_allow_others(intr_handle)) {
3742 		base = TXGBE_RX_VEC_START;
3743 		vec = base;
3744 	}
3745 
3746 	/* setup GPIE for MSI-x mode */
3747 	gpie = rd32(hw, TXGBE_GPIE);
3748 	gpie |= TXGBE_GPIE_MSIX;
3749 	wr32(hw, TXGBE_GPIE, gpie);
3750 
3751 	/* Populate the IVAR table and set the ITR values to the
3752 	 * corresponding register.
3753 	 */
3754 	if (rte_intr_dp_is_en(intr_handle)) {
3755 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3756 			queue_id++) {
3757 			/* by default, 1:1 mapping */
3758 			txgbe_set_ivar_map(hw, 0, queue_id, vec);
3759 			intr_handle->intr_vec[queue_id] = vec;
3760 			if (vec < base + intr_handle->nb_efd - 1)
3761 				vec++;
3762 		}
3763 
3764 		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3765 	}
3766 	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3767 			TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3768 			| TXGBE_ITR_WRDSA);
3769 }
3770 
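/*
 * Program the per-queue Tx rate limiter: the target queue is selected
 * through ARBPOOLIDX and its maximum rate (with a minimum rate of half that
 * value) is written to ARBTXRATE; a rate of 0 disables the limiter for that
 * queue.
 */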
3771 int
3772 txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3773 			   uint16_t queue_idx, uint16_t tx_rate)
3774 {
3775 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3776 	uint32_t bcnrc_val;
3777 
3778 	if (queue_idx >= hw->mac.max_tx_queues)
3779 		return -EINVAL;
3780 
3781 	if (tx_rate != 0) {
3782 		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3783 		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3784 	} else {
3785 		bcnrc_val = 0;
3786 	}
3787 
3788 	/*
3789 	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3790 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3791 	 */
3792 	wr32(hw, TXGBE_ARBTXMMW, 0x14);
3793 
3794 	/* Set ARBTXRATE of queue X */
3795 	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3796 	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
3797 	txgbe_flush(hw);
3798 
3799 	return 0;
3800 }
3801 
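/*
 * SYN filter: SYNCLS steers TCP SYN packets to the queue given in
 * filter->queue, optionally at high priority (TXGBE_SYNCLS_HIPRIO). Only a
 * single SYN filter can be enabled at a time.
 */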
3802 int
3803 txgbe_syn_filter_set(struct rte_eth_dev *dev,
3804 			struct rte_eth_syn_filter *filter,
3805 			bool add)
3806 {
3807 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3808 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3809 	uint32_t syn_info;
3810 	uint32_t synqf;
3811 
3812 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3813 		return -EINVAL;
3814 
3815 	syn_info = filter_info->syn_info;
3816 
3817 	if (add) {
3818 		if (syn_info & TXGBE_SYNCLS_ENA)
3819 			return -EINVAL;
3820 		synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
3821 		synqf |= TXGBE_SYNCLS_ENA;
3822 
3823 		if (filter->hig_pri)
3824 			synqf |= TXGBE_SYNCLS_HIPRIO;
3825 		else
3826 			synqf &= ~TXGBE_SYNCLS_HIPRIO;
3827 	} else {
3828 		synqf = rd32(hw, TXGBE_SYNCLS);
3829 		if (!(syn_info & TXGBE_SYNCLS_ENA))
3830 			return -ENOENT;
3831 		synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
3832 	}
3833 
3834 	filter_info->syn_info = synqf;
3835 	wr32(hw, TXGBE_SYNCLS, synqf);
3836 	txgbe_flush(hw);
3837 	return 0;
3838 }
3839 
3840 static inline enum txgbe_5tuple_protocol
3841 convert_protocol_type(uint8_t protocol_value)
3842 {
3843 	if (protocol_value == IPPROTO_TCP)
3844 		return TXGBE_5TF_PROT_TCP;
3845 	else if (protocol_value == IPPROTO_UDP)
3846 		return TXGBE_5TF_PROT_UDP;
3847 	else if (protocol_value == IPPROTO_SCTP)
3848 		return TXGBE_5TF_PROT_SCTP;
3849 	else
3850 		return TXGBE_5TF_PROT_NONE;
3851 }
3852 
3853 /* inject a 5-tuple filter to HW */
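/*
 * Mask semantics of 5TFCTL0: the driver starts from the full field mask and
 * clears the M* bit of every field whose filter_info mask is 0 (i.e. the
 * field must be compared); fields whose M* bit stays set are not matched.
 */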
3854 static inline void
3855 txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
3856 			   struct txgbe_5tuple_filter *filter)
3857 {
3858 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3859 	int i;
3860 	uint32_t ftqf, sdpqf;
3861 	uint32_t l34timir = 0;
3862 	uint32_t mask = TXGBE_5TFCTL0_MASK;
3863 
3864 	i = filter->index;
3865 	sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
3866 	sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
3867 
3868 	ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
3869 	ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
3870 	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
3871 		mask &= ~TXGBE_5TFCTL0_MSADDR;
3872 	if (filter->filter_info.dst_ip_mask == 0)
3873 		mask &= ~TXGBE_5TFCTL0_MDADDR;
3874 	if (filter->filter_info.src_port_mask == 0)
3875 		mask &= ~TXGBE_5TFCTL0_MSPORT;
3876 	if (filter->filter_info.dst_port_mask == 0)
3877 		mask &= ~TXGBE_5TFCTL0_MDPORT;
3878 	if (filter->filter_info.proto_mask == 0)
3879 		mask &= ~TXGBE_5TFCTL0_MPROTO;
3880 	ftqf |= mask;
3881 	ftqf |= TXGBE_5TFCTL0_MPOOL;
3882 	ftqf |= TXGBE_5TFCTL0_ENA;
3883 
3884 	wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
3885 	wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
3886 	wr32(hw, TXGBE_5TFPORT(i), sdpqf);
3887 	wr32(hw, TXGBE_5TFCTL0(i), ftqf);
3888 
3889 	l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
3890 	wr32(hw, TXGBE_5TFCTL1(i), l34timir);
3891 }
3892 
3893 /*
3894  * add a 5tuple filter
3895  *
3896  * @param
3897  * dev: Pointer to struct rte_eth_dev.
3898  * filter: pointer to the filter that will be added; filter->index is
3899  *         set to the allocated hardware slot, and filter->queue is the
3900  *         queue id the filter is assigned to.
3901  *
3902  * @return
3903  *    - On success, zero.
3904  *    - On failure, a negative value.
3905  */
3906 static int
3907 txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
3908 			struct txgbe_5tuple_filter *filter)
3909 {
3910 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3911 	int i, idx, shift;
3912 
3913 	/*
3914 	 * look for an unused 5tuple filter index,
3915 	 * and insert the filter into the list.
3916 	 */
3917 	for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
3918 		idx = i / (sizeof(uint32_t) * NBBY);
3919 		shift = i % (sizeof(uint32_t) * NBBY);
3920 		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
3921 			filter_info->fivetuple_mask[idx] |= 1 << shift;
3922 			filter->index = i;
3923 			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3924 					  filter,
3925 					  entries);
3926 			break;
3927 		}
3928 	}
3929 	if (i >= TXGBE_MAX_FTQF_FILTERS) {
3930 		PMD_DRV_LOG(ERR, "5tuple filters are full.");
3931 		return -ENOSYS;
3932 	}
3933 
3934 	txgbe_inject_5tuple_filter(dev, filter);
3935 
3936 	return 0;
3937 }
3938 
3939 /*
3940  * remove a 5tuple filter
3941  *
3942  * @param
3943  * dev: Pointer to struct rte_eth_dev.
3944  * filter: pointer to the filter to be removed.
3945  */
3946 static void
3947 txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
3948 			struct txgbe_5tuple_filter *filter)
3949 {
3950 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3951 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3952 	uint16_t index = filter->index;
3953 
3954 	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
3955 				~(1 << (index % (sizeof(uint32_t) * NBBY)));
3956 	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3957 	rte_free(filter);
3958 
3959 	wr32(hw, TXGBE_5TFDADDR(index), 0);
3960 	wr32(hw, TXGBE_5TFSADDR(index), 0);
3961 	wr32(hw, TXGBE_5TFPORT(index), 0);
3962 	wr32(hw, TXGBE_5TFCTL0(index), 0);
3963 	wr32(hw, TXGBE_5TFCTL1(index), 0);
3964 }
3965 
3966 static inline struct txgbe_5tuple_filter *
3967 txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
3968 			struct txgbe_5tuple_filter_info *key)
3969 {
3970 	struct txgbe_5tuple_filter *it;
3971 
3972 	TAILQ_FOREACH(it, filter_list, entries) {
3973 		if (memcmp(key, &it->filter_info,
3974 			sizeof(struct txgbe_5tuple_filter_info)) == 0) {
3975 			return it;
3976 		}
3977 	}
3978 	return NULL;
3979 }
3980 
3981 /* translate elements in struct rte_eth_ntuple_filter
3982  * to struct txgbe_5tuple_filter_info
3983  */
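/*
 * Note that the mask sense is inverted here: a full rte mask (UINT*_MAX)
 * means the field is matched exactly, so the corresponding filter_info mask
 * is set to 0 ("compare"), while a zero rte mask sets it to 1 ("don't
 * care"). Partial masks are rejected.
 */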
3984 static inline int
3985 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
3986 			struct txgbe_5tuple_filter_info *filter_info)
3987 {
3988 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
3989 		filter->priority > TXGBE_5TUPLE_MAX_PRI ||
3990 		filter->priority < TXGBE_5TUPLE_MIN_PRI)
3991 		return -EINVAL;
3992 
3993 	switch (filter->dst_ip_mask) {
3994 	case UINT32_MAX:
3995 		filter_info->dst_ip_mask = 0;
3996 		filter_info->dst_ip = filter->dst_ip;
3997 		break;
3998 	case 0:
3999 		filter_info->dst_ip_mask = 1;
4000 		break;
4001 	default:
4002 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
4003 		return -EINVAL;
4004 	}
4005 
4006 	switch (filter->src_ip_mask) {
4007 	case UINT32_MAX:
4008 		filter_info->src_ip_mask = 0;
4009 		filter_info->src_ip = filter->src_ip;
4010 		break;
4011 	case 0:
4012 		filter_info->src_ip_mask = 1;
4013 		break;
4014 	default:
4015 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
4016 		return -EINVAL;
4017 	}
4018 
4019 	switch (filter->dst_port_mask) {
4020 	case UINT16_MAX:
4021 		filter_info->dst_port_mask = 0;
4022 		filter_info->dst_port = filter->dst_port;
4023 		break;
4024 	case 0:
4025 		filter_info->dst_port_mask = 1;
4026 		break;
4027 	default:
4028 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
4029 		return -EINVAL;
4030 	}
4031 
4032 	switch (filter->src_port_mask) {
4033 	case UINT16_MAX:
4034 		filter_info->src_port_mask = 0;
4035 		filter_info->src_port = filter->src_port;
4036 		break;
4037 	case 0:
4038 		filter_info->src_port_mask = 1;
4039 		break;
4040 	default:
4041 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
4042 		return -EINVAL;
4043 	}
4044 
4045 	switch (filter->proto_mask) {
4046 	case UINT8_MAX:
4047 		filter_info->proto_mask = 0;
4048 		filter_info->proto =
4049 			convert_protocol_type(filter->proto);
4050 		break;
4051 	case 0:
4052 		filter_info->proto_mask = 1;
4053 		break;
4054 	default:
4055 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
4056 		return -EINVAL;
4057 	}
4058 
4059 	filter_info->priority = (uint8_t)filter->priority;
4060 	return 0;
4061 }
4062 
4063 /*
4064  * add or delete an ntuple filter
4065  *
4066  * @param
4067  * dev: Pointer to struct rte_eth_dev.
4068  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4069  * add: if true, add filter, if false, remove filter
4070  *
4071  * @return
4072  *    - On success, zero.
4073  *    - On failure, a negative value.
4074  */
4075 int
4076 txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
4077 			struct rte_eth_ntuple_filter *ntuple_filter,
4078 			bool add)
4079 {
4080 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4081 	struct txgbe_5tuple_filter_info filter_5tuple;
4082 	struct txgbe_5tuple_filter *filter;
4083 	int ret;
4084 
4085 	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
4086 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
4087 		return -EINVAL;
4088 	}
4089 
4090 	memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
4091 	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
4092 	if (ret < 0)
4093 		return ret;
4094 
4095 	filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
4096 					 &filter_5tuple);
4097 	if (filter != NULL && add) {
4098 		PMD_DRV_LOG(ERR, "filter exists.");
4099 		return -EEXIST;
4100 	}
4101 	if (filter == NULL && !add) {
4102 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
4103 		return -ENOENT;
4104 	}
4105 
4106 	if (add) {
4107 		filter = rte_zmalloc("txgbe_5tuple_filter",
4108 				sizeof(struct txgbe_5tuple_filter), 0);
4109 		if (filter == NULL)
4110 			return -ENOMEM;
4111 		rte_memcpy(&filter->filter_info,
4112 				 &filter_5tuple,
4113 				 sizeof(struct txgbe_5tuple_filter_info));
4114 		filter->queue = ntuple_filter->queue;
4115 		ret = txgbe_add_5tuple_filter(dev, filter);
4116 		if (ret < 0) {
4117 			rte_free(filter);
4118 			return ret;
4119 		}
4120 	} else {
4121 		txgbe_remove_5tuple_filter(dev, filter);
4122 	}
4123 
4124 	return 0;
4125 }
4126 
4127 int
4128 txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
4129 			struct rte_eth_ethertype_filter *filter,
4130 			bool add)
4131 {
4132 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4133 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4134 	uint32_t etqf = 0;
4135 	uint32_t etqs = 0;
4136 	int ret;
4137 	struct txgbe_ethertype_filter ethertype_filter;
4138 
4139 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
4140 		return -EINVAL;
4141 
4142 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
4143 	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
4144 		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4145 			" ethertype filter.", filter->ether_type);
4146 		return -EINVAL;
4147 	}
4148 
4149 	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4150 		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4151 		return -EINVAL;
4152 	}
4153 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4154 		PMD_DRV_LOG(ERR, "drop option is unsupported.");
4155 		return -EINVAL;
4156 	}
4157 
4158 	ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4159 	if (ret >= 0 && add) {
4160 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4161 			    filter->ether_type);
4162 		return -EEXIST;
4163 	}
4164 	if (ret < 0 && !add) {
4165 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4166 			    filter->ether_type);
4167 		return -ENOENT;
4168 	}
4169 
4170 	if (add) {
4171 		etqf = TXGBE_ETFLT_ENA;
4172 		etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
4173 		etqs |= TXGBE_ETCLS_QPID(filter->queue);
4174 		etqs |= TXGBE_ETCLS_QENA;
4175 
4176 		ethertype_filter.ethertype = filter->ether_type;
4177 		ethertype_filter.etqf = etqf;
4178 		ethertype_filter.etqs = etqs;
4179 		ethertype_filter.conf = FALSE;
4180 		ret = txgbe_ethertype_filter_insert(filter_info,
4181 						    &ethertype_filter);
4182 		if (ret < 0) {
4183 			PMD_DRV_LOG(ERR, "ethertype filters are full.");
4184 			return -ENOSPC;
4185 		}
4186 	} else {
4187 		ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
4188 		if (ret < 0)
4189 			return -ENOSYS;
4190 	}
4191 	wr32(hw, TXGBE_ETFLT(ret), etqf);
4192 	wr32(hw, TXGBE_ETCLS(ret), etqs);
4193 	txgbe_flush(hw);
4194 
4195 	return 0;
4196 }
4197 
4198 static int
4199 txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
4200 		       const struct rte_flow_ops **ops)
4201 {
4202 	*ops = &txgbe_flow_ops;
4203 	return 0;
4204 }
4205 
4206 static u8 *
4207 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
4208 			u8 **mc_addr_ptr, u32 *vmdq)
4209 {
4210 	u8 *mc_addr;
4211 
4212 	*vmdq = 0;
4213 	mc_addr = *mc_addr_ptr;
4214 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
4215 	return mc_addr;
4216 }
4217 
4218 int
4219 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4220 			  struct rte_ether_addr *mc_addr_set,
4221 			  uint32_t nb_mc_addr)
4222 {
4223 	struct txgbe_hw *hw;
4224 	u8 *mc_addr_list;
4225 
4226 	hw = TXGBE_DEV_HW(dev);
4227 	mc_addr_list = (u8 *)mc_addr_set;
4228 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4229 					 txgbe_dev_addr_list_itr, TRUE);
4230 }
4231 
4232 static uint64_t
4233 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
4234 {
4235 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4236 	uint64_t systime_cycles;
4237 
4238 	systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
4239 	systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
4240 
4241 	return systime_cycles;
4242 }
4243 
4244 static uint64_t
4245 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4246 {
4247 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4248 	uint64_t rx_tstamp_cycles;
4249 
4250 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
4251 	rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
4252 	rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
4253 
4254 	return rx_tstamp_cycles;
4255 }
4256 
4257 static uint64_t
4258 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4259 {
4260 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4261 	uint64_t tx_tstamp_cycles;
4262 
4263 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
4264 	tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
4265 	tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
4266 
4267 	return tx_tstamp_cycles;
4268 }
4269 
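/*
 * Initialize the IEEE 1588 cycle counters. The hardware system time is
 * advanced via TSTIMEINC with an increment value and shift chosen per link
 * speed; the software rte_timecounter structures use the same shift so that
 * raw timestamp register values can be converted to nanoseconds.
 */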
4270 static void
4271 txgbe_start_timecounters(struct rte_eth_dev *dev)
4272 {
4273 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4274 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4275 	struct rte_eth_link link;
4276 	uint32_t incval = 0;
4277 	uint32_t shift = 0;
4278 
4279 	/* Get current link speed. */
4280 	txgbe_dev_link_update(dev, 1);
4281 	rte_eth_linkstatus_get(dev, &link);
4282 
4283 	switch (link.link_speed) {
4284 	case ETH_SPEED_NUM_100M:
4285 		incval = TXGBE_INCVAL_100;
4286 		shift = TXGBE_INCVAL_SHIFT_100;
4287 		break;
4288 	case ETH_SPEED_NUM_1G:
4289 		incval = TXGBE_INCVAL_1GB;
4290 		shift = TXGBE_INCVAL_SHIFT_1GB;
4291 		break;
4292 	case ETH_SPEED_NUM_10G:
4293 	default:
4294 		incval = TXGBE_INCVAL_10GB;
4295 		shift = TXGBE_INCVAL_SHIFT_10GB;
4296 		break;
4297 	}
4298 
4299 	wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
4300 
4301 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4302 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4303 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4304 
4305 	adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4306 	adapter->systime_tc.cc_shift = shift;
4307 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4308 
4309 	adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4310 	adapter->rx_tstamp_tc.cc_shift = shift;
4311 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4312 
4313 	adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4314 	adapter->tx_tstamp_tc.cc_shift = shift;
4315 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4316 }
4317 
4318 static int
4319 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4320 {
4321 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4322 
4323 	adapter->systime_tc.nsec += delta;
4324 	adapter->rx_tstamp_tc.nsec += delta;
4325 	adapter->tx_tstamp_tc.nsec += delta;
4326 
4327 	return 0;
4328 }
4329 
4330 static int
4331 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4332 {
4333 	uint64_t ns;
4334 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4335 
4336 	ns = rte_timespec_to_ns(ts);
4337 	/* Set the timecounters to a new value. */
4338 	adapter->systime_tc.nsec = ns;
4339 	adapter->rx_tstamp_tc.nsec = ns;
4340 	adapter->tx_tstamp_tc.nsec = ns;
4341 
4342 	return 0;
4343 }
4344 
4345 static int
4346 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4347 {
4348 	uint64_t ns, systime_cycles;
4349 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4350 
4351 	systime_cycles = txgbe_read_systime_cyclecounter(dev);
4352 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4353 	*ts = rte_ns_to_timespec(ns);
4354 
4355 	return 0;
4356 }
4357 
4358 static int
4359 txgbe_timesync_enable(struct rte_eth_dev *dev)
4360 {
4361 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4362 	uint32_t tsync_ctl;
4363 
4364 	/* Stop the timesync system time. */
4365 	wr32(hw, TXGBE_TSTIMEINC, 0x0);
4366 	/* Reset the timesync system time value. */
4367 	wr32(hw, TXGBE_TSTIMEL, 0x0);
4368 	wr32(hw, TXGBE_TSTIMEH, 0x0);
4369 
4370 	txgbe_start_timecounters(dev);
4371 
4372 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4373 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
4374 		RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
4375 
4376 	/* Enable timestamping of received PTP packets. */
4377 	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4378 	tsync_ctl |= TXGBE_TSRXCTL_ENA;
4379 	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4380 
4381 	/* Enable timestamping of transmitted PTP packets. */
4382 	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4383 	tsync_ctl |= TXGBE_TSTXCTL_ENA;
4384 	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4385 
4386 	txgbe_flush(hw);
4387 
4388 	return 0;
4389 }
4390 
4391 static int
4392 txgbe_timesync_disable(struct rte_eth_dev *dev)
4393 {
4394 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4395 	uint32_t tsync_ctl;
4396 
4397 	/* Disable timestamping of transmitted PTP packets. */
4398 	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4399 	tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
4400 	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4401 
4402 	/* Disable timestamping of received PTP packets. */
4403 	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4404 	tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
4405 	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4406 
4407 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4408 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
4409 
4410 	/* Stop incrementing the System Time registers. */
4411 	wr32(hw, TXGBE_TSTIMEINC, 0);
4412 
4413 	return 0;
4414 }
4415 
4416 static int
4417 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4418 				 struct timespec *timestamp,
4419 				 uint32_t flags __rte_unused)
4420 {
4421 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4422 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4423 	uint32_t tsync_rxctl;
4424 	uint64_t rx_tstamp_cycles;
4425 	uint64_t ns;
4426 
4427 	tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
4428 	if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
4429 		return -EINVAL;
4430 
4431 	rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
4432 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4433 	*timestamp = rte_ns_to_timespec(ns);
4434 
4435 	return  0;
4436 }
4437 
4438 static int
4439 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4440 				 struct timespec *timestamp)
4441 {
4442 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4443 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4444 	uint32_t tsync_txctl;
4445 	uint64_t tx_tstamp_cycles;
4446 	uint64_t ns;
4447 
4448 	tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
4449 	if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
4450 		return -EINVAL;
4451 
4452 	tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
4453 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4454 	*timestamp = rte_ns_to_timespec(ns);
4455 
4456 	return 0;
4457 }
4458 
4459 static int
4460 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4461 {
4462 	int count = 0;
4463 	int g_ind = 0;
4464 	const struct reg_info *reg_group;
4465 	const struct reg_info **reg_set = txgbe_regs_others;
4466 
4467 	while ((reg_group = reg_set[g_ind++]))
4468 		count += txgbe_regs_group_count(reg_group);
4469 
4470 	return count;
4471 }
4472 
4473 static int
4474 txgbe_get_regs(struct rte_eth_dev *dev,
4475 	      struct rte_dev_reg_info *regs)
4476 {
4477 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4478 	uint32_t *data = regs->data;
4479 	int g_ind = 0;
4480 	int count = 0;
4481 	const struct reg_info *reg_group;
4482 	const struct reg_info **reg_set = txgbe_regs_others;
4483 
4484 	if (data == NULL) {
4485 		regs->length = txgbe_get_reg_length(dev);
4486 		regs->width = sizeof(uint32_t);
4487 		return 0;
4488 	}
4489 
4490 	/* Support only full register dump */
4491 	if (regs->length == 0 ||
4492 	    regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
4493 		regs->version = hw->mac.type << 24 |
4494 				hw->revision_id << 16 |
4495 				hw->device_id;
4496 		while ((reg_group = reg_set[g_ind++]))
4497 			count += txgbe_read_regs_group(dev, &data[count],
4498 						      reg_group);
4499 		return 0;
4500 	}
4501 
4502 	return -ENOTSUP;
4503 }
4504 
4505 static int
4506 txgbe_get_eeprom_length(struct rte_eth_dev *dev)
4507 {
4508 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4509 
4510 	/* Return unit is byte count */
4511 	return hw->rom.word_size * 2;
4512 }
4513 
4514 static int
4515 txgbe_get_eeprom(struct rte_eth_dev *dev,
4516 		struct rte_dev_eeprom_info *in_eeprom)
4517 {
4518 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4519 	struct txgbe_rom_info *eeprom = &hw->rom;
4520 	uint16_t *data = in_eeprom->data;
4521 	int first, length;
4522 
4523 	first = in_eeprom->offset >> 1;
4524 	length = in_eeprom->length >> 1;
4525 	if (first > hw->rom.word_size ||
4526 	    ((first + length) > hw->rom.word_size))
4527 		return -EINVAL;
4528 
4529 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4530 
4531 	return eeprom->readw_buffer(hw, first, length, data);
4532 }
4533 
4534 static int
4535 txgbe_set_eeprom(struct rte_eth_dev *dev,
4536 		struct rte_dev_eeprom_info *in_eeprom)
4537 {
4538 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4539 	struct txgbe_rom_info *eeprom = &hw->rom;
4540 	uint16_t *data = in_eeprom->data;
4541 	int first, length;
4542 
4543 	first = in_eeprom->offset >> 1;
4544 	length = in_eeprom->length >> 1;
4545 	if (first > hw->rom.word_size ||
4546 	    ((first + length) > hw->rom.word_size))
4547 		return -EINVAL;
4548 
4549 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4550 
4551 	return eeprom->writew_buffer(hw,  first, length, data);
4552 }
4553 
4554 static int
4555 txgbe_get_module_info(struct rte_eth_dev *dev,
4556 		      struct rte_eth_dev_module_info *modinfo)
4557 {
4558 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4559 	uint32_t status;
4560 	uint8_t sff8472_rev, addr_mode;
4561 	bool page_swap = false;
4562 
4563 	/* Check whether we support SFF-8472 or not */
4564 	status = hw->phy.read_i2c_eeprom(hw,
4565 					     TXGBE_SFF_SFF_8472_COMP,
4566 					     &sff8472_rev);
4567 	if (status != 0)
4568 		return -EIO;
4569 
4570 	/* check whether an address change is required to access page 0xA2 */
4571 	status = hw->phy.read_i2c_eeprom(hw,
4572 					     TXGBE_SFF_SFF_8472_SWAP,
4573 					     &addr_mode);
4574 	if (status != 0)
4575 		return -EIO;
4576 
4577 	if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
4578 		PMD_DRV_LOG(ERR,
4579 			    "Address change required to access page 0xA2, "
4580 			    "but not supported. Please report the module "
4581 			    "type to the driver maintainers.");
4582 		page_swap = true;
4583 	}
4584 
4585 	if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
4586 		/* We have an SFP, but it does not support SFF-8472 */
4587 		modinfo->type = RTE_ETH_MODULE_SFF_8079;
4588 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
4589 	} else {
4590 		/* We have an SFP that supports a revision of SFF-8472. */
4591 		modinfo->type = RTE_ETH_MODULE_SFF_8472;
4592 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
4593 	}
4594 
4595 	return 0;
4596 }
4597 
4598 static int
4599 txgbe_get_module_eeprom(struct rte_eth_dev *dev,
4600 			struct rte_dev_eeprom_info *info)
4601 {
4602 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4603 	uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
4604 	uint8_t databyte = 0xFF;
4605 	uint8_t *data = info->data;
4606 	uint32_t i = 0;
4607 
4608 	if (info->length == 0)
4609 		return -EINVAL;
4610 
4611 	for (i = info->offset; i < info->offset + info->length; i++) {
4612 		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
4613 			status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
4614 		else
4615 			status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
4616 
4617 		if (status != 0)
4618 			return -EIO;
4619 
4620 		data[i - info->offset] = databyte;
4621 	}
4622 
4623 	return 0;
4624 }
4625 
4626 bool
4627 txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
4628 {
4629 	switch (mac_type) {
4630 	case txgbe_mac_raptor:
4631 	case txgbe_mac_raptor_vf:
4632 		return 1;
4633 	default:
4634 		return 0;
4635 	}
4636 }
4637 
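/*
 * Report the DCB configuration. The Rx/Tx queue ranges per traffic class are
 * fixed by the hardware queue layout: e.g. without virtualization and with
 * 4 TCs the 128 Tx queues are split 64/32/16/16, and with 8 TCs they are
 * split 32/32/16/16/8/8/8/8.
 */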
4638 static int
4639 txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
4640 			struct rte_eth_dcb_info *dcb_info)
4641 {
4642 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
4643 	struct txgbe_dcb_tc_config *tc;
4644 	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
4645 	uint8_t nb_tcs;
4646 	uint8_t i, j;
4647 
4648 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
4649 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
4650 	else
4651 		dcb_info->nb_tcs = 1;
4652 
4653 	tc_queue = &dcb_info->tc_queue;
4654 	nb_tcs = dcb_info->nb_tcs;
4655 
4656 	if (dcb_config->vt_mode) { /* vt is enabled */
4657 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
4658 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
4659 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4660 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
4661 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
4662 			for (j = 0; j < nb_tcs; j++) {
4663 				tc_queue->tc_rxq[0][j].base = j;
4664 				tc_queue->tc_rxq[0][j].nb_queue = 1;
4665 				tc_queue->tc_txq[0][j].base = j;
4666 				tc_queue->tc_txq[0][j].nb_queue = 1;
4667 			}
4668 		} else {
4669 			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
4670 				for (j = 0; j < nb_tcs; j++) {
4671 					tc_queue->tc_rxq[i][j].base =
4672 						i * nb_tcs + j;
4673 					tc_queue->tc_rxq[i][j].nb_queue = 1;
4674 					tc_queue->tc_txq[i][j].base =
4675 						i * nb_tcs + j;
4676 					tc_queue->tc_txq[i][j].nb_queue = 1;
4677 				}
4678 			}
4679 		}
4680 	} else { /* vt is disabled */
4681 		struct rte_eth_dcb_rx_conf *rx_conf =
4682 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
4683 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4684 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
4685 		if (dcb_info->nb_tcs == ETH_4_TCS) {
4686 			for (i = 0; i < dcb_info->nb_tcs; i++) {
4687 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
4688 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4689 			}
4690 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
4691 			dcb_info->tc_queue.tc_txq[0][1].base = 64;
4692 			dcb_info->tc_queue.tc_txq[0][2].base = 96;
4693 			dcb_info->tc_queue.tc_txq[0][3].base = 112;
4694 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
4695 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4696 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4697 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4698 		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
4699 			for (i = 0; i < dcb_info->nb_tcs; i++) {
4700 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
4701 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4702 			}
4703 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
4704 			dcb_info->tc_queue.tc_txq[0][1].base = 32;
4705 			dcb_info->tc_queue.tc_txq[0][2].base = 64;
4706 			dcb_info->tc_queue.tc_txq[0][3].base = 80;
4707 			dcb_info->tc_queue.tc_txq[0][4].base = 96;
4708 			dcb_info->tc_queue.tc_txq[0][5].base = 104;
4709 			dcb_info->tc_queue.tc_txq[0][6].base = 112;
4710 			dcb_info->tc_queue.tc_txq[0][7].base = 120;
4711 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
4712 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4713 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4714 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4715 			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
4716 			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
4717 			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
4718 			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
4719 		}
4720 	}
4721 	for (i = 0; i < dcb_info->nb_tcs; i++) {
4722 		tc = &dcb_config->tc_config[i];
4723 		dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
4724 	}
4725 	return 0;
4726 }
4727 
4728 /* Update e-tag ether type */
4729 static int
4730 txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
4731 			    uint16_t ether_type)
4732 {
4733 	uint32_t etag_etype;
4734 
4735 	etag_etype = rd32(hw, TXGBE_EXTAG);
4736 	etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
4737 	etag_etype |= ether_type;
4738 	wr32(hw, TXGBE_EXTAG, etag_etype);
4739 	txgbe_flush(hw);
4740 
4741 	return 0;
4742 }
4743 
4744 /* Enable e-tag tunnel */
4745 static int
4746 txgbe_e_tag_enable(struct txgbe_hw *hw)
4747 {
4748 	uint32_t etag_etype;
4749 
4750 	etag_etype = rd32(hw, TXGBE_PORTCTL);
4751 	etag_etype |= TXGBE_PORTCTL_ETAG;
4752 	wr32(hw, TXGBE_PORTCTL, etag_etype);
4753 	txgbe_flush(hw);
4754 
4755 	return 0;
4756 }
4757 
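/*
 * E-tag forwarding rules reuse the receive address (ETHADDR/RAR) table: a
 * rule is an entry with the VLD and ETAG flags set in the high word and the
 * tunnel id stored in the low word. Entry 0 (normally the port MAC address)
 * is skipped, so the scans below start at index 1.
 */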
4758 static int
4759 txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
4760 		       struct txgbe_l2_tunnel_conf  *l2_tunnel)
4761 {
4762 	int ret = 0;
4763 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4764 	uint32_t i, rar_entries;
4765 	uint32_t rar_low, rar_high;
4766 
4767 	rar_entries = hw->mac.num_rar_entries;
4768 
4769 	for (i = 1; i < rar_entries; i++) {
4770 		wr32(hw, TXGBE_ETHADDRIDX, i);
4771 		rar_high = rd32(hw, TXGBE_ETHADDRH);
4772 		rar_low  = rd32(hw, TXGBE_ETHADDRL);
4773 		if ((rar_high & TXGBE_ETHADDRH_VLD) &&
4774 		    (rar_high & TXGBE_ETHADDRH_ETAG) &&
4775 		    (TXGBE_ETHADDRL_ETAG(rar_low) ==
4776 		     l2_tunnel->tunnel_id)) {
4777 			wr32(hw, TXGBE_ETHADDRL, 0);
4778 			wr32(hw, TXGBE_ETHADDRH, 0);
4779 
4780 			txgbe_clear_vmdq(hw, i, BIT_MASK32);
4781 
4782 			return ret;
4783 		}
4784 	}
4785 
4786 	return ret;
4787 }
4788 
4789 static int
4790 txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
4791 		       struct txgbe_l2_tunnel_conf *l2_tunnel)
4792 {
4793 	int ret = 0;
4794 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4795 	uint32_t i, rar_entries;
4796 	uint32_t rar_low, rar_high;
4797 
4798 	/* One entry for one tunnel. Try to remove potential existing entry. */
4799 	txgbe_e_tag_filter_del(dev, l2_tunnel);
4800 
4801 	rar_entries = hw->mac.num_rar_entries;
4802 
4803 	for (i = 1; i < rar_entries; i++) {
4804 		wr32(hw, TXGBE_ETHADDRIDX, i);
4805 		rar_high = rd32(hw, TXGBE_ETHADDRH);
4806 		if (rar_high & TXGBE_ETHADDRH_VLD) {
4807 			continue;
4808 		} else {
4809 			txgbe_set_vmdq(hw, i, l2_tunnel->pool);
4810 			rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
4811 			rar_low = l2_tunnel->tunnel_id;
4812 
4813 			wr32(hw, TXGBE_ETHADDRL, rar_low);
4814 			wr32(hw, TXGBE_ETHADDRH, rar_high);
4815 
4816 			return ret;
4817 		}
4818 	}
4819 
4820 	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
4821 		     " Please remove a rule before adding a new one.");
4822 	return -EINVAL;
4823 }
4824 
4825 static inline struct txgbe_l2_tn_filter *
4826 txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
4827 			  struct txgbe_l2_tn_key *key)
4828 {
4829 	int ret;
4830 
4831 	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
4832 	if (ret < 0)
4833 		return NULL;
4834 
4835 	return l2_tn_info->hash_map[ret];
4836 }
4837 
4838 static inline int
4839 txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4840 			  struct txgbe_l2_tn_filter *l2_tn_filter)
4841 {
4842 	int ret;
4843 
4844 	ret = rte_hash_add_key(l2_tn_info->hash_handle,
4845 			       &l2_tn_filter->key);
4846 
4847 	if (ret < 0) {
4848 		PMD_DRV_LOG(ERR,
4849 			    "Failed to insert L2 tunnel filter"
4850 			    " to hash table %d!",
4851 			    ret);
4852 		return ret;
4853 	}
4854 
4855 	l2_tn_info->hash_map[ret] = l2_tn_filter;
4856 
4857 	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4858 
4859 	return 0;
4860 }
4861 
4862 static inline int
4863 txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4864 			  struct txgbe_l2_tn_key *key)
4865 {
4866 	int ret;
4867 	struct txgbe_l2_tn_filter *l2_tn_filter;
4868 
4869 	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
4870 
4871 	if (ret < 0) {
4872 		PMD_DRV_LOG(ERR,
4873 			    "No such L2 tunnel filter to delete %d!",
4874 			    ret);
4875 		return ret;
4876 	}
4877 
4878 	l2_tn_filter = l2_tn_info->hash_map[ret];
4879 	l2_tn_info->hash_map[ret] = NULL;
4880 
4881 	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4882 	rte_free(l2_tn_filter);
4883 
4884 	return 0;
4885 }
4886 
4887 /* Add l2 tunnel filter */
4888 int
4889 txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
4890 			       struct txgbe_l2_tunnel_conf *l2_tunnel,
4891 			       bool restore)
4892 {
4893 	int ret;
4894 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4895 	struct txgbe_l2_tn_key key;
4896 	struct txgbe_l2_tn_filter *node;
4897 
4898 	if (!restore) {
4899 		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4900 		key.tn_id = l2_tunnel->tunnel_id;
4901 
4902 		node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
4903 
4904 		if (node) {
4905 			PMD_DRV_LOG(ERR,
4906 				    "The L2 tunnel filter already exists!");
4907 			return -EINVAL;
4908 		}
4909 
4910 		node = rte_zmalloc("txgbe_l2_tn",
4911 				   sizeof(struct txgbe_l2_tn_filter),
4912 				   0);
4913 		if (!node)
4914 			return -ENOMEM;
4915 
4916 		rte_memcpy(&node->key,
4917 				 &key,
4918 				 sizeof(struct txgbe_l2_tn_key));
4919 		node->pool = l2_tunnel->pool;
4920 		ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
4921 		if (ret < 0) {
4922 			rte_free(node);
4923 			return ret;
4924 		}
4925 	}
4926 
4927 	switch (l2_tunnel->l2_tunnel_type) {
4928 	case RTE_L2_TUNNEL_TYPE_E_TAG:
4929 		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
4930 		break;
4931 	default:
4932 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4933 		ret = -EINVAL;
4934 		break;
4935 	}
4936 
4937 	if (!restore && ret < 0)
4938 		(void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4939 
4940 	return ret;
4941 }
4942 
4943 /* Delete l2 tunnel filter */
4944 int
4945 txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
4946 			       struct txgbe_l2_tunnel_conf *l2_tunnel)
4947 {
4948 	int ret;
4949 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4950 	struct txgbe_l2_tn_key key;
4951 
4952 	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4953 	key.tn_id = l2_tunnel->tunnel_id;
4954 	ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4955 	if (ret < 0)
4956 		return ret;
4957 
4958 	switch (l2_tunnel->l2_tunnel_type) {
4959 	case RTE_L2_TUNNEL_TYPE_E_TAG:
4960 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
4961 		break;
4962 	default:
4963 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4964 		ret = -EINVAL;
4965 		break;
4966 	}
4967 
4968 	return ret;
4969 }
4970 
4971 static int
4972 txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
4973 {
4974 	int ret = 0;
4975 	uint32_t ctrl;
4976 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4977 
4978 	ctrl = rd32(hw, TXGBE_POOLCTL);
4979 	ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
4980 	if (en)
4981 		ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
4982 	wr32(hw, TXGBE_POOLCTL, ctrl);
4983 
4984 	return ret;
4985 }
4986 
4987 /* Add UDP tunneling port */
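/*
 * Only one UDP port per tunnel protocol is supported; the port number is
 * written directly to the protocol's dedicated register (VXLANPORT,
 * GENEVEPORT, TEREDOPORT or VXLANPORTGPE), and deletion checks that the
 * currently programmed port matches before clearing it.
 */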
4988 static int
4989 txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
4990 			      struct rte_eth_udp_tunnel *udp_tunnel)
4991 {
4992 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4993 	int ret = 0;
4994 
4995 	if (udp_tunnel == NULL)
4996 		return -EINVAL;
4997 
4998 	switch (udp_tunnel->prot_type) {
4999 	case RTE_TUNNEL_TYPE_VXLAN:
5000 		if (udp_tunnel->udp_port == 0) {
5001 			PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
5002 			ret = -EINVAL;
5003 			break;
5004 		}
5005 		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
5006 		break;
5007 	case RTE_TUNNEL_TYPE_GENEVE:
5008 		if (udp_tunnel->udp_port == 0) {
5009 			PMD_DRV_LOG(ERR, "Adding Geneve port 0 is not allowed.");
5010 			ret = -EINVAL;
5011 			break;
5012 		}
5013 		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
5014 		break;
5015 	case RTE_TUNNEL_TYPE_TEREDO:
5016 		if (udp_tunnel->udp_port == 0) {
5017 			PMD_DRV_LOG(ERR, "Adding Teredo port 0 is not allowed.");
5018 			ret = -EINVAL;
5019 			break;
5020 		}
5021 		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
5022 		break;
5023 	case RTE_TUNNEL_TYPE_VXLAN_GPE:
5024 		if (udp_tunnel->udp_port == 0) {
5025 			PMD_DRV_LOG(ERR, "Adding VxLAN-GPE port 0 is not allowed.");
5026 			ret = -EINVAL;
5027 			break;
5028 		}
5029 		wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
5030 		break;
5031 	default:
5032 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5033 		ret = -EINVAL;
5034 		break;
5035 	}
5036 
5037 	txgbe_flush(hw);
5038 
5039 	return ret;
5040 }
5041 
5042 /* Remove UDP tunneling port */
5043 static int
5044 txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
5045 			      struct rte_eth_udp_tunnel *udp_tunnel)
5046 {
5047 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5048 	int ret = 0;
5049 	uint16_t cur_port;
5050 
5051 	if (udp_tunnel == NULL)
5052 		return -EINVAL;
5053 
5054 	switch (udp_tunnel->prot_type) {
5055 	case RTE_TUNNEL_TYPE_VXLAN:
5056 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
5057 		if (cur_port != udp_tunnel->udp_port) {
5058 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5059 					udp_tunnel->udp_port);
5060 			ret = -EINVAL;
5061 			break;
5062 		}
5063 		wr32(hw, TXGBE_VXLANPORT, 0);
5064 		break;
5065 	case RTE_TUNNEL_TYPE_GENEVE:
5066 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
5067 		if (cur_port != udp_tunnel->udp_port) {
5068 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5069 					udp_tunnel->udp_port);
5070 			ret = -EINVAL;
5071 			break;
5072 		}
5073 		wr32(hw, TXGBE_GENEVEPORT, 0);
5074 		break;
5075 	case RTE_TUNNEL_TYPE_TEREDO:
5076 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
5077 		if (cur_port != udp_tunnel->udp_port) {
5078 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5079 					udp_tunnel->udp_port);
5080 			ret = -EINVAL;
5081 			break;
5082 		}
5083 		wr32(hw, TXGBE_TEREDOPORT, 0);
5084 		break;
5085 	case RTE_TUNNEL_TYPE_VXLAN_GPE:
5086 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
5087 		if (cur_port != udp_tunnel->udp_port) {
5088 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5089 					udp_tunnel->udp_port);
5090 			ret = -EINVAL;
5091 			break;
5092 		}
5093 		wr32(hw, TXGBE_VXLANPORTGPE, 0);
5094 		break;
5095 	default:
5096 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5097 		ret = -EINVAL;
5098 		break;
5099 	}
5100 
5101 	txgbe_flush(hw);
5102 
5103 	return ret;
5104 }
5105 
5106 /* restore n-tuple filter */
5107 static inline void
5108 txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
5109 {
5110 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5111 	struct txgbe_5tuple_filter *node;
5112 
5113 	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
5114 		txgbe_inject_5tuple_filter(dev, node);
5115 	}
5116 }
5117 
5118 /* restore ethernet type filter */
5119 static inline void
5120 txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
5121 {
5122 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5123 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5124 	int i;
5125 
5126 	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
5127 		if (filter_info->ethertype_mask & (1 << i)) {
5128 			wr32(hw, TXGBE_ETFLT(i),
5129 					filter_info->ethertype_filters[i].etqf);
5130 			wr32(hw, TXGBE_ETCLS(i),
5131 					filter_info->ethertype_filters[i].etqs);
5132 			txgbe_flush(hw);
5133 		}
5134 	}
5135 }
5136 
5137 /* restore SYN filter */
5138 static inline void
5139 txgbe_syn_filter_restore(struct rte_eth_dev *dev)
5140 {
5141 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5142 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5143 	uint32_t synqf;
5144 
5145 	synqf = filter_info->syn_info;
5146 
5147 	if (synqf & TXGBE_SYNCLS_ENA) {
5148 		wr32(hw, TXGBE_SYNCLS, synqf);
5149 		txgbe_flush(hw);
5150 	}
5151 }
5152 
5153 /* restore L2 tunnel filter */
5154 static inline void
5155 txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
5156 {
5157 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5158 	struct txgbe_l2_tn_filter *node;
5159 	struct txgbe_l2_tunnel_conf l2_tn_conf;
5160 
5161 	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
5162 		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
5163 		l2_tn_conf.tunnel_id      = node->key.tn_id;
5164 		l2_tn_conf.pool           = node->pool;
5165 		(void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
5166 	}
5167 }
5168 
5169 /* restore rss filter */
5170 static inline void
5171 txgbe_rss_filter_restore(struct rte_eth_dev *dev)
5172 {
5173 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5174 
5175 	if (filter_info->rss_info.conf.queue_num)
5176 		txgbe_config_rss_filter(dev,
5177 			&filter_info->rss_info, TRUE);
5178 }
5179 
5180 static int
5181 txgbe_filter_restore(struct rte_eth_dev *dev)
5182 {
5183 	txgbe_ntuple_filter_restore(dev);
5184 	txgbe_ethertype_filter_restore(dev);
5185 	txgbe_syn_filter_restore(dev);
5186 	txgbe_fdir_filter_restore(dev);
5187 	txgbe_l2_tn_filter_restore(dev);
5188 	txgbe_rss_filter_restore(dev);
5189 
5190 	return 0;
5191 }
5192 
5193 static void
5194 txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
5195 {
5196 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5197 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5198 
5199 	if (l2_tn_info->e_tag_en)
5200 		(void)txgbe_e_tag_enable(hw);
5201 
5202 	if (l2_tn_info->e_tag_fwd_en)
5203 		(void)txgbe_e_tag_forwarding_en_dis(dev, 1);
5204 
5205 	(void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
5206 }
5207 
5208 /* remove all the n-tuple filters */
5209 void
5210 txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
5211 {
5212 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5213 	struct txgbe_5tuple_filter *p_5tuple;
5214 
5215 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
5216 		txgbe_remove_5tuple_filter(dev, p_5tuple);
5217 }
5218 
5219 /* remove all the ether type filters */
5220 void
5221 txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
5222 {
5223 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5224 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5225 	int i;
5226 
5227 	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
5228 		if (filter_info->ethertype_mask & (1 << i) &&
5229 		    !filter_info->ethertype_filters[i].conf) {
5230 			(void)txgbe_ethertype_filter_remove(filter_info,
5231 							    (uint8_t)i);
5232 			wr32(hw, TXGBE_ETFLT(i), 0);
5233 			wr32(hw, TXGBE_ETCLS(i), 0);
5234 			txgbe_flush(hw);
5235 		}
5236 	}
5237 }
5238 
5239 /* remove the SYN filter */
5240 void
5241 txgbe_clear_syn_filter(struct rte_eth_dev *dev)
5242 {
5243 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5244 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5245 
5246 	if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
5247 		filter_info->syn_info = 0;
5248 
5249 		wr32(hw, TXGBE_SYNCLS, 0);
5250 		txgbe_flush(hw);
5251 	}
5252 }
5253 
5254 /* remove all the L2 tunnel filters */
5255 int
5256 txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
5257 {
5258 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5259 	struct txgbe_l2_tn_filter *l2_tn_filter;
5260 	struct txgbe_l2_tunnel_conf l2_tn_conf;
5261 	int ret = 0;
5262 
5263 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
5264 		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
5265 		l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
5266 		l2_tn_conf.pool           = l2_tn_filter->pool;
5267 		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
5268 		if (ret < 0)
5269 			return ret;
5270 	}
5271 
5272 	return 0;
5273 }
5274 
5275 static const struct eth_dev_ops txgbe_eth_dev_ops = {
5276 	.dev_configure              = txgbe_dev_configure,
5277 	.dev_infos_get              = txgbe_dev_info_get,
5278 	.dev_start                  = txgbe_dev_start,
5279 	.dev_stop                   = txgbe_dev_stop,
5280 	.dev_set_link_up            = txgbe_dev_set_link_up,
5281 	.dev_set_link_down          = txgbe_dev_set_link_down,
5282 	.dev_close                  = txgbe_dev_close,
5283 	.dev_reset                  = txgbe_dev_reset,
5284 	.promiscuous_enable         = txgbe_dev_promiscuous_enable,
5285 	.promiscuous_disable        = txgbe_dev_promiscuous_disable,
5286 	.allmulticast_enable        = txgbe_dev_allmulticast_enable,
5287 	.allmulticast_disable       = txgbe_dev_allmulticast_disable,
5288 	.link_update                = txgbe_dev_link_update,
5289 	.stats_get                  = txgbe_dev_stats_get,
5290 	.xstats_get                 = txgbe_dev_xstats_get,
5291 	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
5292 	.stats_reset                = txgbe_dev_stats_reset,
5293 	.xstats_reset               = txgbe_dev_xstats_reset,
5294 	.xstats_get_names           = txgbe_dev_xstats_get_names,
5295 	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
5296 	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
5297 	.fw_version_get             = txgbe_fw_version_get,
5298 	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
5299 	.mtu_set                    = txgbe_dev_mtu_set,
5300 	.vlan_filter_set            = txgbe_vlan_filter_set,
5301 	.vlan_tpid_set              = txgbe_vlan_tpid_set,
5302 	.vlan_offload_set           = txgbe_vlan_offload_set,
5303 	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
5304 	.rx_queue_start	            = txgbe_dev_rx_queue_start,
5305 	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
5306 	.tx_queue_start	            = txgbe_dev_tx_queue_start,
5307 	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
5308 	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
5309 	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
5310 	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
5311 	.rx_queue_release           = txgbe_dev_rx_queue_release,
5312 	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
5313 	.tx_queue_release           = txgbe_dev_tx_queue_release,
5314 	.dev_led_on                 = txgbe_dev_led_on,
5315 	.dev_led_off                = txgbe_dev_led_off,
5316 	.flow_ctrl_get              = txgbe_flow_ctrl_get,
5317 	.flow_ctrl_set              = txgbe_flow_ctrl_set,
5318 	.priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
5319 	.mac_addr_add               = txgbe_add_rar,
5320 	.mac_addr_remove            = txgbe_remove_rar,
5321 	.mac_addr_set               = txgbe_set_default_mac_addr,
5322 	.uc_hash_table_set          = txgbe_uc_hash_table_set,
5323 	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
5324 	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
5325 	.reta_update                = txgbe_dev_rss_reta_update,
5326 	.reta_query                 = txgbe_dev_rss_reta_query,
5327 	.rss_hash_update            = txgbe_dev_rss_hash_update,
5328 	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
5329 	.flow_ops_get               = txgbe_dev_flow_ops_get,
5330 	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
5331 	.rxq_info_get               = txgbe_rxq_info_get,
5332 	.txq_info_get               = txgbe_txq_info_get,
5333 	.timesync_enable            = txgbe_timesync_enable,
5334 	.timesync_disable           = txgbe_timesync_disable,
5335 	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
5336 	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
5337 	.get_reg                    = txgbe_get_regs,
5338 	.get_eeprom_length          = txgbe_get_eeprom_length,
5339 	.get_eeprom                 = txgbe_get_eeprom,
5340 	.set_eeprom                 = txgbe_set_eeprom,
5341 	.get_module_info            = txgbe_get_module_info,
5342 	.get_module_eeprom          = txgbe_get_module_eeprom,
5343 	.get_dcb_info               = txgbe_dev_get_dcb_info,
5344 	.timesync_adjust_time       = txgbe_timesync_adjust_time,
5345 	.timesync_read_time         = txgbe_timesync_read_time,
5346 	.timesync_write_time        = txgbe_timesync_write_time,
5347 	.udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
5348 	.udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
5349 	.tm_ops_get                 = txgbe_tm_ops_get,
5350 	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
5351 };
5352 
5353 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
5354 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
5355 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
5356 RTE_PMD_REGISTER_PARAM_STRING(net_txgbe,
5357 			      TXGBE_DEVARG_BP_AUTO "=<0|1>"
5358 			      TXGBE_DEVARG_KR_POLL "=<0|1>"
5359 			      TXGBE_DEVARG_KR_PRESENT "=<0|1>"
5360 			      TXGBE_DEVARG_KX_SGMII "=<0|1>"
5361 			      TXGBE_DEVARG_FFE_SET "=<0-4>"
5362 			      TXGBE_DEVARG_FFE_MAIN "=<uint16>"
5363 			      TXGBE_DEVARG_FFE_PRE "=<uint16>"
5364 			      TXGBE_DEVARG_FFE_POST "=<uint16>");
5365 
5366 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_init, init, NOTICE);
5367 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_driver, driver, NOTICE);
5368 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_bp, bp, NOTICE);
5369 
5370 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
5371 	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_rx, rx, DEBUG);
5372 #endif
5373 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
5374 	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx, tx, DEBUG);
5375 #endif
5376 
5377 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
5378 	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx_free, tx_free, DEBUG);
5379 #endif
5380