xref: /dpdk/drivers/net/txgbe/txgbe_ethdev.c (revision 2cdca6ed)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5 
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <rte_common.h>
11 #include <ethdev_pci.h>
12 
13 #include <rte_interrupts.h>
14 #include <rte_log.h>
15 #include <rte_debug.h>
16 #include <rte_pci.h>
17 #include <rte_memory.h>
18 #include <rte_eal.h>
19 #include <rte_alarm.h>
20 #include <rte_kvargs.h>
21 
22 #include "txgbe_logs.h"
23 #include "base/txgbe.h"
24 #include "txgbe_ethdev.h"
25 #include "txgbe_rxtx.h"
26 #include "txgbe_regs_group.h"
27 
28 static const struct reg_info txgbe_regs_general[] = {
29 	{TXGBE_RST, 1, 1, "TXGBE_RST"},
30 	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
31 	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
32 	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
33 	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
34 	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
35 	{0, 0, 0, ""}
36 };
37 
38 static const struct reg_info txgbe_regs_nvm[] = {
39 	{0, 0, 0, ""}
40 };
41 
42 static const struct reg_info txgbe_regs_interrupt[] = {
43 	{0, 0, 0, ""}
44 };
45 
46 static const struct reg_info txgbe_regs_fctl_others[] = {
47 	{0, 0, 0, ""}
48 };
49 
50 static const struct reg_info txgbe_regs_rxdma[] = {
51 	{0, 0, 0, ""}
52 };
53 
54 static const struct reg_info txgbe_regs_rx[] = {
55 	{0, 0, 0, ""}
56 };
57 
58 static struct reg_info txgbe_regs_tx[] = {
59 	{0, 0, 0, ""}
60 };
61 
62 static const struct reg_info txgbe_regs_wakeup[] = {
63 	{0, 0, 0, ""}
64 };
65 
66 static const struct reg_info txgbe_regs_dcb[] = {
67 	{0, 0, 0, ""}
68 };
69 
70 static const struct reg_info txgbe_regs_mac[] = {
71 	{0, 0, 0, ""}
72 };
73 
74 static const struct reg_info txgbe_regs_diagnostic[] = {
75 	{0, 0, 0, ""},
76 };
77 
78 /* PF registers */
79 static const struct reg_info *txgbe_regs_others[] = {
80 				txgbe_regs_general,
81 				txgbe_regs_nvm,
82 				txgbe_regs_interrupt,
83 				txgbe_regs_fctl_others,
84 				txgbe_regs_rxdma,
85 				txgbe_regs_rx,
86 				txgbe_regs_tx,
87 				txgbe_regs_wakeup,
88 				txgbe_regs_dcb,
89 				txgbe_regs_mac,
90 				txgbe_regs_diagnostic,
91 				NULL};
92 
93 static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
94 static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
95 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
96 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
97 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
98 static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
99 static int txgbe_dev_close(struct rte_eth_dev *dev);
100 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
101 				int wait_to_complete);
102 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
103 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
104 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
105 					uint16_t queue);
106 
107 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
108 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
109 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
110 static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
111 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
112 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
113 				      struct rte_intr_handle *handle);
114 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
115 				      struct rte_intr_handle *handle);
116 static void txgbe_dev_interrupt_handler(void *param);
117 static void txgbe_dev_interrupt_delayed_handler(void *param);
118 static void txgbe_configure_msix(struct rte_eth_dev *dev);
119 
120 static int txgbe_filter_restore(struct rte_eth_dev *dev);
121 static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
122 
123 #define TXGBE_SET_HWSTRIP(h, q) do {\
124 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
125 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
126 		(h)->bitmap[idx] |= 1 << bit;\
127 	} while (0)
128 
129 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
130 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
131 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
132 		(h)->bitmap[idx] &= ~(1 << bit);\
133 	} while (0)
134 
135 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
136 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
137 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
138 		(r) = (h)->bitmap[idx] >> bit & 1;\
139 	} while (0)
140 
141 /*
142  * The set of PCI devices this driver supports
143  */
144 static const struct rte_pci_id pci_id_txgbe_map[] = {
145 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_SP1000) },
146 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820) },
147 	{ .vendor_id = 0, /* sentinel */ },
148 };
149 
150 static const struct rte_eth_desc_lim rx_desc_lim = {
151 	.nb_max = TXGBE_RING_DESC_MAX,
152 	.nb_min = TXGBE_RING_DESC_MIN,
153 	.nb_align = TXGBE_RXD_ALIGN,
154 };
155 
156 static const struct rte_eth_desc_lim tx_desc_lim = {
157 	.nb_max = TXGBE_RING_DESC_MAX,
158 	.nb_min = TXGBE_RING_DESC_MIN,
159 	.nb_align = TXGBE_TXD_ALIGN,
160 	.nb_seg_max = TXGBE_TX_MAX_SEG,
161 	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
162 };
163 
164 static const struct eth_dev_ops txgbe_eth_dev_ops;
165 
166 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
167 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
168 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
169 	/* MNG RxTx */
170 	HW_XSTAT(mng_bmc2host_packets),
171 	HW_XSTAT(mng_host2bmc_packets),
172 	/* Basic RxTx */
173 	HW_XSTAT(rx_packets),
174 	HW_XSTAT(tx_packets),
175 	HW_XSTAT(rx_bytes),
176 	HW_XSTAT(tx_bytes),
177 	HW_XSTAT(rx_total_bytes),
178 	HW_XSTAT(rx_total_packets),
179 	HW_XSTAT(tx_total_packets),
180 	HW_XSTAT(rx_total_missed_packets),
181 	HW_XSTAT(rx_broadcast_packets),
182 	HW_XSTAT(rx_multicast_packets),
183 	HW_XSTAT(rx_management_packets),
184 	HW_XSTAT(tx_management_packets),
185 	HW_XSTAT(rx_management_dropped),
186 
187 	/* Basic Error */
188 	HW_XSTAT(rx_crc_errors),
189 	HW_XSTAT(rx_illegal_byte_errors),
190 	HW_XSTAT(rx_error_bytes),
191 	HW_XSTAT(rx_mac_short_packet_dropped),
192 	HW_XSTAT(rx_length_errors),
193 	HW_XSTAT(rx_undersize_errors),
194 	HW_XSTAT(rx_fragment_errors),
195 	HW_XSTAT(rx_oversize_errors),
196 	HW_XSTAT(rx_jabber_errors),
197 	HW_XSTAT(rx_l3_l4_xsum_error),
198 	HW_XSTAT(mac_local_errors),
199 	HW_XSTAT(mac_remote_errors),
200 
201 	/* Flow Director */
202 	HW_XSTAT(flow_director_added_filters),
203 	HW_XSTAT(flow_director_removed_filters),
204 	HW_XSTAT(flow_director_filter_add_errors),
205 	HW_XSTAT(flow_director_filter_remove_errors),
206 	HW_XSTAT(flow_director_matched_filters),
207 	HW_XSTAT(flow_director_missed_filters),
208 
209 	/* FCoE */
210 	HW_XSTAT(rx_fcoe_crc_errors),
211 	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
212 	HW_XSTAT(rx_fcoe_dropped),
213 	HW_XSTAT(rx_fcoe_packets),
214 	HW_XSTAT(tx_fcoe_packets),
215 	HW_XSTAT(rx_fcoe_bytes),
216 	HW_XSTAT(tx_fcoe_bytes),
217 	HW_XSTAT(rx_fcoe_no_ddp),
218 	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
219 
220 	/* MACSEC */
221 	HW_XSTAT(tx_macsec_pkts_untagged),
222 	HW_XSTAT(tx_macsec_pkts_encrypted),
223 	HW_XSTAT(tx_macsec_pkts_protected),
224 	HW_XSTAT(tx_macsec_octets_encrypted),
225 	HW_XSTAT(tx_macsec_octets_protected),
226 	HW_XSTAT(rx_macsec_pkts_untagged),
227 	HW_XSTAT(rx_macsec_pkts_badtag),
228 	HW_XSTAT(rx_macsec_pkts_nosci),
229 	HW_XSTAT(rx_macsec_pkts_unknownsci),
230 	HW_XSTAT(rx_macsec_octets_decrypted),
231 	HW_XSTAT(rx_macsec_octets_validated),
232 	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
233 	HW_XSTAT(rx_macsec_sc_pkts_delayed),
234 	HW_XSTAT(rx_macsec_sc_pkts_late),
235 	HW_XSTAT(rx_macsec_sa_pkts_ok),
236 	HW_XSTAT(rx_macsec_sa_pkts_invalid),
237 	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
238 	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
239 	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
240 
241 	/* MAC RxTx */
242 	HW_XSTAT(rx_size_64_packets),
243 	HW_XSTAT(rx_size_65_to_127_packets),
244 	HW_XSTAT(rx_size_128_to_255_packets),
245 	HW_XSTAT(rx_size_256_to_511_packets),
246 	HW_XSTAT(rx_size_512_to_1023_packets),
247 	HW_XSTAT(rx_size_1024_to_max_packets),
248 	HW_XSTAT(tx_size_64_packets),
249 	HW_XSTAT(tx_size_65_to_127_packets),
250 	HW_XSTAT(tx_size_128_to_255_packets),
251 	HW_XSTAT(tx_size_256_to_511_packets),
252 	HW_XSTAT(tx_size_512_to_1023_packets),
253 	HW_XSTAT(tx_size_1024_to_max_packets),
254 
255 	/* Flow Control */
256 	HW_XSTAT(tx_xon_packets),
257 	HW_XSTAT(rx_xon_packets),
258 	HW_XSTAT(tx_xoff_packets),
259 	HW_XSTAT(rx_xoff_packets),
260 
261 	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
262 	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
263 	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
264 	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
265 };
266 
267 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
268 			   sizeof(rte_txgbe_stats_strings[0]))
269 
270 /* Per-priority statistics */
271 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
272 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
273 	UP_XSTAT(rx_up_packets),
274 	UP_XSTAT(tx_up_packets),
275 	UP_XSTAT(rx_up_bytes),
276 	UP_XSTAT(tx_up_bytes),
277 	UP_XSTAT(rx_up_drop_packets),
278 
279 	UP_XSTAT(tx_up_xon_packets),
280 	UP_XSTAT(rx_up_xon_packets),
281 	UP_XSTAT(tx_up_xoff_packets),
282 	UP_XSTAT(rx_up_xoff_packets),
283 	UP_XSTAT(rx_up_dropped),
284 	UP_XSTAT(rx_up_mbuf_alloc_errors),
285 	UP_XSTAT(tx_up_xon2off_packets),
286 };
287 
288 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
289 			   sizeof(rte_txgbe_up_strings[0]))
290 
291 /* Per-queue statistics */
292 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
293 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
294 	QP_XSTAT(rx_qp_packets),
295 	QP_XSTAT(tx_qp_packets),
296 	QP_XSTAT(rx_qp_bytes),
297 	QP_XSTAT(tx_qp_bytes),
298 	QP_XSTAT(rx_qp_mc_packets),
299 };
300 
301 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
302 			   sizeof(rte_txgbe_qp_strings[0]))
303 
304 static inline int
305 txgbe_is_sfp(struct txgbe_hw *hw)
306 {
307 	switch (hw->phy.type) {
308 	case txgbe_phy_sfp_avago:
309 	case txgbe_phy_sfp_ftl:
310 	case txgbe_phy_sfp_intel:
311 	case txgbe_phy_sfp_unknown:
312 	case txgbe_phy_sfp_tyco_passive:
313 	case txgbe_phy_sfp_unknown_passive:
314 		return 1;
315 	default:
316 		return 0;
317 	}
318 }
319 
320 static inline int32_t
321 txgbe_pf_reset_hw(struct txgbe_hw *hw)
322 {
323 	uint32_t ctrl_ext;
324 	int32_t status;
325 
326 	status = hw->mac.reset_hw(hw);
327 
328 	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
329 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
330 	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
331 	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
332 	txgbe_flush(hw);
333 
334 	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
335 		status = 0;
336 	return status;
337 }
338 
339 static inline void
340 txgbe_enable_intr(struct rte_eth_dev *dev)
341 {
342 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
343 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
344 
345 	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
346 	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
347 	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
348 	txgbe_flush(hw);
349 }
350 
351 static void
352 txgbe_disable_intr(struct txgbe_hw *hw)
353 {
354 	PMD_INIT_FUNC_TRACE();
355 
356 	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
357 	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
358 	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
359 	txgbe_flush(hw);
360 }
361 
362 static int
363 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
364 				  uint16_t queue_id,
365 				  uint8_t stat_idx,
366 				  uint8_t is_rx)
367 {
368 	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
369 	struct txgbe_stat_mappings *stat_mappings =
370 		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
371 	uint32_t qsmr_mask = 0;
372 	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
373 	uint32_t q_map;
374 	uint8_t n, offset;
375 
376 	if (hw->mac.type != txgbe_mac_raptor)
377 		return -ENOSYS;
378 
379 	if (stat_idx & !QMAP_FIELD_RESERVED_BITS_MASK)
380 		return -EIO;
381 
382 	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
383 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
384 		     queue_id, stat_idx);
385 
386 	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
387 	if (n >= TXGBE_NB_STAT_MAPPING) {
388 		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
389 		return -EIO;
390 	}
391 	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
392 
393 	/* Now clear any previous stat_idx set */
394 	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
395 	if (!is_rx)
396 		stat_mappings->tqsm[n] &= ~clearing_mask;
397 	else
398 		stat_mappings->rqsm[n] &= ~clearing_mask;
399 
400 	q_map = (uint32_t)stat_idx;
401 	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
402 	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
403 	if (!is_rx)
404 		stat_mappings->tqsm[n] |= qsmr_mask;
405 	else
406 		stat_mappings->rqsm[n] |= qsmr_mask;
407 
408 	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
409 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
410 		     queue_id, stat_idx);
411 	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
412 		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
413 	return 0;
414 }
415 
416 static void
417 txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
418 {
419 	int i;
420 	u8 bwgp;
421 	struct txgbe_dcb_tc_config *tc;
422 
423 	UNREFERENCED_PARAMETER(hw);
424 
425 	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
426 	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
427 	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
428 	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
429 		tc = &dcb_config->tc_config[i];
430 		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
431 		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
432 		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
433 		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
434 		tc->pfc = txgbe_dcb_pfc_disabled;
435 	}
436 
437 	/* Initialize default user to priority mapping, UPx->TC0 */
438 	tc = &dcb_config->tc_config[0];
439 	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
440 	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
441 	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
442 		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
443 		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
444 	}
445 	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
446 	dcb_config->pfc_mode_enable = false;
447 	dcb_config->vt_mode = true;
448 	dcb_config->round_robin_enable = false;
449 	/* support all DCB capabilities */
450 	dcb_config->support.capabilities = 0xFF;
451 }
452 
453 /*
454  * Ensure that all locks are released before first NVM or PHY access
455  */
456 static void
457 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
458 {
459 	uint16_t mask;
460 
461 	/*
462 	 * These ones are more tricky since they are common to all ports; but
463 	 * swfw_sync retries last long enough (1s) to be almost sure that if
464 	 * lock can not be taken it is due to an improper lock of the
465 	 * semaphore.
466 	 */
467 	mask = TXGBE_MNGSEM_SWPHY |
468 	       TXGBE_MNGSEM_SWMBX |
469 	       TXGBE_MNGSEM_SWFLASH;
470 	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
471 		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
472 
473 	hw->mac.release_swfw_sync(hw, mask);
474 }
475 
476 static int
477 txgbe_handle_devarg(__rte_unused const char *key, const char *value,
478 		  void *extra_args)
479 {
480 	uint16_t *n = extra_args;
481 
482 	if (value == NULL || extra_args == NULL)
483 		return -EINVAL;
484 
485 	*n = (uint16_t)strtoul(value, NULL, 10);
486 	if (*n == USHRT_MAX && errno == ERANGE)
487 		return -1;
488 
489 	return 0;
490 }
491 
492 static void
493 txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
494 {
495 	struct rte_kvargs *kvlist;
496 	u16 auto_neg = 1;
497 	u16 poll = 0;
498 	u16 present = 1;
499 	u16 sgmii = 0;
500 	u16 ffe_set = 0;
501 	u16 ffe_main = 27;
502 	u16 ffe_pre = 8;
503 	u16 ffe_post = 44;
504 
505 	if (devargs == NULL)
506 		goto null;
507 
508 	kvlist = rte_kvargs_parse(devargs->args, txgbe_valid_arguments);
509 	if (kvlist == NULL)
510 		goto null;
511 
512 	rte_kvargs_process(kvlist, TXGBE_DEVARG_BP_AUTO,
513 			   &txgbe_handle_devarg, &auto_neg);
514 	rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_POLL,
515 			   &txgbe_handle_devarg, &poll);
516 	rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_PRESENT,
517 			   &txgbe_handle_devarg, &present);
518 	rte_kvargs_process(kvlist, TXGBE_DEVARG_KX_SGMII,
519 			   &txgbe_handle_devarg, &sgmii);
520 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_SET,
521 			   &txgbe_handle_devarg, &ffe_set);
522 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_MAIN,
523 			   &txgbe_handle_devarg, &ffe_main);
524 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_PRE,
525 			   &txgbe_handle_devarg, &ffe_pre);
526 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_POST,
527 			   &txgbe_handle_devarg, &ffe_post);
528 	rte_kvargs_free(kvlist);
529 
530 null:
531 	hw->devarg.auto_neg = auto_neg;
532 	hw->devarg.poll = poll;
533 	hw->devarg.present = present;
534 	hw->devarg.sgmii = sgmii;
535 	hw->phy.ffe_set = ffe_set;
536 	hw->phy.ffe_main = ffe_main;
537 	hw->phy.ffe_pre = ffe_pre;
538 	hw->phy.ffe_post = ffe_post;
539 }
540 
541 static int
542 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
543 {
544 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
545 	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
546 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
547 	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
548 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
549 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
550 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
551 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
552 	const struct rte_memzone *mz;
553 	uint32_t ctrl_ext;
554 	uint16_t csum;
555 	int err, i, ret;
556 
557 	PMD_INIT_FUNC_TRACE();
558 
559 	eth_dev->dev_ops = &txgbe_eth_dev_ops;
560 	eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
561 	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
562 	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
563 	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
564 	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
565 	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
566 
567 	/*
568 	 * For secondary processes, we don't initialise any further as primary
569 	 * has already done this work. Only check we don't need a different
570 	 * RX and TX function.
571 	 */
572 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
573 		struct txgbe_tx_queue *txq;
574 		/* TX queue function in primary, set by last queue initialized
575 		 * Tx queue may not initialized by primary process
576 		 */
577 		if (eth_dev->data->tx_queues) {
578 			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
579 			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
580 			txgbe_set_tx_function(eth_dev, txq);
581 		} else {
582 			/* Use default TX function if we get here */
583 			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
584 				     "Using default TX function.");
585 		}
586 
587 		txgbe_set_rx_function(eth_dev);
588 
589 		return 0;
590 	}
591 
592 	rte_eth_copy_pci_info(eth_dev, pci_dev);
593 
594 	/* Vendor and Device ID need to be set before init of shared code */
595 	hw->device_id = pci_dev->id.device_id;
596 	hw->vendor_id = pci_dev->id.vendor_id;
597 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
598 	hw->allow_unsupported_sfp = 1;
599 
600 	/* Reserve memory for interrupt status block */
601 	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
602 		16, TXGBE_ALIGN, SOCKET_ID_ANY);
603 	if (mz == NULL)
604 		return -ENOMEM;
605 
606 	hw->isb_dma = TMZ_PADDR(mz);
607 	hw->isb_mem = TMZ_VADDR(mz);
608 
609 	txgbe_parse_devargs(hw, pci_dev->device.devargs);
610 	/* Initialize the shared code (base driver) */
611 	err = txgbe_init_shared_code(hw);
612 	if (err != 0) {
613 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
614 		return -EIO;
615 	}
616 
617 	/* Unlock any pending hardware semaphore */
618 	txgbe_swfw_lock_reset(hw);
619 
620 #ifdef RTE_LIB_SECURITY
621 	/* Initialize security_ctx only for primary process*/
622 	if (txgbe_ipsec_ctx_create(eth_dev))
623 		return -ENOMEM;
624 #endif
625 
626 	/* Initialize DCB configuration*/
627 	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
628 	txgbe_dcb_init(hw, dcb_config);
629 
630 	/* Get Hardware Flow Control setting */
631 	hw->fc.requested_mode = txgbe_fc_full;
632 	hw->fc.current_mode = txgbe_fc_full;
633 	hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
634 	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
635 		hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
636 		hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
637 	}
638 	hw->fc.send_xon = 1;
639 
640 	err = hw->rom.init_params(hw);
641 	if (err != 0) {
642 		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
643 		return -EIO;
644 	}
645 
646 	/* Make sure we have a good EEPROM before we read from it */
647 	err = hw->rom.validate_checksum(hw, &csum);
648 	if (err != 0) {
649 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
650 		return -EIO;
651 	}
652 
653 	err = hw->mac.init_hw(hw);
654 
655 	/*
656 	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
657 	 * is called too soon after the kernel driver unbinding/binding occurs.
658 	 * The failure occurs in txgbe_identify_phy() for all devices,
659 	 * but for non-copper devies, txgbe_identify_sfp_module() is
660 	 * also called. See txgbe_identify_phy(). The reason for the
661 	 * failure is not known, and only occuts when virtualisation features
662 	 * are disabled in the bios. A delay of 200ms  was found to be enough by
663 	 * trial-and-error, and is doubled to be safe.
664 	 */
665 	if (err && hw->phy.media_type == txgbe_media_type_copper) {
666 		rte_delay_ms(200);
667 		err = hw->mac.init_hw(hw);
668 	}
669 
670 	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
671 		err = 0;
672 
673 	if (err == TXGBE_ERR_EEPROM_VERSION) {
674 		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
675 			     "LOM.  Please be aware there may be issues associated "
676 			     "with your hardware.");
677 		PMD_INIT_LOG(ERR, "If you are experiencing problems "
678 			     "please contact your hardware representative "
679 			     "who provided you with this hardware.");
680 	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
681 		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
682 	}
683 	if (err) {
684 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
685 		return -EIO;
686 	}
687 
688 	/* Reset the hw statistics */
689 	txgbe_dev_stats_reset(eth_dev);
690 
691 	/* disable interrupt */
692 	txgbe_disable_intr(hw);
693 
694 	/* Allocate memory for storing MAC addresses */
695 	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
696 					       hw->mac.num_rar_entries, 0);
697 	if (eth_dev->data->mac_addrs == NULL) {
698 		PMD_INIT_LOG(ERR,
699 			     "Failed to allocate %u bytes needed to store "
700 			     "MAC addresses",
701 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
702 		return -ENOMEM;
703 	}
704 
705 	/* Copy the permanent MAC address */
706 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
707 			&eth_dev->data->mac_addrs[0]);
708 
709 	/* Allocate memory for storing hash filter MAC addresses */
710 	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
711 			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
712 	if (eth_dev->data->hash_mac_addrs == NULL) {
713 		PMD_INIT_LOG(ERR,
714 			     "Failed to allocate %d bytes needed to store MAC addresses",
715 			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
716 		return -ENOMEM;
717 	}
718 
719 	/* initialize the vfta */
720 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
721 
722 	/* initialize the hw strip bitmap*/
723 	memset(hwstrip, 0, sizeof(*hwstrip));
724 
725 	/* initialize PF if max_vfs not zero */
726 	ret = txgbe_pf_host_init(eth_dev);
727 	if (ret) {
728 		rte_free(eth_dev->data->mac_addrs);
729 		eth_dev->data->mac_addrs = NULL;
730 		rte_free(eth_dev->data->hash_mac_addrs);
731 		eth_dev->data->hash_mac_addrs = NULL;
732 		return ret;
733 	}
734 
735 	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
736 	/* let hardware know driver is loaded */
737 	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
738 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
739 	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
740 	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
741 	txgbe_flush(hw);
742 
743 	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
744 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
745 			     (int)hw->mac.type, (int)hw->phy.type,
746 			     (int)hw->phy.sfp_type);
747 	else
748 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
749 			     (int)hw->mac.type, (int)hw->phy.type);
750 
751 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
752 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
753 		     pci_dev->id.device_id);
754 
755 	rte_intr_callback_register(intr_handle,
756 				   txgbe_dev_interrupt_handler, eth_dev);
757 
758 	/* enable uio/vfio intr/eventfd mapping */
759 	rte_intr_enable(intr_handle);
760 
761 	/* enable support intr */
762 	txgbe_enable_intr(eth_dev);
763 
764 	/* initialize filter info */
765 	memset(filter_info, 0,
766 	       sizeof(struct txgbe_filter_info));
767 
768 	/* initialize 5tuple filter list */
769 	TAILQ_INIT(&filter_info->fivetuple_list);
770 
771 	/* initialize flow director filter list & hash */
772 	txgbe_fdir_filter_init(eth_dev);
773 
774 	/* initialize l2 tunnel filter list & hash */
775 	txgbe_l2_tn_filter_init(eth_dev);
776 
777 	/* initialize flow filter lists */
778 	txgbe_filterlist_init();
779 
780 	/* initialize bandwidth configuration info */
781 	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
782 
783 	/* initialize Traffic Manager configuration */
784 	txgbe_tm_conf_init(eth_dev);
785 
786 	return 0;
787 }
788 
789 static int
790 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
791 {
792 	PMD_INIT_FUNC_TRACE();
793 
794 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
795 		return 0;
796 
797 	txgbe_dev_close(eth_dev);
798 
799 	return 0;
800 }
801 
802 static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
803 {
804 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
805 	struct txgbe_5tuple_filter *p_5tuple;
806 
807 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
808 		TAILQ_REMOVE(&filter_info->fivetuple_list,
809 			     p_5tuple,
810 			     entries);
811 		rte_free(p_5tuple);
812 	}
813 	memset(filter_info->fivetuple_mask, 0,
814 	       sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
815 
816 	return 0;
817 }
818 
819 static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
820 {
821 	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
822 	struct txgbe_fdir_filter *fdir_filter;
823 
824 	if (fdir_info->hash_map)
825 		rte_free(fdir_info->hash_map);
826 	if (fdir_info->hash_handle)
827 		rte_hash_free(fdir_info->hash_handle);
828 
829 	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
830 		TAILQ_REMOVE(&fdir_info->fdir_list,
831 			     fdir_filter,
832 			     entries);
833 		rte_free(fdir_filter);
834 	}
835 
836 	return 0;
837 }
838 
839 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
840 {
841 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
842 	struct txgbe_l2_tn_filter *l2_tn_filter;
843 
844 	if (l2_tn_info->hash_map)
845 		rte_free(l2_tn_info->hash_map);
846 	if (l2_tn_info->hash_handle)
847 		rte_hash_free(l2_tn_info->hash_handle);
848 
849 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
850 		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
851 			     l2_tn_filter,
852 			     entries);
853 		rte_free(l2_tn_filter);
854 	}
855 
856 	return 0;
857 }
858 
859 static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
860 {
861 	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
862 	char fdir_hash_name[RTE_HASH_NAMESIZE];
863 	struct rte_hash_parameters fdir_hash_params = {
864 		.name = fdir_hash_name,
865 		.entries = TXGBE_MAX_FDIR_FILTER_NUM,
866 		.key_len = sizeof(struct txgbe_atr_input),
867 		.hash_func = rte_hash_crc,
868 		.hash_func_init_val = 0,
869 		.socket_id = rte_socket_id(),
870 	};
871 
872 	TAILQ_INIT(&fdir_info->fdir_list);
873 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
874 		 "fdir_%s", TDEV_NAME(eth_dev));
875 	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
876 	if (!fdir_info->hash_handle) {
877 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
878 		return -EINVAL;
879 	}
880 	fdir_info->hash_map = rte_zmalloc("txgbe",
881 					  sizeof(struct txgbe_fdir_filter *) *
882 					  TXGBE_MAX_FDIR_FILTER_NUM,
883 					  0);
884 	if (!fdir_info->hash_map) {
885 		PMD_INIT_LOG(ERR,
886 			     "Failed to allocate memory for fdir hash map!");
887 		return -ENOMEM;
888 	}
889 	fdir_info->mask_added = FALSE;
890 
891 	return 0;
892 }
893 
894 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
895 {
896 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
897 	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
898 	struct rte_hash_parameters l2_tn_hash_params = {
899 		.name = l2_tn_hash_name,
900 		.entries = TXGBE_MAX_L2_TN_FILTER_NUM,
901 		.key_len = sizeof(struct txgbe_l2_tn_key),
902 		.hash_func = rte_hash_crc,
903 		.hash_func_init_val = 0,
904 		.socket_id = rte_socket_id(),
905 	};
906 
907 	TAILQ_INIT(&l2_tn_info->l2_tn_list);
908 	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
909 		 "l2_tn_%s", TDEV_NAME(eth_dev));
910 	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
911 	if (!l2_tn_info->hash_handle) {
912 		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
913 		return -EINVAL;
914 	}
915 	l2_tn_info->hash_map = rte_zmalloc("txgbe",
916 				   sizeof(struct txgbe_l2_tn_filter *) *
917 				   TXGBE_MAX_L2_TN_FILTER_NUM,
918 				   0);
919 	if (!l2_tn_info->hash_map) {
920 		PMD_INIT_LOG(ERR,
921 			"Failed to allocate memory for L2 TN hash map!");
922 		return -ENOMEM;
923 	}
924 	l2_tn_info->e_tag_en = FALSE;
925 	l2_tn_info->e_tag_fwd_en = FALSE;
926 	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
927 
928 	return 0;
929 }
930 
931 static int
932 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
933 		struct rte_pci_device *pci_dev)
934 {
935 	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
936 			sizeof(struct txgbe_adapter),
937 			eth_dev_pci_specific_init, pci_dev,
938 			eth_txgbe_dev_init, NULL);
939 }
940 
941 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
942 {
943 	struct rte_eth_dev *ethdev;
944 
945 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
946 	if (!ethdev)
947 		return 0;
948 
949 	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
950 }
951 
952 static struct rte_pci_driver rte_txgbe_pmd = {
953 	.id_table = pci_id_txgbe_map,
954 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
955 		     RTE_PCI_DRV_INTR_LSC,
956 	.probe = eth_txgbe_pci_probe,
957 	.remove = eth_txgbe_pci_remove,
958 };
959 
960 static int
961 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
962 {
963 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
964 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
965 	uint32_t vfta;
966 	uint32_t vid_idx;
967 	uint32_t vid_bit;
968 
969 	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
970 	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
971 	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
972 	if (on)
973 		vfta |= vid_bit;
974 	else
975 		vfta &= ~vid_bit;
976 	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
977 
978 	/* update local VFTA copy */
979 	shadow_vfta->vfta[vid_idx] = vfta;
980 
981 	return 0;
982 }
983 
984 static void
985 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
986 {
987 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
988 	struct txgbe_rx_queue *rxq;
989 	bool restart;
990 	uint32_t rxcfg, rxbal, rxbah;
991 
992 	if (on)
993 		txgbe_vlan_hw_strip_enable(dev, queue);
994 	else
995 		txgbe_vlan_hw_strip_disable(dev, queue);
996 
997 	rxq = dev->data->rx_queues[queue];
998 	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
999 	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
1000 	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
1001 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
1002 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
1003 			!(rxcfg & TXGBE_RXCFG_VLAN);
1004 		rxcfg |= TXGBE_RXCFG_VLAN;
1005 	} else {
1006 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
1007 			(rxcfg & TXGBE_RXCFG_VLAN);
1008 		rxcfg &= ~TXGBE_RXCFG_VLAN;
1009 	}
1010 	rxcfg &= ~TXGBE_RXCFG_ENA;
1011 
1012 	if (restart) {
1013 		/* set vlan strip for ring */
1014 		txgbe_dev_rx_queue_stop(dev, queue);
1015 		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
1016 		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
1017 		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
1018 		txgbe_dev_rx_queue_start(dev, queue);
1019 	}
1020 }
1021 
1022 static int
1023 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1024 		    enum rte_vlan_type vlan_type,
1025 		    uint16_t tpid)
1026 {
1027 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1028 	int ret = 0;
1029 	uint32_t portctrl, vlan_ext, qinq;
1030 
1031 	portctrl = rd32(hw, TXGBE_PORTCTL);
1032 
1033 	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
1034 	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
1035 	switch (vlan_type) {
1036 	case RTE_ETH_VLAN_TYPE_INNER:
1037 		if (vlan_ext) {
1038 			wr32m(hw, TXGBE_VLANCTL,
1039 				TXGBE_VLANCTL_TPID_MASK,
1040 				TXGBE_VLANCTL_TPID(tpid));
1041 			wr32m(hw, TXGBE_DMATXCTRL,
1042 				TXGBE_DMATXCTRL_TPID_MASK,
1043 				TXGBE_DMATXCTRL_TPID(tpid));
1044 		} else {
1045 			ret = -ENOTSUP;
1046 			PMD_DRV_LOG(ERR, "Inner type is not supported"
1047 				    " by single VLAN");
1048 		}
1049 
1050 		if (qinq) {
1051 			wr32m(hw, TXGBE_TAGTPID(0),
1052 				TXGBE_TAGTPID_LSB_MASK,
1053 				TXGBE_TAGTPID_LSB(tpid));
1054 		}
1055 		break;
1056 	case RTE_ETH_VLAN_TYPE_OUTER:
1057 		if (vlan_ext) {
1058 			/* Only the high 16-bits is valid */
1059 			wr32m(hw, TXGBE_EXTAG,
1060 				TXGBE_EXTAG_VLAN_MASK,
1061 				TXGBE_EXTAG_VLAN(tpid));
1062 		} else {
1063 			wr32m(hw, TXGBE_VLANCTL,
1064 				TXGBE_VLANCTL_TPID_MASK,
1065 				TXGBE_VLANCTL_TPID(tpid));
1066 			wr32m(hw, TXGBE_DMATXCTRL,
1067 				TXGBE_DMATXCTRL_TPID_MASK,
1068 				TXGBE_DMATXCTRL_TPID(tpid));
1069 		}
1070 
1071 		if (qinq) {
1072 			wr32m(hw, TXGBE_TAGTPID(0),
1073 				TXGBE_TAGTPID_MSB_MASK,
1074 				TXGBE_TAGTPID_MSB(tpid));
1075 		}
1076 		break;
1077 	default:
1078 		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1079 		return -EINVAL;
1080 	}
1081 
1082 	return ret;
1083 }
1084 
1085 void
1086 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1087 {
1088 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1089 	uint32_t vlnctrl;
1090 
1091 	PMD_INIT_FUNC_TRACE();
1092 
1093 	/* Filter Table Disable */
1094 	vlnctrl = rd32(hw, TXGBE_VLANCTL);
1095 	vlnctrl &= ~TXGBE_VLANCTL_VFE;
1096 	wr32(hw, TXGBE_VLANCTL, vlnctrl);
1097 }
1098 
1099 void
1100 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1101 {
1102 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1103 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
1104 	uint32_t vlnctrl;
1105 	uint16_t i;
1106 
1107 	PMD_INIT_FUNC_TRACE();
1108 
1109 	/* Filter Table Enable */
1110 	vlnctrl = rd32(hw, TXGBE_VLANCTL);
1111 	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
1112 	vlnctrl |= TXGBE_VLANCTL_VFE;
1113 	wr32(hw, TXGBE_VLANCTL, vlnctrl);
1114 
1115 	/* write whatever is in local vfta copy */
1116 	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
1117 		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
1118 }
1119 
1120 void
1121 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1122 {
1123 	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
1124 	struct txgbe_rx_queue *rxq;
1125 
1126 	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
1127 		return;
1128 
1129 	if (on)
1130 		TXGBE_SET_HWSTRIP(hwstrip, queue);
1131 	else
1132 		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1133 
1134 	if (queue >= dev->data->nb_rx_queues)
1135 		return;
1136 
1137 	rxq = dev->data->rx_queues[queue];
1138 
1139 	if (on) {
1140 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1141 		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1142 	} else {
1143 		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
1144 		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1145 	}
1146 }
1147 
1148 static void
1149 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1150 {
1151 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1152 	uint32_t ctrl;
1153 
1154 	PMD_INIT_FUNC_TRACE();
1155 
1156 	ctrl = rd32(hw, TXGBE_RXCFG(queue));
1157 	ctrl &= ~TXGBE_RXCFG_VLAN;
1158 	wr32(hw, TXGBE_RXCFG(queue), ctrl);
1159 
1160 	/* record those setting for HW strip per queue */
1161 	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1162 }
1163 
1164 static void
1165 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1166 {
1167 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1168 	uint32_t ctrl;
1169 
1170 	PMD_INIT_FUNC_TRACE();
1171 
1172 	ctrl = rd32(hw, TXGBE_RXCFG(queue));
1173 	ctrl |= TXGBE_RXCFG_VLAN;
1174 	wr32(hw, TXGBE_RXCFG(queue), ctrl);
1175 
1176 	/* record those setting for HW strip per queue */
1177 	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1178 }
1179 
1180 static void
1181 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1182 {
1183 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1184 	uint32_t ctrl;
1185 
1186 	PMD_INIT_FUNC_TRACE();
1187 
1188 	ctrl = rd32(hw, TXGBE_PORTCTL);
1189 	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
1190 	wr32(hw, TXGBE_PORTCTL, ctrl);
1191 }
1192 
1193 static void
1194 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1195 {
1196 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1197 	uint32_t ctrl;
1198 
1199 	PMD_INIT_FUNC_TRACE();
1200 
1201 	ctrl  = rd32(hw, TXGBE_PORTCTL);
1202 	ctrl |= TXGBE_PORTCTL_VLANEXT;
1203 	wr32(hw, TXGBE_PORTCTL, ctrl);
1204 }
1205 
1206 static void
1207 txgbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
1208 {
1209 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1210 	uint32_t ctrl;
1211 
1212 	PMD_INIT_FUNC_TRACE();
1213 
1214 	ctrl = rd32(hw, TXGBE_PORTCTL);
1215 	ctrl &= ~TXGBE_PORTCTL_QINQ;
1216 	wr32(hw, TXGBE_PORTCTL, ctrl);
1217 }
1218 
1219 static void
1220 txgbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
1221 {
1222 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1223 	uint32_t ctrl;
1224 
1225 	PMD_INIT_FUNC_TRACE();
1226 
1227 	ctrl  = rd32(hw, TXGBE_PORTCTL);
1228 	ctrl |= TXGBE_PORTCTL_QINQ | TXGBE_PORTCTL_VLANEXT;
1229 	wr32(hw, TXGBE_PORTCTL, ctrl);
1230 }
1231 
1232 void
1233 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
1234 {
1235 	struct txgbe_rx_queue *rxq;
1236 	uint16_t i;
1237 
1238 	PMD_INIT_FUNC_TRACE();
1239 
1240 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1241 		rxq = dev->data->rx_queues[i];
1242 
1243 		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1244 			txgbe_vlan_strip_queue_set(dev, i, 1);
1245 		else
1246 			txgbe_vlan_strip_queue_set(dev, i, 0);
1247 	}
1248 }
1249 
1250 void
1251 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
1252 {
1253 	uint16_t i;
1254 	struct rte_eth_rxmode *rxmode;
1255 	struct txgbe_rx_queue *rxq;
1256 
1257 	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
1258 		rxmode = &dev->data->dev_conf.rxmode;
1259 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1260 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
1261 				rxq = dev->data->rx_queues[i];
1262 				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1263 			}
1264 		else
1265 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
1266 				rxq = dev->data->rx_queues[i];
1267 				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1268 			}
1269 	}
1270 }
1271 
1272 static int
1273 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
1274 {
1275 	struct rte_eth_rxmode *rxmode;
1276 	rxmode = &dev->data->dev_conf.rxmode;
1277 
1278 	if (mask & RTE_ETH_VLAN_STRIP_MASK)
1279 		txgbe_vlan_hw_strip_config(dev);
1280 
1281 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
1282 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1283 			txgbe_vlan_hw_filter_enable(dev);
1284 		else
1285 			txgbe_vlan_hw_filter_disable(dev);
1286 	}
1287 
1288 	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
1289 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
1290 			txgbe_vlan_hw_extend_enable(dev);
1291 		else
1292 			txgbe_vlan_hw_extend_disable(dev);
1293 	}
1294 
1295 	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
1296 		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1297 			txgbe_qinq_hw_strip_enable(dev);
1298 		else
1299 			txgbe_qinq_hw_strip_disable(dev);
1300 	}
1301 
1302 	return 0;
1303 }
1304 
1305 static int
1306 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1307 {
1308 	txgbe_config_vlan_strip_on_all_queues(dev, mask);
1309 
1310 	txgbe_vlan_offload_config(dev, mask);
1311 
1312 	return 0;
1313 }
1314 
1315 static void
1316 txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1317 {
1318 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1319 	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
1320 	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
1321 
1322 	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
1323 	wr32(hw, TXGBE_VLANCTL, vlanctrl);
1324 }
1325 
1326 static int
1327 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1328 {
1329 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1330 
1331 	switch (nb_rx_q) {
1332 	case 1:
1333 	case 2:
1334 		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
1335 		break;
1336 	case 4:
1337 		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
1338 		break;
1339 	default:
1340 		return -EINVAL;
1341 	}
1342 
1343 	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
1344 		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1345 	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
1346 		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1347 	return 0;
1348 }
1349 
1350 static int
1351 txgbe_check_mq_mode(struct rte_eth_dev *dev)
1352 {
1353 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1354 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
1355 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
1356 
1357 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1358 		/* check multi-queue mode */
1359 		switch (dev_conf->rxmode.mq_mode) {
1360 		case RTE_ETH_MQ_RX_VMDQ_DCB:
1361 			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
1362 			break;
1363 		case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
1364 			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
1365 			PMD_INIT_LOG(ERR, "SRIOV active,"
1366 					" unsupported mq_mode rx %d.",
1367 					dev_conf->rxmode.mq_mode);
1368 			return -EINVAL;
1369 		case RTE_ETH_MQ_RX_RSS:
1370 		case RTE_ETH_MQ_RX_VMDQ_RSS:
1371 			dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
1372 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1373 				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1374 					PMD_INIT_LOG(ERR, "SRIOV is active,"
1375 						" invalid queue number"
1376 						" for VMDQ RSS, allowed"
1377 						" value are 1, 2 or 4.");
1378 					return -EINVAL;
1379 				}
1380 			break;
1381 		case RTE_ETH_MQ_RX_VMDQ_ONLY:
1382 		case RTE_ETH_MQ_RX_NONE:
1383 			/* if nothing mq mode configure, use default scheme */
1384 			dev->data->dev_conf.rxmode.mq_mode =
1385 				RTE_ETH_MQ_RX_VMDQ_ONLY;
1386 			break;
1387 		default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/
1388 			/* SRIOV only works in VMDq enable mode */
1389 			PMD_INIT_LOG(ERR, "SRIOV is active,"
1390 					" wrong mq_mode rx %d.",
1391 					dev_conf->rxmode.mq_mode);
1392 			return -EINVAL;
1393 		}
1394 
1395 		switch (dev_conf->txmode.mq_mode) {
1396 		case RTE_ETH_MQ_TX_VMDQ_DCB:
1397 			PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1398 			dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
1399 			break;
1400 		default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
1401 			dev->data->dev_conf.txmode.mq_mode =
1402 				RTE_ETH_MQ_TX_VMDQ_ONLY;
1403 			break;
1404 		}
1405 
1406 		/* check valid queue number */
1407 		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1408 		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1409 			PMD_INIT_LOG(ERR, "SRIOV is active,"
1410 					" nb_rx_q=%d nb_tx_q=%d queue number"
1411 					" must be less than or equal to %d.",
1412 					nb_rx_q, nb_tx_q,
1413 					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1414 			return -EINVAL;
1415 		}
1416 	} else {
1417 		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
1418 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1419 					  " not supported.");
1420 			return -EINVAL;
1421 		}
1422 		/* check configuration for vmdb+dcb mode */
1423 		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
1424 			const struct rte_eth_vmdq_dcb_conf *conf;
1425 
1426 			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1427 				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1428 						TXGBE_VMDQ_DCB_NB_QUEUES);
1429 				return -EINVAL;
1430 			}
1431 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1432 			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
1433 			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
1434 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1435 						" nb_queue_pools must be %d or %d.",
1436 						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
1437 				return -EINVAL;
1438 			}
1439 		}
1440 		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
1441 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
1442 
1443 			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1444 				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1445 						 TXGBE_VMDQ_DCB_NB_QUEUES);
1446 				return -EINVAL;
1447 			}
1448 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1449 			if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
1450 			       conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
1451 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1452 						" nb_queue_pools != %d and"
1453 						" nb_queue_pools != %d.",
1454 						RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
1455 				return -EINVAL;
1456 			}
1457 		}
1458 
1459 		/* For DCB mode check our configuration before we go further */
1460 		if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
1461 			const struct rte_eth_dcb_rx_conf *conf;
1462 
1463 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1464 			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
1465 			       conf->nb_tcs == RTE_ETH_8_TCS)) {
1466 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1467 						" and nb_tcs != %d.",
1468 						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
1469 				return -EINVAL;
1470 			}
1471 		}
1472 
1473 		if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
1474 			const struct rte_eth_dcb_tx_conf *conf;
1475 
1476 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1477 			if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
1478 			       conf->nb_tcs == RTE_ETH_8_TCS)) {
1479 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1480 						" and nb_tcs != %d.",
1481 						RTE_ETH_4_TCS, RTE_ETH_8_TCS);
1482 				return -EINVAL;
1483 			}
1484 		}
1485 	}
1486 	return 0;
1487 }
1488 
1489 static int
1490 txgbe_dev_configure(struct rte_eth_dev *dev)
1491 {
1492 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1493 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1494 	int ret;
1495 
1496 	PMD_INIT_FUNC_TRACE();
1497 
1498 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
1499 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1500 
1501 	/* multiple queue mode checking */
1502 	ret  = txgbe_check_mq_mode(dev);
1503 	if (ret != 0) {
1504 		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1505 			    ret);
1506 		return ret;
1507 	}
1508 
1509 	/* set flag to update link status after init */
1510 	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1511 
1512 	/*
1513 	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
1514 	 * allocation Rx preconditions we will reset it.
1515 	 */
1516 	adapter->rx_bulk_alloc_allowed = true;
1517 
1518 	return 0;
1519 }
1520 
1521 static void
1522 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1523 {
1524 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1525 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1526 	uint32_t gpie;
1527 
1528 	gpie = rd32(hw, TXGBE_GPIOINTEN);
1529 	gpie |= TXGBE_GPIOBIT_6;
1530 	wr32(hw, TXGBE_GPIOINTEN, gpie);
1531 	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1532 	intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
1533 }
1534 
1535 int
1536 txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
1537 			uint16_t tx_rate, uint64_t q_msk)
1538 {
1539 	struct txgbe_hw *hw;
1540 	struct txgbe_vf_info *vfinfo;
1541 	struct rte_eth_link link;
1542 	uint8_t  nb_q_per_pool;
1543 	uint32_t queue_stride;
1544 	uint32_t queue_idx, idx = 0, vf_idx;
1545 	uint32_t queue_end;
1546 	uint16_t total_rate = 0;
1547 	struct rte_pci_device *pci_dev;
1548 	int ret;
1549 
1550 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1551 	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
1552 	if (ret < 0)
1553 		return ret;
1554 
1555 	if (vf >= pci_dev->max_vfs)
1556 		return -EINVAL;
1557 
1558 	if (tx_rate > link.link_speed)
1559 		return -EINVAL;
1560 
1561 	if (q_msk == 0)
1562 		return 0;
1563 
1564 	hw = TXGBE_DEV_HW(dev);
1565 	vfinfo = *(TXGBE_DEV_VFDATA(dev));
1566 	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1567 	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1568 	queue_idx = vf * queue_stride;
1569 	queue_end = queue_idx + nb_q_per_pool - 1;
1570 	if (queue_end >= hw->mac.max_tx_queues)
1571 		return -EINVAL;
1572 
1573 	if (vfinfo) {
1574 		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
1575 			if (vf_idx == vf)
1576 				continue;
1577 			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
1578 				idx++)
1579 				total_rate += vfinfo[vf_idx].tx_rate[idx];
1580 		}
1581 	} else {
1582 		return -EINVAL;
1583 	}
1584 
1585 	/* Store tx_rate for this vf. */
1586 	for (idx = 0; idx < nb_q_per_pool; idx++) {
1587 		if (((uint64_t)0x1 << idx) & q_msk) {
1588 			if (vfinfo[vf].tx_rate[idx] != tx_rate)
1589 				vfinfo[vf].tx_rate[idx] = tx_rate;
1590 			total_rate += tx_rate;
1591 		}
1592 	}
1593 
1594 	if (total_rate > dev->data->dev_link.link_speed) {
1595 		/* Reset stored TX rate of the VF if it causes exceed
1596 		 * link speed.
1597 		 */
1598 		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
1599 		return -EINVAL;
1600 	}
1601 
1602 	/* Set ARBTXRATE of each queue/pool for vf X  */
1603 	for (; queue_idx <= queue_end; queue_idx++) {
1604 		if (0x1 & q_msk)
1605 			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
1606 		q_msk = q_msk >> 1;
1607 	}
1608 
1609 	return 0;
1610 }
1611 
1612 /*
1613  * Configure device link speed and setup link.
1614  * It returns 0 on success.
1615  */
1616 static int
1617 txgbe_dev_start(struct rte_eth_dev *dev)
1618 {
1619 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1620 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1621 	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1622 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1623 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1624 	uint32_t intr_vector = 0;
1625 	int err;
1626 	bool link_up = false, negotiate = 0;
1627 	uint32_t speed = 0;
1628 	uint32_t allowed_speeds = 0;
1629 	int mask = 0;
1630 	int status;
1631 	uint16_t vf, idx;
1632 	uint32_t *link_speeds;
1633 	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
1634 
1635 	PMD_INIT_FUNC_TRACE();
1636 
1637 	/* Stop the link setup handler before resetting the HW. */
1638 	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1639 
1640 	/* disable uio/vfio intr/eventfd mapping */
1641 	rte_intr_disable(intr_handle);
1642 
1643 	/* stop adapter */
1644 	hw->adapter_stopped = 0;
1645 	txgbe_stop_hw(hw);
1646 
1647 	/* reinitialize adapter
1648 	 * this calls reset and start
1649 	 */
1650 	hw->nb_rx_queues = dev->data->nb_rx_queues;
1651 	hw->nb_tx_queues = dev->data->nb_tx_queues;
1652 	status = txgbe_pf_reset_hw(hw);
1653 	if (status != 0)
1654 		return -1;
1655 	hw->mac.start_hw(hw);
1656 	hw->mac.get_link_status = true;
1657 	hw->dev_start = true;
1658 
1659 	/* configure PF module if SRIOV enabled */
1660 	txgbe_pf_host_configure(dev);
1661 
1662 	txgbe_dev_phy_intr_setup(dev);
1663 
1664 	/* check and configure queue intr-vector mapping */
1665 	if ((rte_intr_cap_multiple(intr_handle) ||
1666 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
1667 	    dev->data->dev_conf.intr_conf.rxq != 0) {
1668 		intr_vector = dev->data->nb_rx_queues;
1669 		if (rte_intr_efd_enable(intr_handle, intr_vector))
1670 			return -1;
1671 	}
1672 
1673 	if (rte_intr_dp_is_en(intr_handle)) {
1674 		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
1675 						   dev->data->nb_rx_queues)) {
1676 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1677 				     " intr_vec", dev->data->nb_rx_queues);
1678 			return -ENOMEM;
1679 		}
1680 	}
1681 	/* confiugre msix for sleep until rx interrupt */
1682 	txgbe_configure_msix(dev);
1683 
1684 	/* initialize transmission unit */
1685 	txgbe_dev_tx_init(dev);
1686 
1687 	/* This can fail when allocating mbufs for descriptor rings */
1688 	err = txgbe_dev_rx_init(dev);
1689 	if (err) {
1690 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1691 		goto error;
1692 	}
1693 
1694 	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
1695 		RTE_ETH_VLAN_EXTEND_MASK;
1696 	err = txgbe_vlan_offload_config(dev, mask);
1697 	if (err) {
1698 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1699 		goto error;
1700 	}
1701 
1702 	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
1703 		/* Enable vlan filtering for VMDq */
1704 		txgbe_vmdq_vlan_hw_filter_enable(dev);
1705 	}
1706 
1707 	/* Configure DCB hw */
1708 	txgbe_configure_pb(dev);
1709 	txgbe_configure_port(dev);
1710 	txgbe_configure_dcb(dev);
1711 
1712 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1713 		err = txgbe_fdir_configure(dev);
1714 		if (err)
1715 			goto error;
1716 	}
1717 
1718 	/* Restore vf rate limit */
1719 	if (vfinfo != NULL) {
1720 		for (vf = 0; vf < pci_dev->max_vfs; vf++)
1721 			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
1722 				if (vfinfo[vf].tx_rate[idx] != 0)
1723 					txgbe_set_vf_rate_limit(dev, vf,
1724 						vfinfo[vf].tx_rate[idx],
1725 						1 << idx);
1726 	}
1727 
1728 	err = txgbe_dev_rxtx_start(dev);
1729 	if (err < 0) {
1730 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1731 		goto error;
1732 	}
1733 
1734 	/* Skip link setup if loopback mode is enabled. */
1735 	if (hw->mac.type == txgbe_mac_raptor &&
1736 	    dev->data->dev_conf.lpbk_mode)
1737 		goto skip_link_setup;
1738 
1739 	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1740 		err = hw->mac.setup_sfp(hw);
1741 		if (err)
1742 			goto error;
1743 	}
1744 
1745 	if (hw->phy.media_type == txgbe_media_type_copper) {
1746 		/* Turn on the copper */
1747 		hw->phy.set_phy_power(hw, true);
1748 	} else {
1749 		/* Turn on the laser */
1750 		hw->mac.enable_tx_laser(hw);
1751 	}
1752 
1753 	if ((hw->subsystem_device_id & 0xFF) != TXGBE_DEV_ID_KR_KX_KX4)
1754 		err = hw->mac.check_link(hw, &speed, &link_up, 0);
1755 	if (err)
1756 		goto error;
1757 	dev->data->dev_link.link_status = link_up;
1758 
1759 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1760 	if (err)
1761 		goto error;
1762 
1763 	allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
1764 			RTE_ETH_LINK_SPEED_10G;
1765 
1766 	link_speeds = &dev->data->dev_conf.link_speeds;
1767 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
1768 		PMD_INIT_LOG(ERR, "Invalid link setting");
1769 		goto error;
1770 	}
1771 
1772 	speed = 0x0;
1773 	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
1774 		speed = (TXGBE_LINK_SPEED_100M_FULL |
1775 			 TXGBE_LINK_SPEED_1GB_FULL |
1776 			 TXGBE_LINK_SPEED_10GB_FULL);
1777 	} else {
1778 		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
1779 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
1780 		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
1781 			speed |= TXGBE_LINK_SPEED_5GB_FULL;
1782 		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
1783 			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1784 		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
1785 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
1786 		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
1787 			speed |= TXGBE_LINK_SPEED_100M_FULL;
1788 	}
1789 
1790 	err = hw->mac.setup_link(hw, speed, link_up);
1791 	if (err)
1792 		goto error;
1793 
1794 skip_link_setup:
1795 
1796 	if (rte_intr_allow_others(intr_handle)) {
1797 		txgbe_dev_misc_interrupt_setup(dev);
1798 		/* check if lsc interrupt is enabled */
1799 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1800 			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1801 		else
1802 			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1803 		txgbe_dev_macsec_interrupt_setup(dev);
1804 		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1805 	} else {
1806 		rte_intr_callback_unregister(intr_handle,
1807 					     txgbe_dev_interrupt_handler, dev);
1808 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1809 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
1810 				     " no intr multiplex");
1811 	}
1812 
1813 	/* check if rxq interrupt is enabled */
1814 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1815 	    rte_intr_dp_is_en(intr_handle))
1816 		txgbe_dev_rxq_interrupt_setup(dev);
1817 
1818 	/* enable uio/vfio intr/eventfd mapping */
1819 	rte_intr_enable(intr_handle);
1820 
1821 	/* resume enabled intr since hw reset */
1822 	txgbe_enable_intr(dev);
1823 	txgbe_l2_tunnel_conf(dev);
1824 	txgbe_filter_restore(dev);
1825 
1826 	if (tm_conf->root && !tm_conf->committed)
1827 		PMD_DRV_LOG(WARNING,
1828 			    "please call hierarchy_commit() "
1829 			    "before starting the port");
1830 
1831 	/*
1832 	 * Update link status right before return, because it may
1833 	 * start link configuration process in a separate thread.
1834 	 */
1835 	txgbe_dev_link_update(dev, 0);
1836 
1837 	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1838 
1839 	txgbe_read_stats_registers(hw, hw_stats);
1840 	hw->offset_loaded = 1;
1841 
1842 	return 0;
1843 
1844 error:
1845 	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1846 	txgbe_dev_clear_queues(dev);
1847 	return -EIO;
1848 }
1849 
1850 /*
1851  * Stop device: disable rx and tx functions to allow for reconfiguring.
1852  */
1853 static int
1854 txgbe_dev_stop(struct rte_eth_dev *dev)
1855 {
1856 	struct rte_eth_link link;
1857 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1858 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1859 	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1860 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1861 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1862 	int vf;
1863 	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
1864 
1865 	if (hw->adapter_stopped)
1866 		return 0;
1867 
1868 	PMD_INIT_FUNC_TRACE();
1869 
1870 	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1871 
1872 	/* disable interrupts */
1873 	txgbe_disable_intr(hw);
1874 
1875 	/* reset the NIC */
1876 	txgbe_pf_reset_hw(hw);
1877 	hw->adapter_stopped = 0;
1878 
1879 	/* stop adapter */
1880 	txgbe_stop_hw(hw);
1881 
1882 	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1883 		vfinfo[vf].clear_to_send = false;
1884 
1885 	if (hw->phy.media_type == txgbe_media_type_copper) {
1886 		/* Turn off the copper */
1887 		hw->phy.set_phy_power(hw, false);
1888 	} else {
1889 		/* Turn off the laser */
1890 		hw->mac.disable_tx_laser(hw);
1891 	}
1892 
1893 	txgbe_dev_clear_queues(dev);
1894 
1895 	/* Clear stored conf */
1896 	dev->data->scattered_rx = 0;
1897 	dev->data->lro = 0;
1898 
1899 	/* Clear recorded link status */
1900 	memset(&link, 0, sizeof(link));
1901 	rte_eth_linkstatus_set(dev, &link);
1902 
1903 	if (!rte_intr_allow_others(intr_handle))
1904 		/* resume to the default handler */
1905 		rte_intr_callback_register(intr_handle,
1906 					   txgbe_dev_interrupt_handler,
1907 					   (void *)dev);
1908 
1909 	/* Clean datapath event and queue/vec mapping */
1910 	rte_intr_efd_disable(intr_handle);
1911 	rte_intr_vec_list_free(intr_handle);
1912 
1913 	/* reset hierarchy commit */
1914 	tm_conf->committed = false;
1915 
1916 	adapter->rss_reta_updated = 0;
1917 	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1918 
1919 	hw->adapter_stopped = true;
1920 	dev->data->dev_started = 0;
1921 	hw->dev_start = false;
1922 
1923 	return 0;
1924 }
1925 
1926 /*
1927  * Set device link up: enable tx.
1928  */
1929 static int
1930 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1931 {
1932 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1933 
1934 	if (hw->phy.media_type == txgbe_media_type_copper) {
1935 		/* Turn on the copper */
1936 		hw->phy.set_phy_power(hw, true);
1937 	} else {
1938 		/* Turn on the laser */
1939 		hw->mac.enable_tx_laser(hw);
1940 		txgbe_dev_link_update(dev, 0);
1941 	}
1942 
1943 	return 0;
1944 }
1945 
1946 /*
1947  * Set device link down: disable tx.
1948  */
1949 static int
1950 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1951 {
1952 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1953 
1954 	if (hw->phy.media_type == txgbe_media_type_copper) {
1955 		/* Turn off the copper */
1956 		hw->phy.set_phy_power(hw, false);
1957 	} else {
1958 		/* Turn off the laser */
1959 		hw->mac.disable_tx_laser(hw);
1960 		txgbe_dev_link_update(dev, 0);
1961 	}
1962 
1963 	return 0;
1964 }
1965 
1966 /*
1967  * Reset and stop device.
1968  */
1969 static int
1970 txgbe_dev_close(struct rte_eth_dev *dev)
1971 {
1972 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1973 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1974 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1975 	int retries = 0;
1976 	int ret;
1977 
1978 	PMD_INIT_FUNC_TRACE();
1979 
1980 	txgbe_pf_reset_hw(hw);
1981 
1982 	ret = txgbe_dev_stop(dev);
1983 
1984 	txgbe_dev_free_queues(dev);
1985 
1986 	/* reprogram the RAR[0] in case user changed it. */
1987 	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1988 
1989 	/* Unlock any pending hardware semaphore */
1990 	txgbe_swfw_lock_reset(hw);
1991 
1992 	/* disable uio intr before callback unregister */
1993 	rte_intr_disable(intr_handle);
1994 
1995 	do {
1996 		ret = rte_intr_callback_unregister(intr_handle,
1997 				txgbe_dev_interrupt_handler, dev);
1998 		if (ret >= 0 || ret == -ENOENT) {
1999 			break;
2000 		} else if (ret != -EAGAIN) {
2001 			PMD_INIT_LOG(ERR,
2002 				"intr callback unregister failed: %d",
2003 				ret);
2004 		}
2005 		rte_delay_ms(100);
2006 	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));
2007 
2008 	/* cancel the delay handler before remove dev */
2009 	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
2010 
2011 	/* uninitialize PF if max_vfs not zero */
2012 	txgbe_pf_host_uninit(dev);
2013 
2014 	rte_free(dev->data->mac_addrs);
2015 	dev->data->mac_addrs = NULL;
2016 
2017 	rte_free(dev->data->hash_mac_addrs);
2018 	dev->data->hash_mac_addrs = NULL;
2019 
2020 	/* remove all the fdir filters & hash */
2021 	txgbe_fdir_filter_uninit(dev);
2022 
2023 	/* remove all the L2 tunnel filters & hash */
2024 	txgbe_l2_tn_filter_uninit(dev);
2025 
2026 	/* Remove all ntuple filters of the device */
2027 	txgbe_ntuple_filter_uninit(dev);
2028 
2029 	/* clear all the filters list */
2030 	txgbe_filterlist_flush();
2031 
2032 	/* Remove all Traffic Manager configuration */
2033 	txgbe_tm_conf_uninit(dev);
2034 
2035 #ifdef RTE_LIB_SECURITY
2036 	rte_free(dev->security_ctx);
2037 #endif
2038 
2039 	return ret;
2040 }
2041 
2042 /*
2043  * Reset PF device.
2044  */
2045 static int
2046 txgbe_dev_reset(struct rte_eth_dev *dev)
2047 {
2048 	int ret;
2049 
2050 	/* When a DPDK PMD PF begin to reset PF port, it should notify all
2051 	 * its VF to make them align with it. The detailed notification
2052 	 * mechanism is PMD specific. As to txgbe PF, it is rather complex.
2053 	 * To avoid unexpected behavior in VF, currently reset of PF with
2054 	 * SR-IOV activation is not supported. It might be supported later.
2055 	 */
2056 	if (dev->data->sriov.active)
2057 		return -ENOTSUP;
2058 
2059 	ret = eth_txgbe_dev_uninit(dev);
2060 	if (ret)
2061 		return ret;
2062 
2063 	ret = eth_txgbe_dev_init(dev, NULL);
2064 
2065 	return ret;
2066 }
2067 
2068 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
2069 	{                                                       \
2070 		uint32_t current_counter = rd32(hw, reg);       \
2071 		if (current_counter < last_counter)             \
2072 			current_counter += 0x100000000LL;       \
2073 		if (!hw->offset_loaded)                         \
2074 			last_counter = current_counter;         \
2075 		counter = current_counter - last_counter;       \
2076 		counter &= 0xFFFFFFFFLL;                        \
2077 	}
2078 
2079 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2080 	{                                                                \
2081 		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
2082 		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
2083 		uint64_t current_counter = (current_counter_msb << 32) | \
2084 			current_counter_lsb;                             \
2085 		if (current_counter < last_counter)                      \
2086 			current_counter += 0x1000000000LL;               \
2087 		if (!hw->offset_loaded)                                  \
2088 			last_counter = current_counter;                  \
2089 		counter = current_counter - last_counter;                \
2090 		counter &= 0xFFFFFFFFFLL;                                \
2091 	}
2092 
2093 void
2094 txgbe_read_stats_registers(struct txgbe_hw *hw,
2095 			   struct txgbe_hw_stats *hw_stats)
2096 {
2097 	unsigned int i;
2098 
2099 	/* QP Stats */
2100 	for (i = 0; i < hw->nb_rx_queues; i++) {
2101 		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
2102 			hw->qp_last[i].rx_qp_packets,
2103 			hw_stats->qp[i].rx_qp_packets);
2104 		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
2105 			hw->qp_last[i].rx_qp_bytes,
2106 			hw_stats->qp[i].rx_qp_bytes);
2107 		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
2108 			hw->qp_last[i].rx_qp_mc_packets,
2109 			hw_stats->qp[i].rx_qp_mc_packets);
2110 	}
2111 
2112 	for (i = 0; i < hw->nb_tx_queues; i++) {
2113 		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
2114 			hw->qp_last[i].tx_qp_packets,
2115 			hw_stats->qp[i].tx_qp_packets);
2116 		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
2117 			hw->qp_last[i].tx_qp_bytes,
2118 			hw_stats->qp[i].tx_qp_bytes);
2119 	}
2120 	/* PB Stats */
2121 	for (i = 0; i < TXGBE_MAX_UP; i++) {
2122 		hw_stats->up[i].rx_up_xon_packets +=
2123 				rd32(hw, TXGBE_PBRXUPXON(i));
2124 		hw_stats->up[i].rx_up_xoff_packets +=
2125 				rd32(hw, TXGBE_PBRXUPXOFF(i));
2126 		hw_stats->up[i].tx_up_xon_packets +=
2127 				rd32(hw, TXGBE_PBTXUPXON(i));
2128 		hw_stats->up[i].tx_up_xoff_packets +=
2129 				rd32(hw, TXGBE_PBTXUPXOFF(i));
2130 		hw_stats->up[i].tx_up_xon2off_packets +=
2131 				rd32(hw, TXGBE_PBTXUPOFF(i));
2132 		hw_stats->up[i].rx_up_dropped +=
2133 				rd32(hw, TXGBE_PBRXMISS(i));
2134 	}
2135 	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
2136 	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
2137 	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
2138 	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
2139 
2140 	/* DMA Stats */
2141 	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
2142 	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
2143 
2144 	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
2145 	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
2146 	hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP);
2147 	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
2148 
2149 	/* MAC Stats */
2150 	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
2151 	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
2152 	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
2153 
2154 	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
2155 	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
2156 	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
2157 
2158 	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
2159 	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
2160 
2161 	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
2162 	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
2163 	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
2164 	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
2165 	hw_stats->rx_size_512_to_1023_packets +=
2166 			rd64(hw, TXGBE_MACRX512TO1023L);
2167 	hw_stats->rx_size_1024_to_max_packets +=
2168 			rd64(hw, TXGBE_MACRX1024TOMAXL);
2169 	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
2170 	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
2171 	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
2172 	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
2173 	hw_stats->tx_size_512_to_1023_packets +=
2174 			rd64(hw, TXGBE_MACTX512TO1023L);
2175 	hw_stats->tx_size_1024_to_max_packets +=
2176 			rd64(hw, TXGBE_MACTX1024TOMAXL);
2177 
2178 	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
2179 	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
2180 	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
2181 
2182 	/* MNG Stats */
2183 	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
2184 	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
2185 	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
2186 	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
2187 
2188 	/* FCoE Stats */
2189 	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
2190 	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
2191 	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
2192 	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
2193 	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
2194 	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
2195 	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
2196 
2197 	/* Flow Director Stats */
2198 	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
2199 	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
2200 	hw_stats->flow_director_added_filters +=
2201 		TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
2202 	hw_stats->flow_director_removed_filters +=
2203 		TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
2204 	hw_stats->flow_director_filter_add_errors +=
2205 		TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
2206 	hw_stats->flow_director_filter_remove_errors +=
2207 		TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
2208 
2209 	/* MACsec Stats */
2210 	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
2211 	hw_stats->tx_macsec_pkts_encrypted +=
2212 			rd32(hw, TXGBE_LSECTX_ENCPKT);
2213 	hw_stats->tx_macsec_pkts_protected +=
2214 			rd32(hw, TXGBE_LSECTX_PROTPKT);
2215 	hw_stats->tx_macsec_octets_encrypted +=
2216 			rd32(hw, TXGBE_LSECTX_ENCOCT);
2217 	hw_stats->tx_macsec_octets_protected +=
2218 			rd32(hw, TXGBE_LSECTX_PROTOCT);
2219 	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
2220 	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
2221 	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
2222 	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
2223 	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
2224 	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
2225 	hw_stats->rx_macsec_sc_pkts_unchecked +=
2226 			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
2227 	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
2228 	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
2229 	for (i = 0; i < 2; i++) {
2230 		hw_stats->rx_macsec_sa_pkts_ok +=
2231 			rd32(hw, TXGBE_LSECRX_OKPKT(i));
2232 		hw_stats->rx_macsec_sa_pkts_invalid +=
2233 			rd32(hw, TXGBE_LSECRX_INVPKT(i));
2234 		hw_stats->rx_macsec_sa_pkts_notvalid +=
2235 			rd32(hw, TXGBE_LSECRX_BADPKT(i));
2236 	}
2237 	hw_stats->rx_macsec_sa_pkts_unusedsa +=
2238 			rd32(hw, TXGBE_LSECRX_INVSAPKT);
2239 	hw_stats->rx_macsec_sa_pkts_notusingsa +=
2240 			rd32(hw, TXGBE_LSECRX_BADSAPKT);
2241 
2242 	hw_stats->rx_total_missed_packets = 0;
2243 	for (i = 0; i < TXGBE_MAX_UP; i++) {
2244 		hw_stats->rx_total_missed_packets +=
2245 			hw_stats->up[i].rx_up_dropped;
2246 	}
2247 }
2248 
2249 static int
2250 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2251 {
2252 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2253 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2254 	struct txgbe_stat_mappings *stat_mappings =
2255 			TXGBE_DEV_STAT_MAPPINGS(dev);
2256 	uint32_t i, j;
2257 
2258 	txgbe_read_stats_registers(hw, hw_stats);
2259 
2260 	if (stats == NULL)
2261 		return -EINVAL;
2262 
2263 	/* Fill out the rte_eth_stats statistics structure */
2264 	stats->ipackets = hw_stats->rx_packets;
2265 	stats->ibytes = hw_stats->rx_bytes;
2266 	stats->opackets = hw_stats->tx_packets;
2267 	stats->obytes = hw_stats->tx_bytes;
2268 
2269 	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
2270 	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
2271 	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
2272 	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
2273 	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
2274 	for (i = 0; i < TXGBE_MAX_QP; i++) {
2275 		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2276 		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
2277 		uint32_t q_map;
2278 
2279 		q_map = (stat_mappings->rqsm[n] >> offset)
2280 				& QMAP_FIELD_RESERVED_BITS_MASK;
2281 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2282 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2283 		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2284 		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2285 
2286 		q_map = (stat_mappings->tqsm[n] >> offset)
2287 				& QMAP_FIELD_RESERVED_BITS_MASK;
2288 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2289 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2290 		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2291 		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2292 	}
2293 
2294 	/* Rx Errors */
2295 	stats->imissed  = hw_stats->rx_total_missed_packets +
2296 			  hw_stats->rx_dma_drop;
2297 	stats->ierrors  = hw_stats->rx_crc_errors +
2298 			  hw_stats->rx_mac_short_packet_dropped +
2299 			  hw_stats->rx_length_errors +
2300 			  hw_stats->rx_undersize_errors +
2301 			  hw_stats->rx_oversize_errors +
2302 			  hw_stats->rx_drop_packets +
2303 			  hw_stats->rx_illegal_byte_errors +
2304 			  hw_stats->rx_error_bytes +
2305 			  hw_stats->rx_fragment_errors +
2306 			  hw_stats->rx_fcoe_crc_errors +
2307 			  hw_stats->rx_fcoe_mbuf_allocation_errors;
2308 
2309 	/* Tx Errors */
2310 	stats->oerrors  = 0;
2311 	return 0;
2312 }
2313 
2314 static int
2315 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2316 {
2317 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2318 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2319 
2320 	/* HW registers are cleared on read */
2321 	hw->offset_loaded = 0;
2322 	txgbe_dev_stats_get(dev, NULL);
2323 	hw->offset_loaded = 1;
2324 
2325 	/* Reset software totals */
2326 	memset(hw_stats, 0, sizeof(*hw_stats));
2327 
2328 	return 0;
2329 }
2330 
2331 /* This function calculates the number of xstats based on the current config */
2332 static unsigned
2333 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2334 {
2335 	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2336 	return TXGBE_NB_HW_STATS +
2337 	       TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2338 	       TXGBE_NB_QP_STATS * nb_queues;
2339 }
2340 
2341 static inline int
2342 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2343 {
2344 	int nb, st;
2345 
2346 	/* Extended stats from txgbe_hw_stats */
2347 	if (id < TXGBE_NB_HW_STATS) {
2348 		snprintf(name, size, "[hw]%s",
2349 			rte_txgbe_stats_strings[id].name);
2350 		return 0;
2351 	}
2352 	id -= TXGBE_NB_HW_STATS;
2353 
2354 	/* Priority Stats */
2355 	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2356 		nb = id / TXGBE_NB_UP_STATS;
2357 		st = id % TXGBE_NB_UP_STATS;
2358 		snprintf(name, size, "[p%u]%s", nb,
2359 			rte_txgbe_up_strings[st].name);
2360 		return 0;
2361 	}
2362 	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2363 
2364 	/* Queue Stats */
2365 	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2366 		nb = id / TXGBE_NB_QP_STATS;
2367 		st = id % TXGBE_NB_QP_STATS;
2368 		snprintf(name, size, "[q%u]%s", nb,
2369 			rte_txgbe_qp_strings[st].name);
2370 		return 0;
2371 	}
2372 	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2373 
2374 	return -(int)(id + 1);
2375 }
2376 
2377 static inline int
2378 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2379 {
2380 	int nb, st;
2381 
2382 	/* Extended stats from txgbe_hw_stats */
2383 	if (id < TXGBE_NB_HW_STATS) {
2384 		*offset = rte_txgbe_stats_strings[id].offset;
2385 		return 0;
2386 	}
2387 	id -= TXGBE_NB_HW_STATS;
2388 
2389 	/* Priority Stats */
2390 	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2391 		nb = id / TXGBE_NB_UP_STATS;
2392 		st = id % TXGBE_NB_UP_STATS;
2393 		*offset = rte_txgbe_up_strings[st].offset +
2394 			nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2395 		return 0;
2396 	}
2397 	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2398 
2399 	/* Queue Stats */
2400 	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2401 		nb = id / TXGBE_NB_QP_STATS;
2402 		st = id % TXGBE_NB_QP_STATS;
2403 		*offset = rte_txgbe_qp_strings[st].offset +
2404 			nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2405 		return 0;
2406 	}
2407 
2408 	return -1;
2409 }
2410 
2411 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2412 	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2413 {
2414 	unsigned int i, count;
2415 
2416 	count = txgbe_xstats_calc_num(dev);
2417 	if (xstats_names == NULL)
2418 		return count;
2419 
2420 	/* Note: limit >= cnt_stats checked upstream
2421 	 * in rte_eth_xstats_names()
2422 	 */
2423 	limit = min(limit, count);
2424 
2425 	/* Extended stats from txgbe_hw_stats */
2426 	for (i = 0; i < limit; i++) {
2427 		if (txgbe_get_name_by_id(i, xstats_names[i].name,
2428 			sizeof(xstats_names[i].name))) {
2429 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2430 			break;
2431 		}
2432 	}
2433 
2434 	return i;
2435 }
2436 
2437 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2438 	const uint64_t *ids,
2439 	struct rte_eth_xstat_name *xstats_names,
2440 	unsigned int limit)
2441 {
2442 	unsigned int i;
2443 
2444 	if (ids == NULL)
2445 		return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2446 
2447 	for (i = 0; i < limit; i++) {
2448 		if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2449 				sizeof(xstats_names[i].name))) {
2450 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2451 			return -1;
2452 		}
2453 	}
2454 
2455 	return i;
2456 }
2457 
2458 static int
2459 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2460 					 unsigned int limit)
2461 {
2462 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2463 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2464 	unsigned int i, count;
2465 
2466 	txgbe_read_stats_registers(hw, hw_stats);
2467 
2468 	/* If this is a reset xstats is NULL, and we have cleared the
2469 	 * registers by reading them.
2470 	 */
2471 	count = txgbe_xstats_calc_num(dev);
2472 	if (xstats == NULL)
2473 		return count;
2474 
2475 	limit = min(limit, txgbe_xstats_calc_num(dev));
2476 
2477 	/* Extended stats from txgbe_hw_stats */
2478 	for (i = 0; i < limit; i++) {
2479 		uint32_t offset = 0;
2480 
2481 		if (txgbe_get_offset_by_id(i, &offset)) {
2482 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2483 			break;
2484 		}
2485 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2486 		xstats[i].id = i;
2487 	}
2488 
2489 	return i;
2490 }
2491 
2492 static int
2493 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2494 					 unsigned int limit)
2495 {
2496 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2497 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2498 	unsigned int i, count;
2499 
2500 	txgbe_read_stats_registers(hw, hw_stats);
2501 
2502 	/* If this is a reset xstats is NULL, and we have cleared the
2503 	 * registers by reading them.
2504 	 */
2505 	count = txgbe_xstats_calc_num(dev);
2506 	if (values == NULL)
2507 		return count;
2508 
2509 	limit = min(limit, txgbe_xstats_calc_num(dev));
2510 
2511 	/* Extended stats from txgbe_hw_stats */
2512 	for (i = 0; i < limit; i++) {
2513 		uint32_t offset;
2514 
2515 		if (txgbe_get_offset_by_id(i, &offset)) {
2516 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2517 			break;
2518 		}
2519 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2520 	}
2521 
2522 	return i;
2523 }
2524 
2525 static int
2526 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2527 		uint64_t *values, unsigned int limit)
2528 {
2529 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2530 	unsigned int i;
2531 
2532 	if (ids == NULL)
2533 		return txgbe_dev_xstats_get_(dev, values, limit);
2534 
2535 	for (i = 0; i < limit; i++) {
2536 		uint32_t offset;
2537 
2538 		if (txgbe_get_offset_by_id(ids[i], &offset)) {
2539 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2540 			break;
2541 		}
2542 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2543 	}
2544 
2545 	return i;
2546 }
2547 
2548 static int
2549 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2550 {
2551 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2552 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2553 
2554 	/* HW registers are cleared on read */
2555 	hw->offset_loaded = 0;
2556 	txgbe_read_stats_registers(hw, hw_stats);
2557 	hw->offset_loaded = 1;
2558 
2559 	/* Reset software totals */
2560 	memset(hw_stats, 0, sizeof(*hw_stats));
2561 
2562 	return 0;
2563 }
2564 
2565 static int
2566 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2567 {
2568 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2569 	u32 etrack_id;
2570 	int ret;
2571 
2572 	hw->phy.get_fw_version(hw, &etrack_id);
2573 
2574 	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2575 	if (ret < 0)
2576 		return -EINVAL;
2577 
2578 	ret += 1; /* add the size of '\0' */
2579 	if (fw_size < (size_t)ret)
2580 		return ret;
2581 	else
2582 		return 0;
2583 }
2584 
2585 static int
2586 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2587 {
2588 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2589 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2590 
2591 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2592 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2593 	dev_info->min_rx_bufsize = 1024;
2594 	dev_info->max_rx_pktlen = 15872;
2595 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2596 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2597 	dev_info->max_vfs = pci_dev->max_vfs;
2598 	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
2599 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2600 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2601 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2602 				     dev_info->rx_queue_offload_capa);
2603 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2604 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2605 
2606 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
2607 		.rx_thresh = {
2608 			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2609 			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2610 			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2611 		},
2612 		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2613 		.rx_drop_en = 0,
2614 		.offloads = 0,
2615 	};
2616 
2617 	dev_info->default_txconf = (struct rte_eth_txconf) {
2618 		.tx_thresh = {
2619 			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2620 			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2621 			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2622 		},
2623 		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2624 		.offloads = 0,
2625 	};
2626 
2627 	dev_info->rx_desc_lim = rx_desc_lim;
2628 	dev_info->tx_desc_lim = tx_desc_lim;
2629 
2630 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2631 	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
2632 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2633 
2634 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
2635 	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
2636 
2637 	/* Driver-preferred Rx/Tx parameters */
2638 	dev_info->default_rxportconf.burst_size = 32;
2639 	dev_info->default_txportconf.burst_size = 32;
2640 	dev_info->default_rxportconf.nb_queues = 1;
2641 	dev_info->default_txportconf.nb_queues = 1;
2642 	dev_info->default_rxportconf.ring_size = 256;
2643 	dev_info->default_txportconf.ring_size = 256;
2644 
2645 	return 0;
2646 }
2647 
2648 const uint32_t *
2649 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2650 {
2651 	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2652 	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2653 	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2654 	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2655 		return txgbe_get_supported_ptypes();
2656 
2657 	return NULL;
2658 }
2659 
2660 void
2661 txgbe_dev_setup_link_alarm_handler(void *param)
2662 {
2663 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2664 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2665 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2666 	u32 speed;
2667 	bool autoneg = false;
2668 
2669 	speed = hw->phy.autoneg_advertised;
2670 	if (!speed)
2671 		hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2672 
2673 	hw->mac.setup_link(hw, speed, true);
2674 
2675 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2676 }
2677 
2678 /* return 0 means link status changed, -1 means not changed */
2679 int
2680 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2681 			    int wait_to_complete)
2682 {
2683 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2684 	struct rte_eth_link link;
2685 	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2686 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2687 	bool link_up;
2688 	int err;
2689 	int wait = 1;
2690 
2691 	memset(&link, 0, sizeof(link));
2692 	link.link_status = RTE_ETH_LINK_DOWN;
2693 	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2694 	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
2695 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2696 			RTE_ETH_LINK_AUTONEG);
2697 
2698 	hw->mac.get_link_status = true;
2699 
2700 	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2701 		return rte_eth_linkstatus_set(dev, &link);
2702 
2703 	/* check if it needs to wait to complete, if lsc interrupt is enabled */
2704 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2705 		wait = 0;
2706 
2707 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2708 
2709 	if (err != 0) {
2710 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
2711 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
2712 		return rte_eth_linkstatus_set(dev, &link);
2713 	}
2714 
2715 	if (link_up == 0) {
2716 		if ((hw->subsystem_device_id & 0xFF) ==
2717 				TXGBE_DEV_ID_KR_KX_KX4) {
2718 			hw->mac.bp_down_event(hw);
2719 		} else if (hw->phy.media_type == txgbe_media_type_fiber) {
2720 			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2721 			rte_eal_alarm_set(10,
2722 				txgbe_dev_setup_link_alarm_handler, dev);
2723 		}
2724 		return rte_eth_linkstatus_set(dev, &link);
2725 	} else if (!hw->dev_start) {
2726 		return rte_eth_linkstatus_set(dev, &link);
2727 	}
2728 
2729 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2730 	link.link_status = RTE_ETH_LINK_UP;
2731 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
2732 
2733 	switch (link_speed) {
2734 	default:
2735 	case TXGBE_LINK_SPEED_UNKNOWN:
2736 		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
2737 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
2738 		break;
2739 
2740 	case TXGBE_LINK_SPEED_100M_FULL:
2741 		link.link_speed = RTE_ETH_SPEED_NUM_100M;
2742 		break;
2743 
2744 	case TXGBE_LINK_SPEED_1GB_FULL:
2745 		link.link_speed = RTE_ETH_SPEED_NUM_1G;
2746 		break;
2747 
2748 	case TXGBE_LINK_SPEED_2_5GB_FULL:
2749 		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
2750 		break;
2751 
2752 	case TXGBE_LINK_SPEED_5GB_FULL:
2753 		link.link_speed = RTE_ETH_SPEED_NUM_5G;
2754 		break;
2755 
2756 	case TXGBE_LINK_SPEED_10GB_FULL:
2757 		link.link_speed = RTE_ETH_SPEED_NUM_10G;
2758 		break;
2759 	}
2760 
2761 	return rte_eth_linkstatus_set(dev, &link);
2762 }
2763 
2764 static int
2765 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2766 {
2767 	return txgbe_dev_link_update_share(dev, wait_to_complete);
2768 }
2769 
2770 static int
2771 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2772 {
2773 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2774 	uint32_t fctrl;
2775 
2776 	fctrl = rd32(hw, TXGBE_PSRCTL);
2777 	fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2778 	wr32(hw, TXGBE_PSRCTL, fctrl);
2779 
2780 	return 0;
2781 }
2782 
2783 static int
2784 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2785 {
2786 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2787 	uint32_t fctrl;
2788 
2789 	fctrl = rd32(hw, TXGBE_PSRCTL);
2790 	fctrl &= (~TXGBE_PSRCTL_UCP);
2791 	if (dev->data->all_multicast == 1)
2792 		fctrl |= TXGBE_PSRCTL_MCP;
2793 	else
2794 		fctrl &= (~TXGBE_PSRCTL_MCP);
2795 	wr32(hw, TXGBE_PSRCTL, fctrl);
2796 
2797 	return 0;
2798 }
2799 
2800 static int
2801 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2802 {
2803 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2804 	uint32_t fctrl;
2805 
2806 	fctrl = rd32(hw, TXGBE_PSRCTL);
2807 	fctrl |= TXGBE_PSRCTL_MCP;
2808 	wr32(hw, TXGBE_PSRCTL, fctrl);
2809 
2810 	return 0;
2811 }
2812 
2813 static int
2814 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2815 {
2816 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2817 	uint32_t fctrl;
2818 
2819 	if (dev->data->promiscuous == 1)
2820 		return 0; /* must remain in all_multicast mode */
2821 
2822 	fctrl = rd32(hw, TXGBE_PSRCTL);
2823 	fctrl &= (~TXGBE_PSRCTL_MCP);
2824 	wr32(hw, TXGBE_PSRCTL, fctrl);
2825 
2826 	return 0;
2827 }
2828 
2829 /**
2830  * It clears the interrupt causes and enables the interrupt.
2831  * It will be called once only during nic initialized.
2832  *
2833  * @param dev
2834  *  Pointer to struct rte_eth_dev.
2835  * @param on
2836  *  Enable or Disable.
2837  *
2838  * @return
2839  *  - On success, zero.
2840  *  - On failure, a negative value.
2841  */
2842 static int
2843 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2844 {
2845 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2846 
2847 	txgbe_dev_link_status_print(dev);
2848 	if (on)
2849 		intr->mask_misc |= TXGBE_ICRMISC_LSC;
2850 	else
2851 		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2852 
2853 	return 0;
2854 }
2855 
2856 static int
2857 txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2858 {
2859 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2860 	u64 mask;
2861 
2862 	mask = TXGBE_ICR_MASK;
2863 	mask &= (1ULL << TXGBE_MISC_VEC_ID);
2864 	intr->mask |= mask;
2865 	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
2866 	intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
2867 	return 0;
2868 }
2869 
2870 /**
2871  * It clears the interrupt causes and enables the interrupt.
2872  * It will be called once only during nic initialized.
2873  *
2874  * @param dev
2875  *  Pointer to struct rte_eth_dev.
2876  *
2877  * @return
2878  *  - On success, zero.
2879  *  - On failure, a negative value.
2880  */
2881 static int
2882 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2883 {
2884 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2885 	u64 mask;
2886 
2887 	mask = TXGBE_ICR_MASK;
2888 	mask &= ~((1ULL << TXGBE_RX_VEC_START) - 1);
2889 	intr->mask |= mask;
2890 
2891 	return 0;
2892 }
2893 
2894 /**
2895  * It clears the interrupt causes and enables the interrupt.
2896  * It will be called once only during nic initialized.
2897  *
2898  * @param dev
2899  *  Pointer to struct rte_eth_dev.
2900  *
2901  * @return
2902  *  - On success, zero.
2903  *  - On failure, a negative value.
2904  */
2905 static int
2906 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2907 {
2908 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2909 
2910 	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2911 
2912 	return 0;
2913 }
2914 
2915 /*
2916  * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
2917  *
2918  * @param dev
2919  *  Pointer to struct rte_eth_dev.
2920  *
2921  * @return
2922  *  - On success, zero.
2923  *  - On failure, a negative value.
2924  */
2925 static int
2926 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
2927 				struct rte_intr_handle *intr_handle)
2928 {
2929 	uint32_t eicr;
2930 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2931 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2932 
2933 	if (rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_UIO &&
2934 		rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX)
2935 		wr32(hw, TXGBE_PX_INTA, 1);
2936 
2937 	/* clear all cause mask */
2938 	txgbe_disable_intr(hw);
2939 
2940 	/* read-on-clear nic registers here */
2941 	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2942 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2943 
2944 	intr->flags = 0;
2945 
2946 	/* set flag for async link update */
2947 	if (eicr & TXGBE_ICRMISC_LSC)
2948 		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2949 
2950 	if (eicr & TXGBE_ICRMISC_ANDONE)
2951 		intr->flags |= TXGBE_FLAG_NEED_AN_CONFIG;
2952 
2953 	if (eicr & TXGBE_ICRMISC_VFMBX)
2954 		intr->flags |= TXGBE_FLAG_MAILBOX;
2955 
2956 	if (eicr & TXGBE_ICRMISC_LNKSEC)
2957 		intr->flags |= TXGBE_FLAG_MACSEC;
2958 
2959 	if (eicr & TXGBE_ICRMISC_GPIO)
2960 		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2961 
2962 	return 0;
2963 }
2964 
2965 /**
2966  * It gets and then prints the link status.
2967  *
2968  * @param dev
2969  *  Pointer to struct rte_eth_dev.
2970  *
2971  * @return
2972  *  - On success, zero.
2973  *  - On failure, a negative value.
2974  */
2975 static void
2976 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2977 {
2978 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2979 	struct rte_eth_link link;
2980 
2981 	rte_eth_linkstatus_get(dev, &link);
2982 
2983 	if (link.link_status) {
2984 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2985 					(int)(dev->data->port_id),
2986 					(unsigned int)link.link_speed,
2987 			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
2988 					"full-duplex" : "half-duplex");
2989 	} else {
2990 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
2991 				(int)(dev->data->port_id));
2992 	}
2993 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2994 				pci_dev->addr.domain,
2995 				pci_dev->addr.bus,
2996 				pci_dev->addr.devid,
2997 				pci_dev->addr.function);
2998 }
2999 
3000 /*
3001  * It executes link_update after knowing an interrupt occurred.
3002  *
3003  * @param dev
3004  *  Pointer to struct rte_eth_dev.
3005  *
3006  * @return
3007  *  - On success, zero.
3008  *  - On failure, a negative value.
3009  */
3010 static int
3011 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
3012 			   struct rte_intr_handle *intr_handle)
3013 {
3014 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
3015 	int64_t timeout;
3016 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3017 
3018 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
3019 
3020 	if (intr->flags & TXGBE_FLAG_MAILBOX) {
3021 		txgbe_pf_mbx_process(dev);
3022 		intr->flags &= ~TXGBE_FLAG_MAILBOX;
3023 	}
3024 
3025 	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
3026 		hw->phy.handle_lasi(hw);
3027 		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
3028 	}
3029 
3030 	if (intr->flags & TXGBE_FLAG_NEED_AN_CONFIG) {
3031 		if (hw->devarg.auto_neg == 1 && hw->devarg.poll == 0) {
3032 			hw->mac.kr_handle(hw);
3033 			intr->flags &= ~TXGBE_FLAG_NEED_AN_CONFIG;
3034 		}
3035 	}
3036 
3037 	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
3038 		struct rte_eth_link link;
3039 
3040 		/*get the link status before link update, for predicting later*/
3041 		rte_eth_linkstatus_get(dev, &link);
3042 
3043 		txgbe_dev_link_update(dev, 0);
3044 
3045 		/* likely to up */
3046 		if (!link.link_status)
3047 			/* handle it 1 sec later, wait it being stable */
3048 			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
3049 		/* likely to down */
3050 		else if ((hw->subsystem_device_id & 0xFF) ==
3051 				TXGBE_DEV_ID_KR_KX_KX4 &&
3052 				hw->devarg.auto_neg == 1)
3053 			/* handle it 2 sec later for backplane AN73 */
3054 			timeout = 2000;
3055 		else
3056 			/* handle it 4 sec later, wait it being stable */
3057 			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
3058 
3059 		txgbe_dev_link_status_print(dev);
3060 		if (rte_eal_alarm_set(timeout * 1000,
3061 				      txgbe_dev_interrupt_delayed_handler,
3062 				      (void *)dev) < 0) {
3063 			PMD_DRV_LOG(ERR, "Error setting alarm");
3064 		} else {
3065 			/* only disable lsc interrupt */
3066 			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
3067 
3068 			intr->mask_orig = intr->mask;
3069 			/* only disable all misc interrupts */
3070 			intr->mask &= ~(1ULL << TXGBE_MISC_VEC_ID);
3071 		}
3072 	}
3073 
3074 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
3075 	txgbe_enable_intr(dev);
3076 	rte_intr_enable(intr_handle);
3077 
3078 	return 0;
3079 }
3080 
3081 /**
3082  * Interrupt handler which shall be registered for alarm callback for delayed
3083  * handling specific interrupt to wait for the stable nic state. As the
3084  * NIC interrupt state is not stable for txgbe after link is just down,
3085  * it needs to wait 4 seconds to get the stable status.
3086  *
3087  * @param handle
3088  *  Pointer to interrupt handle.
3089  * @param param
3090  *  The address of parameter (struct rte_eth_dev *) registered before.
3091  *
3092  * @return
3093  *  void
3094  */
3095 static void
3096 txgbe_dev_interrupt_delayed_handler(void *param)
3097 {
3098 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3099 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3100 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3101 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
3102 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3103 	uint32_t eicr;
3104 
3105 	txgbe_disable_intr(hw);
3106 
3107 	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
3108 	if (eicr & TXGBE_ICRMISC_VFMBX)
3109 		txgbe_pf_mbx_process(dev);
3110 
3111 	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
3112 		hw->phy.handle_lasi(hw);
3113 		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
3114 	}
3115 
3116 	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
3117 		txgbe_dev_link_update(dev, 0);
3118 		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
3119 		txgbe_dev_link_status_print(dev);
3120 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
3121 					      NULL);
3122 	}
3123 
3124 	if (intr->flags & TXGBE_FLAG_MACSEC) {
3125 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
3126 					      NULL);
3127 		intr->flags &= ~TXGBE_FLAG_MACSEC;
3128 	}
3129 
3130 	/* restore original mask */
3131 	intr->mask_misc |= TXGBE_ICRMISC_LSC;
3132 
3133 	intr->mask = intr->mask_orig;
3134 	intr->mask_orig = 0;
3135 
3136 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3137 	txgbe_enable_intr(dev);
3138 	rte_intr_enable(intr_handle);
3139 }
3140 
3141 /**
3142  * Interrupt handler triggered by NIC  for handling
3143  * specific interrupt.
3144  *
3145  * @param handle
3146  *  Pointer to interrupt handle.
3147  * @param param
3148  *  The address of parameter (struct rte_eth_dev *) registered before.
3149  *
3150  * @return
3151  *  void
3152  */
3153 static void
3154 txgbe_dev_interrupt_handler(void *param)
3155 {
3156 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3157 
3158 	txgbe_dev_interrupt_get_status(dev, dev->intr_handle);
3159 	txgbe_dev_interrupt_action(dev, dev->intr_handle);
3160 }
3161 
3162 static int
3163 txgbe_dev_led_on(struct rte_eth_dev *dev)
3164 {
3165 	struct txgbe_hw *hw;
3166 
3167 	hw = TXGBE_DEV_HW(dev);
3168 	return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
3169 }
3170 
3171 static int
3172 txgbe_dev_led_off(struct rte_eth_dev *dev)
3173 {
3174 	struct txgbe_hw *hw;
3175 
3176 	hw = TXGBE_DEV_HW(dev);
3177 	return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
3178 }
3179 
3180 static int
3181 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3182 {
3183 	struct txgbe_hw *hw;
3184 	uint32_t mflcn_reg;
3185 	uint32_t fccfg_reg;
3186 	int rx_pause;
3187 	int tx_pause;
3188 
3189 	hw = TXGBE_DEV_HW(dev);
3190 
3191 	fc_conf->pause_time = hw->fc.pause_time;
3192 	fc_conf->high_water = hw->fc.high_water[0];
3193 	fc_conf->low_water = hw->fc.low_water[0];
3194 	fc_conf->send_xon = hw->fc.send_xon;
3195 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3196 
3197 	/*
3198 	 * Return rx_pause status according to actual setting of
3199 	 * RXFCCFG register.
3200 	 */
3201 	mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
3202 	if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
3203 		rx_pause = 1;
3204 	else
3205 		rx_pause = 0;
3206 
3207 	/*
3208 	 * Return tx_pause status according to actual setting of
3209 	 * TXFCCFG register.
3210 	 */
3211 	fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
3212 	if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
3213 		tx_pause = 1;
3214 	else
3215 		tx_pause = 0;
3216 
3217 	if (rx_pause && tx_pause)
3218 		fc_conf->mode = RTE_ETH_FC_FULL;
3219 	else if (rx_pause)
3220 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
3221 	else if (tx_pause)
3222 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
3223 	else
3224 		fc_conf->mode = RTE_ETH_FC_NONE;
3225 
3226 	return 0;
3227 }
3228 
3229 static int
3230 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3231 {
3232 	struct txgbe_hw *hw;
3233 	int err;
3234 	uint32_t rx_buf_size;
3235 	uint32_t max_high_water;
3236 	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3237 		txgbe_fc_none,
3238 		txgbe_fc_rx_pause,
3239 		txgbe_fc_tx_pause,
3240 		txgbe_fc_full
3241 	};
3242 
3243 	PMD_INIT_FUNC_TRACE();
3244 
3245 	hw = TXGBE_DEV_HW(dev);
3246 	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
3247 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3248 
3249 	/*
3250 	 * At least reserve one Ethernet frame for watermark
3251 	 * high_water/low_water in kilo bytes for txgbe
3252 	 */
3253 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3254 	if (fc_conf->high_water > max_high_water ||
3255 	    fc_conf->high_water < fc_conf->low_water) {
3256 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3257 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3258 		return -EINVAL;
3259 	}
3260 
3261 	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
3262 	hw->fc.pause_time     = fc_conf->pause_time;
3263 	hw->fc.high_water[0]  = fc_conf->high_water;
3264 	hw->fc.low_water[0]   = fc_conf->low_water;
3265 	hw->fc.send_xon       = fc_conf->send_xon;
3266 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3267 
3268 	err = txgbe_fc_enable(hw);
3269 
3270 	/* Not negotiated is not an error case */
3271 	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
3272 		wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
3273 		      (fc_conf->mac_ctrl_frame_fwd
3274 		       ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
3275 		txgbe_flush(hw);
3276 
3277 		return 0;
3278 	}
3279 
3280 	PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
3281 	return -EIO;
3282 }
3283 
3284 static int
3285 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3286 		struct rte_eth_pfc_conf *pfc_conf)
3287 {
3288 	int err;
3289 	uint32_t rx_buf_size;
3290 	uint32_t max_high_water;
3291 	uint8_t tc_num;
3292 	uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
3293 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3294 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3295 
3296 	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3297 		txgbe_fc_none,
3298 		txgbe_fc_rx_pause,
3299 		txgbe_fc_tx_pause,
3300 		txgbe_fc_full
3301 	};
3302 
3303 	PMD_INIT_FUNC_TRACE();
3304 
3305 	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3306 	tc_num = map[pfc_conf->priority];
3307 	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
3308 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3309 	/*
3310 	 * At least reserve one Ethernet frame for watermark
3311 	 * high_water/low_water in kilo bytes for txgbe
3312 	 */
3313 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3314 	if (pfc_conf->fc.high_water > max_high_water ||
3315 	    pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3316 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3317 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3318 		return -EINVAL;
3319 	}
3320 
3321 	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3322 	hw->fc.pause_time = pfc_conf->fc.pause_time;
3323 	hw->fc.send_xon = pfc_conf->fc.send_xon;
3324 	hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3325 	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3326 
3327 	err = txgbe_dcb_pfc_enable(hw, tc_num);
3328 
3329 	/* Not negotiated is not an error case */
3330 	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3331 		return 0;
3332 
3333 	PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3334 	return -EIO;
3335 }
3336 
3337 int
3338 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3339 			  struct rte_eth_rss_reta_entry64 *reta_conf,
3340 			  uint16_t reta_size)
3341 {
3342 	uint8_t i, j, mask;
3343 	uint32_t reta;
3344 	uint16_t idx, shift;
3345 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3346 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3347 
3348 	PMD_INIT_FUNC_TRACE();
3349 
3350 	if (!txgbe_rss_update_sp(hw->mac.type)) {
3351 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3352 			"NIC.");
3353 		return -ENOTSUP;
3354 	}
3355 
3356 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
3357 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3358 			"(%d) doesn't match the number hardware can supported "
3359 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
3360 		return -EINVAL;
3361 	}
3362 
3363 	for (i = 0; i < reta_size; i += 4) {
3364 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3365 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
3366 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3367 		if (!mask)
3368 			continue;
3369 
3370 		reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
3371 		for (j = 0; j < 4; j++) {
3372 			if (RS8(mask, j, 0x1)) {
3373 				reta  &= ~(MS32(8 * j, 0xFF));
3374 				reta |= LS32(reta_conf[idx].reta[shift + j],
3375 						8 * j, 0xFF);
3376 			}
3377 		}
3378 		wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3379 	}
3380 	adapter->rss_reta_updated = 1;
3381 
3382 	return 0;
3383 }
3384 
3385 int
3386 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3387 			 struct rte_eth_rss_reta_entry64 *reta_conf,
3388 			 uint16_t reta_size)
3389 {
3390 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3391 	uint8_t i, j, mask;
3392 	uint32_t reta;
3393 	uint16_t idx, shift;
3394 
3395 	PMD_INIT_FUNC_TRACE();
3396 
3397 	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
3398 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3399 			"(%d) doesn't match the number hardware can supported "
3400 			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
3401 		return -EINVAL;
3402 	}
3403 
3404 	for (i = 0; i < reta_size; i += 4) {
3405 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3406 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
3407 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3408 		if (!mask)
3409 			continue;
3410 
3411 		reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
3412 		for (j = 0; j < 4; j++) {
3413 			if (RS8(mask, j, 0x1))
3414 				reta_conf[idx].reta[shift + j] =
3415 					(uint16_t)RS32(reta, 8 * j, 0xFF);
3416 		}
3417 	}
3418 
3419 	return 0;
3420 }
3421 
3422 static int
3423 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3424 				uint32_t index, uint32_t pool)
3425 {
3426 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3427 	uint32_t enable_addr = 1;
3428 
3429 	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3430 			     pool, enable_addr);
3431 }
3432 
3433 static void
3434 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3435 {
3436 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3437 
3438 	txgbe_clear_rar(hw, index);
3439 }
3440 
3441 static int
3442 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3443 {
3444 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3445 
3446 	txgbe_remove_rar(dev, 0);
3447 	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3448 
3449 	return 0;
3450 }
3451 
3452 static int
3453 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3454 {
3455 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3456 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3457 	struct rte_eth_dev_data *dev_data = dev->data;
3458 
3459 	/* If device is started, refuse mtu that requires the support of
3460 	 * scattered packets when this feature has not been enabled before.
3461 	 */
3462 	if (dev_data->dev_started && !dev_data->scattered_rx &&
3463 	    (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3464 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3465 		PMD_INIT_LOG(ERR, "Stop port first.");
3466 		return -EINVAL;
3467 	}
3468 
3469 	if (hw->mode)
3470 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3471 			TXGBE_FRAME_SIZE_MAX);
3472 	else
3473 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3474 			TXGBE_FRMSZ_MAX(frame_size));
3475 
3476 	return 0;
3477 }
3478 
3479 static uint32_t
3480 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3481 {
3482 	uint32_t vector = 0;
3483 
3484 	switch (hw->mac.mc_filter_type) {
3485 	case 0:   /* use bits [47:36] of the address */
3486 		vector = ((uc_addr->addr_bytes[4] >> 4) |
3487 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
3488 		break;
3489 	case 1:   /* use bits [46:35] of the address */
3490 		vector = ((uc_addr->addr_bytes[4] >> 3) |
3491 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
3492 		break;
3493 	case 2:   /* use bits [45:34] of the address */
3494 		vector = ((uc_addr->addr_bytes[4] >> 2) |
3495 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
3496 		break;
3497 	case 3:   /* use bits [43:32] of the address */
3498 		vector = ((uc_addr->addr_bytes[4]) |
3499 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
3500 		break;
3501 	default:  /* Invalid mc_filter_type */
3502 		break;
3503 	}
3504 
3505 	/* vector can only be 12-bits or boundary will be exceeded */
3506 	vector &= 0xFFF;
3507 	return vector;
3508 }
3509 
3510 static int
3511 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3512 			struct rte_ether_addr *mac_addr, uint8_t on)
3513 {
3514 	uint32_t vector;
3515 	uint32_t uta_idx;
3516 	uint32_t reg_val;
3517 	uint32_t uta_mask;
3518 	uint32_t psrctl;
3519 
3520 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3521 	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3522 
3523 	/* The UTA table only exists on pf hardware */
3524 	if (hw->mac.type < txgbe_mac_raptor)
3525 		return -ENOTSUP;
3526 
3527 	vector = txgbe_uta_vector(hw, mac_addr);
3528 	uta_idx = (vector >> 5) & 0x7F;
3529 	uta_mask = 0x1UL << (vector & 0x1F);
3530 
3531 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3532 		return 0;
3533 
3534 	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3535 	if (on) {
3536 		uta_info->uta_in_use++;
3537 		reg_val |= uta_mask;
3538 		uta_info->uta_shadow[uta_idx] |= uta_mask;
3539 	} else {
3540 		uta_info->uta_in_use--;
3541 		reg_val &= ~uta_mask;
3542 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3543 	}
3544 
3545 	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3546 
3547 	psrctl = rd32(hw, TXGBE_PSRCTL);
3548 	if (uta_info->uta_in_use > 0)
3549 		psrctl |= TXGBE_PSRCTL_UCHFENA;
3550 	else
3551 		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3552 
3553 	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3554 	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3555 	wr32(hw, TXGBE_PSRCTL, psrctl);
3556 
3557 	return 0;
3558 }
3559 
3560 static int
3561 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3562 {
3563 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3564 	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3565 	uint32_t psrctl;
3566 	int i;
3567 
3568 	/* The UTA table only exists on pf hardware */
3569 	if (hw->mac.type < txgbe_mac_raptor)
3570 		return -ENOTSUP;
3571 
3572 	if (on) {
3573 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3574 			uta_info->uta_shadow[i] = ~0;
3575 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3576 		}
3577 	} else {
3578 		for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3579 			uta_info->uta_shadow[i] = 0;
3580 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
3581 		}
3582 	}
3583 
3584 	psrctl = rd32(hw, TXGBE_PSRCTL);
3585 	if (on)
3586 		psrctl |= TXGBE_PSRCTL_UCHFENA;
3587 	else
3588 		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3589 
3590 	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3591 	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3592 	wr32(hw, TXGBE_PSRCTL, psrctl);
3593 
3594 	return 0;
3595 }
3596 
3597 uint32_t
3598 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3599 {
3600 	uint32_t new_val = orig_val;
3601 
3602 	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
3603 		new_val |= TXGBE_POOLETHCTL_UTA;
3604 	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
3605 		new_val |= TXGBE_POOLETHCTL_MCHA;
3606 	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
3607 		new_val |= TXGBE_POOLETHCTL_UCHA;
3608 	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
3609 		new_val |= TXGBE_POOLETHCTL_BCA;
3610 	if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
3611 		new_val |= TXGBE_POOLETHCTL_MCP;
3612 
3613 	return new_val;
3614 }
3615 
3616 static int
3617 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3618 {
3619 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3620 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3621 	uint32_t mask;
3622 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3623 
3624 	if (queue_id < 32) {
3625 		mask = rd32(hw, TXGBE_IMS(0));
3626 		mask &= (1 << queue_id);
3627 		wr32(hw, TXGBE_IMS(0), mask);
3628 	} else if (queue_id < 64) {
3629 		mask = rd32(hw, TXGBE_IMS(1));
3630 		mask &= (1 << (queue_id - 32));
3631 		wr32(hw, TXGBE_IMS(1), mask);
3632 	}
3633 	rte_intr_enable(intr_handle);
3634 
3635 	return 0;
3636 }
3637 
3638 static int
3639 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3640 {
3641 	uint32_t mask;
3642 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3643 
3644 	if (queue_id < 32) {
3645 		mask = rd32(hw, TXGBE_IMS(0));
3646 		mask &= ~(1 << queue_id);
3647 		wr32(hw, TXGBE_IMS(0), mask);
3648 	} else if (queue_id < 64) {
3649 		mask = rd32(hw, TXGBE_IMS(1));
3650 		mask &= ~(1 << (queue_id - 32));
3651 		wr32(hw, TXGBE_IMS(1), mask);
3652 	}
3653 
3654 	return 0;
3655 }
3656 
3657 /**
3658  * set the IVAR registers, mapping interrupt causes to vectors
3659  * @param hw
3660  *  pointer to txgbe_hw struct
3661  * @direction
3662  *  0 for Rx, 1 for Tx, -1 for other causes
3663  * @queue
3664  *  queue to map the corresponding interrupt to
3665  * @msix_vector
3666  *  the vector to map to the corresponding queue
3667  */
3668 void
3669 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3670 		   uint8_t queue, uint8_t msix_vector)
3671 {
3672 	uint32_t tmp, idx;
3673 
3674 	if (direction == -1) {
3675 		/* other causes */
3676 		msix_vector |= TXGBE_IVARMISC_VLD;
3677 		idx = 0;
3678 		tmp = rd32(hw, TXGBE_IVARMISC);
3679 		tmp &= ~(0xFF << idx);
3680 		tmp |= (msix_vector << idx);
3681 		wr32(hw, TXGBE_IVARMISC, tmp);
3682 	} else {
3683 		/* rx or tx causes */
3684 		/* Workround for ICR lost */
3685 		idx = ((16 * (queue & 1)) + (8 * direction));
3686 		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3687 		tmp &= ~(0xFF << idx);
3688 		tmp |= (msix_vector << idx);
3689 		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
3690 	}
3691 }
3692 
3693 /**
3694  * Sets up the hardware to properly generate MSI-X interrupts
3695  * @hw
3696  *  board private structure
3697  */
3698 static void
3699 txgbe_configure_msix(struct rte_eth_dev *dev)
3700 {
3701 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3702 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
3703 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3704 	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3705 	uint32_t vec = TXGBE_MISC_VEC_ID;
3706 	uint32_t gpie;
3707 
3708 	/* won't configure msix register if no mapping is done
3709 	 * between intr vector and event fd
3710 	 * but if misx has been enabled already, need to configure
3711 	 * auto clean, auto mask and throttling.
3712 	 */
3713 	gpie = rd32(hw, TXGBE_GPIE);
3714 	if (!rte_intr_dp_is_en(intr_handle) &&
3715 	    !(gpie & TXGBE_GPIE_MSIX))
3716 		return;
3717 
3718 	if (rte_intr_allow_others(intr_handle)) {
3719 		base = TXGBE_RX_VEC_START;
3720 		vec = base;
3721 	}
3722 
3723 	/* setup GPIE for MSI-x mode */
3724 	gpie = rd32(hw, TXGBE_GPIE);
3725 	gpie |= TXGBE_GPIE_MSIX;
3726 	wr32(hw, TXGBE_GPIE, gpie);
3727 
3728 	/* Populate the IVAR table and set the ITR values to the
3729 	 * corresponding register.
3730 	 */
3731 	if (rte_intr_dp_is_en(intr_handle)) {
3732 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3733 			queue_id++) {
3734 			/* by default, 1:1 mapping */
3735 			txgbe_set_ivar_map(hw, 0, queue_id, vec);
3736 			rte_intr_vec_list_index_set(intr_handle,
3737 							   queue_id, vec);
3738 			if (vec < base + rte_intr_nb_efd_get(intr_handle)
3739 			    - 1)
3740 				vec++;
3741 		}
3742 
3743 		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3744 	}
3745 	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3746 			TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3747 			| TXGBE_ITR_WRDSA);
3748 }
3749 
3750 int
3751 txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3752 			   uint16_t queue_idx, uint16_t tx_rate)
3753 {
3754 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3755 	uint32_t bcnrc_val;
3756 
3757 	if (queue_idx >= hw->mac.max_tx_queues)
3758 		return -EINVAL;
3759 
3760 	if (tx_rate != 0) {
3761 		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3762 		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3763 	} else {
3764 		bcnrc_val = 0;
3765 	}
3766 
3767 	/*
3768 	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3769 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3770 	 */
3771 	wr32(hw, TXGBE_ARBTXMMW, 0x14);
3772 
3773 	/* Set ARBTXRATE of queue X */
3774 	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3775 	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
3776 	txgbe_flush(hw);
3777 
3778 	return 0;
3779 }
3780 
3781 int
3782 txgbe_syn_filter_set(struct rte_eth_dev *dev,
3783 			struct rte_eth_syn_filter *filter,
3784 			bool add)
3785 {
3786 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3787 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3788 	uint32_t syn_info;
3789 	uint32_t synqf;
3790 
3791 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3792 		return -EINVAL;
3793 
3794 	syn_info = filter_info->syn_info;
3795 
3796 	if (add) {
3797 		if (syn_info & TXGBE_SYNCLS_ENA)
3798 			return -EINVAL;
3799 		synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
3800 		synqf |= TXGBE_SYNCLS_ENA;
3801 
3802 		if (filter->hig_pri)
3803 			synqf |= TXGBE_SYNCLS_HIPRIO;
3804 		else
3805 			synqf &= ~TXGBE_SYNCLS_HIPRIO;
3806 	} else {
3807 		synqf = rd32(hw, TXGBE_SYNCLS);
3808 		if (!(syn_info & TXGBE_SYNCLS_ENA))
3809 			return -ENOENT;
3810 		synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
3811 	}
3812 
3813 	filter_info->syn_info = synqf;
3814 	wr32(hw, TXGBE_SYNCLS, synqf);
3815 	txgbe_flush(hw);
3816 	return 0;
3817 }
3818 
3819 static inline enum txgbe_5tuple_protocol
3820 convert_protocol_type(uint8_t protocol_value)
3821 {
3822 	if (protocol_value == IPPROTO_TCP)
3823 		return TXGBE_5TF_PROT_TCP;
3824 	else if (protocol_value == IPPROTO_UDP)
3825 		return TXGBE_5TF_PROT_UDP;
3826 	else if (protocol_value == IPPROTO_SCTP)
3827 		return TXGBE_5TF_PROT_SCTP;
3828 	else
3829 		return TXGBE_5TF_PROT_NONE;
3830 }
3831 
3832 /* inject a 5-tuple filter to HW */
3833 static inline void
3834 txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
3835 			   struct txgbe_5tuple_filter *filter)
3836 {
3837 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3838 	int i;
3839 	uint32_t ftqf, sdpqf;
3840 	uint32_t l34timir = 0;
3841 	uint32_t mask = TXGBE_5TFCTL0_MASK;
3842 
3843 	i = filter->index;
3844 	sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
3845 	sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
3846 
3847 	ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
3848 	ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
3849 	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
3850 		mask &= ~TXGBE_5TFCTL0_MSADDR;
3851 	if (filter->filter_info.dst_ip_mask == 0)
3852 		mask &= ~TXGBE_5TFCTL0_MDADDR;
3853 	if (filter->filter_info.src_port_mask == 0)
3854 		mask &= ~TXGBE_5TFCTL0_MSPORT;
3855 	if (filter->filter_info.dst_port_mask == 0)
3856 		mask &= ~TXGBE_5TFCTL0_MDPORT;
3857 	if (filter->filter_info.proto_mask == 0)
3858 		mask &= ~TXGBE_5TFCTL0_MPROTO;
3859 	ftqf |= mask;
3860 	ftqf |= TXGBE_5TFCTL0_MPOOL;
3861 	ftqf |= TXGBE_5TFCTL0_ENA;
3862 
3863 	wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
3864 	wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
3865 	wr32(hw, TXGBE_5TFPORT(i), sdpqf);
3866 	wr32(hw, TXGBE_5TFCTL0(i), ftqf);
3867 
3868 	l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
3869 	wr32(hw, TXGBE_5TFCTL1(i), l34timir);
3870 }
3871 
3872 /*
3873  * add a 5tuple filter
3874  *
3875  * @param
3876  * dev: Pointer to struct rte_eth_dev.
3877  * index: the index the filter allocates.
3878  * filter: pointer to the filter that will be added.
3879  * rx_queue: the queue id the filter assigned to.
3880  *
3881  * @return
3882  *    - On success, zero.
3883  *    - On failure, a negative value.
3884  */
3885 static int
3886 txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
3887 			struct txgbe_5tuple_filter *filter)
3888 {
3889 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3890 	int i, idx, shift;
3891 
3892 	/*
3893 	 * look for an unused 5tuple filter index,
3894 	 * and insert the filter to list.
3895 	 */
3896 	for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
3897 		idx = i / (sizeof(uint32_t) * NBBY);
3898 		shift = i % (sizeof(uint32_t) * NBBY);
3899 		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
3900 			filter_info->fivetuple_mask[idx] |= 1 << shift;
3901 			filter->index = i;
3902 			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3903 					  filter,
3904 					  entries);
3905 			break;
3906 		}
3907 	}
3908 	if (i >= TXGBE_MAX_FTQF_FILTERS) {
3909 		PMD_DRV_LOG(ERR, "5tuple filters are full.");
3910 		return -ENOSYS;
3911 	}
3912 
3913 	txgbe_inject_5tuple_filter(dev, filter);
3914 
3915 	return 0;
3916 }
3917 
3918 /*
3919  * remove a 5tuple filter
3920  *
3921  * @param
3922  * dev: Pointer to struct rte_eth_dev.
3923  * filter: the pointer of the filter will be removed.
3924  */
3925 static void
3926 txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
3927 			struct txgbe_5tuple_filter *filter)
3928 {
3929 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3930 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3931 	uint16_t index = filter->index;
3932 
3933 	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
3934 				~(1 << (index % (sizeof(uint32_t) * NBBY)));
3935 	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3936 	rte_free(filter);
3937 
3938 	wr32(hw, TXGBE_5TFDADDR(index), 0);
3939 	wr32(hw, TXGBE_5TFSADDR(index), 0);
3940 	wr32(hw, TXGBE_5TFPORT(index), 0);
3941 	wr32(hw, TXGBE_5TFCTL0(index), 0);
3942 	wr32(hw, TXGBE_5TFCTL1(index), 0);
3943 }
3944 
3945 static inline struct txgbe_5tuple_filter *
3946 txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
3947 			struct txgbe_5tuple_filter_info *key)
3948 {
3949 	struct txgbe_5tuple_filter *it;
3950 
3951 	TAILQ_FOREACH(it, filter_list, entries) {
3952 		if (memcmp(key, &it->filter_info,
3953 			sizeof(struct txgbe_5tuple_filter_info)) == 0) {
3954 			return it;
3955 		}
3956 	}
3957 	return NULL;
3958 }
3959 
3960 /* translate elements in struct rte_eth_ntuple_filter
3961  * to struct txgbe_5tuple_filter_info
3962  */
3963 static inline int
3964 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
3965 			struct txgbe_5tuple_filter_info *filter_info)
3966 {
3967 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
3968 		filter->priority > TXGBE_5TUPLE_MAX_PRI ||
3969 		filter->priority < TXGBE_5TUPLE_MIN_PRI)
3970 		return -EINVAL;
3971 
3972 	switch (filter->dst_ip_mask) {
3973 	case UINT32_MAX:
3974 		filter_info->dst_ip_mask = 0;
3975 		filter_info->dst_ip = filter->dst_ip;
3976 		break;
3977 	case 0:
3978 		filter_info->dst_ip_mask = 1;
3979 		break;
3980 	default:
3981 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3982 		return -EINVAL;
3983 	}
3984 
3985 	switch (filter->src_ip_mask) {
3986 	case UINT32_MAX:
3987 		filter_info->src_ip_mask = 0;
3988 		filter_info->src_ip = filter->src_ip;
3989 		break;
3990 	case 0:
3991 		filter_info->src_ip_mask = 1;
3992 		break;
3993 	default:
3994 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
3995 		return -EINVAL;
3996 	}
3997 
3998 	switch (filter->dst_port_mask) {
3999 	case UINT16_MAX:
4000 		filter_info->dst_port_mask = 0;
4001 		filter_info->dst_port = filter->dst_port;
4002 		break;
4003 	case 0:
4004 		filter_info->dst_port_mask = 1;
4005 		break;
4006 	default:
4007 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
4008 		return -EINVAL;
4009 	}
4010 
4011 	switch (filter->src_port_mask) {
4012 	case UINT16_MAX:
4013 		filter_info->src_port_mask = 0;
4014 		filter_info->src_port = filter->src_port;
4015 		break;
4016 	case 0:
4017 		filter_info->src_port_mask = 1;
4018 		break;
4019 	default:
4020 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
4021 		return -EINVAL;
4022 	}
4023 
4024 	switch (filter->proto_mask) {
4025 	case UINT8_MAX:
4026 		filter_info->proto_mask = 0;
4027 		filter_info->proto =
4028 			convert_protocol_type(filter->proto);
4029 		break;
4030 	case 0:
4031 		filter_info->proto_mask = 1;
4032 		break;
4033 	default:
4034 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
4035 		return -EINVAL;
4036 	}
4037 
4038 	filter_info->priority = (uint8_t)filter->priority;
4039 	return 0;
4040 }
4041 
4042 /*
4043  * add or delete a ntuple filter
4044  *
4045  * @param
4046  * dev: Pointer to struct rte_eth_dev.
4047  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4048  * add: if true, add filter, if false, remove filter
4049  *
4050  * @return
4051  *    - On success, zero.
4052  *    - On failure, a negative value.
4053  */
4054 int
4055 txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
4056 			struct rte_eth_ntuple_filter *ntuple_filter,
4057 			bool add)
4058 {
4059 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4060 	struct txgbe_5tuple_filter_info filter_5tuple;
4061 	struct txgbe_5tuple_filter *filter;
4062 	int ret;
4063 
4064 	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
4065 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
4066 		return -EINVAL;
4067 	}
4068 
4069 	memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
4070 	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
4071 	if (ret < 0)
4072 		return ret;
4073 
4074 	filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
4075 					 &filter_5tuple);
4076 	if (filter != NULL && add) {
4077 		PMD_DRV_LOG(ERR, "filter exists.");
4078 		return -EEXIST;
4079 	}
4080 	if (filter == NULL && !add) {
4081 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
4082 		return -ENOENT;
4083 	}
4084 
4085 	if (add) {
4086 		filter = rte_zmalloc("txgbe_5tuple_filter",
4087 				sizeof(struct txgbe_5tuple_filter), 0);
4088 		if (filter == NULL)
4089 			return -ENOMEM;
4090 		rte_memcpy(&filter->filter_info,
4091 				 &filter_5tuple,
4092 				 sizeof(struct txgbe_5tuple_filter_info));
4093 		filter->queue = ntuple_filter->queue;
4094 		ret = txgbe_add_5tuple_filter(dev, filter);
4095 		if (ret < 0) {
4096 			rte_free(filter);
4097 			return ret;
4098 		}
4099 	} else {
4100 		txgbe_remove_5tuple_filter(dev, filter);
4101 	}
4102 
4103 	return 0;
4104 }
4105 
4106 int
4107 txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
4108 			struct rte_eth_ethertype_filter *filter,
4109 			bool add)
4110 {
4111 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4112 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4113 	uint32_t etqf = 0;
4114 	uint32_t etqs = 0;
4115 	int ret;
4116 	struct txgbe_ethertype_filter ethertype_filter;
4117 
4118 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
4119 		return -EINVAL;
4120 
4121 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
4122 	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
4123 		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4124 			" ethertype filter.", filter->ether_type);
4125 		return -EINVAL;
4126 	}
4127 
4128 	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4129 		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4130 		return -EINVAL;
4131 	}
4132 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4133 		PMD_DRV_LOG(ERR, "drop option is unsupported.");
4134 		return -EINVAL;
4135 	}
4136 
4137 	ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4138 	if (ret >= 0 && add) {
4139 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4140 			    filter->ether_type);
4141 		return -EEXIST;
4142 	}
4143 	if (ret < 0 && !add) {
4144 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4145 			    filter->ether_type);
4146 		return -ENOENT;
4147 	}
4148 
4149 	if (add) {
4150 		etqf = TXGBE_ETFLT_ENA;
4151 		etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
4152 		etqs |= TXGBE_ETCLS_QPID(filter->queue);
4153 		etqs |= TXGBE_ETCLS_QENA;
4154 
4155 		ethertype_filter.ethertype = filter->ether_type;
4156 		ethertype_filter.etqf = etqf;
4157 		ethertype_filter.etqs = etqs;
4158 		ethertype_filter.conf = FALSE;
4159 		ret = txgbe_ethertype_filter_insert(filter_info,
4160 						    &ethertype_filter);
4161 		if (ret < 0) {
4162 			PMD_DRV_LOG(ERR, "ethertype filters are full.");
4163 			return -ENOSPC;
4164 		}
4165 	} else {
4166 		ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
4167 		if (ret < 0)
4168 			return -ENOSYS;
4169 	}
4170 	wr32(hw, TXGBE_ETFLT(ret), etqf);
4171 	wr32(hw, TXGBE_ETCLS(ret), etqs);
4172 	txgbe_flush(hw);
4173 
4174 	return 0;
4175 }
4176 
4177 static int
4178 txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
4179 		       const struct rte_flow_ops **ops)
4180 {
4181 	*ops = &txgbe_flow_ops;
4182 	return 0;
4183 }
4184 
4185 static u8 *
4186 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
4187 			u8 **mc_addr_ptr, u32 *vmdq)
4188 {
4189 	u8 *mc_addr;
4190 
4191 	*vmdq = 0;
4192 	mc_addr = *mc_addr_ptr;
4193 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
4194 	return mc_addr;
4195 }
4196 
4197 int
4198 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4199 			  struct rte_ether_addr *mc_addr_set,
4200 			  uint32_t nb_mc_addr)
4201 {
4202 	struct txgbe_hw *hw;
4203 	u8 *mc_addr_list;
4204 
4205 	hw = TXGBE_DEV_HW(dev);
4206 	mc_addr_list = (u8 *)mc_addr_set;
4207 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4208 					 txgbe_dev_addr_list_itr, TRUE);
4209 }
4210 
4211 static uint64_t
4212 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
4213 {
4214 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4215 	uint64_t systime_cycles;
4216 
4217 	systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
4218 	systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
4219 
4220 	return systime_cycles;
4221 }
4222 
4223 static uint64_t
4224 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4225 {
4226 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4227 	uint64_t rx_tstamp_cycles;
4228 
4229 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
4230 	rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
4231 	rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
4232 
4233 	return rx_tstamp_cycles;
4234 }
4235 
4236 static uint64_t
4237 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4238 {
4239 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4240 	uint64_t tx_tstamp_cycles;
4241 
4242 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
4243 	tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
4244 	tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
4245 
4246 	return tx_tstamp_cycles;
4247 }
4248 
4249 static void
4250 txgbe_start_timecounters(struct rte_eth_dev *dev)
4251 {
4252 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4253 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4254 	struct rte_eth_link link;
4255 	uint32_t incval = 0;
4256 	uint32_t shift = 0;
4257 
4258 	/* Get current link speed. */
4259 	txgbe_dev_link_update(dev, 1);
4260 	rte_eth_linkstatus_get(dev, &link);
4261 
4262 	switch (link.link_speed) {
4263 	case RTE_ETH_SPEED_NUM_100M:
4264 		incval = TXGBE_INCVAL_100;
4265 		shift = TXGBE_INCVAL_SHIFT_100;
4266 		break;
4267 	case RTE_ETH_SPEED_NUM_1G:
4268 		incval = TXGBE_INCVAL_1GB;
4269 		shift = TXGBE_INCVAL_SHIFT_1GB;
4270 		break;
4271 	case RTE_ETH_SPEED_NUM_10G:
4272 	default:
4273 		incval = TXGBE_INCVAL_10GB;
4274 		shift = TXGBE_INCVAL_SHIFT_10GB;
4275 		break;
4276 	}
4277 
4278 	wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
4279 
4280 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4281 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4282 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4283 
4284 	adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4285 	adapter->systime_tc.cc_shift = shift;
4286 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4287 
4288 	adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4289 	adapter->rx_tstamp_tc.cc_shift = shift;
4290 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4291 
4292 	adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4293 	adapter->tx_tstamp_tc.cc_shift = shift;
4294 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4295 }
4296 
4297 static int
4298 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4299 {
4300 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4301 
4302 	adapter->systime_tc.nsec += delta;
4303 	adapter->rx_tstamp_tc.nsec += delta;
4304 	adapter->tx_tstamp_tc.nsec += delta;
4305 
4306 	return 0;
4307 }
4308 
4309 static int
4310 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4311 {
4312 	uint64_t ns;
4313 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4314 
4315 	ns = rte_timespec_to_ns(ts);
4316 	/* Set the timecounters to a new value. */
4317 	adapter->systime_tc.nsec = ns;
4318 	adapter->rx_tstamp_tc.nsec = ns;
4319 	adapter->tx_tstamp_tc.nsec = ns;
4320 
4321 	return 0;
4322 }
4323 
4324 static int
4325 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4326 {
4327 	uint64_t ns, systime_cycles;
4328 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4329 
4330 	systime_cycles = txgbe_read_systime_cyclecounter(dev);
4331 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4332 	*ts = rte_ns_to_timespec(ns);
4333 
4334 	return 0;
4335 }
4336 
4337 static int
4338 txgbe_timesync_enable(struct rte_eth_dev *dev)
4339 {
4340 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4341 	uint32_t tsync_ctl;
4342 
4343 	/* Stop the timesync system time. */
4344 	wr32(hw, TXGBE_TSTIMEINC, 0x0);
4345 	/* Reset the timesync system time value. */
4346 	wr32(hw, TXGBE_TSTIMEL, 0x0);
4347 	wr32(hw, TXGBE_TSTIMEH, 0x0);
4348 
4349 	txgbe_start_timecounters(dev);
4350 
4351 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4352 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
4353 		RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
4354 
4355 	/* Enable timestamping of received PTP packets. */
4356 	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4357 	tsync_ctl |= TXGBE_TSRXCTL_ENA;
4358 	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4359 
4360 	/* Enable timestamping of transmitted PTP packets. */
4361 	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4362 	tsync_ctl |= TXGBE_TSTXCTL_ENA;
4363 	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4364 
4365 	txgbe_flush(hw);
4366 
4367 	return 0;
4368 }
4369 
4370 static int
4371 txgbe_timesync_disable(struct rte_eth_dev *dev)
4372 {
4373 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4374 	uint32_t tsync_ctl;
4375 
4376 	/* Disable timestamping of transmitted PTP packets. */
4377 	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4378 	tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
4379 	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4380 
4381 	/* Disable timestamping of received PTP packets. */
4382 	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4383 	tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
4384 	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4385 
4386 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4387 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
4388 
4389 	/* Stop incrementating the System Time registers. */
4390 	wr32(hw, TXGBE_TSTIMEINC, 0);
4391 
4392 	return 0;
4393 }
4394 
4395 static int
4396 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4397 				 struct timespec *timestamp,
4398 				 uint32_t flags __rte_unused)
4399 {
4400 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4401 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4402 	uint32_t tsync_rxctl;
4403 	uint64_t rx_tstamp_cycles;
4404 	uint64_t ns;
4405 
4406 	tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
4407 	if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
4408 		return -EINVAL;
4409 
4410 	rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
4411 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4412 	*timestamp = rte_ns_to_timespec(ns);
4413 
4414 	return  0;
4415 }
4416 
4417 static int
4418 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4419 				 struct timespec *timestamp)
4420 {
4421 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4422 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4423 	uint32_t tsync_txctl;
4424 	uint64_t tx_tstamp_cycles;
4425 	uint64_t ns;
4426 
4427 	tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
4428 	if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
4429 		return -EINVAL;
4430 
4431 	tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
4432 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4433 	*timestamp = rte_ns_to_timespec(ns);
4434 
4435 	return 0;
4436 }
4437 
4438 static int
4439 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4440 {
4441 	int count = 0;
4442 	int g_ind = 0;
4443 	const struct reg_info *reg_group;
4444 	const struct reg_info **reg_set = txgbe_regs_others;
4445 
4446 	while ((reg_group = reg_set[g_ind++]))
4447 		count += txgbe_regs_group_count(reg_group);
4448 
4449 	return count;
4450 }
4451 
4452 static int
4453 txgbe_get_regs(struct rte_eth_dev *dev,
4454 	      struct rte_dev_reg_info *regs)
4455 {
4456 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4457 	uint32_t *data = regs->data;
4458 	int g_ind = 0;
4459 	int count = 0;
4460 	const struct reg_info *reg_group;
4461 	const struct reg_info **reg_set = txgbe_regs_others;
4462 
4463 	if (data == NULL) {
4464 		regs->length = txgbe_get_reg_length(dev);
4465 		regs->width = sizeof(uint32_t);
4466 		return 0;
4467 	}
4468 
4469 	/* Support only full register dump */
4470 	if (regs->length == 0 ||
4471 	    regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
4472 		regs->version = hw->mac.type << 24 |
4473 				hw->revision_id << 16 |
4474 				hw->device_id;
4475 		while ((reg_group = reg_set[g_ind++]))
4476 			count += txgbe_read_regs_group(dev, &data[count],
4477 						      reg_group);
4478 		return 0;
4479 	}
4480 
4481 	return -ENOTSUP;
4482 }
4483 
4484 static int
4485 txgbe_get_eeprom_length(struct rte_eth_dev *dev)
4486 {
4487 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4488 
4489 	/* Return unit is byte count */
4490 	return hw->rom.word_size * 2;
4491 }
4492 
4493 static int
4494 txgbe_get_eeprom(struct rte_eth_dev *dev,
4495 		struct rte_dev_eeprom_info *in_eeprom)
4496 {
4497 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4498 	struct txgbe_rom_info *eeprom = &hw->rom;
4499 	uint16_t *data = in_eeprom->data;
4500 	int first, length;
4501 
4502 	first = in_eeprom->offset >> 1;
4503 	length = in_eeprom->length >> 1;
4504 	if (first > hw->rom.word_size ||
4505 	    ((first + length) > hw->rom.word_size))
4506 		return -EINVAL;
4507 
4508 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4509 
4510 	return eeprom->readw_buffer(hw, first, length, data);
4511 }
4512 
4513 static int
4514 txgbe_set_eeprom(struct rte_eth_dev *dev,
4515 		struct rte_dev_eeprom_info *in_eeprom)
4516 {
4517 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4518 	struct txgbe_rom_info *eeprom = &hw->rom;
4519 	uint16_t *data = in_eeprom->data;
4520 	int first, length;
4521 
4522 	first = in_eeprom->offset >> 1;
4523 	length = in_eeprom->length >> 1;
4524 	if (first > hw->rom.word_size ||
4525 	    ((first + length) > hw->rom.word_size))
4526 		return -EINVAL;
4527 
4528 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4529 
4530 	return eeprom->writew_buffer(hw,  first, length, data);
4531 }
4532 
4533 static int
4534 txgbe_get_module_info(struct rte_eth_dev *dev,
4535 		      struct rte_eth_dev_module_info *modinfo)
4536 {
4537 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4538 	uint32_t status;
4539 	uint8_t sff8472_rev, addr_mode;
4540 	bool page_swap = false;
4541 
4542 	/* Check whether we support SFF-8472 or not */
4543 	status = hw->phy.read_i2c_eeprom(hw,
4544 					     TXGBE_SFF_SFF_8472_COMP,
4545 					     &sff8472_rev);
4546 	if (status != 0)
4547 		return -EIO;
4548 
4549 	/* addressing mode is not supported */
4550 	status = hw->phy.read_i2c_eeprom(hw,
4551 					     TXGBE_SFF_SFF_8472_SWAP,
4552 					     &addr_mode);
4553 	if (status != 0)
4554 		return -EIO;
4555 
4556 	if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
4557 		PMD_DRV_LOG(ERR,
4558 			    "Address change required to access page 0xA2, "
4559 			    "but not supported. Please report the module "
4560 			    "type to the driver maintainers.");
4561 		page_swap = true;
4562 	}
4563 
4564 	if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
4565 		/* We have a SFP, but it does not support SFF-8472 */
4566 		modinfo->type = RTE_ETH_MODULE_SFF_8079;
4567 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
4568 	} else {
4569 		/* We have a SFP which supports a revision of SFF-8472. */
4570 		modinfo->type = RTE_ETH_MODULE_SFF_8472;
4571 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
4572 	}
4573 
4574 	return 0;
4575 }
4576 
4577 static int
4578 txgbe_get_module_eeprom(struct rte_eth_dev *dev,
4579 			struct rte_dev_eeprom_info *info)
4580 {
4581 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4582 	uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
4583 	uint8_t databyte = 0xFF;
4584 	uint8_t *data = info->data;
4585 	uint32_t i = 0;
4586 
4587 	if (info->length == 0)
4588 		return -EINVAL;
4589 
4590 	for (i = info->offset; i < info->offset + info->length; i++) {
4591 		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
4592 			status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
4593 		else
4594 			status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
4595 
4596 		if (status != 0)
4597 			return -EIO;
4598 
4599 		data[i - info->offset] = databyte;
4600 	}
4601 
4602 	return 0;
4603 }
4604 
4605 bool
4606 txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
4607 {
4608 	switch (mac_type) {
4609 	case txgbe_mac_raptor:
4610 	case txgbe_mac_raptor_vf:
4611 		return 1;
4612 	default:
4613 		return 0;
4614 	}
4615 }
4616 
4617 static int
4618 txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
4619 			struct rte_eth_dcb_info *dcb_info)
4620 {
4621 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
4622 	struct txgbe_dcb_tc_config *tc;
4623 	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
4624 	uint8_t nb_tcs;
4625 	uint8_t i, j;
4626 
4627 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
4628 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
4629 	else
4630 		dcb_info->nb_tcs = 1;
4631 
4632 	tc_queue = &dcb_info->tc_queue;
4633 	nb_tcs = dcb_info->nb_tcs;
4634 
4635 	if (dcb_config->vt_mode) { /* vt is enabled */
4636 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
4637 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
4638 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
4639 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
4640 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
4641 			for (j = 0; j < nb_tcs; j++) {
4642 				tc_queue->tc_rxq[0][j].base = j;
4643 				tc_queue->tc_rxq[0][j].nb_queue = 1;
4644 				tc_queue->tc_txq[0][j].base = j;
4645 				tc_queue->tc_txq[0][j].nb_queue = 1;
4646 			}
4647 		} else {
4648 			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
4649 				for (j = 0; j < nb_tcs; j++) {
4650 					tc_queue->tc_rxq[i][j].base =
4651 						i * nb_tcs + j;
4652 					tc_queue->tc_rxq[i][j].nb_queue = 1;
4653 					tc_queue->tc_txq[i][j].base =
4654 						i * nb_tcs + j;
4655 					tc_queue->tc_txq[i][j].nb_queue = 1;
4656 				}
4657 			}
4658 		}
4659 	} else { /* vt is disabled */
4660 		struct rte_eth_dcb_rx_conf *rx_conf =
4661 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
4662 		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
4663 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
4664 		if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
4665 			for (i = 0; i < dcb_info->nb_tcs; i++) {
4666 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
4667 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4668 			}
4669 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
4670 			dcb_info->tc_queue.tc_txq[0][1].base = 64;
4671 			dcb_info->tc_queue.tc_txq[0][2].base = 96;
4672 			dcb_info->tc_queue.tc_txq[0][3].base = 112;
4673 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
4674 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4675 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4676 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4677 		} else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
4678 			for (i = 0; i < dcb_info->nb_tcs; i++) {
4679 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
4680 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4681 			}
4682 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
4683 			dcb_info->tc_queue.tc_txq[0][1].base = 32;
4684 			dcb_info->tc_queue.tc_txq[0][2].base = 64;
4685 			dcb_info->tc_queue.tc_txq[0][3].base = 80;
4686 			dcb_info->tc_queue.tc_txq[0][4].base = 96;
4687 			dcb_info->tc_queue.tc_txq[0][5].base = 104;
4688 			dcb_info->tc_queue.tc_txq[0][6].base = 112;
4689 			dcb_info->tc_queue.tc_txq[0][7].base = 120;
4690 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
4691 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4692 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4693 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4694 			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
4695 			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
4696 			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
4697 			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
4698 		}
4699 	}
4700 	for (i = 0; i < dcb_info->nb_tcs; i++) {
4701 		tc = &dcb_config->tc_config[i];
4702 		dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
4703 	}
4704 	return 0;
4705 }
4706 
4707 /* Update e-tag ether type */
4708 static int
4709 txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
4710 			    uint16_t ether_type)
4711 {
4712 	uint32_t etag_etype;
4713 
4714 	etag_etype = rd32(hw, TXGBE_EXTAG);
4715 	etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
4716 	etag_etype |= ether_type;
4717 	wr32(hw, TXGBE_EXTAG, etag_etype);
4718 	txgbe_flush(hw);
4719 
4720 	return 0;
4721 }
4722 
4723 /* Enable e-tag tunnel */
4724 static int
4725 txgbe_e_tag_enable(struct txgbe_hw *hw)
4726 {
4727 	uint32_t etag_etype;
4728 
4729 	etag_etype = rd32(hw, TXGBE_PORTCTL);
4730 	etag_etype |= TXGBE_PORTCTL_ETAG;
4731 	wr32(hw, TXGBE_PORTCTL, etag_etype);
4732 	txgbe_flush(hw);
4733 
4734 	return 0;
4735 }
4736 
4737 static int
4738 txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
4739 		       struct txgbe_l2_tunnel_conf  *l2_tunnel)
4740 {
4741 	int ret = 0;
4742 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4743 	uint32_t i, rar_entries;
4744 	uint32_t rar_low, rar_high;
4745 
4746 	rar_entries = hw->mac.num_rar_entries;
4747 
4748 	for (i = 1; i < rar_entries; i++) {
4749 		wr32(hw, TXGBE_ETHADDRIDX, i);
4750 		rar_high = rd32(hw, TXGBE_ETHADDRH);
4751 		rar_low  = rd32(hw, TXGBE_ETHADDRL);
4752 		if ((rar_high & TXGBE_ETHADDRH_VLD) &&
4753 		    (rar_high & TXGBE_ETHADDRH_ETAG) &&
4754 		    (TXGBE_ETHADDRL_ETAG(rar_low) ==
4755 		     l2_tunnel->tunnel_id)) {
4756 			wr32(hw, TXGBE_ETHADDRL, 0);
4757 			wr32(hw, TXGBE_ETHADDRH, 0);
4758 
4759 			txgbe_clear_vmdq(hw, i, BIT_MASK32);
4760 
4761 			return ret;
4762 		}
4763 	}
4764 
4765 	return ret;
4766 }
4767 
4768 static int
4769 txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
4770 		       struct txgbe_l2_tunnel_conf *l2_tunnel)
4771 {
4772 	int ret = 0;
4773 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4774 	uint32_t i, rar_entries;
4775 	uint32_t rar_low, rar_high;
4776 
4777 	/* One entry for one tunnel. Try to remove potential existing entry. */
4778 	txgbe_e_tag_filter_del(dev, l2_tunnel);
4779 
4780 	rar_entries = hw->mac.num_rar_entries;
4781 
4782 	for (i = 1; i < rar_entries; i++) {
4783 		wr32(hw, TXGBE_ETHADDRIDX, i);
4784 		rar_high = rd32(hw, TXGBE_ETHADDRH);
4785 		if (rar_high & TXGBE_ETHADDRH_VLD) {
4786 			continue;
4787 		} else {
4788 			txgbe_set_vmdq(hw, i, l2_tunnel->pool);
4789 			rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
4790 			rar_low = l2_tunnel->tunnel_id;
4791 
4792 			wr32(hw, TXGBE_ETHADDRL, rar_low);
4793 			wr32(hw, TXGBE_ETHADDRH, rar_high);
4794 
4795 			return ret;
4796 		}
4797 	}
4798 
4799 	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
4800 		     " Please remove a rule before adding a new one.");
4801 	return -EINVAL;
4802 }
4803 
4804 static inline struct txgbe_l2_tn_filter *
4805 txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
4806 			  struct txgbe_l2_tn_key *key)
4807 {
4808 	int ret;
4809 
4810 	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
4811 	if (ret < 0)
4812 		return NULL;
4813 
4814 	return l2_tn_info->hash_map[ret];
4815 }
4816 
4817 static inline int
4818 txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4819 			  struct txgbe_l2_tn_filter *l2_tn_filter)
4820 {
4821 	int ret;
4822 
4823 	ret = rte_hash_add_key(l2_tn_info->hash_handle,
4824 			       &l2_tn_filter->key);
4825 
4826 	if (ret < 0) {
4827 		PMD_DRV_LOG(ERR,
4828 			    "Failed to insert L2 tunnel filter"
4829 			    " to hash table %d!",
4830 			    ret);
4831 		return ret;
4832 	}
4833 
4834 	l2_tn_info->hash_map[ret] = l2_tn_filter;
4835 
4836 	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4837 
4838 	return 0;
4839 }
4840 
4841 static inline int
4842 txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4843 			  struct txgbe_l2_tn_key *key)
4844 {
4845 	int ret;
4846 	struct txgbe_l2_tn_filter *l2_tn_filter;
4847 
4848 	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
4849 
4850 	if (ret < 0) {
4851 		PMD_DRV_LOG(ERR,
4852 			    "No such L2 tunnel filter to delete %d!",
4853 			    ret);
4854 		return ret;
4855 	}
4856 
4857 	l2_tn_filter = l2_tn_info->hash_map[ret];
4858 	l2_tn_info->hash_map[ret] = NULL;
4859 
4860 	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4861 	rte_free(l2_tn_filter);
4862 
4863 	return 0;
4864 }
4865 
4866 /* Add l2 tunnel filter */
4867 int
4868 txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
4869 			       struct txgbe_l2_tunnel_conf *l2_tunnel,
4870 			       bool restore)
4871 {
4872 	int ret;
4873 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4874 	struct txgbe_l2_tn_key key;
4875 	struct txgbe_l2_tn_filter *node;
4876 
4877 	if (!restore) {
4878 		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4879 		key.tn_id = l2_tunnel->tunnel_id;
4880 
4881 		node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
4882 
4883 		if (node) {
4884 			PMD_DRV_LOG(ERR,
4885 				    "The L2 tunnel filter already exists!");
4886 			return -EINVAL;
4887 		}
4888 
4889 		node = rte_zmalloc("txgbe_l2_tn",
4890 				   sizeof(struct txgbe_l2_tn_filter),
4891 				   0);
4892 		if (!node)
4893 			return -ENOMEM;
4894 
4895 		rte_memcpy(&node->key,
4896 				 &key,
4897 				 sizeof(struct txgbe_l2_tn_key));
4898 		node->pool = l2_tunnel->pool;
4899 		ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
4900 		if (ret < 0) {
4901 			rte_free(node);
4902 			return ret;
4903 		}
4904 	}
4905 
4906 	switch (l2_tunnel->l2_tunnel_type) {
4907 	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
4908 		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
4909 		break;
4910 	default:
4911 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4912 		ret = -EINVAL;
4913 		break;
4914 	}
4915 
4916 	if (!restore && ret < 0)
4917 		(void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4918 
4919 	return ret;
4920 }
4921 
4922 /* Delete l2 tunnel filter */
4923 int
4924 txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
4925 			       struct txgbe_l2_tunnel_conf *l2_tunnel)
4926 {
4927 	int ret;
4928 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4929 	struct txgbe_l2_tn_key key;
4930 
4931 	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4932 	key.tn_id = l2_tunnel->tunnel_id;
4933 	ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4934 	if (ret < 0)
4935 		return ret;
4936 
4937 	switch (l2_tunnel->l2_tunnel_type) {
4938 	case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
4939 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
4940 		break;
4941 	default:
4942 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4943 		ret = -EINVAL;
4944 		break;
4945 	}
4946 
4947 	return ret;
4948 }
4949 
4950 static int
4951 txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
4952 {
4953 	int ret = 0;
4954 	uint32_t ctrl;
4955 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4956 
4957 	ctrl = rd32(hw, TXGBE_POOLCTL);
4958 	ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
4959 	if (en)
4960 		ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
4961 	wr32(hw, TXGBE_POOLCTL, ctrl);
4962 
4963 	return ret;
4964 }
4965 
4966 /* Add UDP tunneling port */
4967 static int
4968 txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
4969 			      struct rte_eth_udp_tunnel *udp_tunnel)
4970 {
4971 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4972 	int ret = 0;
4973 
4974 	if (udp_tunnel == NULL)
4975 		return -EINVAL;
4976 
4977 	switch (udp_tunnel->prot_type) {
4978 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
4979 		if (udp_tunnel->udp_port == 0) {
4980 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
4981 			ret = -EINVAL;
4982 			break;
4983 		}
4984 		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
4985 		break;
4986 	case RTE_ETH_TUNNEL_TYPE_GENEVE:
4987 		if (udp_tunnel->udp_port == 0) {
4988 			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
4989 			ret = -EINVAL;
4990 			break;
4991 		}
4992 		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
4993 		break;
4994 	case RTE_ETH_TUNNEL_TYPE_TEREDO:
4995 		if (udp_tunnel->udp_port == 0) {
4996 			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
4997 			ret = -EINVAL;
4998 			break;
4999 		}
5000 		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
5001 		break;
5002 	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
5003 		if (udp_tunnel->udp_port == 0) {
5004 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
5005 			ret = -EINVAL;
5006 			break;
5007 		}
5008 		wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
5009 		break;
5010 	default:
5011 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5012 		ret = -EINVAL;
5013 		break;
5014 	}
5015 
5016 	txgbe_flush(hw);
5017 
5018 	return ret;
5019 }
5020 
5021 /* Remove UDP tunneling port */
5022 static int
5023 txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
5024 			      struct rte_eth_udp_tunnel *udp_tunnel)
5025 {
5026 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5027 	int ret = 0;
5028 	uint16_t cur_port;
5029 
5030 	if (udp_tunnel == NULL)
5031 		return -EINVAL;
5032 
5033 	switch (udp_tunnel->prot_type) {
5034 	case RTE_ETH_TUNNEL_TYPE_VXLAN:
5035 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
5036 		if (cur_port != udp_tunnel->udp_port) {
5037 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5038 					udp_tunnel->udp_port);
5039 			ret = -EINVAL;
5040 			break;
5041 		}
5042 		wr32(hw, TXGBE_VXLANPORT, 0);
5043 		break;
5044 	case RTE_ETH_TUNNEL_TYPE_GENEVE:
5045 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
5046 		if (cur_port != udp_tunnel->udp_port) {
5047 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5048 					udp_tunnel->udp_port);
5049 			ret = -EINVAL;
5050 			break;
5051 		}
5052 		wr32(hw, TXGBE_GENEVEPORT, 0);
5053 		break;
5054 	case RTE_ETH_TUNNEL_TYPE_TEREDO:
5055 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
5056 		if (cur_port != udp_tunnel->udp_port) {
5057 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5058 					udp_tunnel->udp_port);
5059 			ret = -EINVAL;
5060 			break;
5061 		}
5062 		wr32(hw, TXGBE_TEREDOPORT, 0);
5063 		break;
5064 	case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
5065 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
5066 		if (cur_port != udp_tunnel->udp_port) {
5067 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5068 					udp_tunnel->udp_port);
5069 			ret = -EINVAL;
5070 			break;
5071 		}
5072 		wr32(hw, TXGBE_VXLANPORTGPE, 0);
5073 		break;
5074 	default:
5075 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5076 		ret = -EINVAL;
5077 		break;
5078 	}
5079 
5080 	txgbe_flush(hw);
5081 
5082 	return ret;
5083 }
5084 
5085 /* restore n-tuple filter */
5086 static inline void
5087 txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
5088 {
5089 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5090 	struct txgbe_5tuple_filter *node;
5091 
5092 	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
5093 		txgbe_inject_5tuple_filter(dev, node);
5094 	}
5095 }
5096 
5097 /* restore ethernet type filter */
5098 static inline void
5099 txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
5100 {
5101 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5102 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5103 	int i;
5104 
5105 	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
5106 		if (filter_info->ethertype_mask & (1 << i)) {
5107 			wr32(hw, TXGBE_ETFLT(i),
5108 					filter_info->ethertype_filters[i].etqf);
5109 			wr32(hw, TXGBE_ETCLS(i),
5110 					filter_info->ethertype_filters[i].etqs);
5111 			txgbe_flush(hw);
5112 		}
5113 	}
5114 }
5115 
5116 /* restore SYN filter */
5117 static inline void
5118 txgbe_syn_filter_restore(struct rte_eth_dev *dev)
5119 {
5120 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5121 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5122 	uint32_t synqf;
5123 
5124 	synqf = filter_info->syn_info;
5125 
5126 	if (synqf & TXGBE_SYNCLS_ENA) {
5127 		wr32(hw, TXGBE_SYNCLS, synqf);
5128 		txgbe_flush(hw);
5129 	}
5130 }
5131 
5132 /* restore L2 tunnel filter */
5133 static inline void
5134 txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
5135 {
5136 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5137 	struct txgbe_l2_tn_filter *node;
5138 	struct txgbe_l2_tunnel_conf l2_tn_conf;
5139 
5140 	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
5141 		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
5142 		l2_tn_conf.tunnel_id      = node->key.tn_id;
5143 		l2_tn_conf.pool           = node->pool;
5144 		(void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
5145 	}
5146 }
5147 
5148 /* restore rss filter */
5149 static inline void
5150 txgbe_rss_filter_restore(struct rte_eth_dev *dev)
5151 {
5152 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5153 
5154 	if (filter_info->rss_info.conf.queue_num)
5155 		txgbe_config_rss_filter(dev,
5156 			&filter_info->rss_info, TRUE);
5157 }
5158 
5159 static int
5160 txgbe_filter_restore(struct rte_eth_dev *dev)
5161 {
5162 	txgbe_ntuple_filter_restore(dev);
5163 	txgbe_ethertype_filter_restore(dev);
5164 	txgbe_syn_filter_restore(dev);
5165 	txgbe_fdir_filter_restore(dev);
5166 	txgbe_l2_tn_filter_restore(dev);
5167 	txgbe_rss_filter_restore(dev);
5168 
5169 	return 0;
5170 }
5171 
5172 static void
5173 txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
5174 {
5175 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5176 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5177 
5178 	if (l2_tn_info->e_tag_en)
5179 		(void)txgbe_e_tag_enable(hw);
5180 
5181 	if (l2_tn_info->e_tag_fwd_en)
5182 		(void)txgbe_e_tag_forwarding_en_dis(dev, 1);
5183 
5184 	(void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
5185 }
5186 
5187 /* remove all the n-tuple filters */
5188 void
5189 txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
5190 {
5191 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5192 	struct txgbe_5tuple_filter *p_5tuple;
5193 
5194 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
5195 		txgbe_remove_5tuple_filter(dev, p_5tuple);
5196 }
5197 
5198 /* remove all the ether type filters */
5199 void
5200 txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
5201 {
5202 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5203 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5204 	int i;
5205 
5206 	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
5207 		if (filter_info->ethertype_mask & (1 << i) &&
5208 		    !filter_info->ethertype_filters[i].conf) {
5209 			(void)txgbe_ethertype_filter_remove(filter_info,
5210 							    (uint8_t)i);
5211 			wr32(hw, TXGBE_ETFLT(i), 0);
5212 			wr32(hw, TXGBE_ETCLS(i), 0);
5213 			txgbe_flush(hw);
5214 		}
5215 	}
5216 }
5217 
5218 /* remove the SYN filter */
5219 void
5220 txgbe_clear_syn_filter(struct rte_eth_dev *dev)
5221 {
5222 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5223 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5224 
5225 	if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
5226 		filter_info->syn_info = 0;
5227 
5228 		wr32(hw, TXGBE_SYNCLS, 0);
5229 		txgbe_flush(hw);
5230 	}
5231 }
5232 
5233 /* remove all the L2 tunnel filters */
5234 int
5235 txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
5236 {
5237 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5238 	struct txgbe_l2_tn_filter *l2_tn_filter;
5239 	struct txgbe_l2_tunnel_conf l2_tn_conf;
5240 	int ret = 0;
5241 
5242 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
5243 		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
5244 		l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
5245 		l2_tn_conf.pool           = l2_tn_filter->pool;
5246 		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
5247 		if (ret < 0)
5248 			return ret;
5249 	}
5250 
5251 	return 0;
5252 }
5253 
5254 static const struct eth_dev_ops txgbe_eth_dev_ops = {
5255 	.dev_configure              = txgbe_dev_configure,
5256 	.dev_infos_get              = txgbe_dev_info_get,
5257 	.dev_start                  = txgbe_dev_start,
5258 	.dev_stop                   = txgbe_dev_stop,
5259 	.dev_set_link_up            = txgbe_dev_set_link_up,
5260 	.dev_set_link_down          = txgbe_dev_set_link_down,
5261 	.dev_close                  = txgbe_dev_close,
5262 	.dev_reset                  = txgbe_dev_reset,
5263 	.promiscuous_enable         = txgbe_dev_promiscuous_enable,
5264 	.promiscuous_disable        = txgbe_dev_promiscuous_disable,
5265 	.allmulticast_enable        = txgbe_dev_allmulticast_enable,
5266 	.allmulticast_disable       = txgbe_dev_allmulticast_disable,
5267 	.link_update                = txgbe_dev_link_update,
5268 	.stats_get                  = txgbe_dev_stats_get,
5269 	.xstats_get                 = txgbe_dev_xstats_get,
5270 	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
5271 	.stats_reset                = txgbe_dev_stats_reset,
5272 	.xstats_reset               = txgbe_dev_xstats_reset,
5273 	.xstats_get_names           = txgbe_dev_xstats_get_names,
5274 	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
5275 	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
5276 	.fw_version_get             = txgbe_fw_version_get,
5277 	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
5278 	.mtu_set                    = txgbe_dev_mtu_set,
5279 	.vlan_filter_set            = txgbe_vlan_filter_set,
5280 	.vlan_tpid_set              = txgbe_vlan_tpid_set,
5281 	.vlan_offload_set           = txgbe_vlan_offload_set,
5282 	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
5283 	.rx_queue_start	            = txgbe_dev_rx_queue_start,
5284 	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
5285 	.tx_queue_start	            = txgbe_dev_tx_queue_start,
5286 	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
5287 	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
5288 	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
5289 	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
5290 	.rx_queue_release           = txgbe_dev_rx_queue_release,
5291 	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
5292 	.tx_queue_release           = txgbe_dev_tx_queue_release,
5293 	.dev_led_on                 = txgbe_dev_led_on,
5294 	.dev_led_off                = txgbe_dev_led_off,
5295 	.flow_ctrl_get              = txgbe_flow_ctrl_get,
5296 	.flow_ctrl_set              = txgbe_flow_ctrl_set,
5297 	.priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
5298 	.mac_addr_add               = txgbe_add_rar,
5299 	.mac_addr_remove            = txgbe_remove_rar,
5300 	.mac_addr_set               = txgbe_set_default_mac_addr,
5301 	.uc_hash_table_set          = txgbe_uc_hash_table_set,
5302 	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
5303 	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
5304 	.reta_update                = txgbe_dev_rss_reta_update,
5305 	.reta_query                 = txgbe_dev_rss_reta_query,
5306 	.rss_hash_update            = txgbe_dev_rss_hash_update,
5307 	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
5308 	.flow_ops_get               = txgbe_dev_flow_ops_get,
5309 	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
5310 	.rxq_info_get               = txgbe_rxq_info_get,
5311 	.txq_info_get               = txgbe_txq_info_get,
5312 	.timesync_enable            = txgbe_timesync_enable,
5313 	.timesync_disable           = txgbe_timesync_disable,
5314 	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
5315 	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
5316 	.get_reg                    = txgbe_get_regs,
5317 	.get_eeprom_length          = txgbe_get_eeprom_length,
5318 	.get_eeprom                 = txgbe_get_eeprom,
5319 	.set_eeprom                 = txgbe_set_eeprom,
5320 	.get_module_info            = txgbe_get_module_info,
5321 	.get_module_eeprom          = txgbe_get_module_eeprom,
5322 	.get_dcb_info               = txgbe_dev_get_dcb_info,
5323 	.timesync_adjust_time       = txgbe_timesync_adjust_time,
5324 	.timesync_read_time         = txgbe_timesync_read_time,
5325 	.timesync_write_time        = txgbe_timesync_write_time,
5326 	.udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
5327 	.udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
5328 	.tm_ops_get                 = txgbe_tm_ops_get,
5329 	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
5330 };
5331 
5332 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
5333 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
5334 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
5335 RTE_PMD_REGISTER_PARAM_STRING(net_txgbe,
5336 			      TXGBE_DEVARG_BP_AUTO "=<0|1>"
5337 			      TXGBE_DEVARG_KR_POLL "=<0|1>"
5338 			      TXGBE_DEVARG_KR_PRESENT "=<0|1>"
5339 			      TXGBE_DEVARG_KX_SGMII "=<0|1>"
5340 			      TXGBE_DEVARG_FFE_SET "=<0-4>"
5341 			      TXGBE_DEVARG_FFE_MAIN "=<uint16>"
5342 			      TXGBE_DEVARG_FFE_PRE "=<uint16>"
5343 			      TXGBE_DEVARG_FFE_POST "=<uint16>");
5344 
5345 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_init, init, NOTICE);
5346 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_driver, driver, NOTICE);
5347 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_bp, bp, NOTICE);
5348 
5349 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
5350 	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_rx, rx, DEBUG);
5351 #endif
5352 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
5353 	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx, tx, DEBUG);
5354 #endif
5355 
5356 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
5357 	RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx_free, tx_free, DEBUG);
5358 #endif
5359