xref: /dpdk/drivers/net/txgbe/txgbe_ethdev.c (revision ff43cd79)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4 
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <rte_common.h>
10 #include <ethdev_pci.h>
11 
12 #include <rte_interrupts.h>
13 #include <rte_log.h>
14 #include <rte_debug.h>
15 #include <rte_pci.h>
16 #include <rte_memory.h>
17 #include <rte_eal.h>
18 #include <rte_alarm.h>
19 #include <rte_kvargs.h>
20 
21 #include "txgbe_logs.h"
22 #include "base/txgbe.h"
23 #include "txgbe_ethdev.h"
24 #include "txgbe_rxtx.h"
25 #include "txgbe_regs_group.h"
26 
27 static const struct reg_info txgbe_regs_general[] = {
28 	{TXGBE_RST, 1, 1, "TXGBE_RST"},
29 	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
30 	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
31 	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
32 	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
33 	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
34 	{0, 0, 0, ""}
35 };
36 
37 static const struct reg_info txgbe_regs_nvm[] = {
38 	{0, 0, 0, ""}
39 };
40 
41 static const struct reg_info txgbe_regs_interrupt[] = {
42 	{0, 0, 0, ""}
43 };
44 
45 static const struct reg_info txgbe_regs_fctl_others[] = {
46 	{0, 0, 0, ""}
47 };
48 
49 static const struct reg_info txgbe_regs_rxdma[] = {
50 	{0, 0, 0, ""}
51 };
52 
53 static const struct reg_info txgbe_regs_rx[] = {
54 	{0, 0, 0, ""}
55 };
56 
57 static const struct reg_info txgbe_regs_tx[] = {
58 	{0, 0, 0, ""}
59 };
60 
61 static const struct reg_info txgbe_regs_wakeup[] = {
62 	{0, 0, 0, ""}
63 };
64 
65 static const struct reg_info txgbe_regs_dcb[] = {
66 	{0, 0, 0, ""}
67 };
68 
69 static const struct reg_info txgbe_regs_mac[] = {
70 	{0, 0, 0, ""}
71 };
72 
73 static const struct reg_info txgbe_regs_diagnostic[] = {
74 	{0, 0, 0, ""},
75 };
76 
77 /* PF registers */
78 static const struct reg_info *txgbe_regs_others[] = {
79 				txgbe_regs_general,
80 				txgbe_regs_nvm,
81 				txgbe_regs_interrupt,
82 				txgbe_regs_fctl_others,
83 				txgbe_regs_rxdma,
84 				txgbe_regs_rx,
85 				txgbe_regs_tx,
86 				txgbe_regs_wakeup,
87 				txgbe_regs_dcb,
88 				txgbe_regs_mac,
89 				txgbe_regs_diagnostic,
90 				NULL};
91 
92 static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
93 static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
94 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
95 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
96 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
97 static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
98 static int txgbe_dev_close(struct rte_eth_dev *dev);
99 static int txgbe_dev_link_update(struct rte_eth_dev *dev,
100 				int wait_to_complete);
101 static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
102 static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
103 static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
104 					uint16_t queue);
105 
106 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
107 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
108 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
109 static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
110 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
111 static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
112 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
113 				      struct rte_intr_handle *handle);
114 static void txgbe_dev_interrupt_handler(void *param);
115 static void txgbe_dev_interrupt_delayed_handler(void *param);
116 static void txgbe_configure_msix(struct rte_eth_dev *dev);
117 
118 static int txgbe_filter_restore(struct rte_eth_dev *dev);
119 static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
120 
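/* Helpers for the per-queue VLAN-strip shadow bitmap: each word of
 * 'bitmap' tracks (sizeof(word) * NBBY) queues, so queue q maps to
 * word q / bits-per-word and bit q % bits-per-word within that word.
 */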
121 #define TXGBE_SET_HWSTRIP(h, q) do {\
122 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
123 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
124 		(h)->bitmap[idx] |= 1 << bit;\
125 	} while (0)
126 
127 #define TXGBE_CLEAR_HWSTRIP(h, q) do {\
128 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
129 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
130 		(h)->bitmap[idx] &= ~(1 << bit);\
131 	} while (0)
132 
133 #define TXGBE_GET_HWSTRIP(h, q, r) do {\
134 		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
135 		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
136 		(r) = (h)->bitmap[idx] >> bit & 1;\
137 	} while (0)
138 
139 /*
140  * The set of PCI devices this driver supports
141  */
142 static const struct rte_pci_id pci_id_txgbe_map[] = {
143 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_SP1000) },
144 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820) },
145 	{ .vendor_id = 0, /* sentinel */ },
146 };
147 
148 static const struct rte_eth_desc_lim rx_desc_lim = {
149 	.nb_max = TXGBE_RING_DESC_MAX,
150 	.nb_min = TXGBE_RING_DESC_MIN,
151 	.nb_align = TXGBE_RXD_ALIGN,
152 };
153 
154 static const struct rte_eth_desc_lim tx_desc_lim = {
155 	.nb_max = TXGBE_RING_DESC_MAX,
156 	.nb_min = TXGBE_RING_DESC_MIN,
157 	.nb_align = TXGBE_TXD_ALIGN,
158 	.nb_seg_max = TXGBE_TX_MAX_SEG,
159 	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
160 };
161 
162 static const struct eth_dev_ops txgbe_eth_dev_ops;
163 
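/* Each xstat entry pairs a display name with the byte offset of the
 * corresponding counter inside struct txgbe_hw_stats.
 */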
164 #define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
165 #define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
166 static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
167 	/* MNG RxTx */
168 	HW_XSTAT(mng_bmc2host_packets),
169 	HW_XSTAT(mng_host2bmc_packets),
170 	/* Basic RxTx */
171 	HW_XSTAT(rx_packets),
172 	HW_XSTAT(tx_packets),
173 	HW_XSTAT(rx_bytes),
174 	HW_XSTAT(tx_bytes),
175 	HW_XSTAT(rx_total_bytes),
176 	HW_XSTAT(rx_total_packets),
177 	HW_XSTAT(tx_total_packets),
178 	HW_XSTAT(rx_total_missed_packets),
179 	HW_XSTAT(rx_broadcast_packets),
180 	HW_XSTAT(rx_multicast_packets),
181 	HW_XSTAT(rx_management_packets),
182 	HW_XSTAT(tx_management_packets),
183 	HW_XSTAT(rx_management_dropped),
184 
185 	/* Basic Error */
186 	HW_XSTAT(rx_crc_errors),
187 	HW_XSTAT(rx_illegal_byte_errors),
188 	HW_XSTAT(rx_error_bytes),
189 	HW_XSTAT(rx_mac_short_packet_dropped),
190 	HW_XSTAT(rx_length_errors),
191 	HW_XSTAT(rx_undersize_errors),
192 	HW_XSTAT(rx_fragment_errors),
193 	HW_XSTAT(rx_oversize_errors),
194 	HW_XSTAT(rx_jabber_errors),
195 	HW_XSTAT(rx_l3_l4_xsum_error),
196 	HW_XSTAT(mac_local_errors),
197 	HW_XSTAT(mac_remote_errors),
198 
199 	/* Flow Director */
200 	HW_XSTAT(flow_director_added_filters),
201 	HW_XSTAT(flow_director_removed_filters),
202 	HW_XSTAT(flow_director_filter_add_errors),
203 	HW_XSTAT(flow_director_filter_remove_errors),
204 	HW_XSTAT(flow_director_matched_filters),
205 	HW_XSTAT(flow_director_missed_filters),
206 
207 	/* FCoE */
208 	HW_XSTAT(rx_fcoe_crc_errors),
209 	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
210 	HW_XSTAT(rx_fcoe_dropped),
211 	HW_XSTAT(rx_fcoe_packets),
212 	HW_XSTAT(tx_fcoe_packets),
213 	HW_XSTAT(rx_fcoe_bytes),
214 	HW_XSTAT(tx_fcoe_bytes),
215 	HW_XSTAT(rx_fcoe_no_ddp),
216 	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),
217 
218 	/* MACSEC */
219 	HW_XSTAT(tx_macsec_pkts_untagged),
220 	HW_XSTAT(tx_macsec_pkts_encrypted),
221 	HW_XSTAT(tx_macsec_pkts_protected),
222 	HW_XSTAT(tx_macsec_octets_encrypted),
223 	HW_XSTAT(tx_macsec_octets_protected),
224 	HW_XSTAT(rx_macsec_pkts_untagged),
225 	HW_XSTAT(rx_macsec_pkts_badtag),
226 	HW_XSTAT(rx_macsec_pkts_nosci),
227 	HW_XSTAT(rx_macsec_pkts_unknownsci),
228 	HW_XSTAT(rx_macsec_octets_decrypted),
229 	HW_XSTAT(rx_macsec_octets_validated),
230 	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
231 	HW_XSTAT(rx_macsec_sc_pkts_delayed),
232 	HW_XSTAT(rx_macsec_sc_pkts_late),
233 	HW_XSTAT(rx_macsec_sa_pkts_ok),
234 	HW_XSTAT(rx_macsec_sa_pkts_invalid),
235 	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
236 	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
237 	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
238 
239 	/* MAC RxTx */
240 	HW_XSTAT(rx_size_64_packets),
241 	HW_XSTAT(rx_size_65_to_127_packets),
242 	HW_XSTAT(rx_size_128_to_255_packets),
243 	HW_XSTAT(rx_size_256_to_511_packets),
244 	HW_XSTAT(rx_size_512_to_1023_packets),
245 	HW_XSTAT(rx_size_1024_to_max_packets),
246 	HW_XSTAT(tx_size_64_packets),
247 	HW_XSTAT(tx_size_65_to_127_packets),
248 	HW_XSTAT(tx_size_128_to_255_packets),
249 	HW_XSTAT(tx_size_256_to_511_packets),
250 	HW_XSTAT(tx_size_512_to_1023_packets),
251 	HW_XSTAT(tx_size_1024_to_max_packets),
252 
253 	/* Flow Control */
254 	HW_XSTAT(tx_xon_packets),
255 	HW_XSTAT(rx_xon_packets),
256 	HW_XSTAT(tx_xoff_packets),
257 	HW_XSTAT(rx_xoff_packets),
258 
259 	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
260 	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
261 	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
262 	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
263 };
264 
265 #define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
266 			   sizeof(rte_txgbe_stats_strings[0]))
267 
268 /* Per-priority statistics */
269 #define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
270 static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
271 	UP_XSTAT(rx_up_packets),
272 	UP_XSTAT(tx_up_packets),
273 	UP_XSTAT(rx_up_bytes),
274 	UP_XSTAT(tx_up_bytes),
275 	UP_XSTAT(rx_up_drop_packets),
276 
277 	UP_XSTAT(tx_up_xon_packets),
278 	UP_XSTAT(rx_up_xon_packets),
279 	UP_XSTAT(tx_up_xoff_packets),
280 	UP_XSTAT(rx_up_xoff_packets),
281 	UP_XSTAT(rx_up_dropped),
282 	UP_XSTAT(rx_up_mbuf_alloc_errors),
283 	UP_XSTAT(tx_up_xon2off_packets),
284 };
285 
286 #define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
287 			   sizeof(rte_txgbe_up_strings[0]))
288 
289 /* Per-queue statistics */
290 #define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
291 static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
292 	QP_XSTAT(rx_qp_packets),
293 	QP_XSTAT(tx_qp_packets),
294 	QP_XSTAT(rx_qp_bytes),
295 	QP_XSTAT(tx_qp_bytes),
296 	QP_XSTAT(rx_qp_mc_packets),
297 };
298 
299 #define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
300 			   sizeof(rte_txgbe_qp_strings[0]))
301 
302 static inline int
303 txgbe_is_sfp(struct txgbe_hw *hw)
304 {
305 	switch (hw->phy.type) {
306 	case txgbe_phy_sfp_avago:
307 	case txgbe_phy_sfp_ftl:
308 	case txgbe_phy_sfp_intel:
309 	case txgbe_phy_sfp_unknown:
310 	case txgbe_phy_sfp_tyco_passive:
311 	case txgbe_phy_sfp_unknown_passive:
312 		return 1;
313 	default:
314 		return 0;
315 	}
316 }
317 
318 static inline int32_t
319 txgbe_pf_reset_hw(struct txgbe_hw *hw)
320 {
321 	uint32_t ctrl_ext;
322 	int32_t status;
323 
324 	status = hw->mac.reset_hw(hw);
325 
326 	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
327 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
328 	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
329 	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
330 	txgbe_flush(hw);
331 
332 	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
333 		status = 0;
334 	return status;
335 }
336 
337 static inline void
338 txgbe_enable_intr(struct rte_eth_dev *dev)
339 {
340 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
341 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
342 
343 	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
344 	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
345 	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
346 	txgbe_flush(hw);
347 }
348 
349 static void
350 txgbe_disable_intr(struct txgbe_hw *hw)
351 {
352 	PMD_INIT_FUNC_TRACE();
353 
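	/* Clear IENMISC, then set every IMS bit: writing all-ones to the
	 * interrupt mask set registers masks (disables) all queue
	 * interrupts, mirroring the IMC writes in txgbe_enable_intr().
	 */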
354 	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
355 	wr32(hw, TXGBE_IMS(0), TXGBE_IMS_MASK);
356 	wr32(hw, TXGBE_IMS(1), TXGBE_IMS_MASK);
357 	txgbe_flush(hw);
358 }
359 
360 static int
361 txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
362 				  uint16_t queue_id,
363 				  uint8_t stat_idx,
364 				  uint8_t is_rx)
365 {
366 	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
367 	struct txgbe_stat_mappings *stat_mappings =
368 		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
369 	uint32_t qsmr_mask = 0;
370 	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
371 	uint32_t q_map;
372 	uint8_t n, offset;
373 
374 	if (hw->mac.type != txgbe_mac_raptor)
375 		return -ENOSYS;
376 
377 	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
378 		return -EIO;
379 
380 	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
381 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
382 		     queue_id, stat_idx);
383 
384 	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
385 	if (n >= TXGBE_NB_STAT_MAPPING) {
386 		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
387 		return -EIO;
388 	}
389 	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
390 
391 	/* Now clear any previous stat_idx set */
392 	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
393 	if (!is_rx)
394 		stat_mappings->tqsm[n] &= ~clearing_mask;
395 	else
396 		stat_mappings->rqsm[n] &= ~clearing_mask;
397 
398 	q_map = (uint32_t)stat_idx;
399 	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
400 	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
401 	if (!is_rx)
402 		stat_mappings->tqsm[n] |= qsmr_mask;
403 	else
404 		stat_mappings->rqsm[n] |= qsmr_mask;
405 
406 	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
407 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
408 		     queue_id, stat_idx);
409 	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
410 		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
411 	return 0;
412 }
413 
414 static void
415 txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
416 {
417 	int i;
418 	u8 bwgp;
419 	struct txgbe_dcb_tc_config *tc;
420 
421 	UNREFERENCED_PARAMETER(hw);
422 
423 	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
424 	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
425 	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
426 	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
427 		tc = &dcb_config->tc_config[i];
428 		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
429 		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
430 		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
431 		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
432 		tc->pfc = txgbe_dcb_pfc_disabled;
433 	}
434 
435 	/* Initialize default user priority to TC mapping, UPx->TC0 */
436 	tc = &dcb_config->tc_config[0];
437 	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
438 	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
439 	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
440 		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
441 		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
442 	}
443 	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
444 	dcb_config->pfc_mode_enable = false;
445 	dcb_config->vt_mode = true;
446 	dcb_config->round_robin_enable = false;
447 	/* support all DCB capabilities */
448 	dcb_config->support.capabilities = 0xFF;
449 }
450 
451 /*
452  * Ensure that all locks are released before first NVM or PHY access
453  */
454 static void
455 txgbe_swfw_lock_reset(struct txgbe_hw *hw)
456 {
457 	uint16_t mask;
458 
459 	/*
460 	 * These ones are more tricky since they are common to all ports; but
461 	 * swfw_sync retries last long enough (1s) to be almost sure that if
462 	 * the lock cannot be taken, it is due to an improper lock of the
463 	 * semaphore.
464 	 */
465 	mask = TXGBE_MNGSEM_SWPHY |
466 	       TXGBE_MNGSEM_SWMBX |
467 	       TXGBE_MNGSEM_SWFLASH;
468 	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
469 		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
470 
471 	hw->mac.release_swfw_sync(hw, mask);
472 }
473 
474 static int
475 txgbe_handle_devarg(__rte_unused const char *key, const char *value,
476 		  void *extra_args)
477 {
478 	uint16_t *n = extra_args;
479 
480 	if (value == NULL || extra_args == NULL)
481 		return -EINVAL;
482 
483 	*n = (uint16_t)strtoul(value, NULL, 10);
484 	if (*n == USHRT_MAX && errno == ERANGE)
485 		return -1;
486 
487 	return 0;
488 }
489 
490 static void
491 txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
492 {
493 	struct rte_kvargs *kvlist;
494 	u16 auto_neg = 1;
495 	u16 poll = 0;
496 	u16 present = 1;
497 	u16 sgmii = 0;
498 	u16 ffe_set = 0;
499 	u16 ffe_main = 27;
500 	u16 ffe_pre = 8;
501 	u16 ffe_post = 44;
502 
503 	if (devargs == NULL)
504 		goto null;
505 
506 	kvlist = rte_kvargs_parse(devargs->args, txgbe_valid_arguments);
507 	if (kvlist == NULL)
508 		goto null;
509 
510 	rte_kvargs_process(kvlist, TXGBE_DEVARG_BP_AUTO,
511 			   &txgbe_handle_devarg, &auto_neg);
512 	rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_POLL,
513 			   &txgbe_handle_devarg, &poll);
514 	rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_PRESENT,
515 			   &txgbe_handle_devarg, &present);
516 	rte_kvargs_process(kvlist, TXGBE_DEVARG_KX_SGMII,
517 			   &txgbe_handle_devarg, &sgmii);
518 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_SET,
519 			   &txgbe_handle_devarg, &ffe_set);
520 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_MAIN,
521 			   &txgbe_handle_devarg, &ffe_main);
522 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_PRE,
523 			   &txgbe_handle_devarg, &ffe_pre);
524 	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_POST,
525 			   &txgbe_handle_devarg, &ffe_post);
526 	rte_kvargs_free(kvlist);
527 
528 null:
529 	hw->devarg.auto_neg = auto_neg;
530 	hw->devarg.poll = poll;
531 	hw->devarg.present = present;
532 	hw->devarg.sgmii = sgmii;
533 	hw->phy.ffe_set = ffe_set;
534 	hw->phy.ffe_main = ffe_main;
535 	hw->phy.ffe_pre = ffe_pre;
536 	hw->phy.ffe_post = ffe_post;
537 }
538 
539 static int
540 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
541 {
542 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
543 	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
544 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
545 	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
546 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
547 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
548 	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
549 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
550 	const struct rte_memzone *mz;
551 	uint32_t ctrl_ext;
552 	uint16_t csum;
553 	int err, i, ret;
554 
555 	PMD_INIT_FUNC_TRACE();
556 
557 	eth_dev->dev_ops = &txgbe_eth_dev_ops;
558 	eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
559 	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
560 	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
561 	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
562 	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
563 	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
564 
565 	/*
566 	 * For secondary processes, we don't initialise any further as primary
567 	 * has already done this work. Only check we don't need a different
568 	 * RX and TX function.
569 	 */
570 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
571 		struct txgbe_tx_queue *txq;
572 		/* TX queue function in primary, set by last queue initialized;
573 		 * Tx queue may not be initialized by the primary process
574 		 */
575 		if (eth_dev->data->tx_queues) {
576 			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
577 			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
578 			txgbe_set_tx_function(eth_dev, txq);
579 		} else {
580 			/* Use default TX function if we get here */
581 			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
582 				     "Using default TX function.");
583 		}
584 
585 		txgbe_set_rx_function(eth_dev);
586 
587 		return 0;
588 	}
589 
590 	rte_eth_copy_pci_info(eth_dev, pci_dev);
591 
592 	/* Vendor and Device ID need to be set before init of shared code */
593 	hw->device_id = pci_dev->id.device_id;
594 	hw->vendor_id = pci_dev->id.vendor_id;
595 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
596 	hw->allow_unsupported_sfp = 1;
597 
598 	/* Reserve memory for interrupt status block */
599 	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
600 		16, TXGBE_ALIGN, SOCKET_ID_ANY);
601 	if (mz == NULL)
602 		return -ENOMEM;
603 
604 	hw->isb_dma = TMZ_PADDR(mz);
605 	hw->isb_mem = TMZ_VADDR(mz);
606 
607 	txgbe_parse_devargs(hw, pci_dev->device.devargs);
608 	/* Initialize the shared code (base driver) */
609 	err = txgbe_init_shared_code(hw);
610 	if (err != 0) {
611 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
612 		return -EIO;
613 	}
614 
615 	/* Unlock any pending hardware semaphore */
616 	txgbe_swfw_lock_reset(hw);
617 
618 #ifdef RTE_LIB_SECURITY
619 	/* Initialize security_ctx only for primary process*/
620 	if (txgbe_ipsec_ctx_create(eth_dev))
621 		return -ENOMEM;
622 #endif
623 
624 	/* Initialize DCB configuration*/
625 	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
626 	txgbe_dcb_init(hw, dcb_config);
627 
628 	/* Get Hardware Flow Control setting */
629 	hw->fc.requested_mode = txgbe_fc_full;
630 	hw->fc.current_mode = txgbe_fc_full;
631 	hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
632 	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
633 		hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
634 		hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
635 	}
636 	hw->fc.send_xon = 1;
637 
638 	err = hw->rom.init_params(hw);
639 	if (err != 0) {
640 		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
641 		return -EIO;
642 	}
643 
644 	/* Make sure we have a good EEPROM before we read from it */
645 	err = hw->rom.validate_checksum(hw, &csum);
646 	if (err != 0) {
647 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
648 		return -EIO;
649 	}
650 
651 	err = hw->mac.init_hw(hw);
652 
653 	/*
654 	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
655 	 * is called too soon after the kernel driver unbinding/binding occurs.
656 	 * The failure occurs in txgbe_identify_phy() for all devices,
657 	 * but for non-copper devices, txgbe_identify_sfp_module() is
658 	 * also called. See txgbe_identify_phy(). The reason for the
659 	 * failure is not known, and only occurs when virtualisation features
660 	 * are disabled in the BIOS. A delay of 200ms was found to be enough by
661 	 * trial-and-error, and is doubled to be safe.
662 	 */
663 	if (err && hw->phy.media_type == txgbe_media_type_copper) {
664 		rte_delay_ms(200);
665 		err = hw->mac.init_hw(hw);
666 	}
667 
668 	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
669 		err = 0;
670 
671 	if (err == TXGBE_ERR_EEPROM_VERSION) {
672 		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
673 			     "LOM.  Please be aware there may be issues associated "
674 			     "with your hardware.");
675 		PMD_INIT_LOG(ERR, "If you are experiencing problems "
676 			     "please contact your hardware representative "
677 			     "who provided you with this hardware.");
678 	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
679 		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
680 	}
681 	if (err) {
682 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
683 		return -EIO;
684 	}
685 
686 	/* Reset the hw statistics */
687 	txgbe_dev_stats_reset(eth_dev);
688 
689 	/* disable interrupt */
690 	txgbe_disable_intr(hw);
691 
692 	/* Allocate memory for storing MAC addresses */
693 	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
694 					       hw->mac.num_rar_entries, 0);
695 	if (eth_dev->data->mac_addrs == NULL) {
696 		PMD_INIT_LOG(ERR,
697 			     "Failed to allocate %u bytes needed to store "
698 			     "MAC addresses",
699 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
700 		return -ENOMEM;
701 	}
702 
703 	/* Copy the permanent MAC address */
704 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
705 			&eth_dev->data->mac_addrs[0]);
706 
707 	/* Allocate memory for storing hash filter MAC addresses */
708 	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
709 			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
710 	if (eth_dev->data->hash_mac_addrs == NULL) {
711 		PMD_INIT_LOG(ERR,
712 			     "Failed to allocate %d bytes needed to store MAC addresses",
713 			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
714 		return -ENOMEM;
715 	}
716 
717 	/* initialize the vfta */
718 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
719 
720 	/* initialize the hw strip bitmap*/
721 	memset(hwstrip, 0, sizeof(*hwstrip));
722 
723 	/* initialize PF if max_vfs not zero */
724 	ret = txgbe_pf_host_init(eth_dev);
725 	if (ret) {
726 		rte_free(eth_dev->data->mac_addrs);
727 		eth_dev->data->mac_addrs = NULL;
728 		rte_free(eth_dev->data->hash_mac_addrs);
729 		eth_dev->data->hash_mac_addrs = NULL;
730 		return ret;
731 	}
732 
733 	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
734 	/* let hardware know driver is loaded */
735 	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
736 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
737 	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
738 	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
739 	txgbe_flush(hw);
740 
741 	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
742 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
743 			     (int)hw->mac.type, (int)hw->phy.type,
744 			     (int)hw->phy.sfp_type);
745 	else
746 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
747 			     (int)hw->mac.type, (int)hw->phy.type);
748 
749 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
750 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
751 		     pci_dev->id.device_id);
752 
753 	rte_intr_callback_register(intr_handle,
754 				   txgbe_dev_interrupt_handler, eth_dev);
755 
756 	/* enable uio/vfio intr/eventfd mapping */
757 	rte_intr_enable(intr_handle);
758 
759 	/* enable support intr */
760 	txgbe_enable_intr(eth_dev);
761 
762 	/* initialize filter info */
763 	memset(filter_info, 0,
764 	       sizeof(struct txgbe_filter_info));
765 
766 	/* initialize 5tuple filter list */
767 	TAILQ_INIT(&filter_info->fivetuple_list);
768 
769 	/* initialize flow director filter list & hash */
770 	txgbe_fdir_filter_init(eth_dev);
771 
772 	/* initialize l2 tunnel filter list & hash */
773 	txgbe_l2_tn_filter_init(eth_dev);
774 
775 	/* initialize flow filter lists */
776 	txgbe_filterlist_init();
777 
778 	/* initialize bandwidth configuration info */
779 	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
780 
781 	/* initialize Traffic Manager configuration */
782 	txgbe_tm_conf_init(eth_dev);
783 
784 	return 0;
785 }
786 
787 static int
788 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
789 {
790 	PMD_INIT_FUNC_TRACE();
791 
792 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
793 		return 0;
794 
795 	txgbe_dev_close(eth_dev);
796 
797 	return 0;
798 }
799 
800 static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
801 {
802 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
803 	struct txgbe_5tuple_filter *p_5tuple;
804 
805 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
806 		TAILQ_REMOVE(&filter_info->fivetuple_list,
807 			     p_5tuple,
808 			     entries);
809 		rte_free(p_5tuple);
810 	}
811 	memset(filter_info->fivetuple_mask, 0,
812 	       sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
813 
814 	return 0;
815 }
816 
817 static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
818 {
819 	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
820 	struct txgbe_fdir_filter *fdir_filter;
821 
822 	if (fdir_info->hash_map)
823 		rte_free(fdir_info->hash_map);
824 	if (fdir_info->hash_handle)
825 		rte_hash_free(fdir_info->hash_handle);
826 
827 	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
828 		TAILQ_REMOVE(&fdir_info->fdir_list,
829 			     fdir_filter,
830 			     entries);
831 		rte_free(fdir_filter);
832 	}
833 
834 	return 0;
835 }
836 
837 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
838 {
839 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
840 	struct txgbe_l2_tn_filter *l2_tn_filter;
841 
842 	if (l2_tn_info->hash_map)
843 		rte_free(l2_tn_info->hash_map);
844 	if (l2_tn_info->hash_handle)
845 		rte_hash_free(l2_tn_info->hash_handle);
846 
847 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
848 		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
849 			     l2_tn_filter,
850 			     entries);
851 		rte_free(l2_tn_filter);
852 	}
853 
854 	return 0;
855 }
856 
857 static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
858 {
859 	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
860 	char fdir_hash_name[RTE_HASH_NAMESIZE];
861 	struct rte_hash_parameters fdir_hash_params = {
862 		.name = fdir_hash_name,
863 		.entries = TXGBE_MAX_FDIR_FILTER_NUM,
864 		.key_len = sizeof(struct txgbe_atr_input),
865 		.hash_func = rte_hash_crc,
866 		.hash_func_init_val = 0,
867 		.socket_id = rte_socket_id(),
868 	};
869 
870 	TAILQ_INIT(&fdir_info->fdir_list);
871 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
872 		 "fdir_%s", TDEV_NAME(eth_dev));
873 	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
874 	if (!fdir_info->hash_handle) {
875 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
876 		return -EINVAL;
877 	}
878 	fdir_info->hash_map = rte_zmalloc("txgbe",
879 					  sizeof(struct txgbe_fdir_filter *) *
880 					  TXGBE_MAX_FDIR_FILTER_NUM,
881 					  0);
882 	if (!fdir_info->hash_map) {
883 		PMD_INIT_LOG(ERR,
884 			     "Failed to allocate memory for fdir hash map!");
885 		return -ENOMEM;
886 	}
887 	fdir_info->mask_added = FALSE;
888 
889 	return 0;
890 }
891 
892 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
893 {
894 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
895 	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
896 	struct rte_hash_parameters l2_tn_hash_params = {
897 		.name = l2_tn_hash_name,
898 		.entries = TXGBE_MAX_L2_TN_FILTER_NUM,
899 		.key_len = sizeof(struct txgbe_l2_tn_key),
900 		.hash_func = rte_hash_crc,
901 		.hash_func_init_val = 0,
902 		.socket_id = rte_socket_id(),
903 	};
904 
905 	TAILQ_INIT(&l2_tn_info->l2_tn_list);
906 	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
907 		 "l2_tn_%s", TDEV_NAME(eth_dev));
908 	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
909 	if (!l2_tn_info->hash_handle) {
910 		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
911 		return -EINVAL;
912 	}
913 	l2_tn_info->hash_map = rte_zmalloc("txgbe",
914 				   sizeof(struct txgbe_l2_tn_filter *) *
915 				   TXGBE_MAX_L2_TN_FILTER_NUM,
916 				   0);
917 	if (!l2_tn_info->hash_map) {
918 		PMD_INIT_LOG(ERR,
919 			"Failed to allocate memory for L2 TN hash map!");
920 		return -ENOMEM;
921 	}
922 	l2_tn_info->e_tag_en = FALSE;
923 	l2_tn_info->e_tag_fwd_en = FALSE;
924 	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
925 
926 	return 0;
927 }
928 
929 static int
930 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
931 		struct rte_pci_device *pci_dev)
932 {
933 	struct rte_eth_dev *pf_ethdev;
934 	struct rte_eth_devargs eth_da;
935 	int retval;
936 
937 	if (pci_dev->device.devargs) {
938 		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
939 				&eth_da);
940 		if (retval)
941 			return retval;
942 	} else {
943 		memset(&eth_da, 0, sizeof(eth_da));
944 	}
945 
946 	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
947 			sizeof(struct txgbe_adapter),
948 			eth_dev_pci_specific_init, pci_dev,
949 			eth_txgbe_dev_init, NULL);
950 
951 	if (retval || eth_da.nb_representor_ports < 1)
952 		return retval;
953 	if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
954 		return -ENOTSUP;
955 
956 	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
957 	if (pf_ethdev == NULL)
958 		return -ENODEV;
959 
960 	return 0;
961 }
962 
963 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
964 {
965 	struct rte_eth_dev *ethdev;
966 
967 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
968 	if (!ethdev)
969 		return -ENODEV;
970 
971 	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
972 }
973 
974 static struct rte_pci_driver rte_txgbe_pmd = {
975 	.id_table = pci_id_txgbe_map,
976 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
977 		     RTE_PCI_DRV_INTR_LSC,
978 	.probe = eth_txgbe_pci_probe,
979 	.remove = eth_txgbe_pci_remove,
980 };
981 
982 static int
983 txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
984 {
985 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
986 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
987 	uint32_t vfta;
988 	uint32_t vid_idx;
989 	uint32_t vid_bit;
990 
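	/* The VLAN filter table is an array of 32-bit words: bits 5..11 of
	 * the VLAN ID select the word, bits 0..4 select the bit within it.
	 */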
991 	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
992 	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
993 	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
994 	if (on)
995 		vfta |= vid_bit;
996 	else
997 		vfta &= ~vid_bit;
998 	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);
999 
1000 	/* update local VFTA copy */
1001 	shadow_vfta->vfta[vid_idx] = vfta;
1002 
1003 	return 0;
1004 }
1005 
1006 static void
1007 txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1008 {
1009 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1010 	struct txgbe_rx_queue *rxq;
1011 	bool restart;
1012 	uint32_t rxcfg, rxbal, rxbah;
1013 
1014 	if (on)
1015 		txgbe_vlan_hw_strip_enable(dev, queue);
1016 	else
1017 		txgbe_vlan_hw_strip_disable(dev, queue);
1018 
1019 	rxq = dev->data->rx_queues[queue];
1020 	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
1021 	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
1022 	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
1023 	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
1024 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
1025 			!(rxcfg & TXGBE_RXCFG_VLAN);
1026 		rxcfg |= TXGBE_RXCFG_VLAN;
1027 	} else {
1028 		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
1029 			(rxcfg & TXGBE_RXCFG_VLAN);
1030 		rxcfg &= ~TXGBE_RXCFG_VLAN;
1031 	}
1032 	rxcfg &= ~TXGBE_RXCFG_ENA;
1033 
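	/* The ring must be stopped to change VLAN stripping; the base-address
	 * registers read above are written back afterwards since the
	 * stop/start cycle may reset them.
	 */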
1034 	if (restart) {
1035 		/* set vlan strip for ring */
1036 		txgbe_dev_rx_queue_stop(dev, queue);
1037 		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
1038 		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
1039 		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
1040 		txgbe_dev_rx_queue_start(dev, queue);
1041 	}
1042 }
1043 
1044 static int
1045 txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1046 		    enum rte_vlan_type vlan_type,
1047 		    uint16_t tpid)
1048 {
1049 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1050 	int ret = 0;
1051 	uint32_t portctrl, vlan_ext, qinq;
1052 
1053 	portctrl = rd32(hw, TXGBE_PORTCTL);
1054 
1055 	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
1056 	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
1057 	switch (vlan_type) {
1058 	case ETH_VLAN_TYPE_INNER:
1059 		if (vlan_ext) {
1060 			wr32m(hw, TXGBE_VLANCTL,
1061 				TXGBE_VLANCTL_TPID_MASK,
1062 				TXGBE_VLANCTL_TPID(tpid));
1063 			wr32m(hw, TXGBE_DMATXCTRL,
1064 				TXGBE_DMATXCTRL_TPID_MASK,
1065 				TXGBE_DMATXCTRL_TPID(tpid));
1066 		} else {
1067 			ret = -ENOTSUP;
1068 			PMD_DRV_LOG(ERR, "Inner type is not supported"
1069 				    " by single VLAN");
1070 		}
1071 
1072 		if (qinq) {
1073 			wr32m(hw, TXGBE_TAGTPID(0),
1074 				TXGBE_TAGTPID_LSB_MASK,
1075 				TXGBE_TAGTPID_LSB(tpid));
1076 		}
1077 		break;
1078 	case ETH_VLAN_TYPE_OUTER:
1079 		if (vlan_ext) {
1080 			/* Only the high 16 bits are valid */
1081 			wr32m(hw, TXGBE_EXTAG,
1082 				TXGBE_EXTAG_VLAN_MASK,
1083 				TXGBE_EXTAG_VLAN(tpid));
1084 		} else {
1085 			wr32m(hw, TXGBE_VLANCTL,
1086 				TXGBE_VLANCTL_TPID_MASK,
1087 				TXGBE_VLANCTL_TPID(tpid));
1088 			wr32m(hw, TXGBE_DMATXCTRL,
1089 				TXGBE_DMATXCTRL_TPID_MASK,
1090 				TXGBE_DMATXCTRL_TPID(tpid));
1091 		}
1092 
1093 		if (qinq) {
1094 			wr32m(hw, TXGBE_TAGTPID(0),
1095 				TXGBE_TAGTPID_MSB_MASK,
1096 				TXGBE_TAGTPID_MSB(tpid));
1097 		}
1098 		break;
1099 	default:
1100 		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1101 		return -EINVAL;
1102 	}
1103 
1104 	return ret;
1105 }
1106 
1107 void
1108 txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1109 {
1110 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1111 	uint32_t vlnctrl;
1112 
1113 	PMD_INIT_FUNC_TRACE();
1114 
1115 	/* Filter Table Disable */
1116 	vlnctrl = rd32(hw, TXGBE_VLANCTL);
1117 	vlnctrl &= ~TXGBE_VLANCTL_VFE;
1118 	wr32(hw, TXGBE_VLANCTL, vlnctrl);
1119 }
1120 
1121 void
1122 txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1123 {
1124 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1125 	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
1126 	uint32_t vlnctrl;
1127 	uint16_t i;
1128 
1129 	PMD_INIT_FUNC_TRACE();
1130 
1131 	/* Filter Table Enable */
1132 	vlnctrl = rd32(hw, TXGBE_VLANCTL);
1133 	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
1134 	vlnctrl |= TXGBE_VLANCTL_VFE;
1135 	wr32(hw, TXGBE_VLANCTL, vlnctrl);
1136 
1137 	/* write whatever is in local vfta copy */
1138 	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
1139 		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
1140 }
1141 
1142 void
1143 txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1144 {
1145 	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
1146 	struct txgbe_rx_queue *rxq;
1147 
1148 	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
1149 		return;
1150 
1151 	if (on)
1152 		TXGBE_SET_HWSTRIP(hwstrip, queue);
1153 	else
1154 		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1155 
1156 	if (queue >= dev->data->nb_rx_queues)
1157 		return;
1158 
1159 	rxq = dev->data->rx_queues[queue];
1160 
1161 	if (on) {
1162 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1163 		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1164 	} else {
1165 		rxq->vlan_flags = PKT_RX_VLAN;
1166 		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1167 	}
1168 }
1169 
1170 static void
1171 txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1172 {
1173 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1174 	uint32_t ctrl;
1175 
1176 	PMD_INIT_FUNC_TRACE();
1177 
1178 	ctrl = rd32(hw, TXGBE_RXCFG(queue));
1179 	ctrl &= ~TXGBE_RXCFG_VLAN;
1180 	wr32(hw, TXGBE_RXCFG(queue), ctrl);
1181 
1182 	/* record the per-queue HW strip setting */
1183 	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1184 }
1185 
1186 static void
1187 txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1188 {
1189 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1190 	uint32_t ctrl;
1191 
1192 	PMD_INIT_FUNC_TRACE();
1193 
1194 	ctrl = rd32(hw, TXGBE_RXCFG(queue));
1195 	ctrl |= TXGBE_RXCFG_VLAN;
1196 	wr32(hw, TXGBE_RXCFG(queue), ctrl);
1197 
1198 	/* record the per-queue HW strip setting */
1199 	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
1200 }
1201 
1202 static void
1203 txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
1204 {
1205 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1206 	uint32_t ctrl;
1207 
1208 	PMD_INIT_FUNC_TRACE();
1209 
1210 	ctrl = rd32(hw, TXGBE_PORTCTL);
1211 	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
1212 	ctrl &= ~TXGBE_PORTCTL_QINQ;
1213 	wr32(hw, TXGBE_PORTCTL, ctrl);
1214 }
1215 
1216 static void
1217 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
1218 {
1219 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1220 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1221 	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
1222 	uint32_t ctrl;
1223 
1224 	PMD_INIT_FUNC_TRACE();
1225 
1226 	ctrl  = rd32(hw, TXGBE_PORTCTL);
1227 	ctrl |= TXGBE_PORTCTL_VLANEXT;
1228 	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
1229 	    txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
1230 		ctrl |= TXGBE_PORTCTL_QINQ;
1231 	wr32(hw, TXGBE_PORTCTL, ctrl);
1232 }
1233 
1234 void
1235 txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
1236 {
1237 	struct txgbe_rx_queue *rxq;
1238 	uint16_t i;
1239 
1240 	PMD_INIT_FUNC_TRACE();
1241 
1242 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1243 		rxq = dev->data->rx_queues[i];
1244 
1245 		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1246 			txgbe_vlan_strip_queue_set(dev, i, 1);
1247 		else
1248 			txgbe_vlan_strip_queue_set(dev, i, 0);
1249 	}
1250 }
1251 
1252 void
1253 txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
1254 {
1255 	uint16_t i;
1256 	struct rte_eth_rxmode *rxmode;
1257 	struct txgbe_rx_queue *rxq;
1258 
1259 	if (mask & ETH_VLAN_STRIP_MASK) {
1260 		rxmode = &dev->data->dev_conf.rxmode;
1261 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1262 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
1263 				rxq = dev->data->rx_queues[i];
1264 				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1265 			}
1266 		else
1267 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
1268 				rxq = dev->data->rx_queues[i];
1269 				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1270 			}
1271 	}
1272 }
1273 
1274 static int
1275 txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
1276 {
1277 	struct rte_eth_rxmode *rxmode;
1278 	rxmode = &dev->data->dev_conf.rxmode;
1279 
1280 	if (mask & ETH_VLAN_STRIP_MASK)
1281 		txgbe_vlan_hw_strip_config(dev);
1282 
1283 	if (mask & ETH_VLAN_FILTER_MASK) {
1284 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1285 			txgbe_vlan_hw_filter_enable(dev);
1286 		else
1287 			txgbe_vlan_hw_filter_disable(dev);
1288 	}
1289 
1290 	if (mask & ETH_VLAN_EXTEND_MASK) {
1291 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
1292 			txgbe_vlan_hw_extend_enable(dev);
1293 		else
1294 			txgbe_vlan_hw_extend_disable(dev);
1295 	}
1296 
1297 	return 0;
1298 }
1299 
1300 static int
1301 txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1302 {
1303 	txgbe_config_vlan_strip_on_all_queues(dev, mask);
1304 
1305 	txgbe_vlan_offload_config(dev, mask);
1306 
1307 	return 0;
1308 }
1309 
1310 static void
1311 txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1312 {
1313 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1314 	/* VLANCTL: enable vlan filtering and allow all vlan tags through */
1315 	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
1316 
1317 	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
1318 	wr32(hw, TXGBE_VLANCTL, vlanctrl);
1319 }
1320 
1321 static int
1322 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
1323 {
1324 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1325 
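	/* Pick a VMDq pool layout that leaves nb_rx_q queues per pool:
	 * 1 or 2 queues fit 64 pools, 4 queues require 32 pools.
	 */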
1326 	switch (nb_rx_q) {
1327 	case 1:
1328 	case 2:
1329 		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
1330 		break;
1331 	case 4:
1332 		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
1333 		break;
1334 	default:
1335 		return -EINVAL;
1336 	}
1337 
1338 	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
1339 		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1340 	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
1341 		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1342 	return 0;
1343 }
1344 
1345 static int
1346 txgbe_check_mq_mode(struct rte_eth_dev *dev)
1347 {
1348 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1349 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
1350 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
1351 
1352 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1353 		/* check multi-queue mode */
1354 		switch (dev_conf->rxmode.mq_mode) {
1355 		case ETH_MQ_RX_VMDQ_DCB:
1356 			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
1357 			break;
1358 		case ETH_MQ_RX_VMDQ_DCB_RSS:
1359 			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
1360 			PMD_INIT_LOG(ERR, "SRIOV active,"
1361 					" unsupported mq_mode rx %d.",
1362 					dev_conf->rxmode.mq_mode);
1363 			return -EINVAL;
1364 		case ETH_MQ_RX_RSS:
1365 		case ETH_MQ_RX_VMDQ_RSS:
1366 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
1367 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
1368 				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
1369 					PMD_INIT_LOG(ERR, "SRIOV is active,"
1370 						" invalid queue number"
1371 						" values are 1, 2 or 4.");
1372 						" value are 1, 2 or 4.");
1373 					return -EINVAL;
1374 				}
1375 			break;
1376 		case ETH_MQ_RX_VMDQ_ONLY:
1377 		case ETH_MQ_RX_NONE:
1378 			/* if no mq mode is configured, use the default scheme */
1379 			dev->data->dev_conf.rxmode.mq_mode =
1380 				ETH_MQ_RX_VMDQ_ONLY;
1381 			break;
1382 		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
1383 			/* SRIOV only works in VMDq enable mode */
1384 			PMD_INIT_LOG(ERR, "SRIOV is active,"
1385 					" wrong mq_mode rx %d.",
1386 					dev_conf->rxmode.mq_mode);
1387 			return -EINVAL;
1388 		}
1389 
1390 		switch (dev_conf->txmode.mq_mode) {
1391 		case ETH_MQ_TX_VMDQ_DCB:
1392 			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
1393 			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1394 			break;
1395 		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
1396 			dev->data->dev_conf.txmode.mq_mode =
1397 				ETH_MQ_TX_VMDQ_ONLY;
1398 			break;
1399 		}
1400 
1401 		/* check valid queue number */
1402 		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
1403 		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
1404 			PMD_INIT_LOG(ERR, "SRIOV is active,"
1405 					" nb_rx_q=%d nb_tx_q=%d queue number"
1406 					" must be less than or equal to %d.",
1407 					nb_rx_q, nb_tx_q,
1408 					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
1409 			return -EINVAL;
1410 		}
1411 	} else {
1412 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
1413 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
1414 					  " not supported.");
1415 			return -EINVAL;
1416 		}
1417 		/* check configuration for VMDq+DCB mode */
1418 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
1419 			const struct rte_eth_vmdq_dcb_conf *conf;
1420 
1421 			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1422 				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
1423 						TXGBE_VMDQ_DCB_NB_QUEUES);
1424 				return -EINVAL;
1425 			}
1426 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
1427 			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1428 			       conf->nb_queue_pools == ETH_32_POOLS)) {
1429 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1430 						" nb_queue_pools must be %d or %d.",
1431 						ETH_16_POOLS, ETH_32_POOLS);
1432 				return -EINVAL;
1433 			}
1434 		}
1435 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1436 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
1437 
1438 			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
1439 				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
1440 						 TXGBE_VMDQ_DCB_NB_QUEUES);
1441 				return -EINVAL;
1442 			}
1443 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1444 			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
1445 			       conf->nb_queue_pools == ETH_32_POOLS)) {
1446 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
1447 						" nb_queue_pools != %d and"
1448 						" nb_queue_pools != %d.",
1449 						ETH_16_POOLS, ETH_32_POOLS);
1450 				return -EINVAL;
1451 			}
1452 		}
1453 
1454 		/* For DCB mode check our configuration before we go further */
1455 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
1456 			const struct rte_eth_dcb_rx_conf *conf;
1457 
1458 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
1459 			if (!(conf->nb_tcs == ETH_4_TCS ||
1460 			       conf->nb_tcs == ETH_8_TCS)) {
1461 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1462 						" and nb_tcs != %d.",
1463 						ETH_4_TCS, ETH_8_TCS);
1464 				return -EINVAL;
1465 			}
1466 		}
1467 
1468 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
1469 			const struct rte_eth_dcb_tx_conf *conf;
1470 
1471 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
1472 			if (!(conf->nb_tcs == ETH_4_TCS ||
1473 			       conf->nb_tcs == ETH_8_TCS)) {
1474 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
1475 						" and nb_tcs != %d.",
1476 						ETH_4_TCS, ETH_8_TCS);
1477 				return -EINVAL;
1478 			}
1479 		}
1480 	}
1481 	return 0;
1482 }
1483 
1484 static int
1485 txgbe_dev_configure(struct rte_eth_dev *dev)
1486 {
1487 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1488 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1489 	int ret;
1490 
1491 	PMD_INIT_FUNC_TRACE();
1492 
1493 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1494 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
1495 
1496 	/* multiple queue mode checking */
1497 	ret  = txgbe_check_mq_mode(dev);
1498 	if (ret != 0) {
1499 		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
1500 			    ret);
1501 		return ret;
1502 	}
1503 
1504 	/* set flag to update link status after init */
1505 	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
1506 
1507 	/*
1508 	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
1509 	 * allocation preconditions, it will be reset.
1510 	 */
1511 	adapter->rx_bulk_alloc_allowed = true;
1512 
1513 	return 0;
1514 }
1515 
1516 static void
1517 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
1518 {
1519 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1520 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
1521 	uint32_t gpie;
1522 
1523 	gpie = rd32(hw, TXGBE_GPIOINTEN);
1524 	gpie |= TXGBE_GPIOBIT_6;
1525 	wr32(hw, TXGBE_GPIOINTEN, gpie);
1526 	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
1527 	intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
1528 }
1529 
1530 int
1531 txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
1532 			uint16_t tx_rate, uint64_t q_msk)
1533 {
1534 	struct txgbe_hw *hw;
1535 	struct txgbe_vf_info *vfinfo;
1536 	struct rte_eth_link link;
1537 	uint8_t  nb_q_per_pool;
1538 	uint32_t queue_stride;
1539 	uint32_t queue_idx, idx = 0, vf_idx;
1540 	uint32_t queue_end;
1541 	uint16_t total_rate = 0;
1542 	struct rte_pci_device *pci_dev;
1543 	int ret;
1544 
1545 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1546 	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
1547 	if (ret < 0)
1548 		return ret;
1549 
1550 	if (vf >= pci_dev->max_vfs)
1551 		return -EINVAL;
1552 
1553 	if (tx_rate > link.link_speed)
1554 		return -EINVAL;
1555 
1556 	if (q_msk == 0)
1557 		return 0;
1558 
1559 	hw = TXGBE_DEV_HW(dev);
1560 	vfinfo = *(TXGBE_DEV_VFDATA(dev));
1561 	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
1562 	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
1563 	queue_idx = vf * queue_stride;
1564 	queue_end = queue_idx + nb_q_per_pool - 1;
1565 	if (queue_end >= hw->mac.max_tx_queues)
1566 		return -EINVAL;
1567 
1568 	if (vfinfo) {
1569 		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
1570 			if (vf_idx == vf)
1571 				continue;
1572 			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
1573 				idx++)
1574 				total_rate += vfinfo[vf_idx].tx_rate[idx];
1575 		}
1576 	} else {
1577 		return -EINVAL;
1578 	}
1579 
1580 	/* Store tx_rate for this vf. */
1581 	for (idx = 0; idx < nb_q_per_pool; idx++) {
1582 		if (((uint64_t)0x1 << idx) & q_msk) {
1583 			if (vfinfo[vf].tx_rate[idx] != tx_rate)
1584 				vfinfo[vf].tx_rate[idx] = tx_rate;
1585 			total_rate += tx_rate;
1586 		}
1587 	}
1588 
1589 	if (total_rate > dev->data->dev_link.link_speed) {
1590 		/* Reset the stored TX rate of the VF if it would cause the
1591 		 * link speed to be exceeded.
1592 		 */
1593 		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
1594 		return -EINVAL;
1595 	}
1596 
1597 	/* Set ARBTXRATE of each queue/pool for vf X */
1598 	for (; queue_idx <= queue_end; queue_idx++) {
1599 		if (0x1 & q_msk)
1600 			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
1601 		q_msk = q_msk >> 1;
1602 	}
1603 
1604 	return 0;
1605 }
1606 
1607 /*
1608  * Configure device link speed and setup link.
1609  * It returns 0 on success.
1610  */
1611 static int
1612 txgbe_dev_start(struct rte_eth_dev *dev)
1613 {
1614 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1615 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1616 	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1617 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1618 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1619 	uint32_t intr_vector = 0;
1620 	int err;
1621 	bool link_up = false, negotiate = 0;
1622 	uint32_t speed = 0;
1623 	uint32_t allowed_speeds = 0;
1624 	int mask = 0;
1625 	int status;
1626 	uint16_t vf, idx;
1627 	uint32_t *link_speeds;
1628 	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
1629 
1630 	PMD_INIT_FUNC_TRACE();
1631 
1632 	/* TXGBE devices don't support:
1633 	 *    - half duplex (checked afterwards for valid speeds)
1634 	 *    - fixed speed: TODO implement
1635 	 */
1636 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
1637 		PMD_INIT_LOG(ERR,
1638 		"Invalid link_speeds for port %u, fixed speed not supported",
1639 				dev->data->port_id);
1640 		return -EINVAL;
1641 	}
1642 
1643 	/* Stop the link setup handler before resetting the HW. */
1644 	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1645 
1646 	/* disable uio/vfio intr/eventfd mapping */
1647 	rte_intr_disable(intr_handle);
1648 
1649 	/* stop adapter */
1650 	hw->adapter_stopped = 0;
1651 	txgbe_stop_hw(hw);
1652 
1653 	/* reinitialize adapter
1654 	 * this calls reset and start
1655 	 */
1656 	hw->nb_rx_queues = dev->data->nb_rx_queues;
1657 	hw->nb_tx_queues = dev->data->nb_tx_queues;
1658 	status = txgbe_pf_reset_hw(hw);
1659 	if (status != 0)
1660 		return -1;
1661 	hw->mac.start_hw(hw);
1662 	hw->mac.get_link_status = true;
1663 
1664 	/* configure PF module if SRIOV enabled */
1665 	txgbe_pf_host_configure(dev);
1666 
1667 	txgbe_dev_phy_intr_setup(dev);
1668 
1669 	/* check and configure queue intr-vector mapping */
1670 	if ((rte_intr_cap_multiple(intr_handle) ||
1671 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
1672 	    dev->data->dev_conf.intr_conf.rxq != 0) {
1673 		intr_vector = dev->data->nb_rx_queues;
1674 		if (rte_intr_efd_enable(intr_handle, intr_vector))
1675 			return -1;
1676 	}
1677 
1678 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1679 		intr_handle->intr_vec =
1680 			rte_zmalloc("intr_vec",
1681 				    dev->data->nb_rx_queues * sizeof(int), 0);
1682 		if (intr_handle->intr_vec == NULL) {
1683 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1684 				     " intr_vec", dev->data->nb_rx_queues);
1685 			return -ENOMEM;
1686 		}
1687 	}
1688 
1689 	/* configure MSI-X for sleep until Rx interrupt */
1690 	txgbe_configure_msix(dev);
1691 
1692 	/* initialize transmission unit */
1693 	txgbe_dev_tx_init(dev);
1694 
1695 	/* This can fail when allocating mbufs for descriptor rings */
1696 	err = txgbe_dev_rx_init(dev);
1697 	if (err) {
1698 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1699 		goto error;
1700 	}
1701 
1702 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1703 		ETH_VLAN_EXTEND_MASK;
1704 	err = txgbe_vlan_offload_config(dev, mask);
1705 	if (err) {
1706 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1707 		goto error;
1708 	}
1709 
1710 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1711 		/* Enable vlan filtering for VMDq */
1712 		txgbe_vmdq_vlan_hw_filter_enable(dev);
1713 	}
1714 
1715 	/* Configure DCB hw */
1716 	txgbe_configure_pb(dev);
1717 	txgbe_configure_port(dev);
1718 	txgbe_configure_dcb(dev);
1719 
1720 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1721 		err = txgbe_fdir_configure(dev);
1722 		if (err)
1723 			goto error;
1724 	}
1725 
1726 	/* Restore vf rate limit */
1727 	if (vfinfo != NULL) {
1728 		for (vf = 0; vf < pci_dev->max_vfs; vf++)
1729 			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
1730 				if (vfinfo[vf].tx_rate[idx] != 0)
1731 					txgbe_set_vf_rate_limit(dev, vf,
1732 						vfinfo[vf].tx_rate[idx],
1733 						1 << idx);
1734 	}
1735 
1736 	err = txgbe_dev_rxtx_start(dev);
1737 	if (err < 0) {
1738 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
1739 		goto error;
1740 	}
1741 
1742 	/* Skip link setup if loopback mode is enabled. */
1743 	if (hw->mac.type == txgbe_mac_raptor &&
1744 	    dev->data->dev_conf.lpbk_mode)
1745 		goto skip_link_setup;
1746 
1747 	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
1748 		err = hw->mac.setup_sfp(hw);
1749 		if (err)
1750 			goto error;
1751 	}
1752 
1753 	if (hw->phy.media_type == txgbe_media_type_copper) {
1754 		/* Turn on the copper */
1755 		hw->phy.set_phy_power(hw, true);
1756 	} else {
1757 		/* Turn on the laser */
1758 		hw->mac.enable_tx_laser(hw);
1759 	}
1760 
1761 	if ((hw->subsystem_device_id & 0xFF) != TXGBE_DEV_ID_KR_KX_KX4)
1762 		err = hw->mac.check_link(hw, &speed, &link_up, 0);
1763 	if (err)
1764 		goto error;
1765 	dev->data->dev_link.link_status = link_up;
1766 
1767 	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
1768 	if (err)
1769 		goto error;
1770 
1771 	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
1772 			ETH_LINK_SPEED_10G;
1773 
1774 	link_speeds = &dev->data->dev_conf.link_speeds;
1775 	if (*link_speeds & ~allowed_speeds) {
1776 		PMD_INIT_LOG(ERR, "Invalid link setting");
1777 		goto error;
1778 	}
1779 
1780 	speed = 0x0;
1781 	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
1782 		speed = (TXGBE_LINK_SPEED_100M_FULL |
1783 			 TXGBE_LINK_SPEED_1GB_FULL |
1784 			 TXGBE_LINK_SPEED_10GB_FULL);
1785 	} else {
1786 		if (*link_speeds & ETH_LINK_SPEED_10G)
1787 			speed |= TXGBE_LINK_SPEED_10GB_FULL;
1788 		if (*link_speeds & ETH_LINK_SPEED_5G)
1789 			speed |= TXGBE_LINK_SPEED_5GB_FULL;
1790 		if (*link_speeds & ETH_LINK_SPEED_2_5G)
1791 			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
1792 		if (*link_speeds & ETH_LINK_SPEED_1G)
1793 			speed |= TXGBE_LINK_SPEED_1GB_FULL;
1794 		if (*link_speeds & ETH_LINK_SPEED_100M)
1795 			speed |= TXGBE_LINK_SPEED_100M_FULL;
1796 	}
1797 
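	/*
	 * Illustrative example (not an exhaustive mapping): a configuration
	 * of ETH_LINK_SPEED_10G | ETH_LINK_SPEED_1G yields
	 * speed = TXGBE_LINK_SPEED_10GB_FULL | TXGBE_LINK_SPEED_1GB_FULL,
	 * which setup_link() below advertises for negotiation.
	 */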
1798 	err = hw->mac.setup_link(hw, speed, link_up);
1799 	if (err)
1800 		goto error;
1801 
1802 skip_link_setup:
1803 
1804 	if (rte_intr_allow_others(intr_handle)) {
1805 		txgbe_dev_misc_interrupt_setup(dev);
1806 		/* check if lsc interrupt is enabled */
1807 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1808 			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
1809 		else
1810 			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
1811 		txgbe_dev_macsec_interrupt_setup(dev);
1812 		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
1813 	} else {
1814 		rte_intr_callback_unregister(intr_handle,
1815 					     txgbe_dev_interrupt_handler, dev);
1816 		if (dev->data->dev_conf.intr_conf.lsc != 0)
1817 			PMD_INIT_LOG(INFO, "lsc won't be enabled because"
1818 				     " there is no intr multiplex");
1819 	}
1820 
1821 	/* check if rxq interrupt is enabled */
1822 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1823 	    rte_intr_dp_is_en(intr_handle))
1824 		txgbe_dev_rxq_interrupt_setup(dev);
1825 
1826 	/* enable uio/vfio intr/eventfd mapping */
1827 	rte_intr_enable(intr_handle);
1828 
1829 	/* resume interrupts that were enabled before the hw reset */
1830 	txgbe_enable_intr(dev);
1831 	txgbe_l2_tunnel_conf(dev);
1832 	txgbe_filter_restore(dev);
1833 
1834 	if (tm_conf->root && !tm_conf->committed)
1835 		PMD_DRV_LOG(WARNING,
1836 			    "please call hierarchy_commit() "
1837 			    "before starting the port");
1838 
1839 	/*
1840 	 * Update link status right before return, because it may
1841 	 * start link configuration process in a separate thread.
1842 	 */
1843 	txgbe_dev_link_update(dev, 0);
1844 
1845 	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
1846 
1847 	txgbe_read_stats_registers(hw, hw_stats);
1848 	hw->offset_loaded = 1;
1849 
1850 	return 0;
1851 
1852 error:
1853 	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
1854 	txgbe_dev_clear_queues(dev);
1855 	return -EIO;
1856 }
1857 
1858 /*
1859  * Stop device: disable rx and tx functions to allow for reconfiguring.
1860  */
1861 static int
1862 txgbe_dev_stop(struct rte_eth_dev *dev)
1863 {
1864 	struct rte_eth_link link;
1865 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1866 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1867 	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
1868 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1869 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1870 	int vf;
1871 	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
1872 
1873 	if (hw->adapter_stopped)
1874 		return 0;
1875 
1876 	PMD_INIT_FUNC_TRACE();
1877 
1878 	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
1879 
1880 	/* disable interrupts */
1881 	txgbe_disable_intr(hw);
1882 
1883 	/* reset the NIC */
1884 	txgbe_pf_reset_hw(hw);
1885 	hw->adapter_stopped = 0;
1886 
1887 	/* stop adapter */
1888 	txgbe_stop_hw(hw);
1889 
1890 	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
1891 		vfinfo[vf].clear_to_send = false;
1892 
1893 	if (hw->phy.media_type == txgbe_media_type_copper) {
1894 		/* Turn off the copper */
1895 		hw->phy.set_phy_power(hw, false);
1896 	} else {
1897 		/* Turn off the laser */
1898 		hw->mac.disable_tx_laser(hw);
1899 	}
1900 
1901 	txgbe_dev_clear_queues(dev);
1902 
1903 	/* Clear stored conf */
1904 	dev->data->scattered_rx = 0;
1905 	dev->data->lro = 0;
1906 
1907 	/* Clear recorded link status */
1908 	memset(&link, 0, sizeof(link));
1909 	rte_eth_linkstatus_set(dev, &link);
1910 
1911 	if (!rte_intr_allow_others(intr_handle))
1912 		/* restore the default handler */
1913 		rte_intr_callback_register(intr_handle,
1914 					   txgbe_dev_interrupt_handler,
1915 					   (void *)dev);
1916 
1917 	/* Clean datapath event and queue/vec mapping */
1918 	rte_intr_efd_disable(intr_handle);
1919 	if (intr_handle->intr_vec != NULL) {
1920 		rte_free(intr_handle->intr_vec);
1921 		intr_handle->intr_vec = NULL;
1922 	}
1923 
1924 	/* reset hierarchy commit */
1925 	tm_conf->committed = false;
1926 
1927 	adapter->rss_reta_updated = 0;
1928 	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
1929 
1930 	hw->adapter_stopped = true;
1931 	dev->data->dev_started = 0;
1932 
1933 	return 0;
1934 }
1935 
1936 /*
1937  * Set device link up: enable tx.
1938  */
1939 static int
1940 txgbe_dev_set_link_up(struct rte_eth_dev *dev)
1941 {
1942 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1943 
1944 	if (hw->phy.media_type == txgbe_media_type_copper) {
1945 		/* Turn on the copper */
1946 		hw->phy.set_phy_power(hw, true);
1947 	} else {
1948 		/* Turn on the laser */
1949 		hw->mac.enable_tx_laser(hw);
1950 		txgbe_dev_link_update(dev, 0);
1951 	}
1952 
1953 	return 0;
1954 }
1955 
1956 /*
1957  * Set device link down: disable tx.
1958  */
1959 static int
1960 txgbe_dev_set_link_down(struct rte_eth_dev *dev)
1961 {
1962 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1963 
1964 	if (hw->phy.media_type == txgbe_media_type_copper) {
1965 		/* Turn off the copper */
1966 		hw->phy.set_phy_power(hw, false);
1967 	} else {
1968 		/* Turn off the laser */
1969 		hw->mac.disable_tx_laser(hw);
1970 		txgbe_dev_link_update(dev, 0);
1971 	}
1972 
1973 	return 0;
1974 }
1975 
1976 /*
1977  * Reset and stop device.
1978  */
1979 static int
1980 txgbe_dev_close(struct rte_eth_dev *dev)
1981 {
1982 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1983 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1984 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1985 	int retries = 0;
1986 	int ret;
1987 
1988 	PMD_INIT_FUNC_TRACE();
1989 
1990 	txgbe_pf_reset_hw(hw);
1991 
1992 	ret = txgbe_dev_stop(dev);
1993 
1994 	txgbe_dev_free_queues(dev);
1995 
1996 	/* reprogram the RAR[0] in case user changed it. */
1997 	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1998 
1999 	/* Unlock any pending hardware semaphore */
2000 	txgbe_swfw_lock_reset(hw);
2001 
2002 	/* disable uio intr before callback unregister */
2003 	rte_intr_disable(intr_handle);
2004 
2005 	do {
2006 		ret = rte_intr_callback_unregister(intr_handle,
2007 				txgbe_dev_interrupt_handler, dev);
2008 		if (ret >= 0 || ret == -ENOENT) {
2009 			break;
2010 		} else if (ret != -EAGAIN) {
2011 			PMD_INIT_LOG(ERR,
2012 				"intr callback unregister failed: %d",
2013 				ret);
2014 		}
2015 		rte_delay_ms(100);
2016 	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));
2017 
2018 	/* cancel the delayed handler before removing the dev */
2019 	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
2020 
2021 	/* uninitialize PF if max_vfs is not zero */
2022 	txgbe_pf_host_uninit(dev);
2023 
2024 	rte_free(dev->data->mac_addrs);
2025 	dev->data->mac_addrs = NULL;
2026 
2027 	rte_free(dev->data->hash_mac_addrs);
2028 	dev->data->hash_mac_addrs = NULL;
2029 
2030 	/* remove all the fdir filters & hash */
2031 	txgbe_fdir_filter_uninit(dev);
2032 
2033 	/* remove all the L2 tunnel filters & hash */
2034 	txgbe_l2_tn_filter_uninit(dev);
2035 
2036 	/* Remove all ntuple filters of the device */
2037 	txgbe_ntuple_filter_uninit(dev);
2038 
2039 	/* clear all the filters list */
2040 	txgbe_filterlist_flush();
2041 
2042 	/* Remove all Traffic Manager configuration */
2043 	txgbe_tm_conf_uninit(dev);
2044 
2045 #ifdef RTE_LIB_SECURITY
2046 	rte_free(dev->security_ctx);
2047 #endif
2048 
2049 	return ret;
2050 }
2051 
2052 /*
2053  * Reset PF device.
2054  */
2055 static int
2056 txgbe_dev_reset(struct rte_eth_dev *dev)
2057 {
2058 	int ret;
2059 
2060 	/* When a DPDK PMD PF begins to reset the PF port, it should notify
2061 	 * all its VFs so they stay aligned with it. The notification
2062 	 * mechanism is PMD specific; for the txgbe PF it is rather complex.
2063 	 * To avoid unexpected behavior in the VFs, resetting a PF with
2064 	 * SR-IOV activated is currently not supported. It might be added later.
2065 	 */
2066 	if (dev->data->sriov.active)
2067 		return -ENOTSUP;
2068 
2069 	ret = eth_txgbe_dev_uninit(dev);
2070 	if (ret)
2071 		return ret;
2072 
2073 	ret = eth_txgbe_dev_init(dev, NULL);
2074 
2075 	return ret;
2076 }
2077 
2078 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
2079 	{                                                       \
2080 		uint32_t current_counter = rd32(hw, reg);       \
2081 		if (current_counter < last_counter)             \
2082 			current_counter += 0x100000000LL;       \
2083 		if (!hw->offset_loaded)                         \
2084 			last_counter = current_counter;         \
2085 		counter = current_counter - last_counter;       \
2086 		counter &= 0xFFFFFFFFLL;                        \
2087 	}
2088 
2089 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2090 	{                                                                \
2091 		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
2092 		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
2093 		uint64_t current_counter = (current_counter_msb << 32) | \
2094 			current_counter_lsb;                             \
2095 		if (current_counter < last_counter)                      \
2096 			current_counter += 0x1000000000LL;               \
2097 		if (!hw->offset_loaded)                                  \
2098 			last_counter = current_counter;                  \
2099 		counter = current_counter - last_counter;                \
2100 		counter &= 0xFFFFFFFFFLL;                                \
2101 	}
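/*
 * Worked example of the wrap handling above (values are illustrative):
 * with the 36-bit counter, last_counter = 0xFFFFFFF00 and a fresh
 * hardware read of 0x000000080 means the register wrapped, so
 * 0x1000000000 is added first: 0x1000000080 - 0xFFFFFFF00 = 0x180
 * units elapsed since the last read, masked back to 36 bits.
 */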
2102 
2103 void
2104 txgbe_read_stats_registers(struct txgbe_hw *hw,
2105 			   struct txgbe_hw_stats *hw_stats)
2106 {
2107 	unsigned int i;
2108 
2109 	/* QP Stats */
2110 	for (i = 0; i < hw->nb_rx_queues; i++) {
2111 		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
2112 			hw->qp_last[i].rx_qp_packets,
2113 			hw_stats->qp[i].rx_qp_packets);
2114 		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
2115 			hw->qp_last[i].rx_qp_bytes,
2116 			hw_stats->qp[i].rx_qp_bytes);
2117 		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
2118 			hw->qp_last[i].rx_qp_mc_packets,
2119 			hw_stats->qp[i].rx_qp_mc_packets);
2120 	}
2121 
2122 	for (i = 0; i < hw->nb_tx_queues; i++) {
2123 		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
2124 			hw->qp_last[i].tx_qp_packets,
2125 			hw_stats->qp[i].tx_qp_packets);
2126 		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
2127 			hw->qp_last[i].tx_qp_bytes,
2128 			hw_stats->qp[i].tx_qp_bytes);
2129 	}
2130 	/* PB Stats */
2131 	for (i = 0; i < TXGBE_MAX_UP; i++) {
2132 		hw_stats->up[i].rx_up_xon_packets +=
2133 				rd32(hw, TXGBE_PBRXUPXON(i));
2134 		hw_stats->up[i].rx_up_xoff_packets +=
2135 				rd32(hw, TXGBE_PBRXUPXOFF(i));
2136 		hw_stats->up[i].tx_up_xon_packets +=
2137 				rd32(hw, TXGBE_PBTXUPXON(i));
2138 		hw_stats->up[i].tx_up_xoff_packets +=
2139 				rd32(hw, TXGBE_PBTXUPXOFF(i));
2140 		hw_stats->up[i].tx_up_xon2off_packets +=
2141 				rd32(hw, TXGBE_PBTXUPOFF(i));
2142 		hw_stats->up[i].rx_up_dropped +=
2143 				rd32(hw, TXGBE_PBRXMISS(i));
2144 	}
2145 	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
2146 	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
2147 	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
2148 	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
2149 
2150 	/* DMA Stats */
2151 	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
2152 	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
2153 
2154 	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
2155 	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
2156 	hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP);
2157 	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
2158 
2159 	/* MAC Stats */
2160 	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
2161 	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
2162 	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
2163 
2164 	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
2165 	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
2166 	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
2167 
2168 	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
2169 	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
2170 
2171 	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
2172 	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
2173 	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
2174 	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
2175 	hw_stats->rx_size_512_to_1023_packets +=
2176 			rd64(hw, TXGBE_MACRX512TO1023L);
2177 	hw_stats->rx_size_1024_to_max_packets +=
2178 			rd64(hw, TXGBE_MACRX1024TOMAXL);
2179 	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
2180 	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
2181 	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
2182 	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
2183 	hw_stats->tx_size_512_to_1023_packets +=
2184 			rd64(hw, TXGBE_MACTX512TO1023L);
2185 	hw_stats->tx_size_1024_to_max_packets +=
2186 			rd64(hw, TXGBE_MACTX1024TOMAXL);
2187 
2188 	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
2189 	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
2190 	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
2191 
2192 	/* MNG Stats */
2193 	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
2194 	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
2195 	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
2196 	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
2197 
2198 	/* FCoE Stats */
2199 	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
2200 	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
2201 	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
2202 	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
2203 	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
2204 	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
2205 	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
2206 
2207 	/* Flow Director Stats */
2208 	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
2209 	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
2210 	hw_stats->flow_director_added_filters +=
2211 		TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
2212 	hw_stats->flow_director_removed_filters +=
2213 		TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
2214 	hw_stats->flow_director_filter_add_errors +=
2215 		TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
2216 	hw_stats->flow_director_filter_remove_errors +=
2217 		TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
2218 
2219 	/* MACsec Stats */
2220 	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
2221 	hw_stats->tx_macsec_pkts_encrypted +=
2222 			rd32(hw, TXGBE_LSECTX_ENCPKT);
2223 	hw_stats->tx_macsec_pkts_protected +=
2224 			rd32(hw, TXGBE_LSECTX_PROTPKT);
2225 	hw_stats->tx_macsec_octets_encrypted +=
2226 			rd32(hw, TXGBE_LSECTX_ENCOCT);
2227 	hw_stats->tx_macsec_octets_protected +=
2228 			rd32(hw, TXGBE_LSECTX_PROTOCT);
2229 	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
2230 	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
2231 	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
2232 	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
2233 	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
2234 	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
2235 	hw_stats->rx_macsec_sc_pkts_unchecked +=
2236 			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
2237 	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
2238 	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
2239 	for (i = 0; i < 2; i++) {
2240 		hw_stats->rx_macsec_sa_pkts_ok +=
2241 			rd32(hw, TXGBE_LSECRX_OKPKT(i));
2242 		hw_stats->rx_macsec_sa_pkts_invalid +=
2243 			rd32(hw, TXGBE_LSECRX_INVPKT(i));
2244 		hw_stats->rx_macsec_sa_pkts_notvalid +=
2245 			rd32(hw, TXGBE_LSECRX_BADPKT(i));
2246 	}
2247 	hw_stats->rx_macsec_sa_pkts_unusedsa +=
2248 			rd32(hw, TXGBE_LSECRX_INVSAPKT);
2249 	hw_stats->rx_macsec_sa_pkts_notusingsa +=
2250 			rd32(hw, TXGBE_LSECRX_BADSAPKT);
2251 
2252 	hw_stats->rx_total_missed_packets = 0;
2253 	for (i = 0; i < TXGBE_MAX_UP; i++) {
2254 		hw_stats->rx_total_missed_packets +=
2255 			hw_stats->up[i].rx_up_dropped;
2256 	}
2257 }
2258 
2259 static int
2260 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2261 {
2262 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2263 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2264 	struct txgbe_stat_mappings *stat_mappings =
2265 			TXGBE_DEV_STAT_MAPPINGS(dev);
2266 	uint32_t i, j;
2267 
2268 	txgbe_read_stats_registers(hw, hw_stats);
2269 
2270 	if (stats == NULL)
2271 		return -EINVAL;
2272 
2273 	/* Fill out the rte_eth_stats statistics structure */
2274 	stats->ipackets = hw_stats->rx_packets;
2275 	stats->ibytes = hw_stats->rx_bytes;
2276 	stats->opackets = hw_stats->tx_packets;
2277 	stats->obytes = hw_stats->tx_bytes;
2278 
2279 	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
2280 	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
2281 	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
2282 	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
2283 	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
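	/*
	 * Each rqsm/tqsm register packs NB_QMAP_FIELDS_PER_QSM_REG 8-bit
	 * fields mapping a queue to a stat counter. Illustrative example,
	 * assuming 4 fields per register: queue 5 -> n = 5 / 4 = 1,
	 * offset = (5 % 4) * 8 = 8, i.e. bits [15:8] of rqsm[1]/tqsm[1].
	 */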
2284 	for (i = 0; i < TXGBE_MAX_QP; i++) {
2285 		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2286 		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
2287 		uint32_t q_map;
2288 
2289 		q_map = (stat_mappings->rqsm[n] >> offset)
2290 				& QMAP_FIELD_RESERVED_BITS_MASK;
2291 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2292 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2293 		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2294 		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2295 
2296 		q_map = (stat_mappings->tqsm[n] >> offset)
2297 				& QMAP_FIELD_RESERVED_BITS_MASK;
2298 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2299 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2300 		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2301 		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2302 	}
2303 
2304 	/* Rx Errors */
2305 	stats->imissed  = hw_stats->rx_total_missed_packets +
2306 			  hw_stats->rx_dma_drop;
2307 	stats->ierrors  = hw_stats->rx_crc_errors +
2308 			  hw_stats->rx_mac_short_packet_dropped +
2309 			  hw_stats->rx_length_errors +
2310 			  hw_stats->rx_undersize_errors +
2311 			  hw_stats->rx_oversize_errors +
2312 			  hw_stats->rx_drop_packets +
2313 			  hw_stats->rx_illegal_byte_errors +
2314 			  hw_stats->rx_error_bytes +
2315 			  hw_stats->rx_fragment_errors +
2316 			  hw_stats->rx_fcoe_crc_errors +
2317 			  hw_stats->rx_fcoe_mbuf_allocation_errors;
2318 
2319 	/* Tx Errors */
2320 	stats->oerrors  = 0;
2321 	return 0;
2322 }
2323 
2324 static int
2325 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2326 {
2327 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2328 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2329 
2330 	/* HW registers are cleared on read */
2331 	hw->offset_loaded = 0;
2332 	txgbe_dev_stats_get(dev, NULL);
2333 	hw->offset_loaded = 1;
2334 
2335 	/* Reset software totals */
2336 	memset(hw_stats, 0, sizeof(*hw_stats));
2337 
2338 	return 0;
2339 }
2340 
2341 /* This function calculates the number of xstats based on the current config */
2342 static unsigned
2343 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2344 {
2345 	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2346 	return TXGBE_NB_HW_STATS +
2347 	       TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2348 	       TXGBE_NB_QP_STATS * nb_queues;
2349 }
2350 
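/*
 * xstats id layout decoded below (illustrative summary of the
 * calculation above):
 *   [0, TXGBE_NB_HW_STATS)                        hw stats
 *   [.., +TXGBE_NB_UP_STATS * TXGBE_MAX_UP)       per-priority stats
 *   [.., +TXGBE_NB_QP_STATS * nb_queues)          per-queue stats
 */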
2351 static inline int
2352 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2353 {
2354 	int nb, st;
2355 
2356 	/* Extended stats from txgbe_hw_stats */
2357 	if (id < TXGBE_NB_HW_STATS) {
2358 		snprintf(name, size, "[hw]%s",
2359 			rte_txgbe_stats_strings[id].name);
2360 		return 0;
2361 	}
2362 	id -= TXGBE_NB_HW_STATS;
2363 
2364 	/* Priority Stats */
2365 	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2366 		nb = id / TXGBE_NB_UP_STATS;
2367 		st = id % TXGBE_NB_UP_STATS;
2368 		snprintf(name, size, "[p%u]%s", nb,
2369 			rte_txgbe_up_strings[st].name);
2370 		return 0;
2371 	}
2372 	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2373 
2374 	/* Queue Stats */
2375 	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2376 		nb = id / TXGBE_NB_QP_STATS;
2377 		st = id % TXGBE_NB_QP_STATS;
2378 		snprintf(name, size, "[q%u]%s", nb,
2379 			rte_txgbe_qp_strings[st].name);
2380 		return 0;
2381 	}
2382 	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2383 
2384 	return -(int)(id + 1);
2385 }
2386 
2387 static inline int
2388 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2389 {
2390 	int nb, st;
2391 
2392 	/* Extended stats from txgbe_hw_stats */
2393 	if (id < TXGBE_NB_HW_STATS) {
2394 		*offset = rte_txgbe_stats_strings[id].offset;
2395 		return 0;
2396 	}
2397 	id -= TXGBE_NB_HW_STATS;
2398 
2399 	/* Priority Stats */
2400 	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2401 		nb = id / TXGBE_NB_UP_STATS;
2402 		st = id % TXGBE_NB_UP_STATS;
2403 		*offset = rte_txgbe_up_strings[st].offset +
2404 			nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2405 		return 0;
2406 	}
2407 	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2408 
2409 	/* Queue Stats */
2410 	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2411 		nb = id / TXGBE_NB_QP_STATS;
2412 		st = id % TXGBE_NB_QP_STATS;
2413 		*offset = rte_txgbe_qp_strings[st].offset +
2414 			nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2415 		return 0;
2416 	}
2417 
2418 	return -1;
2419 }
2420 
2421 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2422 	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2423 {
2424 	unsigned int i, count;
2425 
2426 	count = txgbe_xstats_calc_num(dev);
2427 	if (xstats_names == NULL)
2428 		return count;
2429 
2430 	/* Note: limit >= cnt_stats checked upstream
2431 	 * in rte_eth_xstats_names()
2432 	 */
2433 	limit = min(limit, count);
2434 
2435 	/* Extended stats from txgbe_hw_stats */
2436 	for (i = 0; i < limit; i++) {
2437 		if (txgbe_get_name_by_id(i, xstats_names[i].name,
2438 			sizeof(xstats_names[i].name))) {
2439 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2440 			break;
2441 		}
2442 	}
2443 
2444 	return i;
2445 }
2446 
2447 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2448 	struct rte_eth_xstat_name *xstats_names,
2449 	const uint64_t *ids,
2450 	unsigned int limit)
2451 {
2452 	unsigned int i;
2453 
2454 	if (ids == NULL)
2455 		return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2456 
2457 	for (i = 0; i < limit; i++) {
2458 		if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2459 				sizeof(xstats_names[i].name))) {
2460 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2461 			return -1;
2462 		}
2463 	}
2464 
2465 	return i;
2466 }
2467 
2468 static int
2469 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2470 					 unsigned int limit)
2471 {
2472 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2473 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2474 	unsigned int i, count;
2475 
2476 	txgbe_read_stats_registers(hw, hw_stats);
2477 
2478 	/* If this is a reset, xstats is NULL, and we have cleared the
2479 	 * registers by reading them.
2480 	 */
2481 	count = txgbe_xstats_calc_num(dev);
2482 	if (xstats == NULL)
2483 		return count;
2484 
2485 	limit = min(limit, txgbe_xstats_calc_num(dev));
2486 
2487 	/* Extended stats from txgbe_hw_stats */
2488 	for (i = 0; i < limit; i++) {
2489 		uint32_t offset = 0;
2490 
2491 		if (txgbe_get_offset_by_id(i, &offset)) {
2492 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2493 			break;
2494 		}
2495 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2496 		xstats[i].id = i;
2497 	}
2498 
2499 	return i;
2500 }
2501 
2502 static int
2503 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2504 					 unsigned int limit)
2505 {
2506 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2507 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2508 	unsigned int i, count;
2509 
2510 	txgbe_read_stats_registers(hw, hw_stats);
2511 
2512 	/* If this is a reset, values is NULL, and we have cleared the
2513 	 * registers by reading them.
2514 	 */
2515 	count = txgbe_xstats_calc_num(dev);
2516 	if (values == NULL)
2517 		return count;
2518 
2519 	limit = min(limit, txgbe_xstats_calc_num(dev));
2520 
2521 	/* Extended stats from txgbe_hw_stats */
2522 	for (i = 0; i < limit; i++) {
2523 		uint32_t offset;
2524 
2525 		if (txgbe_get_offset_by_id(i, &offset)) {
2526 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2527 			break;
2528 		}
2529 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2530 	}
2531 
2532 	return i;
2533 }
2534 
2535 static int
2536 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2537 		uint64_t *values, unsigned int limit)
2538 {
2539 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2540 	unsigned int i;
2541 
2542 	if (ids == NULL)
2543 		return txgbe_dev_xstats_get_(dev, values, limit);
2544 
2545 	for (i = 0; i < limit; i++) {
2546 		uint32_t offset;
2547 
2548 		if (txgbe_get_offset_by_id(ids[i], &offset)) {
2549 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2550 			break;
2551 		}
2552 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2553 	}
2554 
2555 	return i;
2556 }
2557 
2558 static int
2559 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2560 {
2561 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2562 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2563 
2564 	/* HW registers are cleared on read */
2565 	hw->offset_loaded = 0;
2566 	txgbe_read_stats_registers(hw, hw_stats);
2567 	hw->offset_loaded = 1;
2568 
2569 	/* Reset software totals */
2570 	memset(hw_stats, 0, sizeof(*hw_stats));
2571 
2572 	return 0;
2573 }
2574 
2575 static int
2576 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2577 {
2578 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2579 	u32 etrack_id;
2580 	int ret;
2581 
2582 	hw->phy.get_fw_version(hw, &etrack_id);
2583 
2584 	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2585 
2586 	ret += 1; /* add the size of '\0' */
2587 	if (fw_size < (u32)ret)
2588 		return ret;
2589 	else
2590 		return 0;
2591 }
2592 
2593 static int
2594 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2595 {
2596 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2597 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2598 
2599 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2600 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2601 	dev_info->min_rx_bufsize = 1024;
2602 	dev_info->max_rx_pktlen = 15872;
2603 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2604 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2605 	dev_info->max_vfs = pci_dev->max_vfs;
2606 	dev_info->max_vmdq_pools = ETH_64_POOLS;
2607 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2608 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2609 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2610 				     dev_info->rx_queue_offload_capa);
2611 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2612 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2613 
2614 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
2615 		.rx_thresh = {
2616 			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2617 			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2618 			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2619 		},
2620 		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2621 		.rx_drop_en = 0,
2622 		.offloads = 0,
2623 	};
2624 
2625 	dev_info->default_txconf = (struct rte_eth_txconf) {
2626 		.tx_thresh = {
2627 			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2628 			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2629 			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2630 		},
2631 		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2632 		.offloads = 0,
2633 	};
2634 
2635 	dev_info->rx_desc_lim = rx_desc_lim;
2636 	dev_info->tx_desc_lim = tx_desc_lim;
2637 
2638 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2639 	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2640 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2641 
2642 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2643 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2644 
2645 	/* Driver-preferred Rx/Tx parameters */
2646 	dev_info->default_rxportconf.burst_size = 32;
2647 	dev_info->default_txportconf.burst_size = 32;
2648 	dev_info->default_rxportconf.nb_queues = 1;
2649 	dev_info->default_txportconf.nb_queues = 1;
2650 	dev_info->default_rxportconf.ring_size = 256;
2651 	dev_info->default_txportconf.ring_size = 256;
2652 
2653 	return 0;
2654 }
2655 
2656 const uint32_t *
2657 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2658 {
2659 	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2660 	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2661 	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2662 	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2663 		return txgbe_get_supported_ptypes();
2664 
2665 	return NULL;
2666 }
2667 
2668 void
2669 txgbe_dev_setup_link_alarm_handler(void *param)
2670 {
2671 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2672 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2673 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2674 	u32 speed;
2675 	bool autoneg = false;
2676 
2677 	speed = hw->phy.autoneg_advertised;
2678 	if (!speed)
2679 		hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2680 
2681 	hw->mac.setup_link(hw, speed, true);
2682 
2683 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2684 }
2685 
2686 /* return 0 means link status changed, -1 means not changed */
2687 int
2688 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2689 			    int wait_to_complete)
2690 {
2691 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2692 	struct rte_eth_link link;
2693 	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2694 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2695 	bool link_up;
2696 	int err;
2697 	int wait = 1;
2698 
2699 	memset(&link, 0, sizeof(link));
2700 	link.link_status = ETH_LINK_DOWN;
2701 	link.link_speed = ETH_SPEED_NUM_NONE;
2702 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
2703 	link.link_autoneg = ETH_LINK_AUTONEG;
2704 
2705 	hw->mac.get_link_status = true;
2706 
2707 	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2708 		return rte_eth_linkstatus_set(dev, &link);
2709 
2710 	/* skip waiting to complete if not requested or if lsc interrupt is enabled */
2711 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2712 		wait = 0;
2713 
2714 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2715 
2716 	if (err != 0) {
2717 		link.link_speed = ETH_SPEED_NUM_100M;
2718 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
2719 		return rte_eth_linkstatus_set(dev, &link);
2720 	}
2721 
2722 	if (link_up == 0) {
2723 		if ((hw->subsystem_device_id & 0xFF) ==
2724 				TXGBE_DEV_ID_KR_KX_KX4) {
2725 			hw->mac.bp_down_event(hw);
2726 		} else if (hw->phy.media_type == txgbe_media_type_fiber) {
2727 			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2728 			rte_eal_alarm_set(10,
2729 				txgbe_dev_setup_link_alarm_handler, dev);
2730 		}
2731 		return rte_eth_linkstatus_set(dev, &link);
2732 	}
2733 
2734 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2735 	link.link_status = ETH_LINK_UP;
2736 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
2737 
2738 	switch (link_speed) {
2739 	default:
2740 	case TXGBE_LINK_SPEED_UNKNOWN:
2741 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
2742 		link.link_speed = ETH_SPEED_NUM_100M;
2743 		break;
2744 
2745 	case TXGBE_LINK_SPEED_100M_FULL:
2746 		link.link_speed = ETH_SPEED_NUM_100M;
2747 		break;
2748 
2749 	case TXGBE_LINK_SPEED_1GB_FULL:
2750 		link.link_speed = ETH_SPEED_NUM_1G;
2751 		break;
2752 
2753 	case TXGBE_LINK_SPEED_2_5GB_FULL:
2754 		link.link_speed = ETH_SPEED_NUM_2_5G;
2755 		break;
2756 
2757 	case TXGBE_LINK_SPEED_5GB_FULL:
2758 		link.link_speed = ETH_SPEED_NUM_5G;
2759 		break;
2760 
2761 	case TXGBE_LINK_SPEED_10GB_FULL:
2762 		link.link_speed = ETH_SPEED_NUM_10G;
2763 		break;
2764 	}
2765 
2766 	return rte_eth_linkstatus_set(dev, &link);
2767 }
2768 
2769 static int
2770 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2771 {
2772 	return txgbe_dev_link_update_share(dev, wait_to_complete);
2773 }
2774 
2775 static int
2776 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2777 {
2778 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2779 	uint32_t fctrl;
2780 
2781 	fctrl = rd32(hw, TXGBE_PSRCTL);
2782 	fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2783 	wr32(hw, TXGBE_PSRCTL, fctrl);
2784 
2785 	return 0;
2786 }
2787 
2788 static int
2789 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2790 {
2791 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2792 	uint32_t fctrl;
2793 
2794 	fctrl = rd32(hw, TXGBE_PSRCTL);
2795 	fctrl &= (~TXGBE_PSRCTL_UCP);
2796 	if (dev->data->all_multicast == 1)
2797 		fctrl |= TXGBE_PSRCTL_MCP;
2798 	else
2799 		fctrl &= (~TXGBE_PSRCTL_MCP);
2800 	wr32(hw, TXGBE_PSRCTL, fctrl);
2801 
2802 	return 0;
2803 }
2804 
2805 static int
2806 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2807 {
2808 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2809 	uint32_t fctrl;
2810 
2811 	fctrl = rd32(hw, TXGBE_PSRCTL);
2812 	fctrl |= TXGBE_PSRCTL_MCP;
2813 	wr32(hw, TXGBE_PSRCTL, fctrl);
2814 
2815 	return 0;
2816 }
2817 
2818 static int
2819 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2820 {
2821 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2822 	uint32_t fctrl;
2823 
2824 	if (dev->data->promiscuous == 1)
2825 		return 0; /* must remain in all_multicast mode */
2826 
2827 	fctrl = rd32(hw, TXGBE_PSRCTL);
2828 	fctrl &= (~TXGBE_PSRCTL_MCP);
2829 	wr32(hw, TXGBE_PSRCTL, fctrl);
2830 
2831 	return 0;
2832 }
2833 
2834 /**
2835  * It clears the interrupt causes and enables the interrupt.
2836  * It is called only once, during NIC initialization.
2837  *
2838  * @param dev
2839  *  Pointer to struct rte_eth_dev.
2840  * @param on
2841  *  Enable or Disable.
2842  *
2843  * @return
2844  *  - On success, zero.
2845  *  - On failure, a negative value.
2846  */
2847 static int
2848 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2849 {
2850 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2851 
2852 	txgbe_dev_link_status_print(dev);
2853 	if (on)
2854 		intr->mask_misc |= TXGBE_ICRMISC_LSC;
2855 	else
2856 		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2857 
2858 	return 0;
2859 }
2860 
2861 static int
2862 txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
2863 {
2864 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2865 	u64 mask;
2866 
2867 	mask = TXGBE_ICR_MASK;
2868 	mask &= (1ULL << TXGBE_MISC_VEC_ID);
2869 	intr->mask |= mask;
2870 	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
2871 	intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
2872 	return 0;
2873 }
2874 
2875 /**
2876  * It clears the interrupt causes and enables the interrupt.
2877  * It is called only once, during NIC initialization.
2878  *
2879  * @param dev
2880  *  Pointer to struct rte_eth_dev.
2881  *
2882  * @return
2883  *  - On success, zero.
2884  *  - On failure, a negative value.
2885  */
2886 static int
2887 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2888 {
2889 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2890 	u64 mask;
2891 
2892 	mask = TXGBE_ICR_MASK;
2893 	mask &= ~((1ULL << TXGBE_RX_VEC_START) - 1);
2894 	intr->mask |= mask;
2895 
2896 	return 0;
2897 }
2898 
2899 /**
2900  * It clears the interrupt causes and enables the interrupt.
2901  * It is called only once, during NIC initialization.
2902  *
2903  * @param dev
2904  *  Pointer to struct rte_eth_dev.
2905  *
2906  * @return
2907  *  - On success, zero.
2908  *  - On failure, a negative value.
2909  */
2910 static int
2911 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2912 {
2913 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2914 
2915 	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2916 
2917 	return 0;
2918 }
2919 
2920 /*
2921  * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
2922  *
2923  * @param dev
2924  *  Pointer to struct rte_eth_dev.
2925  *
2926  * @return
2927  *  - On success, zero.
2928  *  - On failure, a negative value.
2929  */
2930 static int
2931 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2932 {
2933 	uint32_t eicr;
2934 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2935 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2936 
2937 	/* clear all cause mask */
2938 	txgbe_disable_intr(hw);
2939 
2940 	/* read-on-clear nic registers here */
2941 	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2942 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2943 
2944 	intr->flags = 0;
2945 
2946 	/* set flag for async link update */
2947 	if (eicr & TXGBE_ICRMISC_LSC)
2948 		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2949 
2950 	if (eicr & TXGBE_ICRMISC_ANDONE)
2951 		intr->flags |= TXGBE_FLAG_NEED_AN_CONFIG;
2952 
2953 	if (eicr & TXGBE_ICRMISC_VFMBX)
2954 		intr->flags |= TXGBE_FLAG_MAILBOX;
2955 
2956 	if (eicr & TXGBE_ICRMISC_LNKSEC)
2957 		intr->flags |= TXGBE_FLAG_MACSEC;
2958 
2959 	if (eicr & TXGBE_ICRMISC_GPIO)
2960 		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2961 
2962 	return 0;
2963 }
2964 
2965 /**
2966  * It gets and then prints the link status.
2967  *
2968  * @param dev
2969  *  Pointer to struct rte_eth_dev.
2970  *
2971  * @return
2972  *  void
2974  */
2975 static void
2976 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2977 {
2978 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2979 	struct rte_eth_link link;
2980 
2981 	rte_eth_linkstatus_get(dev, &link);
2982 
2983 	if (link.link_status) {
2984 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2985 					(int)(dev->data->port_id),
2986 					(unsigned int)link.link_speed,
2987 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2988 					"full-duplex" : "half-duplex");
2989 	} else {
2990 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
2991 				(int)(dev->data->port_id));
2992 	}
2993 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2994 				pci_dev->addr.domain,
2995 				pci_dev->addr.bus,
2996 				pci_dev->addr.devid,
2997 				pci_dev->addr.function);
2998 }
2999 
3000 /*
3001  * It executes link_update after knowing an interrupt occurred.
3002  *
3003  * @param dev
3004  *  Pointer to struct rte_eth_dev.
3005  *
3006  * @return
3007  *  - On success, zero.
3008  *  - On failure, a negative value.
3009  */
3010 static int
3011 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
3012 			   struct rte_intr_handle *intr_handle)
3013 {
3014 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
3015 	int64_t timeout;
3016 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3017 
3018 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
3019 
3020 	if (intr->flags & TXGBE_FLAG_MAILBOX) {
3021 		txgbe_pf_mbx_process(dev);
3022 		intr->flags &= ~TXGBE_FLAG_MAILBOX;
3023 	}
3024 
3025 	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
3026 		hw->phy.handle_lasi(hw);
3027 		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
3028 	}
3029 
3030 	if (intr->flags & TXGBE_FLAG_NEED_AN_CONFIG) {
3031 		if (hw->devarg.auto_neg == 1 && hw->devarg.poll == 0) {
3032 			hw->mac.kr_handle(hw);
3033 			intr->flags &= ~TXGBE_FLAG_NEED_AN_CONFIG;
3034 		}
3035 	}
3036 
3037 	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
3038 		struct rte_eth_link link;
3039 
3040 		/* get the link status before link update, for predicting later */
3041 		rte_eth_linkstatus_get(dev, &link);
3042 
3043 		txgbe_dev_link_update(dev, 0);
3044 
3045 		/* link is likely to come up */
3046 		if (!link.link_status)
3047 			/* handle it 1 sec later, waiting for it to stabilize */
3048 			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
3049 		/* link is likely to go down */
3050 		else if ((hw->subsystem_device_id & 0xFF) ==
3051 				TXGBE_DEV_ID_KR_KX_KX4 &&
3052 				hw->devarg.auto_neg == 1)
3053 			/* handle it 2 sec later for backplane AN73 */
3054 			timeout = 2000;
3055 		else
3056 			/* handle it 4 sec later, waiting for it to stabilize */
3057 			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
3058 
3059 		txgbe_dev_link_status_print(dev);
3060 		if (rte_eal_alarm_set(timeout * 1000,
3061 				      txgbe_dev_interrupt_delayed_handler,
3062 				      (void *)dev) < 0) {
3063 			PMD_DRV_LOG(ERR, "Error setting alarm");
3064 		} else {
3065 			/* only disable lsc interrupt */
3066 			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
3067 
3068 			intr->mask_orig = intr->mask;
3069 			/* only disable all misc interrupts */
3070 			intr->mask &= ~(1ULL << TXGBE_MISC_VEC_ID);
3071 		}
3072 	}
3073 
3074 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
3075 	txgbe_enable_intr(dev);
3076 	rte_intr_enable(intr_handle);
3077 
3078 	return 0;
3079 }
3080 
3081 /**
3082  * Interrupt handler registered as an alarm callback for delayed handling
3083  * of a specific interrupt, waiting for the NIC state to stabilize. As the
3084  * txgbe interrupt state is not stable right after the link goes down,
3085  * it needs to wait 4 seconds to read a stable status.
3086  *
3087  * @param handle
3088  *  Pointer to interrupt handle.
3089  * @param param
3090  *  The address of parameter (struct rte_eth_dev *) registered before.
3091  *
3092  * @return
3093  *  void
3094  */
3095 static void
3096 txgbe_dev_interrupt_delayed_handler(void *param)
3097 {
3098 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3099 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3100 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3101 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
3102 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3103 	uint32_t eicr;
3104 
3105 	txgbe_disable_intr(hw);
3106 
3107 	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
3108 	if (eicr & TXGBE_ICRMISC_VFMBX)
3109 		txgbe_pf_mbx_process(dev);
3110 
3111 	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
3112 		hw->phy.handle_lasi(hw);
3113 		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
3114 	}
3115 
3116 	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
3117 		txgbe_dev_link_update(dev, 0);
3118 		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
3119 		txgbe_dev_link_status_print(dev);
3120 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
3121 					      NULL);
3122 	}
3123 
3124 	if (intr->flags & TXGBE_FLAG_MACSEC) {
3125 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
3126 					      NULL);
3127 		intr->flags &= ~TXGBE_FLAG_MACSEC;
3128 	}
3129 
3130 	/* restore original mask */
3131 	intr->mask_misc |= TXGBE_ICRMISC_LSC;
3132 
3133 	intr->mask = intr->mask_orig;
3134 	intr->mask_orig = 0;
3135 
3136 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
3137 	txgbe_enable_intr(dev);
3138 	rte_intr_enable(intr_handle);
3139 }
3140 
3141 /**
3142  * Interrupt handler triggered by the NIC for handling
3143  * a specific interrupt.
3144  *
3145  * @param handle
3146  *  Pointer to interrupt handle.
3147  * @param param
3148  *  The address of parameter (struct rte_eth_dev *) registered before.
3149  *
3150  * @return
3151  *  void
3152  */
3153 static void
3154 txgbe_dev_interrupt_handler(void *param)
3155 {
3156 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3157 
3158 	txgbe_dev_interrupt_get_status(dev);
3159 	txgbe_dev_interrupt_action(dev, dev->intr_handle);
3160 }
3161 
3162 static int
3163 txgbe_dev_led_on(struct rte_eth_dev *dev)
3164 {
3165 	struct txgbe_hw *hw;
3166 
3167 	hw = TXGBE_DEV_HW(dev);
3168 	return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
3169 }
3170 
3171 static int
3172 txgbe_dev_led_off(struct rte_eth_dev *dev)
3173 {
3174 	struct txgbe_hw *hw;
3175 
3176 	hw = TXGBE_DEV_HW(dev);
3177 	return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
3178 }
3179 
3180 static int
3181 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3182 {
3183 	struct txgbe_hw *hw;
3184 	uint32_t mflcn_reg;
3185 	uint32_t fccfg_reg;
3186 	int rx_pause;
3187 	int tx_pause;
3188 
3189 	hw = TXGBE_DEV_HW(dev);
3190 
3191 	fc_conf->pause_time = hw->fc.pause_time;
3192 	fc_conf->high_water = hw->fc.high_water[0];
3193 	fc_conf->low_water = hw->fc.low_water[0];
3194 	fc_conf->send_xon = hw->fc.send_xon;
3195 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
3196 
3197 	/*
3198 	 * Return rx_pause status according to actual setting of
3199 	 * RXFCCFG register.
3200 	 */
3201 	mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
3202 	if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
3203 		rx_pause = 1;
3204 	else
3205 		rx_pause = 0;
3206 
3207 	/*
3208 	 * Return tx_pause status according to actual setting of
3209 	 * TXFCCFG register.
3210 	 */
3211 	fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
3212 	if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
3213 		tx_pause = 1;
3214 	else
3215 		tx_pause = 0;
3216 
3217 	if (rx_pause && tx_pause)
3218 		fc_conf->mode = RTE_FC_FULL;
3219 	else if (rx_pause)
3220 		fc_conf->mode = RTE_FC_RX_PAUSE;
3221 	else if (tx_pause)
3222 		fc_conf->mode = RTE_FC_TX_PAUSE;
3223 	else
3224 		fc_conf->mode = RTE_FC_NONE;
3225 
3226 	return 0;
3227 }
3228 
3229 static int
3230 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3231 {
3232 	struct txgbe_hw *hw;
3233 	int err;
3234 	uint32_t rx_buf_size;
3235 	uint32_t max_high_water;
3236 	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3237 		txgbe_fc_none,
3238 		txgbe_fc_rx_pause,
3239 		txgbe_fc_tx_pause,
3240 		txgbe_fc_full
3241 	};
3242 
3243 	PMD_INIT_FUNC_TRACE();
3244 
3245 	hw = TXGBE_DEV_HW(dev);
3246 	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
3247 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3248 
3249 	/*
3250 	 * Reserve at least one Ethernet frame for the watermark;
3251 	 * high_water/low_water are in kilobytes for txgbe.
3252 	 */
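	/* E.g., assuming a 512 KB packet buffer (rx_buf_size = 0x80000):
	 * (0x80000 - 1518) >> 10 = 510, so high_water can be at most 510 KB.
	 */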
3253 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3254 	if (fc_conf->high_water > max_high_water ||
3255 	    fc_conf->high_water < fc_conf->low_water) {
3256 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3257 		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3258 		return -EINVAL;
3259 	}
3260 
3261 	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
3262 	hw->fc.pause_time     = fc_conf->pause_time;
3263 	hw->fc.high_water[0]  = fc_conf->high_water;
3264 	hw->fc.low_water[0]   = fc_conf->low_water;
3265 	hw->fc.send_xon       = fc_conf->send_xon;
3266 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
3267 
3268 	err = txgbe_fc_enable(hw);
3269 
3270 	/* Not negotiated is not an error case */
3271 	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
3272 		wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
3273 		      (fc_conf->mac_ctrl_frame_fwd
3274 		       ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
3275 		txgbe_flush(hw);
3276 
3277 		return 0;
3278 	}
3279 
3280 	PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
3281 	return -EIO;
3282 }
3283 
3284 static int
3285 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
3286 		struct rte_eth_pfc_conf *pfc_conf)
3287 {
3288 	int err;
3289 	uint32_t rx_buf_size;
3290 	uint32_t max_high_water;
3291 	uint8_t tc_num;
3292 	uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
3293 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3294 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3295 
3296 	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
3297 		txgbe_fc_none,
3298 		txgbe_fc_rx_pause,
3299 		txgbe_fc_tx_pause,
3300 		txgbe_fc_full
3301 	};
3302 
3303 	PMD_INIT_FUNC_TRACE();
3304 
3305 	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3306 	tc_num = map[pfc_conf->priority];
3307 	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
3308 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3309 	/*
3310 	 * Reserve at least one Ethernet frame for the watermark;
3311 	 * high_water/low_water are in kilobytes for txgbe.
3312 	 */
3313 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3314 	if (pfc_conf->fc.high_water > max_high_water ||
3315 	    pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3316 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3317 		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
3318 		return -EINVAL;
3319 	}
3320 
3321 	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3322 	hw->fc.pause_time = pfc_conf->fc.pause_time;
3323 	hw->fc.send_xon = pfc_conf->fc.send_xon;
3324 	hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3325 	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3326 
3327 	err = txgbe_dcb_pfc_enable(hw, tc_num);
3328 
3329 	/* Not negotiated is not an error case */
3330 	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3331 		return 0;
3332 
3333 	PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3334 	return -EIO;
3335 }
3336 
3337 int
3338 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3339 			  struct rte_eth_rss_reta_entry64 *reta_conf,
3340 			  uint16_t reta_size)
3341 {
3342 	uint8_t i, j, mask;
3343 	uint32_t reta;
3344 	uint16_t idx, shift;
3345 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3346 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3347 
3348 	PMD_INIT_FUNC_TRACE();
3349 
3350 	if (!txgbe_rss_update_sp(hw->mac.type)) {
3351 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3352 			"NIC.");
3353 		return -ENOTSUP;
3354 	}
3355 
3356 	if (reta_size != ETH_RSS_RETA_SIZE_128) {
3357 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3358 			"(%d) doesn't match the number hardware can support "
3359 			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3360 		return -EINVAL;
3361 	}
3362 
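	/*
	 * Each 32-bit RSSTBL register packs four 8-bit queue indices, so
	 * entry i of the 128-entry table lives in register i >> 2, byte
	 * i & 3. Illustrative example: reta entry 10 -> register 2,
	 * bits [23:16]. Only entries whose bit is set in reta_conf[].mask
	 * are rewritten.
	 */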
3363 	for (i = 0; i < reta_size; i += 4) {
3364 		idx = i / RTE_RETA_GROUP_SIZE;
3365 		shift = i % RTE_RETA_GROUP_SIZE;
3366 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3367 		if (!mask)
3368 			continue;
3369 
3370 		reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
3371 		for (j = 0; j < 4; j++) {
3372 			if (RS8(mask, j, 0x1)) {
3373 				reta  &= ~(MS32(8 * j, 0xFF));
3374 				reta |= LS32(reta_conf[idx].reta[shift + j],
3375 						8 * j, 0xFF);
3376 			}
3377 		}
3378 		wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3379 	}
3380 	adapter->rss_reta_updated = 1;
3381 
3382 	return 0;
3383 }
3384 
3385 int
3386 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3387 			 struct rte_eth_rss_reta_entry64 *reta_conf,
3388 			 uint16_t reta_size)
3389 {
3390 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3391 	uint8_t i, j, mask;
3392 	uint32_t reta;
3393 	uint16_t idx, shift;
3394 
3395 	PMD_INIT_FUNC_TRACE();
3396 
3397 	if (reta_size != ETH_RSS_RETA_SIZE_128) {
3398 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3399 			"(%d) doesn't match the number hardware can support "
3400 			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3401 		return -EINVAL;
3402 	}
3403 
3404 	for (i = 0; i < reta_size; i += 4) {
3405 		idx = i / RTE_RETA_GROUP_SIZE;
3406 		shift = i % RTE_RETA_GROUP_SIZE;
3407 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3408 		if (!mask)
3409 			continue;
3410 
3411 		reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
3412 		for (j = 0; j < 4; j++) {
3413 			if (RS8(mask, j, 0x1))
3414 				reta_conf[idx].reta[shift + j] =
3415 					(uint16_t)RS32(reta, 8 * j, 0xFF);
3416 		}
3417 	}
3418 
3419 	return 0;
3420 }
3421 
3422 static int
3423 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3424 				uint32_t index, uint32_t pool)
3425 {
3426 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3427 	uint32_t enable_addr = 1;
3428 
3429 	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3430 			     pool, enable_addr);
3431 }
3432 
3433 static void
3434 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3435 {
3436 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3437 
3438 	txgbe_clear_rar(hw, index);
3439 }
3440 
3441 static int
3442 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3443 {
3444 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3445 
3446 	txgbe_remove_rar(dev, 0);
3447 	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3448 
3449 	return 0;
3450 }
3451 
3452 static int
3453 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3454 {
3455 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3456 	struct rte_eth_dev_info dev_info;
3457 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
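	/* e.g. an mtu of 1500 gives frame_size 1518 (14 B header + 4 B CRC) */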
3458 	struct rte_eth_dev_data *dev_data = dev->data;
3459 	int ret;
3460 
3461 	ret = txgbe_dev_info_get(dev, &dev_info);
3462 	if (ret != 0)
3463 		return ret;
3464 
3465 	/* check that mtu is within the allowed range */
3466 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
3467 		return -EINVAL;
3468 
3469 	/* If device is started, refuse mtu that requires the support of
3470 	 * scattered packets when this feature has not been enabled before.
3471 	 */
3472 	if (dev_data->dev_started && !dev_data->scattered_rx &&
3473 	    (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3474 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3475 		PMD_INIT_LOG(ERR, "Stop port first.");
3476 		return -EINVAL;
3477 	}
3478 
3479 	/* update max frame size */
3480 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3481 
3482 	if (hw->mode)
3483 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3484 			TXGBE_FRAME_SIZE_MAX);
3485 	else
3486 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3487 			TXGBE_FRMSZ_MAX(frame_size));
3488 
3489 	return 0;
3490 }
3491 
3492 static uint32_t
3493 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3494 {
3495 	uint32_t vector = 0;
3496 
3497 	switch (hw->mac.mc_filter_type) {
3498 	case 0:   /* use bits [47:36] of the address */
3499 		vector = ((uc_addr->addr_bytes[4] >> 4) |
3500 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
3501 		break;
3502 	case 1:   /* use bits [46:35] of the address */
3503 		vector = ((uc_addr->addr_bytes[4] >> 3) |
3504 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
3505 		break;
3506 	case 2:   /* use bits [45:34] of the address */
3507 		vector = ((uc_addr->addr_bytes[4] >> 2) |
3508 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
3509 		break;
3510 	case 3:   /* use bits [43:32] of the address */
3511 		vector = ((uc_addr->addr_bytes[4]) |
3512 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
3513 		break;
3514 	default:  /* Invalid mc_filter_type */
3515 		break;
3516 	}
3517 
3518 	/* the vector can only be 12 bits wide, or the table boundary is exceeded */
3519 	vector &= 0xFFF;
3520 	return vector;
3521 }
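/*
 * Worked example for the hash above (illustrative), mc_filter_type 0:
 * for a MAC whose last two bytes are 0xAB 0xCD,
 * vector = (0xAB >> 4) | (0xCD << 4) = 0xCDA. The caller then flips bit
 * (vector & 0x1F) of TXGBE_UCADDRTBL((vector >> 5) & 0x7F), i.e. bit 26
 * of table entry 0x66 in this example.
 */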
3522 
3523 static int
3524 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3525 			struct rte_ether_addr *mac_addr, uint8_t on)
3526 {
3527 	uint32_t vector;
3528 	uint32_t uta_idx;
3529 	uint32_t reg_val;
3530 	uint32_t uta_mask;
3531 	uint32_t psrctl;
3532 
3533 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3534 	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3535 
3536 	/* The UTA table only exists on pf hardware */
3537 	if (hw->mac.type < txgbe_mac_raptor)
3538 		return -ENOTSUP;
3539 
3540 	vector = txgbe_uta_vector(hw, mac_addr);
3541 	uta_idx = (vector >> 5) & 0x7F;
3542 	uta_mask = 0x1UL << (vector & 0x1F);
3543 
3544 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3545 		return 0;
3546 
3547 	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3548 	if (on) {
3549 		uta_info->uta_in_use++;
3550 		reg_val |= uta_mask;
3551 		uta_info->uta_shadow[uta_idx] |= uta_mask;
3552 	} else {
3553 		uta_info->uta_in_use--;
3554 		reg_val &= ~uta_mask;
3555 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3556 	}
3557 
3558 	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3559 
3560 	psrctl = rd32(hw, TXGBE_PSRCTL);
3561 	if (uta_info->uta_in_use > 0)
3562 		psrctl |= TXGBE_PSRCTL_UCHFENA;
3563 	else
3564 		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3565 
3566 	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3567 	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3568 	wr32(hw, TXGBE_PSRCTL, psrctl);
3569 
3570 	return 0;
3571 }
3572 
3573 static int
3574 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3575 {
3576 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3577 	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3578 	uint32_t psrctl;
3579 	int i;
3580 
3581 	/* The UTA table only exists on pf hardware */
3582 	if (hw->mac.type < txgbe_mac_raptor)
3583 		return -ENOTSUP;
3584 
3585 	if (on) {
3586 		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3587 			uta_info->uta_shadow[i] = ~0;
3588 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3589 		}
3590 	} else {
3591 		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3592 			uta_info->uta_shadow[i] = 0;
3593 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
3594 		}
3595 	}
3596 
3597 	psrctl = rd32(hw, TXGBE_PSRCTL);
3598 	if (on)
3599 		psrctl |= TXGBE_PSRCTL_UCHFENA;
3600 	else
3601 		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3602 
3603 	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3604 	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3605 	wr32(hw, TXGBE_PSRCTL, psrctl);
3606 
3607 	return 0;
3608 }
3609 
3610 uint32_t
3611 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3612 {
3613 	uint32_t new_val = orig_val;
3614 
3615 	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3616 		new_val |= TXGBE_POOLETHCTL_UTA;
3617 	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3618 		new_val |= TXGBE_POOLETHCTL_MCHA;
3619 	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3620 		new_val |= TXGBE_POOLETHCTL_UCHA;
3621 	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3622 		new_val |= TXGBE_POOLETHCTL_BCA;
3623 	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3624 		new_val |= TXGBE_POOLETHCTL_MCP;
3625 
3626 	return new_val;
3627 }
3628 
3629 static int
3630 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3631 {
3632 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3633 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3634 	uint32_t mask;
3635 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3636 
3637 	if (queue_id < 32) {
3638 		mask = rd32(hw, TXGBE_IMS(0));
3639 		mask &= (1 << queue_id);
3640 		wr32(hw, TXGBE_IMS(0), mask);
3641 	} else if (queue_id < 64) {
3642 		mask = rd32(hw, TXGBE_IMS(1));
3643 		mask &= (1 << (queue_id - 32));
3644 		wr32(hw, TXGBE_IMS(1), mask);
3645 	}
3646 	rte_intr_enable(intr_handle);
3647 
3648 	return 0;
3649 }
3650 
3651 static int
3652 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3653 {
3654 	uint32_t mask;
3655 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3656 
3657 	if (queue_id < 32) {
3658 		mask = rd32(hw, TXGBE_IMS(0));
3659 		mask &= ~(1 << queue_id);
3660 		wr32(hw, TXGBE_IMS(0), mask);
3661 	} else if (queue_id < 64) {
3662 		mask = rd32(hw, TXGBE_IMS(1));
3663 		mask &= ~(1 << (queue_id - 32));
3664 		wr32(hw, TXGBE_IMS(1), mask);
3665 	}
3666 
3667 	return 0;
3668 }
3669 
/**
 * set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to txgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
3681 void
3682 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3683 		   uint8_t queue, uint8_t msix_vector)
3684 {
3685 	uint32_t tmp, idx;
3686 
3687 	if (direction == -1) {
3688 		/* other causes */
3689 		msix_vector |= TXGBE_IVARMISC_VLD;
3690 		idx = 0;
3691 		tmp = rd32(hw, TXGBE_IVARMISC);
3692 		tmp &= ~(0xFF << idx);
3693 		tmp |= (msix_vector << idx);
3694 		wr32(hw, TXGBE_IVARMISC, tmp);
3695 	} else {
		/* Rx or Tx causes */
		/* Workaround for ICR lost */
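		/* Each 32-bit IVAR register maps two queues; Rx and Tx
		 * causes each take an 8-bit entry, so the byte offset is
		 * 16 * (queue & 1) + 8 * direction.
		 */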
3698 		idx = ((16 * (queue & 1)) + (8 * direction));
3699 		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3700 		tmp &= ~(0xFF << idx);
3701 		tmp |= (msix_vector << idx);
3702 		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
3703 	}
3704 }
3705 
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @param dev
 *  board private structure
 */
3711 static void
3712 txgbe_configure_msix(struct rte_eth_dev *dev)
3713 {
3714 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3715 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3716 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3717 	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3718 	uint32_t vec = TXGBE_MISC_VEC_ID;
3719 	uint32_t gpie;
3720 
	/* Won't configure the MSI-X register if no mapping is done
	 * between intr vector and event fd.
	 * But if MSI-X has been enabled already, auto clean, auto mask
	 * and throttling still need to be configured.
	 */
3726 	gpie = rd32(hw, TXGBE_GPIE);
3727 	if (!rte_intr_dp_is_en(intr_handle) &&
3728 	    !(gpie & TXGBE_GPIE_MSIX))
3729 		return;
3730 
3731 	if (rte_intr_allow_others(intr_handle)) {
3732 		base = TXGBE_RX_VEC_START;
3733 		vec = base;
3734 	}
3735 
	/* setup GPIE for MSI-X mode */
3737 	gpie = rd32(hw, TXGBE_GPIE);
3738 	gpie |= TXGBE_GPIE_MSIX;
3739 	wr32(hw, TXGBE_GPIE, gpie);
3740 
3741 	/* Populate the IVAR table and set the ITR values to the
3742 	 * corresponding register.
3743 	 */
3744 	if (rte_intr_dp_is_en(intr_handle)) {
3745 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3746 			queue_id++) {
3747 			/* by default, 1:1 mapping */
3748 			txgbe_set_ivar_map(hw, 0, queue_id, vec);
3749 			intr_handle->intr_vec[queue_id] = vec;
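			/* vec stops advancing at the last event fd, so any
			 * remaining queues share the final vector.
			 */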
3750 			if (vec < base + intr_handle->nb_efd - 1)
3751 				vec++;
3752 		}
3753 
3754 		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3755 	}
3756 	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3757 			TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3758 			| TXGBE_ITR_WRDSA);
3759 }
3760 
3761 int
3762 txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3763 			   uint16_t queue_idx, uint16_t tx_rate)
3764 {
3765 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3766 	uint32_t bcnrc_val;
3767 
3768 	if (queue_idx >= hw->mac.max_tx_queues)
3769 		return -EINVAL;
3770 
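	/* Program the max rate as tx_rate and the min rate as half of it;
	 * a zero tx_rate leaves the limiter disabled.
	 */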
3771 	if (tx_rate != 0) {
3772 		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3773 		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3774 	} else {
3775 		bcnrc_val = 0;
3776 	}
3777 
3778 	/*
3779 	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3780 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3781 	 */
3782 	wr32(hw, TXGBE_ARBTXMMW, 0x14);
3783 
3784 	/* Set ARBTXRATE of queue X */
3785 	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3786 	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
3787 	txgbe_flush(hw);
3788 
3789 	return 0;
3790 }
3791 
3792 int
3793 txgbe_syn_filter_set(struct rte_eth_dev *dev,
3794 			struct rte_eth_syn_filter *filter,
3795 			bool add)
3796 {
3797 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3798 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3799 	uint32_t syn_info;
3800 	uint32_t synqf;
3801 
3802 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
3803 		return -EINVAL;
3804 
3805 	syn_info = filter_info->syn_info;
3806 
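	/* Only a single SYN filter exists, so adding fails if one is
	 * already enabled.
	 */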
3807 	if (add) {
3808 		if (syn_info & TXGBE_SYNCLS_ENA)
3809 			return -EINVAL;
3810 		synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
3811 		synqf |= TXGBE_SYNCLS_ENA;
3812 
3813 		if (filter->hig_pri)
3814 			synqf |= TXGBE_SYNCLS_HIPRIO;
3815 		else
3816 			synqf &= ~TXGBE_SYNCLS_HIPRIO;
3817 	} else {
3818 		synqf = rd32(hw, TXGBE_SYNCLS);
3819 		if (!(syn_info & TXGBE_SYNCLS_ENA))
3820 			return -ENOENT;
3821 		synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
3822 	}
3823 
3824 	filter_info->syn_info = synqf;
3825 	wr32(hw, TXGBE_SYNCLS, synqf);
3826 	txgbe_flush(hw);
3827 	return 0;
3828 }
3829 
3830 static inline enum txgbe_5tuple_protocol
3831 convert_protocol_type(uint8_t protocol_value)
3832 {
3833 	if (protocol_value == IPPROTO_TCP)
3834 		return TXGBE_5TF_PROT_TCP;
3835 	else if (protocol_value == IPPROTO_UDP)
3836 		return TXGBE_5TF_PROT_UDP;
3837 	else if (protocol_value == IPPROTO_SCTP)
3838 		return TXGBE_5TF_PROT_SCTP;
3839 	else
3840 		return TXGBE_5TF_PROT_NONE;
3841 }
3842 
/* inject a 5-tuple filter into HW */
3844 static inline void
3845 txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
3846 			   struct txgbe_5tuple_filter *filter)
3847 {
3848 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3849 	int i;
3850 	uint32_t ftqf, sdpqf;
3851 	uint32_t l34timir = 0;
3852 	uint32_t mask = TXGBE_5TFCTL0_MASK;
3853 
3854 	i = filter->index;
3855 	sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
3856 	sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
3857 
3858 	ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
3859 	ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare this field. */
3861 		mask &= ~TXGBE_5TFCTL0_MSADDR;
3862 	if (filter->filter_info.dst_ip_mask == 0)
3863 		mask &= ~TXGBE_5TFCTL0_MDADDR;
3864 	if (filter->filter_info.src_port_mask == 0)
3865 		mask &= ~TXGBE_5TFCTL0_MSPORT;
3866 	if (filter->filter_info.dst_port_mask == 0)
3867 		mask &= ~TXGBE_5TFCTL0_MDPORT;
3868 	if (filter->filter_info.proto_mask == 0)
3869 		mask &= ~TXGBE_5TFCTL0_MPROTO;
3870 	ftqf |= mask;
3871 	ftqf |= TXGBE_5TFCTL0_MPOOL;
3872 	ftqf |= TXGBE_5TFCTL0_ENA;
3873 
3874 	wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
3875 	wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
3876 	wr32(hw, TXGBE_5TFPORT(i), sdpqf);
3877 	wr32(hw, TXGBE_5TFCTL0(i), ftqf);
3878 
3879 	l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
3880 	wr32(hw, TXGBE_5TFCTL1(i), l34timir);
3881 }
3882 
3883 /*
3884  * add a 5tuple filter
3885  *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter that will be added; a free index is
 *   allocated and stored in filter->index, and the Rx queue is taken
 *   from filter->queue.
3891  *
3892  * @return
3893  *    - On success, zero.
3894  *    - On failure, a negative value.
3895  */
3896 static int
3897 txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
3898 			struct txgbe_5tuple_filter *filter)
3899 {
3900 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3901 	int i, idx, shift;
3902 
3903 	/*
	 * look for an unused 5-tuple filter index
	 * and insert the filter into the list.
3906 	 */
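	/* fivetuple_mask is a bitmap in which each 32-bit word tracks 32
	 * of the TXGBE_MAX_FTQF_FILTERS slots (NBBY is bits per byte).
	 */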
3907 	for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
3908 		idx = i / (sizeof(uint32_t) * NBBY);
3909 		shift = i % (sizeof(uint32_t) * NBBY);
3910 		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
3911 			filter_info->fivetuple_mask[idx] |= 1 << shift;
3912 			filter->index = i;
3913 			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
3914 					  filter,
3915 					  entries);
3916 			break;
3917 		}
3918 	}
3919 	if (i >= TXGBE_MAX_FTQF_FILTERS) {
3920 		PMD_DRV_LOG(ERR, "5tuple filters are full.");
3921 		return -ENOSYS;
3922 	}
3923 
3924 	txgbe_inject_5tuple_filter(dev, filter);
3925 
3926 	return 0;
3927 }
3928 
3929 /*
3930  * remove a 5tuple filter
3931  *
3932  * @param
3933  * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter to be removed.
3935  */
3936 static void
3937 txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
3938 			struct txgbe_5tuple_filter *filter)
3939 {
3940 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3941 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
3942 	uint16_t index = filter->index;
3943 
3944 	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
3945 				~(1 << (index % (sizeof(uint32_t) * NBBY)));
3946 	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
3947 	rte_free(filter);
3948 
3949 	wr32(hw, TXGBE_5TFDADDR(index), 0);
3950 	wr32(hw, TXGBE_5TFSADDR(index), 0);
3951 	wr32(hw, TXGBE_5TFPORT(index), 0);
3952 	wr32(hw, TXGBE_5TFCTL0(index), 0);
3953 	wr32(hw, TXGBE_5TFCTL1(index), 0);
3954 }
3955 
3956 static inline struct txgbe_5tuple_filter *
3957 txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
3958 			struct txgbe_5tuple_filter_info *key)
3959 {
3960 	struct txgbe_5tuple_filter *it;
3961 
3962 	TAILQ_FOREACH(it, filter_list, entries) {
3963 		if (memcmp(key, &it->filter_info,
3964 			sizeof(struct txgbe_5tuple_filter_info)) == 0) {
3965 			return it;
3966 		}
3967 	}
3968 	return NULL;
3969 }
3970 
3971 /* translate elements in struct rte_eth_ntuple_filter
3972  * to struct txgbe_5tuple_filter_info
3973  */
3974 static inline int
3975 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
3976 			struct txgbe_5tuple_filter_info *filter_info)
3977 {
3978 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
3979 		filter->priority > TXGBE_5TUPLE_MAX_PRI ||
3980 		filter->priority < TXGBE_5TUPLE_MIN_PRI)
3981 		return -EINVAL;
3982 
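	/* In txgbe_5tuple_filter_info the mask sense is inverted relative
	 * to the rte masks: 0 means compare the field, 1 means ignore it.
	 */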
3983 	switch (filter->dst_ip_mask) {
3984 	case UINT32_MAX:
3985 		filter_info->dst_ip_mask = 0;
3986 		filter_info->dst_ip = filter->dst_ip;
3987 		break;
3988 	case 0:
3989 		filter_info->dst_ip_mask = 1;
3990 		break;
3991 	default:
3992 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
3993 		return -EINVAL;
3994 	}
3995 
3996 	switch (filter->src_ip_mask) {
3997 	case UINT32_MAX:
3998 		filter_info->src_ip_mask = 0;
3999 		filter_info->src_ip = filter->src_ip;
4000 		break;
4001 	case 0:
4002 		filter_info->src_ip_mask = 1;
4003 		break;
4004 	default:
4005 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
4006 		return -EINVAL;
4007 	}
4008 
4009 	switch (filter->dst_port_mask) {
4010 	case UINT16_MAX:
4011 		filter_info->dst_port_mask = 0;
4012 		filter_info->dst_port = filter->dst_port;
4013 		break;
4014 	case 0:
4015 		filter_info->dst_port_mask = 1;
4016 		break;
4017 	default:
4018 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
4019 		return -EINVAL;
4020 	}
4021 
4022 	switch (filter->src_port_mask) {
4023 	case UINT16_MAX:
4024 		filter_info->src_port_mask = 0;
4025 		filter_info->src_port = filter->src_port;
4026 		break;
4027 	case 0:
4028 		filter_info->src_port_mask = 1;
4029 		break;
4030 	default:
4031 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
4032 		return -EINVAL;
4033 	}
4034 
4035 	switch (filter->proto_mask) {
4036 	case UINT8_MAX:
4037 		filter_info->proto_mask = 0;
4038 		filter_info->proto =
4039 			convert_protocol_type(filter->proto);
4040 		break;
4041 	case 0:
4042 		filter_info->proto_mask = 1;
4043 		break;
4044 	default:
4045 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
4046 		return -EINVAL;
4047 	}
4048 
4049 	filter_info->priority = (uint8_t)filter->priority;
4050 	return 0;
4051 }
4052 
4053 /*
4054  * add or delete a ntuple filter
4055  *
4056  * @param
4057  * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter.
 * add: if true, add the filter; if false, remove it.
4060  *
4061  * @return
4062  *    - On success, zero.
4063  *    - On failure, a negative value.
4064  */
4065 int
4066 txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
4067 			struct rte_eth_ntuple_filter *ntuple_filter,
4068 			bool add)
4069 {
4070 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4071 	struct txgbe_5tuple_filter_info filter_5tuple;
4072 	struct txgbe_5tuple_filter *filter;
4073 	int ret;
4074 
4075 	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
4076 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
4077 		return -EINVAL;
4078 	}
4079 
4080 	memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
4081 	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
4082 	if (ret < 0)
4083 		return ret;
4084 
4085 	filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
4086 					 &filter_5tuple);
4087 	if (filter != NULL && add) {
4088 		PMD_DRV_LOG(ERR, "filter exists.");
4089 		return -EEXIST;
4090 	}
4091 	if (filter == NULL && !add) {
4092 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
4093 		return -ENOENT;
4094 	}
4095 
4096 	if (add) {
4097 		filter = rte_zmalloc("txgbe_5tuple_filter",
4098 				sizeof(struct txgbe_5tuple_filter), 0);
4099 		if (filter == NULL)
4100 			return -ENOMEM;
4101 		rte_memcpy(&filter->filter_info,
4102 				 &filter_5tuple,
4103 				 sizeof(struct txgbe_5tuple_filter_info));
4104 		filter->queue = ntuple_filter->queue;
4105 		ret = txgbe_add_5tuple_filter(dev, filter);
4106 		if (ret < 0) {
4107 			rte_free(filter);
4108 			return ret;
4109 		}
4110 	} else {
4111 		txgbe_remove_5tuple_filter(dev, filter);
4112 	}
4113 
4114 	return 0;
4115 }
4116 
4117 int
4118 txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
4119 			struct rte_eth_ethertype_filter *filter,
4120 			bool add)
4121 {
4122 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4123 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
4124 	uint32_t etqf = 0;
4125 	uint32_t etqs = 0;
4126 	int ret;
4127 	struct txgbe_ethertype_filter ethertype_filter;
4128 
4129 	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
4130 		return -EINVAL;
4131 
4132 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
4133 	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
4134 		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4135 			" ethertype filter.", filter->ether_type);
4136 		return -EINVAL;
4137 	}
4138 
4139 	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4140 		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4141 		return -EINVAL;
4142 	}
4143 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4144 		PMD_DRV_LOG(ERR, "drop option is unsupported.");
4145 		return -EINVAL;
4146 	}
4147 
4148 	ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
4149 	if (ret >= 0 && add) {
4150 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4151 			    filter->ether_type);
4152 		return -EEXIST;
4153 	}
4154 	if (ret < 0 && !add) {
4155 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4156 			    filter->ether_type);
4157 		return -ENOENT;
4158 	}
4159 
4160 	if (add) {
4161 		etqf = TXGBE_ETFLT_ENA;
4162 		etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
4163 		etqs |= TXGBE_ETCLS_QPID(filter->queue);
4164 		etqs |= TXGBE_ETCLS_QENA;
4165 
4166 		ethertype_filter.ethertype = filter->ether_type;
4167 		ethertype_filter.etqf = etqf;
4168 		ethertype_filter.etqs = etqs;
4169 		ethertype_filter.conf = FALSE;
4170 		ret = txgbe_ethertype_filter_insert(filter_info,
4171 						    &ethertype_filter);
4172 		if (ret < 0) {
4173 			PMD_DRV_LOG(ERR, "ethertype filters are full.");
4174 			return -ENOSPC;
4175 		}
4176 	} else {
4177 		ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
4178 		if (ret < 0)
4179 			return -ENOSYS;
4180 	}
4181 	wr32(hw, TXGBE_ETFLT(ret), etqf);
4182 	wr32(hw, TXGBE_ETCLS(ret), etqs);
4183 	txgbe_flush(hw);
4184 
4185 	return 0;
4186 }
4187 
4188 static int
4189 txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
4190 		       const struct rte_flow_ops **ops)
4191 {
4192 	*ops = &txgbe_flow_ops;
4193 	return 0;
4194 }
4195 
4196 static u8 *
4197 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
4198 			u8 **mc_addr_ptr, u32 *vmdq)
4199 {
4200 	u8 *mc_addr;
4201 
4202 	*vmdq = 0;
4203 	mc_addr = *mc_addr_ptr;
4204 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
4205 	return mc_addr;
4206 }
4207 
4208 int
4209 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
4210 			  struct rte_ether_addr *mc_addr_set,
4211 			  uint32_t nb_mc_addr)
4212 {
4213 	struct txgbe_hw *hw;
4214 	u8 *mc_addr_list;
4215 
4216 	hw = TXGBE_DEV_HW(dev);
4217 	mc_addr_list = (u8 *)mc_addr_set;
4218 	return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
4219 					 txgbe_dev_addr_list_itr, TRUE);
4220 }
4221 
4222 static uint64_t
4223 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
4224 {
4225 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4226 	uint64_t systime_cycles;
4227 
4228 	systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
4229 	systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
4230 
4231 	return systime_cycles;
4232 }
4233 
4234 static uint64_t
4235 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4236 {
4237 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4238 	uint64_t rx_tstamp_cycles;
4239 
4240 	/* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
4241 	rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
4242 	rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
4243 
4244 	return rx_tstamp_cycles;
4245 }
4246 
4247 static uint64_t
4248 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4249 {
4250 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4251 	uint64_t tx_tstamp_cycles;
4252 
4253 	/* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
4254 	tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
4255 	tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
4256 
4257 	return tx_tstamp_cycles;
4258 }
4259 
4260 static void
4261 txgbe_start_timecounters(struct rte_eth_dev *dev)
4262 {
4263 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4264 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4265 	struct rte_eth_link link;
4266 	uint32_t incval = 0;
4267 	uint32_t shift = 0;
4268 
4269 	/* Get current link speed. */
4270 	txgbe_dev_link_update(dev, 1);
4271 	rte_eth_linkstatus_get(dev, &link);
4272 
4273 	switch (link.link_speed) {
4274 	case ETH_SPEED_NUM_100M:
4275 		incval = TXGBE_INCVAL_100;
4276 		shift = TXGBE_INCVAL_SHIFT_100;
4277 		break;
4278 	case ETH_SPEED_NUM_1G:
4279 		incval = TXGBE_INCVAL_1GB;
4280 		shift = TXGBE_INCVAL_SHIFT_1GB;
4281 		break;
4282 	case ETH_SPEED_NUM_10G:
4283 	default:
4284 		incval = TXGBE_INCVAL_10GB;
4285 		shift = TXGBE_INCVAL_SHIFT_10GB;
4286 		break;
4287 	}
4288 
4289 	wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
4290 
4291 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
4292 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4293 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
4294 
4295 	adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4296 	adapter->systime_tc.cc_shift = shift;
4297 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
4298 
4299 	adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4300 	adapter->rx_tstamp_tc.cc_shift = shift;
4301 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4302 
4303 	adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
4304 	adapter->tx_tstamp_tc.cc_shift = shift;
4305 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
4306 }
4307 
4308 static int
4309 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4310 {
4311 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4312 
4313 	adapter->systime_tc.nsec += delta;
4314 	adapter->rx_tstamp_tc.nsec += delta;
4315 	adapter->tx_tstamp_tc.nsec += delta;
4316 
4317 	return 0;
4318 }
4319 
4320 static int
4321 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4322 {
4323 	uint64_t ns;
4324 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4325 
4326 	ns = rte_timespec_to_ns(ts);
4327 	/* Set the timecounters to a new value. */
4328 	adapter->systime_tc.nsec = ns;
4329 	adapter->rx_tstamp_tc.nsec = ns;
4330 	adapter->tx_tstamp_tc.nsec = ns;
4331 
4332 	return 0;
4333 }
4334 
4335 static int
4336 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4337 {
4338 	uint64_t ns, systime_cycles;
4339 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4340 
4341 	systime_cycles = txgbe_read_systime_cyclecounter(dev);
4342 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4343 	*ts = rte_ns_to_timespec(ns);
4344 
4345 	return 0;
4346 }
4347 
4348 static int
4349 txgbe_timesync_enable(struct rte_eth_dev *dev)
4350 {
4351 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4352 	uint32_t tsync_ctl;
4353 
4354 	/* Stop the timesync system time. */
4355 	wr32(hw, TXGBE_TSTIMEINC, 0x0);
4356 	/* Reset the timesync system time value. */
4357 	wr32(hw, TXGBE_TSTIMEL, 0x0);
4358 	wr32(hw, TXGBE_TSTIMEH, 0x0);
4359 
4360 	txgbe_start_timecounters(dev);
4361 
4362 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4363 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
4364 		RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
4365 
4366 	/* Enable timestamping of received PTP packets. */
4367 	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4368 	tsync_ctl |= TXGBE_TSRXCTL_ENA;
4369 	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4370 
4371 	/* Enable timestamping of transmitted PTP packets. */
4372 	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4373 	tsync_ctl |= TXGBE_TSTXCTL_ENA;
4374 	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4375 
4376 	txgbe_flush(hw);
4377 
4378 	return 0;
4379 }
4380 
4381 static int
4382 txgbe_timesync_disable(struct rte_eth_dev *dev)
4383 {
4384 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4385 	uint32_t tsync_ctl;
4386 
4387 	/* Disable timestamping of transmitted PTP packets. */
4388 	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
4389 	tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
4390 	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
4391 
4392 	/* Disable timestamping of received PTP packets. */
4393 	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
4394 	tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
4395 	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
4396 
4397 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
4398 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
4399 
	/* Stop incrementing the System Time registers. */
4401 	wr32(hw, TXGBE_TSTIMEINC, 0);
4402 
4403 	return 0;
4404 }
4405 
4406 static int
4407 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4408 				 struct timespec *timestamp,
4409 				 uint32_t flags __rte_unused)
4410 {
4411 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4412 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4413 	uint32_t tsync_rxctl;
4414 	uint64_t rx_tstamp_cycles;
4415 	uint64_t ns;
4416 
4417 	tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
4418 	if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
4419 		return -EINVAL;
4420 
4421 	rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
4422 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4423 	*timestamp = rte_ns_to_timespec(ns);
4424 
4425 	return  0;
4426 }
4427 
4428 static int
4429 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4430 				 struct timespec *timestamp)
4431 {
4432 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4433 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4434 	uint32_t tsync_txctl;
4435 	uint64_t tx_tstamp_cycles;
4436 	uint64_t ns;
4437 
4438 	tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
4439 	if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
4440 		return -EINVAL;
4441 
4442 	tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
4443 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4444 	*timestamp = rte_ns_to_timespec(ns);
4445 
4446 	return 0;
4447 }
4448 
4449 static int
4450 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4451 {
4452 	int count = 0;
4453 	int g_ind = 0;
4454 	const struct reg_info *reg_group;
4455 	const struct reg_info **reg_set = txgbe_regs_others;
4456 
4457 	while ((reg_group = reg_set[g_ind++]))
4458 		count += txgbe_regs_group_count(reg_group);
4459 
4460 	return count;
4461 }
4462 
4463 static int
4464 txgbe_get_regs(struct rte_eth_dev *dev,
4465 	      struct rte_dev_reg_info *regs)
4466 {
4467 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4468 	uint32_t *data = regs->data;
4469 	int g_ind = 0;
4470 	int count = 0;
4471 	const struct reg_info *reg_group;
4472 	const struct reg_info **reg_set = txgbe_regs_others;
4473 
4474 	if (data == NULL) {
4475 		regs->length = txgbe_get_reg_length(dev);
4476 		regs->width = sizeof(uint32_t);
4477 		return 0;
4478 	}
4479 
4480 	/* Support only full register dump */
4481 	if (regs->length == 0 ||
4482 	    regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
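		/* Encode the MAC type, revision and device id into the
		 * reported register-set version.
		 */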
4483 		regs->version = hw->mac.type << 24 |
4484 				hw->revision_id << 16 |
4485 				hw->device_id;
4486 		while ((reg_group = reg_set[g_ind++]))
4487 			count += txgbe_read_regs_group(dev, &data[count],
4488 						      reg_group);
4489 		return 0;
4490 	}
4491 
4492 	return -ENOTSUP;
4493 }
4494 
4495 static int
4496 txgbe_get_eeprom_length(struct rte_eth_dev *dev)
4497 {
4498 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4499 
	/* Returned length is in bytes */
4501 	return hw->rom.word_size * 2;
4502 }
4503 
4504 static int
4505 txgbe_get_eeprom(struct rte_eth_dev *dev,
4506 		struct rte_dev_eeprom_info *in_eeprom)
4507 {
4508 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4509 	struct txgbe_rom_info *eeprom = &hw->rom;
4510 	uint16_t *data = in_eeprom->data;
4511 	int first, length;
4512 
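	/* The EEPROM is addressed in 16-bit words, so convert the byte
	 * offset and length from the request.
	 */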
4513 	first = in_eeprom->offset >> 1;
4514 	length = in_eeprom->length >> 1;
4515 	if (first > hw->rom.word_size ||
4516 	    ((first + length) > hw->rom.word_size))
4517 		return -EINVAL;
4518 
4519 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4520 
4521 	return eeprom->readw_buffer(hw, first, length, data);
4522 }
4523 
4524 static int
4525 txgbe_set_eeprom(struct rte_eth_dev *dev,
4526 		struct rte_dev_eeprom_info *in_eeprom)
4527 {
4528 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4529 	struct txgbe_rom_info *eeprom = &hw->rom;
4530 	uint16_t *data = in_eeprom->data;
4531 	int first, length;
4532 
4533 	first = in_eeprom->offset >> 1;
4534 	length = in_eeprom->length >> 1;
4535 	if (first > hw->rom.word_size ||
4536 	    ((first + length) > hw->rom.word_size))
4537 		return -EINVAL;
4538 
4539 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
4540 
4541 	return eeprom->writew_buffer(hw,  first, length, data);
4542 }
4543 
4544 static int
4545 txgbe_get_module_info(struct rte_eth_dev *dev,
4546 		      struct rte_eth_dev_module_info *modinfo)
4547 {
4548 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4549 	uint32_t status;
4550 	uint8_t sff8472_rev, addr_mode;
4551 	bool page_swap = false;
4552 
4553 	/* Check whether we support SFF-8472 or not */
4554 	status = hw->phy.read_i2c_eeprom(hw,
4555 					     TXGBE_SFF_SFF_8472_COMP,
4556 					     &sff8472_rev);
4557 	if (status != 0)
4558 		return -EIO;
4559 
	/* check the addressing mode; an address change is not supported */
4561 	status = hw->phy.read_i2c_eeprom(hw,
4562 					     TXGBE_SFF_SFF_8472_SWAP,
4563 					     &addr_mode);
4564 	if (status != 0)
4565 		return -EIO;
4566 
4567 	if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
4568 		PMD_DRV_LOG(ERR,
4569 			    "Address change required to access page 0xA2, "
4570 			    "but not supported. Please report the module "
4571 			    "type to the driver maintainers.");
4572 		page_swap = true;
4573 	}
4574 
4575 	if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have an SFP, but it does not support SFF-8472 */
4577 		modinfo->type = RTE_ETH_MODULE_SFF_8079;
4578 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
4579 	} else {
		/* We have an SFP that supports a revision of SFF-8472. */
4581 		modinfo->type = RTE_ETH_MODULE_SFF_8472;
4582 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
4583 	}
4584 
4585 	return 0;
4586 }
4587 
4588 static int
4589 txgbe_get_module_eeprom(struct rte_eth_dev *dev,
4590 			struct rte_dev_eeprom_info *info)
4591 {
4592 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4593 	uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
4594 	uint8_t databyte = 0xFF;
4595 	uint8_t *data = info->data;
4596 	uint32_t i = 0;
4597 
4598 	if (info->length == 0)
4599 		return -EINVAL;
4600 
4601 	for (i = info->offset; i < info->offset + info->length; i++) {
4602 		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
4603 			status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
4604 		else
4605 			status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
4606 
4607 		if (status != 0)
4608 			return -EIO;
4609 
4610 		data[i - info->offset] = databyte;
4611 	}
4612 
4613 	return 0;
4614 }
4615 
4616 bool
4617 txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
4618 {
4619 	switch (mac_type) {
4620 	case txgbe_mac_raptor:
4621 	case txgbe_mac_raptor_vf:
4622 		return 1;
4623 	default:
4624 		return 0;
4625 	}
4626 }
4627 
4628 static int
4629 txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
4630 			struct rte_eth_dcb_info *dcb_info)
4631 {
4632 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
4633 	struct txgbe_dcb_tc_config *tc;
4634 	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
4635 	uint8_t nb_tcs;
4636 	uint8_t i, j;
4637 
4638 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
4639 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
4640 	else
4641 		dcb_info->nb_tcs = 1;
4642 
4643 	tc_queue = &dcb_info->tc_queue;
4644 	nb_tcs = dcb_info->nb_tcs;
4645 
4646 	if (dcb_config->vt_mode) { /* vt is enabled */
4647 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
4648 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
4649 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4650 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
4651 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
4652 			for (j = 0; j < nb_tcs; j++) {
4653 				tc_queue->tc_rxq[0][j].base = j;
4654 				tc_queue->tc_rxq[0][j].nb_queue = 1;
4655 				tc_queue->tc_txq[0][j].base = j;
4656 				tc_queue->tc_txq[0][j].nb_queue = 1;
4657 			}
4658 		} else {
4659 			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
4660 				for (j = 0; j < nb_tcs; j++) {
4661 					tc_queue->tc_rxq[i][j].base =
4662 						i * nb_tcs + j;
4663 					tc_queue->tc_rxq[i][j].nb_queue = 1;
4664 					tc_queue->tc_txq[i][j].base =
4665 						i * nb_tcs + j;
4666 					tc_queue->tc_txq[i][j].nb_queue = 1;
4667 				}
4668 			}
4669 		}
4670 	} else { /* vt is disabled */
4671 		struct rte_eth_dcb_rx_conf *rx_conf =
4672 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
4673 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
4674 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
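		/* Without VT, Rx queues are assigned to TCs at fixed
		 * strides, while Tx queues use the fixed hardware layout
		 * below.
		 */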
4675 		if (dcb_info->nb_tcs == ETH_4_TCS) {
4676 			for (i = 0; i < dcb_info->nb_tcs; i++) {
4677 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
4678 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4679 			}
4680 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
4681 			dcb_info->tc_queue.tc_txq[0][1].base = 64;
4682 			dcb_info->tc_queue.tc_txq[0][2].base = 96;
4683 			dcb_info->tc_queue.tc_txq[0][3].base = 112;
4684 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
4685 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4686 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4687 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4688 		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
4689 			for (i = 0; i < dcb_info->nb_tcs; i++) {
4690 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
4691 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
4692 			}
4693 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
4694 			dcb_info->tc_queue.tc_txq[0][1].base = 32;
4695 			dcb_info->tc_queue.tc_txq[0][2].base = 64;
4696 			dcb_info->tc_queue.tc_txq[0][3].base = 80;
4697 			dcb_info->tc_queue.tc_txq[0][4].base = 96;
4698 			dcb_info->tc_queue.tc_txq[0][5].base = 104;
4699 			dcb_info->tc_queue.tc_txq[0][6].base = 112;
4700 			dcb_info->tc_queue.tc_txq[0][7].base = 120;
4701 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
4702 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
4703 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
4704 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
4705 			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
4706 			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
4707 			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
4708 			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
4709 		}
4710 	}
4711 	for (i = 0; i < dcb_info->nb_tcs; i++) {
4712 		tc = &dcb_config->tc_config[i];
4713 		dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
4714 	}
4715 	return 0;
4716 }
4717 
4718 /* Update e-tag ether type */
4719 static int
4720 txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
4721 			    uint16_t ether_type)
4722 {
4723 	uint32_t etag_etype;
4724 
4725 	etag_etype = rd32(hw, TXGBE_EXTAG);
4726 	etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
4727 	etag_etype |= ether_type;
4728 	wr32(hw, TXGBE_EXTAG, etag_etype);
4729 	txgbe_flush(hw);
4730 
4731 	return 0;
4732 }
4733 
4734 /* Enable e-tag tunnel */
4735 static int
4736 txgbe_e_tag_enable(struct txgbe_hw *hw)
4737 {
4738 	uint32_t etag_etype;
4739 
4740 	etag_etype = rd32(hw, TXGBE_PORTCTL);
4741 	etag_etype |= TXGBE_PORTCTL_ETAG;
4742 	wr32(hw, TXGBE_PORTCTL, etag_etype);
4743 	txgbe_flush(hw);
4744 
4745 	return 0;
4746 }
4747 
4748 static int
4749 txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
4750 		       struct txgbe_l2_tunnel_conf  *l2_tunnel)
4751 {
4752 	int ret = 0;
4753 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4754 	uint32_t i, rar_entries;
4755 	uint32_t rar_low, rar_high;
4756 
4757 	rar_entries = hw->mac.num_rar_entries;
4758 
4759 	for (i = 1; i < rar_entries; i++) {
4760 		wr32(hw, TXGBE_ETHADDRIDX, i);
4761 		rar_high = rd32(hw, TXGBE_ETHADDRH);
4762 		rar_low  = rd32(hw, TXGBE_ETHADDRL);
4763 		if ((rar_high & TXGBE_ETHADDRH_VLD) &&
4764 		    (rar_high & TXGBE_ETHADDRH_ETAG) &&
4765 		    (TXGBE_ETHADDRL_ETAG(rar_low) ==
4766 		     l2_tunnel->tunnel_id)) {
4767 			wr32(hw, TXGBE_ETHADDRL, 0);
4768 			wr32(hw, TXGBE_ETHADDRH, 0);
4769 
4770 			txgbe_clear_vmdq(hw, i, BIT_MASK32);
4771 
4772 			return ret;
4773 		}
4774 	}
4775 
4776 	return ret;
4777 }
4778 
4779 static int
4780 txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
4781 		       struct txgbe_l2_tunnel_conf *l2_tunnel)
4782 {
4783 	int ret = 0;
4784 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4785 	uint32_t i, rar_entries;
4786 	uint32_t rar_low, rar_high;
4787 
	/* One entry per tunnel. Try to remove a potentially existing entry first. */
4789 	txgbe_e_tag_filter_del(dev, l2_tunnel);
4790 
4791 	rar_entries = hw->mac.num_rar_entries;
4792 
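	/* Entry 0 is left for the default MAC address; scan from entry 1
	 * for a free slot.
	 */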
4793 	for (i = 1; i < rar_entries; i++) {
4794 		wr32(hw, TXGBE_ETHADDRIDX, i);
4795 		rar_high = rd32(hw, TXGBE_ETHADDRH);
4796 		if (rar_high & TXGBE_ETHADDRH_VLD) {
4797 			continue;
4798 		} else {
4799 			txgbe_set_vmdq(hw, i, l2_tunnel->pool);
4800 			rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
4801 			rar_low = l2_tunnel->tunnel_id;
4802 
4803 			wr32(hw, TXGBE_ETHADDRL, rar_low);
4804 			wr32(hw, TXGBE_ETHADDRH, rar_high);
4805 
4806 			return ret;
4807 		}
4808 	}
4809 
	PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
		     " Please remove a rule before adding a new one.");
4812 	return -EINVAL;
4813 }
4814 
4815 static inline struct txgbe_l2_tn_filter *
4816 txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
4817 			  struct txgbe_l2_tn_key *key)
4818 {
4819 	int ret;
4820 
4821 	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
4822 	if (ret < 0)
4823 		return NULL;
4824 
4825 	return l2_tn_info->hash_map[ret];
4826 }
4827 
4828 static inline int
4829 txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4830 			  struct txgbe_l2_tn_filter *l2_tn_filter)
4831 {
4832 	int ret;
4833 
4834 	ret = rte_hash_add_key(l2_tn_info->hash_handle,
4835 			       &l2_tn_filter->key);
4836 
4837 	if (ret < 0) {
4838 		PMD_DRV_LOG(ERR,
4839 			    "Failed to insert L2 tunnel filter"
4840 			    " to hash table %d!",
4841 			    ret);
4842 		return ret;
4843 	}
4844 
4845 	l2_tn_info->hash_map[ret] = l2_tn_filter;
4846 
4847 	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4848 
4849 	return 0;
4850 }
4851 
4852 static inline int
4853 txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
4854 			  struct txgbe_l2_tn_key *key)
4855 {
4856 	int ret;
4857 	struct txgbe_l2_tn_filter *l2_tn_filter;
4858 
4859 	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
4860 
4861 	if (ret < 0) {
4862 		PMD_DRV_LOG(ERR,
4863 			    "No such L2 tunnel filter to delete %d!",
4864 			    ret);
4865 		return ret;
4866 	}
4867 
4868 	l2_tn_filter = l2_tn_info->hash_map[ret];
4869 	l2_tn_info->hash_map[ret] = NULL;
4870 
4871 	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
4872 	rte_free(l2_tn_filter);
4873 
4874 	return 0;
4875 }
4876 
4877 /* Add l2 tunnel filter */
4878 int
4879 txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
4880 			       struct txgbe_l2_tunnel_conf *l2_tunnel,
4881 			       bool restore)
4882 {
4883 	int ret;
4884 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4885 	struct txgbe_l2_tn_key key;
4886 	struct txgbe_l2_tn_filter *node;
4887 
4888 	if (!restore) {
4889 		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4890 		key.tn_id = l2_tunnel->tunnel_id;
4891 
4892 		node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
4893 
4894 		if (node) {
4895 			PMD_DRV_LOG(ERR,
4896 				    "The L2 tunnel filter already exists!");
4897 			return -EINVAL;
4898 		}
4899 
4900 		node = rte_zmalloc("txgbe_l2_tn",
4901 				   sizeof(struct txgbe_l2_tn_filter),
4902 				   0);
4903 		if (!node)
4904 			return -ENOMEM;
4905 
4906 		rte_memcpy(&node->key,
4907 				 &key,
4908 				 sizeof(struct txgbe_l2_tn_key));
4909 		node->pool = l2_tunnel->pool;
4910 		ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
4911 		if (ret < 0) {
4912 			rte_free(node);
4913 			return ret;
4914 		}
4915 	}
4916 
4917 	switch (l2_tunnel->l2_tunnel_type) {
4918 	case RTE_L2_TUNNEL_TYPE_E_TAG:
4919 		ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
4920 		break;
4921 	default:
4922 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4923 		ret = -EINVAL;
4924 		break;
4925 	}
4926 
4927 	if (!restore && ret < 0)
4928 		(void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4929 
4930 	return ret;
4931 }
4932 
4933 /* Delete l2 tunnel filter */
4934 int
4935 txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
4936 			       struct txgbe_l2_tunnel_conf *l2_tunnel)
4937 {
4938 	int ret;
4939 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
4940 	struct txgbe_l2_tn_key key;
4941 
4942 	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
4943 	key.tn_id = l2_tunnel->tunnel_id;
4944 	ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
4945 	if (ret < 0)
4946 		return ret;
4947 
4948 	switch (l2_tunnel->l2_tunnel_type) {
4949 	case RTE_L2_TUNNEL_TYPE_E_TAG:
4950 		ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
4951 		break;
4952 	default:
4953 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
4954 		ret = -EINVAL;
4955 		break;
4956 	}
4957 
4958 	return ret;
4959 }
4960 
4961 static int
4962 txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
4963 {
4964 	int ret = 0;
4965 	uint32_t ctrl;
4966 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4967 
4968 	ctrl = rd32(hw, TXGBE_POOLCTL);
4969 	ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
4970 	if (en)
4971 		ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
4972 	wr32(hw, TXGBE_POOLCTL, ctrl);
4973 
4974 	return ret;
4975 }
4976 
4977 /* Add UDP tunneling port */
4978 static int
4979 txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
4980 			      struct rte_eth_udp_tunnel *udp_tunnel)
4981 {
4982 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4983 	int ret = 0;
4984 
4985 	if (udp_tunnel == NULL)
4986 		return -EINVAL;
4987 
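	/* Each tunnel type has a single port register, so only one UDP
	 * port per type can be offloaded at a time.
	 */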
4988 	switch (udp_tunnel->prot_type) {
4989 	case RTE_TUNNEL_TYPE_VXLAN:
4990 		if (udp_tunnel->udp_port == 0) {
4991 			PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
4992 			ret = -EINVAL;
4993 			break;
4994 		}
4995 		wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
4996 		wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
4997 		break;
4998 	case RTE_TUNNEL_TYPE_GENEVE:
4999 		if (udp_tunnel->udp_port == 0) {
5000 			PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
5001 			ret = -EINVAL;
5002 			break;
5003 		}
5004 		wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
5005 		break;
5006 	case RTE_TUNNEL_TYPE_TEREDO:
5007 		if (udp_tunnel->udp_port == 0) {
5008 			PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
5009 			ret = -EINVAL;
5010 			break;
5011 		}
5012 		wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
5013 		break;
5014 	default:
5015 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5016 		ret = -EINVAL;
5017 		break;
5018 	}
5019 
5020 	txgbe_flush(hw);
5021 
5022 	return ret;
5023 }
5024 
5025 /* Remove UDP tunneling port */
5026 static int
5027 txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
5028 			      struct rte_eth_udp_tunnel *udp_tunnel)
5029 {
5030 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5031 	int ret = 0;
5032 	uint16_t cur_port;
5033 
5034 	if (udp_tunnel == NULL)
5035 		return -EINVAL;
5036 
5037 	switch (udp_tunnel->prot_type) {
5038 	case RTE_TUNNEL_TYPE_VXLAN:
5039 		cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
5040 		if (cur_port != udp_tunnel->udp_port) {
5041 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5042 					udp_tunnel->udp_port);
5043 			ret = -EINVAL;
5044 			break;
5045 		}
5046 		wr32(hw, TXGBE_VXLANPORT, 0);
5047 		wr32(hw, TXGBE_VXLANPORTGPE, 0);
5048 		break;
5049 	case RTE_TUNNEL_TYPE_GENEVE:
5050 		cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
5051 		if (cur_port != udp_tunnel->udp_port) {
5052 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5053 					udp_tunnel->udp_port);
5054 			ret = -EINVAL;
5055 			break;
5056 		}
5057 		wr32(hw, TXGBE_GENEVEPORT, 0);
5058 		break;
5059 	case RTE_TUNNEL_TYPE_TEREDO:
5060 		cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
5061 		if (cur_port != udp_tunnel->udp_port) {
5062 			PMD_DRV_LOG(ERR, "Port %u does not exist.",
5063 					udp_tunnel->udp_port);
5064 			ret = -EINVAL;
5065 			break;
5066 		}
5067 		wr32(hw, TXGBE_TEREDOPORT, 0);
5068 		break;
5069 	default:
5070 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
5071 		ret = -EINVAL;
5072 		break;
5073 	}
5074 
5075 	txgbe_flush(hw);
5076 
5077 	return ret;
5078 }
5079 
5080 /* restore n-tuple filter */
5081 static inline void
5082 txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
5083 {
5084 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5085 	struct txgbe_5tuple_filter *node;
5086 
5087 	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
5088 		txgbe_inject_5tuple_filter(dev, node);
5089 	}
5090 }
5091 
5092 /* restore ethernet type filter */
5093 static inline void
5094 txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
5095 {
5096 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5097 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5098 	int i;
5099 
5100 	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
5101 		if (filter_info->ethertype_mask & (1 << i)) {
5102 			wr32(hw, TXGBE_ETFLT(i),
5103 					filter_info->ethertype_filters[i].etqf);
5104 			wr32(hw, TXGBE_ETCLS(i),
5105 					filter_info->ethertype_filters[i].etqs);
5106 			txgbe_flush(hw);
5107 		}
5108 	}
5109 }
5110 
5111 /* restore SYN filter */
5112 static inline void
5113 txgbe_syn_filter_restore(struct rte_eth_dev *dev)
5114 {
5115 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5116 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5117 	uint32_t synqf;
5118 
5119 	synqf = filter_info->syn_info;
5120 
5121 	if (synqf & TXGBE_SYNCLS_ENA) {
5122 		wr32(hw, TXGBE_SYNCLS, synqf);
5123 		txgbe_flush(hw);
5124 	}
5125 }
5126 
5127 /* restore L2 tunnel filter */
5128 static inline void
5129 txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
5130 {
5131 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5132 	struct txgbe_l2_tn_filter *node;
5133 	struct txgbe_l2_tunnel_conf l2_tn_conf;
5134 
5135 	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
5136 		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
5137 		l2_tn_conf.tunnel_id      = node->key.tn_id;
5138 		l2_tn_conf.pool           = node->pool;
5139 		(void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
5140 	}
5141 }
5142 
5143 /* restore rss filter */
5144 static inline void
5145 txgbe_rss_filter_restore(struct rte_eth_dev *dev)
5146 {
5147 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5148 
5149 	if (filter_info->rss_info.conf.queue_num)
5150 		txgbe_config_rss_filter(dev,
5151 			&filter_info->rss_info, TRUE);
5152 }
5153 
5154 static int
5155 txgbe_filter_restore(struct rte_eth_dev *dev)
5156 {
5157 	txgbe_ntuple_filter_restore(dev);
5158 	txgbe_ethertype_filter_restore(dev);
5159 	txgbe_syn_filter_restore(dev);
5160 	txgbe_fdir_filter_restore(dev);
5161 	txgbe_l2_tn_filter_restore(dev);
5162 	txgbe_rss_filter_restore(dev);
5163 
5164 	return 0;
5165 }
5166 
5167 static void
5168 txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
5169 {
5170 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5171 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5172 
5173 	if (l2_tn_info->e_tag_en)
5174 		(void)txgbe_e_tag_enable(hw);
5175 
5176 	if (l2_tn_info->e_tag_fwd_en)
5177 		(void)txgbe_e_tag_forwarding_en_dis(dev, 1);
5178 
5179 	(void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
5180 }
5181 
5182 /* remove all the n-tuple filters */
5183 void
5184 txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
5185 {
5186 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5187 	struct txgbe_5tuple_filter *p_5tuple;
5188 
5189 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
5190 		txgbe_remove_5tuple_filter(dev, p_5tuple);
5191 }
5192 
5193 /* remove all the ether type filters */
5194 void
5195 txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
5196 {
5197 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5198 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5199 	int i;
5200 
5201 	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
5202 		if (filter_info->ethertype_mask & (1 << i) &&
5203 		    !filter_info->ethertype_filters[i].conf) {
5204 			(void)txgbe_ethertype_filter_remove(filter_info,
5205 							    (uint8_t)i);
5206 			wr32(hw, TXGBE_ETFLT(i), 0);
5207 			wr32(hw, TXGBE_ETCLS(i), 0);
5208 			txgbe_flush(hw);
5209 		}
5210 	}
5211 }
5212 
5213 /* remove the SYN filter */
5214 void
5215 txgbe_clear_syn_filter(struct rte_eth_dev *dev)
5216 {
5217 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
5218 	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
5219 
5220 	if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
5221 		filter_info->syn_info = 0;
5222 
5223 		wr32(hw, TXGBE_SYNCLS, 0);
5224 		txgbe_flush(hw);
5225 	}
5226 }
5227 
5228 /* remove all the L2 tunnel filters */
5229 int
5230 txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
5231 {
5232 	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
5233 	struct txgbe_l2_tn_filter *l2_tn_filter;
5234 	struct txgbe_l2_tunnel_conf l2_tn_conf;
5235 	int ret = 0;
5236 
5237 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
5238 		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
5239 		l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
5240 		l2_tn_conf.pool           = l2_tn_filter->pool;
5241 		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
5242 		if (ret < 0)
5243 			return ret;
5244 	}
5245 
5246 	return 0;
5247 }
5248 
5249 static const struct eth_dev_ops txgbe_eth_dev_ops = {
5250 	.dev_configure              = txgbe_dev_configure,
5251 	.dev_infos_get              = txgbe_dev_info_get,
5252 	.dev_start                  = txgbe_dev_start,
5253 	.dev_stop                   = txgbe_dev_stop,
5254 	.dev_set_link_up            = txgbe_dev_set_link_up,
5255 	.dev_set_link_down          = txgbe_dev_set_link_down,
5256 	.dev_close                  = txgbe_dev_close,
5257 	.dev_reset                  = txgbe_dev_reset,
5258 	.promiscuous_enable         = txgbe_dev_promiscuous_enable,
5259 	.promiscuous_disable        = txgbe_dev_promiscuous_disable,
5260 	.allmulticast_enable        = txgbe_dev_allmulticast_enable,
5261 	.allmulticast_disable       = txgbe_dev_allmulticast_disable,
5262 	.link_update                = txgbe_dev_link_update,
5263 	.stats_get                  = txgbe_dev_stats_get,
5264 	.xstats_get                 = txgbe_dev_xstats_get,
5265 	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
5266 	.stats_reset                = txgbe_dev_stats_reset,
5267 	.xstats_reset               = txgbe_dev_xstats_reset,
5268 	.xstats_get_names           = txgbe_dev_xstats_get_names,
5269 	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
5270 	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
5271 	.fw_version_get             = txgbe_fw_version_get,
5272 	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
5273 	.mtu_set                    = txgbe_dev_mtu_set,
5274 	.vlan_filter_set            = txgbe_vlan_filter_set,
5275 	.vlan_tpid_set              = txgbe_vlan_tpid_set,
5276 	.vlan_offload_set           = txgbe_vlan_offload_set,
5277 	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
5278 	.rx_queue_start	            = txgbe_dev_rx_queue_start,
5279 	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
5280 	.tx_queue_start	            = txgbe_dev_tx_queue_start,
5281 	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
5282 	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
5283 	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
5284 	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
5285 	.rx_queue_release           = txgbe_dev_rx_queue_release,
5286 	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
5287 	.tx_queue_release           = txgbe_dev_tx_queue_release,
5288 	.dev_led_on                 = txgbe_dev_led_on,
5289 	.dev_led_off                = txgbe_dev_led_off,
5290 	.flow_ctrl_get              = txgbe_flow_ctrl_get,
5291 	.flow_ctrl_set              = txgbe_flow_ctrl_set,
5292 	.priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
5293 	.mac_addr_add               = txgbe_add_rar,
5294 	.mac_addr_remove            = txgbe_remove_rar,
5295 	.mac_addr_set               = txgbe_set_default_mac_addr,
5296 	.uc_hash_table_set          = txgbe_uc_hash_table_set,
5297 	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
5298 	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
5299 	.reta_update                = txgbe_dev_rss_reta_update,
5300 	.reta_query                 = txgbe_dev_rss_reta_query,
5301 	.rss_hash_update            = txgbe_dev_rss_hash_update,
5302 	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
5303 	.flow_ops_get               = txgbe_dev_flow_ops_get,
5304 	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
5305 	.rxq_info_get               = txgbe_rxq_info_get,
5306 	.txq_info_get               = txgbe_txq_info_get,
5307 	.timesync_enable            = txgbe_timesync_enable,
5308 	.timesync_disable           = txgbe_timesync_disable,
5309 	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
5310 	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
5311 	.get_reg                    = txgbe_get_regs,
5312 	.get_eeprom_length          = txgbe_get_eeprom_length,
5313 	.get_eeprom                 = txgbe_get_eeprom,
5314 	.set_eeprom                 = txgbe_set_eeprom,
5315 	.get_module_info            = txgbe_get_module_info,
5316 	.get_module_eeprom          = txgbe_get_module_eeprom,
5317 	.get_dcb_info               = txgbe_dev_get_dcb_info,
5318 	.timesync_adjust_time       = txgbe_timesync_adjust_time,
5319 	.timesync_read_time         = txgbe_timesync_read_time,
5320 	.timesync_write_time        = txgbe_timesync_write_time,
5321 	.udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
5322 	.udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
5323 	.tm_ops_get                 = txgbe_tm_ops_get,
5324 	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
5325 };
5326 
5327 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
5328 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
5329 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
5330 RTE_PMD_REGISTER_PARAM_STRING(net_txgbe,
5331 			      TXGBE_DEVARG_BP_AUTO "=<0|1>"
5332 			      TXGBE_DEVARG_KR_POLL "=<0|1>"
5333 			      TXGBE_DEVARG_KR_PRESENT "=<0|1>"
5334 			      TXGBE_DEVARG_KX_SGMII "=<0|1>"
5335 			      TXGBE_DEVARG_FFE_SET "=<0-4>"
5336 			      TXGBE_DEVARG_FFE_MAIN "=<uint16>"
5337 			      TXGBE_DEVARG_FFE_PRE "=<uint16>"
5338 			      TXGBE_DEVARG_FFE_POST "=<uint16>");
5339 
5340 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
5341 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
5342 RTE_LOG_REGISTER(txgbe_logtype_bp, pmd.net.txgbe.bp, NOTICE);
5343 
5344 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
5345 	RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
5346 #endif
5347 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
5348 	RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
5349 #endif
5350 
5351 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
5352 	RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
5353 #endif
5354