/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_ethdev_pci.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
#include "txgbe_regs_group.h"

static const struct reg_info txgbe_regs_general[] = {
	{TXGBE_RST, 1, 1, "TXGBE_RST"},
	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_dcb[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_diagnostic[] = {
	{0, 0, 0, ""}
};

/* PF registers */
static const struct reg_info *txgbe_regs_others[] = {
				txgbe_regs_general,
				txgbe_regs_nvm,
				txgbe_regs_interrupt,
				txgbe_regs_fctl_others,
				txgbe_regs_rxdma,
				txgbe_regs_rx,
				txgbe_regs_tx,
				txgbe_regs_wakeup,
				txgbe_regs_dcb,
				txgbe_regs_mac,
				txgbe_regs_diagnostic,
				NULL};

static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

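/*
 * Helpers for the per-queue VLAN-strip shadow bitmap: queue q maps to bit
 * (q % N) of word (q / N) in h->bitmap[], where N is the bit width of one
 * bitmap word.
 */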
#define TXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define TXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

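/*
 * Extended statistics: each entry pairs an xstat name with the offset of
 * the backing counter inside struct txgbe_hw_stats.
 */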
#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* Flow Director */
	HW_XSTAT(flow_director_added_filters),
	HW_XSTAT(flow_director_removed_filters),
	HW_XSTAT(flow_director_filter_add_errors),
	HW_XSTAT(flow_director_filter_remove_errors),
	HW_XSTAT(flow_director_matched_filters),
	HW_XSTAT(flow_director_missed_filters),

	/* FCoE */
	HW_XSTAT(rx_fcoe_crc_errors),
	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
	HW_XSTAT(rx_fcoe_dropped),
	HW_XSTAT(rx_fcoe_packets),
	HW_XSTAT(tx_fcoe_packets),
	HW_XSTAT(rx_fcoe_bytes),
	HW_XSTAT(tx_fcoe_bytes),
	HW_XSTAT(rx_fcoe_no_ddp),
	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
			   sizeof(rte_txgbe_stats_strings[0]))

/* Per-priority statistics */
#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
	UP_XSTAT(rx_up_packets),
	UP_XSTAT(tx_up_packets),
	UP_XSTAT(rx_up_bytes),
	UP_XSTAT(tx_up_bytes),
	UP_XSTAT(rx_up_drop_packets),

	UP_XSTAT(tx_up_xon_packets),
	UP_XSTAT(rx_up_xon_packets),
	UP_XSTAT(tx_up_xoff_packets),
	UP_XSTAT(rx_up_xoff_packets),
	UP_XSTAT(rx_up_dropped),
	UP_XSTAT(rx_up_mbuf_alloc_errors),
	UP_XSTAT(tx_up_xon2off_packets),
};

#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
			   sizeof(rte_txgbe_up_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
			   sizeof(rte_txgbe_qp_strings[0]))

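/* Report whether the attached PHY is a known SFP module type. */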
static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:
		return 1;
	default:
		return 0;
	}
}

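/*
 * Reset the hardware, then set the PF Reset Done bit so PF/VF mailbox
 * ops can work again. A missing SFP module is not treated as an error.
 */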
static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
	txgbe_flush(hw);

	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

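/*
 * Enable interrupts: program the misc interrupt enable register and write
 * the full mask to both queue interrupt mask-clear registers.
 */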
static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}

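/*
 * Record the mapping of an RX/TX queue to a statistics counter index in
 * the driver's shadow copies of the RQSM/TQSM mapping registers.
 */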
static int
txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_stat_mappings *stat_mappings =
		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if (hw->mac.type != txgbe_mac_raptor)
		return -ENOSYS;

	/* reject stat indexes that do not fit in a QMAP field */
	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
		return -EIO;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= TXGBE_NB_STAT_MAPPING) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsm[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsm[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
	return 0;
}

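/*
 * Fill in the default DCB configuration: an equal bandwidth share per
 * traffic class, PFC disabled, and every user priority mapped to TC0.
 */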
static void
txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
{
	int i;
	u8 bwgp;
	struct txgbe_dcb_tc_config *tc;

	UNREFERENCED_PARAMETER(hw);

	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->pfc = txgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
	}
	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities */
	dcb_config->support.capabilities = 0xFF;
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
txgbe_swfw_lock_reset(struct txgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These locks are trickier since they are common to all ports; but
	 * swfw_sync retries for long enough (1s) to be almost sure that, if
	 * the lock cannot be taken, it is due to an improper lock of the
	 * semaphore.
	 */
	mask = TXGBE_MNGSEM_SWPHY |
	       TXGBE_MNGSEM_SWMBX |
	       TXGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

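/*
 * Per-device init for the PF: set up eth_dev ops and burst functions,
 * reserve the interrupt status block, bring up the base driver (shared
 * code), allocate MAC address storage and hook up the interrupt handler.
 */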
static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	uint16_t csum;
	int err, i, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		/* The TX function used in the primary process is set by the
		 * last queue initialized; TX queues may not have been
		 * initialized by the primary process yet.
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
		16, TXGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

	/* Initialize DCB configuration */
	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
	txgbe_dcb_init(hw, dcb_config);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = txgbe_fc_full;
	hw->fc.current_mode = txgbe_fc_full;
	hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
		hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
	}
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);

	/*
	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in txgbe_identify_phy() for all devices,
	 * but for non-copper devices, txgbe_identify_sfp_module() is
	 * also called. See txgbe_identify_phy(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the BIOS. A delay of 200ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		rte_delay_ms(200);
		err = hw->mac.init_hw(hw);
	}

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
		err = 0;

	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	}
	if (err) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	txgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	txgbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = txgbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
	txgbe_flush(hw);

	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   txgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	txgbe_enable_intr(eth_dev);

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));

	return 0;
}

static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbe_dev_close(eth_dev);

	return 0;
}

static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da);
		if (retval)
			return retval;
	} else {
		memset(&eth_da, 0, sizeof(eth_da));
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct txgbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_txgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	return 0;
}

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return -ENODEV;

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};

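/*
 * Set or clear one VLAN ID in the hardware VLAN filter table: bits [11:5]
 * of the VLAN ID select the VFTA entry and bits [4:0] the bit within it.
 */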
static int
txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

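/*
 * Per-queue VLAN strip can only be changed while the ring is disabled:
 * if the queue is running, stop it, update RXCFG and restart it.
 */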
static void
txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		txgbe_vlan_hw_strip_enable(dev, queue);
	else
		txgbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			!(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg |= TXGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg &= ~TXGBE_RXCFG_VLAN;
	}
	rxcfg &= ~TXGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		txgbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
		txgbe_dev_rx_queue_start(dev, queue);
	}
}

static int
txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, TXGBE_PORTCTL);

	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR, "Inner type is not supported"
				    " by single VLAN");
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_LSB_MASK,
				TXGBE_TAGTPID_LSB(tpid));
		}
		break;
	case ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, TXGBE_EXTAG,
				TXGBE_EXTAG_VLAN_MASK,
				TXGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_MSB_MASK,
				TXGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);
}

void
txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
	vlnctrl |= TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
	struct txgbe_rx_queue *rxq;

	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		TXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = PKT_RX_VLAN;
		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl &= ~TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record this HW strip setting for the queue */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl |= TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record this HW strip setting for the queue */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
	ctrl &= ~TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}

static void
txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl  = rd32(hw, TXGBE_PORTCTL);
	ctrl |= TXGBE_PORTCTL_VLANEXT;
	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
	    txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ctrl |= TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}

void
txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct txgbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			txgbe_vlan_strip_queue_set(dev, i, 1);
		else
			txgbe_vlan_strip_queue_set(dev, i, 0);
	}
}

void
txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct txgbe_rx_queue *rxq;

	if (mask & ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK)
		txgbe_vlan_hw_strip_config(dev);

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			txgbe_vlan_hw_filter_enable(dev);
		else
			txgbe_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			txgbe_vlan_hw_extend_enable(dev);
		else
			txgbe_vlan_hw_extend_disable(dev);
	}

	return 0;
}

static int
txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	txgbe_config_vlan_strip_on_all_queues(dev, mask);

	txgbe_vlan_offload_config(dev, mask);

	return 0;
}

static void
txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);

	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
	wr32(hw, TXGBE_VLANCTL, vlanctrl);
}

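/*
 * With SR-IOV plus RSS, each pool may own 1, 2 or 4 RX queues; derive the
 * number of active pools and the queue layout from the requested count.
 */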
static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	switch (nb_rx_q) {
	case 1:
	case 2:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
		break;
	case 4:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
		break;
	default:
		return -EINVAL;
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	return 0;
}

static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
					" unsupported mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case ETH_MQ_RX_RSS:
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						" invalid queue number"
						" for VMDQ RSS, allowed"
						" values are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if no mq mode is configured, use the default scheme */
			dev->data->dev_conf.rxmode.mq_mode =
				ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode =
				ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" nb_rx_q=%d nb_tx_q=%d queue number"
					" must be less than or equal to %d.",
					nb_rx_q, nb_tx_q,
					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
					  " not supported.");
			return -EINVAL;
		}
		/* check configuration for VMDq+DCB mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools must be %d or %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
						 TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools != %d and"
						" nb_queue_pools != %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}
	}
	return 0;
}

static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = txgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation Rx preconditions, we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	uint32_t gpie;

	gpie = rd32(hw, TXGBE_GPIOINTEN);
	gpie |= TXGBE_GPIOBIT_6;
	wr32(hw, TXGBE_GPIOINTEN, gpie);
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}

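/*
 * Program a per-VF TX rate limit on the queues selected by q_msk, after
 * checking that the summed rate of all VFs stays within the link speed.
 */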
int
txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			uint16_t tx_rate, uint64_t q_msk)
{
	struct txgbe_hw *hw;
	struct txgbe_vf_info *vfinfo;
	struct rte_eth_link link;
	uint8_t  nb_q_per_pool;
	uint32_t queue_stride;
	uint32_t queue_idx, idx = 0, vf_idx;
	uint32_t queue_end;
	uint16_t total_rate = 0;
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
	if (ret < 0)
		return ret;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (tx_rate > link.link_speed)
		return -EINVAL;

	if (q_msk == 0)
		return 0;

	hw = TXGBE_DEV_HW(dev);
	vfinfo = *(TXGBE_DEV_VFDATA(dev));
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	queue_idx = vf * queue_stride;
	queue_end = queue_idx + nb_q_per_pool - 1;
	if (queue_end >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (vfinfo) {
		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
			if (vf_idx == vf)
				continue;
			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
				idx++)
				total_rate += vfinfo[vf_idx].tx_rate[idx];
		}
	} else {
		return -EINVAL;
	}

	/* Store tx_rate for this vf. */
	for (idx = 0; idx < nb_q_per_pool; idx++) {
		if (((uint64_t)0x1 << idx) & q_msk) {
			if (vfinfo[vf].tx_rate[idx] != tx_rate)
				vfinfo[vf].tx_rate[idx] = tx_rate;
			total_rate += tx_rate;
		}
	}

	if (total_rate > dev->data->dev_link.link_speed) {
		/* Reset the stored TX rates for this VF if the total would
		 * exceed the link speed.
		 */
		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
		return -EINVAL;
	}

	/* Set ARBTXRATE of each queue/pool for vf X */
	for (; queue_idx <= queue_end; queue_idx++) {
		if (0x1 & q_msk)
			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
		q_msk = q_msk >> 1;
	}

	return 0;
}

/*
 * Configure device link speed and set up the link.
 * It returns 0 on success.
 */
static int
txgbe_dev_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint16_t vf, idx;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* TXGBE devices don't support:
	 *    - half duplex (checked afterwards for valid speeds)
	 *    - fixed speed: TODO implement
	 */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fixed speed not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	txgbe_stop_hw(hw);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = txgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	txgbe_pf_host_configure(dev);

	txgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until rx interrupt */
	txgbe_configure_msix(dev);

	/* initialize transmission unit */
	txgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = txgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	err = txgbe_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		txgbe_vmdq_vlan_hw_filter_enable(dev);
	}

	/* Configure DCB hw */
	txgbe_configure_pb(dev);
	txgbe_configure_port(dev);
	txgbe_configure_dcb(dev);

	/* Restore vf rate limit */
	if (vfinfo != NULL) {
		for (vf = 0; vf < pci_dev->max_vfs; vf++)
			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					txgbe_set_vf_rate_limit(dev, vf,
						vfinfo[vf].tx_rate[idx],
						1 << idx);
	}

	err = txgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.setup_sfp(hw);
		if (err)
			goto error;
	}

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	dev->data->dev_link.link_status = link_up;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_10G;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed = (TXGBE_LINK_SPEED_100M_FULL |
			 TXGBE_LINK_SPEED_1GB_FULL |
			 TXGBE_LINK_SPEED_10GB_FULL);
	} else {
		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= TXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= TXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= TXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= TXGBE_LINK_SPEED_100M_FULL;
	}

	err = hw->mac.setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
		txgbe_dev_macsec_interrupt_setup(dev);
		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     txgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		txgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	txgbe_enable_intr(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	txgbe_dev_link_update(dev, 0);

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	txgbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
txgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int vf;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	txgbe_disable_intr(hw);

	/* reset the NIC */
	txgbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	txgbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
	}

	txgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   txgbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->rss_reta_updated = 0;
	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

1723 /*
1724  * Reset and stop device.
1725  */
1726 static int
txgbe_dev_close(struct rte_eth_dev * dev)1727 txgbe_dev_close(struct rte_eth_dev *dev)
1728 {
1729 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1730 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1731 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1732 	int retries = 0;
1733 	int ret;
1734 
1735 	PMD_INIT_FUNC_TRACE();
1736 
1737 	txgbe_pf_reset_hw(hw);
1738 
1739 	ret = txgbe_dev_stop(dev);
1740 
1741 	txgbe_dev_free_queues(dev);
1742 
1743 	/* reprogram the RAR[0] in case user changed it. */
1744 	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);
1745 
1746 	/* Unlock any pending hardware semaphore */
1747 	txgbe_swfw_lock_reset(hw);
1748 
1749 	/* disable uio intr before callback unregister */
1750 	rte_intr_disable(intr_handle);
1751 
1752 	do {
1753 		ret = rte_intr_callback_unregister(intr_handle,
1754 				txgbe_dev_interrupt_handler, dev);
1755 		if (ret >= 0 || ret == -ENOENT) {
1756 			break;
1757 		} else if (ret != -EAGAIN) {
1758 			PMD_INIT_LOG(ERR,
1759 				"intr callback unregister failed: %d",
1760 				ret);
1761 		}
1762 		rte_delay_ms(100);
1763 	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));
1764 
1765 	/* cancel the delayed handler before removing the dev */
1766 	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);
1767 
1768 	/* uninitialize PF if max_vfs not zero */
1769 	txgbe_pf_host_uninit(dev);
1770 
1771 	rte_free(dev->data->mac_addrs);
1772 	dev->data->mac_addrs = NULL;
1773 
1774 	rte_free(dev->data->hash_mac_addrs);
1775 	dev->data->hash_mac_addrs = NULL;
1776 
1777 	return ret;
1778 }
1779 
1780 /*
1781  * Reset PF device.
1782  */
1783 static int
1784 txgbe_dev_reset(struct rte_eth_dev *dev)
1785 {
1786 	int ret;
1787 
1788 	/* When a DPDK PMD PF begins to reset the PF port, it should notify
1789 	 * all its VFs to make them align with it. The detailed notification
1790 	 * mechanism is PMD specific. For the txgbe PF it is rather complex,
1791 	 * so to avoid unexpected behavior in the VFs, reset of the PF with
1792 	 * SR-IOV active is currently not supported. It might be added later.
1793 	 */
1794 	if (dev->data->sriov.active)
1795 		return -ENOTSUP;
1796 
1797 	ret = eth_txgbe_dev_uninit(dev);
1798 	if (ret)
1799 		return ret;
1800 
1801 	ret = eth_txgbe_dev_init(dev, NULL);
1802 
1803 	return ret;
1804 }
1805 
1806 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter)     \
1807 	{                                                       \
1808 		uint32_t current_counter = rd32(hw, reg);       \
1809 		if (current_counter < last_counter)             \
1810 			current_counter += 0x100000000LL;       \
1811 		if (!hw->offset_loaded)                         \
1812 			last_counter = current_counter;         \
1813 		counter = current_counter - last_counter;       \
1814 		counter &= 0xFFFFFFFFLL;                        \
1815 	}
1816 
1817 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1818 	{                                                                \
1819 		uint64_t current_counter_lsb = rd32(hw, reg_lsb);        \
1820 		uint64_t current_counter_msb = rd32(hw, reg_msb);        \
1821 		uint64_t current_counter = (current_counter_msb << 32) | \
1822 			current_counter_lsb;                             \
1823 		if (current_counter < last_counter)                      \
1824 			current_counter += 0x1000000000LL;               \
1825 		if (!hw->offset_loaded)                                  \
1826 			last_counter = current_counter;                  \
1827 		counter = current_counter - last_counter;                \
1828 		counter &= 0xFFFFFFFFFLL;                                \
1829 	}
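/*
 * Worked example of the wrap handling above (illustrative values): if
 * the last 32-bit read was 0xFFFFFFF0 and the current read is
 * 0x00000010, the counter has wrapped, so 0x100000000 is added before
 * subtracting: 0x100000010 - 0xFFFFFFF0 = 0x20 packets since the last
 * read. The 36-bit variant applies the same correction with a
 * 0x1000000000 rollover for the split LSB/MSB octet counters.
 */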
1830 
1831 void
1832 txgbe_read_stats_registers(struct txgbe_hw *hw,
1833 			   struct txgbe_hw_stats *hw_stats)
1834 {
1835 	unsigned int i;
1836 
1837 	/* QP Stats */
1838 	for (i = 0; i < hw->nb_rx_queues; i++) {
1839 		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
1840 			hw->qp_last[i].rx_qp_packets,
1841 			hw_stats->qp[i].rx_qp_packets);
1842 		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
1843 			hw->qp_last[i].rx_qp_bytes,
1844 			hw_stats->qp[i].rx_qp_bytes);
1845 		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
1846 			hw->qp_last[i].rx_qp_mc_packets,
1847 			hw_stats->qp[i].rx_qp_mc_packets);
1848 	}
1849 
1850 	for (i = 0; i < hw->nb_tx_queues; i++) {
1851 		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
1852 			hw->qp_last[i].tx_qp_packets,
1853 			hw_stats->qp[i].tx_qp_packets);
1854 		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
1855 			hw->qp_last[i].tx_qp_bytes,
1856 			hw_stats->qp[i].tx_qp_bytes);
1857 	}
1858 	/* PB Stats */
1859 	for (i = 0; i < TXGBE_MAX_UP; i++) {
1860 		hw_stats->up[i].rx_up_xon_packets +=
1861 				rd32(hw, TXGBE_PBRXUPXON(i));
1862 		hw_stats->up[i].rx_up_xoff_packets +=
1863 				rd32(hw, TXGBE_PBRXUPXOFF(i));
1864 		hw_stats->up[i].tx_up_xon_packets +=
1865 				rd32(hw, TXGBE_PBTXUPXON(i));
1866 		hw_stats->up[i].tx_up_xoff_packets +=
1867 				rd32(hw, TXGBE_PBTXUPXOFF(i));
1868 		hw_stats->up[i].tx_up_xon2off_packets +=
1869 				rd32(hw, TXGBE_PBTXUPOFF(i));
1870 		hw_stats->up[i].rx_up_dropped +=
1871 				rd32(hw, TXGBE_PBRXMISS(i));
1872 	}
1873 	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
1874 	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
1875 	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
1876 	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);
1877 
1878 	/* DMA Stats */
1879 	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
1880 	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);
1881 
1882 	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
1883 	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
1884 	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
1885 
1886 	/* MAC Stats */
1887 	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
1888 	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
1889 	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);
1890 
1891 	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
1892 	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
1893 	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);
1894 
1895 	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
1896 	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);
1897 
1898 	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
1899 	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
1900 	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
1901 	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
1902 	hw_stats->rx_size_512_to_1023_packets +=
1903 			rd64(hw, TXGBE_MACRX512TO1023L);
1904 	hw_stats->rx_size_1024_to_max_packets +=
1905 			rd64(hw, TXGBE_MACRX1024TOMAXL);
1906 	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
1907 	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
1908 	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
1909 	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
1910 	hw_stats->tx_size_512_to_1023_packets +=
1911 			rd64(hw, TXGBE_MACTX512TO1023L);
1912 	hw_stats->tx_size_1024_to_max_packets +=
1913 			rd64(hw, TXGBE_MACTX1024TOMAXL);
1914 
1915 	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
1916 	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
1917 	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);
1918 
1919 	/* MNG Stats */
1920 	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
1921 	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
1922 	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
1923 	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);
1924 
1925 	/* FCoE Stats */
1926 	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
1927 	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
1928 	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
1929 	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
1930 	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
1931 	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
1932 	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);
1933 
1934 	/* Flow Director Stats */
1935 	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
1936 	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
1937 	hw_stats->flow_director_added_filters +=
1938 		TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
1939 	hw_stats->flow_director_removed_filters +=
1940 		TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
1941 	hw_stats->flow_director_filter_add_errors +=
1942 		TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
1943 	hw_stats->flow_director_filter_remove_errors +=
1944 		TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));
1945 
1946 	/* MACsec Stats */
1947 	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
1948 	hw_stats->tx_macsec_pkts_encrypted +=
1949 			rd32(hw, TXGBE_LSECTX_ENCPKT);
1950 	hw_stats->tx_macsec_pkts_protected +=
1951 			rd32(hw, TXGBE_LSECTX_PROTPKT);
1952 	hw_stats->tx_macsec_octets_encrypted +=
1953 			rd32(hw, TXGBE_LSECTX_ENCOCT);
1954 	hw_stats->tx_macsec_octets_protected +=
1955 			rd32(hw, TXGBE_LSECTX_PROTOCT);
1956 	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
1957 	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
1958 	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
1959 	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
1960 	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
1961 	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
1962 	hw_stats->rx_macsec_sc_pkts_unchecked +=
1963 			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
1964 	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
1965 	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
1966 	for (i = 0; i < 2; i++) {
1967 		hw_stats->rx_macsec_sa_pkts_ok +=
1968 			rd32(hw, TXGBE_LSECRX_OKPKT(i));
1969 		hw_stats->rx_macsec_sa_pkts_invalid +=
1970 			rd32(hw, TXGBE_LSECRX_INVPKT(i));
1971 		hw_stats->rx_macsec_sa_pkts_notvalid +=
1972 			rd32(hw, TXGBE_LSECRX_BADPKT(i));
1973 	}
1974 	hw_stats->rx_macsec_sa_pkts_unusedsa +=
1975 			rd32(hw, TXGBE_LSECRX_INVSAPKT);
1976 	hw_stats->rx_macsec_sa_pkts_notusingsa +=
1977 			rd32(hw, TXGBE_LSECRX_BADSAPKT);
1978 
1979 	hw_stats->rx_total_missed_packets = 0;
1980 	for (i = 0; i < TXGBE_MAX_UP; i++) {
1981 		hw_stats->rx_total_missed_packets +=
1982 			hw_stats->up[i].rx_up_dropped;
1983 	}
1984 }
1985 
1986 static int
1987 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1988 {
1989 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1990 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
1991 	struct txgbe_stat_mappings *stat_mappings =
1992 			TXGBE_DEV_STAT_MAPPINGS(dev);
1993 	uint32_t i, j;
1994 
1995 	txgbe_read_stats_registers(hw, hw_stats);
1996 
1997 	if (stats == NULL)
1998 		return -EINVAL;
1999 
2000 	/* Fill out the rte_eth_stats statistics structure */
2001 	stats->ipackets = hw_stats->rx_packets;
2002 	stats->ibytes = hw_stats->rx_bytes;
2003 	stats->opackets = hw_stats->tx_packets;
2004 	stats->obytes = hw_stats->tx_bytes;
2005 
2006 	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
2007 	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
2008 	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
2009 	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
2010 	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
2011 	for (i = 0; i < TXGBE_MAX_QP; i++) {
2012 		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
2013 		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
2014 		uint32_t q_map;
2015 
2016 		q_map = (stat_mappings->rqsm[n] >> offset)
2017 				& QMAP_FIELD_RESERVED_BITS_MASK;
2018 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2019 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2020 		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
2021 		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;
2022 
2023 		q_map = (stat_mappings->tqsm[n] >> offset)
2024 				& QMAP_FIELD_RESERVED_BITS_MASK;
2025 		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
2026 		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
2027 		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
2028 		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
2029 	}
2030 
2031 	/* Rx Errors */
2032 	stats->imissed  = hw_stats->rx_total_missed_packets;
2033 	stats->ierrors  = hw_stats->rx_crc_errors +
2034 			  hw_stats->rx_mac_short_packet_dropped +
2035 			  hw_stats->rx_length_errors +
2036 			  hw_stats->rx_undersize_errors +
2037 			  hw_stats->rx_oversize_errors +
2038 			  hw_stats->rx_drop_packets +
2039 			  hw_stats->rx_illegal_byte_errors +
2040 			  hw_stats->rx_error_bytes +
2041 			  hw_stats->rx_fragment_errors +
2042 			  hw_stats->rx_fcoe_crc_errors +
2043 			  hw_stats->rx_fcoe_mbuf_allocation_errors;
2044 
2045 	/* Tx Errors */
2046 	stats->oerrors  = 0;
2047 	return 0;
2048 }
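/*
 * Illustrative note on the queue mapping above, assuming 32-bit QSM
 * registers holding four 8-bit map fields each (i.e.
 * NB_QMAP_FIELDS_PER_QSM_REG == 4, an assumption not visible here):
 * queue 10 would read byte 2 of rqsm[2]/tqsm[2], and the extracted
 * q_map selects which of the RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue
 * counters accumulates that queue's packets and bytes.
 */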
2049 
2050 static int
2051 txgbe_dev_stats_reset(struct rte_eth_dev *dev)
2052 {
2053 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2054 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2055 
2056 	/* HW registers are cleared on read */
2057 	hw->offset_loaded = 0;
2058 	txgbe_dev_stats_get(dev, NULL);
2059 	hw->offset_loaded = 1;
2060 
2061 	/* Reset software totals */
2062 	memset(hw_stats, 0, sizeof(*hw_stats));
2063 
2064 	return 0;
2065 }
2066 
2067 /* This function calculates the number of xstats based on the current config */
2068 static unsigned
2069 txgbe_xstats_calc_num(struct rte_eth_dev *dev)
2070 {
2071 	int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2072 	return TXGBE_NB_HW_STATS +
2073 	       TXGBE_NB_UP_STATS * TXGBE_MAX_UP +
2074 	       TXGBE_NB_QP_STATS * nb_queues;
2075 }
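/*
 * The xstats id space is laid out as [hw stats][per-UP stats]
 * [per-queue stats]: id 0 is the first hw stat, the first priority
 * stat follows at TXGBE_NB_HW_STATS, and the first queue stat at
 * TXGBE_NB_HW_STATS + TXGBE_NB_UP_STATS * TXGBE_MAX_UP. The helpers
 * txgbe_get_name_by_id()/txgbe_get_offset_by_id() below decode ids
 * using exactly this layout.
 */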
2076 
2077 static inline int
2078 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size)
2079 {
2080 	int nb, st;
2081 
2082 	/* Extended stats from txgbe_hw_stats */
2083 	if (id < TXGBE_NB_HW_STATS) {
2084 		snprintf(name, size, "[hw]%s",
2085 			rte_txgbe_stats_strings[id].name);
2086 		return 0;
2087 	}
2088 	id -= TXGBE_NB_HW_STATS;
2089 
2090 	/* Priority Stats */
2091 	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2092 		nb = id / TXGBE_NB_UP_STATS;
2093 		st = id % TXGBE_NB_UP_STATS;
2094 		snprintf(name, size, "[p%u]%s", nb,
2095 			rte_txgbe_up_strings[st].name);
2096 		return 0;
2097 	}
2098 	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2099 
2100 	/* Queue Stats */
2101 	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2102 		nb = id / TXGBE_NB_QP_STATS;
2103 		st = id % TXGBE_NB_QP_STATS;
2104 		snprintf(name, size, "[q%u]%s", nb,
2105 			rte_txgbe_qp_strings[st].name);
2106 		return 0;
2107 	}
2108 	id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
2109 
2110 	return -(int)(id + 1);
2111 }
2112 
2113 static inline int
2114 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
2115 {
2116 	int nb, st;
2117 
2118 	/* Extended stats from txgbe_hw_stats */
2119 	if (id < TXGBE_NB_HW_STATS) {
2120 		*offset = rte_txgbe_stats_strings[id].offset;
2121 		return 0;
2122 	}
2123 	id -= TXGBE_NB_HW_STATS;
2124 
2125 	/* Priority Stats */
2126 	if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) {
2127 		nb = id / TXGBE_NB_UP_STATS;
2128 		st = id % TXGBE_NB_UP_STATS;
2129 		*offset = rte_txgbe_up_strings[st].offset +
2130 			nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t));
2131 		return 0;
2132 	}
2133 	id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP;
2134 
2135 	/* Queue Stats */
2136 	if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) {
2137 		nb = id / TXGBE_NB_QP_STATS;
2138 		st = id % TXGBE_NB_QP_STATS;
2139 		*offset = rte_txgbe_qp_strings[st].offset +
2140 			nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
2141 		return 0;
2142 	}
2143 
2144 	return -1;
2145 }
2146 
2147 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
2148 	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
2149 {
2150 	unsigned int i, count;
2151 
2152 	count = txgbe_xstats_calc_num(dev);
2153 	if (xstats_names == NULL)
2154 		return count;
2155 
2156 	/* Note: limit >= cnt_stats checked upstream
2157 	 * in rte_eth_xstats_names()
2158 	 */
2159 	limit = min(limit, count);
2160 
2161 	/* Extended stats from txgbe_hw_stats */
2162 	for (i = 0; i < limit; i++) {
2163 		if (txgbe_get_name_by_id(i, xstats_names[i].name,
2164 			sizeof(xstats_names[i].name))) {
2165 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2166 			break;
2167 		}
2168 	}
2169 
2170 	return i;
2171 }
2172 
2173 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
2174 	struct rte_eth_xstat_name *xstats_names,
2175 	const uint64_t *ids,
2176 	unsigned int limit)
2177 {
2178 	unsigned int i;
2179 
2180 	if (ids == NULL)
2181 		return txgbe_dev_xstats_get_names(dev, xstats_names, limit);
2182 
2183 	for (i = 0; i < limit; i++) {
2184 		if (txgbe_get_name_by_id(ids[i], xstats_names[i].name,
2185 				sizeof(xstats_names[i].name))) {
2186 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2187 			return -1;
2188 		}
2189 	}
2190 
2191 	return i;
2192 }
2193 
2194 static int
2195 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2196 					 unsigned int limit)
2197 {
2198 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2199 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2200 	unsigned int i, count;
2201 
2202 	txgbe_read_stats_registers(hw, hw_stats);
2203 
2204 	/* If this is a reset, xstats is NULL and we have already cleared
2205 	 * the registers by reading them.
2206 	 */
2207 	count = txgbe_xstats_calc_num(dev);
2208 	if (xstats == NULL)
2209 		return count;
2210 
2211 	limit = min(limit, txgbe_xstats_calc_num(dev));
2212 
2213 	/* Extended stats from txgbe_hw_stats */
2214 	for (i = 0; i < limit; i++) {
2215 		uint32_t offset = 0;
2216 
2217 		if (txgbe_get_offset_by_id(i, &offset)) {
2218 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2219 			break;
2220 		}
2221 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset);
2222 		xstats[i].id = i;
2223 	}
2224 
2225 	return i;
2226 }
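/*
 * A minimal application-side sketch (assuming port_id refers to an
 * initialized txgbe port): query the count first, then fetch values.
 *
 *   int n = rte_eth_xstats_get(port_id, NULL, 0);
 *   struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *   if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n)
 *       ; // xs[i].id and xs[i].value are filled via the callback above
 *   free(xs);
 */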
2227 
2228 static int
2229 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values,
2230 					 unsigned int limit)
2231 {
2232 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2233 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2234 	unsigned int i, count;
2235 
2236 	txgbe_read_stats_registers(hw, hw_stats);
2237 
2238 	/* If this is a reset, values is NULL and we have already cleared
2239 	 * the registers by reading them.
2240 	 */
2241 	count = txgbe_xstats_calc_num(dev);
2242 	if (values == NULL)
2243 		return count;
2244 
2245 	limit = min(limit, txgbe_xstats_calc_num(dev));
2246 
2247 	/* Extended stats from txgbe_hw_stats */
2248 	for (i = 0; i < limit; i++) {
2249 		uint32_t offset;
2250 
2251 		if (txgbe_get_offset_by_id(i, &offset)) {
2252 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2253 			break;
2254 		}
2255 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2256 	}
2257 
2258 	return i;
2259 }
2260 
2261 static int
2262 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2263 		uint64_t *values, unsigned int limit)
2264 {
2265 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2266 	unsigned int i;
2267 
2268 	if (ids == NULL)
2269 		return txgbe_dev_xstats_get_(dev, values, limit);
2270 
2271 	for (i = 0; i < limit; i++) {
2272 		uint32_t offset;
2273 
2274 		if (txgbe_get_offset_by_id(ids[i], &offset)) {
2275 			PMD_INIT_LOG(WARNING, "id value %d isn't valid", i);
2276 			break;
2277 		}
2278 		values[i] = *(uint64_t *)(((char *)hw_stats) + offset);
2279 	}
2280 
2281 	return i;
2282 }
2283 
2284 static int
2285 txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
2286 {
2287 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2288 	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
2289 
2290 	/* HW registers are cleared on read */
2291 	hw->offset_loaded = 0;
2292 	txgbe_read_stats_registers(hw, hw_stats);
2293 	hw->offset_loaded = 1;
2294 
2295 	/* Reset software totals */
2296 	memset(hw_stats, 0, sizeof(*hw_stats));
2297 
2298 	return 0;
2299 }
2300 
2301 static int
2302 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2303 {
2304 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2305 	u16 eeprom_verh, eeprom_verl;
2306 	u32 etrack_id;
2307 	int ret;
2308 
2309 	hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
2310 	hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
2311 
2312 	etrack_id = (eeprom_verh << 16) | eeprom_verl;
2313 	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
2314 
2315 	ret += 1; /* add the size of '\0' */
2316 	if (fw_size < (u32)ret)
2317 		return ret;
2318 	else
2319 		return 0;
2320 }
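/*
 * Example: with eeprom_verh = 0x1234 and eeprom_verl = 0x5678 the
 * reported string is "0x12345678" (10 characters), so callers need
 * fw_size >= 11 to also hold the terminating '\0'; otherwise the
 * required size is returned instead of 0.
 */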
2321 
2322 static int
2323 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2324 {
2325 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2326 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2327 
2328 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
2329 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
2330 	dev_info->min_rx_bufsize = 1024;
2331 	dev_info->max_rx_pktlen = 15872;
2332 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
2333 	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
2334 	dev_info->max_vfs = pci_dev->max_vfs;
2335 	dev_info->max_vmdq_pools = ETH_64_POOLS;
2336 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
2337 	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
2338 	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
2339 				     dev_info->rx_queue_offload_capa);
2340 	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
2341 	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
2342 
2343 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
2344 		.rx_thresh = {
2345 			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
2346 			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
2347 			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
2348 		},
2349 		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
2350 		.rx_drop_en = 0,
2351 		.offloads = 0,
2352 	};
2353 
2354 	dev_info->default_txconf = (struct rte_eth_txconf) {
2355 		.tx_thresh = {
2356 			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
2357 			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
2358 			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
2359 		},
2360 		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
2361 		.offloads = 0,
2362 	};
2363 
2364 	dev_info->rx_desc_lim = rx_desc_lim;
2365 	dev_info->tx_desc_lim = tx_desc_lim;
2366 
2367 	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
2368 	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2369 	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
2370 
2371 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
2372 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
2373 
2374 	/* Driver-preferred Rx/Tx parameters */
2375 	dev_info->default_rxportconf.burst_size = 32;
2376 	dev_info->default_txportconf.burst_size = 32;
2377 	dev_info->default_rxportconf.nb_queues = 1;
2378 	dev_info->default_txportconf.nb_queues = 1;
2379 	dev_info->default_rxportconf.ring_size = 256;
2380 	dev_info->default_txportconf.ring_size = 256;
2381 
2382 	return 0;
2383 }
2384 
2385 const uint32_t *
2386 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2387 {
2388 	if (dev->rx_pkt_burst == txgbe_recv_pkts ||
2389 	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc ||
2390 	    dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc ||
2391 	    dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc)
2392 		return txgbe_get_supported_ptypes();
2393 
2394 	return NULL;
2395 }
2396 
2397 void
2398 txgbe_dev_setup_link_alarm_handler(void *param)
2399 {
2400 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2401 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2402 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2403 	u32 speed;
2404 	bool autoneg = false;
2405 
2406 	speed = hw->phy.autoneg_advertised;
2407 	if (!speed)
2408 		hw->mac.get_link_capabilities(hw, &speed, &autoneg);
2409 
2410 	hw->mac.setup_link(hw, speed, true);
2411 
2412 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2413 }
2414 
2415 /* return 0 means link status changed, -1 means not changed */
2416 int
2417 txgbe_dev_link_update_share(struct rte_eth_dev *dev,
2418 			    int wait_to_complete)
2419 {
2420 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2421 	struct rte_eth_link link;
2422 	u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN;
2423 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2424 	bool link_up;
2425 	int err;
2426 	int wait = 1;
2427 
2428 	memset(&link, 0, sizeof(link));
2429 	link.link_status = ETH_LINK_DOWN;
2430 	link.link_speed = ETH_SPEED_NUM_NONE;
2431 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
2432 	link.link_autoneg = ETH_LINK_AUTONEG;
2433 
2434 	hw->mac.get_link_status = true;
2435 
2436 	if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG)
2437 		return rte_eth_linkstatus_set(dev, &link);
2438 
2439 	/* check if it needs to wait to complete, if lsc interrupt is enabled */
2440 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
2441 		wait = 0;
2442 
2443 	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
2444 
2445 	if (err != 0) {
2446 		link.link_speed = ETH_SPEED_NUM_100M;
2447 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
2448 		return rte_eth_linkstatus_set(dev, &link);
2449 	}
2450 
2451 	if (link_up == 0) {
2452 		if (hw->phy.media_type == txgbe_media_type_fiber) {
2453 			intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
2454 			rte_eal_alarm_set(10,
2455 				txgbe_dev_setup_link_alarm_handler, dev);
2456 		}
2457 		return rte_eth_linkstatus_set(dev, &link);
2458 	}
2459 
2460 	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
2461 	link.link_status = ETH_LINK_UP;
2462 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
2463 
2464 	switch (link_speed) {
2465 	default:
2466 	case TXGBE_LINK_SPEED_UNKNOWN:
2467 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
2468 		link.link_speed = ETH_SPEED_NUM_100M;
2469 		break;
2470 
2471 	case TXGBE_LINK_SPEED_100M_FULL:
2472 		link.link_speed = ETH_SPEED_NUM_100M;
2473 		break;
2474 
2475 	case TXGBE_LINK_SPEED_1GB_FULL:
2476 		link.link_speed = ETH_SPEED_NUM_1G;
2477 		break;
2478 
2479 	case TXGBE_LINK_SPEED_2_5GB_FULL:
2480 		link.link_speed = ETH_SPEED_NUM_2_5G;
2481 		break;
2482 
2483 	case TXGBE_LINK_SPEED_5GB_FULL:
2484 		link.link_speed = ETH_SPEED_NUM_5G;
2485 		break;
2486 
2487 	case TXGBE_LINK_SPEED_10GB_FULL:
2488 		link.link_speed = ETH_SPEED_NUM_10G;
2489 		break;
2490 	}
2491 
2492 	return rte_eth_linkstatus_set(dev, &link);
2493 }
2494 
2495 static int
2496 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2497 {
2498 	return txgbe_dev_link_update_share(dev, wait_to_complete);
2499 }
2500 
2501 static int
2502 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
2503 {
2504 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2505 	uint32_t fctrl;
2506 
2507 	fctrl = rd32(hw, TXGBE_PSRCTL);
2508 	fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
2509 	wr32(hw, TXGBE_PSRCTL, fctrl);
2510 
2511 	return 0;
2512 }
2513 
2514 static int
2515 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
2516 {
2517 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2518 	uint32_t fctrl;
2519 
2520 	fctrl = rd32(hw, TXGBE_PSRCTL);
2521 	fctrl &= (~TXGBE_PSRCTL_UCP);
2522 	if (dev->data->all_multicast == 1)
2523 		fctrl |= TXGBE_PSRCTL_MCP;
2524 	else
2525 		fctrl &= (~TXGBE_PSRCTL_MCP);
2526 	wr32(hw, TXGBE_PSRCTL, fctrl);
2527 
2528 	return 0;
2529 }
2530 
2531 static int
2532 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
2533 {
2534 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2535 	uint32_t fctrl;
2536 
2537 	fctrl = rd32(hw, TXGBE_PSRCTL);
2538 	fctrl |= TXGBE_PSRCTL_MCP;
2539 	wr32(hw, TXGBE_PSRCTL, fctrl);
2540 
2541 	return 0;
2542 }
2543 
2544 static int
2545 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
2546 {
2547 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2548 	uint32_t fctrl;
2549 
2550 	if (dev->data->promiscuous == 1)
2551 		return 0; /* must remain in all_multicast mode */
2552 
2553 	fctrl = rd32(hw, TXGBE_PSRCTL);
2554 	fctrl &= (~TXGBE_PSRCTL_MCP);
2555 	wr32(hw, TXGBE_PSRCTL, fctrl);
2556 
2557 	return 0;
2558 }
2559 
2560 /**
2561  * It clears the interrupt causes and enables the interrupt.
2562  * It will be called only once during NIC initialization.
2563  *
2564  * @param dev
2565  *  Pointer to struct rte_eth_dev.
2566  * @param on
2567  *  Enable or Disable.
2568  *
2569  * @return
2570  *  - On success, zero.
2571  *  - On failure, a negative value.
2572  */
2573 static int
2574 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2575 {
2576 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2577 
2578 	txgbe_dev_link_status_print(dev);
2579 	if (on)
2580 		intr->mask_misc |= TXGBE_ICRMISC_LSC;
2581 	else
2582 		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2583 
2584 	return 0;
2585 }
2586 
2587 /**
2588  * It clears the interrupt causes and enables the interrupt.
2589  * It will be called only once during NIC initialization.
2590  *
2591  * @param dev
2592  *  Pointer to struct rte_eth_dev.
2593  *
2594  * @return
2595  *  - On success, zero.
2596  *  - On failure, a negative value.
2597  */
2598 static int
2599 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
2600 {
2601 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2602 
2603 	intr->mask[0] |= TXGBE_ICR_MASK;
2604 	intr->mask[1] |= TXGBE_ICR_MASK;
2605 
2606 	return 0;
2607 }
2608 
2609 /**
2610  * It clears the interrupt causes and enables the interrupt.
2611  * It will be called only once during NIC initialization.
2612  *
2613  * @param dev
2614  *  Pointer to struct rte_eth_dev.
2615  *
2616  * @return
2617  *  - On success, zero.
2618  *  - On failure, a negative value.
2619  */
2620 static int
2621 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
2622 {
2623 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2624 
2625 	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;
2626 
2627 	return 0;
2628 }
2629 
2630 /*
2631  * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update.
2632  *
2633  * @param dev
2634  *  Pointer to struct rte_eth_dev.
2635  *
2636  * @return
2637  *  - On success, zero.
2638  *  - On failure, a negative value.
2639  */
2640 static int
2641 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
2642 {
2643 	uint32_t eicr;
2644 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2645 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2646 
2647 	/* clear all cause mask */
2648 	txgbe_disable_intr(hw);
2649 
2650 	/* read-on-clear nic registers here */
2651 	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2652 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
2653 
2654 	intr->flags = 0;
2655 
2656 	/* set flag for async link update */
2657 	if (eicr & TXGBE_ICRMISC_LSC)
2658 		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
2659 
2660 	if (eicr & TXGBE_ICRMISC_VFMBX)
2661 		intr->flags |= TXGBE_FLAG_MAILBOX;
2662 
2663 	if (eicr & TXGBE_ICRMISC_LNKSEC)
2664 		intr->flags |= TXGBE_FLAG_MACSEC;
2665 
2666 	if (eicr & TXGBE_ICRMISC_GPIO)
2667 		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;
2668 
2669 	return 0;
2670 }
2671 
2672 /**
2673  * It gets and then prints the link status.
2674  *
2675  * @param dev
2676  *  Pointer to struct rte_eth_dev.
2677  *
2678  * @return
2679  *  void
2681  */
2682 static void
2683 txgbe_dev_link_status_print(struct rte_eth_dev *dev)
2684 {
2685 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2686 	struct rte_eth_link link;
2687 
2688 	rte_eth_linkstatus_get(dev, &link);
2689 
2690 	if (link.link_status) {
2691 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
2692 					(int)(dev->data->port_id),
2693 					(unsigned int)link.link_speed,
2694 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2695 					"full-duplex" : "half-duplex");
2696 	} else {
2697 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
2698 				(int)(dev->data->port_id));
2699 	}
2700 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
2701 				pci_dev->addr.domain,
2702 				pci_dev->addr.bus,
2703 				pci_dev->addr.devid,
2704 				pci_dev->addr.function);
2705 }
2706 
2707 /*
2708  * It executes link_update after knowing an interrupt occurred.
2709  *
2710  * @param dev
2711  *  Pointer to struct rte_eth_dev.
2712  *
2713  * @return
2714  *  - On success, zero.
2715  *  - On failure, a negative value.
2716  */
2717 static int
2718 txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
2719 			   struct rte_intr_handle *intr_handle)
2720 {
2721 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2722 	int64_t timeout;
2723 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2724 
2725 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
2726 
2727 	if (intr->flags & TXGBE_FLAG_MAILBOX) {
2728 		txgbe_pf_mbx_process(dev);
2729 		intr->flags &= ~TXGBE_FLAG_MAILBOX;
2730 	}
2731 
2732 	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2733 		hw->phy.handle_lasi(hw);
2734 		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2735 	}
2736 
2737 	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2738 		struct rte_eth_link link;
2739 
2740 		/* get the link status before link update, to predict what follows */
2741 		rte_eth_linkstatus_get(dev, &link);
2742 
2743 		txgbe_dev_link_update(dev, 0);
2744 
2745 		/* link likely to come up */
2746 		if (!link.link_status)
2747 			/* handle it 1 sec later, waiting for it to stabilize */
2748 			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
2749 		/* link likely to go down */
2750 		else
2751 			/* handle it 4 sec later, waiting for it to stabilize */
2752 			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
2753 
2754 		txgbe_dev_link_status_print(dev);
2755 		if (rte_eal_alarm_set(timeout * 1000,
2756 				      txgbe_dev_interrupt_delayed_handler,
2757 				      (void *)dev) < 0) {
2758 			PMD_DRV_LOG(ERR, "Error setting alarm");
2759 		} else {
2760 			/* remember original mask */
2761 			intr->mask_misc_orig = intr->mask_misc;
2762 			/* only disable lsc interrupt */
2763 			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
2764 		}
2765 	}
2766 
2767 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
2768 	txgbe_enable_intr(dev);
2769 	rte_intr_enable(intr_handle);
2770 
2771 	return 0;
2772 }
2773 
2774 /**
2775  * Interrupt handler to be registered as an alarm callback for delayed
2776  * handling of a specific interrupt, waiting for the NIC state to become
2777  * stable. As the txgbe interrupt state is not stable right after the
2778  * link goes down, it needs to wait 4 seconds to get a stable status.
2779  *
2780  * @param handle
2781  *  Pointer to interrupt handle.
2782  * @param param
2783  *  The address of parameter (struct rte_eth_dev *) registered before.
2784  *
2785  * @return
2786  *  void
2787  */
2788 static void
2789 txgbe_dev_interrupt_delayed_handler(void *param)
2790 {
2791 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2792 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2793 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2794 	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
2795 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2796 	uint32_t eicr;
2797 
2798 	txgbe_disable_intr(hw);
2799 
2800 	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
2801 	if (eicr & TXGBE_ICRMISC_VFMBX)
2802 		txgbe_pf_mbx_process(dev);
2803 
2804 	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
2805 		hw->phy.handle_lasi(hw);
2806 		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
2807 	}
2808 
2809 	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
2810 		txgbe_dev_link_update(dev, 0);
2811 		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
2812 		txgbe_dev_link_status_print(dev);
2813 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2814 					      NULL);
2815 	}
2816 
2817 	if (intr->flags & TXGBE_FLAG_MACSEC) {
2818 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
2819 					      NULL);
2820 		intr->flags &= ~TXGBE_FLAG_MACSEC;
2821 	}
2822 
2823 	/* restore original mask */
2824 	intr->mask_misc = intr->mask_misc_orig;
2825 	intr->mask_misc_orig = 0;
2826 
2827 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
2828 	txgbe_enable_intr(dev);
2829 	rte_intr_enable(intr_handle);
2830 }
2831 
2832 /**
2833  * Interrupt handler triggered by the NIC for handling
2834  * a specific interrupt.
2835  *
2836  * @param handle
2837  *  Pointer to interrupt handle.
2838  * @param param
2839  *  The address of parameter (struct rte_eth_dev *) registered before.
2840  *
2841  * @return
2842  *  void
2843  */
2844 static void
2845 txgbe_dev_interrupt_handler(void *param)
2846 {
2847 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2848 
2849 	txgbe_dev_interrupt_get_status(dev);
2850 	txgbe_dev_interrupt_action(dev, dev->intr_handle);
2851 }
2852 
2853 static int
2854 txgbe_dev_led_on(struct rte_eth_dev *dev)
2855 {
2856 	struct txgbe_hw *hw;
2857 
2858 	hw = TXGBE_DEV_HW(dev);
2859 	return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
2860 }
2861 
2862 static int
2863 txgbe_dev_led_off(struct rte_eth_dev *dev)
2864 {
2865 	struct txgbe_hw *hw;
2866 
2867 	hw = TXGBE_DEV_HW(dev);
2868 	return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
2869 }
2870 
2871 static int
2872 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2873 {
2874 	struct txgbe_hw *hw;
2875 	uint32_t mflcn_reg;
2876 	uint32_t fccfg_reg;
2877 	int rx_pause;
2878 	int tx_pause;
2879 
2880 	hw = TXGBE_DEV_HW(dev);
2881 
2882 	fc_conf->pause_time = hw->fc.pause_time;
2883 	fc_conf->high_water = hw->fc.high_water[0];
2884 	fc_conf->low_water = hw->fc.low_water[0];
2885 	fc_conf->send_xon = hw->fc.send_xon;
2886 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
2887 
2888 	/*
2889 	 * Return rx_pause status according to actual setting of
2890 	 * RXFCCFG register.
2891 	 */
2892 	mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
2893 	if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
2894 		rx_pause = 1;
2895 	else
2896 		rx_pause = 0;
2897 
2898 	/*
2899 	 * Return tx_pause status according to actual setting of
2900 	 * TXFCCFG register.
2901 	 */
2902 	fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
2903 	if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
2904 		tx_pause = 1;
2905 	else
2906 		tx_pause = 0;
2907 
2908 	if (rx_pause && tx_pause)
2909 		fc_conf->mode = RTE_FC_FULL;
2910 	else if (rx_pause)
2911 		fc_conf->mode = RTE_FC_RX_PAUSE;
2912 	else if (tx_pause)
2913 		fc_conf->mode = RTE_FC_TX_PAUSE;
2914 	else
2915 		fc_conf->mode = RTE_FC_NONE;
2916 
2917 	return 0;
2918 }
2919 
2920 static int
2921 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2922 {
2923 	struct txgbe_hw *hw;
2924 	int err;
2925 	uint32_t rx_buf_size;
2926 	uint32_t max_high_water;
2927 	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
2928 		txgbe_fc_none,
2929 		txgbe_fc_rx_pause,
2930 		txgbe_fc_tx_pause,
2931 		txgbe_fc_full
2932 	};
2933 
2934 	PMD_INIT_FUNC_TRACE();
2935 
2936 	hw = TXGBE_DEV_HW(dev);
2937 	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
2938 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2939 
2940 	/*
2941 	 * Reserve at least one Ethernet frame for the watermark;
2942 	 * high_water/low_water are in kilobytes for txgbe.
2943 	 */
2944 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
2945 	if (fc_conf->high_water > max_high_water ||
2946 	    fc_conf->high_water < fc_conf->low_water) {
2947 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
2948 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
2949 		return -EINVAL;
2950 	}
2951 
2952 	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
2953 	hw->fc.pause_time     = fc_conf->pause_time;
2954 	hw->fc.high_water[0]  = fc_conf->high_water;
2955 	hw->fc.low_water[0]   = fc_conf->low_water;
2956 	hw->fc.send_xon       = fc_conf->send_xon;
2957 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
2958 
2959 	err = txgbe_fc_enable(hw);
2960 
2961 	/* Not negotiated is not an error case */
2962 	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
2963 		wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
2964 		      (fc_conf->mac_ctrl_frame_fwd
2965 		       ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
2966 		txgbe_flush(hw);
2967 
2968 		return 0;
2969 	}
2970 
2971 	PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
2972 	return -EIO;
2973 }
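/*
 * A minimal configuration sketch (values illustrative; port_id is
 * assumed to be a started txgbe port). Water marks are in kilobytes
 * and must respect the max_high_water bound computed above:
 *
 *   struct rte_eth_fc_conf fc = {
 *       .mode = RTE_FC_FULL,
 *       .high_water = 0x80,
 *       .low_water = 0x40,
 *       .pause_time = 0x680,
 *       .send_xon = 1,
 *       .autoneg = 1,
 *   };
 *   rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */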
2974 
2975 static int
2976 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
2977 		struct rte_eth_pfc_conf *pfc_conf)
2978 {
2979 	int err;
2980 	uint32_t rx_buf_size;
2981 	uint32_t max_high_water;
2982 	uint8_t tc_num;
2983 	uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
2984 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2985 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
2986 
2987 	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
2988 		txgbe_fc_none,
2989 		txgbe_fc_rx_pause,
2990 		txgbe_fc_tx_pause,
2991 		txgbe_fc_full
2992 	};
2993 
2994 	PMD_INIT_FUNC_TRACE();
2995 
2996 	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
2997 	tc_num = map[pfc_conf->priority];
2998 	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
2999 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3000 	/*
3001 	 * Reserve at least one Ethernet frame for the watermark;
3002 	 * high_water/low_water are in kilobytes for txgbe.
3003 	 */
3004 	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
3005 	if (pfc_conf->fc.high_water > max_high_water ||
3006 	    pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
3007 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
3008 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
3009 		return -EINVAL;
3010 	}
3011 
3012 	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
3013 	hw->fc.pause_time = pfc_conf->fc.pause_time;
3014 	hw->fc.send_xon = pfc_conf->fc.send_xon;
3015 	hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
3016 	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
3017 
3018 	err = txgbe_dcb_pfc_enable(hw, tc_num);
3019 
3020 	/* Not negotiated is not an error case */
3021 	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
3022 		return 0;
3023 
3024 	PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
3025 	return -EIO;
3026 }
3027 
3028 int
3029 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
3030 			  struct rte_eth_rss_reta_entry64 *reta_conf,
3031 			  uint16_t reta_size)
3032 {
3033 	uint8_t i, j, mask;
3034 	uint32_t reta;
3035 	uint16_t idx, shift;
3036 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3037 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3038 
3039 	PMD_INIT_FUNC_TRACE();
3040 
3041 	if (!txgbe_rss_update_sp(hw->mac.type)) {
3042 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
3043 			"NIC.");
3044 		return -ENOTSUP;
3045 	}
3046 
3047 	if (reta_size != ETH_RSS_RETA_SIZE_128) {
3048 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3049 			"(%d) doesn't match the number hardware can supported "
3050 			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3051 		return -EINVAL;
3052 	}
3053 
3054 	for (i = 0; i < reta_size; i += 4) {
3055 		idx = i / RTE_RETA_GROUP_SIZE;
3056 		shift = i % RTE_RETA_GROUP_SIZE;
3057 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3058 		if (!mask)
3059 			continue;
3060 
3061 		reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3062 		for (j = 0; j < 4; j++) {
3063 			if (RS8(mask, j, 0x1)) {
3064 				reta  &= ~(MS32(8 * j, 0xFF));
3065 				reta |= LS32(reta_conf[idx].reta[shift + j],
3066 						8 * j, 0xFF);
3067 			}
3068 		}
3069 		wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3070 	}
3071 	adapter->rss_reta_updated = 1;
3072 
3073 	return 0;
3074 }
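/*
 * Entry addressing above: reta_conf[] is split into 64-entry groups
 * (RTE_RETA_GROUP_SIZE), so table index 70 lives at
 * reta_conf[1].reta[6], gated by bit 6 of reta_conf[1].mask, and is
 * written into byte 2 of RSSTBL word 70 >> 2 == 17.
 */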
3075 
3076 int
3077 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
3078 			 struct rte_eth_rss_reta_entry64 *reta_conf,
3079 			 uint16_t reta_size)
3080 {
3081 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3082 	uint8_t i, j, mask;
3083 	uint32_t reta;
3084 	uint16_t idx, shift;
3085 
3086 	PMD_INIT_FUNC_TRACE();
3087 
3088 	if (reta_size != ETH_RSS_RETA_SIZE_128) {
3089 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3090 			"(%d) doesn't match the number hardware can supported "
3091 			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3092 		return -EINVAL;
3093 	}
3094 
3095 	for (i = 0; i < reta_size; i += 4) {
3096 		idx = i / RTE_RETA_GROUP_SIZE;
3097 		shift = i % RTE_RETA_GROUP_SIZE;
3098 		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
3099 		if (!mask)
3100 			continue;
3101 
3102 		reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
3103 		for (j = 0; j < 4; j++) {
3104 			if (RS8(mask, j, 0x1))
3105 				reta_conf[idx].reta[shift + j] =
3106 					(uint16_t)RS32(reta, 8 * j, 0xFF);
3107 		}
3108 	}
3109 
3110 	return 0;
3111 }
3112 
3113 static int
3114 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
3115 				uint32_t index, uint32_t pool)
3116 {
3117 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3118 	uint32_t enable_addr = 1;
3119 
3120 	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
3121 			     pool, enable_addr);
3122 }
3123 
3124 static void
3125 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
3126 {
3127 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3128 
3129 	txgbe_clear_rar(hw, index);
3130 }
3131 
3132 static int
3133 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3134 {
3135 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3136 
3137 	txgbe_remove_rar(dev, 0);
3138 	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
3139 
3140 	return 0;
3141 }
3142 
3143 static int
3144 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
3145 {
3146 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3147 	struct rte_eth_dev_info dev_info;
3148 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3149 	struct rte_eth_dev_data *dev_data = dev->data;
3150 	int ret;
3151 
3152 	ret = txgbe_dev_info_get(dev, &dev_info);
3153 	if (ret != 0)
3154 		return ret;
3155 
3156 	/* check that mtu is within the allowed range */
3157 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
3158 		return -EINVAL;
3159 
3160 	/* If device is started, refuse mtu that requires the support of
3161 	 * scattered packets when this feature has not been enabled before.
3162 	 */
3163 	if (dev_data->dev_started && !dev_data->scattered_rx &&
3164 	    (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
3165 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
3166 		PMD_INIT_LOG(ERR, "Stop port first.");
3167 		return -EINVAL;
3168 	}
3169 
3170 	/* update max frame size */
3171 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
3172 
3173 	if (hw->mode)
3174 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3175 			TXGBE_FRAME_SIZE_MAX);
3176 	else
3177 		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
3178 			TXGBE_FRMSZ_MAX(frame_size));
3179 
3180 	return 0;
3181 }
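/*
 * Example: mtu = 1500 gives frame_size = 1500 + 14 (Ethernet header)
 * + 4 (CRC) = 1518 bytes, which is programmed into TXGBE_FRMSZ; MTUs
 * are accepted up to the max_rx_pktlen (15872) reported above.
 */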
3182 
3183 static uint32_t
3184 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
3185 {
3186 	uint32_t vector = 0;
3187 
3188 	switch (hw->mac.mc_filter_type) {
3189 	case 0:   /* use bits [47:36] of the address */
3190 		vector = ((uc_addr->addr_bytes[4] >> 4) |
3191 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
3192 		break;
3193 	case 1:   /* use bits [46:35] of the address */
3194 		vector = ((uc_addr->addr_bytes[4] >> 3) |
3195 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
3196 		break;
3197 	case 2:   /* use bits [45:34] of the address */
3198 		vector = ((uc_addr->addr_bytes[4] >> 2) |
3199 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
3200 		break;
3201 	case 3:   /* use bits [43:32] of the address */
3202 		vector = ((uc_addr->addr_bytes[4]) |
3203 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
3204 		break;
3205 	default:  /* Invalid mc_filter_type */
3206 		break;
3207 	}
3208 
3209 	/* vector can only be 12-bits or boundary will be exceeded */
3210 	vector &= 0xFFF;
3211 	return vector;
3212 }
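/*
 * Worked example for mc_filter_type == 0: with addr_bytes[4] = 0x12
 * and addr_bytes[5] = 0x34, vector = (0x12 >> 4) | (0x34 << 4) =
 * 0x341, i.e. the 12 bits taken from address bits [47:36].
 */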
3213 
3214 static int
3215 txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
3216 			struct rte_ether_addr *mac_addr, uint8_t on)
3217 {
3218 	uint32_t vector;
3219 	uint32_t uta_idx;
3220 	uint32_t reg_val;
3221 	uint32_t uta_mask;
3222 	uint32_t psrctl;
3223 
3224 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3225 	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3226 
3227 	/* The UTA table only exists on pf hardware */
3228 	if (hw->mac.type < txgbe_mac_raptor)
3229 		return -ENOTSUP;
3230 
3231 	vector = txgbe_uta_vector(hw, mac_addr);
3232 	uta_idx = (vector >> 5) & 0x7F;
3233 	uta_mask = 0x1UL << (vector & 0x1F);
3234 
3235 	if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
3236 		return 0;
3237 
3238 	reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
3239 	if (on) {
3240 		uta_info->uta_in_use++;
3241 		reg_val |= uta_mask;
3242 		uta_info->uta_shadow[uta_idx] |= uta_mask;
3243 	} else {
3244 		uta_info->uta_in_use--;
3245 		reg_val &= ~uta_mask;
3246 		uta_info->uta_shadow[uta_idx] &= ~uta_mask;
3247 	}
3248 
3249 	wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val);
3250 
3251 	psrctl = rd32(hw, TXGBE_PSRCTL);
3252 	if (uta_info->uta_in_use > 0)
3253 		psrctl |= TXGBE_PSRCTL_UCHFENA;
3254 	else
3255 		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3256 
3257 	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3258 	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3259 	wr32(hw, TXGBE_PSRCTL, psrctl);
3260 
3261 	return 0;
3262 }
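/*
 * Hash-to-register decomposition above: the 12-bit vector selects one
 * 32-bit UTA word (vector >> 5) and one bit within it (vector & 0x1F);
 * e.g. vector 0x341 sets bit 1 of UCADDRTBL(26).
 */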
3263 
3264 static int
3265 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
3266 {
3267 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3268 	struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
3269 	uint32_t psrctl;
3270 	int i;
3271 
3272 	/* The UTA table only exists on pf hardware */
3273 	if (hw->mac.type < txgbe_mac_raptor)
3274 		return -ENOTSUP;
3275 
3276 	if (on) {
3277 		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3278 			uta_info->uta_shadow[i] = ~0;
3279 			wr32(hw, TXGBE_UCADDRTBL(i), ~0);
3280 		}
3281 	} else {
3282 		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
3283 			uta_info->uta_shadow[i] = 0;
3284 			wr32(hw, TXGBE_UCADDRTBL(i), 0);
3285 		}
3286 	}
3287 
3288 	psrctl = rd32(hw, TXGBE_PSRCTL);
3289 	if (on)
3290 		psrctl |= TXGBE_PSRCTL_UCHFENA;
3291 	else
3292 		psrctl &= ~TXGBE_PSRCTL_UCHFENA;
3293 
3294 	psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK;
3295 	psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type);
3296 	wr32(hw, TXGBE_PSRCTL, psrctl);
3297 
3298 	return 0;
3299 }
3300 
3301 uint32_t
3302 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
3303 {
3304 	uint32_t new_val = orig_val;
3305 
3306 	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
3307 		new_val |= TXGBE_POOLETHCTL_UTA;
3308 	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
3309 		new_val |= TXGBE_POOLETHCTL_MCHA;
3310 	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
3311 		new_val |= TXGBE_POOLETHCTL_UCHA;
3312 	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
3313 		new_val |= TXGBE_POOLETHCTL_BCA;
3314 	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
3315 		new_val |= TXGBE_POOLETHCTL_MCP;
3316 
3317 	return new_val;
3318 }
3319 
3320 static int
3321 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
3322 {
3323 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3324 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3325 	uint32_t mask;
3326 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3327 
3328 	if (queue_id < 32) {
3329 		mask = rd32(hw, TXGBE_IMS(0));
3330 		mask &= (1 << queue_id);
3331 		wr32(hw, TXGBE_IMS(0), mask);
3332 	} else if (queue_id < 64) {
3333 		mask = rd32(hw, TXGBE_IMS(1));
3334 		mask &= (1 << (queue_id - 32));
3335 		wr32(hw, TXGBE_IMS(1), mask);
3336 	}
3337 	rte_intr_enable(intr_handle);
3338 
3339 	return 0;
3340 }
3341 
3342 static int
3343 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
3344 {
3345 	uint32_t mask;
3346 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3347 
3348 	if (queue_id < 32) {
3349 		mask = rd32(hw, TXGBE_IMS(0));
3350 		mask &= ~(1 << queue_id);
3351 		wr32(hw, TXGBE_IMS(0), mask);
3352 	} else if (queue_id < 64) {
3353 		mask = rd32(hw, TXGBE_IMS(1));
3354 		mask &= ~(1 << (queue_id - 32));
3355 		wr32(hw, TXGBE_IMS(1), mask);
3356 	}
3357 
3358 	return 0;
3359 }
3360 
3361 /**
3362  * set the IVAR registers, mapping interrupt causes to vectors
3363  * @param hw
3364  *  pointer to txgbe_hw struct
3365  * @direction
3366  *  0 for Rx, 1 for Tx, -1 for other causes
3367  * @queue
3368  *  queue to map the corresponding interrupt to
3369  * @msix_vector
3370  *  the vector to map to the corresponding queue
3371  */
3372 void
3373 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
3374 		   uint8_t queue, uint8_t msix_vector)
3375 {
3376 	uint32_t tmp, idx;
3377 
3378 	if (direction == -1) {
3379 		/* other causes */
3380 		msix_vector |= TXGBE_IVARMISC_VLD;
3381 		idx = 0;
3382 		tmp = rd32(hw, TXGBE_IVARMISC);
3383 		tmp &= ~(0xFF << idx);
3384 		tmp |= (msix_vector << idx);
3385 		wr32(hw, TXGBE_IVARMISC, tmp);
3386 	} else {
3387 		/* rx or tx causes */
3388 		/* Workround for ICR lost */
3389 		idx = ((16 * (queue & 1)) + (8 * direction));
3390 		tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
3391 		tmp &= ~(0xFF << idx);
3392 		tmp |= (msix_vector << idx);
3393 		wr32(hw, TXGBE_IVAR(queue >> 1), tmp);
3394 	}
3395 }
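/*
 * IVAR layout example: each 32-bit IVAR register covers two queues
 * with one Rx and one Tx cause byte each. For Rx (direction 0) on
 * queue 5, idx = 16 * (5 & 1) + 8 * 0 = 16, so the vector lands in
 * byte 2 of IVAR(5 >> 1) == IVAR(2).
 */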
3396 
3397 /**
3398  * Sets up the hardware to properly generate MSI-X interrupts
3399  * @hw
3400  *  board private structure
3401  */
3402 static void
3403 txgbe_configure_msix(struct rte_eth_dev *dev)
3404 {
3405 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3406 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3407 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3408 	uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
3409 	uint32_t vec = TXGBE_MISC_VEC_ID;
3410 	uint32_t gpie;
3411 
3412 	/* Won't configure the MSI-X register if no mapping is done
3413 	 * between intr vector and event fd,
3414 	 * but if MSI-X has been enabled already, we need to configure
3415 	 * auto clean, auto mask and throttling.
3416 	 */
3417 	gpie = rd32(hw, TXGBE_GPIE);
3418 	if (!rte_intr_dp_is_en(intr_handle) &&
3419 	    !(gpie & TXGBE_GPIE_MSIX))
3420 		return;
3421 
3422 	if (rte_intr_allow_others(intr_handle)) {
3423 		base = TXGBE_RX_VEC_START;
3424 		vec = base;
3425 	}
3426 
3427 	/* setup GPIE for MSI-x mode */
3428 	gpie = rd32(hw, TXGBE_GPIE);
3429 	gpie |= TXGBE_GPIE_MSIX;
3430 	wr32(hw, TXGBE_GPIE, gpie);
3431 
3432 	/* Populate the IVAR table and set the ITR values to the
3433 	 * corresponding register.
3434 	 */
3435 	if (rte_intr_dp_is_en(intr_handle)) {
3436 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
3437 			queue_id++) {
3438 			/* by default, 1:1 mapping */
3439 			txgbe_set_ivar_map(hw, 0, queue_id, vec);
3440 			intr_handle->intr_vec[queue_id] = vec;
3441 			if (vec < base + intr_handle->nb_efd - 1)
3442 				vec++;
3443 		}
3444 
3445 		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
3446 	}
3447 	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
3448 			TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
3449 			| TXGBE_ITR_WRDSA);
3450 }
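
/*
 * Sketch (illustrative, hypothetical helper name): the IVAR/ITR
 * programming above only happens when the application asked for per-queue
 * Rx interrupts at configure time. A minimal configuration, assuming a
 * valid port_id and queue counts:
 */
static int
example_enable_rxq_intr(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* Request an event fd per Rx queue; txgbe_configure_msix() then
	 * populates the IVAR table when the device is started. */
	conf.intr_conf.rxq = 1;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}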
3451 
3452 int
3453 txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
3454 			   uint16_t queue_idx, uint16_t tx_rate)
3455 {
3456 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3457 	uint32_t bcnrc_val;
3458 
3459 	if (queue_idx >= hw->mac.max_tx_queues)
3460 		return -EINVAL;
3461 
3462 	if (tx_rate != 0) {
3463 		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
3464 		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
3465 	} else {
3466 		bcnrc_val = 0;
3467 	}
3468 
3469 	/*
3470 	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
3471 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3472 	 */
3473 	wr32(hw, TXGBE_ARBTXMMW, 0x14);
3474 
3475 	/* Set ARBTXRATE of queue X */
3476 	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
3477 	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
3478 	txgbe_flush(hw);
3479 
3480 	return 0;
3481 }
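
/*
 * Usage sketch (illustrative, hypothetical helper name): the rate limiter
 * above is exposed through rte_eth_set_queue_rate_limit(); rates are in
 * Mbps and must fit the 16-bit argument.
 */
static int
example_cap_tx_queue(uint16_t port_id)
{
	/* Cap Tx queue 0 at 1000 Mbps; resolves to
	 * txgbe_set_queue_rate_limit() via eth_dev_ops. */
	return rte_eth_set_queue_rate_limit(port_id, 0, 1000);
}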
3482 
3483 static u8 *
3484 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
3485 			u8 **mc_addr_ptr, u32 *vmdq)
3486 {
3487 	u8 *mc_addr;
3488 
3489 	*vmdq = 0;
3490 	mc_addr = *mc_addr_ptr;
3491 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
3492 	return mc_addr;
3493 }
3494 
3495 int
3496 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
3497 			  struct rte_ether_addr *mc_addr_set,
3498 			  uint32_t nb_mc_addr)
3499 {
3500 	struct txgbe_hw *hw;
3501 	u8 *mc_addr_list;
3502 
3503 	hw = TXGBE_DEV_HW(dev);
3504 	mc_addr_list = (u8 *)mc_addr_set;
3505 	return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
3506 					 txgbe_dev_addr_list_itr, TRUE);
3507 }
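
/*
 * Usage sketch (illustrative, hypothetical helper name): replacing the
 * multicast filter from an application; the two group addresses below are
 * arbitrary examples.
 */
static int
example_set_mc_filter(uint16_t port_id)
{
	struct rte_ether_addr mc[2] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	/* Dispatches to txgbe_dev_set_mc_addr_list(). */
	return rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
}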
3508 
3509 static uint64_t
3510 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
3511 {
3512 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3513 	uint64_t systime_cycles;
3514 
3515 	systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
3516 	systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
3517 
3518 	return systime_cycles;
3519 }
3520 
3521 static uint64_t
3522 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
3523 {
3524 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3525 	uint64_t rx_tstamp_cycles;
3526 
3527 	/* TSRXSTMPL holds the low 32 bits, TSRXSTMPH the high 32 bits. */
3528 	rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
3529 	rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
3530 
3531 	return rx_tstamp_cycles;
3532 }
3533 
3534 static uint64_t
3535 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
3536 {
3537 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3538 	uint64_t tx_tstamp_cycles;
3539 
3540 	/* TSTXSTMPL holds the low 32 bits, TSTXSTMPH the high 32 bits. */
3541 	tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
3542 	tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
3543 
3544 	return tx_tstamp_cycles;
3545 }
3546 
3547 static void
3548 txgbe_start_timecounters(struct rte_eth_dev *dev)
3549 {
3550 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3551 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3552 	struct rte_eth_link link;
3553 	uint32_t incval = 0;
3554 	uint32_t shift = 0;
3555 
3556 	/* Get current link speed. */
3557 	txgbe_dev_link_update(dev, 1);
3558 	rte_eth_linkstatus_get(dev, &link);
3559 
3560 	switch (link.link_speed) {
3561 	case ETH_SPEED_NUM_100M:
3562 		incval = TXGBE_INCVAL_100;
3563 		shift = TXGBE_INCVAL_SHIFT_100;
3564 		break;
3565 	case ETH_SPEED_NUM_1G:
3566 		incval = TXGBE_INCVAL_1GB;
3567 		shift = TXGBE_INCVAL_SHIFT_1GB;
3568 		break;
3569 	case ETH_SPEED_NUM_10G:
3570 	default:
3571 		incval = TXGBE_INCVAL_10GB;
3572 		shift = TXGBE_INCVAL_SHIFT_10GB;
3573 		break;
3574 	}
3575 
3576 	wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
3577 
3578 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
3579 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3580 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
3581 
3582 	adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
3583 	adapter->systime_tc.cc_shift = shift;
3584 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
3585 
3586 	adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
3587 	adapter->rx_tstamp_tc.cc_shift = shift;
3588 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3589 
3590 	adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
3591 	adapter->tx_tstamp_tc.cc_shift = shift;
3592 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
3593 }
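
/*
 * Note (editor's illustration): with the increment value programmed above,
 * the SYSTIME counter ticks in units of 2^-shift ns, so
 * rte_timecounter_update() recovers nanoseconds roughly as
 *   ns += (cycle_now - cycle_last) >> cc_shift;
 * which is why cc_shift and nsec_mask are derived from the same
 * link-speed dependent shift.
 */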
3594 
3595 static int
3596 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
3597 {
3598 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3599 
3600 	adapter->systime_tc.nsec += delta;
3601 	adapter->rx_tstamp_tc.nsec += delta;
3602 	adapter->tx_tstamp_tc.nsec += delta;
3603 
3604 	return 0;
3605 }
3606 
3607 static int
3608 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
3609 {
3610 	uint64_t ns;
3611 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3612 
3613 	ns = rte_timespec_to_ns(ts);
3614 	/* Set the timecounters to a new value. */
3615 	adapter->systime_tc.nsec = ns;
3616 	adapter->rx_tstamp_tc.nsec = ns;
3617 	adapter->tx_tstamp_tc.nsec = ns;
3618 
3619 	return 0;
3620 }
3621 
3622 static int
3623 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
3624 {
3625 	uint64_t ns, systime_cycles;
3626 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3627 
3628 	systime_cycles = txgbe_read_systime_cyclecounter(dev);
3629 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
3630 	*ts = rte_ns_to_timespec(ns);
3631 
3632 	return 0;
3633 }
3634 
3635 static int
3636 txgbe_timesync_enable(struct rte_eth_dev *dev)
3637 {
3638 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3639 	uint32_t tsync_ctl;
3640 
3641 	/* Stop the timesync system time. */
3642 	wr32(hw, TXGBE_TSTIMEINC, 0x0);
3643 	/* Reset the timesync system time value. */
3644 	wr32(hw, TXGBE_TSTIMEL, 0x0);
3645 	wr32(hw, TXGBE_TSTIMEH, 0x0);
3646 
3647 	txgbe_start_timecounters(dev);
3648 
3649 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3650 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
3651 		RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
3652 
3653 	/* Enable timestamping of received PTP packets. */
3654 	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
3655 	tsync_ctl |= TXGBE_TSRXCTL_ENA;
3656 	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
3657 
3658 	/* Enable timestamping of transmitted PTP packets. */
3659 	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
3660 	tsync_ctl |= TXGBE_TSTXCTL_ENA;
3661 	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
3662 
3663 	txgbe_flush(hw);
3664 
3665 	return 0;
3666 }
3667 
3668 static int
3669 txgbe_timesync_disable(struct rte_eth_dev *dev)
3670 {
3671 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3672 	uint32_t tsync_ctl;
3673 
3674 	/* Disable timestamping of transmitted PTP packets. */
3675 	tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
3676 	tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
3677 	wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
3678 
3679 	/* Disable timestamping of received PTP packets. */
3680 	tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
3681 	tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
3682 	wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
3683 
3684 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
3685 	wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
3686 
3687 	/* Stop incrementing the System Time registers. */
3688 	wr32(hw, TXGBE_TSTIMEINC, 0);
3689 
3690 	return 0;
3691 }
3692 
3693 static int
3694 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
3695 				 struct timespec *timestamp,
3696 				 uint32_t flags __rte_unused)
3697 {
3698 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3699 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3700 	uint32_t tsync_rxctl;
3701 	uint64_t rx_tstamp_cycles;
3702 	uint64_t ns;
3703 
3704 	tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
3705 	if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
3706 		return -EINVAL;
3707 
3708 	rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
3709 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
3710 	*timestamp = rte_ns_to_timespec(ns);
3711 
3712 	return 0;
3713 }
3714 
3715 static int
3716 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
3717 				 struct timespec *timestamp)
3718 {
3719 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3720 	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3721 	uint32_t tsync_txctl;
3722 	uint64_t tx_tstamp_cycles;
3723 	uint64_t ns;
3724 
3725 	tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
3726 	if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
3727 		return -EINVAL;
3728 
3729 	tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
3730 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
3731 	*timestamp = rte_ns_to_timespec(ns);
3732 
3733 	return 0;
3734 }
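
/*
 * Usage sketch (illustrative, hypothetical helper name): the timesync
 * callbacks above back the generic rte_eth_timesync_*() API. A simplified
 * receive path for a PTP slave might look like this; error handling is
 * trimmed.
 */
static void
example_ptp_rx(uint16_t port_id)
{
	struct timespec ts;

	rte_eth_timesync_enable(port_id);

	/* After an IEEE 1588 event packet was received and stamped: */
	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
		printf("Rx stamped at %ld.%09ld\n",
		       (long)ts.tv_sec, ts.tv_nsec);

	/* Slew the device clock, e.g. from a servo loop (+1 us). */
	rte_eth_timesync_adjust_time(port_id, 1000);
}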
3735 
3736 static int
3737 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
3738 {
3739 	int count = 0;
3740 	int g_ind = 0;
3741 	const struct reg_info *reg_group;
3742 	const struct reg_info **reg_set = txgbe_regs_others;
3743 
3744 	while ((reg_group = reg_set[g_ind++]))
3745 		count += txgbe_regs_group_count(reg_group);
3746 
3747 	return count;
3748 }
3749 
3750 static int
3751 txgbe_get_regs(struct rte_eth_dev *dev,
3752 	      struct rte_dev_reg_info *regs)
3753 {
3754 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3755 	uint32_t *data = regs->data;
3756 	int g_ind = 0;
3757 	int count = 0;
3758 	const struct reg_info *reg_group;
3759 	const struct reg_info **reg_set = txgbe_regs_others;
3760 
3761 	if (data == NULL) {
3762 		regs->length = txgbe_get_reg_length(dev);
3763 		regs->width = sizeof(uint32_t);
3764 		return 0;
3765 	}
3766 
3767 	/* Support only full register dump */
3768 	if (regs->length == 0 ||
3769 	    regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
3770 		regs->version = hw->mac.type << 24 |
3771 				hw->revision_id << 16 |
3772 				hw->device_id;
3773 		while ((reg_group = reg_set[g_ind++]))
3774 			count += txgbe_read_regs_group(dev, &data[count],
3775 						      reg_group);
3776 		return 0;
3777 	}
3778 
3779 	return -ENOTSUP;
3780 }
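
/*
 * Usage sketch (illustrative, hypothetical helper name): the two-call
 * contract of txgbe_get_regs() as seen through rte_eth_dev_get_reg_info().
 * Assumes <stdlib.h> for calloc()/free().
 */
static int
example_dump_regs(uint16_t port_id)
{
	struct rte_dev_reg_info info;
	int ret;

	memset(&info, 0, sizeof(info));

	/* First call, data == NULL: only length and width are filled in. */
	ret = rte_eth_dev_get_reg_info(port_id, &info);
	if (ret != 0)
		return ret;

	info.data = calloc(info.length, info.width);
	if (info.data == NULL)
		return -ENOMEM;

	/* Second call performs the full register dump. */
	ret = rte_eth_dev_get_reg_info(port_id, &info);
	free(info.data);
	return ret;
}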
3781 
3782 static int
3783 txgbe_get_eeprom_length(struct rte_eth_dev *dev)
3784 {
3785 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3786 
3787 	/* Returned length is in bytes; the ROM stores 16-bit words */
3788 	return hw->rom.word_size * 2;
3789 }
3790 
3791 static int
3792 txgbe_get_eeprom(struct rte_eth_dev *dev,
3793 		struct rte_dev_eeprom_info *in_eeprom)
3794 {
3795 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3796 	struct txgbe_rom_info *eeprom = &hw->rom;
3797 	uint16_t *data = in_eeprom->data;
3798 	int first, length;
3799 
3800 	first = in_eeprom->offset >> 1;
3801 	length = in_eeprom->length >> 1;
3802 	if (first > hw->rom.word_size ||
3803 	    ((first + length) > hw->rom.word_size))
3804 		return -EINVAL;
3805 
3806 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3807 
3808 	return eeprom->readw_buffer(hw, first, length, data);
3809 }
3810 
3811 static int
3812 txgbe_set_eeprom(struct rte_eth_dev *dev,
3813 		struct rte_dev_eeprom_info *in_eeprom)
3814 {
3815 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3816 	struct txgbe_rom_info *eeprom = &hw->rom;
3817 	uint16_t *data = in_eeprom->data;
3818 	int first, length;
3819 
3820 	first = in_eeprom->offset >> 1;
3821 	length = in_eeprom->length >> 1;
3822 	if (first > hw->rom.word_size ||
3823 	    ((first + length) > hw->rom.word_size))
3824 		return -EINVAL;
3825 
3826 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3827 
3828 	return eeprom->writew_buffer(hw, first, length, data);
3829 }
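
/*
 * Usage sketch (illustrative, hypothetical helper name): reading the whole
 * EEPROM through the generic API. Offsets and lengths are byte based; the
 * callbacks above halve them into 16-bit ROM words. Assumes <stdlib.h>.
 */
static int
example_read_eeprom(uint16_t port_id)
{
	struct rte_dev_eeprom_info info;
	int len, ret;

	len = rte_eth_dev_get_eeprom_length(port_id);
	if (len <= 0)
		return len;

	memset(&info, 0, sizeof(info));
	info.length = (uint32_t)len;
	info.data = malloc((size_t)len);
	if (info.data == NULL)
		return -ENOMEM;

	/* Dispatches to txgbe_get_eeprom(). */
	ret = rte_eth_dev_get_eeprom(port_id, &info);
	free(info.data);
	return ret;
}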
3830 
3831 static int
3832 txgbe_get_module_info(struct rte_eth_dev *dev,
3833 		      struct rte_eth_dev_module_info *modinfo)
3834 {
3835 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3836 	uint32_t status;
3837 	uint8_t sff8472_rev, addr_mode;
3838 	bool page_swap = false;
3839 
3840 	/* Check whether we support SFF-8472 or not */
3841 	status = hw->phy.read_i2c_eeprom(hw,
3842 					     TXGBE_SFF_SFF_8472_COMP,
3843 					     &sff8472_rev);
3844 	if (status != 0)
3845 		return -EIO;
3846 
3847 	/* Check the addressing mode; page swapping is not supported */
3848 	status = hw->phy.read_i2c_eeprom(hw,
3849 					     TXGBE_SFF_SFF_8472_SWAP,
3850 					     &addr_mode);
3851 	if (status != 0)
3852 		return -EIO;
3853 
3854 	if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
3855 		PMD_DRV_LOG(ERR,
3856 			    "Address change required to access page 0xA2, "
3857 			    "but not supported. Please report the module "
3858 			    "type to the driver maintainers.");
3859 		page_swap = true;
3860 	}
3861 
3862 	if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
3863 		/* We have an SFP, but it does not support SFF-8472 */
3864 		modinfo->type = RTE_ETH_MODULE_SFF_8079;
3865 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
3866 	} else {
3867 		/* We have an SFP which supports a revision of SFF-8472. */
3868 		modinfo->type = RTE_ETH_MODULE_SFF_8472;
3869 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
3870 	}
3871 
3872 	return 0;
3873 }
3874 
3875 static int
3876 txgbe_get_module_eeprom(struct rte_eth_dev *dev,
3877 			struct rte_dev_eeprom_info *info)
3878 {
3879 	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3880 	uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
3881 	uint8_t databyte = 0xFF;
3882 	uint8_t *data = info->data;
3883 	uint32_t i = 0;
3884 
3885 	if (info->length == 0)
3886 		return -EINVAL;
3887 
3888 	for (i = info->offset; i < info->offset + info->length; i++) {
3889 		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
3890 			status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
3891 		else
3892 			status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
3893 
3894 		if (status != 0)
3895 			return -EIO;
3896 
3897 		data[i - info->offset] = databyte;
3898 	}
3899 
3900 	return 0;
3901 }
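
/*
 * Usage sketch (illustrative, hypothetical helper name): the usual
 * two-step SFP module dump, first the info callback to size the buffer,
 * then the EEPROM read itself. Assumes <stdlib.h>.
 */
static int
example_read_module(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info info;
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	memset(&info, 0, sizeof(info));
	info.length = modinfo.eeprom_len;
	info.data = malloc(info.length);
	if (info.data == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_get_module_eeprom(port_id, &info);
	free(info.data);
	return ret;
}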
3902 
3903 bool
3904 txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
3905 {
3906 	switch (mac_type) {
3907 	case txgbe_mac_raptor:
3908 		return 1;
3909 	default:
3910 		return 0;
3911 	}
3912 }
3913 
3914 static int
3915 txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
3916 			struct rte_eth_dcb_info *dcb_info)
3917 {
3918 	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
3919 	struct txgbe_dcb_tc_config *tc;
3920 	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
3921 	uint8_t nb_tcs;
3922 	uint8_t i, j;
3923 
3924 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
3925 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
3926 	else
3927 		dcb_info->nb_tcs = 1;
3928 
3929 	tc_queue = &dcb_info->tc_queue;
3930 	nb_tcs = dcb_info->nb_tcs;
3931 
3932 	if (dcb_config->vt_mode) { /* vt is enabled */
3933 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3934 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3935 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3936 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
3937 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
3938 			for (j = 0; j < nb_tcs; j++) {
3939 				tc_queue->tc_rxq[0][j].base = j;
3940 				tc_queue->tc_rxq[0][j].nb_queue = 1;
3941 				tc_queue->tc_txq[0][j].base = j;
3942 				tc_queue->tc_txq[0][j].nb_queue = 1;
3943 			}
3944 		} else {
3945 			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
3946 				for (j = 0; j < nb_tcs; j++) {
3947 					tc_queue->tc_rxq[i][j].base =
3948 						i * nb_tcs + j;
3949 					tc_queue->tc_rxq[i][j].nb_queue = 1;
3950 					tc_queue->tc_txq[i][j].base =
3951 						i * nb_tcs + j;
3952 					tc_queue->tc_txq[i][j].nb_queue = 1;
3953 				}
3954 			}
3955 		}
3956 	} else { /* vt is disabled */
3957 		struct rte_eth_dcb_rx_conf *rx_conf =
3958 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3959 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3960 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
3961 		if (dcb_info->nb_tcs == ETH_4_TCS) {
3962 			for (i = 0; i < dcb_info->nb_tcs; i++) {
3963 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
3964 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
3965 			}
3966 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
3967 			dcb_info->tc_queue.tc_txq[0][1].base = 64;
3968 			dcb_info->tc_queue.tc_txq[0][2].base = 96;
3969 			dcb_info->tc_queue.tc_txq[0][3].base = 112;
3970 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
3971 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
3972 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
3973 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
3974 		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
3975 			for (i = 0; i < dcb_info->nb_tcs; i++) {
3976 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
3977 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
3978 			}
3979 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
3980 			dcb_info->tc_queue.tc_txq[0][1].base = 32;
3981 			dcb_info->tc_queue.tc_txq[0][2].base = 64;
3982 			dcb_info->tc_queue.tc_txq[0][3].base = 80;
3983 			dcb_info->tc_queue.tc_txq[0][4].base = 96;
3984 			dcb_info->tc_queue.tc_txq[0][5].base = 104;
3985 			dcb_info->tc_queue.tc_txq[0][6].base = 112;
3986 			dcb_info->tc_queue.tc_txq[0][7].base = 120;
3987 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
3988 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
3989 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
3990 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
3991 			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
3992 			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
3993 			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
3994 			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
3995 		}
3996 	}
3997 	for (i = 0; i < dcb_info->nb_tcs; i++) {
3998 		tc = &dcb_config->tc_config[i];
3999 		dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
4000 	}
4001 	return 0;
4002 }
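
/*
 * Usage sketch (illustrative, hypothetical helper name): printing the
 * TC-to-queue mapping that txgbe_dev_get_dcb_info() fills in, pool 0 only.
 */
static void
example_show_tc_map(uint16_t port_id)
{
	struct rte_eth_dcb_info dcb;
	uint8_t i;

	memset(&dcb, 0, sizeof(dcb));
	if (rte_eth_dev_get_dcb_info(port_id, &dcb) != 0)
		return;

	for (i = 0; i < dcb.nb_tcs; i++)
		printf("TC%u: rxq base %u, %u queue(s)\n", i,
		       dcb.tc_queue.tc_rxq[0][i].base,
		       dcb.tc_queue.tc_rxq[0][i].nb_queue);
}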
4003 
4004 static const struct eth_dev_ops txgbe_eth_dev_ops = {
4005 	.dev_configure              = txgbe_dev_configure,
4006 	.dev_infos_get              = txgbe_dev_info_get,
4007 	.dev_start                  = txgbe_dev_start,
4008 	.dev_stop                   = txgbe_dev_stop,
4009 	.dev_set_link_up            = txgbe_dev_set_link_up,
4010 	.dev_set_link_down          = txgbe_dev_set_link_down,
4011 	.dev_close                  = txgbe_dev_close,
4012 	.dev_reset                  = txgbe_dev_reset,
4013 	.promiscuous_enable         = txgbe_dev_promiscuous_enable,
4014 	.promiscuous_disable        = txgbe_dev_promiscuous_disable,
4015 	.allmulticast_enable        = txgbe_dev_allmulticast_enable,
4016 	.allmulticast_disable       = txgbe_dev_allmulticast_disable,
4017 	.link_update                = txgbe_dev_link_update,
4018 	.stats_get                  = txgbe_dev_stats_get,
4019 	.xstats_get                 = txgbe_dev_xstats_get,
4020 	.xstats_get_by_id           = txgbe_dev_xstats_get_by_id,
4021 	.stats_reset                = txgbe_dev_stats_reset,
4022 	.xstats_reset               = txgbe_dev_xstats_reset,
4023 	.xstats_get_names           = txgbe_dev_xstats_get_names,
4024 	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
4025 	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
4026 	.fw_version_get             = txgbe_fw_version_get,
4027 	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
4028 	.mtu_set                    = txgbe_dev_mtu_set,
4029 	.vlan_filter_set            = txgbe_vlan_filter_set,
4030 	.vlan_tpid_set              = txgbe_vlan_tpid_set,
4031 	.vlan_offload_set           = txgbe_vlan_offload_set,
4032 	.vlan_strip_queue_set       = txgbe_vlan_strip_queue_set,
4033 	.rx_queue_start	            = txgbe_dev_rx_queue_start,
4034 	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
4035 	.tx_queue_start	            = txgbe_dev_tx_queue_start,
4036 	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
4037 	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
4038 	.rx_queue_intr_enable       = txgbe_dev_rx_queue_intr_enable,
4039 	.rx_queue_intr_disable      = txgbe_dev_rx_queue_intr_disable,
4040 	.rx_queue_release           = txgbe_dev_rx_queue_release,
4041 	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
4042 	.tx_queue_release           = txgbe_dev_tx_queue_release,
4043 	.dev_led_on                 = txgbe_dev_led_on,
4044 	.dev_led_off                = txgbe_dev_led_off,
4045 	.flow_ctrl_get              = txgbe_flow_ctrl_get,
4046 	.flow_ctrl_set              = txgbe_flow_ctrl_set,
4047 	.priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
4048 	.mac_addr_add               = txgbe_add_rar,
4049 	.mac_addr_remove            = txgbe_remove_rar,
4050 	.mac_addr_set               = txgbe_set_default_mac_addr,
4051 	.uc_hash_table_set          = txgbe_uc_hash_table_set,
4052 	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
4053 	.set_queue_rate_limit       = txgbe_set_queue_rate_limit,
4054 	.reta_update                = txgbe_dev_rss_reta_update,
4055 	.reta_query                 = txgbe_dev_rss_reta_query,
4056 	.rss_hash_update            = txgbe_dev_rss_hash_update,
4057 	.rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
4058 	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
4059 	.rxq_info_get               = txgbe_rxq_info_get,
4060 	.txq_info_get               = txgbe_txq_info_get,
4061 	.timesync_enable            = txgbe_timesync_enable,
4062 	.timesync_disable           = txgbe_timesync_disable,
4063 	.timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
4064 	.timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
4065 	.get_reg                    = txgbe_get_regs,
4066 	.get_eeprom_length          = txgbe_get_eeprom_length,
4067 	.get_eeprom                 = txgbe_get_eeprom,
4068 	.set_eeprom                 = txgbe_set_eeprom,
4069 	.get_module_info            = txgbe_get_module_info,
4070 	.get_module_eeprom          = txgbe_get_module_eeprom,
4071 	.get_dcb_info               = txgbe_dev_get_dcb_info,
4072 	.timesync_adjust_time       = txgbe_timesync_adjust_time,
4073 	.timesync_read_time         = txgbe_timesync_read_time,
4074 	.timesync_write_time        = txgbe_timesync_write_time,
4075 	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
4076 };
4077 
4078 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
4079 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
4080 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
4081 
4082 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
4083 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
4084 
4085 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
4086 	RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
4087 #endif
4088 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
4089 	RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
4090 #endif
4091 
4092 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
4093 	RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
4094 #endif
4095