xref: /f-stack/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c (revision 2d9fd380)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_kvargs.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

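/*
 * Editor's note (illustrative, derived from the unit comments above, not
 * driver code): since both thresholds are expressed in units of 1024 bytes,
 * the byte values they correspond to are
 *
 *   IXGBE_FC_HI * 1024 == 0x80 * 1024 == 131072 bytes (128 KB)
 *   IXGBE_FC_LO * 1024 == 0x40 * 1024 ==  65536 bytes (64 KB)
 *
 * i.e. XOFF pause frames start once the Rx packet buffer fills past 128 KB,
 * and XON frames resume traffic once it drains below 64 KB.
 */
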
/* Default value of Max Rx Queue */
#define IXGBE_MAX_RX_QUEUE_NUM 128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */

/*
 *  Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX
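
/*
 * Editor's note (sketch): RTE_LEN2MASK(n, t) from rte_common.h builds a
 * mask of the n low-order bits in type t, so with CHAR_BIT == 8:
 *
 *   IXGBE_4_BIT_MASK == RTE_LEN2MASK(4, uint8_t) == 0x0f
 *   IXGBE_8_BIT_MASK == UINT8_MAX                == 0xff
 */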

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
#define IXGBE_ETAG_ETYPE                       0x00005084
#define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID                 0x80000000
#define IXGBE_RAH_ADTYPE                       0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
#define IXGBE_VMVIR_TAGA_MASK                  0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG                    0x00000004
#define IXGBE_VTEICR_MASK                      0x07

#define IXGBE_EXVET_VET_EXT_SHIFT              16
#define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000

#define IXGBEVF_DEVARG_PFLINK_FULLCHK		"pflink_fullchk"

static const char * const ixgbevf_valid_arguments[] = {
	IXGBEVF_DEVARG_PFLINK_FULLCHK,
	NULL
};
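
/*
 * Editor's note (illustrative usage, hypothetical PCI address): the VF
 * devarg above is passed on the EAL device string, e.g.
 *
 *   dpdk-testpmd -a 0000:01:10.0,pflink_fullchk=1 -- -i
 *
 * where pflink_fullchk=1 asks ixgbevf_dev_link_update() to fully check the
 * PF's physical link state instead of relying only on the mailbox status.
 */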

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static int ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int ixgbe_dev_close(struct rte_eth_dev *dev);
static int ixgbe_dev_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
				  struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
					     uint16_t queue_id,
					     uint8_t stat_idx,
					     uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
				 size_t fw_size);
static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
			       enum rte_vlan_type vlan_type,
			       uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
		int on);
static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
						  int mask);
static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void *ixgbe_dev_setup_link_thread_handler(void *param);
static int ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev,
					      uint32_t timeout_ms);

static int ixgbe_add_rar(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr,
			uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
					   struct rte_ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
				struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static int ixgbevf_dev_stop(struct rte_eth_dev *dev);
static int ixgbevf_dev_close(struct rte_eth_dev *dev);
static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
		uint16_t queue, int on);
static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					    uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					     uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
				 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
		rte_ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
		struct rte_eth_mirror_conf *mirror_conf,
		uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
		uint8_t	rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					  uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					   uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
			       uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
					     struct rte_ether_addr *mac_addr);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
			struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
			struct ixgbe_5tuple_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
				   struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *eeprom);

static int ixgbe_get_module_info(struct rte_eth_dev *dev,
				 struct rte_eth_dev_module_info *modinfo);
static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
				   struct rte_dev_eeprom_info *info);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
				struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					    struct timespec *timestamp,
					    uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					    struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
				   const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw);

/*
 * Define VF stats macros for registers that are not "cleared on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
	uint32_t latest = IXGBE_READ_REG(hw, reg);              \
	cur += (latest - last) & UINT_MAX;                      \
	last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
	u64 latest = ((new_msb << 32) | new_lsb);                \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest;                                           \
}
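
/*
 * Editor's note (worked example, not driver code): the 36-bit update adds
 * 2^36 before subtracting and masks with 2^36 - 1, so a hardware counter
 * that wraps between reads still yields the correct delta. For instance:
 *
 *   last   = 0xFFFFFFFF0   (near the top of the 36-bit range)
 *   latest = 0x000000010   (counter wrapped)
 *   delta  = (0x1000000000 + 0x10 - 0xFFFFFFFF0) & 0xFFFFFFFFF == 0x20
 *
 * i.e. 32 events were counted across the wrap.
 */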

#define IXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
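
/*
 * Editor's note (usage sketch, assuming 32-bit bitmap words): each macro
 * maps queue q to word q / 32, bit q % 32 of the per-queue VLAN-strip
 * bitmap. For queue 70 that is bitmap[2], bit 6:
 *
 *   IXGBE_SET_HWSTRIP(hwstrip, 70);      // bitmap[2] |= 1 << 6
 *   IXGBE_GET_HWSTRIP(hwstrip, 70, r);   // r = 1
 *   IXGBE_CLEAR_HWSTRIP(hwstrip, 70);    // bitmap[2] &= ~(1 << 6)
 */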

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
	{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_TXD_ALIGN,
	.nb_seg_max = IXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure        = ixgbe_dev_configure,
	.dev_start            = ixgbe_dev_start,
	.dev_stop             = ixgbe_dev_stop,
	.dev_set_link_up    = ixgbe_dev_set_link_up,
	.dev_set_link_down  = ixgbe_dev_set_link_down,
	.dev_close            = ixgbe_dev_close,
	.dev_reset	      = ixgbe_dev_reset,
	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update          = ixgbe_dev_link_update,
	.stats_get            = ixgbe_dev_stats_get,
	.xstats_get           = ixgbe_dev_xstats_get,
	.xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
	.stats_reset          = ixgbe_dev_stats_reset,
	.xstats_reset         = ixgbe_dev_xstats_reset,
	.xstats_get_names     = ixgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
	.fw_version_get       = ixgbe_fw_version_get,
	.dev_infos_get        = ixgbe_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set              = ixgbe_dev_mtu_set,
	.vlan_filter_set      = ixgbe_vlan_filter_set,
	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
	.vlan_offload_set     = ixgbe_vlan_offload_set,
	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
	.rx_queue_start	      = ixgbe_dev_rx_queue_start,
	.rx_queue_stop        = ixgbe_dev_rx_queue_stop,
	.tx_queue_start	      = ixgbe_dev_tx_queue_start,
	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.dev_led_on           = ixgbe_dev_led_on,
	.dev_led_off          = ixgbe_dev_led_off,
	.flow_ctrl_get        = ixgbe_flow_ctrl_get,
	.flow_ctrl_set        = ixgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
	.mac_addr_add         = ixgbe_add_rar,
	.mac_addr_remove      = ixgbe_remove_rar,
	.mac_addr_set         = ixgbe_set_default_mac_addr,
	.uc_hash_table_set    = ixgbe_uc_hash_table_set,
	.uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
	.mirror_rule_set      = ixgbe_mirror_rule_set,
	.mirror_rule_reset    = ixgbe_mirror_rule_reset,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
	.filter_ctrl          = ixgbe_dev_filter_ctrl,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.timesync_enable      = ixgbe_timesync_enable,
	.timesync_disable     = ixgbe_timesync_disable,
	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
	.get_reg              = ixgbe_get_regs,
	.get_eeprom_length    = ixgbe_get_eeprom_length,
	.get_eeprom           = ixgbe_get_eeprom,
	.set_eeprom           = ixgbe_set_eeprom,
	.get_module_info      = ixgbe_get_module_info,
	.get_module_eeprom    = ixgbe_get_module_eeprom,
	.get_dcb_info         = ixgbe_dev_get_dcb_info,
	.timesync_adjust_time = ixgbe_timesync_adjust_time,
	.timesync_read_time   = ixgbe_timesync_read_time,
	.timesync_write_time  = ixgbe_timesync_write_time,
	.udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
	.tm_ops_get           = ixgbe_tm_ops_get,
	.tx_done_cleanup      = ixgbe_dev_tx_done_cleanup,
};
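
/*
 * Editor's note (sketch): applications never call these ops directly; the
 * ethdev layer dispatches through the ops table, e.g. for a port probed by
 * this PMD:
 *
 *   rte_eth_dev_start(port_id);   // ends up in ixgbe_dev_start(dev)
 *   rte_eth_dev_stop(port_id);    // ends up in ixgbe_dev_stop(dev)
 */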

/*
 * dev_ops for the virtual function; only the bare necessities for basic VF
 * operation are implemented.
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure        = ixgbevf_dev_configure,
	.dev_start            = ixgbevf_dev_start,
	.dev_stop             = ixgbevf_dev_stop,
	.link_update          = ixgbevf_dev_link_update,
	.stats_get            = ixgbevf_dev_stats_get,
	.xstats_get           = ixgbevf_dev_xstats_get,
	.stats_reset          = ixgbevf_dev_stats_reset,
	.xstats_reset         = ixgbevf_dev_stats_reset,
	.xstats_get_names     = ixgbevf_dev_xstats_get_names,
	.dev_close            = ixgbevf_dev_close,
	.dev_reset	      = ixgbevf_dev_reset,
	.promiscuous_enable   = ixgbevf_dev_promiscuous_enable,
	.promiscuous_disable  = ixgbevf_dev_promiscuous_disable,
	.allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
	.dev_infos_get        = ixgbevf_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set              = ixgbevf_dev_set_mtu,
	.vlan_filter_set      = ixgbevf_vlan_filter_set,
	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
	.vlan_offload_set     = ixgbevf_vlan_offload_set,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
	.mac_addr_add         = ixgbevf_add_mac_addr,
	.mac_addr_remove      = ixgbevf_remove_mac_addr,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.mac_addr_set         = ixgbevf_set_default_mac_addr,
	.get_reg              = ixgbevf_get_regs,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
	.tx_done_cleanup      = ixgbe_dev_tx_done_cleanup,
};

/* Store statistics names and their offsets in the stats structure. */
struct rte_ixgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_add)},
	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_remove)},
	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fadd)},
	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fremove)},
	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
		fdirmatch)},
	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
		fdirmiss)},

	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
		fclast)},
	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
		fcoe_noddp)},
	{"rx_fcoe_no_direct_data_placement_ext_buff",
		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxontxc)},
	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxonrxc)},
	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxofftxc)},
	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxoffrxc)},
	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
			   sizeof(rte_ixgbe_stats_strings[0]))
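
/*
 * Editor's note (illustrative sketch, not driver code): the name/offset
 * tables above let xstats be read generically out of the hardware
 * statistics block, along the lines of:
 *
 *   uint64_t
 *   read_hw_stat(const struct ixgbe_hw_stats *hw_stats, unsigned int i)
 *   {
 *       const char *base = (const char *)hw_stats;
 *
 *       return *(const uint64_t *)(base +
 *                                  rte_ixgbe_stats_strings[i].offset);
 *   }
 */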

/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		out_pkts_untagged)},
	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_pkts_encrypted)},
	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
		out_pkts_protected)},
	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_octets_encrypted)},
	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
		out_octets_protected)},
	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		in_pkts_untagged)},
	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
		in_pkts_badtag)},
	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_nosci)},
	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unknownsci)},
	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
		in_octets_decrypted)},
	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
		in_octets_validated)},
	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unchecked)},
	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
		in_pkts_delayed)},
	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
		in_pkts_late)},
	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
		in_pkts_ok)},
	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_invalid)},
	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notvalid)},
	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unusedsa)},
	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
			   sizeof(rte_ixgbe_macsec_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
			   sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
		pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
			   sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) /	\
		sizeof(rte_ixgbevf_stats_strings[0]))

/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		return 0;
	}
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = ixgbe_reset_hw(hw);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
		status = IXGBE_SUCCESS;
	return status;
}

static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}


static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if ((hw->mac.type != ixgbe_mac_82599EB) &&
		(hw->mac.type != ixgbe_mac_X540) &&
		(hw->mac.type != ixgbe_mac_X550) &&
		(hw->mac.type != ixgbe_mac_X550EM_x) &&
		(hw->mac.type != ixgbe_mac_X550EM_a))
		return -ENOSYS;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsmr[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsmr[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

	/* Now write the mapping in the appropriate register */
	if (is_rx) {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
	} else {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
	}
	return 0;
}
931a9643ea8Slogwang 
932a9643ea8Slogwang static void
933a9643ea8Slogwang ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
934a9643ea8Slogwang {
935a9643ea8Slogwang 	struct ixgbe_stat_mapping_registers *stat_mappings =
936a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
937a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
938a9643ea8Slogwang 	int i;
939a9643ea8Slogwang 
940a9643ea8Slogwang 	/* write whatever was in stat mapping table to the NIC */
941a9643ea8Slogwang 	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
942a9643ea8Slogwang 		/* rx */
943a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
944a9643ea8Slogwang 
945a9643ea8Slogwang 		/* tx */
946a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
947a9643ea8Slogwang 	}
948a9643ea8Slogwang }
949a9643ea8Slogwang 
950a9643ea8Slogwang static void
951a9643ea8Slogwang ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
952a9643ea8Slogwang {
953a9643ea8Slogwang 	uint8_t i;
954a9643ea8Slogwang 	struct ixgbe_dcb_tc_config *tc;
955a9643ea8Slogwang 	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
956a9643ea8Slogwang 
957a9643ea8Slogwang 	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
958a9643ea8Slogwang 	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
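	/*
	 * Worked example for the split below: with dcb_max_tc == 8 each TC
	 * gets 100/8 = 12 percent and odd TCs get one extra ((i & 1)), so
	 * the eight groups sum to 4 * 12 + 4 * 13 = 100 percent.
	 */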
959a9643ea8Slogwang 	for (i = 0; i < dcb_max_tc; i++) {
960a9643ea8Slogwang 		tc = &dcb_config->tc_config[i];
961a9643ea8Slogwang 		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
962a9643ea8Slogwang 		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
963a9643ea8Slogwang 				 (uint8_t)(100/dcb_max_tc + (i & 1));
964a9643ea8Slogwang 		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
965a9643ea8Slogwang 		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
966a9643ea8Slogwang 				 (uint8_t)(100/dcb_max_tc + (i & 1));
967a9643ea8Slogwang 		tc->pfc = ixgbe_dcb_pfc_disabled;
968a9643ea8Slogwang 	}
969a9643ea8Slogwang 
970a9643ea8Slogwang 	/* Initialize default user to priority mapping, UPx->TC0 */
971a9643ea8Slogwang 	tc = &dcb_config->tc_config[0];
972a9643ea8Slogwang 	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
973a9643ea8Slogwang 	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
974a9643ea8Slogwang 	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
975a9643ea8Slogwang 		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
976a9643ea8Slogwang 		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
977a9643ea8Slogwang 	}
978a9643ea8Slogwang 	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
979a9643ea8Slogwang 	dcb_config->pfc_mode_enable = false;
980a9643ea8Slogwang 	dcb_config->vt_mode = true;
981a9643ea8Slogwang 	dcb_config->round_robin_enable = false;
982a9643ea8Slogwang 	/* support all DCB capabilities in 82599 */
983a9643ea8Slogwang 	dcb_config->support.capabilities = 0xFF;
984a9643ea8Slogwang 
985a9643ea8Slogwang 	/* we only support 4 TCs for X540, X550 */
986a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_X540 ||
987a9643ea8Slogwang 		hw->mac.type == ixgbe_mac_X550 ||
988a9643ea8Slogwang 		hw->mac.type == ixgbe_mac_X550EM_x ||
989a9643ea8Slogwang 		hw->mac.type == ixgbe_mac_X550EM_a) {
990a9643ea8Slogwang 		dcb_config->num_tcs.pg_tcs = 4;
991a9643ea8Slogwang 		dcb_config->num_tcs.pfc_tcs = 4;
992a9643ea8Slogwang 	}
993a9643ea8Slogwang }
994a9643ea8Slogwang 
995a9643ea8Slogwang /*
996a9643ea8Slogwang  * Ensure that all locks are released before first NVM or PHY access
997a9643ea8Slogwang  */
998a9643ea8Slogwang static void
999a9643ea8Slogwang ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
1000a9643ea8Slogwang {
1001a9643ea8Slogwang 	uint16_t mask;
1002a9643ea8Slogwang 
1003a9643ea8Slogwang 	/*
1004a9643ea8Slogwang 	 * Phy lock should not fail in this early stage. If this is the case,
1005a9643ea8Slogwang 	 * it is due to an improper exit of the application.
1006a9643ea8Slogwang 	 * So force the release of the faulty lock. Release of common lock
1007a9643ea8Slogwang 	 * is done automatically by swfw_sync function.
1008a9643ea8Slogwang 	 */
1009a9643ea8Slogwang 	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
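	/* The shift selects this port's own PHY semaphore bit; e.g. PCI
	 * function 1 uses IXGBE_GSSR_PHY0_SM << 1, the PHY1 semaphore.
	 */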
1010a9643ea8Slogwang 	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1011a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
1012a9643ea8Slogwang 	}
1013a9643ea8Slogwang 	ixgbe_release_swfw_semaphore(hw, mask);
1014a9643ea8Slogwang 
1015a9643ea8Slogwang 	/*
1016a9643ea8Slogwang 	 * These ones are more tricky since they are common to all ports; but
1017a9643ea8Slogwang 	 * swfw_sync retries last long enough (1s) to be almost sure that if
1018a9643ea8Slogwang 	 * lock can not be taken it is due to an improper lock of the
1019a9643ea8Slogwang 	 * semaphore.
1020a9643ea8Slogwang 	 */
1021a9643ea8Slogwang 	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
1022a9643ea8Slogwang 	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
1023a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
1024a9643ea8Slogwang 	}
1025a9643ea8Slogwang 	ixgbe_release_swfw_semaphore(hw, mask);
1026a9643ea8Slogwang }
1027a9643ea8Slogwang 
1028a9643ea8Slogwang /*
1029a9643ea8Slogwang  * This function is based on code in ixgbe_attach() in base/ixgbe.c.
1030a9643ea8Slogwang  * It returns 0 on success.
1031a9643ea8Slogwang  */
1032a9643ea8Slogwang static int
1033d30ea906Sjfb8856606 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
1034a9643ea8Slogwang {
10354418919fSjohnjiang 	struct ixgbe_adapter *ad = eth_dev->data->dev_private;
10362bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
10372bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1038a9643ea8Slogwang 	struct ixgbe_hw *hw =
1039a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1040a9643ea8Slogwang 	struct ixgbe_vfta *shadow_vfta =
1041a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1042a9643ea8Slogwang 	struct ixgbe_hwstrip *hwstrip =
1043a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
1044a9643ea8Slogwang 	struct ixgbe_dcb_config *dcb_config =
1045a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
1046a9643ea8Slogwang 	struct ixgbe_filter_info *filter_info =
1047a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
10482bfe3f2eSlogwang 	struct ixgbe_bw_conf *bw_conf =
10492bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
1050a9643ea8Slogwang 	uint32_t ctrl_ext;
1051a9643ea8Slogwang 	uint16_t csum;
10520c6bd470Sfengbojiang 	int diag, i, ret;
1053a9643ea8Slogwang 
1054a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1055a9643ea8Slogwang 
10564418919fSjohnjiang 	ixgbe_dev_macsec_setting_reset(eth_dev);
10574418919fSjohnjiang 
1058a9643ea8Slogwang 	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
1059*2d9fd380Sjfb8856606 	eth_dev->rx_queue_count       = ixgbe_dev_rx_queue_count;
1060*2d9fd380Sjfb8856606 	eth_dev->rx_descriptor_done   = ixgbe_dev_rx_descriptor_done;
1061*2d9fd380Sjfb8856606 	eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
1062*2d9fd380Sjfb8856606 	eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
1063a9643ea8Slogwang 	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1064a9643ea8Slogwang 	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
10652bfe3f2eSlogwang 	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
1066a9643ea8Slogwang 
1067a9643ea8Slogwang 	/*
1068a9643ea8Slogwang 	 * For secondary processes, we don't initialise any further as primary
1069a9643ea8Slogwang 	 * has already done this work. Only check we don't need a different
1070a9643ea8Slogwang 	 * RX and TX function.
1071a9643ea8Slogwang 	 */
1072a9643ea8Slogwang 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1073a9643ea8Slogwang 		struct ixgbe_tx_queue *txq;
1074a9643ea8Slogwang 		/* TX queue function in primary, set by last queue initialized.
1075a9643ea8Slogwang 		 * Tx queue may not have been initialized by primary process.
1076a9643ea8Slogwang 		 */
1077a9643ea8Slogwang 		if (eth_dev->data->tx_queues) {
1078a9643ea8Slogwang 			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
1079a9643ea8Slogwang 			ixgbe_set_tx_function(eth_dev, txq);
1080a9643ea8Slogwang 		} else {
1081a9643ea8Slogwang 			/* Use default TX function if we get here */
1082a9643ea8Slogwang 			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
1083a9643ea8Slogwang 				     "Using default TX function.");
1084a9643ea8Slogwang 		}
1085a9643ea8Slogwang 
1086a9643ea8Slogwang 		ixgbe_set_rx_function(eth_dev);
1087a9643ea8Slogwang 
1088a9643ea8Slogwang 		return 0;
1089a9643ea8Slogwang 	}
10902bfe3f2eSlogwang 
10914418919fSjohnjiang 	rte_atomic32_clear(&ad->link_thread_running);
1092a9643ea8Slogwang 	rte_eth_copy_pci_info(eth_dev, pci_dev);
1093*2d9fd380Sjfb8856606 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1094a9643ea8Slogwang 
1095a9643ea8Slogwang 	/* Vendor and Device ID need to be set before init of shared code */
1096a9643ea8Slogwang 	hw->device_id = pci_dev->id.device_id;
1097a9643ea8Slogwang 	hw->vendor_id = pci_dev->id.vendor_id;
1098a9643ea8Slogwang 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1099a9643ea8Slogwang 	hw->allow_unsupported_sfp = 1;
1100a9643ea8Slogwang 
1101a9643ea8Slogwang 	/* Initialize the shared code (base driver) */
11022bfe3f2eSlogwang #ifdef RTE_LIBRTE_IXGBE_BYPASS
1103a9643ea8Slogwang 	diag = ixgbe_bypass_init_shared_code(hw);
1104a9643ea8Slogwang #else
1105a9643ea8Slogwang 	diag = ixgbe_init_shared_code(hw);
11062bfe3f2eSlogwang #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1107a9643ea8Slogwang 
1108a9643ea8Slogwang 	if (diag != IXGBE_SUCCESS) {
1109a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
1110a9643ea8Slogwang 		return -EIO;
1111a9643ea8Slogwang 	}
1112a9643ea8Slogwang 
1113d30ea906Sjfb8856606 	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
1114d30ea906Sjfb8856606 		PMD_INIT_LOG(ERR, "\nERROR: "
1115d30ea906Sjfb8856606 			"Firmware recovery mode detected. Limiting functionality.\n"
1116d30ea906Sjfb8856606 			"Refer to the Intel(R) Ethernet Adapters and Devices "
1117d30ea906Sjfb8856606 			"User Guide for details on firmware recovery mode.");
1118d30ea906Sjfb8856606 		return -EIO;
1119d30ea906Sjfb8856606 	}
1120d30ea906Sjfb8856606 
1121a9643ea8Slogwang 	/* pick up the PCI bus settings for reporting later */
1122a9643ea8Slogwang 	ixgbe_get_bus_info(hw);
1123a9643ea8Slogwang 
1124a9643ea8Slogwang 	/* Unlock any pending hardware semaphore */
1125a9643ea8Slogwang 	ixgbe_swfw_lock_reset(hw);
1126a9643ea8Slogwang 
1127*2d9fd380Sjfb8856606 #ifdef RTE_LIB_SECURITY
1128d30ea906Sjfb8856606 	/* Initialize security_ctx only for primary process */
1129d30ea906Sjfb8856606 	if (ixgbe_ipsec_ctx_create(eth_dev))
1130d30ea906Sjfb8856606 		return -ENOMEM;
1131d30ea906Sjfb8856606 #endif
1132d30ea906Sjfb8856606 
1133a9643ea8Slogwang 	/* Initialize DCB configuration */
1134a9643ea8Slogwang 	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
1135a9643ea8Slogwang 	ixgbe_dcb_init(hw, dcb_config);
1136a9643ea8Slogwang 	/* Get Hardware Flow Control setting */
11374418919fSjohnjiang 	hw->fc.requested_mode = ixgbe_fc_none;
11384418919fSjohnjiang 	hw->fc.current_mode = ixgbe_fc_none;
1139a9643ea8Slogwang 	hw->fc.pause_time = IXGBE_FC_PAUSE;
1140a9643ea8Slogwang 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
1141a9643ea8Slogwang 		hw->fc.low_water[i] = IXGBE_FC_LO;
1142a9643ea8Slogwang 		hw->fc.high_water[i] = IXGBE_FC_HI;
1143a9643ea8Slogwang 	}
1144a9643ea8Slogwang 	hw->fc.send_xon = 1;
1145a9643ea8Slogwang 
1146a9643ea8Slogwang 	/* Make sure we have a good EEPROM before we read from it */
1147a9643ea8Slogwang 	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
1148a9643ea8Slogwang 	if (diag != IXGBE_SUCCESS) {
1149a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
1150a9643ea8Slogwang 		return -EIO;
1151a9643ea8Slogwang 	}
1152a9643ea8Slogwang 
11532bfe3f2eSlogwang #ifdef RTE_LIBRTE_IXGBE_BYPASS
1154a9643ea8Slogwang 	diag = ixgbe_bypass_init_hw(hw);
1155a9643ea8Slogwang #else
1156a9643ea8Slogwang 	diag = ixgbe_init_hw(hw);
11572bfe3f2eSlogwang #endif /* RTE_LIBRTE_IXGBE_BYPASS */
1158a9643ea8Slogwang 
1159a9643ea8Slogwang 	/*
1160a9643ea8Slogwang 	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
1161a9643ea8Slogwang 	 * is called too soon after the kernel driver unbinding/binding occurs.
1162a9643ea8Slogwang 	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
1163a9643ea8Slogwang 	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
1164a9643ea8Slogwang 	 * also called. See ixgbe_identify_phy_82599(). The reason for the
1165a9643ea8Slogwang 	 * failure is not known, and only occurs when virtualisation features
1166a9643ea8Slogwang 	 * are disabled in the BIOS. A delay of 100ms was found to be enough by
1167a9643ea8Slogwang 	 * trial-and-error, and is doubled to be safe.
1168a9643ea8Slogwang 	 */
1169a9643ea8Slogwang 	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
1170a9643ea8Slogwang 		rte_delay_ms(200);
1171a9643ea8Slogwang 		diag = ixgbe_init_hw(hw);
1172a9643ea8Slogwang 	}
1173a9643ea8Slogwang 
11742bfe3f2eSlogwang 	if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
11752bfe3f2eSlogwang 		diag = IXGBE_SUCCESS;
11762bfe3f2eSlogwang 
1177a9643ea8Slogwang 	if (diag == IXGBE_ERR_EEPROM_VERSION) {
1178a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
1179a9643ea8Slogwang 			     "LOM.  Please be aware there may be issues associated "
1180a9643ea8Slogwang 			     "with your hardware.");
1181a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "If you are experiencing problems "
1182a9643ea8Slogwang 			     "please contact your Intel or hardware representative "
1183a9643ea8Slogwang 			     "who provided you with this hardware.");
1184a9643ea8Slogwang 	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
1185a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
1186a9643ea8Slogwang 	if (diag) {
1187a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
1188a9643ea8Slogwang 		return -EIO;
1189a9643ea8Slogwang 	}
1190a9643ea8Slogwang 
1191a9643ea8Slogwang 	/* Reset the hw statistics */
1192a9643ea8Slogwang 	ixgbe_dev_stats_reset(eth_dev);
1193a9643ea8Slogwang 
1194a9643ea8Slogwang 	/* disable interrupt */
1195a9643ea8Slogwang 	ixgbe_disable_intr(hw);
1196a9643ea8Slogwang 
1197a9643ea8Slogwang 	/* reset mappings for queue statistics hw counters */
1198a9643ea8Slogwang 	ixgbe_reset_qstat_mappings(hw);
1199a9643ea8Slogwang 
1200a9643ea8Slogwang 	/* Allocate memory for storing MAC addresses */
12014418919fSjohnjiang 	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
1202a9643ea8Slogwang 					       hw->mac.num_rar_entries, 0);
1203a9643ea8Slogwang 	if (eth_dev->data->mac_addrs == NULL) {
1204a9643ea8Slogwang 		PMD_INIT_LOG(ERR,
1205a9643ea8Slogwang 			     "Failed to allocate %u bytes needed to store "
1206a9643ea8Slogwang 			     "MAC addresses",
12074418919fSjohnjiang 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1208a9643ea8Slogwang 		return -ENOMEM;
1209a9643ea8Slogwang 	}
1210a9643ea8Slogwang 	/* Copy the permanent MAC address */
12114418919fSjohnjiang 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
1212a9643ea8Slogwang 			&eth_dev->data->mac_addrs[0]);
1213a9643ea8Slogwang 
1214a9643ea8Slogwang 	/* Allocate memory for storing hash filter MAC addresses */
12154418919fSjohnjiang 	eth_dev->data->hash_mac_addrs = rte_zmalloc(
12164418919fSjohnjiang 		"ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
1217a9643ea8Slogwang 	if (eth_dev->data->hash_mac_addrs == NULL) {
1218a9643ea8Slogwang 		PMD_INIT_LOG(ERR,
1219a9643ea8Slogwang 			     "Failed to allocate %d bytes needed to store MAC addresses",
12204418919fSjohnjiang 			     RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
1221a9643ea8Slogwang 		return -ENOMEM;
1222a9643ea8Slogwang 	}
1223a9643ea8Slogwang 
1224a9643ea8Slogwang 	/* initialize the vfta */
1225a9643ea8Slogwang 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1226a9643ea8Slogwang 
1227a9643ea8Slogwang 	/* initialize the hw strip bitmap*/
1228a9643ea8Slogwang 	memset(hwstrip, 0, sizeof(*hwstrip));
1229a9643ea8Slogwang 
1230a9643ea8Slogwang 	/* initialize PF if max_vfs not zero */
12310c6bd470Sfengbojiang 	ret = ixgbe_pf_host_init(eth_dev);
12320c6bd470Sfengbojiang 	if (ret) {
12330c6bd470Sfengbojiang 		rte_free(eth_dev->data->mac_addrs);
12340c6bd470Sfengbojiang 		eth_dev->data->mac_addrs = NULL;
12350c6bd470Sfengbojiang 		rte_free(eth_dev->data->hash_mac_addrs);
12360c6bd470Sfengbojiang 		eth_dev->data->hash_mac_addrs = NULL;
12370c6bd470Sfengbojiang 		return ret;
12380c6bd470Sfengbojiang 	}
1239a9643ea8Slogwang 
1240a9643ea8Slogwang 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
1241a9643ea8Slogwang 	/* let hardware know driver is loaded */
1242a9643ea8Slogwang 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
1243a9643ea8Slogwang 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
1244a9643ea8Slogwang 	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
1245a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
1246a9643ea8Slogwang 	IXGBE_WRITE_FLUSH(hw);
1247a9643ea8Slogwang 
1248a9643ea8Slogwang 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
1249a9643ea8Slogwang 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
1250a9643ea8Slogwang 			     (int) hw->mac.type, (int) hw->phy.type,
1251a9643ea8Slogwang 			     (int) hw->phy.sfp_type);
1252a9643ea8Slogwang 	else
1253a9643ea8Slogwang 		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
1254a9643ea8Slogwang 			     (int) hw->mac.type, (int) hw->phy.type);
1255a9643ea8Slogwang 
1256a9643ea8Slogwang 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1257a9643ea8Slogwang 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
1258a9643ea8Slogwang 		     pci_dev->id.device_id);
1259a9643ea8Slogwang 
12602bfe3f2eSlogwang 	rte_intr_callback_register(intr_handle,
12612bfe3f2eSlogwang 				   ixgbe_dev_interrupt_handler, eth_dev);
1262a9643ea8Slogwang 
1263a9643ea8Slogwang 	/* enable uio/vfio intr/eventfd mapping */
12642bfe3f2eSlogwang 	rte_intr_enable(intr_handle);
1265a9643ea8Slogwang 
1266a9643ea8Slogwang 	/* enable support intr */
1267a9643ea8Slogwang 	ixgbe_enable_intr(eth_dev);
1268a9643ea8Slogwang 
12692bfe3f2eSlogwang 	/* initialize filter info */
12702bfe3f2eSlogwang 	memset(filter_info, 0,
12712bfe3f2eSlogwang 	       sizeof(struct ixgbe_filter_info));
12722bfe3f2eSlogwang 
1273a9643ea8Slogwang 	/* initialize 5tuple filter list */
1274a9643ea8Slogwang 	TAILQ_INIT(&filter_info->fivetuple_list);
12752bfe3f2eSlogwang 
12762bfe3f2eSlogwang 	/* initialize flow director filter list & hash */
12772bfe3f2eSlogwang 	ixgbe_fdir_filter_init(eth_dev);
12782bfe3f2eSlogwang 
12792bfe3f2eSlogwang 	/* initialize l2 tunnel filter list & hash */
12802bfe3f2eSlogwang 	ixgbe_l2_tn_filter_init(eth_dev);
12812bfe3f2eSlogwang 
12822bfe3f2eSlogwang 	/* initialize flow filter lists */
12832bfe3f2eSlogwang 	ixgbe_filterlist_init();
12842bfe3f2eSlogwang 
12852bfe3f2eSlogwang 	/* initialize bandwidth configuration info */
12862bfe3f2eSlogwang 	memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
12872bfe3f2eSlogwang 
12882bfe3f2eSlogwang 	/* initialize Traffic Manager configuration */
12892bfe3f2eSlogwang 	ixgbe_tm_conf_init(eth_dev);
1290a9643ea8Slogwang 
1291a9643ea8Slogwang 	return 0;
1292a9643ea8Slogwang }
1293a9643ea8Slogwang 
1294a9643ea8Slogwang static int
1295a9643ea8Slogwang eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1296a9643ea8Slogwang {
1297a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1298a9643ea8Slogwang 
1299a9643ea8Slogwang 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1300d30ea906Sjfb8856606 		return 0;
1301a9643ea8Slogwang 
1302a9643ea8Slogwang 	ixgbe_dev_close(eth_dev);
1303a9643ea8Slogwang 
1304a9643ea8Slogwang 	return 0;
1305a9643ea8Slogwang }
1306a9643ea8Slogwang 
13072bfe3f2eSlogwang static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
13082bfe3f2eSlogwang {
13092bfe3f2eSlogwang 	struct ixgbe_filter_info *filter_info =
13102bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
13112bfe3f2eSlogwang 	struct ixgbe_5tuple_filter *p_5tuple;
13122bfe3f2eSlogwang 
13132bfe3f2eSlogwang 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
13142bfe3f2eSlogwang 		TAILQ_REMOVE(&filter_info->fivetuple_list,
13152bfe3f2eSlogwang 			     p_5tuple,
13162bfe3f2eSlogwang 			     entries);
13172bfe3f2eSlogwang 		rte_free(p_5tuple);
13182bfe3f2eSlogwang 	}
13192bfe3f2eSlogwang 	memset(filter_info->fivetuple_mask, 0,
13202bfe3f2eSlogwang 	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
13212bfe3f2eSlogwang 
13222bfe3f2eSlogwang 	return 0;
13232bfe3f2eSlogwang }
13242bfe3f2eSlogwang 
13252bfe3f2eSlogwang static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
13262bfe3f2eSlogwang {
13272bfe3f2eSlogwang 	struct ixgbe_hw_fdir_info *fdir_info =
13282bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
13292bfe3f2eSlogwang 	struct ixgbe_fdir_filter *fdir_filter;
13302bfe3f2eSlogwang 
13312bfe3f2eSlogwang 	if (fdir_info->hash_map)
13322bfe3f2eSlogwang 		rte_free(fdir_info->hash_map);
13332bfe3f2eSlogwang 	if (fdir_info->hash_handle)
13342bfe3f2eSlogwang 		rte_hash_free(fdir_info->hash_handle);
13352bfe3f2eSlogwang 
13362bfe3f2eSlogwang 	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
13372bfe3f2eSlogwang 		TAILQ_REMOVE(&fdir_info->fdir_list,
13382bfe3f2eSlogwang 			     fdir_filter,
13392bfe3f2eSlogwang 			     entries);
13402bfe3f2eSlogwang 		rte_free(fdir_filter);
13412bfe3f2eSlogwang 	}
13422bfe3f2eSlogwang 
13432bfe3f2eSlogwang 	return 0;
13442bfe3f2eSlogwang }
13452bfe3f2eSlogwang 
13462bfe3f2eSlogwang static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
13472bfe3f2eSlogwang {
13482bfe3f2eSlogwang 	struct ixgbe_l2_tn_info *l2_tn_info =
13492bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
13502bfe3f2eSlogwang 	struct ixgbe_l2_tn_filter *l2_tn_filter;
13512bfe3f2eSlogwang 
13522bfe3f2eSlogwang 	if (l2_tn_info->hash_map)
13532bfe3f2eSlogwang 		rte_free(l2_tn_info->hash_map);
13542bfe3f2eSlogwang 	if (l2_tn_info->hash_handle)
13552bfe3f2eSlogwang 		rte_hash_free(l2_tn_info->hash_handle);
13562bfe3f2eSlogwang 
13572bfe3f2eSlogwang 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
13582bfe3f2eSlogwang 		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
13592bfe3f2eSlogwang 			     l2_tn_filter,
13602bfe3f2eSlogwang 			     entries);
13612bfe3f2eSlogwang 		rte_free(l2_tn_filter);
13622bfe3f2eSlogwang 	}
13632bfe3f2eSlogwang 
13642bfe3f2eSlogwang 	return 0;
13652bfe3f2eSlogwang }
13662bfe3f2eSlogwang 
13672bfe3f2eSlogwang static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
13682bfe3f2eSlogwang {
13692bfe3f2eSlogwang 	struct ixgbe_hw_fdir_info *fdir_info =
13702bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
13712bfe3f2eSlogwang 	char fdir_hash_name[RTE_HASH_NAMESIZE];
13722bfe3f2eSlogwang 	struct rte_hash_parameters fdir_hash_params = {
13732bfe3f2eSlogwang 		.name = fdir_hash_name,
13742bfe3f2eSlogwang 		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
13752bfe3f2eSlogwang 		.key_len = sizeof(union ixgbe_atr_input),
13762bfe3f2eSlogwang 		.hash_func = rte_hash_crc,
13772bfe3f2eSlogwang 		.hash_func_init_val = 0,
13782bfe3f2eSlogwang 		.socket_id = rte_socket_id(),
13792bfe3f2eSlogwang 	};
13802bfe3f2eSlogwang 
13812bfe3f2eSlogwang 	TAILQ_INIT(&fdir_info->fdir_list);
13822bfe3f2eSlogwang 	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
13832bfe3f2eSlogwang 		 "fdir_%s", eth_dev->device->name);
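	/* e.g. the table name becomes "fdir_0000:18:00.0" (illustrative BDF) */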
13842bfe3f2eSlogwang 	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
13852bfe3f2eSlogwang 	if (!fdir_info->hash_handle) {
13862bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
13872bfe3f2eSlogwang 		return -EINVAL;
13882bfe3f2eSlogwang 	}
13892bfe3f2eSlogwang 	fdir_info->hash_map = rte_zmalloc("ixgbe",
13902bfe3f2eSlogwang 					  sizeof(struct ixgbe_fdir_filter *) *
13912bfe3f2eSlogwang 					  IXGBE_MAX_FDIR_FILTER_NUM,
13922bfe3f2eSlogwang 					  0);
13932bfe3f2eSlogwang 	if (!fdir_info->hash_map) {
13942bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
13952bfe3f2eSlogwang 			     "Failed to allocate memory for fdir hash map!");
13962bfe3f2eSlogwang 		return -ENOMEM;
13972bfe3f2eSlogwang 	}
13982bfe3f2eSlogwang 	fdir_info->mask_added = FALSE;
13992bfe3f2eSlogwang 
14002bfe3f2eSlogwang 	return 0;
14012bfe3f2eSlogwang }
14022bfe3f2eSlogwang 
14032bfe3f2eSlogwang static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
14042bfe3f2eSlogwang {
14052bfe3f2eSlogwang 	struct ixgbe_l2_tn_info *l2_tn_info =
14062bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
14072bfe3f2eSlogwang 	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
14082bfe3f2eSlogwang 	struct rte_hash_parameters l2_tn_hash_params = {
14092bfe3f2eSlogwang 		.name = l2_tn_hash_name,
14102bfe3f2eSlogwang 		.entries = IXGBE_MAX_L2_TN_FILTER_NUM,
14112bfe3f2eSlogwang 		.key_len = sizeof(struct ixgbe_l2_tn_key),
14122bfe3f2eSlogwang 		.hash_func = rte_hash_crc,
14132bfe3f2eSlogwang 		.hash_func_init_val = 0,
14142bfe3f2eSlogwang 		.socket_id = rte_socket_id(),
14152bfe3f2eSlogwang 	};
14162bfe3f2eSlogwang 
14172bfe3f2eSlogwang 	TAILQ_INIT(&l2_tn_info->l2_tn_list);
14182bfe3f2eSlogwang 	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
14192bfe3f2eSlogwang 		 "l2_tn_%s", eth_dev->device->name);
14202bfe3f2eSlogwang 	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
14212bfe3f2eSlogwang 	if (!l2_tn_info->hash_handle) {
14222bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
14232bfe3f2eSlogwang 		return -EINVAL;
14242bfe3f2eSlogwang 	}
14252bfe3f2eSlogwang 	l2_tn_info->hash_map = rte_zmalloc("ixgbe",
14262bfe3f2eSlogwang 				   sizeof(struct ixgbe_l2_tn_filter *) *
14272bfe3f2eSlogwang 				   IXGBE_MAX_L2_TN_FILTER_NUM,
14282bfe3f2eSlogwang 				   0);
14292bfe3f2eSlogwang 	if (!l2_tn_info->hash_map) {
14302bfe3f2eSlogwang 		PMD_INIT_LOG(ERR,
14312bfe3f2eSlogwang 			"Failed to allocate memory for L2 TN hash map!");
14322bfe3f2eSlogwang 		return -ENOMEM;
14332bfe3f2eSlogwang 	}
14342bfe3f2eSlogwang 	l2_tn_info->e_tag_en = FALSE;
14352bfe3f2eSlogwang 	l2_tn_info->e_tag_fwd_en = FALSE;
14364418919fSjohnjiang 	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
14372bfe3f2eSlogwang 
14382bfe3f2eSlogwang 	return 0;
14392bfe3f2eSlogwang }
1440a9643ea8Slogwang /*
1441a9643ea8Slogwang  * Negotiate mailbox API version with the PF.
1442a9643ea8Slogwang  * After reset, the API version is always set back to the basic one (ixgbe_mbox_api_10).
1443a9643ea8Slogwang  * Then we try to negotiate starting with the most recent one.
1444a9643ea8Slogwang  * If all negotiation attempts fail, then we will proceed with
1445a9643ea8Slogwang  * the default one (ixgbe_mbox_api_10).
1446a9643ea8Slogwang  */
1447a9643ea8Slogwang static void
1448a9643ea8Slogwang ixgbevf_negotiate_api(struct ixgbe_hw *hw)
1449a9643ea8Slogwang {
1450a9643ea8Slogwang 	int32_t i;
1451a9643ea8Slogwang 
1452a9643ea8Slogwang 	/* start with highest supported, proceed down */
1453a9643ea8Slogwang 	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
14544418919fSjohnjiang 		ixgbe_mbox_api_13,
1455a9643ea8Slogwang 		ixgbe_mbox_api_12,
1456a9643ea8Slogwang 		ixgbe_mbox_api_11,
1457a9643ea8Slogwang 		ixgbe_mbox_api_10,
1458a9643ea8Slogwang 	};
1459a9643ea8Slogwang 
1460a9643ea8Slogwang 	for (i = 0;
1461a9643ea8Slogwang 			i != RTE_DIM(sup_ver) &&
1462a9643ea8Slogwang 			ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
1463a9643ea8Slogwang 			i++)
1464a9643ea8Slogwang 		;
1465a9643ea8Slogwang }
1466a9643ea8Slogwang 
1467a9643ea8Slogwang static void
14684418919fSjohnjiang generate_random_mac_addr(struct rte_ether_addr *mac_addr)
1469a9643ea8Slogwang {
1470a9643ea8Slogwang 	uint64_t random;
1471a9643ea8Slogwang 
1472a9643ea8Slogwang 	/* Set Organizationally Unique Identifier (OUI) prefix. */
1473a9643ea8Slogwang 	mac_addr->addr_bytes[0] = 0x00;
1474a9643ea8Slogwang 	mac_addr->addr_bytes[1] = 0x09;
1475a9643ea8Slogwang 	mac_addr->addr_bytes[2] = 0xC0;
1476a9643ea8Slogwang 	/* Force indication of locally assigned MAC address. */
14774418919fSjohnjiang 	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
1478a9643ea8Slogwang 	/* Generate the last 3 bytes of the MAC address with a random number. */
1479a9643ea8Slogwang 	random = rte_rand();
1480a9643ea8Slogwang 	memcpy(&mac_addr->addr_bytes[3], &random, 3);
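	/* The result has the form 02:09:C0:xx:xx:xx, since the locally
	 * administered bit (0x02) was OR'ed into the first OUI byte above.
	 */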
1481a9643ea8Slogwang }
1482a9643ea8Slogwang 
14834b05018fSfengbojiang static int
14844b05018fSfengbojiang devarg_handle_int(__rte_unused const char *key, const char *value,
14854b05018fSfengbojiang 		  void *extra_args)
14864b05018fSfengbojiang {
14874b05018fSfengbojiang 	uint16_t *n = extra_args;
14884b05018fSfengbojiang 
14894b05018fSfengbojiang 	if (value == NULL || extra_args == NULL)
14904b05018fSfengbojiang 		return -EINVAL;
14914b05018fSfengbojiang 
14924b05018fSfengbojiang 	*n = (uint16_t)strtoul(value, NULL, 0);
14934b05018fSfengbojiang 	if (*n == USHRT_MAX && errno == ERANGE)
14944b05018fSfengbojiang 		return -1;
14954b05018fSfengbojiang 
14964b05018fSfengbojiang 	return 0;
14974b05018fSfengbojiang }
14984b05018fSfengbojiang 
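/*
 * Illustrative usage (option spelling depends on the DPDK version): the
 * devarg below is supplied on the EAL command line together with the VF's
 * PCI address, e.g.
 *   -a 0000:18:00.1,pflink_fullchk=1
 */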
14994b05018fSfengbojiang static void
15004b05018fSfengbojiang ixgbevf_parse_devargs(struct ixgbe_adapter *adapter,
15014b05018fSfengbojiang 		      struct rte_devargs *devargs)
15024b05018fSfengbojiang {
15034b05018fSfengbojiang 	struct rte_kvargs *kvlist;
15044b05018fSfengbojiang 	uint16_t pflink_fullchk;
15054b05018fSfengbojiang 
15064b05018fSfengbojiang 	if (devargs == NULL)
15074b05018fSfengbojiang 		return;
15084b05018fSfengbojiang 
15094b05018fSfengbojiang 	kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments);
15104b05018fSfengbojiang 	if (kvlist == NULL)
15114b05018fSfengbojiang 		return;
15124b05018fSfengbojiang 
15134b05018fSfengbojiang 	if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 &&
15144b05018fSfengbojiang 	    rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK,
15154b05018fSfengbojiang 			       devarg_handle_int, &pflink_fullchk) == 0 &&
15164b05018fSfengbojiang 	    pflink_fullchk == 1)
15174b05018fSfengbojiang 		adapter->pflink_fullchk = 1;
15184b05018fSfengbojiang 
15194b05018fSfengbojiang 	rte_kvargs_free(kvlist);
15204b05018fSfengbojiang }
15214b05018fSfengbojiang 
1522a9643ea8Slogwang /*
1523a9643ea8Slogwang  * Virtual Function device init
1524a9643ea8Slogwang  */
1525a9643ea8Slogwang static int
1526a9643ea8Slogwang eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
1527a9643ea8Slogwang {
1528a9643ea8Slogwang 	int diag;
1529a9643ea8Slogwang 	uint32_t tc, tcs;
15304418919fSjohnjiang 	struct ixgbe_adapter *ad = eth_dev->data->dev_private;
15312bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
15322bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1533a9643ea8Slogwang 	struct ixgbe_hw *hw =
1534a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1535a9643ea8Slogwang 	struct ixgbe_vfta *shadow_vfta =
1536a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
1537a9643ea8Slogwang 	struct ixgbe_hwstrip *hwstrip =
1538a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
15394418919fSjohnjiang 	struct rte_ether_addr *perm_addr =
15404418919fSjohnjiang 		(struct rte_ether_addr *)hw->mac.perm_addr;
1541a9643ea8Slogwang 
1542a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1543a9643ea8Slogwang 
1544a9643ea8Slogwang 	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
1545*2d9fd380Sjfb8856606 	eth_dev->rx_descriptor_done   = ixgbe_dev_rx_descriptor_done;
1546*2d9fd380Sjfb8856606 	eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
1547*2d9fd380Sjfb8856606 	eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
1548a9643ea8Slogwang 	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
1549a9643ea8Slogwang 	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
1550a9643ea8Slogwang 
1551a9643ea8Slogwang 	/* for secondary processes, we don't initialise any further as primary
1552a9643ea8Slogwang 	 * has already done this work. Only check we don't need a different
1553a9643ea8Slogwang 	 * RX function
1554a9643ea8Slogwang 	 */
1555a9643ea8Slogwang 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1556a9643ea8Slogwang 		struct ixgbe_tx_queue *txq;
1557a9643ea8Slogwang 		/* TX queue function in primary, set by last queue initialized.
1558a9643ea8Slogwang 		 * Tx queue may not have been initialized by primary process.
1559a9643ea8Slogwang 		 */
1560a9643ea8Slogwang 		if (eth_dev->data->tx_queues) {
1561a9643ea8Slogwang 			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
1562a9643ea8Slogwang 			ixgbe_set_tx_function(eth_dev, txq);
1563a9643ea8Slogwang 		} else {
1564a9643ea8Slogwang 			/* Use default TX function if we get here */
1565a9643ea8Slogwang 			PMD_INIT_LOG(NOTICE,
1566a9643ea8Slogwang 				     "No TX queues configured yet. Using default TX function.");
1567a9643ea8Slogwang 		}
1568a9643ea8Slogwang 
1569a9643ea8Slogwang 		ixgbe_set_rx_function(eth_dev);
1570a9643ea8Slogwang 
1571a9643ea8Slogwang 		return 0;
1572a9643ea8Slogwang 	}
1573a9643ea8Slogwang 
15744418919fSjohnjiang 	rte_atomic32_clear(&ad->link_thread_running);
15754b05018fSfengbojiang 	ixgbevf_parse_devargs(eth_dev->data->dev_private,
15764b05018fSfengbojiang 			      pci_dev->device.devargs);
15774b05018fSfengbojiang 
1578a9643ea8Slogwang 	rte_eth_copy_pci_info(eth_dev, pci_dev);
1579*2d9fd380Sjfb8856606 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1580a9643ea8Slogwang 
1581a9643ea8Slogwang 	hw->device_id = pci_dev->id.device_id;
1582a9643ea8Slogwang 	hw->vendor_id = pci_dev->id.vendor_id;
1583a9643ea8Slogwang 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1584a9643ea8Slogwang 
1585a9643ea8Slogwang 	/* initialize the vfta */
1586a9643ea8Slogwang 	memset(shadow_vfta, 0, sizeof(*shadow_vfta));
1587a9643ea8Slogwang 
1588a9643ea8Slogwang 	/* initialize the hw strip bitmap*/
1589a9643ea8Slogwang 	memset(hwstrip, 0, sizeof(*hwstrip));
1590a9643ea8Slogwang 
1591a9643ea8Slogwang 	/* Initialize the shared code (base driver) */
1592a9643ea8Slogwang 	diag = ixgbe_init_shared_code(hw);
1593a9643ea8Slogwang 	if (diag != IXGBE_SUCCESS) {
1594a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
1595a9643ea8Slogwang 		return -EIO;
1596a9643ea8Slogwang 	}
1597a9643ea8Slogwang 
1598a9643ea8Slogwang 	/* init_mailbox_params */
1599a9643ea8Slogwang 	hw->mbx.ops.init_params(hw);
1600a9643ea8Slogwang 
1601a9643ea8Slogwang 	/* Reset the hw statistics */
1602a9643ea8Slogwang 	ixgbevf_dev_stats_reset(eth_dev);
1603a9643ea8Slogwang 
1604a9643ea8Slogwang 	/* Disable the interrupts for VF */
1605d30ea906Sjfb8856606 	ixgbevf_intr_disable(eth_dev);
1606a9643ea8Slogwang 
1607a9643ea8Slogwang 	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
1608a9643ea8Slogwang 	diag = hw->mac.ops.reset_hw(hw);
1609a9643ea8Slogwang 
1610a9643ea8Slogwang 	/*
1611a9643ea8Slogwang 	 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
1612a9643ea8Slogwang 	 * the underlying PF driver has not assigned a MAC address to the VF.
1613a9643ea8Slogwang 	 * In this case, assign a random MAC address.
1614a9643ea8Slogwang 	 */
1615a9643ea8Slogwang 	if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
1616a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1617d30ea906Sjfb8856606 		/*
1618d30ea906Sjfb8856606 		 * This error code will be propagated to the app by
1619d30ea906Sjfb8856606 		 * rte_eth_dev_reset, so use a public error code rather than
1620d30ea906Sjfb8856606 		 * the internal-only IXGBE_ERR_RESET_FAILED
1621d30ea906Sjfb8856606 		 */
1622d30ea906Sjfb8856606 		return -EAGAIN;
1623a9643ea8Slogwang 	}
1624a9643ea8Slogwang 
1625a9643ea8Slogwang 	/* negotiate mailbox API version to use with the PF. */
1626a9643ea8Slogwang 	ixgbevf_negotiate_api(hw);
1627a9643ea8Slogwang 
1628a9643ea8Slogwang 	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
1629a9643ea8Slogwang 	ixgbevf_get_queues(hw, &tcs, &tc);
1630a9643ea8Slogwang 
1631a9643ea8Slogwang 	/* Allocate memory for storing MAC addresses */
16324418919fSjohnjiang 	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN *
1633a9643ea8Slogwang 					       hw->mac.num_rar_entries, 0);
1634a9643ea8Slogwang 	if (eth_dev->data->mac_addrs == NULL) {
1635a9643ea8Slogwang 		PMD_INIT_LOG(ERR,
1636a9643ea8Slogwang 			     "Failed to allocate %u bytes needed to store "
1637a9643ea8Slogwang 			     "MAC addresses",
16384418919fSjohnjiang 			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
1639a9643ea8Slogwang 		return -ENOMEM;
1640a9643ea8Slogwang 	}
1641a9643ea8Slogwang 
1642a9643ea8Slogwang 	/* Generate a random MAC address, if none was assigned by PF. */
16434418919fSjohnjiang 	if (rte_is_zero_ether_addr(perm_addr)) {
1644a9643ea8Slogwang 		generate_random_mac_addr(perm_addr);
1645a9643ea8Slogwang 		diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
1646a9643ea8Slogwang 		if (diag) {
1647a9643ea8Slogwang 			rte_free(eth_dev->data->mac_addrs);
1648a9643ea8Slogwang 			eth_dev->data->mac_addrs = NULL;
1649a9643ea8Slogwang 			return diag;
1650a9643ea8Slogwang 		}
1651a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1652a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1653a9643ea8Slogwang 			     "%02x:%02x:%02x:%02x:%02x:%02x",
1654a9643ea8Slogwang 			     perm_addr->addr_bytes[0],
1655a9643ea8Slogwang 			     perm_addr->addr_bytes[1],
1656a9643ea8Slogwang 			     perm_addr->addr_bytes[2],
1657a9643ea8Slogwang 			     perm_addr->addr_bytes[3],
1658a9643ea8Slogwang 			     perm_addr->addr_bytes[4],
1659a9643ea8Slogwang 			     perm_addr->addr_bytes[5]);
1660a9643ea8Slogwang 	}
1661a9643ea8Slogwang 
1662a9643ea8Slogwang 	/* Copy the permanent MAC address */
16634418919fSjohnjiang 	rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
1664a9643ea8Slogwang 
1665a9643ea8Slogwang 	/* reset the hardware with the new settings */
1666a9643ea8Slogwang 	diag = hw->mac.ops.start_hw(hw);
1667a9643ea8Slogwang 	switch (diag) {
1668a9643ea8Slogwang 	case  0:
1669a9643ea8Slogwang 		break;
1670a9643ea8Slogwang 
1671a9643ea8Slogwang 	default:
1672a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
1673a9643ea8Slogwang 		return -EIO;
1674a9643ea8Slogwang 	}
1675a9643ea8Slogwang 
16762bfe3f2eSlogwang 	rte_intr_callback_register(intr_handle,
16772bfe3f2eSlogwang 				   ixgbevf_dev_interrupt_handler, eth_dev);
16782bfe3f2eSlogwang 	rte_intr_enable(intr_handle);
1679d30ea906Sjfb8856606 	ixgbevf_intr_enable(eth_dev);
1680a9643ea8Slogwang 
1681a9643ea8Slogwang 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
1682a9643ea8Slogwang 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
1683a9643ea8Slogwang 		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
1684a9643ea8Slogwang 
1685a9643ea8Slogwang 	return 0;
1686a9643ea8Slogwang }
1687a9643ea8Slogwang 
1688a9643ea8Slogwang /* Virtual Function device uninit */
1689a9643ea8Slogwang 
1690a9643ea8Slogwang static int
1691a9643ea8Slogwang eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
1692a9643ea8Slogwang {
1693a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1694a9643ea8Slogwang 
1695a9643ea8Slogwang 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1696d30ea906Sjfb8856606 		return 0;
1697a9643ea8Slogwang 
1698a9643ea8Slogwang 	ixgbevf_dev_close(eth_dev);
1699a9643ea8Slogwang 
1700a9643ea8Slogwang 	return 0;
1701a9643ea8Slogwang }
1702a9643ea8Slogwang 
1703d30ea906Sjfb8856606 static int
1704d30ea906Sjfb8856606 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
17052bfe3f2eSlogwang 		struct rte_pci_device *pci_dev)
17062bfe3f2eSlogwang {
1707d30ea906Sjfb8856606 	char name[RTE_ETH_NAME_MAX_LEN];
1708d30ea906Sjfb8856606 	struct rte_eth_dev *pf_ethdev;
1709d30ea906Sjfb8856606 	struct rte_eth_devargs eth_da;
1710d30ea906Sjfb8856606 	int i, retval;
1711d30ea906Sjfb8856606 
1712d30ea906Sjfb8856606 	if (pci_dev->device.devargs) {
1713d30ea906Sjfb8856606 		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
1714d30ea906Sjfb8856606 				&eth_da);
1715d30ea906Sjfb8856606 		if (retval)
1716d30ea906Sjfb8856606 			return retval;
1717d30ea906Sjfb8856606 	} else
1718d30ea906Sjfb8856606 		memset(&eth_da, 0, sizeof(eth_da));
1719d30ea906Sjfb8856606 
1720d30ea906Sjfb8856606 	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
1721d30ea906Sjfb8856606 		sizeof(struct ixgbe_adapter),
1722d30ea906Sjfb8856606 		eth_dev_pci_specific_init, pci_dev,
1723d30ea906Sjfb8856606 		eth_ixgbe_dev_init, NULL);
1724d30ea906Sjfb8856606 
1725d30ea906Sjfb8856606 	if (retval || eth_da.nb_representor_ports < 1)
1726d30ea906Sjfb8856606 		return retval;
1727d30ea906Sjfb8856606 
1728d30ea906Sjfb8856606 	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1729d30ea906Sjfb8856606 	if (pf_ethdev == NULL)
1730d30ea906Sjfb8856606 		return -ENODEV;
1731d30ea906Sjfb8856606 
1732d30ea906Sjfb8856606 	/* probe VF representor ports */
1733d30ea906Sjfb8856606 	for (i = 0; i < eth_da.nb_representor_ports; i++) {
1734d30ea906Sjfb8856606 		struct ixgbe_vf_info *vfinfo;
1735d30ea906Sjfb8856606 		struct ixgbe_vf_representor representor;
1736d30ea906Sjfb8856606 
1737d30ea906Sjfb8856606 		vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
1738d30ea906Sjfb8856606 			pf_ethdev->data->dev_private);
1739d30ea906Sjfb8856606 		if (vfinfo == NULL) {
1740d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR,
1741d30ea906Sjfb8856606 				"no virtual functions supported by PF");
1742d30ea906Sjfb8856606 			break;
1743d30ea906Sjfb8856606 		}
1744d30ea906Sjfb8856606 
1745d30ea906Sjfb8856606 		representor.vf_id = eth_da.representor_ports[i];
1746d30ea906Sjfb8856606 		representor.switch_domain_id = vfinfo->switch_domain_id;
1747d30ea906Sjfb8856606 		representor.pf_ethdev = pf_ethdev;
1748d30ea906Sjfb8856606 
1749d30ea906Sjfb8856606 		/* representor port net_bdf_port */
1750d30ea906Sjfb8856606 		snprintf(name, sizeof(name), "net_%s_representor_%d",
1751d30ea906Sjfb8856606 			pci_dev->device.name,
1752d30ea906Sjfb8856606 			eth_da.representor_ports[i]);
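		/* e.g. "net_0000:18:00.0_representor_0" (illustrative BDF) */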
1753d30ea906Sjfb8856606 
1754d30ea906Sjfb8856606 		retval = rte_eth_dev_create(&pci_dev->device, name,
1755d30ea906Sjfb8856606 			sizeof(struct ixgbe_vf_representor), NULL, NULL,
1756d30ea906Sjfb8856606 			ixgbe_vf_representor_init, &representor);
1757d30ea906Sjfb8856606 
1758d30ea906Sjfb8856606 		if (retval)
1759d30ea906Sjfb8856606 			PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
1760d30ea906Sjfb8856606 				"representor %s.", name);
1761d30ea906Sjfb8856606 	}
1762d30ea906Sjfb8856606 
1763d30ea906Sjfb8856606 	return 0;
17642bfe3f2eSlogwang }
17652bfe3f2eSlogwang 
17662bfe3f2eSlogwang static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
17672bfe3f2eSlogwang {
1768d30ea906Sjfb8856606 	struct rte_eth_dev *ethdev;
1769d30ea906Sjfb8856606 
1770d30ea906Sjfb8856606 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1771d30ea906Sjfb8856606 	if (!ethdev)
17724418919fSjohnjiang 		return 0;
1773d30ea906Sjfb8856606 
1774d30ea906Sjfb8856606 	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
17754418919fSjohnjiang 		return rte_eth_dev_pci_generic_remove(pci_dev,
17764418919fSjohnjiang 					ixgbe_vf_representor_uninit);
1777d30ea906Sjfb8856606 	else
17784418919fSjohnjiang 		return rte_eth_dev_pci_generic_remove(pci_dev,
17794418919fSjohnjiang 						eth_ixgbe_dev_uninit);
17802bfe3f2eSlogwang }
17812bfe3f2eSlogwang 
17822bfe3f2eSlogwang static struct rte_pci_driver rte_ixgbe_pmd = {
1783a9643ea8Slogwang 	.id_table = pci_id_ixgbe_map,
17844418919fSjohnjiang 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
17852bfe3f2eSlogwang 	.probe = eth_ixgbe_pci_probe,
17862bfe3f2eSlogwang 	.remove = eth_ixgbe_pci_remove,
1787a9643ea8Slogwang };
1788a9643ea8Slogwang 
17892bfe3f2eSlogwang static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
17902bfe3f2eSlogwang 	struct rte_pci_device *pci_dev)
17912bfe3f2eSlogwang {
17922bfe3f2eSlogwang 	return rte_eth_dev_pci_generic_probe(pci_dev,
17932bfe3f2eSlogwang 		sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
17942bfe3f2eSlogwang }
17952bfe3f2eSlogwang 
17962bfe3f2eSlogwang static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
17972bfe3f2eSlogwang {
17982bfe3f2eSlogwang 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
17992bfe3f2eSlogwang }
18002bfe3f2eSlogwang 
1801a9643ea8Slogwang /*
1802a9643ea8Slogwang  * virtual function driver struct
1803a9643ea8Slogwang  */
18042bfe3f2eSlogwang static struct rte_pci_driver rte_ixgbevf_pmd = {
1805a9643ea8Slogwang 	.id_table = pci_id_ixgbevf_map,
18064418919fSjohnjiang 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
18072bfe3f2eSlogwang 	.probe = eth_ixgbevf_pci_probe,
18082bfe3f2eSlogwang 	.remove = eth_ixgbevf_pci_remove,
1809a9643ea8Slogwang };
1810a9643ea8Slogwang 
1811a9643ea8Slogwang static int
1812a9643ea8Slogwang ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1813a9643ea8Slogwang {
1814a9643ea8Slogwang 	struct ixgbe_hw *hw =
1815a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1816a9643ea8Slogwang 	struct ixgbe_vfta *shadow_vfta =
1817a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1818a9643ea8Slogwang 	uint32_t vfta;
1819a9643ea8Slogwang 	uint32_t vid_idx;
1820a9643ea8Slogwang 	uint32_t vid_bit;
1821a9643ea8Slogwang 
1822a9643ea8Slogwang 	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
1823a9643ea8Slogwang 	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
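	/* Worked example: vlan_id 100 -> vid_idx 3, vid_bit 1 << 4; i.e.
	 * bit 4 of the fourth 32-bit VFTA word, since 100 = 3 * 32 + 4.
	 */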
1824a9643ea8Slogwang 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
1825a9643ea8Slogwang 	if (on)
1826a9643ea8Slogwang 		vfta |= vid_bit;
1827a9643ea8Slogwang 	else
1828a9643ea8Slogwang 		vfta &= ~vid_bit;
1829a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);
1830a9643ea8Slogwang 
1831a9643ea8Slogwang 	/* update local VFTA copy */
1832a9643ea8Slogwang 	shadow_vfta->vfta[vid_idx] = vfta;
1833a9643ea8Slogwang 
1834a9643ea8Slogwang 	return 0;
1835a9643ea8Slogwang }
1836a9643ea8Slogwang 
1837a9643ea8Slogwang static void
1838a9643ea8Slogwang ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
1839a9643ea8Slogwang {
1840a9643ea8Slogwang 	if (on)
1841a9643ea8Slogwang 		ixgbe_vlan_hw_strip_enable(dev, queue);
1842a9643ea8Slogwang 	else
1843a9643ea8Slogwang 		ixgbe_vlan_hw_strip_disable(dev, queue);
1844a9643ea8Slogwang }
1845a9643ea8Slogwang 
1846a9643ea8Slogwang static int
1847a9643ea8Slogwang ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1848a9643ea8Slogwang 		    enum rte_vlan_type vlan_type,
1849a9643ea8Slogwang 		    uint16_t tpid)
1850a9643ea8Slogwang {
1851a9643ea8Slogwang 	struct ixgbe_hw *hw =
1852a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1853a9643ea8Slogwang 	int ret = 0;
1854a9643ea8Slogwang 	uint32_t reg;
1855a9643ea8Slogwang 	uint32_t qinq;
1856a9643ea8Slogwang 
1857a9643ea8Slogwang 	qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1858a9643ea8Slogwang 	qinq &= IXGBE_DMATXCTL_GDV;
1859a9643ea8Slogwang 
1860a9643ea8Slogwang 	switch (vlan_type) {
1861a9643ea8Slogwang 	case ETH_VLAN_TYPE_INNER:
1862a9643ea8Slogwang 		if (qinq) {
1863a9643ea8Slogwang 			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1864a9643ea8Slogwang 			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1865a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1866a9643ea8Slogwang 			reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1867a9643ea8Slogwang 			reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1868a9643ea8Slogwang 				| ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1869a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1870a9643ea8Slogwang 		} else {
1871a9643ea8Slogwang 			ret = -ENOTSUP;
1872a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Inner type is not supported"
1873a9643ea8Slogwang 				    " by single VLAN");
1874a9643ea8Slogwang 		}
1875a9643ea8Slogwang 		break;
1876a9643ea8Slogwang 	case ETH_VLAN_TYPE_OUTER:
1877a9643ea8Slogwang 		if (qinq) {
1878a9643ea8Slogwang 			/* Only the high 16 bits are valid */
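			/* e.g. tpid 0x88a8 becomes 0x88a80000 after the 16-bit shift */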
1879a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
1880a9643ea8Slogwang 					IXGBE_EXVET_VET_EXT_SHIFT);
1881a9643ea8Slogwang 		} else {
1882a9643ea8Slogwang 			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1883a9643ea8Slogwang 			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
1884a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
1885a9643ea8Slogwang 			reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1886a9643ea8Slogwang 			reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
1887a9643ea8Slogwang 				| ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
1888a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
1889a9643ea8Slogwang 		}
1890a9643ea8Slogwang 
1891a9643ea8Slogwang 		break;
1892a9643ea8Slogwang 	default:
1893a9643ea8Slogwang 		ret = -EINVAL;
1894a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
1895a9643ea8Slogwang 		break;
1896a9643ea8Slogwang 	}
1897a9643ea8Slogwang 
1898a9643ea8Slogwang 	return ret;
1899a9643ea8Slogwang }
1900a9643ea8Slogwang 
1901a9643ea8Slogwang void
1902a9643ea8Slogwang ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
1903a9643ea8Slogwang {
1904a9643ea8Slogwang 	struct ixgbe_hw *hw =
1905a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1906a9643ea8Slogwang 	uint32_t vlnctrl;
1907a9643ea8Slogwang 
1908a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1909a9643ea8Slogwang 
1910a9643ea8Slogwang 	/* Filter Table Disable */
1911a9643ea8Slogwang 	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1912a9643ea8Slogwang 	vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1913a9643ea8Slogwang 
1914a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1915a9643ea8Slogwang }
1916a9643ea8Slogwang 
1917a9643ea8Slogwang void
1918a9643ea8Slogwang ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1919a9643ea8Slogwang {
1920a9643ea8Slogwang 	struct ixgbe_hw *hw =
1921a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1922a9643ea8Slogwang 	struct ixgbe_vfta *shadow_vfta =
1923a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
1924a9643ea8Slogwang 	uint32_t vlnctrl;
1925a9643ea8Slogwang 	uint16_t i;
1926a9643ea8Slogwang 
1927a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1928a9643ea8Slogwang 
1929a9643ea8Slogwang 	/* Filter Table Enable */
1930a9643ea8Slogwang 	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1931a9643ea8Slogwang 	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1932a9643ea8Slogwang 	vlnctrl |= IXGBE_VLNCTRL_VFE;
1933a9643ea8Slogwang 
1934a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1935a9643ea8Slogwang 
1936a9643ea8Slogwang 	/* write whatever is in local vfta copy */
1937a9643ea8Slogwang 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1938a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
1939a9643ea8Slogwang }
1940a9643ea8Slogwang 
1941a9643ea8Slogwang static void
1942a9643ea8Slogwang ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
1943a9643ea8Slogwang {
1944a9643ea8Slogwang 	struct ixgbe_hwstrip *hwstrip =
1945a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
1946a9643ea8Slogwang 	struct ixgbe_rx_queue *rxq;
1947a9643ea8Slogwang 
1948a9643ea8Slogwang 	if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
1949a9643ea8Slogwang 		return;
1950a9643ea8Slogwang 
1951a9643ea8Slogwang 	if (on)
1952a9643ea8Slogwang 		IXGBE_SET_HWSTRIP(hwstrip, queue);
1953a9643ea8Slogwang 	else
1954a9643ea8Slogwang 		IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
1955a9643ea8Slogwang 
1956a9643ea8Slogwang 	if (queue >= dev->data->nb_rx_queues)
1957a9643ea8Slogwang 		return;
1958a9643ea8Slogwang 
1959a9643ea8Slogwang 	rxq = dev->data->rx_queues[queue];
1960a9643ea8Slogwang 
1961d30ea906Sjfb8856606 	if (on) {
19622bfe3f2eSlogwang 		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1963d30ea906Sjfb8856606 		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1964d30ea906Sjfb8856606 	} else {
19652bfe3f2eSlogwang 		rxq->vlan_flags = PKT_RX_VLAN;
1966d30ea906Sjfb8856606 		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1967d30ea906Sjfb8856606 	}
1968a9643ea8Slogwang }
1969a9643ea8Slogwang 
1970a9643ea8Slogwang static void
1971a9643ea8Slogwang ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
1972a9643ea8Slogwang {
1973a9643ea8Slogwang 	struct ixgbe_hw *hw =
1974a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1975a9643ea8Slogwang 	uint32_t ctrl;
1976a9643ea8Slogwang 
1977a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
1978a9643ea8Slogwang 
1979a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_82598EB) {
1980a9643ea8Slogwang 		/* No queue level support */
1981a9643ea8Slogwang 		PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
1982a9643ea8Slogwang 		return;
1983a9643ea8Slogwang 	}
1984a9643ea8Slogwang 
1985a9643ea8Slogwang 	/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
1986a9643ea8Slogwang 	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
1987a9643ea8Slogwang 	ctrl &= ~IXGBE_RXDCTL_VME;
1988a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
1989a9643ea8Slogwang 
1990a9643ea8Slogwang 	/* record those setting for HW strip per queue */
1991a9643ea8Slogwang 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
1992a9643ea8Slogwang }
1993a9643ea8Slogwang 
1994a9643ea8Slogwang static void
1995a9643ea8Slogwang ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
1996a9643ea8Slogwang {
1997a9643ea8Slogwang 	struct ixgbe_hw *hw =
1998a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1999a9643ea8Slogwang 	uint32_t ctrl;
2000a9643ea8Slogwang 
2001a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
2002a9643ea8Slogwang 
2003a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_82598EB) {
2004a9643ea8Slogwang 		/* No queue level support */
2005a9643ea8Slogwang 		PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
2006a9643ea8Slogwang 		return;
2007a9643ea8Slogwang 	}
2008a9643ea8Slogwang 
2009a9643ea8Slogwang 	/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
2010a9643ea8Slogwang 	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
2011a9643ea8Slogwang 	ctrl |= IXGBE_RXDCTL_VME;
2012a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
2013a9643ea8Slogwang 
2014a9643ea8Slogwang 	/* record the setting for HW strip per queue */
2015a9643ea8Slogwang 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
2016a9643ea8Slogwang }
2017a9643ea8Slogwang 
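/*
 * The next two helpers toggle double VLAN (QinQ) support: the GDV bit of
 * DMATXCTL and the extended-VLAN bit of CTRL_EXT together enable global
 * handling of double-tagged frames.
 */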
2018a9643ea8Slogwang static void
2019a9643ea8Slogwang ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2020a9643ea8Slogwang {
2021a9643ea8Slogwang 	struct ixgbe_hw *hw =
2022a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2023a9643ea8Slogwang 	uint32_t ctrl;
2024a9643ea8Slogwang 
2025a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
2026a9643ea8Slogwang 
2027a9643ea8Slogwang 	/* DMATXCTL: Generic Double VLAN Disable */
2028a9643ea8Slogwang 	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2029a9643ea8Slogwang 	ctrl &= ~IXGBE_DMATXCTL_GDV;
2030a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2031a9643ea8Slogwang 
2032a9643ea8Slogwang 	/* CTRL_EXT: Global Double VLAN Disable */
2033a9643ea8Slogwang 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2034a9643ea8Slogwang 	ctrl &= ~IXGBE_EXTENDED_VLAN;
2035a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2036a9643ea8Slogwang 
2037a9643ea8Slogwang }
2038a9643ea8Slogwang 
2039a9643ea8Slogwang static void
2040a9643ea8Slogwang ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2041a9643ea8Slogwang {
2042a9643ea8Slogwang 	struct ixgbe_hw *hw =
2043a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2044a9643ea8Slogwang 	uint32_t ctrl;
2045a9643ea8Slogwang 
2046a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
2047a9643ea8Slogwang 
2048a9643ea8Slogwang 	/* DMATXCTL: Generic Double VLAN Enable */
2049a9643ea8Slogwang 	ctrl  = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2050a9643ea8Slogwang 	ctrl |= IXGBE_DMATXCTL_GDV;
2051a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);
2052a9643ea8Slogwang 
2053a9643ea8Slogwang 	/* CTRL_EXT: Global Double VLAN Enable */
2054a9643ea8Slogwang 	ctrl  = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
2055a9643ea8Slogwang 	ctrl |= IXGBE_EXTENDED_VLAN;
2056a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
2057a9643ea8Slogwang 
2058a9643ea8Slogwang 	/* Clear pooling mode of PFVTCTL. It's required by X550. */
2059a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_X550 ||
2060a9643ea8Slogwang 	    hw->mac.type == ixgbe_mac_X550EM_x ||
2061a9643ea8Slogwang 	    hw->mac.type == ixgbe_mac_X550EM_a) {
2062a9643ea8Slogwang 		ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2063a9643ea8Slogwang 		ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
2064a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
2065a9643ea8Slogwang 	}
2066a9643ea8Slogwang 
2067a9643ea8Slogwang 	/*
2068a9643ea8Slogwang 	 * The VET EXT field in the EXVET register defaults to 0x8100, so no
2069a9643ea8Slogwang 	 * change is needed; the same applies to the VT field of DMATXCTL.
2070a9643ea8Slogwang 	 */
2071a9643ea8Slogwang }
2072a9643ea8Slogwang 
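/*
 * Apply the configured VLAN strip setting to the hardware: globally via
 * VLNCTRL.VME on 82598EB, or per queue via RXDCTL.VME on later MACs,
 * keeping the software strip bitmap in sync.
 */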
2073d30ea906Sjfb8856606 void
2074d30ea906Sjfb8856606 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
207528440c50Sjfb8856606 {
2076d30ea906Sjfb8856606 	struct ixgbe_hw *hw =
2077d30ea906Sjfb8856606 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2078d30ea906Sjfb8856606 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2079d30ea906Sjfb8856606 	uint32_t ctrl;
2080d30ea906Sjfb8856606 	uint16_t i;
2081d30ea906Sjfb8856606 	struct ixgbe_rx_queue *rxq;
2082d30ea906Sjfb8856606 	bool on;
2083d30ea906Sjfb8856606 
2084d30ea906Sjfb8856606 	PMD_INIT_FUNC_TRACE();
2085d30ea906Sjfb8856606 
2086d30ea906Sjfb8856606 	if (hw->mac.type == ixgbe_mac_82598EB) {
2087d30ea906Sjfb8856606 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
2088d30ea906Sjfb8856606 			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2089d30ea906Sjfb8856606 			ctrl |= IXGBE_VLNCTRL_VME;
2090d30ea906Sjfb8856606 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2091d30ea906Sjfb8856606 		} else {
2092d30ea906Sjfb8856606 			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2093d30ea906Sjfb8856606 			ctrl &= ~IXGBE_VLNCTRL_VME;
2094d30ea906Sjfb8856606 			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2095d30ea906Sjfb8856606 		}
2096d30ea906Sjfb8856606 	} else {
2097d30ea906Sjfb8856606 		/*
2098d30ea906Sjfb8856606 		 * Other 10G NIC, the VLAN strip can be setup
2099d30ea906Sjfb8856606 		 * per queue in RXDCTL
2100d30ea906Sjfb8856606 		 */
2101d30ea906Sjfb8856606 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
2102d30ea906Sjfb8856606 			rxq = dev->data->rx_queues[i];
2103d30ea906Sjfb8856606 			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
2104d30ea906Sjfb8856606 			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
2105d30ea906Sjfb8856606 				ctrl |= IXGBE_RXDCTL_VME;
2106d30ea906Sjfb8856606 				on = TRUE;
2107d30ea906Sjfb8856606 			} else {
2108d30ea906Sjfb8856606 				ctrl &= ~IXGBE_RXDCTL_VME;
2109d30ea906Sjfb8856606 				on = FALSE;
2110d30ea906Sjfb8856606 			}
2111d30ea906Sjfb8856606 			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
2112d30ea906Sjfb8856606 
2113d30ea906Sjfb8856606 			/* record the setting for HW strip per queue */
2114d30ea906Sjfb8856606 			ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
2115d30ea906Sjfb8856606 		}
2116d30ea906Sjfb8856606 	}
2117d30ea906Sjfb8856606 }
2118d30ea906Sjfb8856606 
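/*
 * Propagate the port-level VLAN_STRIP offload flag from dev_conf to the
 * offload flags of every Rx queue, so all queues follow the port-level
 * setting before the hardware is programmed.
 */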
2119d30ea906Sjfb8856606 static void
2120d30ea906Sjfb8856606 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
2121d30ea906Sjfb8856606 {
2122d30ea906Sjfb8856606 	uint16_t i;
2123d30ea906Sjfb8856606 	struct rte_eth_rxmode *rxmode;
2124d30ea906Sjfb8856606 	struct ixgbe_rx_queue *rxq;
2125d30ea906Sjfb8856606 
212628440c50Sjfb8856606 	if (mask & ETH_VLAN_STRIP_MASK) {
2127d30ea906Sjfb8856606 		rxmode = &dev->data->dev_conf.rxmode;
2128d30ea906Sjfb8856606 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2129d30ea906Sjfb8856606 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
2130d30ea906Sjfb8856606 				rxq = dev->data->rx_queues[i];
2131d30ea906Sjfb8856606 				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2132d30ea906Sjfb8856606 			}
21335af785ecSfengbojiang(姜凤波) 		else
2134d30ea906Sjfb8856606 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
2135d30ea906Sjfb8856606 				rxq = dev->data->rx_queues[i];
2136d30ea906Sjfb8856606 				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2137d30ea906Sjfb8856606 			}
2138d30ea906Sjfb8856606 	}
2139d30ea906Sjfb8856606 }
2140d30ea906Sjfb8856606 
2141d30ea906Sjfb8856606 static int
2142d30ea906Sjfb8856606 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
2143d30ea906Sjfb8856606 {
2144d30ea906Sjfb8856606 	struct rte_eth_rxmode *rxmode;
2145d30ea906Sjfb8856606 	rxmode = &dev->data->dev_conf.rxmode;
2146d30ea906Sjfb8856606 
2147d30ea906Sjfb8856606 	if (mask & ETH_VLAN_STRIP_MASK) {
2148d30ea906Sjfb8856606 		ixgbe_vlan_hw_strip_config(dev);
2149a9643ea8Slogwang 	}
2150a9643ea8Slogwang 
2151a9643ea8Slogwang 	if (mask & ETH_VLAN_FILTER_MASK) {
2152d30ea906Sjfb8856606 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2153a9643ea8Slogwang 			ixgbe_vlan_hw_filter_enable(dev);
2154a9643ea8Slogwang 		else
2155a9643ea8Slogwang 			ixgbe_vlan_hw_filter_disable(dev);
2156a9643ea8Slogwang 	}
2157a9643ea8Slogwang 
2158a9643ea8Slogwang 	if (mask & ETH_VLAN_EXTEND_MASK) {
2159d30ea906Sjfb8856606 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2160a9643ea8Slogwang 			ixgbe_vlan_hw_extend_enable(dev);
2161a9643ea8Slogwang 		else
2162a9643ea8Slogwang 			ixgbe_vlan_hw_extend_disable(dev);
2163a9643ea8Slogwang 	}
21642bfe3f2eSlogwang 
21652bfe3f2eSlogwang 	return 0;
2166a9643ea8Slogwang }
2167a9643ea8Slogwang 
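/*
 * eth_dev vlan_offload_set callback: first align every queue's offload
 * flags with the port-level configuration, then program the hardware for
 * strip/filter/extend as selected by the mask.
 */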
2168d30ea906Sjfb8856606 static int
2169d30ea906Sjfb8856606 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2170d30ea906Sjfb8856606 {
2171d30ea906Sjfb8856606 	ixgbe_config_vlan_strip_on_all_queues(dev, mask);
2172d30ea906Sjfb8856606 
2173d30ea906Sjfb8856606 	ixgbe_vlan_offload_config(dev, mask);
2174d30ea906Sjfb8856606 
2175d30ea906Sjfb8856606 	return 0;
2176d30ea906Sjfb8856606 }
2177d30ea906Sjfb8856606 
2178a9643ea8Slogwang static void
2179a9643ea8Slogwang ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2180a9643ea8Slogwang {
2181a9643ea8Slogwang 	struct ixgbe_hw *hw =
2182a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2183a9643ea8Slogwang 	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
2184a9643ea8Slogwang 	uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2185a9643ea8Slogwang 
2186a9643ea8Slogwang 	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
2187a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
2188a9643ea8Slogwang }
2189a9643ea8Slogwang 
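/*
 * Validate the number of Rx queues per VF when RSS is combined with
 * SR-IOV, and derive the pool layout. With IXGBE_MAX_RX_QUEUE_NUM = 128,
 * e.g. nb_rx_q = 4 selects 32 pools and nb_q_per_pool = 128 / 32 = 4;
 * the PF's default pool then starts at max_vfs * nb_q_per_pool.
 */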
2190a9643ea8Slogwang static int
2191a9643ea8Slogwang ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
2192a9643ea8Slogwang {
21932bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
21942bfe3f2eSlogwang 
2195a9643ea8Slogwang 	switch (nb_rx_q) {
2196a9643ea8Slogwang 	case 1:
2197a9643ea8Slogwang 	case 2:
2198a9643ea8Slogwang 		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
2199a9643ea8Slogwang 		break;
2200a9643ea8Slogwang 	case 4:
2201a9643ea8Slogwang 		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
2202a9643ea8Slogwang 		break;
2203a9643ea8Slogwang 	default:
2204a9643ea8Slogwang 		return -EINVAL;
2205a9643ea8Slogwang 	}
2206a9643ea8Slogwang 
22072bfe3f2eSlogwang 	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
22082bfe3f2eSlogwang 		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
22092bfe3f2eSlogwang 	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
22102bfe3f2eSlogwang 		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
2211a9643ea8Slogwang 	return 0;
2212a9643ea8Slogwang }
2213a9643ea8Slogwang 
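/*
 * Sanity-check the requested Rx/Tx multi-queue modes against the SR-IOV
 * state and MAC type, substituting a compatible mode where one exists
 * (e.g. plain RSS becomes VMDQ+RSS when SR-IOV is active) and rejecting
 * combinations the hardware cannot support.
 */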
2214a9643ea8Slogwang static int
2215a9643ea8Slogwang ixgbe_check_mq_mode(struct rte_eth_dev *dev)
2216a9643ea8Slogwang {
2217a9643ea8Slogwang 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
2218a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2219a9643ea8Slogwang 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
2220a9643ea8Slogwang 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
2221a9643ea8Slogwang 
2222a9643ea8Slogwang 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
2223a9643ea8Slogwang 		/* check multi-queue mode */
2224a9643ea8Slogwang 		switch (dev_conf->rxmode.mq_mode) {
2225a9643ea8Slogwang 		case ETH_MQ_RX_VMDQ_DCB:
22262bfe3f2eSlogwang 			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
22272bfe3f2eSlogwang 			break;
2228a9643ea8Slogwang 		case ETH_MQ_RX_VMDQ_DCB_RSS:
2229a9643ea8Slogwang 			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
2230a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "SRIOV active,"
2231a9643ea8Slogwang 					" unsupported mq_mode rx %d.",
2232a9643ea8Slogwang 					dev_conf->rxmode.mq_mode);
2233a9643ea8Slogwang 			return -EINVAL;
2234a9643ea8Slogwang 		case ETH_MQ_RX_RSS:
2235a9643ea8Slogwang 		case ETH_MQ_RX_VMDQ_RSS:
2236a9643ea8Slogwang 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
2237a9643ea8Slogwang 			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
2238a9643ea8Slogwang 				if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
2239a9643ea8Slogwang 					PMD_INIT_LOG(ERR, "SRIOV is active,"
2240a9643ea8Slogwang 						" invalid queue number"
2241a9643ea8Slogwang 						" for VMDQ RSS, allowed"
2242a9643ea8Slogwang 						" values are 1, 2 or 4.");
2243a9643ea8Slogwang 					return -EINVAL;
2244a9643ea8Slogwang 				}
2245a9643ea8Slogwang 			break;
2246a9643ea8Slogwang 		case ETH_MQ_RX_VMDQ_ONLY:
2247a9643ea8Slogwang 		case ETH_MQ_RX_NONE:
2248a9643ea8Slogwang 			/* if no mq mode is configured, use the default scheme */
2249a9643ea8Slogwang 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
2250a9643ea8Slogwang 			break;
2251a9643ea8Slogwang 		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
2252a9643ea8Slogwang 			/* SRIOV only works in VMDq enable mode */
2253a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "SRIOV is active,"
2254a9643ea8Slogwang 					" wrong mq_mode rx %d.",
2255a9643ea8Slogwang 					dev_conf->rxmode.mq_mode);
2256a9643ea8Slogwang 			return -EINVAL;
2257a9643ea8Slogwang 		}
2258a9643ea8Slogwang 
2259a9643ea8Slogwang 		switch (dev_conf->txmode.mq_mode) {
2260a9643ea8Slogwang 		case ETH_MQ_TX_VMDQ_DCB:
22612bfe3f2eSlogwang 			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
22622bfe3f2eSlogwang 			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
22632bfe3f2eSlogwang 			break;
2264a9643ea8Slogwang 		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
2265a9643ea8Slogwang 			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
2266a9643ea8Slogwang 			break;
2267a9643ea8Slogwang 		}
2268a9643ea8Slogwang 
2269a9643ea8Slogwang 		/* check valid queue number */
2270a9643ea8Slogwang 		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
2271a9643ea8Slogwang 		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
2272a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "SRIOV is active,"
2273a9643ea8Slogwang 					" nb_rx_q=%d nb_tx_q=%d queue number"
2274a9643ea8Slogwang 					" must be less than or equal to %d.",
2275a9643ea8Slogwang 					nb_rx_q, nb_tx_q,
2276a9643ea8Slogwang 					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
2277a9643ea8Slogwang 			return -EINVAL;
2278a9643ea8Slogwang 		}
2279a9643ea8Slogwang 	} else {
2280a9643ea8Slogwang 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
2281a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
2282a9643ea8Slogwang 					  " not supported.");
2283a9643ea8Slogwang 			return -EINVAL;
2284a9643ea8Slogwang 		}
2285a9643ea8Slogwang 		/* check configuration for vmdq+dcb mode */
2286a9643ea8Slogwang 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
2287a9643ea8Slogwang 			const struct rte_eth_vmdq_dcb_conf *conf;
2288a9643ea8Slogwang 
2289a9643ea8Slogwang 			if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2290a9643ea8Slogwang 				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
2291a9643ea8Slogwang 						IXGBE_VMDQ_DCB_NB_QUEUES);
2292a9643ea8Slogwang 				return -EINVAL;
2293a9643ea8Slogwang 			}
2294a9643ea8Slogwang 			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
2295a9643ea8Slogwang 			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2296a9643ea8Slogwang 			       conf->nb_queue_pools == ETH_32_POOLS)) {
2297a9643ea8Slogwang 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2298a9643ea8Slogwang 						" nb_queue_pools must be %d or %d.",
2299a9643ea8Slogwang 						ETH_16_POOLS, ETH_32_POOLS);
2300a9643ea8Slogwang 				return -EINVAL;
2301a9643ea8Slogwang 			}
2302a9643ea8Slogwang 		}
2303a9643ea8Slogwang 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
2304a9643ea8Slogwang 			const struct rte_eth_vmdq_dcb_tx_conf *conf;
2305a9643ea8Slogwang 
2306a9643ea8Slogwang 			if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
2307a9643ea8Slogwang 				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
2308a9643ea8Slogwang 						 IXGBE_VMDQ_DCB_NB_QUEUES);
2309a9643ea8Slogwang 				return -EINVAL;
2310a9643ea8Slogwang 			}
2311a9643ea8Slogwang 			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2312a9643ea8Slogwang 			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
2313a9643ea8Slogwang 			       conf->nb_queue_pools == ETH_32_POOLS)) {
2314a9643ea8Slogwang 				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
2315a9643ea8Slogwang 						" nb_queue_pools != %d and"
2316a9643ea8Slogwang 						" nb_queue_pools != %d.",
2317a9643ea8Slogwang 						ETH_16_POOLS, ETH_32_POOLS);
2318a9643ea8Slogwang 				return -EINVAL;
2319a9643ea8Slogwang 			}
2320a9643ea8Slogwang 		}
2321a9643ea8Slogwang 
2322a9643ea8Slogwang 		/* For DCB mode check our configuration before we go further */
2323a9643ea8Slogwang 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
2324a9643ea8Slogwang 			const struct rte_eth_dcb_rx_conf *conf;
2325a9643ea8Slogwang 
2326a9643ea8Slogwang 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
2327a9643ea8Slogwang 			if (!(conf->nb_tcs == ETH_4_TCS ||
2328a9643ea8Slogwang 			       conf->nb_tcs == ETH_8_TCS)) {
2329a9643ea8Slogwang 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2330a9643ea8Slogwang 						" and nb_tcs != %d.",
2331a9643ea8Slogwang 						ETH_4_TCS, ETH_8_TCS);
2332a9643ea8Slogwang 				return -EINVAL;
2333a9643ea8Slogwang 			}
2334a9643ea8Slogwang 		}
2335a9643ea8Slogwang 
2336a9643ea8Slogwang 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
2337a9643ea8Slogwang 			const struct rte_eth_dcb_tx_conf *conf;
2338a9643ea8Slogwang 
2339a9643ea8Slogwang 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
2340a9643ea8Slogwang 			if (!(conf->nb_tcs == ETH_4_TCS ||
2341a9643ea8Slogwang 			       conf->nb_tcs == ETH_8_TCS)) {
2342a9643ea8Slogwang 				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
2343a9643ea8Slogwang 						" and nb_tcs != %d.",
2344a9643ea8Slogwang 						ETH_4_TCS, ETH_8_TCS);
2345a9643ea8Slogwang 				return -EINVAL;
2346a9643ea8Slogwang 			}
2347a9643ea8Slogwang 		}
2348a9643ea8Slogwang 
2349a9643ea8Slogwang 		/*
2350a9643ea8Slogwang 		 * When DCB/VT is off, maximum number of queues changes,
2351a9643ea8Slogwang 		 * except for 82598EB, which remains constant.
2352a9643ea8Slogwang 		 */
2353a9643ea8Slogwang 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
2354a9643ea8Slogwang 				hw->mac.type != ixgbe_mac_82598EB) {
2355a9643ea8Slogwang 			if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
2356a9643ea8Slogwang 				PMD_INIT_LOG(ERR,
2357a9643ea8Slogwang 					     "Neither VT nor DCB are enabled, "
2358a9643ea8Slogwang 					     "nb_tx_q > %d.",
2359a9643ea8Slogwang 					     IXGBE_NONE_MODE_TX_NB_QUEUES);
2360a9643ea8Slogwang 				return -EINVAL;
2361a9643ea8Slogwang 			}
2362a9643ea8Slogwang 		}
2363a9643ea8Slogwang 	}
2364a9643ea8Slogwang 	return 0;
2365a9643ea8Slogwang }
2366a9643ea8Slogwang 
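/*
 * eth_dev configure callback. Runs before queue setup: validates the
 * multi-queue mode, flags a deferred link-status update, and
 * optimistically allows bulk-alloc and vector Rx until some queue fails
 * the preconditions.
 */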
2367a9643ea8Slogwang static int
2368a9643ea8Slogwang ixgbe_dev_configure(struct rte_eth_dev *dev)
2369a9643ea8Slogwang {
2370a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
2371a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
23724b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
2373a9643ea8Slogwang 	int ret;
2374a9643ea8Slogwang 
2375a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
23764418919fSjohnjiang 
23774418919fSjohnjiang 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
23784418919fSjohnjiang 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
23794418919fSjohnjiang 
2380a9643ea8Slogwang 	/* multiple queue mode checking */
2381a9643ea8Slogwang 	ret  = ixgbe_check_mq_mode(dev);
2382a9643ea8Slogwang 	if (ret != 0) {
2383a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
2384a9643ea8Slogwang 			    ret);
2385a9643ea8Slogwang 		return ret;
2386a9643ea8Slogwang 	}
2387a9643ea8Slogwang 
2388a9643ea8Slogwang 	/* set flag to update link status after init */
2389a9643ea8Slogwang 	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2390a9643ea8Slogwang 
2391a9643ea8Slogwang 	/*
2392a9643ea8Slogwang 	 * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
2393a9643ea8Slogwang 	 * allocation or vector Rx preconditions, we will reset it.
2394a9643ea8Slogwang 	 */
2395a9643ea8Slogwang 	adapter->rx_bulk_alloc_allowed = true;
2396a9643ea8Slogwang 	adapter->rx_vec_allowed = true;
2397a9643ea8Slogwang 
2398a9643ea8Slogwang 	return 0;
2399a9643ea8Slogwang }
2400a9643ea8Slogwang 
2401a9643ea8Slogwang static void
2402a9643ea8Slogwang ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
2403a9643ea8Slogwang {
2404a9643ea8Slogwang 	struct ixgbe_hw *hw =
2405a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2406a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
2407a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2408a9643ea8Slogwang 	uint32_t gpie;
2409a9643ea8Slogwang 
2410a9643ea8Slogwang 	/* only set it up on X550EM_X */
2411a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_X550EM_x) {
2412a9643ea8Slogwang 		gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2413a9643ea8Slogwang 		gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
2414a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2415a9643ea8Slogwang 		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
2416a9643ea8Slogwang 			intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
2417a9643ea8Slogwang 	}
2418a9643ea8Slogwang }
2419a9643ea8Slogwang 
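/*
 * Program a Tx rate limit (in Mbit/s) on the queues of one VF's pool.
 * q_msk selects which queues of the pool are limited; e.g. q_msk = 0x3
 * limits the pool's first two queues. If the sum of all stored per-queue
 * VF rates would exceed the link speed, the VF's stored rates are
 * cleared and -EINVAL is returned.
 */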
24202bfe3f2eSlogwang int
24212bfe3f2eSlogwang ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
24222bfe3f2eSlogwang 			uint16_t tx_rate, uint64_t q_msk)
24232bfe3f2eSlogwang {
24242bfe3f2eSlogwang 	struct ixgbe_hw *hw;
24252bfe3f2eSlogwang 	struct ixgbe_vf_info *vfinfo;
24262bfe3f2eSlogwang 	struct rte_eth_link link;
24272bfe3f2eSlogwang 	uint8_t  nb_q_per_pool;
24282bfe3f2eSlogwang 	uint32_t queue_stride;
24292bfe3f2eSlogwang 	uint32_t queue_idx, idx = 0, vf_idx;
24302bfe3f2eSlogwang 	uint32_t queue_end;
24312bfe3f2eSlogwang 	uint16_t total_rate = 0;
24322bfe3f2eSlogwang 	struct rte_pci_device *pci_dev;
24334418919fSjohnjiang 	int ret;
24342bfe3f2eSlogwang 
24352bfe3f2eSlogwang 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
24364418919fSjohnjiang 	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
24374418919fSjohnjiang 	if (ret < 0)
24384418919fSjohnjiang 		return ret;
24392bfe3f2eSlogwang 
24402bfe3f2eSlogwang 	if (vf >= pci_dev->max_vfs)
24412bfe3f2eSlogwang 		return -EINVAL;
24422bfe3f2eSlogwang 
24432bfe3f2eSlogwang 	if (tx_rate > link.link_speed)
24442bfe3f2eSlogwang 		return -EINVAL;
24452bfe3f2eSlogwang 
24462bfe3f2eSlogwang 	if (q_msk == 0)
24472bfe3f2eSlogwang 		return 0;
24482bfe3f2eSlogwang 
24492bfe3f2eSlogwang 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
24502bfe3f2eSlogwang 	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
24512bfe3f2eSlogwang 	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
24522bfe3f2eSlogwang 	queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
24532bfe3f2eSlogwang 	queue_idx = vf * queue_stride;
24542bfe3f2eSlogwang 	queue_end = queue_idx + nb_q_per_pool - 1;
24552bfe3f2eSlogwang 	if (queue_end >= hw->mac.max_tx_queues)
24562bfe3f2eSlogwang 		return -EINVAL;
24572bfe3f2eSlogwang 
24582bfe3f2eSlogwang 	if (vfinfo) {
24592bfe3f2eSlogwang 		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
24602bfe3f2eSlogwang 			if (vf_idx == vf)
24612bfe3f2eSlogwang 				continue;
24622bfe3f2eSlogwang 			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
24632bfe3f2eSlogwang 				idx++)
24642bfe3f2eSlogwang 				total_rate += vfinfo[vf_idx].tx_rate[idx];
24652bfe3f2eSlogwang 		}
24662bfe3f2eSlogwang 	} else {
24672bfe3f2eSlogwang 		return -EINVAL;
24682bfe3f2eSlogwang 	}
24692bfe3f2eSlogwang 
24702bfe3f2eSlogwang 	/* Store tx_rate for this vf. */
24712bfe3f2eSlogwang 	for (idx = 0; idx < nb_q_per_pool; idx++) {
24722bfe3f2eSlogwang 		if (((uint64_t)0x1 << idx) & q_msk) {
24732bfe3f2eSlogwang 			if (vfinfo[vf].tx_rate[idx] != tx_rate)
24742bfe3f2eSlogwang 				vfinfo[vf].tx_rate[idx] = tx_rate;
24752bfe3f2eSlogwang 			total_rate += tx_rate;
24762bfe3f2eSlogwang 		}
24772bfe3f2eSlogwang 	}
24782bfe3f2eSlogwang 
24792bfe3f2eSlogwang 	if (total_rate > dev->data->dev_link.link_speed) {
24802bfe3f2eSlogwang 		/* Reset the stored TX rate of the VF if it would cause the
24812bfe3f2eSlogwang 		 * link speed to be exceeded.
24822bfe3f2eSlogwang 		 */
24832bfe3f2eSlogwang 		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
24842bfe3f2eSlogwang 		return -EINVAL;
24852bfe3f2eSlogwang 	}
24862bfe3f2eSlogwang 
24872bfe3f2eSlogwang 	/* Set RTTBCNRC of each queue/pool for VF X */
24882bfe3f2eSlogwang 	for (; queue_idx <= queue_end; queue_idx++) {
24892bfe3f2eSlogwang 		if (0x1 & q_msk)
24902bfe3f2eSlogwang 			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
24912bfe3f2eSlogwang 		q_msk = q_msk >> 1;
24922bfe3f2eSlogwang 	}
24932bfe3f2eSlogwang 
24942bfe3f2eSlogwang 	return 0;
24952bfe3f2eSlogwang }
24962bfe3f2eSlogwang 
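/*
 * Set up and enable flow control, then apply the MAC control frame
 * forwarding policy by hand through MFLCN.PMCF, since the base code has
 * no native knob for it. IXGBE_ERR_FC_NOT_NEGOTIATED is tolerated.
 */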
24974418919fSjohnjiang static int
24984418919fSjohnjiang ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw)
24994418919fSjohnjiang {
25004418919fSjohnjiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
25014418919fSjohnjiang 	int err;
25024418919fSjohnjiang 	uint32_t mflcn;
25034418919fSjohnjiang 
25040c6bd470Sfengbojiang 	ixgbe_setup_fc(hw);
25050c6bd470Sfengbojiang 
25064418919fSjohnjiang 	err = ixgbe_fc_enable(hw);
25074418919fSjohnjiang 
25084418919fSjohnjiang 	/* Not negotiated is not an error case */
25094418919fSjohnjiang 	if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) {
25104418919fSjohnjiang 		/*
25114418919fSjohnjiang 		 * Check if we want to forward MAC frames; the driver doesn't
25124418919fSjohnjiang 		 * have a native capability to do that,
25134418919fSjohnjiang 		 * so we'll write the registers ourselves.
25144418919fSjohnjiang 		 */
25154418919fSjohnjiang 
25164418919fSjohnjiang 		mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
25174418919fSjohnjiang 
25184418919fSjohnjiang 		/* set or clear MFLCN.PMCF bit depending on configuration */
25194418919fSjohnjiang 		if (adapter->mac_ctrl_frame_fwd != 0)
25204418919fSjohnjiang 			mflcn |= IXGBE_MFLCN_PMCF;
25214418919fSjohnjiang 		else
25224418919fSjohnjiang 			mflcn &= ~IXGBE_MFLCN_PMCF;
25234418919fSjohnjiang 
25244418919fSjohnjiang 		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
25254418919fSjohnjiang 		IXGBE_WRITE_FLUSH(hw);
25264418919fSjohnjiang 
25274418919fSjohnjiang 		return 0;
25284418919fSjohnjiang 	}
25294418919fSjohnjiang 	return err;
25304418919fSjohnjiang }
25314418919fSjohnjiang 
2532a9643ea8Slogwang /*
2533a9643ea8Slogwang  * Configure device link speed and setup link.
2534a9643ea8Slogwang  * It returns 0 on success.
2535a9643ea8Slogwang  */
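/*
 * Rough sequence: stop and reset the adapter, configure the PF side of
 * SR-IOV, map queue interrupts, initialize Tx/Rx, apply VLAN/DCB/FDIR
 * and flow-control settings, start the queues, set up the link at the
 * requested speeds, then re-enable interrupts and restore saved filters.
 */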
2536a9643ea8Slogwang static int
2537a9643ea8Slogwang ixgbe_dev_start(struct rte_eth_dev *dev)
2538a9643ea8Slogwang {
2539a9643ea8Slogwang 	struct ixgbe_hw *hw =
2540a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2541a9643ea8Slogwang 	struct ixgbe_vf_info *vfinfo =
2542a9643ea8Slogwang 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
25432bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
25442bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2545a9643ea8Slogwang 	uint32_t intr_vector = 0;
2546*2d9fd380Sjfb8856606 	int err;
2547*2d9fd380Sjfb8856606 	bool link_up = false, negotiate = false;
2548a9643ea8Slogwang 	uint32_t speed = 0;
2549d30ea906Sjfb8856606 	uint32_t allowed_speeds = 0;
2550a9643ea8Slogwang 	int mask = 0;
2551a9643ea8Slogwang 	int status;
2552a9643ea8Slogwang 	uint16_t vf, idx;
2553a9643ea8Slogwang 	uint32_t *link_speeds;
25542bfe3f2eSlogwang 	struct ixgbe_tm_conf *tm_conf =
25552bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
25564418919fSjohnjiang 	struct ixgbe_macsec_setting *macsec_setting =
25574418919fSjohnjiang 		IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
2558a9643ea8Slogwang 
2559a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
2560a9643ea8Slogwang 
2561d30ea906Sjfb8856606 	/* Stop the link setup handler before resetting the HW. */
25620c6bd470Sfengbojiang 	ixgbe_dev_wait_setup_link_complete(dev, 0);
2563d30ea906Sjfb8856606 
2564a9643ea8Slogwang 	/* disable uio/vfio intr/eventfd mapping */
2565a9643ea8Slogwang 	rte_intr_disable(intr_handle);
2566a9643ea8Slogwang 
2567a9643ea8Slogwang 	/* stop adapter */
2568a9643ea8Slogwang 	hw->adapter_stopped = 0;
2569a9643ea8Slogwang 	ixgbe_stop_adapter(hw);
2570a9643ea8Slogwang 
2571a9643ea8Slogwang 	/* reinitialize adapter
2572a9643ea8Slogwang 	 * this calls reset and start
2573a9643ea8Slogwang 	 */
2574a9643ea8Slogwang 	status = ixgbe_pf_reset_hw(hw);
2575a9643ea8Slogwang 	if (status != 0)
2576a9643ea8Slogwang 		return -1;
2577a9643ea8Slogwang 	hw->mac.ops.start_hw(hw);
2578a9643ea8Slogwang 	hw->mac.get_link_status = true;
2579a9643ea8Slogwang 
2580a9643ea8Slogwang 	/* configure PF module if SRIOV enabled */
2581a9643ea8Slogwang 	ixgbe_pf_host_configure(dev);
2582a9643ea8Slogwang 
2583a9643ea8Slogwang 	ixgbe_dev_phy_intr_setup(dev);
2584a9643ea8Slogwang 
2585a9643ea8Slogwang 	/* check and configure queue intr-vector mapping */
2586a9643ea8Slogwang 	if ((rte_intr_cap_multiple(intr_handle) ||
2587a9643ea8Slogwang 	     !RTE_ETH_DEV_SRIOV(dev).active) &&
2588a9643ea8Slogwang 	    dev->data->dev_conf.intr_conf.rxq != 0) {
2589a9643ea8Slogwang 		intr_vector = dev->data->nb_rx_queues;
2590a9643ea8Slogwang 		if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
2591a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
2592a9643ea8Slogwang 					IXGBE_MAX_INTR_QUEUE_NUM);
2593a9643ea8Slogwang 			return -ENOTSUP;
2594a9643ea8Slogwang 		}
2595a9643ea8Slogwang 		if (rte_intr_efd_enable(intr_handle, intr_vector))
2596a9643ea8Slogwang 			return -1;
2597a9643ea8Slogwang 	}
2598a9643ea8Slogwang 
2599a9643ea8Slogwang 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2600a9643ea8Slogwang 		intr_handle->intr_vec =
2601a9643ea8Slogwang 			rte_zmalloc("intr_vec",
2602a9643ea8Slogwang 				    dev->data->nb_rx_queues * sizeof(int), 0);
2603a9643ea8Slogwang 		if (intr_handle->intr_vec == NULL) {
2604a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
26052bfe3f2eSlogwang 				     " intr_vec", dev->data->nb_rx_queues);
2606a9643ea8Slogwang 			return -ENOMEM;
2607a9643ea8Slogwang 		}
2608a9643ea8Slogwang 	}
2609a9643ea8Slogwang 
2610a9643ea8Slogwang 	/* configure msix for sleep until rx interrupt */
2611a9643ea8Slogwang 	ixgbe_configure_msix(dev);
2612a9643ea8Slogwang 
2613a9643ea8Slogwang 	/* initialize transmission unit */
2614a9643ea8Slogwang 	ixgbe_dev_tx_init(dev);
2615a9643ea8Slogwang 
2616a9643ea8Slogwang 	/* This can fail when allocating mbufs for descriptor rings */
2617a9643ea8Slogwang 	err = ixgbe_dev_rx_init(dev);
2618a9643ea8Slogwang 	if (err) {
2619a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
2620a9643ea8Slogwang 		goto error;
2621a9643ea8Slogwang 	}
2622a9643ea8Slogwang 
26232bfe3f2eSlogwang 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
26242bfe3f2eSlogwang 		ETH_VLAN_EXTEND_MASK;
2625d30ea906Sjfb8856606 	err = ixgbe_vlan_offload_config(dev, mask);
26262bfe3f2eSlogwang 	if (err) {
26272bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
26282bfe3f2eSlogwang 		goto error;
26292bfe3f2eSlogwang 	}
26302bfe3f2eSlogwang 
26312bfe3f2eSlogwang 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
26322bfe3f2eSlogwang 		/* Enable vlan filtering for VMDq */
26332bfe3f2eSlogwang 		ixgbe_vmdq_vlan_hw_filter_enable(dev);
26342bfe3f2eSlogwang 	}
26352bfe3f2eSlogwang 
26362bfe3f2eSlogwang 	/* Configure DCB hw */
26372bfe3f2eSlogwang 	ixgbe_configure_dcb(dev);
26382bfe3f2eSlogwang 
26392bfe3f2eSlogwang 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
26402bfe3f2eSlogwang 		err = ixgbe_fdir_configure(dev);
26412bfe3f2eSlogwang 		if (err)
26422bfe3f2eSlogwang 			goto error;
26432bfe3f2eSlogwang 	}
26442bfe3f2eSlogwang 
26452bfe3f2eSlogwang 	/* Restore vf rate limit */
26462bfe3f2eSlogwang 	if (vfinfo != NULL) {
26472bfe3f2eSlogwang 		for (vf = 0; vf < pci_dev->max_vfs; vf++)
26482bfe3f2eSlogwang 			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
26492bfe3f2eSlogwang 				if (vfinfo[vf].tx_rate[idx] != 0)
26502bfe3f2eSlogwang 					ixgbe_set_vf_rate_limit(
26512bfe3f2eSlogwang 						dev, vf,
26522bfe3f2eSlogwang 						vfinfo[vf].tx_rate[idx],
26532bfe3f2eSlogwang 						1 << idx);
26542bfe3f2eSlogwang 	}
26552bfe3f2eSlogwang 
26562bfe3f2eSlogwang 	ixgbe_restore_statistics_mapping(dev);
26572bfe3f2eSlogwang 
26584418919fSjohnjiang 	err = ixgbe_flow_ctrl_enable(dev, hw);
26594418919fSjohnjiang 	if (err < 0) {
26604418919fSjohnjiang 		PMD_INIT_LOG(ERR, "enable flow ctrl err");
26614418919fSjohnjiang 		goto error;
26624418919fSjohnjiang 	}
26634418919fSjohnjiang 
2664a9643ea8Slogwang 	err = ixgbe_dev_rxtx_start(dev);
2665a9643ea8Slogwang 	if (err < 0) {
2666a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
2667a9643ea8Slogwang 		goto error;
2668a9643ea8Slogwang 	}
2669a9643ea8Slogwang 
26704418919fSjohnjiang 	/* Skip link setup if loopback mode is enabled. */
26714418919fSjohnjiang 	if (dev->data->dev_conf.lpbk_mode != 0) {
26724418919fSjohnjiang 		err = ixgbe_check_supported_loopback_mode(dev);
26734418919fSjohnjiang 		if (err < 0) {
26744418919fSjohnjiang 			PMD_INIT_LOG(ERR, "Unsupported loopback mode");
26754418919fSjohnjiang 			goto error;
26764418919fSjohnjiang 		} else {
2677a9643ea8Slogwang 			goto skip_link_setup;
26784418919fSjohnjiang 		}
26794418919fSjohnjiang 	}
2680a9643ea8Slogwang 
2681a9643ea8Slogwang 	if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
2682a9643ea8Slogwang 		err = hw->mac.ops.setup_sfp(hw);
2683a9643ea8Slogwang 		if (err)
2684a9643ea8Slogwang 			goto error;
2685a9643ea8Slogwang 	}
2686a9643ea8Slogwang 
2687a9643ea8Slogwang 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2688a9643ea8Slogwang 		/* Turn on the copper */
2689a9643ea8Slogwang 		ixgbe_set_phy_power(hw, true);
2690a9643ea8Slogwang 	} else {
2691a9643ea8Slogwang 		/* Turn on the laser */
2692a9643ea8Slogwang 		ixgbe_enable_tx_laser(hw);
2693a9643ea8Slogwang 	}
2694a9643ea8Slogwang 
2695a9643ea8Slogwang 	err = ixgbe_check_link(hw, &speed, &link_up, 0);
2696a9643ea8Slogwang 	if (err)
2697a9643ea8Slogwang 		goto error;
2698a9643ea8Slogwang 	dev->data->dev_link.link_status = link_up;
2699a9643ea8Slogwang 
2700a9643ea8Slogwang 	err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
2701a9643ea8Slogwang 	if (err)
2702a9643ea8Slogwang 		goto error;
2703a9643ea8Slogwang 
2704d30ea906Sjfb8856606 	switch (hw->mac.type) {
2705d30ea906Sjfb8856606 	case ixgbe_mac_X550:
2706d30ea906Sjfb8856606 	case ixgbe_mac_X550EM_x:
2707d30ea906Sjfb8856606 	case ixgbe_mac_X550EM_a:
2708d30ea906Sjfb8856606 		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2709d30ea906Sjfb8856606 			ETH_LINK_SPEED_2_5G |  ETH_LINK_SPEED_5G |
2710d30ea906Sjfb8856606 			ETH_LINK_SPEED_10G;
27114418919fSjohnjiang 		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
27124418919fSjohnjiang 				hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
27134418919fSjohnjiang 			allowed_speeds = ETH_LINK_SPEED_10M |
27144418919fSjohnjiang 				ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
2715d30ea906Sjfb8856606 		break;
2716d30ea906Sjfb8856606 	default:
2717d30ea906Sjfb8856606 		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
2718d30ea906Sjfb8856606 			ETH_LINK_SPEED_10G;
2719d30ea906Sjfb8856606 	}
2720d30ea906Sjfb8856606 
2721a9643ea8Slogwang 	link_speeds = &dev->data->dev_conf.link_speeds;
27224418919fSjohnjiang 
27234418919fSjohnjiang 	/* Ignore the autoneg flag bit and check the validity of
27244418919fSjohnjiang 	 * link_speed.
27254418919fSjohnjiang 	 */
27264418919fSjohnjiang 	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
2727a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Invalid link setting");
2728a9643ea8Slogwang 		goto error;
2729a9643ea8Slogwang 	}
2730a9643ea8Slogwang 
2731a9643ea8Slogwang 	speed = 0x0;
2732a9643ea8Slogwang 	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
27332bfe3f2eSlogwang 		switch (hw->mac.type) {
27342bfe3f2eSlogwang 		case ixgbe_mac_82598EB:
27352bfe3f2eSlogwang 			speed = IXGBE_LINK_SPEED_82598_AUTONEG;
27362bfe3f2eSlogwang 			break;
27372bfe3f2eSlogwang 		case ixgbe_mac_82599EB:
27382bfe3f2eSlogwang 		case ixgbe_mac_X540:
27392bfe3f2eSlogwang 			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
27402bfe3f2eSlogwang 			break;
27412bfe3f2eSlogwang 		case ixgbe_mac_X550:
27422bfe3f2eSlogwang 		case ixgbe_mac_X550EM_x:
27432bfe3f2eSlogwang 		case ixgbe_mac_X550EM_a:
27442bfe3f2eSlogwang 			speed = IXGBE_LINK_SPEED_X550_AUTONEG;
27452bfe3f2eSlogwang 			break;
27462bfe3f2eSlogwang 		default:
27472bfe3f2eSlogwang 			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
27482bfe3f2eSlogwang 		}
2749a9643ea8Slogwang 	} else {
2750a9643ea8Slogwang 		if (*link_speeds & ETH_LINK_SPEED_10G)
2751a9643ea8Slogwang 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
2752d30ea906Sjfb8856606 		if (*link_speeds & ETH_LINK_SPEED_5G)
2753d30ea906Sjfb8856606 			speed |= IXGBE_LINK_SPEED_5GB_FULL;
2754d30ea906Sjfb8856606 		if (*link_speeds & ETH_LINK_SPEED_2_5G)
2755d30ea906Sjfb8856606 			speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
2756a9643ea8Slogwang 		if (*link_speeds & ETH_LINK_SPEED_1G)
2757a9643ea8Slogwang 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
2758a9643ea8Slogwang 		if (*link_speeds & ETH_LINK_SPEED_100M)
2759a9643ea8Slogwang 			speed |= IXGBE_LINK_SPEED_100_FULL;
27604418919fSjohnjiang 		if (*link_speeds & ETH_LINK_SPEED_10M)
27614418919fSjohnjiang 			speed |= IXGBE_LINK_SPEED_10_FULL;
2762a9643ea8Slogwang 	}
2763a9643ea8Slogwang 
2764a9643ea8Slogwang 	err = ixgbe_setup_link(hw, speed, link_up);
2765a9643ea8Slogwang 	if (err)
2766a9643ea8Slogwang 		goto error;
2767a9643ea8Slogwang 
2768a9643ea8Slogwang skip_link_setup:
2769a9643ea8Slogwang 
2770a9643ea8Slogwang 	if (rte_intr_allow_others(intr_handle)) {
2771a9643ea8Slogwang 		/* check if lsc interrupt is enabled */
2772a9643ea8Slogwang 		if (dev->data->dev_conf.intr_conf.lsc != 0)
27732bfe3f2eSlogwang 			ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
27742bfe3f2eSlogwang 		else
27752bfe3f2eSlogwang 			ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
27762bfe3f2eSlogwang 		ixgbe_dev_macsec_interrupt_setup(dev);
2777a9643ea8Slogwang 	} else {
2778a9643ea8Slogwang 		rte_intr_callback_unregister(intr_handle,
27792bfe3f2eSlogwang 					     ixgbe_dev_interrupt_handler, dev);
2780a9643ea8Slogwang 		if (dev->data->dev_conf.intr_conf.lsc != 0)
2781a9643ea8Slogwang 			PMD_INIT_LOG(INFO, "lsc won't be enabled because of"
27822bfe3f2eSlogwang 				     " no intr multiplex");
2783a9643ea8Slogwang 	}
2784a9643ea8Slogwang 
2785a9643ea8Slogwang 	/* check if rxq interrupt is enabled */
2786a9643ea8Slogwang 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
2787a9643ea8Slogwang 	    rte_intr_dp_is_en(intr_handle))
2788a9643ea8Slogwang 		ixgbe_dev_rxq_interrupt_setup(dev);
2789a9643ea8Slogwang 
2790a9643ea8Slogwang 	/* enable uio/vfio intr/eventfd mapping */
2791a9643ea8Slogwang 	rte_intr_enable(intr_handle);
2792a9643ea8Slogwang 
2793a9643ea8Slogwang 	/* resume enabled intr since hw reset */
2794a9643ea8Slogwang 	ixgbe_enable_intr(dev);
27952bfe3f2eSlogwang 	ixgbe_l2_tunnel_conf(dev);
27962bfe3f2eSlogwang 	ixgbe_filter_restore(dev);
2797a9643ea8Slogwang 
27982bfe3f2eSlogwang 	if (tm_conf->root && !tm_conf->committed)
27992bfe3f2eSlogwang 		PMD_DRV_LOG(WARNING,
28002bfe3f2eSlogwang 			    "please call hierarchy_commit() "
28012bfe3f2eSlogwang 			    "before starting the port");
2802a9643ea8Slogwang 
28034418919fSjohnjiang 	/* wait for the controller to acquire link */
28044418919fSjohnjiang 	err = ixgbe_wait_for_link_up(hw);
28054418919fSjohnjiang 	if (err)
28064418919fSjohnjiang 		goto error;
28074418919fSjohnjiang 
2808d30ea906Sjfb8856606 	/*
2809d30ea906Sjfb8856606 	 * Update link status right before return, because it may
2810d30ea906Sjfb8856606 	 * start link configuration process in a separate thread.
2811d30ea906Sjfb8856606 	 */
2812d30ea906Sjfb8856606 	ixgbe_dev_link_update(dev, 0);
2813d30ea906Sjfb8856606 
28144418919fSjohnjiang 	/* setup the macsec setting register */
28154418919fSjohnjiang 	if (macsec_setting->offload_en)
28164418919fSjohnjiang 		ixgbe_dev_macsec_register_enable(dev, macsec_setting);
28174418919fSjohnjiang 
2818a9643ea8Slogwang 	return 0;
2819a9643ea8Slogwang 
2820a9643ea8Slogwang error:
2821a9643ea8Slogwang 	PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
2822a9643ea8Slogwang 	ixgbe_dev_clear_queues(dev);
2823a9643ea8Slogwang 	return -EIO;
2824a9643ea8Slogwang }
2825a9643ea8Slogwang 
2826a9643ea8Slogwang /*
2827a9643ea8Slogwang  * Stop device: disable rx and tx functions to allow for reconfiguring.
2828a9643ea8Slogwang  */
2829*2d9fd380Sjfb8856606 static int
2830a9643ea8Slogwang ixgbe_dev_stop(struct rte_eth_dev *dev)
2831a9643ea8Slogwang {
2832a9643ea8Slogwang 	struct rte_eth_link link;
28334b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
2834a9643ea8Slogwang 	struct ixgbe_hw *hw =
2835a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2836a9643ea8Slogwang 	struct ixgbe_vf_info *vfinfo =
2837a9643ea8Slogwang 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
28382bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
28392bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2840a9643ea8Slogwang 	int vf;
28412bfe3f2eSlogwang 	struct ixgbe_tm_conf *tm_conf =
28422bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
2843a9643ea8Slogwang 
28444418919fSjohnjiang 	if (hw->adapter_stopped)
2845*2d9fd380Sjfb8856606 		return 0;
28464418919fSjohnjiang 
2847a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
2848a9643ea8Slogwang 
28490c6bd470Sfengbojiang 	ixgbe_dev_wait_setup_link_complete(dev, 0);
2850d30ea906Sjfb8856606 
2851a9643ea8Slogwang 	/* disable interrupts */
2852a9643ea8Slogwang 	ixgbe_disable_intr(hw);
2853a9643ea8Slogwang 
2854a9643ea8Slogwang 	/* reset the NIC */
2855a9643ea8Slogwang 	ixgbe_pf_reset_hw(hw);
2856a9643ea8Slogwang 	hw->adapter_stopped = 0;
2857a9643ea8Slogwang 
2858a9643ea8Slogwang 	/* stop adapter */
2859a9643ea8Slogwang 	ixgbe_stop_adapter(hw);
2860a9643ea8Slogwang 
28612bfe3f2eSlogwang 	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
2862a9643ea8Slogwang 		vfinfo[vf].clear_to_send = false;
2863a9643ea8Slogwang 
2864a9643ea8Slogwang 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2865a9643ea8Slogwang 		/* Turn off the copper */
2866a9643ea8Slogwang 		ixgbe_set_phy_power(hw, false);
2867a9643ea8Slogwang 	} else {
2868a9643ea8Slogwang 		/* Turn off the laser */
2869a9643ea8Slogwang 		ixgbe_disable_tx_laser(hw);
2870a9643ea8Slogwang 	}
2871a9643ea8Slogwang 
2872a9643ea8Slogwang 	ixgbe_dev_clear_queues(dev);
2873a9643ea8Slogwang 
2874a9643ea8Slogwang 	/* Clear stored conf */
2875a9643ea8Slogwang 	dev->data->scattered_rx = 0;
2876a9643ea8Slogwang 	dev->data->lro = 0;
2877a9643ea8Slogwang 
2878a9643ea8Slogwang 	/* Clear recorded link status */
2879a9643ea8Slogwang 	memset(&link, 0, sizeof(link));
2880d30ea906Sjfb8856606 	rte_eth_linkstatus_set(dev, &link);
2881a9643ea8Slogwang 
2882a9643ea8Slogwang 	if (!rte_intr_allow_others(intr_handle))
2883a9643ea8Slogwang 		/* resume to the default handler */
2884a9643ea8Slogwang 		rte_intr_callback_register(intr_handle,
2885a9643ea8Slogwang 					   ixgbe_dev_interrupt_handler,
2886a9643ea8Slogwang 					   (void *)dev);
2887a9643ea8Slogwang 
2888a9643ea8Slogwang 	/* Clean datapath event and queue/vec mapping */
2889a9643ea8Slogwang 	rte_intr_efd_disable(intr_handle);
2890a9643ea8Slogwang 	if (intr_handle->intr_vec != NULL) {
2891a9643ea8Slogwang 		rte_free(intr_handle->intr_vec);
2892a9643ea8Slogwang 		intr_handle->intr_vec = NULL;
2893a9643ea8Slogwang 	}
28942bfe3f2eSlogwang 
28952bfe3f2eSlogwang 	/* reset hierarchy commit */
28962bfe3f2eSlogwang 	tm_conf->committed = false;
28971646932aSjfb8856606 
28981646932aSjfb8856606 	adapter->rss_reta_updated = 0;
28994418919fSjohnjiang 
29004418919fSjohnjiang 	hw->adapter_stopped = true;
2901*2d9fd380Sjfb8856606 	dev->data->dev_started = 0;
2902*2d9fd380Sjfb8856606 
2903*2d9fd380Sjfb8856606 	return 0;
2904a9643ea8Slogwang }
2905a9643ea8Slogwang 
2906a9643ea8Slogwang /*
2907a9643ea8Slogwang  * Set device link up: enable tx.
2908a9643ea8Slogwang  */
2909a9643ea8Slogwang static int
2910a9643ea8Slogwang ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
2911a9643ea8Slogwang {
2912a9643ea8Slogwang 	struct ixgbe_hw *hw =
2913a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2914a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_82599EB) {
29152bfe3f2eSlogwang #ifdef RTE_LIBRTE_IXGBE_BYPASS
2916a9643ea8Slogwang 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2917a9643ea8Slogwang 			/* Not supported in bypass mode */
2918a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "Set link up is not supported "
2919a9643ea8Slogwang 				     "by device id 0x%x", hw->device_id);
2920a9643ea8Slogwang 			return -ENOTSUP;
2921a9643ea8Slogwang 		}
2922a9643ea8Slogwang #endif
2923a9643ea8Slogwang 	}
2924a9643ea8Slogwang 
2925a9643ea8Slogwang 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2926a9643ea8Slogwang 		/* Turn on the copper */
2927a9643ea8Slogwang 		ixgbe_set_phy_power(hw, true);
2928a9643ea8Slogwang 	} else {
2929a9643ea8Slogwang 		/* Turn on the laser */
2930a9643ea8Slogwang 		ixgbe_enable_tx_laser(hw);
29314418919fSjohnjiang 		ixgbe_dev_link_update(dev, 0);
2932a9643ea8Slogwang 	}
2933a9643ea8Slogwang 
2934a9643ea8Slogwang 	return 0;
2935a9643ea8Slogwang }
2936a9643ea8Slogwang 
2937a9643ea8Slogwang /*
2938a9643ea8Slogwang  * Set device link down: disable tx.
2939a9643ea8Slogwang  */
2940a9643ea8Slogwang static int
2941a9643ea8Slogwang ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
2942a9643ea8Slogwang {
2943a9643ea8Slogwang 	struct ixgbe_hw *hw =
2944a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2945a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_82599EB) {
29462bfe3f2eSlogwang #ifdef RTE_LIBRTE_IXGBE_BYPASS
2947a9643ea8Slogwang 		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
2948a9643ea8Slogwang 			/* Not supported in bypass mode */
2949a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "Set link down is not supported "
2950a9643ea8Slogwang 				     "by device id 0x%x", hw->device_id);
2951a9643ea8Slogwang 			return -ENOTSUP;
2952a9643ea8Slogwang 		}
2953a9643ea8Slogwang #endif
2954a9643ea8Slogwang 	}
2955a9643ea8Slogwang 
2956a9643ea8Slogwang 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
2957a9643ea8Slogwang 		/* Turn off the copper */
2958a9643ea8Slogwang 		ixgbe_set_phy_power(hw, false);
2959a9643ea8Slogwang 	} else {
2960a9643ea8Slogwang 		/* Turn off the laser */
2961a9643ea8Slogwang 		ixgbe_disable_tx_laser(hw);
29624418919fSjohnjiang 		ixgbe_dev_link_update(dev, 0);
2963a9643ea8Slogwang 	}
2964a9643ea8Slogwang 
2965a9643ea8Slogwang 	return 0;
2966a9643ea8Slogwang }
2967a9643ea8Slogwang 
2968a9643ea8Slogwang /*
29692bfe3f2eSlogwang  * Reset and stop device.
2970a9643ea8Slogwang  */
2971*2d9fd380Sjfb8856606 static int
2972a9643ea8Slogwang ixgbe_dev_close(struct rte_eth_dev *dev)
2973a9643ea8Slogwang {
2974a9643ea8Slogwang 	struct ixgbe_hw *hw =
2975a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
29764418919fSjohnjiang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
29774418919fSjohnjiang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
29784418919fSjohnjiang 	int retries = 0;
29794418919fSjohnjiang 	int ret;
2980a9643ea8Slogwang 
2981a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
2982*2d9fd380Sjfb8856606 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2983*2d9fd380Sjfb8856606 		return 0;
2984a9643ea8Slogwang 
2985a9643ea8Slogwang 	ixgbe_pf_reset_hw(hw);
2986a9643ea8Slogwang 
2987*2d9fd380Sjfb8856606 	ret = ixgbe_dev_stop(dev);
2988a9643ea8Slogwang 
2989a9643ea8Slogwang 	ixgbe_dev_free_queues(dev);
2990a9643ea8Slogwang 
2991a9643ea8Slogwang 	ixgbe_disable_pcie_master(hw);
2992a9643ea8Slogwang 
2993a9643ea8Slogwang 	/* reprogram the RAR[0] in case user changed it. */
2994a9643ea8Slogwang 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
29954418919fSjohnjiang 
29964418919fSjohnjiang 	/* Unlock any pending hardware semaphore */
29974418919fSjohnjiang 	ixgbe_swfw_lock_reset(hw);
29984418919fSjohnjiang 
29994418919fSjohnjiang 	/* disable uio intr before callback unregister */
30004418919fSjohnjiang 	rte_intr_disable(intr_handle);
30014418919fSjohnjiang 
30024418919fSjohnjiang 	do {
30034418919fSjohnjiang 		ret = rte_intr_callback_unregister(intr_handle,
30044418919fSjohnjiang 				ixgbe_dev_interrupt_handler, dev);
30054418919fSjohnjiang 		if (ret >= 0 || ret == -ENOENT) {
30064418919fSjohnjiang 			break;
30074418919fSjohnjiang 		} else if (ret != -EAGAIN) {
30084418919fSjohnjiang 			PMD_INIT_LOG(ERR,
30094418919fSjohnjiang 				"intr callback unregister failed: %d",
30104418919fSjohnjiang 				ret);
30114418919fSjohnjiang 		}
30124418919fSjohnjiang 		rte_delay_ms(100);
30134418919fSjohnjiang 	} while (retries++ < (10 + IXGBE_LINK_UP_TIME));
30144418919fSjohnjiang 
30154418919fSjohnjiang 	/* cancel the delay handler before remove dev */
30164418919fSjohnjiang 	rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev);
30174418919fSjohnjiang 
30184418919fSjohnjiang 	/* uninitialize PF if max_vfs not zero */
30194418919fSjohnjiang 	ixgbe_pf_host_uninit(dev);
30204418919fSjohnjiang 
30214418919fSjohnjiang 	/* remove all the fdir filters & hash */
30224418919fSjohnjiang 	ixgbe_fdir_filter_uninit(dev);
30234418919fSjohnjiang 
30244418919fSjohnjiang 	/* remove all the L2 tunnel filters & hash */
30254418919fSjohnjiang 	ixgbe_l2_tn_filter_uninit(dev);
30264418919fSjohnjiang 
30274418919fSjohnjiang 	/* Remove all ntuple filters of the device */
30284418919fSjohnjiang 	ixgbe_ntuple_filter_uninit(dev);
30294418919fSjohnjiang 
30304418919fSjohnjiang 	/* clear all the filters list */
30314418919fSjohnjiang 	ixgbe_filterlist_flush();
30324418919fSjohnjiang 
30334418919fSjohnjiang 	/* Remove all Traffic Manager configuration */
30344418919fSjohnjiang 	ixgbe_tm_conf_uninit(dev);
30354418919fSjohnjiang 
3036*2d9fd380Sjfb8856606 #ifdef RTE_LIB_SECURITY
30374418919fSjohnjiang 	rte_free(dev->security_ctx);
30384418919fSjohnjiang #endif
30394418919fSjohnjiang 
3040*2d9fd380Sjfb8856606 	return ret;
3041a9643ea8Slogwang }
3042a9643ea8Slogwang 
30432bfe3f2eSlogwang /*
30442bfe3f2eSlogwang  * Reset PF device.
30452bfe3f2eSlogwang  */
30462bfe3f2eSlogwang static int
30472bfe3f2eSlogwang ixgbe_dev_reset(struct rte_eth_dev *dev)
30482bfe3f2eSlogwang {
30492bfe3f2eSlogwang 	int ret;
30502bfe3f2eSlogwang 
30512bfe3f2eSlogwang 	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
30522bfe3f2eSlogwang 	 * its VFs to make them align with it. The detailed notification
30532bfe3f2eSlogwang 	 * mechanism is PMD-specific; for the ixgbe PF it is rather complex.
30542bfe3f2eSlogwang 	 * To avoid unexpected behavior in VF, currently reset of PF with
30552bfe3f2eSlogwang 	 * SR-IOV activation is not supported. It might be supported later.
30562bfe3f2eSlogwang 	 */
30572bfe3f2eSlogwang 	if (dev->data->sriov.active)
30582bfe3f2eSlogwang 		return -ENOTSUP;
30592bfe3f2eSlogwang 
30602bfe3f2eSlogwang 	ret = eth_ixgbe_dev_uninit(dev);
30612bfe3f2eSlogwang 	if (ret)
30622bfe3f2eSlogwang 		return ret;
30632bfe3f2eSlogwang 
3064d30ea906Sjfb8856606 	ret = eth_ixgbe_dev_init(dev, NULL);
30652bfe3f2eSlogwang 
30662bfe3f2eSlogwang 	return ret;
30672bfe3f2eSlogwang }
30682bfe3f2eSlogwang 
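/*
 * Fold the NIC's clear-on-read statistics registers into the cumulative
 * software counters, applying the documented workarounds (CRC byte
 * accounting, gprc/mprc errata) and returning the per-queue totals via
 * the out parameters.
 */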
3069a9643ea8Slogwang static void
3070a9643ea8Slogwang ixgbe_read_stats_registers(struct ixgbe_hw *hw,
3071a9643ea8Slogwang 			   struct ixgbe_hw_stats *hw_stats,
30722bfe3f2eSlogwang 			   struct ixgbe_macsec_stats *macsec_stats,
3073a9643ea8Slogwang 			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
3074a9643ea8Slogwang 			   uint64_t *total_qprc, uint64_t *total_qprdc)
3075a9643ea8Slogwang {
3076a9643ea8Slogwang 	uint32_t bprc, lxon, lxoff, total;
3077a9643ea8Slogwang 	uint32_t delta_gprc = 0;
3078a9643ea8Slogwang 	unsigned i;
3079a9643ea8Slogwang 	/* Workaround for RX byte count not including CRC bytes when CRC
30802bfe3f2eSlogwang 	 * strip is enabled. CRC bytes are removed from counters when crc_strip
3081a9643ea8Slogwang 	 * is disabled.
30822bfe3f2eSlogwang 	 */
3083a9643ea8Slogwang 	int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
3084a9643ea8Slogwang 			IXGBE_HLREG0_RXCRCSTRP);
3085a9643ea8Slogwang 
3086a9643ea8Slogwang 	hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3087a9643ea8Slogwang 	hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3088a9643ea8Slogwang 	hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3089a9643ea8Slogwang 	hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3090a9643ea8Slogwang 
3091a9643ea8Slogwang 	for (i = 0; i < 8; i++) {
3092a9643ea8Slogwang 		uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3093a9643ea8Slogwang 
3094a9643ea8Slogwang 		/* global total per queue */
3095a9643ea8Slogwang 		hw_stats->mpc[i] += mp;
3096a9643ea8Slogwang 		/* Running comprehensive total for stats display */
3097a9643ea8Slogwang 		*total_missed_rx += hw_stats->mpc[i];
3098a9643ea8Slogwang 		if (hw->mac.type == ixgbe_mac_82598EB) {
3099a9643ea8Slogwang 			hw_stats->rnbc[i] +=
3100a9643ea8Slogwang 			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3101a9643ea8Slogwang 			hw_stats->pxonrxc[i] +=
3102a9643ea8Slogwang 				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
3103a9643ea8Slogwang 			hw_stats->pxoffrxc[i] +=
3104a9643ea8Slogwang 				IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
3105a9643ea8Slogwang 		} else {
3106a9643ea8Slogwang 			hw_stats->pxonrxc[i] +=
3107a9643ea8Slogwang 				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
3108a9643ea8Slogwang 			hw_stats->pxoffrxc[i] +=
3109a9643ea8Slogwang 				IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
3110a9643ea8Slogwang 			hw_stats->pxon2offc[i] +=
3111a9643ea8Slogwang 				IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
3112a9643ea8Slogwang 		}
3113a9643ea8Slogwang 		hw_stats->pxontxc[i] +=
3114a9643ea8Slogwang 		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
3115a9643ea8Slogwang 		hw_stats->pxofftxc[i] +=
3116a9643ea8Slogwang 		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
3117a9643ea8Slogwang 	}
3118a9643ea8Slogwang 	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3119a9643ea8Slogwang 		uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3120a9643ea8Slogwang 		uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3121a9643ea8Slogwang 		uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3122a9643ea8Slogwang 
3123a9643ea8Slogwang 		delta_gprc += delta_qprc;
3124a9643ea8Slogwang 
3125a9643ea8Slogwang 		hw_stats->qprc[i] += delta_qprc;
3126a9643ea8Slogwang 		hw_stats->qptc[i] += delta_qptc;
3127a9643ea8Slogwang 
3128a9643ea8Slogwang 		hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
3129a9643ea8Slogwang 		hw_stats->qbrc[i] +=
3130a9643ea8Slogwang 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
3131a9643ea8Slogwang 		if (crc_strip == 0)
31324418919fSjohnjiang 			hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN;
3133a9643ea8Slogwang 
3134a9643ea8Slogwang 		hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
3135a9643ea8Slogwang 		hw_stats->qbtc[i] +=
3136a9643ea8Slogwang 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
3137a9643ea8Slogwang 
3138a9643ea8Slogwang 		hw_stats->qprdc[i] += delta_qprdc;
3139a9643ea8Slogwang 		*total_qprdc += hw_stats->qprdc[i];
3140a9643ea8Slogwang 
3141a9643ea8Slogwang 		*total_qprc += hw_stats->qprc[i];
3142a9643ea8Slogwang 		*total_qbrc += hw_stats->qbrc[i];
3143a9643ea8Slogwang 	}
3144a9643ea8Slogwang 	hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3145a9643ea8Slogwang 	hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3146a9643ea8Slogwang 	hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3147a9643ea8Slogwang 
3148a9643ea8Slogwang 	/*
3149a9643ea8Slogwang 	 * An erratum states that gprc actually counts good + missed packets;
3150a9643ea8Slogwang 	 * as a workaround, set gprc to the summed per-queue packet receives.
3151a9643ea8Slogwang 	 */
3152a9643ea8Slogwang 	hw_stats->gprc = *total_qprc;
3153a9643ea8Slogwang 
3154a9643ea8Slogwang 	if (hw->mac.type != ixgbe_mac_82598EB) {
3155a9643ea8Slogwang 		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
3156a9643ea8Slogwang 		hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3157a9643ea8Slogwang 		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
3158a9643ea8Slogwang 		hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3159a9643ea8Slogwang 		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
3160a9643ea8Slogwang 		hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3161a9643ea8Slogwang 		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3162a9643ea8Slogwang 		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3163a9643ea8Slogwang 	} else {
3164a9643ea8Slogwang 		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3165a9643ea8Slogwang 		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3166a9643ea8Slogwang 		/* 82598 only has a counter in the high register */
3167a9643ea8Slogwang 		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3168a9643ea8Slogwang 		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3169a9643ea8Slogwang 		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3170a9643ea8Slogwang 	}
3171a9643ea8Slogwang 	uint64_t old_tpr = hw_stats->tpr;
3172a9643ea8Slogwang 
3173a9643ea8Slogwang 	hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3174a9643ea8Slogwang 	hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3175a9643ea8Slogwang 
3176a9643ea8Slogwang 	if (crc_strip == 0)
31774418919fSjohnjiang 		hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN;
3178a9643ea8Slogwang 
3179a9643ea8Slogwang 	uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
3180a9643ea8Slogwang 	hw_stats->gptc += delta_gptc;
31814418919fSjohnjiang 	hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN;
31824418919fSjohnjiang 	hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
3183a9643ea8Slogwang 
3184a9643ea8Slogwang 	/*
3185a9643ea8Slogwang 	 * Workaround: mprc hardware is incorrectly counting
3186a9643ea8Slogwang 	 * broadcasts, so for now we subtract those.
3187a9643ea8Slogwang 	 */
3188a9643ea8Slogwang 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3189a9643ea8Slogwang 	hw_stats->bprc += bprc;
3190a9643ea8Slogwang 	hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3191a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_82598EB)
3192a9643ea8Slogwang 		hw_stats->mprc -= bprc;
3193a9643ea8Slogwang 
3194a9643ea8Slogwang 	hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3195a9643ea8Slogwang 	hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3196a9643ea8Slogwang 	hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3197a9643ea8Slogwang 	hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3198a9643ea8Slogwang 	hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3199a9643ea8Slogwang 	hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3200a9643ea8Slogwang 
3201a9643ea8Slogwang 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3202a9643ea8Slogwang 	hw_stats->lxontxc += lxon;
3203a9643ea8Slogwang 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3204a9643ea8Slogwang 	hw_stats->lxofftxc += lxoff;
3205a9643ea8Slogwang 	total = lxon + lxoff;
3206a9643ea8Slogwang 
3207a9643ea8Slogwang 	hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3208a9643ea8Slogwang 	hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3209a9643ea8Slogwang 	hw_stats->gptc -= total;
3210a9643ea8Slogwang 	hw_stats->mptc -= total;
3211a9643ea8Slogwang 	hw_stats->ptc64 -= total;
32124418919fSjohnjiang 	hw_stats->gotc -= total * RTE_ETHER_MIN_LEN;
3213a9643ea8Slogwang 
3214a9643ea8Slogwang 	hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3215a9643ea8Slogwang 	hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3216a9643ea8Slogwang 	hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3217a9643ea8Slogwang 	hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3218a9643ea8Slogwang 	hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3219a9643ea8Slogwang 	hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3220a9643ea8Slogwang 	hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3221a9643ea8Slogwang 	hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3222a9643ea8Slogwang 	hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3223a9643ea8Slogwang 	hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3224a9643ea8Slogwang 	hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3225a9643ea8Slogwang 	hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3226a9643ea8Slogwang 	hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3227a9643ea8Slogwang 	hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3228a9643ea8Slogwang 	hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3229a9643ea8Slogwang 	hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3230a9643ea8Slogwang 	/* FCoE registers only exist on 82599 and later, so skip them on 82598 */
3231a9643ea8Slogwang 	if (hw->mac.type != ixgbe_mac_82598EB) {
3232a9643ea8Slogwang 		hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3233a9643ea8Slogwang 		hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3234a9643ea8Slogwang 		hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3235a9643ea8Slogwang 		hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3236a9643ea8Slogwang 		hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3237a9643ea8Slogwang 	}
3238a9643ea8Slogwang 
3239a9643ea8Slogwang 	/* Flow Director Stats registers */
3240d30ea906Sjfb8856606 	if (hw->mac.type != ixgbe_mac_82598EB) {
3241a9643ea8Slogwang 		hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
3242a9643ea8Slogwang 		hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
3243d30ea906Sjfb8856606 		hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
3244d30ea906Sjfb8856606 					IXGBE_FDIRUSTAT) & 0xFFFF;
3245d30ea906Sjfb8856606 		hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
3246d30ea906Sjfb8856606 					IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
3247d30ea906Sjfb8856606 		hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
3248d30ea906Sjfb8856606 					IXGBE_FDIRFSTAT) & 0xFFFF;
3249d30ea906Sjfb8856606 		hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
3250d30ea906Sjfb8856606 					IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
3251d30ea906Sjfb8856606 	}
32522bfe3f2eSlogwang 	/* MACsec Stats registers */
32532bfe3f2eSlogwang 	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
32542bfe3f2eSlogwang 	macsec_stats->out_pkts_encrypted +=
32552bfe3f2eSlogwang 		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
32562bfe3f2eSlogwang 	macsec_stats->out_pkts_protected +=
32572bfe3f2eSlogwang 		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
32582bfe3f2eSlogwang 	macsec_stats->out_octets_encrypted +=
32592bfe3f2eSlogwang 		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
32602bfe3f2eSlogwang 	macsec_stats->out_octets_protected +=
32612bfe3f2eSlogwang 		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
32622bfe3f2eSlogwang 	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
32632bfe3f2eSlogwang 	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
32642bfe3f2eSlogwang 	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
32652bfe3f2eSlogwang 	macsec_stats->in_pkts_unknownsci +=
32662bfe3f2eSlogwang 		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
32672bfe3f2eSlogwang 	macsec_stats->in_octets_decrypted +=
32682bfe3f2eSlogwang 		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
32692bfe3f2eSlogwang 	macsec_stats->in_octets_validated +=
32702bfe3f2eSlogwang 		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
32712bfe3f2eSlogwang 	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
32722bfe3f2eSlogwang 	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
32732bfe3f2eSlogwang 	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
32742bfe3f2eSlogwang 	for (i = 0; i < 2; i++) {
32752bfe3f2eSlogwang 		macsec_stats->in_pkts_ok +=
32762bfe3f2eSlogwang 			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
32772bfe3f2eSlogwang 		macsec_stats->in_pkts_invalid +=
32782bfe3f2eSlogwang 			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
32792bfe3f2eSlogwang 		macsec_stats->in_pkts_notvalid +=
32802bfe3f2eSlogwang 			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
32812bfe3f2eSlogwang 	}
32822bfe3f2eSlogwang 	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
32832bfe3f2eSlogwang 	macsec_stats->in_pkts_notusingsa +=
32842bfe3f2eSlogwang 		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
3285a9643ea8Slogwang }
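/*
 * A note on the accumulation pattern above: the ixgbe statistics registers
 * are clear-on-read, so each read is folded into the running software totals
 * with "+=" rather than assigned (see also "HW registers are cleared on
 * read" in the reset paths below).  A minimal sketch of the idiom, with
 * IXGBE_CRCERRS standing in for any clear-on-read counter:
 *
 *	hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 */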
3286a9643ea8Slogwang 
3287a9643ea8Slogwang /*
3288a9643ea8Slogwang  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
3289a9643ea8Slogwang  */
32902bfe3f2eSlogwang static int
3291a9643ea8Slogwang ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3292a9643ea8Slogwang {
3293a9643ea8Slogwang 	struct ixgbe_hw *hw =
3294a9643ea8Slogwang 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3295a9643ea8Slogwang 	struct ixgbe_hw_stats *hw_stats =
3296a9643ea8Slogwang 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
32972bfe3f2eSlogwang 	struct ixgbe_macsec_stats *macsec_stats =
32982bfe3f2eSlogwang 			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
32992bfe3f2eSlogwang 				dev->data->dev_private);
3300a9643ea8Slogwang 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3301a9643ea8Slogwang 	unsigned i;
3302a9643ea8Slogwang 
3303a9643ea8Slogwang 	total_missed_rx = 0;
3304a9643ea8Slogwang 	total_qbrc = 0;
3305a9643ea8Slogwang 	total_qprc = 0;
3306a9643ea8Slogwang 	total_qprdc = 0;
3307a9643ea8Slogwang 
33082bfe3f2eSlogwang 	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
33092bfe3f2eSlogwang 			&total_qbrc, &total_qprc, &total_qprdc);
3310a9643ea8Slogwang 
3311a9643ea8Slogwang 	if (stats == NULL)
33122bfe3f2eSlogwang 		return -EINVAL;
3313a9643ea8Slogwang 
3314a9643ea8Slogwang 	/* Fill out the rte_eth_stats statistics structure */
3315a9643ea8Slogwang 	stats->ipackets = total_qprc;
3316a9643ea8Slogwang 	stats->ibytes = total_qbrc;
3317a9643ea8Slogwang 	stats->opackets = hw_stats->gptc;
3318a9643ea8Slogwang 	stats->obytes = hw_stats->gotc;
3319a9643ea8Slogwang 
3320a9643ea8Slogwang 	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
3321a9643ea8Slogwang 		stats->q_ipackets[i] = hw_stats->qprc[i];
3322a9643ea8Slogwang 		stats->q_opackets[i] = hw_stats->qptc[i];
3323a9643ea8Slogwang 		stats->q_ibytes[i] = hw_stats->qbrc[i];
3324a9643ea8Slogwang 		stats->q_obytes[i] = hw_stats->qbtc[i];
3325a9643ea8Slogwang 		stats->q_errors[i] = hw_stats->qprdc[i];
3326a9643ea8Slogwang 	}
3327a9643ea8Slogwang 
3328a9643ea8Slogwang 	/* Rx Errors */
3329a9643ea8Slogwang 	stats->imissed  = total_missed_rx;
3330a9643ea8Slogwang 	stats->ierrors  = hw_stats->crcerrs +
3331a9643ea8Slogwang 			  hw_stats->mspdc +
3332a9643ea8Slogwang 			  hw_stats->rlec +
3333a9643ea8Slogwang 			  hw_stats->ruc +
3334a9643ea8Slogwang 			  hw_stats->roc +
3335a9643ea8Slogwang 			  hw_stats->illerrc +
3336a9643ea8Slogwang 			  hw_stats->errbc +
3337a9643ea8Slogwang 			  hw_stats->rfc +
3338a9643ea8Slogwang 			  hw_stats->fccrc +
3339a9643ea8Slogwang 			  hw_stats->fclast;
3340a9643ea8Slogwang 
3341a9643ea8Slogwang 	/* Tx Errors */
3342a9643ea8Slogwang 	stats->oerrors  = 0;
33432bfe3f2eSlogwang 	return 0;
3344a9643ea8Slogwang }
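/*
 * Illustrative usage sketch (an application-side assumption, not driver
 * code): these totals are reached through the generic ethdev API, which
 * dispatches to ixgbe_dev_stats_get() above.  Requires <rte_ethdev.h> and
 * <inttypes.h>:
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx=%" PRIu64 " missed=%" PRIu64 " ierrors=%" PRIu64 "\n",
 *		       st.ipackets, st.imissed, st.ierrors);
 */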
3345a9643ea8Slogwang 
33464418919fSjohnjiang static int
3347a9643ea8Slogwang ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
3348a9643ea8Slogwang {
3349a9643ea8Slogwang 	struct ixgbe_hw_stats *stats =
3350a9643ea8Slogwang 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3351a9643ea8Slogwang 
3352a9643ea8Slogwang 	/* HW registers are cleared on read */
3353a9643ea8Slogwang 	ixgbe_dev_stats_get(dev, NULL);
3354a9643ea8Slogwang 
3355a9643ea8Slogwang 	/* Reset software totals */
3356a9643ea8Slogwang 	memset(stats, 0, sizeof(*stats));
33574418919fSjohnjiang 
33584418919fSjohnjiang 	return 0;
3359a9643ea8Slogwang }
3360a9643ea8Slogwang 
3361a9643ea8Slogwang /* This function calculates the number of xstats based on the current config */
3362a9643ea8Slogwang static unsigned
3363a9643ea8Slogwang ixgbe_xstats_calc_num(void) {
33642bfe3f2eSlogwang 	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
3365a9643ea8Slogwang 		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
3366a9643ea8Slogwang 		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
3367a9643ea8Slogwang }
3368a9643ea8Slogwang 
3369a9643ea8Slogwang static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
33702bfe3f2eSlogwang 	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
3371a9643ea8Slogwang {
3372a9643ea8Slogwang 	const unsigned cnt_stats = ixgbe_xstats_calc_num();
3373a9643ea8Slogwang 	unsigned stat, i, count;
3374a9643ea8Slogwang 
3375a9643ea8Slogwang 	if (xstats_names != NULL) {
3376a9643ea8Slogwang 		count = 0;
3377a9643ea8Slogwang 
3378a9643ea8Slogwang 		/* Note: limit >= cnt_stats checked upstream
3379a9643ea8Slogwang 		 * in rte_eth_xstats_names()
3380a9643ea8Slogwang 		 */
3381a9643ea8Slogwang 
3382a9643ea8Slogwang 		/* Extended stats from ixgbe_hw_stats */
3383a9643ea8Slogwang 		for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
33844418919fSjohnjiang 			strlcpy(xstats_names[count].name,
33854418919fSjohnjiang 				rte_ixgbe_stats_strings[i].name,
33864418919fSjohnjiang 				sizeof(xstats_names[count].name));
3387a9643ea8Slogwang 			count++;
3388a9643ea8Slogwang 		}
3389a9643ea8Slogwang 
33902bfe3f2eSlogwang 		/* MACsec Stats */
33912bfe3f2eSlogwang 		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
33924418919fSjohnjiang 			strlcpy(xstats_names[count].name,
33934418919fSjohnjiang 				rte_ixgbe_macsec_strings[i].name,
33944418919fSjohnjiang 				sizeof(xstats_names[count].name));
33952bfe3f2eSlogwang 			count++;
33962bfe3f2eSlogwang 		}
33972bfe3f2eSlogwang 
3398a9643ea8Slogwang 		/* RX Priority Stats */
3399a9643ea8Slogwang 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3400a9643ea8Slogwang 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3401a9643ea8Slogwang 				snprintf(xstats_names[count].name,
3402a9643ea8Slogwang 					sizeof(xstats_names[count].name),
3403a9643ea8Slogwang 					"rx_priority%u_%s", i,
3404a9643ea8Slogwang 					rte_ixgbe_rxq_strings[stat].name);
3405a9643ea8Slogwang 				count++;
3406a9643ea8Slogwang 			}
3407a9643ea8Slogwang 		}
3408a9643ea8Slogwang 
3409a9643ea8Slogwang 		/* TX Priority Stats */
3410a9643ea8Slogwang 		for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3411a9643ea8Slogwang 			for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3412a9643ea8Slogwang 				snprintf(xstats_names[count].name,
3413a9643ea8Slogwang 					sizeof(xstats_names[count].name),
3414a9643ea8Slogwang 					"tx_priority%u_%s", i,
3415a9643ea8Slogwang 					rte_ixgbe_txq_strings[stat].name);
3416a9643ea8Slogwang 				count++;
3417a9643ea8Slogwang 			}
3418a9643ea8Slogwang 		}
3419a9643ea8Slogwang 	}
3420a9643ea8Slogwang 	return cnt_stats;
3421a9643ea8Slogwang }
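/*
 * Sizing-convention sketch (generic ethdev behaviour rather than anything
 * ixgbe-specific): since the callback returns cnt_stats even when it fills
 * nothing, an application can size its buffer with two calls:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 */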
3422a9643ea8Slogwang 
34232bfe3f2eSlogwang static int ixgbe_dev_xstats_get_names_by_id(
34242bfe3f2eSlogwang 	struct rte_eth_dev *dev,
34252bfe3f2eSlogwang 	struct rte_eth_xstat_name *xstats_names,
34262bfe3f2eSlogwang 	const uint64_t *ids,
34272bfe3f2eSlogwang 	unsigned int limit)
34282bfe3f2eSlogwang {
34292bfe3f2eSlogwang 	if (!ids) {
34302bfe3f2eSlogwang 		const unsigned int cnt_stats = ixgbe_xstats_calc_num();
34312bfe3f2eSlogwang 		unsigned int stat, i, count;
34322bfe3f2eSlogwang 
34332bfe3f2eSlogwang 		if (xstats_names != NULL) {
34342bfe3f2eSlogwang 			count = 0;
34352bfe3f2eSlogwang 
34362bfe3f2eSlogwang 			/* Note: limit >= cnt_stats checked upstream
34372bfe3f2eSlogwang 			 * in rte_eth_xstats_names()
34382bfe3f2eSlogwang 			 */
34392bfe3f2eSlogwang 
34402bfe3f2eSlogwang 			/* Extended stats from ixgbe_hw_stats */
34412bfe3f2eSlogwang 			for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
34424418919fSjohnjiang 				strlcpy(xstats_names[count].name,
34434418919fSjohnjiang 					rte_ixgbe_stats_strings[i].name,
34444418919fSjohnjiang 					sizeof(xstats_names[count].name));
34452bfe3f2eSlogwang 				count++;
34462bfe3f2eSlogwang 			}
34472bfe3f2eSlogwang 
34482bfe3f2eSlogwang 			/* MACsec Stats */
34492bfe3f2eSlogwang 			for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
34504418919fSjohnjiang 				strlcpy(xstats_names[count].name,
34514418919fSjohnjiang 					rte_ixgbe_macsec_strings[i].name,
34524418919fSjohnjiang 					sizeof(xstats_names[count].name));
34532bfe3f2eSlogwang 				count++;
34542bfe3f2eSlogwang 			}
34552bfe3f2eSlogwang 
34562bfe3f2eSlogwang 			/* RX Priority Stats */
34572bfe3f2eSlogwang 			for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
34582bfe3f2eSlogwang 				for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
34592bfe3f2eSlogwang 					snprintf(xstats_names[count].name,
34602bfe3f2eSlogwang 					    sizeof(xstats_names[count].name),
34612bfe3f2eSlogwang 					    "rx_priority%u_%s", i,
34622bfe3f2eSlogwang 					    rte_ixgbe_rxq_strings[stat].name);
34632bfe3f2eSlogwang 					count++;
34642bfe3f2eSlogwang 				}
34652bfe3f2eSlogwang 			}
34662bfe3f2eSlogwang 
34672bfe3f2eSlogwang 			/* TX Priority Stats */
34682bfe3f2eSlogwang 			for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
34692bfe3f2eSlogwang 				for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
34702bfe3f2eSlogwang 					snprintf(xstats_names[count].name,
34712bfe3f2eSlogwang 					    sizeof(xstats_names[count].name),
34722bfe3f2eSlogwang 					    "tx_priority%u_%s", i,
34732bfe3f2eSlogwang 					    rte_ixgbe_txq_strings[stat].name);
34742bfe3f2eSlogwang 					count++;
34752bfe3f2eSlogwang 				}
34762bfe3f2eSlogwang 			}
34772bfe3f2eSlogwang 		}
34782bfe3f2eSlogwang 		return cnt_stats;
34792bfe3f2eSlogwang 	}
34802bfe3f2eSlogwang 
34812bfe3f2eSlogwang 	uint16_t i;
34822bfe3f2eSlogwang 	uint16_t size = ixgbe_xstats_calc_num();
34832bfe3f2eSlogwang 	struct rte_eth_xstat_name xstats_names_copy[size];
34842bfe3f2eSlogwang 
34852bfe3f2eSlogwang 	ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
34862bfe3f2eSlogwang 			size);
34872bfe3f2eSlogwang 
34882bfe3f2eSlogwang 	for (i = 0; i < limit; i++) {
34892bfe3f2eSlogwang 		if (ids[i] >= size) {
34902bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "id value isn't valid");
34912bfe3f2eSlogwang 			return -1;
34922bfe3f2eSlogwang 		}
34932bfe3f2eSlogwang 		strcpy(xstats_names[i].name,
34942bfe3f2eSlogwang 				xstats_names_copy[ids[i]].name);
34952bfe3f2eSlogwang 	}
34962bfe3f2eSlogwang 	return limit;
34972bfe3f2eSlogwang }
34982bfe3f2eSlogwang 
3499a9643ea8Slogwang static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3500a9643ea8Slogwang 	struct rte_eth_xstat_name *xstats_names, unsigned limit)
3501a9643ea8Slogwang {
3502a9643ea8Slogwang 	unsigned i;
3503a9643ea8Slogwang 
3504a9643ea8Slogwang 	if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
3505a9643ea8Slogwang 		return -ENOMEM;
3506a9643ea8Slogwang 
3507a9643ea8Slogwang 	if (xstats_names != NULL)
3508a9643ea8Slogwang 		for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
35094418919fSjohnjiang 			strlcpy(xstats_names[i].name,
35104418919fSjohnjiang 				rte_ixgbevf_stats_strings[i].name,
35114418919fSjohnjiang 				sizeof(xstats_names[i].name));
3512a9643ea8Slogwang 	return IXGBEVF_NB_XSTATS;
3513a9643ea8Slogwang }
3514a9643ea8Slogwang 
3515a9643ea8Slogwang static int
3516a9643ea8Slogwang ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3517a9643ea8Slogwang 					 unsigned n)
3518a9643ea8Slogwang {
3519a9643ea8Slogwang 	struct ixgbe_hw *hw =
3520a9643ea8Slogwang 			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3521a9643ea8Slogwang 	struct ixgbe_hw_stats *hw_stats =
3522a9643ea8Slogwang 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
35232bfe3f2eSlogwang 	struct ixgbe_macsec_stats *macsec_stats =
35242bfe3f2eSlogwang 			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
35252bfe3f2eSlogwang 				dev->data->dev_private);
3526a9643ea8Slogwang 	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
3527a9643ea8Slogwang 	unsigned i, stat, count = 0;
3528a9643ea8Slogwang 
3529a9643ea8Slogwang 	count = ixgbe_xstats_calc_num();
3530a9643ea8Slogwang 
3531a9643ea8Slogwang 	if (n < count)
3532a9643ea8Slogwang 		return count;
3533a9643ea8Slogwang 
3534a9643ea8Slogwang 	total_missed_rx = 0;
3535a9643ea8Slogwang 	total_qbrc = 0;
3536a9643ea8Slogwang 	total_qprc = 0;
3537a9643ea8Slogwang 	total_qprdc = 0;
3538a9643ea8Slogwang 
35392bfe3f2eSlogwang 	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
35402bfe3f2eSlogwang 			&total_qbrc, &total_qprc, &total_qprdc);
3541a9643ea8Slogwang 
3542a9643ea8Slogwang 	/* If this is a reset, xstats is NULL and we have already cleared the
3543a9643ea8Slogwang 	 * registers by reading them.
3544a9643ea8Slogwang 	 */
3545a9643ea8Slogwang 	if (!xstats)
3546a9643ea8Slogwang 		return 0;
3547a9643ea8Slogwang 
3548a9643ea8Slogwang 	/* Extended stats from ixgbe_hw_stats */
3549a9643ea8Slogwang 	count = 0;
3550a9643ea8Slogwang 	for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
3551a9643ea8Slogwang 		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3552a9643ea8Slogwang 				rte_ixgbe_stats_strings[i].offset);
35532bfe3f2eSlogwang 		xstats[count].id = count;
35542bfe3f2eSlogwang 		count++;
35552bfe3f2eSlogwang 	}
35562bfe3f2eSlogwang 
35572bfe3f2eSlogwang 	/* MACsec Stats */
35582bfe3f2eSlogwang 	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
35592bfe3f2eSlogwang 		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
35602bfe3f2eSlogwang 				rte_ixgbe_macsec_strings[i].offset);
35612bfe3f2eSlogwang 		xstats[count].id = count;
3562a9643ea8Slogwang 		count++;
3563a9643ea8Slogwang 	}
3564a9643ea8Slogwang 
3565a9643ea8Slogwang 	/* RX Priority Stats */
3566a9643ea8Slogwang 	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
3567a9643ea8Slogwang 		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
3568a9643ea8Slogwang 			xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3569a9643ea8Slogwang 					rte_ixgbe_rxq_strings[stat].offset +
3570a9643ea8Slogwang 					(sizeof(uint64_t) * i));
35712bfe3f2eSlogwang 			xstats[count].id = count;
3572a9643ea8Slogwang 			count++;
3573a9643ea8Slogwang 		}
3574a9643ea8Slogwang 	}
3575a9643ea8Slogwang 
3576a9643ea8Slogwang 	/* TX Priority Stats */
3577a9643ea8Slogwang 	for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
3578a9643ea8Slogwang 		for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
3579a9643ea8Slogwang 			xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3580a9643ea8Slogwang 					rte_ixgbe_txq_strings[stat].offset +
3581a9643ea8Slogwang 					(sizeof(uint64_t) * i));
35822bfe3f2eSlogwang 			xstats[count].id = count;
3583a9643ea8Slogwang 			count++;
3584a9643ea8Slogwang 		}
3585a9643ea8Slogwang 	}
3586a9643ea8Slogwang 	return count;
3587a9643ea8Slogwang }
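/*
 * The same two-call pattern applies to the values (a sketch under the same
 * application-side assumptions as above; use_stats() is a placeholder):
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *
 *	if (rte_eth_xstats_get(port_id, xs, n) == n)
 *		use_stats(xs, n);	(xs[i].id indexes the names above)
 */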
3588a9643ea8Slogwang 
35892bfe3f2eSlogwang static int
35902bfe3f2eSlogwang ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
35912bfe3f2eSlogwang 		uint64_t *values, unsigned int n)
35922bfe3f2eSlogwang {
35932bfe3f2eSlogwang 	if (!ids) {
35942bfe3f2eSlogwang 		struct ixgbe_hw *hw =
35952bfe3f2eSlogwang 				IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
35962bfe3f2eSlogwang 		struct ixgbe_hw_stats *hw_stats =
35972bfe3f2eSlogwang 				IXGBE_DEV_PRIVATE_TO_STATS(
35982bfe3f2eSlogwang 						dev->data->dev_private);
35992bfe3f2eSlogwang 		struct ixgbe_macsec_stats *macsec_stats =
36002bfe3f2eSlogwang 				IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
36012bfe3f2eSlogwang 					dev->data->dev_private);
36022bfe3f2eSlogwang 		uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
36032bfe3f2eSlogwang 		unsigned int i, stat, count = 0;
36042bfe3f2eSlogwang 
36052bfe3f2eSlogwang 		count = ixgbe_xstats_calc_num();
36062bfe3f2eSlogwang 
36072bfe3f2eSlogwang 		if (!ids && n < count)
36082bfe3f2eSlogwang 			return count;
36092bfe3f2eSlogwang 
36102bfe3f2eSlogwang 		total_missed_rx = 0;
36112bfe3f2eSlogwang 		total_qbrc = 0;
36122bfe3f2eSlogwang 		total_qprc = 0;
36132bfe3f2eSlogwang 		total_qprdc = 0;
36142bfe3f2eSlogwang 
36152bfe3f2eSlogwang 		ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
36162bfe3f2eSlogwang 				&total_missed_rx, &total_qbrc, &total_qprc,
36172bfe3f2eSlogwang 				&total_qprdc);
36182bfe3f2eSlogwang 
36192bfe3f2eSlogwang 		/* If this is a reset, values is NULL and we have already cleared
36202bfe3f2eSlogwang 		 * registers by reading them.
36212bfe3f2eSlogwang 		 */
36222bfe3f2eSlogwang 		if (!ids && !values)
36232bfe3f2eSlogwang 			return 0;
36242bfe3f2eSlogwang 
36252bfe3f2eSlogwang 		/* Extended stats from ixgbe_hw_stats */
36262bfe3f2eSlogwang 		count = 0;
36272bfe3f2eSlogwang 		for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
36282bfe3f2eSlogwang 			values[count] = *(uint64_t *)(((char *)hw_stats) +
36292bfe3f2eSlogwang 					rte_ixgbe_stats_strings[i].offset);
36302bfe3f2eSlogwang 			count++;
36312bfe3f2eSlogwang 		}
36322bfe3f2eSlogwang 
36332bfe3f2eSlogwang 		/* MACsec Stats */
36342bfe3f2eSlogwang 		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
36352bfe3f2eSlogwang 			values[count] = *(uint64_t *)(((char *)macsec_stats) +
36362bfe3f2eSlogwang 					rte_ixgbe_macsec_strings[i].offset);
36372bfe3f2eSlogwang 			count++;
36382bfe3f2eSlogwang 		}
36392bfe3f2eSlogwang 
36402bfe3f2eSlogwang 		/* RX Priority Stats */
36412bfe3f2eSlogwang 		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
36422bfe3f2eSlogwang 			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
36432bfe3f2eSlogwang 				values[count] =
36442bfe3f2eSlogwang 					*(uint64_t *)(((char *)hw_stats) +
36452bfe3f2eSlogwang 					rte_ixgbe_rxq_strings[stat].offset +
36462bfe3f2eSlogwang 					(sizeof(uint64_t) * i));
36472bfe3f2eSlogwang 				count++;
36482bfe3f2eSlogwang 			}
36492bfe3f2eSlogwang 		}
36502bfe3f2eSlogwang 
36512bfe3f2eSlogwang 		/* TX Priority Stats */
36522bfe3f2eSlogwang 		for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
36532bfe3f2eSlogwang 			for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
36542bfe3f2eSlogwang 				values[count] =
36552bfe3f2eSlogwang 					*(uint64_t *)(((char *)hw_stats) +
36562bfe3f2eSlogwang 					rte_ixgbe_txq_strings[stat].offset +
36572bfe3f2eSlogwang 					(sizeof(uint64_t) * i));
36582bfe3f2eSlogwang 				count++;
36592bfe3f2eSlogwang 			}
36602bfe3f2eSlogwang 		}
36612bfe3f2eSlogwang 		return count;
36622bfe3f2eSlogwang 	}
36632bfe3f2eSlogwang 
36642bfe3f2eSlogwang 	uint16_t i;
36652bfe3f2eSlogwang 	uint16_t size = ixgbe_xstats_calc_num();
36662bfe3f2eSlogwang 	uint64_t values_copy[size];
36672bfe3f2eSlogwang 
36682bfe3f2eSlogwang 	ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
36692bfe3f2eSlogwang 
36702bfe3f2eSlogwang 	for (i = 0; i < n; i++) {
36712bfe3f2eSlogwang 		if (ids[i] >= size) {
36722bfe3f2eSlogwang 			PMD_INIT_LOG(ERR, "id value isn't valid");
36732bfe3f2eSlogwang 			return -1;
36742bfe3f2eSlogwang 		}
36752bfe3f2eSlogwang 		values[i] = values_copy[ids[i]];
36762bfe3f2eSlogwang 	}
36772bfe3f2eSlogwang 	return n;
36782bfe3f2eSlogwang }
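/*
 * By-id retrieval sketch (hedged): once an application has mapped names to
 * ids, it can poll only the counters it cares about; ids are indices into
 * the table laid out by ixgbe_xstats_calc_num():
 *
 *	uint64_t ids[2] = { 0, 1 };
 *	uint64_t vals[2];
 *
 *	rte_eth_xstats_get_by_id(port_id, ids, vals, 2);
 */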
36792bfe3f2eSlogwang 
36804418919fSjohnjiang static int
3681a9643ea8Slogwang ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
3682a9643ea8Slogwang {
3683a9643ea8Slogwang 	struct ixgbe_hw_stats *stats =
3684a9643ea8Slogwang 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
36852bfe3f2eSlogwang 	struct ixgbe_macsec_stats *macsec_stats =
36862bfe3f2eSlogwang 			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
36872bfe3f2eSlogwang 				dev->data->dev_private);
3688a9643ea8Slogwang 
3689a9643ea8Slogwang 	unsigned count = ixgbe_xstats_calc_num();
3690a9643ea8Slogwang 
3691a9643ea8Slogwang 	/* HW registers are cleared on read */
3692a9643ea8Slogwang 	ixgbe_dev_xstats_get(dev, NULL, count);
3693a9643ea8Slogwang 
3694a9643ea8Slogwang 	/* Reset software totals */
3695a9643ea8Slogwang 	memset(stats, 0, sizeof(*stats));
36962bfe3f2eSlogwang 	memset(macsec_stats, 0, sizeof(*macsec_stats));
36974418919fSjohnjiang 
36984418919fSjohnjiang 	return 0;
3699a9643ea8Slogwang }
3700a9643ea8Slogwang 
3701a9643ea8Slogwang static void
3702a9643ea8Slogwang ixgbevf_update_stats(struct rte_eth_dev *dev)
3703a9643ea8Slogwang {
3704a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3705a9643ea8Slogwang 	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3706a9643ea8Slogwang 			  IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3707a9643ea8Slogwang 
3708a9643ea8Slogwang 	/* Good Rx packet, include VF loopback */
3709a9643ea8Slogwang 	UPDATE_VF_STAT(IXGBE_VFGPRC,
3710a9643ea8Slogwang 	    hw_stats->last_vfgprc, hw_stats->vfgprc);
3711a9643ea8Slogwang 
3712a9643ea8Slogwang 	/* Good Rx octets, include VF loopback */
3713a9643ea8Slogwang 	UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3714a9643ea8Slogwang 	    hw_stats->last_vfgorc, hw_stats->vfgorc);
3715a9643ea8Slogwang 
3716a9643ea8Slogwang 	/* Good Tx packet, include VF loopback */
3717a9643ea8Slogwang 	UPDATE_VF_STAT(IXGBE_VFGPTC,
3718a9643ea8Slogwang 	    hw_stats->last_vfgptc, hw_stats->vfgptc);
3719a9643ea8Slogwang 
3720a9643ea8Slogwang 	/* Good Tx octets, include VF loopback */
3721a9643ea8Slogwang 	UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3722a9643ea8Slogwang 	    hw_stats->last_vfgotc, hw_stats->vfgotc);
3723a9643ea8Slogwang 
3724a9643ea8Slogwang 	/* Rx Multicast Packets */
3725a9643ea8Slogwang 	UPDATE_VF_STAT(IXGBE_VFMPRC,
3726a9643ea8Slogwang 	    hw_stats->last_vfmprc, hw_stats->vfmprc);
3727a9643ea8Slogwang }
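/*
 * UPDATE_VF_STAT and UPDATE_VF_STAT_36BIT (defined in ixgbe_ethdev.h) follow
 * the usual wrap-safe pattern for free-running counters the VF cannot clear;
 * conceptually (a sketch, not the exact macro text):
 *
 *	uint32_t latest = IXGBE_READ_REG(hw, reg);
 *
 *	cur += latest - last;	(32-bit unsigned wrap yields the delta)
 *	last = latest;
 */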
3728a9643ea8Slogwang 
3729a9643ea8Slogwang static int
3730a9643ea8Slogwang ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3731a9643ea8Slogwang 		       unsigned n)
3732a9643ea8Slogwang {
3733a9643ea8Slogwang 	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3734a9643ea8Slogwang 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3735a9643ea8Slogwang 	unsigned i;
3736a9643ea8Slogwang 
3737a9643ea8Slogwang 	if (n < IXGBEVF_NB_XSTATS)
3738a9643ea8Slogwang 		return IXGBEVF_NB_XSTATS;
3739a9643ea8Slogwang 
3740a9643ea8Slogwang 	ixgbevf_update_stats(dev);
3741a9643ea8Slogwang 
3742a9643ea8Slogwang 	if (!xstats)
3743a9643ea8Slogwang 		return 0;
3744a9643ea8Slogwang 
3745a9643ea8Slogwang 	/* Extended stats */
3746a9643ea8Slogwang 	for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
37472bfe3f2eSlogwang 		xstats[i].id = i;
3748a9643ea8Slogwang 		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
3749a9643ea8Slogwang 			rte_ixgbevf_stats_strings[i].offset);
3750a9643ea8Slogwang 	}
3751a9643ea8Slogwang 
3752a9643ea8Slogwang 	return IXGBEVF_NB_XSTATS;
3753a9643ea8Slogwang }
3754a9643ea8Slogwang 
37552bfe3f2eSlogwang static int
3756a9643ea8Slogwang ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3757a9643ea8Slogwang {
3758a9643ea8Slogwang 	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3759a9643ea8Slogwang 			  IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3760a9643ea8Slogwang 
3761a9643ea8Slogwang 	ixgbevf_update_stats(dev);
3762a9643ea8Slogwang 
3763a9643ea8Slogwang 	if (stats == NULL)
37642bfe3f2eSlogwang 		return -EINVAL;
3765a9643ea8Slogwang 
3766a9643ea8Slogwang 	stats->ipackets = hw_stats->vfgprc;
3767a9643ea8Slogwang 	stats->ibytes = hw_stats->vfgorc;
3768a9643ea8Slogwang 	stats->opackets = hw_stats->vfgptc;
3769a9643ea8Slogwang 	stats->obytes = hw_stats->vfgotc;
37702bfe3f2eSlogwang 	return 0;
3771a9643ea8Slogwang }
3772a9643ea8Slogwang 
37734418919fSjohnjiang static int
3774a9643ea8Slogwang ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
3775a9643ea8Slogwang {
3776a9643ea8Slogwang 	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
3777a9643ea8Slogwang 			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
3778a9643ea8Slogwang 
3779a9643ea8Slogwang 	/* Sync HW register to the last stats */
3780a9643ea8Slogwang 	ixgbevf_dev_stats_get(dev, NULL);
3781a9643ea8Slogwang 
3782a9643ea8Slogwang 	/* reset HW current stats */
3783a9643ea8Slogwang 	hw_stats->vfgprc = 0;
3784a9643ea8Slogwang 	hw_stats->vfgorc = 0;
3785a9643ea8Slogwang 	hw_stats->vfgptc = 0;
3786a9643ea8Slogwang 	hw_stats->vfgotc = 0;
37874418919fSjohnjiang 
37884418919fSjohnjiang 	return 0;
3789a9643ea8Slogwang }
3790a9643ea8Slogwang 
37912bfe3f2eSlogwang static int
37922bfe3f2eSlogwang ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
37932bfe3f2eSlogwang {
37942bfe3f2eSlogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
37952bfe3f2eSlogwang 	u16 eeprom_verh, eeprom_verl;
37962bfe3f2eSlogwang 	u32 etrack_id;
37972bfe3f2eSlogwang 	int ret;
37982bfe3f2eSlogwang 
37992bfe3f2eSlogwang 	ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
38002bfe3f2eSlogwang 	ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
38012bfe3f2eSlogwang 
38022bfe3f2eSlogwang 	etrack_id = (eeprom_verh << 16) | eeprom_verl;
38032bfe3f2eSlogwang 	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
38042bfe3f2eSlogwang 
38052bfe3f2eSlogwang 	ret += 1; /* add the size of '\0' */
38062bfe3f2eSlogwang 	if (fw_size < (u32)ret)
38072bfe3f2eSlogwang 		return ret;
38082bfe3f2eSlogwang 	else
38092bfe3f2eSlogwang 		return 0;
38102bfe3f2eSlogwang }
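/*
 * Caller-side sketch (an assumption about the application, not driver code):
 * the "return needed size when the buffer is too small" convention above
 * allows a retry loop, but a 32-byte buffer comfortably fits the etrack id:
 *
 *	char ver[32];
 *
 *	if (rte_eth_dev_fw_version_get(port_id, ver, sizeof(ver)) == 0)
 *		printf("fw: %s\n", ver);
 */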
38112bfe3f2eSlogwang 
38124418919fSjohnjiang static int
3813a9643ea8Slogwang ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3814a9643ea8Slogwang {
38152bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3816a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3817a9643ea8Slogwang 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3818a9643ea8Slogwang 
3819a9643ea8Slogwang 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3820a9643ea8Slogwang 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3821a9643ea8Slogwang 	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3822a9643ea8Slogwang 		/*
3823a9643ea8Slogwang 		 * When DCB/VT is off, maximum number of queues changes,
3824a9643ea8Slogwang 		 * except for 82598EB, which remains constant.
3825a9643ea8Slogwang 		 */
3826a9643ea8Slogwang 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
3827a9643ea8Slogwang 				hw->mac.type != ixgbe_mac_82598EB)
3828a9643ea8Slogwang 			dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
3829a9643ea8Slogwang 	}
3830a9643ea8Slogwang 	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
3831a9643ea8Slogwang 	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
3832a9643ea8Slogwang 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3833a9643ea8Slogwang 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
38342bfe3f2eSlogwang 	dev_info->max_vfs = pci_dev->max_vfs;
3835a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_82598EB)
3836a9643ea8Slogwang 		dev_info->max_vmdq_pools = ETH_16_POOLS;
3837a9643ea8Slogwang 	else
3838a9643ea8Slogwang 		dev_info->max_vmdq_pools = ETH_64_POOLS;
38394418919fSjohnjiang 	dev_info->max_mtu =  dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
38404418919fSjohnjiang 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
3841a9643ea8Slogwang 	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
3842d30ea906Sjfb8856606 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
3843d30ea906Sjfb8856606 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
3844d30ea906Sjfb8856606 				     dev_info->rx_queue_offload_capa);
3845d30ea906Sjfb8856606 	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
3846d30ea906Sjfb8856606 	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
38472bfe3f2eSlogwang 
3848a9643ea8Slogwang 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3849a9643ea8Slogwang 		.rx_thresh = {
3850a9643ea8Slogwang 			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3851a9643ea8Slogwang 			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3852a9643ea8Slogwang 			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3853a9643ea8Slogwang 		},
3854a9643ea8Slogwang 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3855a9643ea8Slogwang 		.rx_drop_en = 0,
3856d30ea906Sjfb8856606 		.offloads = 0,
3857a9643ea8Slogwang 	};
3858a9643ea8Slogwang 
3859a9643ea8Slogwang 	dev_info->default_txconf = (struct rte_eth_txconf) {
3860a9643ea8Slogwang 		.tx_thresh = {
3861a9643ea8Slogwang 			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3862a9643ea8Slogwang 			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3863a9643ea8Slogwang 			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3864a9643ea8Slogwang 		},
3865a9643ea8Slogwang 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3866a9643ea8Slogwang 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3867d30ea906Sjfb8856606 		.offloads = 0,
3868a9643ea8Slogwang 	};
3869a9643ea8Slogwang 
3870a9643ea8Slogwang 	dev_info->rx_desc_lim = rx_desc_lim;
3871a9643ea8Slogwang 	dev_info->tx_desc_lim = tx_desc_lim;
3872a9643ea8Slogwang 
3873a9643ea8Slogwang 	dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
3874a9643ea8Slogwang 	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
3875a9643ea8Slogwang 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3876a9643ea8Slogwang 
3877a9643ea8Slogwang 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
38784418919fSjohnjiang 	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
38794418919fSjohnjiang 			hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
38804418919fSjohnjiang 		dev_info->speed_capa = ETH_LINK_SPEED_10M |
38814418919fSjohnjiang 			ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
38824418919fSjohnjiang 
3883a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_X540 ||
3884a9643ea8Slogwang 	    hw->mac.type == ixgbe_mac_X540_vf ||
3885a9643ea8Slogwang 	    hw->mac.type == ixgbe_mac_X550 ||
3886a9643ea8Slogwang 	    hw->mac.type == ixgbe_mac_X550_vf) {
3887a9643ea8Slogwang 		dev_info->speed_capa |= ETH_LINK_SPEED_100M;
3888a9643ea8Slogwang 	}
38892bfe3f2eSlogwang 	if (hw->mac.type == ixgbe_mac_X550) {
38902bfe3f2eSlogwang 		dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
38912bfe3f2eSlogwang 		dev_info->speed_capa |= ETH_LINK_SPEED_5G;
38922bfe3f2eSlogwang 	}
3893d30ea906Sjfb8856606 
3894d30ea906Sjfb8856606 	/* Driver-preferred Rx/Tx parameters */
3895d30ea906Sjfb8856606 	dev_info->default_rxportconf.burst_size = 32;
3896d30ea906Sjfb8856606 	dev_info->default_txportconf.burst_size = 32;
3897d30ea906Sjfb8856606 	dev_info->default_rxportconf.nb_queues = 1;
3898d30ea906Sjfb8856606 	dev_info->default_txportconf.nb_queues = 1;
3899d30ea906Sjfb8856606 	dev_info->default_rxportconf.ring_size = 256;
3900d30ea906Sjfb8856606 	dev_info->default_txportconf.ring_size = 256;
39014418919fSjohnjiang 
39024418919fSjohnjiang 	return 0;
3903a9643ea8Slogwang }
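/*
 * Usage sketch (application-side assumption; nb_rxq is a placeholder): these
 * limits are typically consulted before configuring queues, via the generic
 * API that dispatches to ixgbe_dev_info_get() above:
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		nb_rxq = RTE_MIN(nb_rxq, info.max_rx_queues);
 */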
3904a9643ea8Slogwang 
3905a9643ea8Slogwang static const uint32_t *
3906a9643ea8Slogwang ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
3907a9643ea8Slogwang {
3908a9643ea8Slogwang 	static const uint32_t ptypes[] = {
3909a9643ea8Slogwang 		/* For non-vec functions,
3910a9643ea8Slogwang 		 * refers to ixgbe_rxd_pkt_info_to_pkt_type();
3911a9643ea8Slogwang 		 * for vec functions,
3912a9643ea8Slogwang 		 * refers to _recv_raw_pkts_vec().
3913a9643ea8Slogwang 		 */
3914a9643ea8Slogwang 		RTE_PTYPE_L2_ETHER,
3915a9643ea8Slogwang 		RTE_PTYPE_L3_IPV4,
3916a9643ea8Slogwang 		RTE_PTYPE_L3_IPV4_EXT,
3917a9643ea8Slogwang 		RTE_PTYPE_L3_IPV6,
3918a9643ea8Slogwang 		RTE_PTYPE_L3_IPV6_EXT,
3919a9643ea8Slogwang 		RTE_PTYPE_L4_SCTP,
3920a9643ea8Slogwang 		RTE_PTYPE_L4_TCP,
3921a9643ea8Slogwang 		RTE_PTYPE_L4_UDP,
3922a9643ea8Slogwang 		RTE_PTYPE_TUNNEL_IP,
3923a9643ea8Slogwang 		RTE_PTYPE_INNER_L3_IPV6,
3924a9643ea8Slogwang 		RTE_PTYPE_INNER_L3_IPV6_EXT,
3925a9643ea8Slogwang 		RTE_PTYPE_INNER_L4_TCP,
3926a9643ea8Slogwang 		RTE_PTYPE_INNER_L4_UDP,
3927a9643ea8Slogwang 		RTE_PTYPE_UNKNOWN
3928a9643ea8Slogwang 	};
3929a9643ea8Slogwang 
3930a9643ea8Slogwang 	if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
3931a9643ea8Slogwang 	    dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
3932a9643ea8Slogwang 	    dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
3933a9643ea8Slogwang 	    dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
3934a9643ea8Slogwang 		return ptypes;
39352bfe3f2eSlogwang 
3936*2d9fd380Sjfb8856606 #if defined(RTE_ARCH_X86) || defined(__ARM_NEON)
39372bfe3f2eSlogwang 	if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
39382bfe3f2eSlogwang 	    dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
39392bfe3f2eSlogwang 		return ptypes;
39402bfe3f2eSlogwang #endif
3941a9643ea8Slogwang 	return NULL;
3942a9643ea8Slogwang }
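/*
 * Query sketch (hedged, generic ethdev usage): the RTE_PTYPE_UNKNOWN
 * terminator above is consumed by the API, which copies out the entries
 * matching a mask:
 *
 *	uint32_t ptypes[16];
 *	int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *						 ptypes, RTE_DIM(ptypes));
 */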
3943a9643ea8Slogwang 
39444418919fSjohnjiang static int
3945a9643ea8Slogwang ixgbevf_dev_info_get(struct rte_eth_dev *dev,
3946a9643ea8Slogwang 		     struct rte_eth_dev_info *dev_info)
3947a9643ea8Slogwang {
39482bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3949a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3950a9643ea8Slogwang 
3951a9643ea8Slogwang 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
3952a9643ea8Slogwang 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
3953a9643ea8Slogwang 	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
39542bfe3f2eSlogwang 	dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
39554418919fSjohnjiang 	dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
3956a9643ea8Slogwang 	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
3957a9643ea8Slogwang 	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
39582bfe3f2eSlogwang 	dev_info->max_vfs = pci_dev->max_vfs;
3959a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_82598EB)
3960a9643ea8Slogwang 		dev_info->max_vmdq_pools = ETH_16_POOLS;
3961a9643ea8Slogwang 	else
3962a9643ea8Slogwang 		dev_info->max_vmdq_pools = ETH_64_POOLS;
3963d30ea906Sjfb8856606 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
3964d30ea906Sjfb8856606 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
3965d30ea906Sjfb8856606 				     dev_info->rx_queue_offload_capa);
3966d30ea906Sjfb8856606 	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
3967d30ea906Sjfb8856606 	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
39684b05018fSfengbojiang 	dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
39694b05018fSfengbojiang 	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
39704418919fSjohnjiang 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
3971a9643ea8Slogwang 
3972a9643ea8Slogwang 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
3973a9643ea8Slogwang 		.rx_thresh = {
3974a9643ea8Slogwang 			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
3975a9643ea8Slogwang 			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
3976a9643ea8Slogwang 			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
3977a9643ea8Slogwang 		},
3978a9643ea8Slogwang 		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
3979a9643ea8Slogwang 		.rx_drop_en = 0,
3980d30ea906Sjfb8856606 		.offloads = 0,
3981a9643ea8Slogwang 	};
3982a9643ea8Slogwang 
3983a9643ea8Slogwang 	dev_info->default_txconf = (struct rte_eth_txconf) {
3984a9643ea8Slogwang 		.tx_thresh = {
3985a9643ea8Slogwang 			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
3986a9643ea8Slogwang 			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
3987a9643ea8Slogwang 			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
3988a9643ea8Slogwang 		},
3989a9643ea8Slogwang 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
3990a9643ea8Slogwang 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
3991d30ea906Sjfb8856606 		.offloads = 0,
3992a9643ea8Slogwang 	};
3993a9643ea8Slogwang 
3994a9643ea8Slogwang 	dev_info->rx_desc_lim = rx_desc_lim;
3995a9643ea8Slogwang 	dev_info->tx_desc_lim = tx_desc_lim;
39964418919fSjohnjiang 
39974418919fSjohnjiang 	return 0;
3998a9643ea8Slogwang }
3999a9643ea8Slogwang 
40002bfe3f2eSlogwang static int
40012bfe3f2eSlogwang ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4002*2d9fd380Sjfb8856606 		   bool *link_up, int wait_to_complete)
40032bfe3f2eSlogwang {
40044b05018fSfengbojiang 	struct ixgbe_adapter *adapter = container_of(hw,
40054b05018fSfengbojiang 						     struct ixgbe_adapter, hw);
40062bfe3f2eSlogwang 	struct ixgbe_mbx_info *mbx = &hw->mbx;
40072bfe3f2eSlogwang 	struct ixgbe_mac_info *mac = &hw->mac;
40082bfe3f2eSlogwang 	uint32_t links_reg, in_msg;
40092bfe3f2eSlogwang 	int ret_val = 0;
40102bfe3f2eSlogwang 
40112bfe3f2eSlogwang 	/* If we were hit with a reset, drop the link */
40122bfe3f2eSlogwang 	if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
40132bfe3f2eSlogwang 		mac->get_link_status = true;
40142bfe3f2eSlogwang 
40152bfe3f2eSlogwang 	if (!mac->get_link_status)
40162bfe3f2eSlogwang 		goto out;
40172bfe3f2eSlogwang 
40182bfe3f2eSlogwang 	/* if link status is down no point in checking to see if pf is up */
40192bfe3f2eSlogwang 	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
40202bfe3f2eSlogwang 	if (!(links_reg & IXGBE_LINKS_UP))
40212bfe3f2eSlogwang 		goto out;
40222bfe3f2eSlogwang 
40232bfe3f2eSlogwang 	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
40242bfe3f2eSlogwang 	 * before the link status is correct
40252bfe3f2eSlogwang 	 */
4026579bf1e2Sjfb8856606 	if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
40272bfe3f2eSlogwang 		int i;
40282bfe3f2eSlogwang 
40292bfe3f2eSlogwang 		for (i = 0; i < 5; i++) {
40302bfe3f2eSlogwang 			rte_delay_us(100);
40312bfe3f2eSlogwang 			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
40322bfe3f2eSlogwang 
40332bfe3f2eSlogwang 			if (!(links_reg & IXGBE_LINKS_UP))
40342bfe3f2eSlogwang 				goto out;
40352bfe3f2eSlogwang 		}
40362bfe3f2eSlogwang 	}
40372bfe3f2eSlogwang 
40382bfe3f2eSlogwang 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
40392bfe3f2eSlogwang 	case IXGBE_LINKS_SPEED_10G_82599:
40402bfe3f2eSlogwang 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
40412bfe3f2eSlogwang 		if (hw->mac.type >= ixgbe_mac_X550) {
40422bfe3f2eSlogwang 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
40432bfe3f2eSlogwang 				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
40442bfe3f2eSlogwang 		}
40452bfe3f2eSlogwang 		break;
40462bfe3f2eSlogwang 	case IXGBE_LINKS_SPEED_1G_82599:
40472bfe3f2eSlogwang 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
40482bfe3f2eSlogwang 		break;
40492bfe3f2eSlogwang 	case IXGBE_LINKS_SPEED_100_82599:
40502bfe3f2eSlogwang 		*speed = IXGBE_LINK_SPEED_100_FULL;
40512bfe3f2eSlogwang 		if (hw->mac.type == ixgbe_mac_X550) {
40522bfe3f2eSlogwang 			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
40532bfe3f2eSlogwang 				*speed = IXGBE_LINK_SPEED_5GB_FULL;
40542bfe3f2eSlogwang 		}
40552bfe3f2eSlogwang 		break;
40562bfe3f2eSlogwang 	case IXGBE_LINKS_SPEED_10_X550EM_A:
40572bfe3f2eSlogwang 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
40582bfe3f2eSlogwang 		/* This encoding is reserved on MACs older than X550 */
40592bfe3f2eSlogwang 		if (hw->mac.type >= ixgbe_mac_X550)
40602bfe3f2eSlogwang 			*speed = IXGBE_LINK_SPEED_10_FULL;
40612bfe3f2eSlogwang 		break;
40622bfe3f2eSlogwang 	default:
40632bfe3f2eSlogwang 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
40642bfe3f2eSlogwang 	}
40652bfe3f2eSlogwang 
40664b05018fSfengbojiang 	if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) {
40674b05018fSfengbojiang 		if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
40684b05018fSfengbojiang 			mac->get_link_status = true;
40694b05018fSfengbojiang 		else
40704b05018fSfengbojiang 			mac->get_link_status = false;
40714b05018fSfengbojiang 
40724b05018fSfengbojiang 		goto out;
40734b05018fSfengbojiang 	}
40744b05018fSfengbojiang 
40752bfe3f2eSlogwang 	/* if the read failed it could just be a mailbox collision, best wait
40762bfe3f2eSlogwang 	 * until we are called again and don't report an error
40772bfe3f2eSlogwang 	 */
40782bfe3f2eSlogwang 	if (mbx->ops.read(hw, &in_msg, 1, 0))
40792bfe3f2eSlogwang 		goto out;
40802bfe3f2eSlogwang 
40812bfe3f2eSlogwang 	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
40822bfe3f2eSlogwang 		/* msg is not CTS; if it is a NACK, we must have lost CTS status */
40832bfe3f2eSlogwang 		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
4084d30ea906Sjfb8856606 			mac->get_link_status = false;
40852bfe3f2eSlogwang 		goto out;
40862bfe3f2eSlogwang 	}
40872bfe3f2eSlogwang 
40882bfe3f2eSlogwang 	/* the PF is talking; if we timed out in the past, reinit */
40892bfe3f2eSlogwang 	if (!mbx->timeout) {
40902bfe3f2eSlogwang 		ret_val = -1;
40912bfe3f2eSlogwang 		goto out;
40922bfe3f2eSlogwang 	}
40932bfe3f2eSlogwang 
40942bfe3f2eSlogwang 	/* if we passed all the tests above then the link is up and we no
40952bfe3f2eSlogwang 	 * longer need to check for link
40962bfe3f2eSlogwang 	 */
40972bfe3f2eSlogwang 	mac->get_link_status = false;
40982bfe3f2eSlogwang 
40992bfe3f2eSlogwang out:
41002bfe3f2eSlogwang 	*link_up = !mac->get_link_status;
41012bfe3f2eSlogwang 	return ret_val;
41022bfe3f2eSlogwang }
41032bfe3f2eSlogwang 
41040c6bd470Sfengbojiang /*
41050c6bd470Sfengbojiang  * If @timeout_ms is 0, do not return until the link setup thread completes.
41060c6bd470Sfengbojiang  * Returns 1 on completion, 0 on timeout.
41070c6bd470Sfengbojiang  */
41080c6bd470Sfengbojiang static int
41090c6bd470Sfengbojiang ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms)
41104418919fSjohnjiang {
41110c6bd470Sfengbojiang #define WARNING_TIMEOUT    9000 /* 9s  in total */
41124418919fSjohnjiang 	struct ixgbe_adapter *ad = dev->data->dev_private;
41130c6bd470Sfengbojiang 	uint32_t timeout = timeout_ms ? timeout_ms : WARNING_TIMEOUT;
41144418919fSjohnjiang 
41150c6bd470Sfengbojiang 	while (rte_atomic32_read(&ad->link_thread_running)) {
41160c6bd470Sfengbojiang 		msec_delay(1);
41170c6bd470Sfengbojiang 		timeout--;
41180c6bd470Sfengbojiang 
41190c6bd470Sfengbojiang 		if (timeout_ms) {
41200c6bd470Sfengbojiang 			if (!timeout)
41210c6bd470Sfengbojiang 				return 0;
41220c6bd470Sfengbojiang 		} else if (!timeout) {
41230c6bd470Sfengbojiang 			/* It will not return until link complete */
41240c6bd470Sfengbojiang 			timeout = WARNING_TIMEOUT;
41250c6bd470Sfengbojiang 			PMD_DRV_LOG(ERR, "IXGBE link thread is taking too long to complete!");
41264418919fSjohnjiang 		}
41274418919fSjohnjiang 	}
41284418919fSjohnjiang 
41290c6bd470Sfengbojiang 	return 1;
41300c6bd470Sfengbojiang }
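/*
 * Caller sketch: pass 0 to block until the link thread exits (as
 * ixgbe_dev_link_update_share() does below), or bound the wait; the log
 * message here is illustrative, not existing code:
 *
 *	if (!ixgbe_dev_wait_setup_link_complete(dev, 100))
 *		PMD_DRV_LOG(WARNING, "link thread still running");
 */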
41310c6bd470Sfengbojiang 
41324418919fSjohnjiang static void *
41334418919fSjohnjiang ixgbe_dev_setup_link_thread_handler(void *param)
4134d30ea906Sjfb8856606 {
4135d30ea906Sjfb8856606 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
41364418919fSjohnjiang 	struct ixgbe_adapter *ad = dev->data->dev_private;
4137d30ea906Sjfb8856606 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4138d30ea906Sjfb8856606 	struct ixgbe_interrupt *intr =
4139d30ea906Sjfb8856606 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4140d30ea906Sjfb8856606 	u32 speed;
4141d30ea906Sjfb8856606 	bool autoneg = false;
4142d30ea906Sjfb8856606 
41430c6bd470Sfengbojiang 	pthread_detach(pthread_self());
4144d30ea906Sjfb8856606 	speed = hw->phy.autoneg_advertised;
4145d30ea906Sjfb8856606 	if (!speed)
4146d30ea906Sjfb8856606 		ixgbe_get_link_capabilities(hw, &speed, &autoneg);
4147d30ea906Sjfb8856606 
4148d30ea906Sjfb8856606 	ixgbe_setup_link(hw, speed, true);
4149d30ea906Sjfb8856606 
4150d30ea906Sjfb8856606 	intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
41514418919fSjohnjiang 	rte_atomic32_clear(&ad->link_thread_running);
41524418919fSjohnjiang 	return NULL;
41534418919fSjohnjiang }
41544418919fSjohnjiang 
41554418919fSjohnjiang /*
41564418919fSjohnjiang  * On FreeBSD, the nic_uio driver does not support interrupts, so
41574418919fSjohnjiang  * rte_intr_callback_register() fails and link status cannot be driven
41584418919fSjohnjiang  * from down to up by an interrupt callback.  We therefore poll and
41594418919fSjohnjiang  * wait for the controller to acquire link when ports start.
41604418919fSjohnjiang  * Returns 0 on link up.
41624418919fSjohnjiang  */
41634418919fSjohnjiang static int
41644418919fSjohnjiang ixgbe_wait_for_link_up(struct ixgbe_hw *hw)
41654418919fSjohnjiang {
41664418919fSjohnjiang #ifdef RTE_EXEC_ENV_FREEBSD
4167*2d9fd380Sjfb8856606 	int err, i;
4168*2d9fd380Sjfb8856606 	bool link_up = false;
41694418919fSjohnjiang 	uint32_t speed = 0;
41704418919fSjohnjiang 	const int nb_iter = 25;
41714418919fSjohnjiang 
41724418919fSjohnjiang 	for (i = 0; i < nb_iter; i++) {
41734418919fSjohnjiang 		err = ixgbe_check_link(hw, &speed, &link_up, 0);
41744418919fSjohnjiang 		if (err)
41754418919fSjohnjiang 			return err;
41764418919fSjohnjiang 		if (link_up)
41774418919fSjohnjiang 			return 0;
41784418919fSjohnjiang 		msec_delay(200);
41794418919fSjohnjiang 	}
41804418919fSjohnjiang 
41814418919fSjohnjiang 	return 0;
41824418919fSjohnjiang #else
41834418919fSjohnjiang 	RTE_SET_USED(hw);
41844418919fSjohnjiang 	return 0;
41854418919fSjohnjiang #endif
4186d30ea906Sjfb8856606 }
4187d30ea906Sjfb8856606 
4188a9643ea8Slogwang /* return 0 means link status changed, -1 means not changed */
4189d30ea906Sjfb8856606 int
41902bfe3f2eSlogwang ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
41912bfe3f2eSlogwang 			    int wait_to_complete, int vf)
4192a9643ea8Slogwang {
4193a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
41944418919fSjohnjiang 	struct ixgbe_adapter *ad = dev->data->dev_private;
4195d30ea906Sjfb8856606 	struct rte_eth_link link;
4196a9643ea8Slogwang 	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
41972bfe3f2eSlogwang 	struct ixgbe_interrupt *intr =
41982bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4199*2d9fd380Sjfb8856606 	bool link_up;
4200a9643ea8Slogwang 	int diag;
42012bfe3f2eSlogwang 	int wait = 1;
42024418919fSjohnjiang 	u32 esdp_reg;
4203a9643ea8Slogwang 
4204d30ea906Sjfb8856606 	memset(&link, 0, sizeof(link));
4205a9643ea8Slogwang 	link.link_status = ETH_LINK_DOWN;
4206d30ea906Sjfb8856606 	link.link_speed = ETH_SPEED_NUM_NONE;
4207a9643ea8Slogwang 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
42084418919fSjohnjiang 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
42094418919fSjohnjiang 			ETH_LINK_SPEED_FIXED);
4210a9643ea8Slogwang 
4211a9643ea8Slogwang 	hw->mac.get_link_status = true;
4212a9643ea8Slogwang 
4213d30ea906Sjfb8856606 	if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG)
4214d30ea906Sjfb8856606 		return rte_eth_linkstatus_set(dev, &link);
42152bfe3f2eSlogwang 
4216a9643ea8Slogwang 	/* check if it needs to wait to complete, if lsc interrupt is enabled */
4217a9643ea8Slogwang 	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
42182bfe3f2eSlogwang 		wait = 0;
42192bfe3f2eSlogwang 
42200c6bd470Sfengbojiang /* BSD has no interrupt mechanism, so force NIC status synchronization. */
42210c6bd470Sfengbojiang #ifdef RTE_EXEC_ENV_FREEBSD
42220c6bd470Sfengbojiang 	wait = 1;
42230c6bd470Sfengbojiang #endif
42240c6bd470Sfengbojiang 
42252bfe3f2eSlogwang 	if (vf)
42262bfe3f2eSlogwang 		diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
4227a9643ea8Slogwang 	else
42282bfe3f2eSlogwang 		diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
4229a9643ea8Slogwang 
4230a9643ea8Slogwang 	if (diag != 0) {
4231a9643ea8Slogwang 		link.link_speed = ETH_SPEED_NUM_100M;
4232a9643ea8Slogwang 		link.link_duplex = ETH_LINK_FULL_DUPLEX;
4233d30ea906Sjfb8856606 		return rte_eth_linkstatus_set(dev, &link);
4234a9643ea8Slogwang 	}
4235a9643ea8Slogwang 
42364418919fSjohnjiang 	if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
42374418919fSjohnjiang 		esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
42384418919fSjohnjiang 		if ((esdp_reg & IXGBE_ESDP_SDP3))
42394418919fSjohnjiang 			link_up = 0;
42404418919fSjohnjiang 	}
42414418919fSjohnjiang 
4242a9643ea8Slogwang 	if (link_up == 0) {
4243d30ea906Sjfb8856606 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
42440c6bd470Sfengbojiang 			ixgbe_dev_wait_setup_link_complete(dev, 0);
42454418919fSjohnjiang 			if (rte_atomic32_test_and_set(&ad->link_thread_running)) {
42460c6bd470Sfengbojiang 				/* To avoid race condition between threads, set
42470c6bd470Sfengbojiang 				 * the IXGBE_FLAG_NEED_LINK_CONFIG flag only
42480c6bd470Sfengbojiang 				 * when there is no link thread running.
42490c6bd470Sfengbojiang 				 */
42500c6bd470Sfengbojiang 				intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
42514418919fSjohnjiang 				if (rte_ctrl_thread_create(&ad->link_thread_tid,
42524418919fSjohnjiang 					"ixgbe-link-handler",
42534418919fSjohnjiang 					NULL,
42544418919fSjohnjiang 					ixgbe_dev_setup_link_thread_handler,
42554418919fSjohnjiang 					dev) < 0) {
42564418919fSjohnjiang 					PMD_DRV_LOG(ERR,
42574418919fSjohnjiang 						"Create link thread failed!");
42584418919fSjohnjiang 					rte_atomic32_clear(&ad->link_thread_running);
42594418919fSjohnjiang 				}
42604418919fSjohnjiang 			} else {
42614418919fSjohnjiang 				PMD_DRV_LOG(ERR,
42624418919fSjohnjiang 					"Other link thread is running now!");
42634418919fSjohnjiang 			}
4264a9643ea8Slogwang 		}
4265d30ea906Sjfb8856606 		return rte_eth_linkstatus_set(dev, &link);
4266d30ea906Sjfb8856606 	}
4267d30ea906Sjfb8856606 
4268a9643ea8Slogwang 	link.link_status = ETH_LINK_UP;
4269a9643ea8Slogwang 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
4270a9643ea8Slogwang 
4271a9643ea8Slogwang 	switch (link_speed) {
4272a9643ea8Slogwang 	default:
4273a9643ea8Slogwang 	case IXGBE_LINK_SPEED_UNKNOWN:
4274*2d9fd380Sjfb8856606 		link.link_speed = ETH_SPEED_NUM_UNKNOWN;
4275a9643ea8Slogwang 		break;
4276a9643ea8Slogwang 
42770c6bd470Sfengbojiang 	case IXGBE_LINK_SPEED_10_FULL:
42780c6bd470Sfengbojiang 		link.link_speed = ETH_SPEED_NUM_10M;
42790c6bd470Sfengbojiang 		break;
42800c6bd470Sfengbojiang 
4281a9643ea8Slogwang 	case IXGBE_LINK_SPEED_100_FULL:
4282a9643ea8Slogwang 		link.link_speed = ETH_SPEED_NUM_100M;
4283a9643ea8Slogwang 		break;
4284a9643ea8Slogwang 
4285a9643ea8Slogwang 	case IXGBE_LINK_SPEED_1GB_FULL:
4286a9643ea8Slogwang 		link.link_speed = ETH_SPEED_NUM_1G;
4287a9643ea8Slogwang 		break;
4288a9643ea8Slogwang 
42892bfe3f2eSlogwang 	case IXGBE_LINK_SPEED_2_5GB_FULL:
42902bfe3f2eSlogwang 		link.link_speed = ETH_SPEED_NUM_2_5G;
42912bfe3f2eSlogwang 		break;
42922bfe3f2eSlogwang 
42932bfe3f2eSlogwang 	case IXGBE_LINK_SPEED_5GB_FULL:
42942bfe3f2eSlogwang 		link.link_speed = ETH_SPEED_NUM_5G;
42952bfe3f2eSlogwang 		break;
42962bfe3f2eSlogwang 
4297a9643ea8Slogwang 	case IXGBE_LINK_SPEED_10GB_FULL:
4298a9643ea8Slogwang 		link.link_speed = ETH_SPEED_NUM_10G;
4299a9643ea8Slogwang 		break;
4300a9643ea8Slogwang 	}
4301a9643ea8Slogwang 
4302d30ea906Sjfb8856606 	return rte_eth_linkstatus_set(dev, &link);
4303a9643ea8Slogwang }
4304a9643ea8Slogwang 
43052bfe3f2eSlogwang static int
43062bfe3f2eSlogwang ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
43072bfe3f2eSlogwang {
43082bfe3f2eSlogwang 	return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
43092bfe3f2eSlogwang }
43102bfe3f2eSlogwang 
43112bfe3f2eSlogwang static int
43122bfe3f2eSlogwang ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
43132bfe3f2eSlogwang {
43142bfe3f2eSlogwang 	return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
43152bfe3f2eSlogwang }
43162bfe3f2eSlogwang 
43174418919fSjohnjiang static int
4318a9643ea8Slogwang ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
4319a9643ea8Slogwang {
4320a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4321a9643ea8Slogwang 	uint32_t fctrl;
4322a9643ea8Slogwang 
4323a9643ea8Slogwang 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4324a9643ea8Slogwang 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4325a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
43264418919fSjohnjiang 
43274418919fSjohnjiang 	return 0;
4328a9643ea8Slogwang }
4329a9643ea8Slogwang 
43304418919fSjohnjiang static int
4331a9643ea8Slogwang ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
4332a9643ea8Slogwang {
4333a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4334a9643ea8Slogwang 	uint32_t fctrl;
4335a9643ea8Slogwang 
4336a9643ea8Slogwang 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4337a9643ea8Slogwang 	fctrl &= (~IXGBE_FCTRL_UPE);
4338a9643ea8Slogwang 	if (dev->data->all_multicast == 1)
4339a9643ea8Slogwang 		fctrl |= IXGBE_FCTRL_MPE;
4340a9643ea8Slogwang 	else
4341a9643ea8Slogwang 		fctrl &= (~IXGBE_FCTRL_MPE);
4342a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
43434418919fSjohnjiang 
43444418919fSjohnjiang 	return 0;
4345a9643ea8Slogwang }
4346a9643ea8Slogwang 
43474418919fSjohnjiang static int
4348a9643ea8Slogwang ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
4349a9643ea8Slogwang {
4350a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4351a9643ea8Slogwang 	uint32_t fctrl;
4352a9643ea8Slogwang 
4353a9643ea8Slogwang 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4354a9643ea8Slogwang 	fctrl |= IXGBE_FCTRL_MPE;
4355a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
43564418919fSjohnjiang 
43574418919fSjohnjiang 	return 0;
4358a9643ea8Slogwang }
4359a9643ea8Slogwang 
43604418919fSjohnjiang static int
4361a9643ea8Slogwang ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4362a9643ea8Slogwang {
4363a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4364a9643ea8Slogwang 	uint32_t fctrl;
4365a9643ea8Slogwang 
4366a9643ea8Slogwang 	if (dev->data->promiscuous == 1)
43674418919fSjohnjiang 		return 0; /* must remain in all_multicast mode */
4368a9643ea8Slogwang 
4369a9643ea8Slogwang 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4370a9643ea8Slogwang 	fctrl &= (~IXGBE_FCTRL_MPE);
4371a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
43724418919fSjohnjiang 
43734418919fSjohnjiang 	return 0;
4374a9643ea8Slogwang }
4375a9643ea8Slogwang 
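/*
 * Editor's sketch, not part of the driver: the four FCTRL handlers above
 * back these generic calls. Assumes an initialized port; note that
 * all-multicast cannot be switched off while promiscuous mode is on.
 */
#include <stdbool.h>
#include <rte_ethdev.h>

static int
example_set_rx_filtering(uint16_t port_id, bool promisc, bool allmulti)
{
	int ret;

	ret = promisc ? rte_eth_promiscuous_enable(port_id) :
			rte_eth_promiscuous_disable(port_id);
	if (ret != 0)
		return ret;

	return allmulti ? rte_eth_allmulticast_enable(port_id) :
			  rte_eth_allmulticast_disable(port_id);
}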
4376a9643ea8Slogwang /**
4377a9643ea8Slogwang  * It enables or disables the LSC interrupt, as requested.
4378a9643ea8Slogwang  * It will be called only once, during NIC initialization.
4379a9643ea8Slogwang  *
4380a9643ea8Slogwang  * @param dev
4381a9643ea8Slogwang  *  Pointer to struct rte_eth_dev.
43822bfe3f2eSlogwang  * @param on
43832bfe3f2eSlogwang  *  Enable or Disable.
4384a9643ea8Slogwang  *
4385a9643ea8Slogwang  * @return
4386a9643ea8Slogwang  *  - On success, zero.
4387a9643ea8Slogwang  *  - On failure, a negative value.
4388a9643ea8Slogwang  */
4389a9643ea8Slogwang static int
43902bfe3f2eSlogwang ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4391a9643ea8Slogwang {
4392a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
4393a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4394a9643ea8Slogwang 
4395a9643ea8Slogwang 	ixgbe_dev_link_status_print(dev);
43962bfe3f2eSlogwang 	if (on)
4397a9643ea8Slogwang 		intr->mask |= IXGBE_EICR_LSC;
43982bfe3f2eSlogwang 	else
43992bfe3f2eSlogwang 		intr->mask &= ~IXGBE_EICR_LSC;
4400a9643ea8Slogwang 
4401a9643ea8Slogwang 	return 0;
4402a9643ea8Slogwang }
4403a9643ea8Slogwang 
4404a9643ea8Slogwang /**
4405a9643ea8Slogwang  * It enables the Rx queue interrupts by setting their mask bits.
4406a9643ea8Slogwang  * It will be called only once, during NIC initialization.
4407a9643ea8Slogwang  *
4408a9643ea8Slogwang  * @param dev
4409a9643ea8Slogwang  *  Pointer to struct rte_eth_dev.
4410a9643ea8Slogwang  *
4411a9643ea8Slogwang  * @return
4412a9643ea8Slogwang  *  - On success, zero.
4413a9643ea8Slogwang  *  - On failure, a negative value.
4414a9643ea8Slogwang  */
4415a9643ea8Slogwang static int
4416a9643ea8Slogwang ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4417a9643ea8Slogwang {
4418a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
4419a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4420a9643ea8Slogwang 
4421a9643ea8Slogwang 	intr->mask |= IXGBE_EICR_RTX_QUEUE;
4422a9643ea8Slogwang 
4423a9643ea8Slogwang 	return 0;
4424a9643ea8Slogwang }
4425a9643ea8Slogwang 
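/*
 * Editor's sketch, not part of the driver: with IXGBE_EICR_RTX_QUEUE armed
 * by the hook above, an application toggles per-queue interrupts around its
 * sleep/poll transitions. Placeholder ids; the epoll event loop is omitted.
 */
#include <rte_ethdev.h>

static int
example_toggle_rxq_irq(uint16_t port_id, uint16_t queue_id, int sleeping)
{
	if (sleeping)
		/* Re-arm the queue interrupt before blocking on it. */
		return rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* Mask it again while the lcore busy-polls the queue. */
	return rte_eth_dev_rx_intr_disable(port_id, queue_id);
}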
44262bfe3f2eSlogwang /**
44272bfe3f2eSlogwang  * It enables the MACsec interrupt by setting its mask bit.
44282bfe3f2eSlogwang  * It will be called only once, during NIC initialization.
44292bfe3f2eSlogwang  *
44302bfe3f2eSlogwang  * @param dev
44312bfe3f2eSlogwang  *  Pointer to struct rte_eth_dev.
44322bfe3f2eSlogwang  *
44332bfe3f2eSlogwang  * @return
44342bfe3f2eSlogwang  *  - On success, zero.
44352bfe3f2eSlogwang  *  - On failure, a negative value.
44362bfe3f2eSlogwang  */
44372bfe3f2eSlogwang static int
44382bfe3f2eSlogwang ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
44392bfe3f2eSlogwang {
44402bfe3f2eSlogwang 	struct ixgbe_interrupt *intr =
44412bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
44422bfe3f2eSlogwang 
44432bfe3f2eSlogwang 	intr->mask |= IXGBE_EICR_LINKSEC;
44442bfe3f2eSlogwang 
44452bfe3f2eSlogwang 	return 0;
44462bfe3f2eSlogwang }
44472bfe3f2eSlogwang 
4448a9643ea8Slogwang /*
4449a9643ea8Slogwang  * It reads the ICR and, on IXGBE_EICR_LSC, sets the flag for link_update.
4450a9643ea8Slogwang  *
4451a9643ea8Slogwang  * @param dev
4452a9643ea8Slogwang  *  Pointer to struct rte_eth_dev.
4453a9643ea8Slogwang  *
4454a9643ea8Slogwang  * @return
4455a9643ea8Slogwang  *  - On success, zero.
4456a9643ea8Slogwang  *  - On failure, a negative value.
4457a9643ea8Slogwang  */
4458a9643ea8Slogwang static int
4459a9643ea8Slogwang ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
4460a9643ea8Slogwang {
4461a9643ea8Slogwang 	uint32_t eicr;
4462a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4463a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
4464a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4465a9643ea8Slogwang 
4466a9643ea8Slogwang 	/* clear all cause mask */
4467a9643ea8Slogwang 	ixgbe_disable_intr(hw);
4468a9643ea8Slogwang 
4469a9643ea8Slogwang 	/* read-on-clear nic registers here */
4470a9643ea8Slogwang 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4471a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
4472a9643ea8Slogwang 
4473a9643ea8Slogwang 	intr->flags = 0;
4474a9643ea8Slogwang 
4475a9643ea8Slogwang 	/* set flag for async link update */
4476a9643ea8Slogwang 	if (eicr & IXGBE_EICR_LSC)
4477a9643ea8Slogwang 		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4478a9643ea8Slogwang 
4479a9643ea8Slogwang 	if (eicr & IXGBE_EICR_MAILBOX)
4480a9643ea8Slogwang 		intr->flags |= IXGBE_FLAG_MAILBOX;
4481a9643ea8Slogwang 
44822bfe3f2eSlogwang 	if (eicr & IXGBE_EICR_LINKSEC)
44832bfe3f2eSlogwang 		intr->flags |= IXGBE_FLAG_MACSEC;
44842bfe3f2eSlogwang 
4485a9643ea8Slogwang 	if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
4486a9643ea8Slogwang 	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
4487a9643ea8Slogwang 	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
4488a9643ea8Slogwang 		intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
4489a9643ea8Slogwang 
4490a9643ea8Slogwang 	return 0;
4491a9643ea8Slogwang }
4492a9643ea8Slogwang 
4493a9643ea8Slogwang /**
4494a9643ea8Slogwang  * It gets and then prints the link status.
4495a9643ea8Slogwang  *
4496a9643ea8Slogwang  * @param dev
4497a9643ea8Slogwang  *  Pointer to struct rte_eth_dev.
4498a9643ea8Slogwang  *
4499a9643ea8Slogwang  * @return
4500a9643ea8Slogwang  *  - On success, zero.
4501a9643ea8Slogwang  *  - On failure, a negative value.
4502a9643ea8Slogwang  */
4503a9643ea8Slogwang static void
4504a9643ea8Slogwang ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
4505a9643ea8Slogwang {
45062bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4507a9643ea8Slogwang 	struct rte_eth_link link;
4508a9643ea8Slogwang 
4509d30ea906Sjfb8856606 	rte_eth_linkstatus_get(dev, &link);
4510d30ea906Sjfb8856606 
4511a9643ea8Slogwang 	if (link.link_status) {
4512a9643ea8Slogwang 		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
4513a9643ea8Slogwang 					(int)(dev->data->port_id),
4514a9643ea8Slogwang 					(unsigned)link.link_speed,
4515a9643ea8Slogwang 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
4516a9643ea8Slogwang 					"full-duplex" : "half-duplex");
4517a9643ea8Slogwang 	} else {
4518a9643ea8Slogwang 		PMD_INIT_LOG(INFO, " Port %d: Link Down",
4519a9643ea8Slogwang 				(int)(dev->data->port_id));
4520a9643ea8Slogwang 	}
45212bfe3f2eSlogwang 	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
45222bfe3f2eSlogwang 				pci_dev->addr.domain,
45232bfe3f2eSlogwang 				pci_dev->addr.bus,
45242bfe3f2eSlogwang 				pci_dev->addr.devid,
45252bfe3f2eSlogwang 				pci_dev->addr.function);
4526a9643ea8Slogwang }
4527a9643ea8Slogwang 
4528a9643ea8Slogwang /*
4529a9643ea8Slogwang  * It executes link_update after knowing an interrupt occurred.
4530a9643ea8Slogwang  *
4531a9643ea8Slogwang  * @param dev
4532a9643ea8Slogwang  *  Pointer to struct rte_eth_dev.
4533a9643ea8Slogwang  *
4534a9643ea8Slogwang  * @return
4535a9643ea8Slogwang  *  - On success, zero.
4536a9643ea8Slogwang  *  - On failure, a negative value.
4537a9643ea8Slogwang  */
4538a9643ea8Slogwang static int
4539d30ea906Sjfb8856606 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
4540a9643ea8Slogwang {
4541a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
4542a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4543a9643ea8Slogwang 	int64_t timeout;
4544a9643ea8Slogwang 	struct ixgbe_hw *hw =
4545a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4546a9643ea8Slogwang 
4547a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4548a9643ea8Slogwang 
4549a9643ea8Slogwang 	if (intr->flags & IXGBE_FLAG_MAILBOX) {
4550a9643ea8Slogwang 		ixgbe_pf_mbx_process(dev);
4551a9643ea8Slogwang 		intr->flags &= ~IXGBE_FLAG_MAILBOX;
4552a9643ea8Slogwang 	}
4553a9643ea8Slogwang 
4554a9643ea8Slogwang 	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4555a9643ea8Slogwang 		ixgbe_handle_lasi(hw);
4556a9643ea8Slogwang 		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4557a9643ea8Slogwang 	}
4558a9643ea8Slogwang 
4559a9643ea8Slogwang 	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4560d30ea906Sjfb8856606 		struct rte_eth_link link;
4561d30ea906Sjfb8856606 
4562a9643ea8Slogwang 		/* get the link status before link update, for predicting later */
4563d30ea906Sjfb8856606 		rte_eth_linkstatus_get(dev, &link);
4564a9643ea8Slogwang 
4565a9643ea8Slogwang 		ixgbe_dev_link_update(dev, 0);
4566a9643ea8Slogwang 
4567a9643ea8Slogwang 		/* likely to come up */
4568a9643ea8Slogwang 		if (!link.link_status)
4569a9643ea8Slogwang 			/* handle it 1 sec later, waiting for it to stabilize */
4570a9643ea8Slogwang 			timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4571a9643ea8Slogwang 		/* likely to go down */
4572a9643ea8Slogwang 		else
4573a9643ea8Slogwang 			/* handle it 4 sec later, waiting for it to stabilize */
4574a9643ea8Slogwang 			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4575a9643ea8Slogwang 
4576a9643ea8Slogwang 		ixgbe_dev_link_status_print(dev);
4577a9643ea8Slogwang 		if (rte_eal_alarm_set(timeout * 1000,
4578a9643ea8Slogwang 				      ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4579a9643ea8Slogwang 			PMD_DRV_LOG(ERR, "Error setting alarm");
45802bfe3f2eSlogwang 		else {
45812bfe3f2eSlogwang 			/* remember original mask */
45822bfe3f2eSlogwang 			intr->mask_original = intr->mask;
45832bfe3f2eSlogwang 			/* only disable lsc interrupt */
45842bfe3f2eSlogwang 			intr->mask &= ~IXGBE_EIMS_LSC;
45852bfe3f2eSlogwang 		}
4586a9643ea8Slogwang 	}
4587a9643ea8Slogwang 
45882bfe3f2eSlogwang 	PMD_DRV_LOG(DEBUG, "enable intr immediately");
45892bfe3f2eSlogwang 	ixgbe_enable_intr(dev);
4590a9643ea8Slogwang 
4591a9643ea8Slogwang 	return 0;
4592a9643ea8Slogwang }
4593a9643ea8Slogwang 
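/*
 * Editor's sketch, not part of the driver: RTE_ETH_EVENT_INTR_LSC, raised by
 * the delayed handler below, reaches callbacks registered like this.
 * dev_conf.intr_conf.lsc must be set for the PMD to use the interrupt path.
 */
#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
example_lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
	       void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	printf("link state changed on port %u\n", port_id);
	return 0;
}

static void
example_register_lsc(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      example_lsc_cb, NULL);
}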
4594a9643ea8Slogwang /**
4595a9643ea8Slogwang  * Interrupt handler which shall be registered as an alarm callback for
4596a9643ea8Slogwang  * delayed handling of specific interrupts, waiting for the NIC state to
4597a9643ea8Slogwang  * stabilize. As the ixgbe interrupt state is not stable right after the
4598a9643ea8Slogwang  * link goes down, it needs to wait 4 seconds to get a stable status.
4599a9643ea8Slogwang  *
4600a9643ea8Slogwang  * @param handle
4601a9643ea8Slogwang  *  Pointer to interrupt handle.
4602a9643ea8Slogwang  * @param param
4603a9643ea8Slogwang  *  The address of parameter (struct rte_eth_dev *) registered before.
4604a9643ea8Slogwang  *
4605a9643ea8Slogwang  * @return
4606a9643ea8Slogwang  *  void
4607a9643ea8Slogwang  */
4608a9643ea8Slogwang static void
4609a9643ea8Slogwang ixgbe_dev_interrupt_delayed_handler(void *param)
4610a9643ea8Slogwang {
4611a9643ea8Slogwang 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
46122bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
46132bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4614a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
4615a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4616a9643ea8Slogwang 	struct ixgbe_hw *hw =
4617a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4618a9643ea8Slogwang 	uint32_t eicr;
4619a9643ea8Slogwang 
46202bfe3f2eSlogwang 	ixgbe_disable_intr(hw);
46212bfe3f2eSlogwang 
4622a9643ea8Slogwang 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4623a9643ea8Slogwang 	if (eicr & IXGBE_EICR_MAILBOX)
4624a9643ea8Slogwang 		ixgbe_pf_mbx_process(dev);
4625a9643ea8Slogwang 
4626a9643ea8Slogwang 	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4627a9643ea8Slogwang 		ixgbe_handle_lasi(hw);
4628a9643ea8Slogwang 		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4629a9643ea8Slogwang 	}
4630a9643ea8Slogwang 
4631a9643ea8Slogwang 	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4632a9643ea8Slogwang 		ixgbe_dev_link_update(dev, 0);
4633a9643ea8Slogwang 		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4634a9643ea8Slogwang 		ixgbe_dev_link_status_print(dev);
4635*2d9fd380Sjfb8856606 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
4636a9643ea8Slogwang 	}
4637a9643ea8Slogwang 
46382bfe3f2eSlogwang 	if (intr->flags & IXGBE_FLAG_MACSEC) {
4639*2d9fd380Sjfb8856606 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, NULL);
46402bfe3f2eSlogwang 		intr->flags &= ~IXGBE_FLAG_MACSEC;
46412bfe3f2eSlogwang 	}
46422bfe3f2eSlogwang 
46432bfe3f2eSlogwang 	/* restore original mask */
46442bfe3f2eSlogwang 	intr->mask = intr->mask_original;
46452bfe3f2eSlogwang 	intr->mask_original = 0;
46462bfe3f2eSlogwang 
4647a9643ea8Slogwang 	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4648a9643ea8Slogwang 	ixgbe_enable_intr(dev);
46494418919fSjohnjiang 	rte_intr_ack(intr_handle);
4650a9643ea8Slogwang }
4651a9643ea8Slogwang 
4652a9643ea8Slogwang /**
4653a9643ea8Slogwang  * Interrupt handler triggered by the NIC for handling a
4654a9643ea8Slogwang  * specific interrupt.
4655a9643ea8Slogwang  *
4656a9643ea8Slogwang  * @param handle
4657a9643ea8Slogwang  *  Pointer to interrupt handle.
4658a9643ea8Slogwang  * @param param
4659a9643ea8Slogwang  *  The address of parameter (struct rte_eth_dev *) registered before.
4660a9643ea8Slogwang  *
4661a9643ea8Slogwang  * @return
4662a9643ea8Slogwang  *  void
4663a9643ea8Slogwang  */
4664a9643ea8Slogwang static void
46652bfe3f2eSlogwang ixgbe_dev_interrupt_handler(void *param)
4666a9643ea8Slogwang {
4667a9643ea8Slogwang 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4668a9643ea8Slogwang 
4669a9643ea8Slogwang 	ixgbe_dev_interrupt_get_status(dev);
4670d30ea906Sjfb8856606 	ixgbe_dev_interrupt_action(dev);
4671a9643ea8Slogwang }
4672a9643ea8Slogwang 
4673a9643ea8Slogwang static int
4674a9643ea8Slogwang ixgbe_dev_led_on(struct rte_eth_dev *dev)
4675a9643ea8Slogwang {
4676a9643ea8Slogwang 	struct ixgbe_hw *hw;
4677a9643ea8Slogwang 
4678a9643ea8Slogwang 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4679a9643ea8Slogwang 	return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4680a9643ea8Slogwang }
4681a9643ea8Slogwang 
4682a9643ea8Slogwang static int
4683a9643ea8Slogwang ixgbe_dev_led_off(struct rte_eth_dev *dev)
4684a9643ea8Slogwang {
4685a9643ea8Slogwang 	struct ixgbe_hw *hw;
4686a9643ea8Slogwang 
4687a9643ea8Slogwang 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4688a9643ea8Slogwang 	return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4689a9643ea8Slogwang }
4690a9643ea8Slogwang 
4691a9643ea8Slogwang static int
4692a9643ea8Slogwang ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4693a9643ea8Slogwang {
4694a9643ea8Slogwang 	struct ixgbe_hw *hw;
4695a9643ea8Slogwang 	uint32_t mflcn_reg;
4696a9643ea8Slogwang 	uint32_t fccfg_reg;
4697a9643ea8Slogwang 	int rx_pause;
4698a9643ea8Slogwang 	int tx_pause;
4699a9643ea8Slogwang 
4700a9643ea8Slogwang 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4701a9643ea8Slogwang 
4702a9643ea8Slogwang 	fc_conf->pause_time = hw->fc.pause_time;
4703a9643ea8Slogwang 	fc_conf->high_water = hw->fc.high_water[0];
4704a9643ea8Slogwang 	fc_conf->low_water = hw->fc.low_water[0];
4705a9643ea8Slogwang 	fc_conf->send_xon = hw->fc.send_xon;
4706a9643ea8Slogwang 	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4707a9643ea8Slogwang 
4708a9643ea8Slogwang 	/*
4709a9643ea8Slogwang 	 * Return rx_pause status according to actual setting of
4710a9643ea8Slogwang 	 * MFLCN register.
4711a9643ea8Slogwang 	 */
4712a9643ea8Slogwang 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
47130c6bd470Sfengbojiang 	if (mflcn_reg & IXGBE_MFLCN_PMCF)
47140c6bd470Sfengbojiang 		fc_conf->mac_ctrl_frame_fwd = 1;
47150c6bd470Sfengbojiang 	else
47160c6bd470Sfengbojiang 		fc_conf->mac_ctrl_frame_fwd = 0;
47170c6bd470Sfengbojiang 
4718a9643ea8Slogwang 	if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4719a9643ea8Slogwang 		rx_pause = 1;
4720a9643ea8Slogwang 	else
4721a9643ea8Slogwang 		rx_pause = 0;
4722a9643ea8Slogwang 
4723a9643ea8Slogwang 	/*
4724a9643ea8Slogwang 	 * Return tx_pause status according to actual setting of
4725a9643ea8Slogwang 	 * FCCFG register.
4726a9643ea8Slogwang 	 */
4727a9643ea8Slogwang 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4728a9643ea8Slogwang 	if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
4729a9643ea8Slogwang 		tx_pause = 1;
4730a9643ea8Slogwang 	else
4731a9643ea8Slogwang 		tx_pause = 0;
4732a9643ea8Slogwang 
4733a9643ea8Slogwang 	if (rx_pause && tx_pause)
4734a9643ea8Slogwang 		fc_conf->mode = RTE_FC_FULL;
4735a9643ea8Slogwang 	else if (rx_pause)
4736a9643ea8Slogwang 		fc_conf->mode = RTE_FC_RX_PAUSE;
4737a9643ea8Slogwang 	else if (tx_pause)
4738a9643ea8Slogwang 		fc_conf->mode = RTE_FC_TX_PAUSE;
4739a9643ea8Slogwang 	else
4740a9643ea8Slogwang 		fc_conf->mode = RTE_FC_NONE;
4741a9643ea8Slogwang 
4742a9643ea8Slogwang 	return 0;
4743a9643ea8Slogwang }
4744a9643ea8Slogwang 
4745a9643ea8Slogwang static int
4746a9643ea8Slogwang ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4747a9643ea8Slogwang {
4748a9643ea8Slogwang 	struct ixgbe_hw *hw;
47494418919fSjohnjiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
4750a9643ea8Slogwang 	int err;
4751a9643ea8Slogwang 	uint32_t rx_buf_size;
4752a9643ea8Slogwang 	uint32_t max_high_water;
4753a9643ea8Slogwang 	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4754a9643ea8Slogwang 		ixgbe_fc_none,
4755a9643ea8Slogwang 		ixgbe_fc_rx_pause,
4756a9643ea8Slogwang 		ixgbe_fc_tx_pause,
4757a9643ea8Slogwang 		ixgbe_fc_full
4758a9643ea8Slogwang 	};
4759a9643ea8Slogwang 
4760a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
4761a9643ea8Slogwang 
4762a9643ea8Slogwang 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4763a9643ea8Slogwang 	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
4764a9643ea8Slogwang 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4765a9643ea8Slogwang 
4766a9643ea8Slogwang 	/*
4767a9643ea8Slogwang 	 * Reserve at least one Ethernet frame for the high_water/low_water
4768a9643ea8Slogwang 	 * watermarks (in kilobytes) on ixgbe.
4769a9643ea8Slogwang 	 */
47704418919fSjohnjiang 	max_high_water = (rx_buf_size -
47714418919fSjohnjiang 			RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4772a9643ea8Slogwang 	if ((fc_conf->high_water > max_high_water) ||
4773a9643ea8Slogwang 		(fc_conf->high_water < fc_conf->low_water)) {
4774a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4775a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4776a9643ea8Slogwang 		return -EINVAL;
4777a9643ea8Slogwang 	}
4778a9643ea8Slogwang 
4779a9643ea8Slogwang 	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
4780a9643ea8Slogwang 	hw->fc.pause_time     = fc_conf->pause_time;
4781a9643ea8Slogwang 	hw->fc.high_water[0]  = fc_conf->high_water;
4782a9643ea8Slogwang 	hw->fc.low_water[0]   = fc_conf->low_water;
4783a9643ea8Slogwang 	hw->fc.send_xon       = fc_conf->send_xon;
4784a9643ea8Slogwang 	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
47854418919fSjohnjiang 	adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
4786a9643ea8Slogwang 
47874418919fSjohnjiang 	err = ixgbe_flow_ctrl_enable(dev, hw);
47884418919fSjohnjiang 	if (err < 0) {
47894418919fSjohnjiang 		PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err);
4790a9643ea8Slogwang 		return -EIO;
4791a9643ea8Slogwang 	}
47924418919fSjohnjiang 	return err;
47934418919fSjohnjiang }
4794a9643ea8Slogwang 
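/*
 * Editor's sketch, not part of the driver: read-modify-write of the pause
 * configuration through the two handlers above. Only the mode is changed;
 * the watermarks keep whatever the get call reported.
 */
#include <string.h>
#include <rte_ethdev.h>

static int
example_enable_full_fc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	memset(&fc_conf, 0, sizeof(fc_conf));
	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	fc_conf.mode = RTE_FC_FULL;	/* pause frames in both directions */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}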
4795a9643ea8Slogwang /**
4796a9643ea8Slogwang  *  ixgbe_pfc_enable_generic - Enable flow control
4797a9643ea8Slogwang  *  @hw: pointer to hardware structure
4798a9643ea8Slogwang  *  @tc_num: traffic class number
4799a9643ea8Slogwang  *  Enable flow control according to the current settings.
4800a9643ea8Slogwang  */
4801a9643ea8Slogwang static int
4802a9643ea8Slogwang ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
4803a9643ea8Slogwang {
4804a9643ea8Slogwang 	int ret_val = 0;
4805a9643ea8Slogwang 	uint32_t mflcn_reg, fccfg_reg;
4806a9643ea8Slogwang 	uint32_t reg;
4807a9643ea8Slogwang 	uint32_t fcrtl, fcrth;
4808a9643ea8Slogwang 	uint8_t i;
4809a9643ea8Slogwang 	uint8_t nb_rx_en;
4810a9643ea8Slogwang 
4811a9643ea8Slogwang 	/* Validate the water mark configuration */
4812a9643ea8Slogwang 	if (!hw->fc.pause_time) {
4813a9643ea8Slogwang 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4814a9643ea8Slogwang 		goto out;
4815a9643ea8Slogwang 	}
4816a9643ea8Slogwang 
4817a9643ea8Slogwang 	/* Low water mark of zero causes XOFF floods */
4818a9643ea8Slogwang 	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
4819a9643ea8Slogwang 		 /* High/Low water can not be 0 */
4820a9643ea8Slogwang 		if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
4821a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4822a9643ea8Slogwang 			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4823a9643ea8Slogwang 			goto out;
4824a9643ea8Slogwang 		}
4825a9643ea8Slogwang 
4826a9643ea8Slogwang 		if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
4827a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
4828a9643ea8Slogwang 			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
4829a9643ea8Slogwang 			goto out;
4830a9643ea8Slogwang 		}
4831a9643ea8Slogwang 	}
4832a9643ea8Slogwang 	/* Negotiate the fc mode to use */
4833a9643ea8Slogwang 	ixgbe_fc_autoneg(hw);
4834a9643ea8Slogwang 
4835a9643ea8Slogwang 	/* Disable any previous flow control settings */
4836a9643ea8Slogwang 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4837a9643ea8Slogwang 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
4838a9643ea8Slogwang 
4839a9643ea8Slogwang 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4840a9643ea8Slogwang 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
4841a9643ea8Slogwang 
4842a9643ea8Slogwang 	switch (hw->fc.current_mode) {
4843a9643ea8Slogwang 	case ixgbe_fc_none:
4844a9643ea8Slogwang 		/*
4845a9643ea8Slogwang 		 * If more than one Rx Priority Flow Control is enabled,
4846a9643ea8Slogwang 		 * Tx pause cannot be disabled.
4847a9643ea8Slogwang 		 */
4848a9643ea8Slogwang 		nb_rx_en = 0;
4849a9643ea8Slogwang 		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4850a9643ea8Slogwang 			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4851a9643ea8Slogwang 			if (reg & IXGBE_FCRTH_FCEN)
4852a9643ea8Slogwang 				nb_rx_en++;
4853a9643ea8Slogwang 		}
4854a9643ea8Slogwang 		if (nb_rx_en > 1)
4855a9643ea8Slogwang 			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4856a9643ea8Slogwang 		break;
4857a9643ea8Slogwang 	case ixgbe_fc_rx_pause:
4858a9643ea8Slogwang 		/*
4859a9643ea8Slogwang 		 * Rx Flow control is enabled and Tx Flow control is
4860a9643ea8Slogwang 		 * disabled by software override. Since there really
4861a9643ea8Slogwang 		 * isn't a way to advertise that we are capable of RX
4862a9643ea8Slogwang 		 * Pause ONLY, we will advertise that we support both
4863a9643ea8Slogwang 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
4864a9643ea8Slogwang 		 * disable the adapter's ability to send PAUSE frames.
4865a9643ea8Slogwang 		 */
4866a9643ea8Slogwang 		mflcn_reg |= IXGBE_MFLCN_RPFCE;
4867a9643ea8Slogwang 		/*
4868a9643ea8Slogwang 		 * If more than one Rx Priority Flow Control is enabled,
4869a9643ea8Slogwang 		 * Tx pause cannot be disabled.
4870a9643ea8Slogwang 		 */
4871a9643ea8Slogwang 		nb_rx_en = 0;
4872a9643ea8Slogwang 		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4873a9643ea8Slogwang 			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
4874a9643ea8Slogwang 			if (reg & IXGBE_FCRTH_FCEN)
4875a9643ea8Slogwang 				nb_rx_en++;
4876a9643ea8Slogwang 		}
4877a9643ea8Slogwang 		if (nb_rx_en > 1)
4878a9643ea8Slogwang 			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4879a9643ea8Slogwang 		break;
4880a9643ea8Slogwang 	case ixgbe_fc_tx_pause:
4881a9643ea8Slogwang 		/*
4882a9643ea8Slogwang 		 * Tx Flow control is enabled, and Rx Flow control is
4883a9643ea8Slogwang 		 * disabled by software override.
4884a9643ea8Slogwang 		 */
4885a9643ea8Slogwang 		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4886a9643ea8Slogwang 		break;
4887a9643ea8Slogwang 	case ixgbe_fc_full:
4888a9643ea8Slogwang 		/* Flow control (both Rx and Tx) is enabled by SW override. */
4889a9643ea8Slogwang 		mflcn_reg |= IXGBE_MFLCN_RPFCE;
4890a9643ea8Slogwang 		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
4891a9643ea8Slogwang 		break;
4892a9643ea8Slogwang 	default:
4893a9643ea8Slogwang 		PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
4894a9643ea8Slogwang 		ret_val = IXGBE_ERR_CONFIG;
4895a9643ea8Slogwang 		goto out;
4896a9643ea8Slogwang 	}
4897a9643ea8Slogwang 
4898a9643ea8Slogwang 	/* Set 802.3x based flow control settings. */
4899a9643ea8Slogwang 	mflcn_reg |= IXGBE_MFLCN_DPF;
4900a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
4901a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
4902a9643ea8Slogwang 
4903a9643ea8Slogwang 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
4904a9643ea8Slogwang 	if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
4905a9643ea8Slogwang 		hw->fc.high_water[tc_num]) {
4906a9643ea8Slogwang 		fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
4907a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
4908a9643ea8Slogwang 		fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
4909a9643ea8Slogwang 	} else {
4910a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
4911a9643ea8Slogwang 		/*
4912a9643ea8Slogwang 		 * In order to prevent Tx hangs when the internal Tx
4913a9643ea8Slogwang 		 * switch is enabled we must set the high water mark
4914a9643ea8Slogwang 		 * to the maximum FCRTH value.  This allows the Tx
4915a9643ea8Slogwang 		 * switch to function even under heavy Rx workloads.
4916a9643ea8Slogwang 		 */
4917a9643ea8Slogwang 		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
4918a9643ea8Slogwang 	}
4919a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
4920a9643ea8Slogwang 
4921a9643ea8Slogwang 	/* Configure pause time (2 TCs per register) */
4922a9643ea8Slogwang 	reg = hw->fc.pause_time * 0x00010001;
4923a9643ea8Slogwang 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
4924a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
4925a9643ea8Slogwang 
4926a9643ea8Slogwang 	/* Configure flow control refresh threshold value */
4927a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
4928a9643ea8Slogwang 
4929a9643ea8Slogwang out:
4930a9643ea8Slogwang 	return ret_val;
4931a9643ea8Slogwang }
4932a9643ea8Slogwang 
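/*
 * Editor's note, illustrative arithmetic only: each 32-bit FCTTV register
 * carries the pause time for two traffic classes, so the 16-bit pause_time
 * is replicated into both halves by the multiply above, e.g.
 *   0x0680 * 0x00010001 = 0x06800680
 */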
4933a9643ea8Slogwang static int
4934a9643ea8Slogwang ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
4935a9643ea8Slogwang {
4936a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4937a9643ea8Slogwang 	int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
4938a9643ea8Slogwang 
4939a9643ea8Slogwang 	if (hw->mac.type != ixgbe_mac_82598EB) {
4940a9643ea8Slogwang 		ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
4941a9643ea8Slogwang 	}
4942a9643ea8Slogwang 	return ret_val;
4943a9643ea8Slogwang }
4944a9643ea8Slogwang 
4945a9643ea8Slogwang static int
4946a9643ea8Slogwang ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
4947a9643ea8Slogwang {
4948a9643ea8Slogwang 	int err;
4949a9643ea8Slogwang 	uint32_t rx_buf_size;
4950a9643ea8Slogwang 	uint32_t max_high_water;
4951a9643ea8Slogwang 	uint8_t tc_num;
4952a9643ea8Slogwang 	uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
4953a9643ea8Slogwang 	struct ixgbe_hw *hw =
4954a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4955a9643ea8Slogwang 	struct ixgbe_dcb_config *dcb_config =
4956a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4957a9643ea8Slogwang 
4958a9643ea8Slogwang 	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
4959a9643ea8Slogwang 		ixgbe_fc_none,
4960a9643ea8Slogwang 		ixgbe_fc_rx_pause,
4961a9643ea8Slogwang 		ixgbe_fc_tx_pause,
4962a9643ea8Slogwang 		ixgbe_fc_full
4963a9643ea8Slogwang 	};
4964a9643ea8Slogwang 
4965a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
4966a9643ea8Slogwang 
4967a9643ea8Slogwang 	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4968a9643ea8Slogwang 	tc_num = map[pfc_conf->priority];
4969a9643ea8Slogwang 	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
4970a9643ea8Slogwang 	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
4971a9643ea8Slogwang 	/*
4972a9643ea8Slogwang 	 * Reserve at least one Ethernet frame for the high_water/low_water
4973a9643ea8Slogwang 	 * watermarks (in kilobytes) on ixgbe.
4974a9643ea8Slogwang 	 */
49754418919fSjohnjiang 	max_high_water = (rx_buf_size -
49764418919fSjohnjiang 			RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
4977a9643ea8Slogwang 	if ((pfc_conf->fc.high_water > max_high_water) ||
4978a9643ea8Slogwang 	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
4979a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
4980a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
4981a9643ea8Slogwang 		return -EINVAL;
4982a9643ea8Slogwang 	}
4983a9643ea8Slogwang 
4984a9643ea8Slogwang 	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
4985a9643ea8Slogwang 	hw->fc.pause_time = pfc_conf->fc.pause_time;
4986a9643ea8Slogwang 	hw->fc.send_xon = pfc_conf->fc.send_xon;
4987a9643ea8Slogwang 	hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
4988a9643ea8Slogwang 	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
4989a9643ea8Slogwang 
4990a9643ea8Slogwang 	err = ixgbe_dcb_pfc_enable(dev, tc_num);
4991a9643ea8Slogwang 
4992a9643ea8Slogwang 	/* Not negotiated is not an error case */
4993a9643ea8Slogwang 	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
4994a9643ea8Slogwang 		return 0;
4995a9643ea8Slogwang 
4996a9643ea8Slogwang 	PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
4997a9643ea8Slogwang 	return -EIO;
4998a9643ea8Slogwang }
4999a9643ea8Slogwang 
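/*
 * Editor's sketch, not part of the driver: per-priority pause through the
 * handler above. The watermark values are placeholders in KB and must pass
 * the same max_high_water check as link-level flow control.
 */
#include <string.h>
#include <rte_ethdev.h>

static int
example_enable_pfc(uint16_t port_id, uint8_t priority)
{
	struct rte_eth_pfc_conf pfc_conf;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.priority = priority;		/* mapped to a TC via the DCB map */
	pfc_conf.fc.mode = RTE_FC_FULL;
	pfc_conf.fc.pause_time = 0x680;		/* placeholder */
	pfc_conf.fc.high_water = 0x80;		/* placeholder, KB */
	pfc_conf.fc.low_water = 0x40;		/* placeholder, KB */

	return rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
}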
5000a9643ea8Slogwang static int
5001a9643ea8Slogwang ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
5002a9643ea8Slogwang 			  struct rte_eth_rss_reta_entry64 *reta_conf,
5003a9643ea8Slogwang 			  uint16_t reta_size)
5004a9643ea8Slogwang {
5005a9643ea8Slogwang 	uint16_t i, sp_reta_size;
5006a9643ea8Slogwang 	uint8_t j, mask;
5007a9643ea8Slogwang 	uint32_t reta, r;
5008a9643ea8Slogwang 	uint16_t idx, shift;
50094b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
5010a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5011a9643ea8Slogwang 	uint32_t reta_reg;
5012a9643ea8Slogwang 
5013a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
5014a9643ea8Slogwang 
5015a9643ea8Slogwang 	if (!ixgbe_rss_update_sp(hw->mac.type)) {
5016a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
5017a9643ea8Slogwang 			"NIC.");
5018a9643ea8Slogwang 		return -ENOTSUP;
5019a9643ea8Slogwang 	}
5020a9643ea8Slogwang 
5021a9643ea8Slogwang 	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5022a9643ea8Slogwang 	if (reta_size != sp_reta_size) {
5023a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
5024a9643ea8Slogwang 			"(%d) doesn't match the number hardware can supported "
50252bfe3f2eSlogwang 			"(%d)", reta_size, sp_reta_size);
5026a9643ea8Slogwang 		return -EINVAL;
5027a9643ea8Slogwang 	}
5028a9643ea8Slogwang 
5029a9643ea8Slogwang 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
5030a9643ea8Slogwang 		idx = i / RTE_RETA_GROUP_SIZE;
5031a9643ea8Slogwang 		shift = i % RTE_RETA_GROUP_SIZE;
5032a9643ea8Slogwang 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
5033a9643ea8Slogwang 						IXGBE_4_BIT_MASK);
5034a9643ea8Slogwang 		if (!mask)
5035a9643ea8Slogwang 			continue;
5036a9643ea8Slogwang 		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5037a9643ea8Slogwang 		if (mask == IXGBE_4_BIT_MASK)
5038a9643ea8Slogwang 			r = 0;
5039a9643ea8Slogwang 		else
5040a9643ea8Slogwang 			r = IXGBE_READ_REG(hw, reta_reg);
5041a9643ea8Slogwang 		for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
5042a9643ea8Slogwang 			if (mask & (0x1 << j))
5043a9643ea8Slogwang 				reta |= reta_conf[idx].reta[shift + j] <<
5044a9643ea8Slogwang 							(CHAR_BIT * j);
5045a9643ea8Slogwang 			else
5046a9643ea8Slogwang 				reta |= r & (IXGBE_8_BIT_MASK <<
5047a9643ea8Slogwang 						(CHAR_BIT * j));
5048a9643ea8Slogwang 		}
5049a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, reta_reg, reta);
5050a9643ea8Slogwang 	}
50511646932aSjfb8856606 	adapter->rss_reta_updated = 1;
5052a9643ea8Slogwang 
5053a9643ea8Slogwang 	return 0;
5054a9643ea8Slogwang }
5055a9643ea8Slogwang 
5056a9643ea8Slogwang static int
5057a9643ea8Slogwang ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
5058a9643ea8Slogwang 			 struct rte_eth_rss_reta_entry64 *reta_conf,
5059a9643ea8Slogwang 			 uint16_t reta_size)
5060a9643ea8Slogwang {
5061a9643ea8Slogwang 	uint16_t i, sp_reta_size;
5062a9643ea8Slogwang 	uint8_t j, mask;
5063a9643ea8Slogwang 	uint32_t reta;
5064a9643ea8Slogwang 	uint16_t idx, shift;
5065a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5066a9643ea8Slogwang 	uint32_t reta_reg;
5067a9643ea8Slogwang 
5068a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
5069a9643ea8Slogwang 	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5070a9643ea8Slogwang 	if (reta_size != sp_reta_size) {
5071a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
5072a9643ea8Slogwang 			"(%d) doesn't match the number hardware can supported "
50732bfe3f2eSlogwang 			"(%d)", reta_size, sp_reta_size);
5074a9643ea8Slogwang 		return -EINVAL;
5075a9643ea8Slogwang 	}
5076a9643ea8Slogwang 
5077a9643ea8Slogwang 	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
5078a9643ea8Slogwang 		idx = i / RTE_RETA_GROUP_SIZE;
5079a9643ea8Slogwang 		shift = i % RTE_RETA_GROUP_SIZE;
5080a9643ea8Slogwang 		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
5081a9643ea8Slogwang 						IXGBE_4_BIT_MASK);
5082a9643ea8Slogwang 		if (!mask)
5083a9643ea8Slogwang 			continue;
5084a9643ea8Slogwang 
5085a9643ea8Slogwang 		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5086a9643ea8Slogwang 		reta = IXGBE_READ_REG(hw, reta_reg);
5087a9643ea8Slogwang 		for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
5088a9643ea8Slogwang 			if (mask & (0x1 << j))
5089a9643ea8Slogwang 				reta_conf[idx].reta[shift + j] =
5090a9643ea8Slogwang 					((reta >> (CHAR_BIT * j)) &
5091a9643ea8Slogwang 						IXGBE_8_BIT_MASK);
5092a9643ea8Slogwang 		}
5093a9643ea8Slogwang 	}
5094a9643ea8Slogwang 
5095a9643ea8Slogwang 	return 0;
5096a9643ea8Slogwang }
5097a9643ea8Slogwang 
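/*
 * Editor's sketch, not part of the driver: steering every RETA entry to
 * queue 0 through the update handler above. reta_size must match the
 * device's table size (per ixgbe_reta_size_get(), typically 128 or 512).
 */
#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

static int
example_reta_all_to_queue0(uint16_t port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
	uint16_t i;

	if (reta_size > 512)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		/* Each 64-entry group updates only the slots set in mask. */
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
				1ULL << (i % RTE_RETA_GROUP_SIZE);
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = 0;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}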
50982bfe3f2eSlogwang static int
50994418919fSjohnjiang ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
5100a9643ea8Slogwang 				uint32_t index, uint32_t pool)
5101a9643ea8Slogwang {
5102a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5103a9643ea8Slogwang 	uint32_t enable_addr = 1;
5104a9643ea8Slogwang 
51052bfe3f2eSlogwang 	return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
51062bfe3f2eSlogwang 			     pool, enable_addr);
5107a9643ea8Slogwang }
5108a9643ea8Slogwang 
5109a9643ea8Slogwang static void
5110a9643ea8Slogwang ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
5111a9643ea8Slogwang {
5112a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5113a9643ea8Slogwang 
5114a9643ea8Slogwang 	ixgbe_clear_rar(hw, index);
5115a9643ea8Slogwang }
5116a9643ea8Slogwang 
5117d30ea906Sjfb8856606 static int
51184418919fSjohnjiang ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
5119a9643ea8Slogwang {
51202bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
51212bfe3f2eSlogwang 
5122a9643ea8Slogwang 	ixgbe_remove_rar(dev, 0);
51235af785ecSfengbojiang(姜凤波) 	ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
5124d30ea906Sjfb8856606 
5125d30ea906Sjfb8856606 	return 0;
51262bfe3f2eSlogwang }
51272bfe3f2eSlogwang 
51282bfe3f2eSlogwang static bool
51292bfe3f2eSlogwang is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
51302bfe3f2eSlogwang {
51312bfe3f2eSlogwang 	if (strcmp(dev->device->driver->name, drv->driver.name))
51322bfe3f2eSlogwang 		return false;
51332bfe3f2eSlogwang 
51342bfe3f2eSlogwang 	return true;
51352bfe3f2eSlogwang }
51362bfe3f2eSlogwang 
51372bfe3f2eSlogwang bool
51382bfe3f2eSlogwang is_ixgbe_supported(struct rte_eth_dev *dev)
51392bfe3f2eSlogwang {
51402bfe3f2eSlogwang 	return is_device_supported(dev, &rte_ixgbe_pmd);
5141a9643ea8Slogwang }
5142a9643ea8Slogwang 
5143a9643ea8Slogwang static int
5144a9643ea8Slogwang ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
5145a9643ea8Slogwang {
5146a9643ea8Slogwang 	uint32_t hlreg0;
5147a9643ea8Slogwang 	uint32_t maxfrs;
5148a9643ea8Slogwang 	struct ixgbe_hw *hw;
5149a9643ea8Slogwang 	struct rte_eth_dev_info dev_info;
51504418919fSjohnjiang 	uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
51512bfe3f2eSlogwang 	struct rte_eth_dev_data *dev_data = dev->data;
51524418919fSjohnjiang 	int ret;
5153a9643ea8Slogwang 
51544418919fSjohnjiang 	ret = ixgbe_dev_info_get(dev, &dev_info);
51554418919fSjohnjiang 	if (ret != 0)
51564418919fSjohnjiang 		return ret;
5157a9643ea8Slogwang 
5158a9643ea8Slogwang 	/* check that mtu is within the allowed range */
51594418919fSjohnjiang 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
5160a9643ea8Slogwang 		return -EINVAL;
5161a9643ea8Slogwang 
51622bfe3f2eSlogwang 	/* If the device is started, refuse an MTU that requires scattered
51632bfe3f2eSlogwang 	 * packet support when that feature has not been enabled beforehand.
5164a9643ea8Slogwang 	 */
51652bfe3f2eSlogwang 	if (dev_data->dev_started && !dev_data->scattered_rx &&
5166a9643ea8Slogwang 	    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
51672bfe3f2eSlogwang 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
51682bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Stop port first.");
5169a9643ea8Slogwang 		return -EINVAL;
51702bfe3f2eSlogwang 	}
5171a9643ea8Slogwang 
5172a9643ea8Slogwang 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5173a9643ea8Slogwang 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5174a9643ea8Slogwang 
5175a9643ea8Slogwang 	/* switch to jumbo mode if needed */
51764418919fSjohnjiang 	if (frame_size > RTE_ETHER_MAX_LEN) {
5177d30ea906Sjfb8856606 		dev->data->dev_conf.rxmode.offloads |=
5178d30ea906Sjfb8856606 			DEV_RX_OFFLOAD_JUMBO_FRAME;
5179a9643ea8Slogwang 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
5180a9643ea8Slogwang 	} else {
5181d30ea906Sjfb8856606 		dev->data->dev_conf.rxmode.offloads &=
5182d30ea906Sjfb8856606 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
5183a9643ea8Slogwang 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
5184a9643ea8Slogwang 	}
5185a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5186a9643ea8Slogwang 
5187a9643ea8Slogwang 	/* update max frame size */
5188a9643ea8Slogwang 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
5189a9643ea8Slogwang 
5190a9643ea8Slogwang 	maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
5191a9643ea8Slogwang 	maxfrs &= 0x0000FFFF;
5192a9643ea8Slogwang 	maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
5193a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
5194a9643ea8Slogwang 
5195a9643ea8Slogwang 	return 0;
5196a9643ea8Slogwang }
5197a9643ea8Slogwang 
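/*
 * Editor's sketch, not part of the driver: the handler above is reached via
 * rte_eth_dev_set_mtu(); jumbo mode and MAXFRS follow automatically once
 * mtu + IXGBE_ETH_OVERHEAD exceeds RTE_ETHER_MAX_LEN. 9000 is merely a
 * conventional jumbo MTU used here as a placeholder.
 */
#include <rte_ethdev.h>

static int
example_set_jumbo_mtu(uint16_t port_id)
{
	return rte_eth_dev_set_mtu(port_id, 9000);
}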
5198a9643ea8Slogwang /*
5199a9643ea8Slogwang  * Virtual Function operations
5200a9643ea8Slogwang  */
5201a9643ea8Slogwang static void
5202d30ea906Sjfb8856606 ixgbevf_intr_disable(struct rte_eth_dev *dev)
5203a9643ea8Slogwang {
5204d30ea906Sjfb8856606 	struct ixgbe_interrupt *intr =
5205d30ea906Sjfb8856606 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5206d30ea906Sjfb8856606 	struct ixgbe_hw *hw =
5207d30ea906Sjfb8856606 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5208d30ea906Sjfb8856606 
5209a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
5210a9643ea8Slogwang 
5211a9643ea8Slogwang 	/* Clear interrupt mask to stop from interrupts being generated */
5212a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
5213a9643ea8Slogwang 
5214a9643ea8Slogwang 	IXGBE_WRITE_FLUSH(hw);
5215d30ea906Sjfb8856606 
5216d30ea906Sjfb8856606 	/* Clear mask value. */
5217d30ea906Sjfb8856606 	intr->mask = 0;
5218a9643ea8Slogwang }
5219a9643ea8Slogwang 
5220a9643ea8Slogwang static void
5221d30ea906Sjfb8856606 ixgbevf_intr_enable(struct rte_eth_dev *dev)
5222a9643ea8Slogwang {
5223d30ea906Sjfb8856606 	struct ixgbe_interrupt *intr =
5224d30ea906Sjfb8856606 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5225d30ea906Sjfb8856606 	struct ixgbe_hw *hw =
5226d30ea906Sjfb8856606 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5227d30ea906Sjfb8856606 
5228a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
5229a9643ea8Slogwang 
5230a9643ea8Slogwang 	/* VF enable interrupt autoclean */
5231a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
5232a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
5233a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
5234a9643ea8Slogwang 
5235a9643ea8Slogwang 	IXGBE_WRITE_FLUSH(hw);
5236d30ea906Sjfb8856606 
5237d30ea906Sjfb8856606 	/* Save IXGBE_VTEIMS value to mask. */
5238d30ea906Sjfb8856606 	intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
5239a9643ea8Slogwang }
5240a9643ea8Slogwang 
5241a9643ea8Slogwang static int
5242a9643ea8Slogwang ixgbevf_dev_configure(struct rte_eth_dev *dev)
5243a9643ea8Slogwang {
5244a9643ea8Slogwang 	struct rte_eth_conf *conf = &dev->data->dev_conf;
52454b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
5246a9643ea8Slogwang 
5247a9643ea8Slogwang 	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
5248a9643ea8Slogwang 		     dev->data->port_id);
5249a9643ea8Slogwang 
52504418919fSjohnjiang 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
52514418919fSjohnjiang 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
52524418919fSjohnjiang 
5253a9643ea8Slogwang 	/*
5254a9643ea8Slogwang 	 * The VF has no ability to enable/disable HW CRC stripping;
5255a9643ea8Slogwang 	 * keep the behavior consistent with the host PF.
5256a9643ea8Slogwang 	 */
5257a9643ea8Slogwang #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
5258d30ea906Sjfb8856606 	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
5259a9643ea8Slogwang 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
5260d30ea906Sjfb8856606 		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
5261a9643ea8Slogwang 	}
5262a9643ea8Slogwang #else
5263d30ea906Sjfb8856606 	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
5264a9643ea8Slogwang 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
5265d30ea906Sjfb8856606 		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
5266a9643ea8Slogwang 	}
5267a9643ea8Slogwang #endif
5268a9643ea8Slogwang 
5269a9643ea8Slogwang 	/*
5270a9643ea8Slogwang 	 * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
5271a9643ea8Slogwang 	 * allocation or vector Rx preconditions we will reset it.
5272a9643ea8Slogwang 	 */
5273a9643ea8Slogwang 	adapter->rx_bulk_alloc_allowed = true;
5274a9643ea8Slogwang 	adapter->rx_vec_allowed = true;
5275a9643ea8Slogwang 
5276a9643ea8Slogwang 	return 0;
5277a9643ea8Slogwang }
5278a9643ea8Slogwang 
5279a9643ea8Slogwang static int
5280a9643ea8Slogwang ixgbevf_dev_start(struct rte_eth_dev *dev)
5281a9643ea8Slogwang {
5282a9643ea8Slogwang 	struct ixgbe_hw *hw =
5283a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5284a9643ea8Slogwang 	uint32_t intr_vector = 0;
52852bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
52862bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5287a9643ea8Slogwang 
5288a9643ea8Slogwang 	int err, mask = 0;
5289a9643ea8Slogwang 
5290a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
5291a9643ea8Slogwang 
5292d30ea906Sjfb8856606 	/* Stop the link setup handler before resetting the HW. */
52930c6bd470Sfengbojiang 	ixgbe_dev_wait_setup_link_complete(dev, 0);
5294d30ea906Sjfb8856606 
52952bfe3f2eSlogwang 	err = hw->mac.ops.reset_hw(hw);
52960c6bd470Sfengbojiang 
52970c6bd470Sfengbojiang 	/**
52980c6bd470Sfengbojiang 	 * On IXGBE_ERR_INVALID_MAC_ADDR, keep going and reuse the MAC
52990c6bd470Sfengbojiang 	 * address assigned during VF initialization.
53000c6bd470Sfengbojiang 	 */
53010c6bd470Sfengbojiang 	if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) {
53022bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
53032bfe3f2eSlogwang 		return err;
53042bfe3f2eSlogwang 	}
53050c6bd470Sfengbojiang 
5306a9643ea8Slogwang 	hw->mac.get_link_status = true;
5307a9643ea8Slogwang 
5308a9643ea8Slogwang 	/* negotiate mailbox API version to use with the PF. */
5309a9643ea8Slogwang 	ixgbevf_negotiate_api(hw);
5310a9643ea8Slogwang 
5311a9643ea8Slogwang 	ixgbevf_dev_tx_init(dev);
5312a9643ea8Slogwang 
5313a9643ea8Slogwang 	/* This can fail when allocating mbufs for descriptor rings */
5314a9643ea8Slogwang 	err = ixgbevf_dev_rx_init(dev);
5315a9643ea8Slogwang 	if (err) {
5316a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
5317a9643ea8Slogwang 		ixgbe_dev_clear_queues(dev);
5318a9643ea8Slogwang 		return err;
5319a9643ea8Slogwang 	}
5320a9643ea8Slogwang 
5321a9643ea8Slogwang 	/* Set vfta */
5322a9643ea8Slogwang 	ixgbevf_set_vfta_all(dev, 1);
5323a9643ea8Slogwang 
5324a9643ea8Slogwang 	/* Set HW strip */
5325a9643ea8Slogwang 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
5326a9643ea8Slogwang 		ETH_VLAN_EXTEND_MASK;
5327d30ea906Sjfb8856606 	err = ixgbevf_vlan_offload_config(dev, mask);
53282bfe3f2eSlogwang 	if (err) {
53292bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
53302bfe3f2eSlogwang 		ixgbe_dev_clear_queues(dev);
53312bfe3f2eSlogwang 		return err;
53322bfe3f2eSlogwang 	}
5333a9643ea8Slogwang 
5334a9643ea8Slogwang 	ixgbevf_dev_rxtx_start(dev);
5335a9643ea8Slogwang 
5336a9643ea8Slogwang 	/* check and configure queue intr-vector mapping */
53372bfe3f2eSlogwang 	if (rte_intr_cap_multiple(intr_handle) &&
53382bfe3f2eSlogwang 	    dev->data->dev_conf.intr_conf.rxq) {
53392bfe3f2eSlogwang 		/* According to the datasheet, only vectors 0/1/2 can be used;
53402bfe3f2eSlogwang 		 * for now a single vector is used for all Rx queues.
53412bfe3f2eSlogwang 		 */
53422bfe3f2eSlogwang 		intr_vector = 1;
5343a9643ea8Slogwang 		if (rte_intr_efd_enable(intr_handle, intr_vector))
5344a9643ea8Slogwang 			return -1;
5345a9643ea8Slogwang 	}
5346a9643ea8Slogwang 
5347a9643ea8Slogwang 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
5348a9643ea8Slogwang 		intr_handle->intr_vec =
5349a9643ea8Slogwang 			rte_zmalloc("intr_vec",
5350a9643ea8Slogwang 				    dev->data->nb_rx_queues * sizeof(int), 0);
5351a9643ea8Slogwang 		if (intr_handle->intr_vec == NULL) {
5352a9643ea8Slogwang 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
53532bfe3f2eSlogwang 				     " intr_vec", dev->data->nb_rx_queues);
5354a9643ea8Slogwang 			return -ENOMEM;
5355a9643ea8Slogwang 		}
5356a9643ea8Slogwang 	}
5357a9643ea8Slogwang 	ixgbevf_configure_msix(dev);
5358a9643ea8Slogwang 
53592bfe3f2eSlogwang 	/* When a VF port is bound to VFIO-PCI, only the miscellaneous interrupt
53602bfe3f2eSlogwang 	 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
53612bfe3f2eSlogwang 	 * If the previous VFIO interrupt mapping set in eth_ixgbevf_dev_init()
53622bfe3f2eSlogwang 	 * is not cleared, the following rte_intr_enable() will fail when it
53632bfe3f2eSlogwang 	 * tries to map Rx queue interrupts to other VFIO vectors.
53642bfe3f2eSlogwang 	 * So clear the uio/vfio intr/eventfd first to avoid failure.
53652bfe3f2eSlogwang 	 */
53662bfe3f2eSlogwang 	rte_intr_disable(intr_handle);
53672bfe3f2eSlogwang 
5368a9643ea8Slogwang 	rte_intr_enable(intr_handle);
5369a9643ea8Slogwang 
5370a9643ea8Slogwang 	/* Re-enable interrupt for VF */
5371d30ea906Sjfb8856606 	ixgbevf_intr_enable(dev);
5372d30ea906Sjfb8856606 
5373d30ea906Sjfb8856606 	/*
5374d30ea906Sjfb8856606 	 * Update link status right before return, because it may
5375d30ea906Sjfb8856606 	 * start link configuration process in a separate thread.
5376d30ea906Sjfb8856606 	 */
5377d30ea906Sjfb8856606 	ixgbevf_dev_link_update(dev, 0);
5378a9643ea8Slogwang 
53794418919fSjohnjiang 	hw->adapter_stopped = false;
53804418919fSjohnjiang 
5381a9643ea8Slogwang 	return 0;
5382a9643ea8Slogwang }
5383a9643ea8Slogwang 
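/*
 * Editor's sketch, not part of the driver: the VF start hook above and the
 * stop hook below are driven by the generic ethdev lifecycle. Assumes a
 * port that was already configured with queues set up.
 */
#include <rte_ethdev.h>

static int
example_vf_restart(uint16_t port_id)
{
	int ret;

	ret = rte_eth_dev_stop(port_id);	/* -> ixgbevf_dev_stop() */
	if (ret != 0)
		return ret;
	return rte_eth_dev_start(port_id);	/* -> ixgbevf_dev_start() */
}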
5384*2d9fd380Sjfb8856606 static int
5385a9643ea8Slogwang ixgbevf_dev_stop(struct rte_eth_dev *dev)
5386a9643ea8Slogwang {
5387a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
53884b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
53892bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
53902bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5391a9643ea8Slogwang 
53924418919fSjohnjiang 	if (hw->adapter_stopped)
5393*2d9fd380Sjfb8856606 		return 0;
53944418919fSjohnjiang 
5395a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
5396a9643ea8Slogwang 
53970c6bd470Sfengbojiang 	ixgbe_dev_wait_setup_link_complete(dev, 0);
5398d30ea906Sjfb8856606 
5399d30ea906Sjfb8856606 	ixgbevf_intr_disable(dev);
5400a9643ea8Slogwang 
5401*2d9fd380Sjfb8856606 	dev->data->dev_started = 0;
5402a9643ea8Slogwang 	hw->adapter_stopped = 1;
5403a9643ea8Slogwang 	ixgbe_stop_adapter(hw);
5404a9643ea8Slogwang 
5405a9643ea8Slogwang 	/*
5406a9643ea8Slogwang 	 * Clear what we set, but we still keep shadow_vfta to
5407a9643ea8Slogwang 	 * restore it after the device starts
5408a9643ea8Slogwang 	 */
5409a9643ea8Slogwang 	ixgbevf_set_vfta_all(dev, 0);
5410a9643ea8Slogwang 
5411a9643ea8Slogwang 	/* Clear stored conf */
5412a9643ea8Slogwang 	dev->data->scattered_rx = 0;
5413a9643ea8Slogwang 
5414a9643ea8Slogwang 	ixgbe_dev_clear_queues(dev);
5415a9643ea8Slogwang 
5416a9643ea8Slogwang 	/* Clean datapath event and queue/vec mapping */
5417a9643ea8Slogwang 	rte_intr_efd_disable(intr_handle);
5418a9643ea8Slogwang 	if (intr_handle->intr_vec != NULL) {
5419a9643ea8Slogwang 		rte_free(intr_handle->intr_vec);
5420a9643ea8Slogwang 		intr_handle->intr_vec = NULL;
5421a9643ea8Slogwang 	}
54221646932aSjfb8856606 
54231646932aSjfb8856606 	adapter->rss_reta_updated = 0;
5424*2d9fd380Sjfb8856606 
5425*2d9fd380Sjfb8856606 	return 0;
5426a9643ea8Slogwang }
5427a9643ea8Slogwang 
5428*2d9fd380Sjfb8856606 static int
5429a9643ea8Slogwang ixgbevf_dev_close(struct rte_eth_dev *dev)
5430a9643ea8Slogwang {
5431a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
54324418919fSjohnjiang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
54334418919fSjohnjiang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5434*2d9fd380Sjfb8856606 	int ret;
5435a9643ea8Slogwang 
5436a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
5437*2d9fd380Sjfb8856606 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
5438*2d9fd380Sjfb8856606 		return 0;
5439a9643ea8Slogwang 
5440a9643ea8Slogwang 	ixgbe_reset_hw(hw);
5441a9643ea8Slogwang 
5442*2d9fd380Sjfb8856606 	ret = ixgbevf_dev_stop(dev);
5443a9643ea8Slogwang 
5444a9643ea8Slogwang 	ixgbe_dev_free_queues(dev);
5445a9643ea8Slogwang 
5446a9643ea8Slogwang 	/**
5447a9643ea8Slogwang 	 * Remove the VF MAC address to ensure
5448a9643ea8Slogwang 	 * that the VF traffic goes to the PF
5449a9643ea8Slogwang 	 * after stop, close and detach of the VF
5450a9643ea8Slogwang 	 **/
5451a9643ea8Slogwang 	ixgbevf_remove_mac_addr(dev, 0);
54524418919fSjohnjiang 
54534418919fSjohnjiang 	rte_intr_disable(intr_handle);
54544418919fSjohnjiang 	rte_intr_callback_unregister(intr_handle,
54554418919fSjohnjiang 				     ixgbevf_dev_interrupt_handler, dev);
5456*2d9fd380Sjfb8856606 
5457*2d9fd380Sjfb8856606 	return ret;
5458a9643ea8Slogwang }
5459a9643ea8Slogwang 
54602bfe3f2eSlogwang /*
54612bfe3f2eSlogwang  * Reset VF device
54622bfe3f2eSlogwang  */
54632bfe3f2eSlogwang static int
54642bfe3f2eSlogwang ixgbevf_dev_reset(struct rte_eth_dev *dev)
54652bfe3f2eSlogwang {
54662bfe3f2eSlogwang 	int ret;
54672bfe3f2eSlogwang 
54682bfe3f2eSlogwang 	ret = eth_ixgbevf_dev_uninit(dev);
54692bfe3f2eSlogwang 	if (ret)
54702bfe3f2eSlogwang 		return ret;
54712bfe3f2eSlogwang 
54722bfe3f2eSlogwang 	ret = eth_ixgbevf_dev_init(dev);
54732bfe3f2eSlogwang 
54742bfe3f2eSlogwang 	return ret;
54752bfe3f2eSlogwang }
54762bfe3f2eSlogwang 
5477a9643ea8Slogwang static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
5478a9643ea8Slogwang {
5479a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5480a9643ea8Slogwang 	struct ixgbe_vfta *shadow_vfta =
5481a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5482a9643ea8Slogwang 	int i = 0, j = 0, vfta = 0, mask = 1;
5483a9643ea8Slogwang 
5484a9643ea8Slogwang 	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
5485a9643ea8Slogwang 		vfta = shadow_vfta->vfta[i];
5486a9643ea8Slogwang 		if (vfta) {
5487a9643ea8Slogwang 			mask = 1;
5488a9643ea8Slogwang 			for (j = 0; j < 32; j++) {
5489a9643ea8Slogwang 				if (vfta & mask)
5490a9643ea8Slogwang 					ixgbe_set_vfta(hw, (i<<5)+j, 0,
5491a9643ea8Slogwang 						       on, false);
5492a9643ea8Slogwang 				mask <<= 1;
5493a9643ea8Slogwang 			}
5494a9643ea8Slogwang 		}
5495a9643ea8Slogwang 	}
5496a9643ea8Slogwang 
5497a9643ea8Slogwang }
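
/*
 * Worked example of the shadow VFTA walk above (illustrative values):
 * if shadow_vfta->vfta[2] has bit 5 set, the loop reconstructs
 * vlan_id = (i << 5) + j = (2 << 5) + 5 = 69 and re-programs that
 * VLAN filter in hardware with the requested on/off state.
 */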
5498a9643ea8Slogwang 
5499a9643ea8Slogwang static int
5500a9643ea8Slogwang ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
5501a9643ea8Slogwang {
5502a9643ea8Slogwang 	struct ixgbe_hw *hw =
5503a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5504a9643ea8Slogwang 	struct ixgbe_vfta *shadow_vfta =
5505a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
5506a9643ea8Slogwang 	uint32_t vid_idx = 0;
5507a9643ea8Slogwang 	uint32_t vid_bit = 0;
5508a9643ea8Slogwang 	int ret = 0;
5509a9643ea8Slogwang 
5510a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
5511a9643ea8Slogwang 
5512a9643ea8Slogwang 	/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
5513a9643ea8Slogwang 	ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
5514a9643ea8Slogwang 	if (ret) {
5515a9643ea8Slogwang 		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
5516a9643ea8Slogwang 		return ret;
5517a9643ea8Slogwang 	}
5518a9643ea8Slogwang 	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
5519a9643ea8Slogwang 	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
5520a9643ea8Slogwang 
5521a9643ea8Slogwang 	/* Save what we set and restore it after device reset */
5522a9643ea8Slogwang 	if (on)
5523a9643ea8Slogwang 		shadow_vfta->vfta[vid_idx] |= vid_bit;
5524a9643ea8Slogwang 	else
5525a9643ea8Slogwang 		shadow_vfta->vfta[vid_idx] &= ~vid_bit;
5526a9643ea8Slogwang 
5527a9643ea8Slogwang 	return 0;
5528a9643ea8Slogwang }
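
/*
 * Worked example of the shadow bookkeeping above (illustrative values):
 * for vlan_id = 100, vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, so bit 4 of shadow_vfta->vfta[3]
 * mirrors the hardware VFTA entry across a device reset.
 */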
5529a9643ea8Slogwang 
5530a9643ea8Slogwang static void
5531a9643ea8Slogwang ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
5532a9643ea8Slogwang {
5533a9643ea8Slogwang 	struct ixgbe_hw *hw =
5534a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5535a9643ea8Slogwang 	uint32_t ctrl;
5536a9643ea8Slogwang 
5537a9643ea8Slogwang 	PMD_INIT_FUNC_TRACE();
5538a9643ea8Slogwang 
5539a9643ea8Slogwang 	if (queue >= hw->mac.max_rx_queues)
5540a9643ea8Slogwang 		return;
5541a9643ea8Slogwang 
5542a9643ea8Slogwang 	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
5543a9643ea8Slogwang 	if (on)
5544a9643ea8Slogwang 		ctrl |= IXGBE_RXDCTL_VME;
5545a9643ea8Slogwang 	else
5546a9643ea8Slogwang 		ctrl &= ~IXGBE_RXDCTL_VME;
5547a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
5548a9643ea8Slogwang 
5549a9643ea8Slogwang 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
5550a9643ea8Slogwang }
5551a9643ea8Slogwang 
55522bfe3f2eSlogwang static int
5553d30ea906Sjfb8856606 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
5554a9643ea8Slogwang {
5555d30ea906Sjfb8856606 	struct ixgbe_rx_queue *rxq;
5556a9643ea8Slogwang 	uint16_t i;
5557a9643ea8Slogwang 	int on = 0;
5558a9643ea8Slogwang 
5559a9643ea8Slogwang 	/* The VF function only supports the HW strip feature; others are not supported */
5560a9643ea8Slogwang 	if (mask & ETH_VLAN_STRIP_MASK) {
5561d30ea906Sjfb8856606 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
5562d30ea906Sjfb8856606 			rxq = dev->data->rx_queues[i];
5563d30ea906Sjfb8856606 			on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
5564a9643ea8Slogwang 			ixgbevf_vlan_strip_queue_set(dev, i, on);
5565a9643ea8Slogwang 		}
5566d30ea906Sjfb8856606 	}
5567d30ea906Sjfb8856606 
5568d30ea906Sjfb8856606 	return 0;
5569d30ea906Sjfb8856606 }
5570d30ea906Sjfb8856606 
5571d30ea906Sjfb8856606 static int
5572d30ea906Sjfb8856606 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
5573d30ea906Sjfb8856606 {
5574d30ea906Sjfb8856606 	ixgbe_config_vlan_strip_on_all_queues(dev, mask);
5575d30ea906Sjfb8856606 
5576d30ea906Sjfb8856606 	ixgbevf_vlan_offload_config(dev, mask);
55772bfe3f2eSlogwang 
55782bfe3f2eSlogwang 	return 0;
5579a9643ea8Slogwang }
5580a9643ea8Slogwang 
55812bfe3f2eSlogwang int
55822bfe3f2eSlogwang ixgbe_vt_check(struct ixgbe_hw *hw)
5583a9643ea8Slogwang {
5584a9643ea8Slogwang 	uint32_t reg_val;
5585a9643ea8Slogwang 
55862bfe3f2eSlogwang 	/* if Virtualization Technology is enabled */
5587a9643ea8Slogwang 	reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
5588a9643ea8Slogwang 	if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
55892bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
5590a9643ea8Slogwang 		return -1;
5591a9643ea8Slogwang 	}
5592a9643ea8Slogwang 
5593a9643ea8Slogwang 	return 0;
5594a9643ea8Slogwang }
5595a9643ea8Slogwang 
5596a9643ea8Slogwang static uint32_t
55974418919fSjohnjiang ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr)
5598a9643ea8Slogwang {
5599a9643ea8Slogwang 	uint32_t vector = 0;
5600a9643ea8Slogwang 
5601a9643ea8Slogwang 	switch (hw->mac.mc_filter_type) {
5602a9643ea8Slogwang 	case 0:   /* use bits [47:36] of the address */
5603a9643ea8Slogwang 		vector = ((uc_addr->addr_bytes[4] >> 4) |
5604a9643ea8Slogwang 			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
5605a9643ea8Slogwang 		break;
5606a9643ea8Slogwang 	case 1:   /* use bits [46:35] of the address */
5607a9643ea8Slogwang 		vector = ((uc_addr->addr_bytes[4] >> 3) |
5608a9643ea8Slogwang 			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
5609a9643ea8Slogwang 		break;
5610a9643ea8Slogwang 	case 2:   /* use bits [45:34] of the address */
5611a9643ea8Slogwang 		vector = ((uc_addr->addr_bytes[4] >> 2) |
5612a9643ea8Slogwang 			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
5613a9643ea8Slogwang 		break;
5614a9643ea8Slogwang 	case 3:   /* use bits [43:32] of the address */
5615a9643ea8Slogwang 		vector = ((uc_addr->addr_bytes[4]) |
5616a9643ea8Slogwang 			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
5617a9643ea8Slogwang 		break;
5618a9643ea8Slogwang 	default:  /* Invalid mc_filter_type */
5619a9643ea8Slogwang 		break;
5620a9643ea8Slogwang 	}
5621a9643ea8Slogwang 
5622a9643ea8Slogwang 	/* the vector can only be 12 bits or the boundary will be exceeded */
5623a9643ea8Slogwang 	vector &= 0xFFF;
5624a9643ea8Slogwang 	return vector;
5625a9643ea8Slogwang }
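
/*
 * Worked example for mc_filter_type == 0 (illustrative address bytes):
 * with addr_bytes[4] = 0x12 and addr_bytes[5] = 0x34,
 * vector = (0x12 >> 4) | (0x34 << 4) = 0x001 | 0x340 = 0x341,
 * i.e. bits [47:36] of the address, masked down to 12 bits.
 */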
5626a9643ea8Slogwang 
5627a9643ea8Slogwang static int
56284418919fSjohnjiang ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
56294418919fSjohnjiang 			struct rte_ether_addr *mac_addr, uint8_t on)
5630a9643ea8Slogwang {
5631a9643ea8Slogwang 	uint32_t vector;
5632a9643ea8Slogwang 	uint32_t uta_idx;
5633a9643ea8Slogwang 	uint32_t reg_val;
5634a9643ea8Slogwang 	uint32_t uta_shift;
5635a9643ea8Slogwang 	uint32_t rc;
5636a9643ea8Slogwang 	const uint32_t ixgbe_uta_idx_mask = 0x7F;
5637a9643ea8Slogwang 	const uint32_t ixgbe_uta_bit_shift = 5;
5638a9643ea8Slogwang 	const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
5639a9643ea8Slogwang 	const uint32_t bit1 = 0x1;
5640a9643ea8Slogwang 
5641a9643ea8Slogwang 	struct ixgbe_hw *hw =
5642a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5643a9643ea8Slogwang 	struct ixgbe_uta_info *uta_info =
5644a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5645a9643ea8Slogwang 
5646a9643ea8Slogwang 	/* The UTA table only exists on 82599 hardware and newer */
5647a9643ea8Slogwang 	if (hw->mac.type < ixgbe_mac_82599EB)
5648a9643ea8Slogwang 		return -ENOTSUP;
5649a9643ea8Slogwang 
5650a9643ea8Slogwang 	vector = ixgbe_uta_vector(hw, mac_addr);
5651a9643ea8Slogwang 	uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
5652a9643ea8Slogwang 	uta_shift = vector & ixgbe_uta_bit_mask;
5653a9643ea8Slogwang 
5654a9643ea8Slogwang 	rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
5655a9643ea8Slogwang 	if (rc == on)
5656a9643ea8Slogwang 		return 0;
5657a9643ea8Slogwang 
5658a9643ea8Slogwang 	reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
5659a9643ea8Slogwang 	if (on) {
5660a9643ea8Slogwang 		uta_info->uta_in_use++;
5661a9643ea8Slogwang 		reg_val |= (bit1 << uta_shift);
5662a9643ea8Slogwang 		uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
5663a9643ea8Slogwang 	} else {
5664a9643ea8Slogwang 		uta_info->uta_in_use--;
5665a9643ea8Slogwang 		reg_val &= ~(bit1 << uta_shift);
5666a9643ea8Slogwang 		uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
5667a9643ea8Slogwang 	}
5668a9643ea8Slogwang 
5669a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
5670a9643ea8Slogwang 
5671a9643ea8Slogwang 	if (uta_info->uta_in_use > 0)
5672a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
5673a9643ea8Slogwang 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
5674a9643ea8Slogwang 	else
5675a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
5676a9643ea8Slogwang 
5677a9643ea8Slogwang 	return 0;
5678a9643ea8Slogwang }
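
/*
 * Worked example of the UTA indexing above (illustrative vector):
 * vector = 0x341 gives uta_idx = (0x341 >> 5) & 0x7F = 26 and
 * uta_shift = 0x341 & 0x1F = 1, so bit 1 of UTA register 26 (and of
 * uta_shadow[26]) represents this unicast hash bucket.
 */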
5679a9643ea8Slogwang 
5680a9643ea8Slogwang static int
5681a9643ea8Slogwang ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
5682a9643ea8Slogwang {
5683a9643ea8Slogwang 	int i;
5684a9643ea8Slogwang 	struct ixgbe_hw *hw =
5685a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5686a9643ea8Slogwang 	struct ixgbe_uta_info *uta_info =
5687a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
5688a9643ea8Slogwang 
5689a9643ea8Slogwang 	/* The UTA table only exists on 82599 hardware and newer */
5690a9643ea8Slogwang 	if (hw->mac.type < ixgbe_mac_82599EB)
5691a9643ea8Slogwang 		return -ENOTSUP;
5692a9643ea8Slogwang 
5693a9643ea8Slogwang 	if (on) {
5694a9643ea8Slogwang 		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5695a9643ea8Slogwang 			uta_info->uta_shadow[i] = ~0;
5696a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
5697a9643ea8Slogwang 		}
5698a9643ea8Slogwang 	} else {
5699a9643ea8Slogwang 		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
5700a9643ea8Slogwang 			uta_info->uta_shadow[i] = 0;
5701a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
5702a9643ea8Slogwang 		}
5703a9643ea8Slogwang 	}
5704a9643ea8Slogwang 	return 0;
5705a9643ea8Slogwang 
5706a9643ea8Slogwang }
5707a9643ea8Slogwang 
5708a9643ea8Slogwang uint32_t
5709a9643ea8Slogwang ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
5710a9643ea8Slogwang {
5711a9643ea8Slogwang 	uint32_t new_val = orig_val;
5712a9643ea8Slogwang 
5713a9643ea8Slogwang 	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
5714a9643ea8Slogwang 		new_val |= IXGBE_VMOLR_AUPE;
5715a9643ea8Slogwang 	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
5716a9643ea8Slogwang 		new_val |= IXGBE_VMOLR_ROMPE;
5717a9643ea8Slogwang 	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
5718a9643ea8Slogwang 		new_val |= IXGBE_VMOLR_ROPE;
5719a9643ea8Slogwang 	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
5720a9643ea8Slogwang 		new_val |= IXGBE_VMOLR_BAM;
5721a9643ea8Slogwang 	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
5722a9643ea8Slogwang 		new_val |= IXGBE_VMOLR_MPE;
5723a9643ea8Slogwang 
5724a9643ea8Slogwang 	return new_val;
5725a9643ea8Slogwang }
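
/*
 * Example mapping (hypothetical rx_mask): a pool configured with
 * ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST gets
 * IXGBE_VMOLR_AUPE and IXGBE_VMOLR_BAM OR-ed into its VMOLR value,
 * while any bits already present in orig_val are preserved.
 */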
5726a9643ea8Slogwang 
5727a9643ea8Slogwang #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
5728a9643ea8Slogwang #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
5729a9643ea8Slogwang #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
5730a9643ea8Slogwang #define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
5731a9643ea8Slogwang #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
5732a9643ea8Slogwang 	((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
5733a9643ea8Slogwang 	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
5734a9643ea8Slogwang 
5735a9643ea8Slogwang static int
5736a9643ea8Slogwang ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
5737a9643ea8Slogwang 		      struct rte_eth_mirror_conf *mirror_conf,
5738a9643ea8Slogwang 		      uint8_t rule_id, uint8_t on)
5739a9643ea8Slogwang {
5740a9643ea8Slogwang 	uint32_t mr_ctl, vlvf;
5741a9643ea8Slogwang 	uint32_t mp_lsb = 0;
5742a9643ea8Slogwang 	uint32_t mv_msb = 0;
5743a9643ea8Slogwang 	uint32_t mv_lsb = 0;
5744a9643ea8Slogwang 	uint32_t mp_msb = 0;
5745a9643ea8Slogwang 	uint8_t i = 0;
5746a9643ea8Slogwang 	int reg_index = 0;
5747a9643ea8Slogwang 	uint64_t vlan_mask = 0;
5748a9643ea8Slogwang 
5749a9643ea8Slogwang 	const uint8_t pool_mask_offset = 32;
5750a9643ea8Slogwang 	const uint8_t vlan_mask_offset = 32;
5751a9643ea8Slogwang 	const uint8_t dst_pool_offset = 8;
5752a9643ea8Slogwang 	const uint8_t rule_mr_offset  = 4;
5753a9643ea8Slogwang 	const uint8_t mirror_rule_mask = 0x0F;
5754a9643ea8Slogwang 
5755a9643ea8Slogwang 	struct ixgbe_mirror_info *mr_info =
5756a9643ea8Slogwang 			(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5757a9643ea8Slogwang 	struct ixgbe_hw *hw =
5758a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5759a9643ea8Slogwang 	uint8_t mirror_type = 0;
5760a9643ea8Slogwang 
57612bfe3f2eSlogwang 	if (ixgbe_vt_check(hw) < 0)
5762a9643ea8Slogwang 		return -ENOTSUP;
5763a9643ea8Slogwang 
5764a9643ea8Slogwang 	if (rule_id >= IXGBE_MAX_MIRROR_RULES)
5765a9643ea8Slogwang 		return -EINVAL;
5766a9643ea8Slogwang 
5767a9643ea8Slogwang 	if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
5768a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
5769a9643ea8Slogwang 			    mirror_conf->rule_type);
5770a9643ea8Slogwang 		return -EINVAL;
5771a9643ea8Slogwang 	}
5772a9643ea8Slogwang 
5773a9643ea8Slogwang 	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5774a9643ea8Slogwang 		mirror_type |= IXGBE_MRCTL_VLME;
57752bfe3f2eSlogwang 		/* Check if vlan id is valid and find corresponding VLAN ID
57762bfe3f2eSlogwang 		 * index in VLVF
57772bfe3f2eSlogwang 		 */
5778a9643ea8Slogwang 		for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
5779a9643ea8Slogwang 			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
57802bfe3f2eSlogwang 				/* search the pool vlan filter index
57812bfe3f2eSlogwang 				 * related to this vlan id
57822bfe3f2eSlogwang 				 */
57832bfe3f2eSlogwang 				reg_index = ixgbe_find_vlvf_slot(
57842bfe3f2eSlogwang 						hw,
5785a9643ea8Slogwang 						mirror_conf->vlan.vlan_id[i],
5786a9643ea8Slogwang 						false);
5787a9643ea8Slogwang 				if (reg_index < 0)
5788a9643ea8Slogwang 					return -EINVAL;
57892bfe3f2eSlogwang 				vlvf = IXGBE_READ_REG(hw,
57902bfe3f2eSlogwang 						      IXGBE_VLVF(reg_index));
5791a9643ea8Slogwang 				if ((vlvf & IXGBE_VLVF_VIEN) &&
5792a9643ea8Slogwang 				    ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
5793a9643ea8Slogwang 				      mirror_conf->vlan.vlan_id[i]))
5794a9643ea8Slogwang 					vlan_mask |= (1ULL << reg_index);
5795a9643ea8Slogwang 				else
5796a9643ea8Slogwang 					return -EINVAL;
5797a9643ea8Slogwang 			}
5798a9643ea8Slogwang 		}
5799a9643ea8Slogwang 
5800a9643ea8Slogwang 		if (on) {
5801a9643ea8Slogwang 			mv_lsb = vlan_mask & 0xFFFFFFFF;
5802a9643ea8Slogwang 			mv_msb = vlan_mask >> vlan_mask_offset;
5803a9643ea8Slogwang 
5804a9643ea8Slogwang 			mr_info->mr_conf[rule_id].vlan.vlan_mask =
5805a9643ea8Slogwang 						mirror_conf->vlan.vlan_mask;
5806a9643ea8Slogwang 			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
5807a9643ea8Slogwang 				if (mirror_conf->vlan.vlan_mask & (1ULL << i))
5808a9643ea8Slogwang 					mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
5809a9643ea8Slogwang 						mirror_conf->vlan.vlan_id[i];
5810a9643ea8Slogwang 			}
5811a9643ea8Slogwang 		} else {
5812a9643ea8Slogwang 			mv_lsb = 0;
5813a9643ea8Slogwang 			mv_msb = 0;
5814a9643ea8Slogwang 			mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
5815a9643ea8Slogwang 			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
5816a9643ea8Slogwang 				mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
5817a9643ea8Slogwang 		}
5818a9643ea8Slogwang 	}
5819a9643ea8Slogwang 
58202bfe3f2eSlogwang 	/**
5821a9643ea8Slogwang 	 * if pool mirroring is enabled, write the related pool mask register;
5822a9643ea8Slogwang 	 * if it is disabled, clear the PFMRVM register
5823a9643ea8Slogwang 	 */
5824a9643ea8Slogwang 	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5825a9643ea8Slogwang 		mirror_type |= IXGBE_MRCTL_VPME;
5826a9643ea8Slogwang 		if (on) {
5827a9643ea8Slogwang 			mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
5828a9643ea8Slogwang 			mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
5829a9643ea8Slogwang 			mr_info->mr_conf[rule_id].pool_mask =
5830a9643ea8Slogwang 					mirror_conf->pool_mask;
5831a9643ea8Slogwang 
5832a9643ea8Slogwang 		} else {
5833a9643ea8Slogwang 			mp_lsb = 0;
5834a9643ea8Slogwang 			mp_msb = 0;
5835a9643ea8Slogwang 			mr_info->mr_conf[rule_id].pool_mask = 0;
5836a9643ea8Slogwang 		}
5837a9643ea8Slogwang 	}
5838a9643ea8Slogwang 	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
5839a9643ea8Slogwang 		mirror_type |= IXGBE_MRCTL_UPME;
5840a9643ea8Slogwang 	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
5841a9643ea8Slogwang 		mirror_type |= IXGBE_MRCTL_DPME;
5842a9643ea8Slogwang 
5843a9643ea8Slogwang 	/* read the mirror control register and recalculate it */
5844a9643ea8Slogwang 	mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
5845a9643ea8Slogwang 
5846a9643ea8Slogwang 	if (on) {
5847a9643ea8Slogwang 		mr_ctl |= mirror_type;
5848a9643ea8Slogwang 		mr_ctl &= mirror_rule_mask;
5849a9643ea8Slogwang 		mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
58502bfe3f2eSlogwang 	} else {
5851a9643ea8Slogwang 		mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
58522bfe3f2eSlogwang 	}
5853a9643ea8Slogwang 
5854a9643ea8Slogwang 	mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
5855a9643ea8Slogwang 	mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
5856a9643ea8Slogwang 
5857a9643ea8Slogwang 	/* write mirror control register */
5858a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5859a9643ea8Slogwang 
5860a9643ea8Slogwang 	/* write pool mirror control register */
58612bfe3f2eSlogwang 	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
5862a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
5863a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
5864a9643ea8Slogwang 				mp_msb);
5865a9643ea8Slogwang 	}
5866a9643ea8Slogwang 	/* write VLAN mirror control register */
58672bfe3f2eSlogwang 	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
5868a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
5869a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
5870a9643ea8Slogwang 				mv_msb);
5871a9643ea8Slogwang 	}
5872a9643ea8Slogwang 
5873a9643ea8Slogwang 	return 0;
5874a9643ea8Slogwang }
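
/*
 * Usage sketch (hypothetical values, assuming the legacy mirror API of
 * this release): mirror all traffic of pools 0 and 1 to pool 3 through
 * mirror rule 0.
 *
 *	struct rte_eth_mirror_conf conf = {
 *		.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP,
 *		.dst_pool = 3,
 *		.pool_mask = 0x3,
 *	};
 *	ixgbe_mirror_rule_set(dev, &conf, 0, 1);
 */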
5875a9643ea8Slogwang 
5876a9643ea8Slogwang static int
5877a9643ea8Slogwang ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
5878a9643ea8Slogwang {
5879a9643ea8Slogwang 	int mr_ctl = 0;
5880a9643ea8Slogwang 	uint32_t lsb_val = 0;
5881a9643ea8Slogwang 	uint32_t msb_val = 0;
5882a9643ea8Slogwang 	const uint8_t rule_mr_offset = 4;
5883a9643ea8Slogwang 
5884a9643ea8Slogwang 	struct ixgbe_hw *hw =
5885a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5886a9643ea8Slogwang 	struct ixgbe_mirror_info *mr_info =
5887a9643ea8Slogwang 		(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
5888a9643ea8Slogwang 
58892bfe3f2eSlogwang 	if (ixgbe_vt_check(hw) < 0)
5890a9643ea8Slogwang 		return -ENOTSUP;
5891a9643ea8Slogwang 
58922bfe3f2eSlogwang 	if (rule_id >= IXGBE_MAX_MIRROR_RULES)
58932bfe3f2eSlogwang 		return -EINVAL;
58942bfe3f2eSlogwang 
5895a9643ea8Slogwang 	memset(&mr_info->mr_conf[rule_id], 0,
5896a9643ea8Slogwang 	       sizeof(struct rte_eth_mirror_conf));
5897a9643ea8Slogwang 
5898a9643ea8Slogwang 	/* clear PFVMCTL register */
5899a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
5900a9643ea8Slogwang 
5901a9643ea8Slogwang 	/* clear pool mask register */
5902a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
5903a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
5904a9643ea8Slogwang 
5905a9643ea8Slogwang 	/* clear vlan mask register */
5906a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
5907a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
5908a9643ea8Slogwang 
5909a9643ea8Slogwang 	return 0;
5910a9643ea8Slogwang }
5911a9643ea8Slogwang 
5912a9643ea8Slogwang static int
5913a9643ea8Slogwang ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5914a9643ea8Slogwang {
59152bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
59162bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5917d30ea906Sjfb8856606 	struct ixgbe_interrupt *intr =
5918d30ea906Sjfb8856606 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5919a9643ea8Slogwang 	struct ixgbe_hw *hw =
5920a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
59212bfe3f2eSlogwang 	uint32_t vec = IXGBE_MISC_VEC_ID;
5922a9643ea8Slogwang 
59232bfe3f2eSlogwang 	if (rte_intr_allow_others(intr_handle))
59242bfe3f2eSlogwang 		vec = IXGBE_RX_VEC_START;
5925d30ea906Sjfb8856606 	intr->mask |= (1 << vec);
5926a9643ea8Slogwang 	RTE_SET_USED(queue_id);
5927d30ea906Sjfb8856606 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
5928a9643ea8Slogwang 
59294418919fSjohnjiang 	rte_intr_ack(intr_handle);
5930a9643ea8Slogwang 
5931a9643ea8Slogwang 	return 0;
5932a9643ea8Slogwang }
5933a9643ea8Slogwang 
5934a9643ea8Slogwang static int
5935a9643ea8Slogwang ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5936a9643ea8Slogwang {
5937d30ea906Sjfb8856606 	struct ixgbe_interrupt *intr =
5938d30ea906Sjfb8856606 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5939a9643ea8Slogwang 	struct ixgbe_hw *hw =
5940a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
59412bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
59422bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
59432bfe3f2eSlogwang 	uint32_t vec = IXGBE_MISC_VEC_ID;
5944a9643ea8Slogwang 
59452bfe3f2eSlogwang 	if (rte_intr_allow_others(intr_handle))
59462bfe3f2eSlogwang 		vec = IXGBE_RX_VEC_START;
5947d30ea906Sjfb8856606 	intr->mask &= ~(1 << vec);
5948a9643ea8Slogwang 	RTE_SET_USED(queue_id);
5949d30ea906Sjfb8856606 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
5950a9643ea8Slogwang 
5951a9643ea8Slogwang 	return 0;
5952a9643ea8Slogwang }
5953a9643ea8Slogwang 
5954a9643ea8Slogwang static int
5955a9643ea8Slogwang ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5956a9643ea8Slogwang {
59572bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
59582bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5959a9643ea8Slogwang 	uint32_t mask;
5960a9643ea8Slogwang 	struct ixgbe_hw *hw =
5961a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5962a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
5963a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5964a9643ea8Slogwang 
5965a9643ea8Slogwang 	if (queue_id < 16) {
5966a9643ea8Slogwang 		ixgbe_disable_intr(hw);
5967a9643ea8Slogwang 		intr->mask |= (1 << queue_id);
5968a9643ea8Slogwang 		ixgbe_enable_intr(dev);
5969a9643ea8Slogwang 	} else if (queue_id < 32) {
5970a9643ea8Slogwang 		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5971a9643ea8Slogwang 		mask &= (1 << queue_id);
5972a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
5973a9643ea8Slogwang 	} else if (queue_id < 64) {
5974a9643ea8Slogwang 		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
5975a9643ea8Slogwang 		mask &= (1 << (queue_id - 32));
5976a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
5977a9643ea8Slogwang 	}
59784418919fSjohnjiang 	rte_intr_ack(intr_handle);
5979a9643ea8Slogwang 
5980a9643ea8Slogwang 	return 0;
5981a9643ea8Slogwang }
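
/*
 * Worked example of the EIMS banking above (illustrative queue id):
 * queue_id = 40 falls in the 32..63 range, so the function reads
 * IXGBE_EIMS_EX(1) and operates on bit (40 - 32) = 8 of that register.
 */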
5982a9643ea8Slogwang 
5983a9643ea8Slogwang static int
5984a9643ea8Slogwang ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5985a9643ea8Slogwang {
5986a9643ea8Slogwang 	uint32_t mask;
5987a9643ea8Slogwang 	struct ixgbe_hw *hw =
5988a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5989a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
5990a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
5991a9643ea8Slogwang 
5992a9643ea8Slogwang 	if (queue_id < 16) {
5993a9643ea8Slogwang 		ixgbe_disable_intr(hw);
5994a9643ea8Slogwang 		intr->mask &= ~(1 << queue_id);
5995a9643ea8Slogwang 		ixgbe_enable_intr(dev);
5996a9643ea8Slogwang 	} else if (queue_id < 32) {
5997a9643ea8Slogwang 		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
5998a9643ea8Slogwang 		mask &= ~(1 << queue_id);
5999a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
6000a9643ea8Slogwang 	} else if (queue_id < 64) {
6001a9643ea8Slogwang 		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
6002a9643ea8Slogwang 		mask &= ~(1 << (queue_id - 32));
6003a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
6004a9643ea8Slogwang 	}
6005a9643ea8Slogwang 
6006a9643ea8Slogwang 	return 0;
6007a9643ea8Slogwang }
6008a9643ea8Slogwang 
6009a9643ea8Slogwang static void
6010a9643ea8Slogwang ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
6011a9643ea8Slogwang 		     uint8_t queue, uint8_t msix_vector)
6012a9643ea8Slogwang {
6013a9643ea8Slogwang 	uint32_t tmp, idx;
6014a9643ea8Slogwang 
6015a9643ea8Slogwang 	if (direction == -1) {
6016a9643ea8Slogwang 		/* other causes */
6017a9643ea8Slogwang 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
6018a9643ea8Slogwang 		tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
6019a9643ea8Slogwang 		tmp &= ~0xFF;
6020a9643ea8Slogwang 		tmp |= msix_vector;
6021a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
6022a9643ea8Slogwang 	} else {
6023a9643ea8Slogwang 		/* rx or tx cause */
6024a9643ea8Slogwang 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
6025a9643ea8Slogwang 		idx = ((16 * (queue & 1)) + (8 * direction));
6026a9643ea8Slogwang 		tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
6027a9643ea8Slogwang 		tmp &= ~(0xFF << idx);
6028a9643ea8Slogwang 		tmp |= (msix_vector << idx);
6029a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
6030a9643ea8Slogwang 	}
6031a9643ea8Slogwang }
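
/*
 * Worked example of the VF IVAR layout above (illustrative values):
 * for a Tx cause (direction = 1) on queue 3, the byte offset is
 * idx = 16 * (3 & 1) + 8 * 1 = 24, so the vector lands in bits [31:24]
 * of VTIVAR(3 >> 1) = VTIVAR(1), with IXGBE_IVAR_ALLOC_VAL set.
 */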
6032a9643ea8Slogwang 
6033a9643ea8Slogwang /**
6034a9643ea8Slogwang  * set the IVAR registers, mapping interrupt causes to vectors
6035a9643ea8Slogwang  * @param hw
6036a9643ea8Slogwang  *  pointer to ixgbe_hw struct
6037a9643ea8Slogwang  * @direction
6038a9643ea8Slogwang  *  0 for Rx, 1 for Tx, -1 for other causes
6039a9643ea8Slogwang  * @queue
6040a9643ea8Slogwang  *  queue to map the corresponding interrupt to
6041a9643ea8Slogwang  * @msix_vector
6042a9643ea8Slogwang  *  the vector to map to the corresponding queue
6043a9643ea8Slogwang  */
6044a9643ea8Slogwang static void
6045a9643ea8Slogwang ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
6046a9643ea8Slogwang 		   uint8_t queue, uint8_t msix_vector)
6047a9643ea8Slogwang {
6048a9643ea8Slogwang 	uint32_t tmp, idx;
6049a9643ea8Slogwang 
6050a9643ea8Slogwang 	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
6051a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_82598EB) {
6052a9643ea8Slogwang 		if (direction == -1)
6053a9643ea8Slogwang 			direction = 0;
6054a9643ea8Slogwang 		idx = (((direction * 64) + queue) >> 2) & 0x1F;
6055a9643ea8Slogwang 		tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
6056a9643ea8Slogwang 		tmp &= ~(0xFF << (8 * (queue & 0x3)));
6057a9643ea8Slogwang 		tmp |= (msix_vector << (8 * (queue & 0x3)));
6058a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
6059a9643ea8Slogwang 	} else if ((hw->mac.type == ixgbe_mac_82599EB) ||
60602bfe3f2eSlogwang 			(hw->mac.type == ixgbe_mac_X540) ||
60614418919fSjohnjiang 			(hw->mac.type == ixgbe_mac_X550) ||
60624418919fSjohnjiang 			(hw->mac.type == ixgbe_mac_X550EM_x)) {
6063a9643ea8Slogwang 		if (direction == -1) {
6064a9643ea8Slogwang 			/* other causes */
6065a9643ea8Slogwang 			idx = ((queue & 1) * 8);
6066a9643ea8Slogwang 			tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
6067a9643ea8Slogwang 			tmp &= ~(0xFF << idx);
6068a9643ea8Slogwang 			tmp |= (msix_vector << idx);
6069a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
6070a9643ea8Slogwang 		} else {
6071a9643ea8Slogwang 			/* rx or tx causes */
6072a9643ea8Slogwang 			idx = ((16 * (queue & 1)) + (8 * direction));
6073a9643ea8Slogwang 			tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
6074a9643ea8Slogwang 			tmp &= ~(0xFF << idx);
6075a9643ea8Slogwang 			tmp |= (msix_vector << idx);
6076a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
6077a9643ea8Slogwang 		}
6078a9643ea8Slogwang 	}
6079a9643ea8Slogwang }
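
/*
 * Worked example for the 82598EB branch above (illustrative values):
 * an Rx cause (direction = 0) on queue 5 yields
 * idx = ((0 * 64 + 5) >> 2) & 0x1F = 1 and byte offset 8 * (5 & 0x3) = 8,
 * i.e. bits [15:8] of IVAR(1) receive the vector number.
 */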
6080a9643ea8Slogwang 
6081a9643ea8Slogwang static void
6082a9643ea8Slogwang ixgbevf_configure_msix(struct rte_eth_dev *dev)
6083a9643ea8Slogwang {
60842bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
60852bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
6086a9643ea8Slogwang 	struct ixgbe_hw *hw =
6087a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6088a9643ea8Slogwang 	uint32_t q_idx;
6089a9643ea8Slogwang 	uint32_t vector_idx = IXGBE_MISC_VEC_ID;
60902bfe3f2eSlogwang 	uint32_t base = IXGBE_MISC_VEC_ID;
6091a9643ea8Slogwang 
6092a9643ea8Slogwang 	/* Configure VF other cause ivar */
6093a9643ea8Slogwang 	ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
6094a9643ea8Slogwang 
6095a9643ea8Slogwang 	/* won't configure msix register if no mapping is done
6096a9643ea8Slogwang 	 * between intr vector and event fd.
6097a9643ea8Slogwang 	 */
6098a9643ea8Slogwang 	if (!rte_intr_dp_is_en(intr_handle))
6099a9643ea8Slogwang 		return;
6100a9643ea8Slogwang 
61012bfe3f2eSlogwang 	if (rte_intr_allow_others(intr_handle)) {
61022bfe3f2eSlogwang 		base = IXGBE_RX_VEC_START;
61032bfe3f2eSlogwang 		vector_idx = IXGBE_RX_VEC_START;
61042bfe3f2eSlogwang 	}
61052bfe3f2eSlogwang 
6106a9643ea8Slogwang 	/* Configure all RX queues of VF */
6107a9643ea8Slogwang 	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
6108a9643ea8Slogwang 		/* Force all queues to use vector 0,
6109a9643ea8Slogwang 		 * as IXGBE_VF_MAXMSIVECTOR = 1
6110a9643ea8Slogwang 		 */
6111a9643ea8Slogwang 		ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
6112a9643ea8Slogwang 		intr_handle->intr_vec[q_idx] = vector_idx;
61132bfe3f2eSlogwang 		if (vector_idx < base + intr_handle->nb_efd - 1)
61142bfe3f2eSlogwang 			vector_idx++;
6115a9643ea8Slogwang 	}
6116d30ea906Sjfb8856606 
6117d30ea906Sjfb8856606 	/* As the RX queue setting above shows, all queues use vector 0.
6118d30ea906Sjfb8856606 	 * Set only the ITR value of IXGBE_MISC_VEC_ID.
6119d30ea906Sjfb8856606 	 */
6120d30ea906Sjfb8856606 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
6121d30ea906Sjfb8856606 			IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
6122d30ea906Sjfb8856606 			| IXGBE_EITR_CNT_WDIS);
6123a9643ea8Slogwang }
6124a9643ea8Slogwang 
6125a9643ea8Slogwang /**
6126a9643ea8Slogwang  * Sets up the hardware to properly generate MSI-X interrupts
6127a9643ea8Slogwang  * @hw
6128a9643ea8Slogwang  *  board private structure
6129a9643ea8Slogwang  */
6130a9643ea8Slogwang static void
6131a9643ea8Slogwang ixgbe_configure_msix(struct rte_eth_dev *dev)
6132a9643ea8Slogwang {
61332bfe3f2eSlogwang 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
61342bfe3f2eSlogwang 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
6135a9643ea8Slogwang 	struct ixgbe_hw *hw =
6136a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6137a9643ea8Slogwang 	uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
6138a9643ea8Slogwang 	uint32_t vec = IXGBE_MISC_VEC_ID;
6139a9643ea8Slogwang 	uint32_t mask;
6140a9643ea8Slogwang 	uint32_t gpie;
6141a9643ea8Slogwang 
6142a9643ea8Slogwang 	/* won't configure msix register if no mapping is done
6143a9643ea8Slogwang 	 * between intr vector and event fd
6144579bf1e2Sjfb8856606 	 * but if MSI-X has been enabled already, we need to configure
6145579bf1e2Sjfb8856606 	 * auto clean, auto mask and throttling.
6146a9643ea8Slogwang 	 */
6147579bf1e2Sjfb8856606 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
6148579bf1e2Sjfb8856606 	if (!rte_intr_dp_is_en(intr_handle) &&
6149579bf1e2Sjfb8856606 	    !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
6150a9643ea8Slogwang 		return;
6151a9643ea8Slogwang 
6152a9643ea8Slogwang 	if (rte_intr_allow_others(intr_handle))
6153a9643ea8Slogwang 		vec = base = IXGBE_RX_VEC_START;
6154a9643ea8Slogwang 
6155a9643ea8Slogwang 	/* setup GPIE for MSI-x mode */
6156a9643ea8Slogwang 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
6157a9643ea8Slogwang 	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
6158a9643ea8Slogwang 		IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
6159a9643ea8Slogwang 	/* auto clearing and auto setting corresponding bits in EIMS
6160a9643ea8Slogwang 	 * when MSI-X interrupt is triggered
6161a9643ea8Slogwang 	 */
6162a9643ea8Slogwang 	if (hw->mac.type == ixgbe_mac_82598EB) {
6163a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
6164a9643ea8Slogwang 	} else {
6165a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
6166a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
6167a9643ea8Slogwang 	}
6168a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
6169a9643ea8Slogwang 
6170a9643ea8Slogwang 	/* Populate the IVAR table and set the ITR values to the
6171a9643ea8Slogwang 	 * corresponding register.
6172a9643ea8Slogwang 	 */
6173579bf1e2Sjfb8856606 	if (rte_intr_dp_is_en(intr_handle)) {
6174a9643ea8Slogwang 		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
6175a9643ea8Slogwang 			queue_id++) {
6176a9643ea8Slogwang 			/* by default, 1:1 mapping */
6177a9643ea8Slogwang 			ixgbe_set_ivar_map(hw, 0, queue_id, vec);
6178a9643ea8Slogwang 			intr_handle->intr_vec[queue_id] = vec;
6179a9643ea8Slogwang 			if (vec < base + intr_handle->nb_efd - 1)
6180a9643ea8Slogwang 				vec++;
6181a9643ea8Slogwang 		}
6182a9643ea8Slogwang 
6183a9643ea8Slogwang 		switch (hw->mac.type) {
6184a9643ea8Slogwang 		case ixgbe_mac_82598EB:
6185579bf1e2Sjfb8856606 			ixgbe_set_ivar_map(hw, -1,
6186579bf1e2Sjfb8856606 					   IXGBE_IVAR_OTHER_CAUSES_INDEX,
6187a9643ea8Slogwang 					   IXGBE_MISC_VEC_ID);
6188a9643ea8Slogwang 			break;
6189a9643ea8Slogwang 		case ixgbe_mac_82599EB:
6190a9643ea8Slogwang 		case ixgbe_mac_X540:
61912bfe3f2eSlogwang 		case ixgbe_mac_X550:
61924418919fSjohnjiang 		case ixgbe_mac_X550EM_x:
6193a9643ea8Slogwang 			ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
6194a9643ea8Slogwang 			break;
6195a9643ea8Slogwang 		default:
6196a9643ea8Slogwang 			break;
6197a9643ea8Slogwang 		}
6198579bf1e2Sjfb8856606 	}
6199a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
6200d30ea906Sjfb8856606 			IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
6201d30ea906Sjfb8856606 			| IXGBE_EITR_CNT_WDIS);
6202a9643ea8Slogwang 
6203a9643ea8Slogwang 	/* set up to autoclear timer, and the vectors */
6204a9643ea8Slogwang 	mask = IXGBE_EIMS_ENABLE_MASK;
6205a9643ea8Slogwang 	mask &= ~(IXGBE_EIMS_OTHER |
6206a9643ea8Slogwang 		  IXGBE_EIMS_MAILBOX |
6207a9643ea8Slogwang 		  IXGBE_EIMS_LSC);
6208a9643ea8Slogwang 
6209a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
6210a9643ea8Slogwang }
6211a9643ea8Slogwang 
62122bfe3f2eSlogwang int
62132bfe3f2eSlogwang ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
6214a9643ea8Slogwang 			   uint16_t queue_idx, uint16_t tx_rate)
6215a9643ea8Slogwang {
6216a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6217d30ea906Sjfb8856606 	struct rte_eth_rxmode *rxmode;
6218a9643ea8Slogwang 	uint32_t rf_dec, rf_int;
6219a9643ea8Slogwang 	uint32_t bcnrc_val;
6220a9643ea8Slogwang 	uint16_t link_speed = dev->data->dev_link.link_speed;
6221a9643ea8Slogwang 
6222a9643ea8Slogwang 	if (queue_idx >= hw->mac.max_tx_queues)
6223a9643ea8Slogwang 		return -EINVAL;
6224a9643ea8Slogwang 
6225a9643ea8Slogwang 	if (tx_rate != 0) {
6226a9643ea8Slogwang 		/* Calculate the rate factor values to set */
6227a9643ea8Slogwang 		rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
6228a9643ea8Slogwang 		rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
6229a9643ea8Slogwang 		rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
6230a9643ea8Slogwang 
6231a9643ea8Slogwang 		bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
6232a9643ea8Slogwang 		bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
6233a9643ea8Slogwang 				IXGBE_RTTBCNRC_RF_INT_MASK_M);
6234a9643ea8Slogwang 		bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
6235a9643ea8Slogwang 	} else {
6236a9643ea8Slogwang 		bcnrc_val = 0;
6237a9643ea8Slogwang 	}
6238a9643ea8Slogwang 
6239d30ea906Sjfb8856606 	rxmode = &dev->data->dev_conf.rxmode;
6240a9643ea8Slogwang 	/*
6241a9643ea8Slogwang 	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
6242a9643ea8Slogwang 	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
6243a9643ea8Slogwang 	 * set as 0x4.
6244a9643ea8Slogwang 	 */
6245d30ea906Sjfb8856606 	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
6246d30ea906Sjfb8856606 	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
6247a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
6248a9643ea8Slogwang 			IXGBE_MMW_SIZE_JUMBO_FRAME);
6249a9643ea8Slogwang 	else
6250a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
6251a9643ea8Slogwang 			IXGBE_MMW_SIZE_DEFAULT);
6252a9643ea8Slogwang 
6253a9643ea8Slogwang 	/* Set RTTBCNRC of queue X */
6254a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
6255a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
6256a9643ea8Slogwang 	IXGBE_WRITE_FLUSH(hw);
6257a9643ea8Slogwang 
6258a9643ea8Slogwang 	return 0;
6259a9643ea8Slogwang }
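
/*
 * Worked example of the rate-factor arithmetic above (illustrative
 * numbers): with link_speed = 10000 (Mbps) and tx_rate = 3000,
 * rf_int = 10000 / 3000 = 3 and, assuming the usual 14-bit
 * IXGBE_RTTBCNRC_RF_INT_SHIFT,
 * rf_dec = ((10000 % 3000) << 14) / 3000 = 5461, which encodes the
 * rate factor 10000/3000 ~= 3.333 in RTTBCNRC fixed point.
 */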
6260a9643ea8Slogwang 
62612bfe3f2eSlogwang static int
62624418919fSjohnjiang ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
6263*2d9fd380Sjfb8856606 		     __rte_unused uint32_t index,
6264*2d9fd380Sjfb8856606 		     __rte_unused uint32_t pool)
6265a9643ea8Slogwang {
6266a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6267a9643ea8Slogwang 	int diag;
6268a9643ea8Slogwang 
6269a9643ea8Slogwang 	/*
6270a9643ea8Slogwang 	 * On a 82599 VF, adding again the same MAC addr is not an idempotent
6271a9643ea8Slogwang 	 * operation. Trap this case to avoid exhausting the [very limited]
6272a9643ea8Slogwang 	 * set of PF resources used to store VF MAC addresses.
6273a9643ea8Slogwang 	 */
62744418919fSjohnjiang 	if (memcmp(hw->mac.perm_addr, mac_addr,
62754418919fSjohnjiang 			sizeof(struct rte_ether_addr)) == 0)
62762bfe3f2eSlogwang 		return -1;
6277a9643ea8Slogwang 	diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
62782bfe3f2eSlogwang 	if (diag != 0)
62792bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "Unable to add MAC address "
62802bfe3f2eSlogwang 			    "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
62812bfe3f2eSlogwang 			    mac_addr->addr_bytes[0],
62822bfe3f2eSlogwang 			    mac_addr->addr_bytes[1],
62832bfe3f2eSlogwang 			    mac_addr->addr_bytes[2],
62842bfe3f2eSlogwang 			    mac_addr->addr_bytes[3],
62852bfe3f2eSlogwang 			    mac_addr->addr_bytes[4],
62862bfe3f2eSlogwang 			    mac_addr->addr_bytes[5],
62872bfe3f2eSlogwang 			    diag);
62882bfe3f2eSlogwang 	return diag;
6289a9643ea8Slogwang }
6290a9643ea8Slogwang 
6291a9643ea8Slogwang static void
6292a9643ea8Slogwang ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
6293a9643ea8Slogwang {
6294a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
62954418919fSjohnjiang 	struct rte_ether_addr *perm_addr =
62964418919fSjohnjiang 		(struct rte_ether_addr *)hw->mac.perm_addr;
62974418919fSjohnjiang 	struct rte_ether_addr *mac_addr;
6298a9643ea8Slogwang 	uint32_t i;
6299a9643ea8Slogwang 	int diag;
6300a9643ea8Slogwang 
6301a9643ea8Slogwang 	/*
6302a9643ea8Slogwang 	 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
6303a9643ea8Slogwang 	 * not support the deletion of a given MAC address.
6304a9643ea8Slogwang 	 * Instead, it requires deleting all MAC addresses and then adding
6305a9643ea8Slogwang 	 * back all MAC addresses except the one to be deleted.
6306a9643ea8Slogwang 	 */
6307a9643ea8Slogwang 	(void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
6308a9643ea8Slogwang 
6309a9643ea8Slogwang 	/*
6310a9643ea8Slogwang 	 * Add again all MAC addresses, with the exception of the deleted one
6311a9643ea8Slogwang 	 * and of the permanent MAC address.
6312a9643ea8Slogwang 	 */
6313a9643ea8Slogwang 	for (i = 0, mac_addr = dev->data->mac_addrs;
6314a9643ea8Slogwang 	     i < hw->mac.num_rar_entries; i++, mac_addr++) {
6315a9643ea8Slogwang 		/* Skip the deleted MAC address */
6316a9643ea8Slogwang 		if (i == index)
6317a9643ea8Slogwang 			continue;
6318a9643ea8Slogwang 		/* Skip NULL MAC addresses */
63194418919fSjohnjiang 		if (rte_is_zero_ether_addr(mac_addr))
6320a9643ea8Slogwang 			continue;
6321a9643ea8Slogwang 		/* Skip the permanent MAC address */
63224418919fSjohnjiang 		if (memcmp(perm_addr, mac_addr,
63234418919fSjohnjiang 				sizeof(struct rte_ether_addr)) == 0)
6324a9643ea8Slogwang 			continue;
6325a9643ea8Slogwang 		diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
6326a9643ea8Slogwang 		if (diag != 0)
6327a9643ea8Slogwang 			PMD_DRV_LOG(ERR,
6328a9643ea8Slogwang 				    "Adding again MAC address "
6329a9643ea8Slogwang 				    "%02x:%02x:%02x:%02x:%02x:%02x failed "
6330a9643ea8Slogwang 				    "diag=%d",
6331a9643ea8Slogwang 				    mac_addr->addr_bytes[0],
6332a9643ea8Slogwang 				    mac_addr->addr_bytes[1],
6333a9643ea8Slogwang 				    mac_addr->addr_bytes[2],
6334a9643ea8Slogwang 				    mac_addr->addr_bytes[3],
6335a9643ea8Slogwang 				    mac_addr->addr_bytes[4],
6336a9643ea8Slogwang 				    mac_addr->addr_bytes[5],
6337a9643ea8Slogwang 				    diag);
6338a9643ea8Slogwang 	}
6339a9643ea8Slogwang }
6340a9643ea8Slogwang 
6341d30ea906Sjfb8856606 static int
63424418919fSjohnjiang ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
63434418919fSjohnjiang 			struct rte_ether_addr *addr)
6344a9643ea8Slogwang {
6345a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6346a9643ea8Slogwang 
6347a9643ea8Slogwang 	hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
6348d30ea906Sjfb8856606 
6349d30ea906Sjfb8856606 	return 0;
6350a9643ea8Slogwang }
6351a9643ea8Slogwang 
63522bfe3f2eSlogwang int
6353a9643ea8Slogwang ixgbe_syn_filter_set(struct rte_eth_dev *dev,
6354a9643ea8Slogwang 			struct rte_eth_syn_filter *filter,
6355a9643ea8Slogwang 			bool add)
6356a9643ea8Slogwang {
6357a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
63582bfe3f2eSlogwang 	struct ixgbe_filter_info *filter_info =
63592bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
63602bfe3f2eSlogwang 	uint32_t syn_info;
6361a9643ea8Slogwang 	uint32_t synqf;
6362a9643ea8Slogwang 
6363a9643ea8Slogwang 	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6364a9643ea8Slogwang 		return -EINVAL;
6365a9643ea8Slogwang 
63662bfe3f2eSlogwang 	syn_info = filter_info->syn_info;
6367a9643ea8Slogwang 
6368a9643ea8Slogwang 	if (add) {
63692bfe3f2eSlogwang 		if (syn_info & IXGBE_SYN_FILTER_ENABLE)
6370a9643ea8Slogwang 			return -EINVAL;
6371a9643ea8Slogwang 		synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
6372a9643ea8Slogwang 			IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
6373a9643ea8Slogwang 
6374a9643ea8Slogwang 		if (filter->hig_pri)
6375a9643ea8Slogwang 			synqf |= IXGBE_SYN_FILTER_SYNQFP;
6376a9643ea8Slogwang 		else
6377a9643ea8Slogwang 			synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
6378a9643ea8Slogwang 	} else {
63792bfe3f2eSlogwang 		synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
63802bfe3f2eSlogwang 		if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
6381a9643ea8Slogwang 			return -ENOENT;
6382a9643ea8Slogwang 		synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
6383a9643ea8Slogwang 	}
63842bfe3f2eSlogwang 
63852bfe3f2eSlogwang 	filter_info->syn_info = synqf;
6386a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
6387a9643ea8Slogwang 	IXGBE_WRITE_FLUSH(hw);
6388a9643ea8Slogwang 	return 0;
6389a9643ea8Slogwang }
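
/*
 * Usage sketch (hypothetical values): steer TCP SYN packets to Rx
 * queue 2 with high priority.
 *
 *	struct rte_eth_syn_filter syn = {
 *		.hig_pri = 1,
 *		.queue = 2,
 *	};
 *	ixgbe_syn_filter_set(dev, &syn, true);
 */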
6390a9643ea8Slogwang 
6391a9643ea8Slogwang 
6392a9643ea8Slogwang static inline enum ixgbe_5tuple_protocol
6393a9643ea8Slogwang convert_protocol_type(uint8_t protocol_value)
6394a9643ea8Slogwang {
6395a9643ea8Slogwang 	if (protocol_value == IPPROTO_TCP)
6396a9643ea8Slogwang 		return IXGBE_FILTER_PROTOCOL_TCP;
6397a9643ea8Slogwang 	else if (protocol_value == IPPROTO_UDP)
6398a9643ea8Slogwang 		return IXGBE_FILTER_PROTOCOL_UDP;
6399a9643ea8Slogwang 	else if (protocol_value == IPPROTO_SCTP)
6400a9643ea8Slogwang 		return IXGBE_FILTER_PROTOCOL_SCTP;
6401a9643ea8Slogwang 	else
6402a9643ea8Slogwang 		return IXGBE_FILTER_PROTOCOL_NONE;
6403a9643ea8Slogwang }
6404a9643ea8Slogwang 
64052bfe3f2eSlogwang /* inject a 5-tuple filter to HW */
64062bfe3f2eSlogwang static inline void
64072bfe3f2eSlogwang ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
6408a9643ea8Slogwang 			   struct ixgbe_5tuple_filter *filter)
6409a9643ea8Slogwang {
6410a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
64112bfe3f2eSlogwang 	int i;
6412a9643ea8Slogwang 	uint32_t ftqf, sdpqf;
6413a9643ea8Slogwang 	uint32_t l34timir = 0;
6414a9643ea8Slogwang 	uint8_t mask = 0xff;
6415a9643ea8Slogwang 
64162bfe3f2eSlogwang 	i = filter->index;
6417a9643ea8Slogwang 
6418a9643ea8Slogwang 	sdpqf = (uint32_t)(filter->filter_info.dst_port <<
6419a9643ea8Slogwang 				IXGBE_SDPQF_DSTPORT_SHIFT);
6420a9643ea8Slogwang 	sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
6421a9643ea8Slogwang 
6422a9643ea8Slogwang 	ftqf = (uint32_t)(filter->filter_info.proto &
6423a9643ea8Slogwang 		IXGBE_FTQF_PROTOCOL_MASK);
6424a9643ea8Slogwang 	ftqf |= (uint32_t)((filter->filter_info.priority &
6425a9643ea8Slogwang 		IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
6426a9643ea8Slogwang 	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
6427a9643ea8Slogwang 		mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
6428a9643ea8Slogwang 	if (filter->filter_info.dst_ip_mask == 0)
6429a9643ea8Slogwang 		mask &= IXGBE_FTQF_DEST_ADDR_MASK;
6430a9643ea8Slogwang 	if (filter->filter_info.src_port_mask == 0)
6431a9643ea8Slogwang 		mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
6432a9643ea8Slogwang 	if (filter->filter_info.dst_port_mask == 0)
6433a9643ea8Slogwang 		mask &= IXGBE_FTQF_DEST_PORT_MASK;
6434a9643ea8Slogwang 	if (filter->filter_info.proto_mask == 0)
6435a9643ea8Slogwang 		mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
6436a9643ea8Slogwang 	ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
6437a9643ea8Slogwang 	ftqf |= IXGBE_FTQF_POOL_MASK_EN;
6438a9643ea8Slogwang 	ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
6439a9643ea8Slogwang 
6440a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
6441a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
6442a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
6443a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
6444a9643ea8Slogwang 
6445a9643ea8Slogwang 	l34timir |= IXGBE_L34T_IMIR_RESERVE;
6446a9643ea8Slogwang 	l34timir |= (uint32_t)(filter->queue <<
6447a9643ea8Slogwang 				IXGBE_L34T_IMIR_QUEUE_SHIFT);
6448a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
64492bfe3f2eSlogwang }
64502bfe3f2eSlogwang 
64512bfe3f2eSlogwang /*
64522bfe3f2eSlogwang  * add a 5tuple filter
64532bfe3f2eSlogwang  *
64542bfe3f2eSlogwang  * @param
64552bfe3f2eSlogwang  * dev: Pointer to struct rte_eth_dev.
64562bfe3f2eSlogwang  * index: the index the filter allocates.
64572bfe3f2eSlogwang  * filter: pointer to the filter that will be added.
64582bfe3f2eSlogwang  * rx_queue: the queue id the filter is assigned to.
64592bfe3f2eSlogwang  *
64602bfe3f2eSlogwang  * @return
64612bfe3f2eSlogwang  *    - On success, zero.
64622bfe3f2eSlogwang  *    - On failure, a negative value.
64632bfe3f2eSlogwang  */
64642bfe3f2eSlogwang static int
64652bfe3f2eSlogwang ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
64662bfe3f2eSlogwang 			struct ixgbe_5tuple_filter *filter)
64672bfe3f2eSlogwang {
64682bfe3f2eSlogwang 	struct ixgbe_filter_info *filter_info =
64692bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
64702bfe3f2eSlogwang 	int i, idx, shift;
64712bfe3f2eSlogwang 
64722bfe3f2eSlogwang 	/*
64732bfe3f2eSlogwang 	 * look for an unused 5tuple filter index,
64742bfe3f2eSlogwang 	 * and insert the filter into the list.
64752bfe3f2eSlogwang 	 */
64762bfe3f2eSlogwang 	for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
64772bfe3f2eSlogwang 		idx = i / (sizeof(uint32_t) * NBBY);
64782bfe3f2eSlogwang 		shift = i % (sizeof(uint32_t) * NBBY);
64792bfe3f2eSlogwang 		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
64802bfe3f2eSlogwang 			filter_info->fivetuple_mask[idx] |= 1 << shift;
64812bfe3f2eSlogwang 			filter->index = i;
64822bfe3f2eSlogwang 			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
64832bfe3f2eSlogwang 					  filter,
64842bfe3f2eSlogwang 					  entries);
64852bfe3f2eSlogwang 			break;
64862bfe3f2eSlogwang 		}
64872bfe3f2eSlogwang 	}
64882bfe3f2eSlogwang 	if (i >= IXGBE_MAX_FTQF_FILTERS) {
64892bfe3f2eSlogwang 		PMD_DRV_LOG(ERR, "5tuple filters are full.");
64902bfe3f2eSlogwang 		return -ENOSYS;
64912bfe3f2eSlogwang 	}
64922bfe3f2eSlogwang 
64932bfe3f2eSlogwang 	ixgbe_inject_5tuple_filter(dev, filter);
64942bfe3f2eSlogwang 
6495a9643ea8Slogwang 	return 0;
6496a9643ea8Slogwang }
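
/*
 * Worked example of the free-slot search above (illustrative index):
 * filter index i = 37 maps to idx = 37 / 32 = 1 and shift = 37 % 32 = 5,
 * so bit 5 of fivetuple_mask[1] marks that FTQF slot as in use.
 */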
6497a9643ea8Slogwang 
6498a9643ea8Slogwang /*
6499a9643ea8Slogwang  * remove a 5tuple filter
6500a9643ea8Slogwang  *
6501a9643ea8Slogwang  * @param
6502a9643ea8Slogwang  * dev: Pointer to struct rte_eth_dev.
6503a9643ea8Slogwang  * filter: the pointer of the filter will be removed.
6504a9643ea8Slogwang  */
6505a9643ea8Slogwang static void
6506a9643ea8Slogwang ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
6507a9643ea8Slogwang 			struct ixgbe_5tuple_filter *filter)
6508a9643ea8Slogwang {
6509a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6510a9643ea8Slogwang 	struct ixgbe_filter_info *filter_info =
6511a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6512a9643ea8Slogwang 	uint16_t index = filter->index;
6513a9643ea8Slogwang 
6514a9643ea8Slogwang 	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
6515a9643ea8Slogwang 				~(1 << (index % (sizeof(uint32_t) * NBBY)));
6516a9643ea8Slogwang 	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
6517a9643ea8Slogwang 	rte_free(filter);
6518a9643ea8Slogwang 
6519a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
6520a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
6521a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
6522a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
6523a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
6524a9643ea8Slogwang }
6525a9643ea8Slogwang 
6526a9643ea8Slogwang static int
6527a9643ea8Slogwang ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
6528a9643ea8Slogwang {
6529a9643ea8Slogwang 	struct ixgbe_hw *hw;
65304418919fSjohnjiang 	uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD;
65314418919fSjohnjiang 	struct rte_eth_dev_data *dev_data = dev->data;
6532a9643ea8Slogwang 
6533a9643ea8Slogwang 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6534a9643ea8Slogwang 
65354418919fSjohnjiang 	if (mtu < RTE_ETHER_MIN_MTU ||
65364418919fSjohnjiang 			max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
6537a9643ea8Slogwang 		return -EINVAL;
6538a9643ea8Slogwang 
65394418919fSjohnjiang 	/* If the device is started, refuse an MTU that would require
65404418919fSjohnjiang 	 * scattered-packet support when that feature has not been enabled.
6541a9643ea8Slogwang 	 */
65424418919fSjohnjiang 	if (dev_data->dev_started && !dev_data->scattered_rx &&
6543a9643ea8Slogwang 	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
65444418919fSjohnjiang 	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
65454418919fSjohnjiang 		PMD_INIT_LOG(ERR, "Stop port first.");
6546a9643ea8Slogwang 		return -EINVAL;
65474418919fSjohnjiang 	}
6548a9643ea8Slogwang 
6549a9643ea8Slogwang 	/*
6550a9643ea8Slogwang 	 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
6551a9643ea8Slogwang 	 * request of the version 2.0 of the mailbox API.
6552a9643ea8Slogwang 	 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
6553a9643ea8Slogwang 	 * of the mailbox API.
6554a9643ea8Slogwang 	 * This IXGBE_VF_SET_LPE request will not work with ixgbe PF drivers
6555a9643ea8Slogwang 	 * prior to 3.11.33, which introduced the following change:
6556a9643ea8Slogwang 	 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
6557a9643ea8Slogwang 	 */
6558a9643ea8Slogwang 	ixgbevf_rlpml_set_vf(hw, max_frame);
6559a9643ea8Slogwang 
6560a9643ea8Slogwang 	/* update max frame size */
6561a9643ea8Slogwang 	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
6562a9643ea8Slogwang 	return 0;
6563a9643ea8Slogwang }
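/*
 * Illustrative call path (a sketch, not part of this driver): applications
 * normally reach the handler above through the generic ethdev API, e.g.
 *
 *	ret = rte_eth_dev_set_mtu(port_id, 1500);
 *
 * which dispatches to ixgbevf_dev_set_mtu() via dev_ops->mtu_set.
 */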
6564a9643ea8Slogwang 
6565a9643ea8Slogwang static inline struct ixgbe_5tuple_filter *
6566a9643ea8Slogwang ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
6567a9643ea8Slogwang 			struct ixgbe_5tuple_filter_info *key)
6568a9643ea8Slogwang {
6569a9643ea8Slogwang 	struct ixgbe_5tuple_filter *it;
6570a9643ea8Slogwang 
6571a9643ea8Slogwang 	TAILQ_FOREACH(it, filter_list, entries) {
6572a9643ea8Slogwang 		if (memcmp(key, &it->filter_info,
6573a9643ea8Slogwang 			sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
6574a9643ea8Slogwang 			return it;
6575a9643ea8Slogwang 		}
6576a9643ea8Slogwang 	}
6577a9643ea8Slogwang 	return NULL;
6578a9643ea8Slogwang }
6579a9643ea8Slogwang 
6580a9643ea8Slogwang /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
6581a9643ea8Slogwang static inline int
6582a9643ea8Slogwang ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
6583a9643ea8Slogwang 			struct ixgbe_5tuple_filter_info *filter_info)
6584a9643ea8Slogwang {
6585a9643ea8Slogwang 	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
6586a9643ea8Slogwang 		filter->priority > IXGBE_5TUPLE_MAX_PRI ||
6587a9643ea8Slogwang 		filter->priority < IXGBE_5TUPLE_MIN_PRI)
6588a9643ea8Slogwang 		return -EINVAL;
6589a9643ea8Slogwang 
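	/*
	 * Note the mask polarity: in the hardware filter a mask bit of 1
	 * means "ignore this field", so a full software mask (UINT32_MAX,
	 * match every bit) maps to 0 and an empty mask (0) maps to 1.
	 */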
6590a9643ea8Slogwang 	switch (filter->dst_ip_mask) {
6591a9643ea8Slogwang 	case UINT32_MAX:
6592a9643ea8Slogwang 		filter_info->dst_ip_mask = 0;
6593a9643ea8Slogwang 		filter_info->dst_ip = filter->dst_ip;
6594a9643ea8Slogwang 		break;
6595a9643ea8Slogwang 	case 0:
6596a9643ea8Slogwang 		filter_info->dst_ip_mask = 1;
6597a9643ea8Slogwang 		break;
6598a9643ea8Slogwang 	default:
6599a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
6600a9643ea8Slogwang 		return -EINVAL;
6601a9643ea8Slogwang 	}
6602a9643ea8Slogwang 
6603a9643ea8Slogwang 	switch (filter->src_ip_mask) {
6604a9643ea8Slogwang 	case UINT32_MAX:
6605a9643ea8Slogwang 		filter_info->src_ip_mask = 0;
6606a9643ea8Slogwang 		filter_info->src_ip = filter->src_ip;
6607a9643ea8Slogwang 		break;
6608a9643ea8Slogwang 	case 0:
6609a9643ea8Slogwang 		filter_info->src_ip_mask = 1;
6610a9643ea8Slogwang 		break;
6611a9643ea8Slogwang 	default:
6612a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
6613a9643ea8Slogwang 		return -EINVAL;
6614a9643ea8Slogwang 	}
6615a9643ea8Slogwang 
6616a9643ea8Slogwang 	switch (filter->dst_port_mask) {
6617a9643ea8Slogwang 	case UINT16_MAX:
6618a9643ea8Slogwang 		filter_info->dst_port_mask = 0;
6619a9643ea8Slogwang 		filter_info->dst_port = filter->dst_port;
6620a9643ea8Slogwang 		break;
6621a9643ea8Slogwang 	case 0:
6622a9643ea8Slogwang 		filter_info->dst_port_mask = 1;
6623a9643ea8Slogwang 		break;
6624a9643ea8Slogwang 	default:
6625a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
6626a9643ea8Slogwang 		return -EINVAL;
6627a9643ea8Slogwang 	}
6628a9643ea8Slogwang 
6629a9643ea8Slogwang 	switch (filter->src_port_mask) {
6630a9643ea8Slogwang 	case UINT16_MAX:
6631a9643ea8Slogwang 		filter_info->src_port_mask = 0;
6632a9643ea8Slogwang 		filter_info->src_port = filter->src_port;
6633a9643ea8Slogwang 		break;
6634a9643ea8Slogwang 	case 0:
6635a9643ea8Slogwang 		filter_info->src_port_mask = 1;
6636a9643ea8Slogwang 		break;
6637a9643ea8Slogwang 	default:
6638a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "invalid src_port mask.");
6639a9643ea8Slogwang 		return -EINVAL;
6640a9643ea8Slogwang 	}
6641a9643ea8Slogwang 
6642a9643ea8Slogwang 	switch (filter->proto_mask) {
6643a9643ea8Slogwang 	case UINT8_MAX:
6644a9643ea8Slogwang 		filter_info->proto_mask = 0;
6645a9643ea8Slogwang 		filter_info->proto =
6646a9643ea8Slogwang 			convert_protocol_type(filter->proto);
6647a9643ea8Slogwang 		break;
6648a9643ea8Slogwang 	case 0:
6649a9643ea8Slogwang 		filter_info->proto_mask = 1;
6650a9643ea8Slogwang 		break;
6651a9643ea8Slogwang 	default:
6652a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "invalid protocol mask.");
6653a9643ea8Slogwang 		return -EINVAL;
6654a9643ea8Slogwang 	}
6655a9643ea8Slogwang 
6656a9643ea8Slogwang 	filter_info->priority = (uint8_t)filter->priority;
6657a9643ea8Slogwang 	return 0;
6658a9643ea8Slogwang }
6659a9643ea8Slogwang 
6660a9643ea8Slogwang /*
6661a9643ea8Slogwang  * add or delete a ntuple filter
6662a9643ea8Slogwang  *
6663a9643ea8Slogwang  * @param
6664a9643ea8Slogwang  * dev: Pointer to struct rte_eth_dev.
6665a9643ea8Slogwang  * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
6666a9643ea8Slogwang  * add: if true, add filter, if false, remove filter
6667a9643ea8Slogwang  *
6668a9643ea8Slogwang  * @return
6669a9643ea8Slogwang  *    - On success, zero.
6670a9643ea8Slogwang  *    - On failure, a negative value.
6671a9643ea8Slogwang  */
66722bfe3f2eSlogwang int
6673a9643ea8Slogwang ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
6674a9643ea8Slogwang 			struct rte_eth_ntuple_filter *ntuple_filter,
6675a9643ea8Slogwang 			bool add)
6676a9643ea8Slogwang {
6677a9643ea8Slogwang 	struct ixgbe_filter_info *filter_info =
6678a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6679a9643ea8Slogwang 	struct ixgbe_5tuple_filter_info filter_5tuple;
6680a9643ea8Slogwang 	struct ixgbe_5tuple_filter *filter;
6681a9643ea8Slogwang 	int ret;
6682a9643ea8Slogwang 
6683a9643ea8Slogwang 	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
6684a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
6685a9643ea8Slogwang 		return -EINVAL;
6686a9643ea8Slogwang 	}
6687a9643ea8Slogwang 
6688a9643ea8Slogwang 	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
6689a9643ea8Slogwang 	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
6690a9643ea8Slogwang 	if (ret < 0)
6691a9643ea8Slogwang 		return ret;
6692a9643ea8Slogwang 
6693a9643ea8Slogwang 	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
6694a9643ea8Slogwang 					 &filter_5tuple);
6695a9643ea8Slogwang 	if (filter != NULL && add) {
6696a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "filter exists.");
6697a9643ea8Slogwang 		return -EEXIST;
6698a9643ea8Slogwang 	}
6699a9643ea8Slogwang 	if (filter == NULL && !add) {
6700a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "filter doesn't exist.");
6701a9643ea8Slogwang 		return -ENOENT;
6702a9643ea8Slogwang 	}
6703a9643ea8Slogwang 
6704a9643ea8Slogwang 	if (add) {
6705a9643ea8Slogwang 		filter = rte_zmalloc("ixgbe_5tuple_filter",
6706a9643ea8Slogwang 				sizeof(struct ixgbe_5tuple_filter), 0);
6707a9643ea8Slogwang 		if (filter == NULL)
6708a9643ea8Slogwang 			return -ENOMEM;
67092bfe3f2eSlogwang 		rte_memcpy(&filter->filter_info,
6710a9643ea8Slogwang 				 &filter_5tuple,
6711a9643ea8Slogwang 				 sizeof(struct ixgbe_5tuple_filter_info));
6712a9643ea8Slogwang 		filter->queue = ntuple_filter->queue;
6713a9643ea8Slogwang 		ret = ixgbe_add_5tuple_filter(dev, filter);
6714a9643ea8Slogwang 		if (ret < 0) {
6715a9643ea8Slogwang 			rte_free(filter);
6716a9643ea8Slogwang 			return ret;
6717a9643ea8Slogwang 		}
6718a9643ea8Slogwang 	} else
6719a9643ea8Slogwang 		ixgbe_remove_5tuple_filter(dev, filter);
6720a9643ea8Slogwang 
6721a9643ea8Slogwang 	return 0;
6722a9643ea8Slogwang }
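/*
 * Minimal usage sketch (assumes an initialized port; the address, queue
 * and priority below are illustrative only): steer TCP traffic from
 * 192.168.0.1 to Rx queue 1.
 *
 *	struct rte_eth_ntuple_filter f = {
 *		.flags = RTE_5TUPLE_FLAGS,
 *		.src_ip = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
 *		.src_ip_mask = UINT32_MAX,
 *		.proto = IPPROTO_TCP,
 *		.proto_mask = UINT8_MAX,
 *		.priority = 1,
 *		.queue = 1,
 *	};
 *	ret = ixgbe_add_del_ntuple_filter(dev, &f, true);
 *
 * Fields left zero keep a zero mask and are treated as wildcards.
 */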
6723a9643ea8Slogwang 
67242bfe3f2eSlogwang int
6725a9643ea8Slogwang ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
6726a9643ea8Slogwang 			struct rte_eth_ethertype_filter *filter,
6727a9643ea8Slogwang 			bool add)
6728a9643ea8Slogwang {
6729a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6730a9643ea8Slogwang 	struct ixgbe_filter_info *filter_info =
6731a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
6732a9643ea8Slogwang 	uint32_t etqf = 0;
6733a9643ea8Slogwang 	uint32_t etqs = 0;
6734a9643ea8Slogwang 	int ret;
67352bfe3f2eSlogwang 	struct ixgbe_ethertype_filter ethertype_filter;
6736a9643ea8Slogwang 
6737a9643ea8Slogwang 	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
6738a9643ea8Slogwang 		return -EINVAL;
6739a9643ea8Slogwang 
67404418919fSjohnjiang 	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
67414418919fSjohnjiang 		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
6742a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
6743a9643ea8Slogwang 			" ethertype filter.", filter->ether_type);
6744a9643ea8Slogwang 		return -EINVAL;
6745a9643ea8Slogwang 	}
6746a9643ea8Slogwang 
6747a9643ea8Slogwang 	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
6748a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
6749a9643ea8Slogwang 		return -EINVAL;
6750a9643ea8Slogwang 	}
6751a9643ea8Slogwang 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
6752a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "drop option is unsupported.");
6753a9643ea8Slogwang 		return -EINVAL;
6754a9643ea8Slogwang 	}
6755a9643ea8Slogwang 
6756a9643ea8Slogwang 	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
6757a9643ea8Slogwang 	if (ret >= 0 && add) {
6758a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
6759a9643ea8Slogwang 			    filter->ether_type);
6760a9643ea8Slogwang 		return -EEXIST;
6761a9643ea8Slogwang 	}
6762a9643ea8Slogwang 	if (ret < 0 && !add) {
6763a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
6764a9643ea8Slogwang 			    filter->ether_type);
6765a9643ea8Slogwang 		return -ENOENT;
6766a9643ea8Slogwang 	}
6767a9643ea8Slogwang 
6768a9643ea8Slogwang 	if (add) {
6769a9643ea8Slogwang 		etqf = IXGBE_ETQF_FILTER_EN;
6770a9643ea8Slogwang 		etqf |= (uint32_t)filter->ether_type;
6771a9643ea8Slogwang 		etqs |= (uint32_t)((filter->queue <<
6772a9643ea8Slogwang 				    IXGBE_ETQS_RX_QUEUE_SHIFT) &
6773a9643ea8Slogwang 				    IXGBE_ETQS_RX_QUEUE);
6774a9643ea8Slogwang 		etqs |= IXGBE_ETQS_QUEUE_EN;
67752bfe3f2eSlogwang 
67762bfe3f2eSlogwang 		ethertype_filter.ethertype = filter->ether_type;
67772bfe3f2eSlogwang 		ethertype_filter.etqf = etqf;
67782bfe3f2eSlogwang 		ethertype_filter.etqs = etqs;
67792bfe3f2eSlogwang 		ethertype_filter.conf = FALSE;
67802bfe3f2eSlogwang 		ret = ixgbe_ethertype_filter_insert(filter_info,
67812bfe3f2eSlogwang 						    &ethertype_filter);
67822bfe3f2eSlogwang 		if (ret < 0) {
67832bfe3f2eSlogwang 			PMD_DRV_LOG(ERR, "ethertype filters are full.");
67842bfe3f2eSlogwang 			return -ENOSPC;
67852bfe3f2eSlogwang 		}
6786a9643ea8Slogwang 	} else {
6787a9643ea8Slogwang 		ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
6788a9643ea8Slogwang 		if (ret < 0)
6789a9643ea8Slogwang 			return -ENOSYS;
6790a9643ea8Slogwang 	}
6791a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
6792a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
6793a9643ea8Slogwang 	IXGBE_WRITE_FLUSH(hw);
6794a9643ea8Slogwang 
6795a9643ea8Slogwang 	return 0;
6796a9643ea8Slogwang }
6797a9643ea8Slogwang 
6798a9643ea8Slogwang static int
6799*2d9fd380Sjfb8856606 ixgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
6800a9643ea8Slogwang 		     enum rte_filter_type filter_type,
6801a9643ea8Slogwang 		     enum rte_filter_op filter_op,
6802a9643ea8Slogwang 		     void *arg)
6803a9643ea8Slogwang {
68042bfe3f2eSlogwang 	int ret = 0;
6805a9643ea8Slogwang 
6806a9643ea8Slogwang 	switch (filter_type) {
68072bfe3f2eSlogwang 	case RTE_ETH_FILTER_GENERIC:
68082bfe3f2eSlogwang 		if (filter_op != RTE_ETH_FILTER_GET)
68092bfe3f2eSlogwang 			return -EINVAL;
68102bfe3f2eSlogwang 		*(const void **)arg = &ixgbe_flow_ops;
68112bfe3f2eSlogwang 		break;
6812a9643ea8Slogwang 	default:
6813a9643ea8Slogwang 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
6814a9643ea8Slogwang 							filter_type);
68152bfe3f2eSlogwang 		ret = -EINVAL;
6816a9643ea8Slogwang 		break;
6817a9643ea8Slogwang 	}
6818a9643ea8Slogwang 
6819a9643ea8Slogwang 	return ret;
6820a9643ea8Slogwang }
6821a9643ea8Slogwang 
6822a9643ea8Slogwang static u8 *
6823*2d9fd380Sjfb8856606 ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw,
6824a9643ea8Slogwang 			u8 **mc_addr_ptr, u32 *vmdq)
6825a9643ea8Slogwang {
6826a9643ea8Slogwang 	u8 *mc_addr;
6827a9643ea8Slogwang 
6828a9643ea8Slogwang 	*vmdq = 0;
6829a9643ea8Slogwang 	mc_addr = *mc_addr_ptr;
68304418919fSjohnjiang 	*mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
6831a9643ea8Slogwang 	return mc_addr;
6832a9643ea8Slogwang }
6833a9643ea8Slogwang 
6834a9643ea8Slogwang static int
6835a9643ea8Slogwang ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
68364418919fSjohnjiang 			  struct rte_ether_addr *mc_addr_set,
6837a9643ea8Slogwang 			  uint32_t nb_mc_addr)
6838a9643ea8Slogwang {
6839a9643ea8Slogwang 	struct ixgbe_hw *hw;
6840a9643ea8Slogwang 	u8 *mc_addr_list;
6841a9643ea8Slogwang 
6842a9643ea8Slogwang 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6843a9643ea8Slogwang 	mc_addr_list = (u8 *)mc_addr_set;
6844a9643ea8Slogwang 	return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
6845a9643ea8Slogwang 					 ixgbe_dev_addr_list_itr, TRUE);
6846a9643ea8Slogwang }
6847a9643ea8Slogwang 
6848a9643ea8Slogwang static uint64_t
6849a9643ea8Slogwang ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
6850a9643ea8Slogwang {
6851a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6852a9643ea8Slogwang 	uint64_t systime_cycles;
6853a9643ea8Slogwang 
6854a9643ea8Slogwang 	switch (hw->mac.type) {
6855a9643ea8Slogwang 	case ixgbe_mac_X550:
6856a9643ea8Slogwang 	case ixgbe_mac_X550EM_x:
6857a9643ea8Slogwang 	case ixgbe_mac_X550EM_a:
6858a9643ea8Slogwang 		/* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
6859a9643ea8Slogwang 		systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6860a9643ea8Slogwang 		systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6861a9643ea8Slogwang 				* NSEC_PER_SEC;
6862a9643ea8Slogwang 		break;
6863a9643ea8Slogwang 	default:
6864a9643ea8Slogwang 		systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
6865a9643ea8Slogwang 		systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
6866a9643ea8Slogwang 				<< 32;
6867a9643ea8Slogwang 	}
6868a9643ea8Slogwang 
6869a9643ea8Slogwang 	return systime_cycles;
6870a9643ea8Slogwang }
6871a9643ea8Slogwang 
6872a9643ea8Slogwang static uint64_t
6873a9643ea8Slogwang ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6874a9643ea8Slogwang {
6875a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6876a9643ea8Slogwang 	uint64_t rx_tstamp_cycles;
6877a9643ea8Slogwang 
6878a9643ea8Slogwang 	switch (hw->mac.type) {
6879a9643ea8Slogwang 	case ixgbe_mac_X550:
6880a9643ea8Slogwang 	case ixgbe_mac_X550EM_x:
6881a9643ea8Slogwang 	case ixgbe_mac_X550EM_a:
6882a9643ea8Slogwang 		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
6883a9643ea8Slogwang 		rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6884a9643ea8Slogwang 		rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6885a9643ea8Slogwang 				* NSEC_PER_SEC;
6886a9643ea8Slogwang 		break;
6887a9643ea8Slogwang 	default:
6888a9643ea8Slogwang 		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
6889a9643ea8Slogwang 		rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
6890a9643ea8Slogwang 		rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
6891a9643ea8Slogwang 				<< 32;
6892a9643ea8Slogwang 	}
6893a9643ea8Slogwang 
6894a9643ea8Slogwang 	return rx_tstamp_cycles;
6895a9643ea8Slogwang }
6896a9643ea8Slogwang 
6897a9643ea8Slogwang static uint64_t
6898a9643ea8Slogwang ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
6899a9643ea8Slogwang {
6900a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6901a9643ea8Slogwang 	uint64_t tx_tstamp_cycles;
6902a9643ea8Slogwang 
6903a9643ea8Slogwang 	switch (hw->mac.type) {
6904a9643ea8Slogwang 	case ixgbe_mac_X550:
6905a9643ea8Slogwang 	case ixgbe_mac_X550EM_x:
6906a9643ea8Slogwang 	case ixgbe_mac_X550EM_a:
6907a9643ea8Slogwang 		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
6908a9643ea8Slogwang 		tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6909a9643ea8Slogwang 		tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6910a9643ea8Slogwang 				* NSEC_PER_SEC;
6911a9643ea8Slogwang 		break;
6912a9643ea8Slogwang 	default:
6913a9643ea8Slogwang 		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
6914a9643ea8Slogwang 		tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
6915a9643ea8Slogwang 		tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
6916a9643ea8Slogwang 				<< 32;
6917a9643ea8Slogwang 	}
6918a9643ea8Slogwang 
6919a9643ea8Slogwang 	return tx_tstamp_cycles;
6920a9643ea8Slogwang }
6921a9643ea8Slogwang 
6922a9643ea8Slogwang static void
6923a9643ea8Slogwang ixgbe_start_timecounters(struct rte_eth_dev *dev)
6924a9643ea8Slogwang {
6925a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
69264b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
6927a9643ea8Slogwang 	struct rte_eth_link link;
6928a9643ea8Slogwang 	uint32_t incval = 0;
6929a9643ea8Slogwang 	uint32_t shift = 0;
6930a9643ea8Slogwang 
6931a9643ea8Slogwang 	/* Get current link speed. */
6932a9643ea8Slogwang 	ixgbe_dev_link_update(dev, 1);
6933d30ea906Sjfb8856606 	rte_eth_linkstatus_get(dev, &link);
6934a9643ea8Slogwang 
6935a9643ea8Slogwang 	switch (link.link_speed) {
6936a9643ea8Slogwang 	case ETH_SPEED_NUM_100M:
6937a9643ea8Slogwang 		incval = IXGBE_INCVAL_100;
6938a9643ea8Slogwang 		shift = IXGBE_INCVAL_SHIFT_100;
6939a9643ea8Slogwang 		break;
6940a9643ea8Slogwang 	case ETH_SPEED_NUM_1G:
6941a9643ea8Slogwang 		incval = IXGBE_INCVAL_1GB;
6942a9643ea8Slogwang 		shift = IXGBE_INCVAL_SHIFT_1GB;
6943a9643ea8Slogwang 		break;
6944a9643ea8Slogwang 	case ETH_SPEED_NUM_10G:
6945a9643ea8Slogwang 	default:
6946a9643ea8Slogwang 		incval = IXGBE_INCVAL_10GB;
6947a9643ea8Slogwang 		shift = IXGBE_INCVAL_SHIFT_10GB;
6948a9643ea8Slogwang 		break;
6949a9643ea8Slogwang 	}
6950a9643ea8Slogwang 
6951a9643ea8Slogwang 	switch (hw->mac.type) {
6952a9643ea8Slogwang 	case ixgbe_mac_X550:
6953a9643ea8Slogwang 	case ixgbe_mac_X550EM_x:
6954a9643ea8Slogwang 	case ixgbe_mac_X550EM_a:
6955a9643ea8Slogwang 		/* Independent of link speed. */
6956a9643ea8Slogwang 		incval = 1;
6957a9643ea8Slogwang 		/* Cycles read will be interpreted as ns. */
6958a9643ea8Slogwang 		shift = 0;
6959a9643ea8Slogwang 		/* Fall-through */
6960a9643ea8Slogwang 	case ixgbe_mac_X540:
6961a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
6962a9643ea8Slogwang 		break;
6963a9643ea8Slogwang 	case ixgbe_mac_82599EB:
6964a9643ea8Slogwang 		incval >>= IXGBE_INCVAL_SHIFT_82599;
6965a9643ea8Slogwang 		shift -= IXGBE_INCVAL_SHIFT_82599;
6966a9643ea8Slogwang 		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
6967a9643ea8Slogwang 				(1 << IXGBE_INCPER_SHIFT_82599) | incval);
6968a9643ea8Slogwang 		break;
6969a9643ea8Slogwang 	default:
6970a9643ea8Slogwang 		/* Not supported. */
6971a9643ea8Slogwang 		return;
6972a9643ea8Slogwang 	}
6973a9643ea8Slogwang 
6974a9643ea8Slogwang 	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
6975a9643ea8Slogwang 	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6976a9643ea8Slogwang 	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
6977a9643ea8Slogwang 
6978a9643ea8Slogwang 	adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6979a9643ea8Slogwang 	adapter->systime_tc.cc_shift = shift;
6980a9643ea8Slogwang 	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
6981a9643ea8Slogwang 
6982a9643ea8Slogwang 	adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6983a9643ea8Slogwang 	adapter->rx_tstamp_tc.cc_shift = shift;
6984a9643ea8Slogwang 	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6985a9643ea8Slogwang 
6986a9643ea8Slogwang 	adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
6987a9643ea8Slogwang 	adapter->tx_tstamp_tc.cc_shift = shift;
6988a9643ea8Slogwang 	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
6989a9643ea8Slogwang }
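/*
 * Sketch of how the timecounters initialized above are consumed; the
 * cycles-to-nanoseconds conversion is done by the generic helper from
 * rte_time.h, not by this driver:
 *
 *	cycles = ixgbe_read_systime_cyclecounter(dev);
 *	ns = rte_timecounter_update(&adapter->systime_tc, cycles);
 *
 * incval and cc_shift are chosen so that one scaled counter unit is
 * roughly one nanosecond at the current link speed.
 */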
6990a9643ea8Slogwang 
6991a9643ea8Slogwang static int
6992a9643ea8Slogwang ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
6993a9643ea8Slogwang {
69944b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
6995a9643ea8Slogwang 
6996a9643ea8Slogwang 	adapter->systime_tc.nsec += delta;
6997a9643ea8Slogwang 	adapter->rx_tstamp_tc.nsec += delta;
6998a9643ea8Slogwang 	adapter->tx_tstamp_tc.nsec += delta;
6999a9643ea8Slogwang 
7000a9643ea8Slogwang 	return 0;
7001a9643ea8Slogwang }
7002a9643ea8Slogwang 
7003a9643ea8Slogwang static int
7004a9643ea8Slogwang ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
7005a9643ea8Slogwang {
7006a9643ea8Slogwang 	uint64_t ns;
70074b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
7008a9643ea8Slogwang 
7009a9643ea8Slogwang 	ns = rte_timespec_to_ns(ts);
7010a9643ea8Slogwang 	/* Set the timecounters to a new value. */
7011a9643ea8Slogwang 	adapter->systime_tc.nsec = ns;
7012a9643ea8Slogwang 	adapter->rx_tstamp_tc.nsec = ns;
7013a9643ea8Slogwang 	adapter->tx_tstamp_tc.nsec = ns;
7014a9643ea8Slogwang 
7015a9643ea8Slogwang 	return 0;
7016a9643ea8Slogwang }
7017a9643ea8Slogwang 
7018a9643ea8Slogwang static int
7019a9643ea8Slogwang ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
7020a9643ea8Slogwang {
7021a9643ea8Slogwang 	uint64_t ns, systime_cycles;
70224b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
7023a9643ea8Slogwang 
7024a9643ea8Slogwang 	systime_cycles = ixgbe_read_systime_cyclecounter(dev);
7025a9643ea8Slogwang 	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
7026a9643ea8Slogwang 	*ts = rte_ns_to_timespec(ns);
7027a9643ea8Slogwang 
7028a9643ea8Slogwang 	return 0;
7029a9643ea8Slogwang }
7030a9643ea8Slogwang 
7031a9643ea8Slogwang static int
7032a9643ea8Slogwang ixgbe_timesync_enable(struct rte_eth_dev *dev)
7033a9643ea8Slogwang {
7034a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7035a9643ea8Slogwang 	uint32_t tsync_ctl;
7036a9643ea8Slogwang 	uint32_t tsauxc;
7037a9643ea8Slogwang 
7038a9643ea8Slogwang 	/* Stop the timesync system time. */
7039a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
7040a9643ea8Slogwang 	/* Reset the timesync system time value. */
7041a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
7042a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
7043a9643ea8Slogwang 
7044a9643ea8Slogwang 	/* Enable system time for platforms where it isn't on by default. */
7045a9643ea8Slogwang 	tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
7046a9643ea8Slogwang 	tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
7047a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
7048a9643ea8Slogwang 
7049a9643ea8Slogwang 	ixgbe_start_timecounters(dev);
7050a9643ea8Slogwang 
7051a9643ea8Slogwang 	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7052a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
70534418919fSjohnjiang 			(RTE_ETHER_TYPE_1588 |
7054a9643ea8Slogwang 			 IXGBE_ETQF_FILTER_EN |
7055a9643ea8Slogwang 			 IXGBE_ETQF_1588));
7056a9643ea8Slogwang 
7057a9643ea8Slogwang 	/* Enable timestamping of received PTP packets. */
7058a9643ea8Slogwang 	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7059a9643ea8Slogwang 	tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
7060a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7061a9643ea8Slogwang 
7062a9643ea8Slogwang 	/* Enable timestamping of transmitted PTP packets. */
7063a9643ea8Slogwang 	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7064a9643ea8Slogwang 	tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
7065a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7066a9643ea8Slogwang 
7067a9643ea8Slogwang 	IXGBE_WRITE_FLUSH(hw);
7068a9643ea8Slogwang 
7069a9643ea8Slogwang 	return 0;
7070a9643ea8Slogwang }
7071a9643ea8Slogwang 
7072a9643ea8Slogwang static int
7073a9643ea8Slogwang ixgbe_timesync_disable(struct rte_eth_dev *dev)
7074a9643ea8Slogwang {
7075a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7076a9643ea8Slogwang 	uint32_t tsync_ctl;
7077a9643ea8Slogwang 
7078a9643ea8Slogwang 	/* Disable timestamping of transmitted PTP packets. */
7079a9643ea8Slogwang 	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7080a9643ea8Slogwang 	tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
7081a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7082a9643ea8Slogwang 
7083a9643ea8Slogwang 	/* Disable timestamping of received PTP packets. */
7084a9643ea8Slogwang 	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7085a9643ea8Slogwang 	tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
7086a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7087a9643ea8Slogwang 
7088a9643ea8Slogwang 	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7089a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
7090a9643ea8Slogwang 
7091a9643ea8Slogwang 	/* Stop incrementing the System Time registers. */
7092a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
7093a9643ea8Slogwang 
7094a9643ea8Slogwang 	return 0;
7095a9643ea8Slogwang }
7096a9643ea8Slogwang 
7097a9643ea8Slogwang static int
7098a9643ea8Slogwang ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
7099a9643ea8Slogwang 				 struct timespec *timestamp,
7100a9643ea8Slogwang 				 uint32_t flags __rte_unused)
7101a9643ea8Slogwang {
7102a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
71034b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
7104a9643ea8Slogwang 	uint32_t tsync_rxctl;
7105a9643ea8Slogwang 	uint64_t rx_tstamp_cycles;
7106a9643ea8Slogwang 	uint64_t ns;
7107a9643ea8Slogwang 
7108a9643ea8Slogwang 	tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7109a9643ea8Slogwang 	if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
7110a9643ea8Slogwang 		return -EINVAL;
7111a9643ea8Slogwang 
7112a9643ea8Slogwang 	rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
7113a9643ea8Slogwang 	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
7114a9643ea8Slogwang 	*timestamp = rte_ns_to_timespec(ns);
7115a9643ea8Slogwang 
7116a9643ea8Slogwang 	return  0;
7117a9643ea8Slogwang }
7118a9643ea8Slogwang 
7119a9643ea8Slogwang static int
7120a9643ea8Slogwang ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
7121a9643ea8Slogwang 				 struct timespec *timestamp)
7122a9643ea8Slogwang {
7123a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
71244b05018fSfengbojiang 	struct ixgbe_adapter *adapter = dev->data->dev_private;
7125a9643ea8Slogwang 	uint32_t tsync_txctl;
7126a9643ea8Slogwang 	uint64_t tx_tstamp_cycles;
7127a9643ea8Slogwang 	uint64_t ns;
7128a9643ea8Slogwang 
7129a9643ea8Slogwang 	tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7130a9643ea8Slogwang 	if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
7131a9643ea8Slogwang 		return -EINVAL;
7132a9643ea8Slogwang 
7133a9643ea8Slogwang 	tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
7134a9643ea8Slogwang 	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
7135a9643ea8Slogwang 	*timestamp = rte_ns_to_timespec(ns);
7136a9643ea8Slogwang 
7137a9643ea8Slogwang 	return 0;
7138a9643ea8Slogwang }
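/*
 * The timesync handlers above back the generic ethdev PTP API; a typical
 * application sequence (sketch) is:
 *
 *	rte_eth_timesync_enable(port_id);
 *	...receive a PTP frame with PKT_RX_IEEE1588_TMST set...
 *	rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 */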
7139a9643ea8Slogwang 
7140a9643ea8Slogwang static int
7141a9643ea8Slogwang ixgbe_get_reg_length(struct rte_eth_dev *dev)
7142a9643ea8Slogwang {
7143a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7144a9643ea8Slogwang 	int count = 0;
7145a9643ea8Slogwang 	int g_ind = 0;
7146a9643ea8Slogwang 	const struct reg_info *reg_group;
7147a9643ea8Slogwang 	const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7148a9643ea8Slogwang 				    ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7149a9643ea8Slogwang 
7150a9643ea8Slogwang 	while ((reg_group = reg_set[g_ind++]))
7151a9643ea8Slogwang 		count += ixgbe_regs_group_count(reg_group);
7152a9643ea8Slogwang 
7153a9643ea8Slogwang 	return count;
7154a9643ea8Slogwang }
7155a9643ea8Slogwang 
7156a9643ea8Slogwang static int
7157a9643ea8Slogwang ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
7158a9643ea8Slogwang {
7159a9643ea8Slogwang 	int count = 0;
7160a9643ea8Slogwang 	int g_ind = 0;
7161a9643ea8Slogwang 	const struct reg_info *reg_group;
7162a9643ea8Slogwang 
7163a9643ea8Slogwang 	while ((reg_group = ixgbevf_regs[g_ind++]))
7164a9643ea8Slogwang 		count += ixgbe_regs_group_count(reg_group);
7165a9643ea8Slogwang 
7166a9643ea8Slogwang 	return count;
7167a9643ea8Slogwang }
7168a9643ea8Slogwang 
7169a9643ea8Slogwang static int
7170a9643ea8Slogwang ixgbe_get_regs(struct rte_eth_dev *dev,
7171a9643ea8Slogwang 	      struct rte_dev_reg_info *regs)
7172a9643ea8Slogwang {
7173a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7174a9643ea8Slogwang 	uint32_t *data = regs->data;
7175a9643ea8Slogwang 	int g_ind = 0;
7176a9643ea8Slogwang 	int count = 0;
7177a9643ea8Slogwang 	const struct reg_info *reg_group;
7178a9643ea8Slogwang 	const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7179a9643ea8Slogwang 				    ixgbe_regs_mac_82598EB : ixgbe_regs_others;
7180a9643ea8Slogwang 
7181a9643ea8Slogwang 	if (data == NULL) {
7182a9643ea8Slogwang 		regs->length = ixgbe_get_reg_length(dev);
7183a9643ea8Slogwang 		regs->width = sizeof(uint32_t);
7184a9643ea8Slogwang 		return 0;
7185a9643ea8Slogwang 	}
7186a9643ea8Slogwang 
7187a9643ea8Slogwang 	/* Support only full register dump */
7188a9643ea8Slogwang 	if ((regs->length == 0) ||
7189a9643ea8Slogwang 	    (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
7190a9643ea8Slogwang 		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7191a9643ea8Slogwang 			hw->device_id;
7192a9643ea8Slogwang 		while ((reg_group = reg_set[g_ind++]))
7193a9643ea8Slogwang 			count += ixgbe_read_regs_group(dev, &data[count],
7194a9643ea8Slogwang 				reg_group);
7195a9643ea8Slogwang 		return 0;
7196a9643ea8Slogwang 	}
7197a9643ea8Slogwang 
7198a9643ea8Slogwang 	return -ENOTSUP;
7199a9643ea8Slogwang }
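/*
 * Register dumps follow the usual two-call ethdev pattern (sketch): call
 * rte_eth_dev_get_reg_info() once with data == NULL to learn the size,
 * then allocate and call again:
 *
 *	struct rte_dev_reg_info regs = { 0 };
 *	rte_eth_dev_get_reg_info(port_id, &regs);
 *	regs.data = calloc(regs.length, regs.width);
 *	rte_eth_dev_get_reg_info(port_id, &regs);
 */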
7200a9643ea8Slogwang 
7201a9643ea8Slogwang static int
7202a9643ea8Slogwang ixgbevf_get_regs(struct rte_eth_dev *dev,
7203a9643ea8Slogwang 		struct rte_dev_reg_info *regs)
7204a9643ea8Slogwang {
7205a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7206a9643ea8Slogwang 	uint32_t *data = regs->data;
7207a9643ea8Slogwang 	int g_ind = 0;
7208a9643ea8Slogwang 	int count = 0;
7209a9643ea8Slogwang 	const struct reg_info *reg_group;
7210a9643ea8Slogwang 
7211a9643ea8Slogwang 	if (data == NULL) {
7212a9643ea8Slogwang 		regs->length = ixgbevf_get_reg_length(dev);
7213a9643ea8Slogwang 		regs->width = sizeof(uint32_t);
7214a9643ea8Slogwang 		return 0;
7215a9643ea8Slogwang 	}
7216a9643ea8Slogwang 
7217a9643ea8Slogwang 	/* Support only full register dump */
7218a9643ea8Slogwang 	if ((regs->length == 0) ||
7219a9643ea8Slogwang 	    (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
7220a9643ea8Slogwang 		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
7221a9643ea8Slogwang 			hw->device_id;
7222a9643ea8Slogwang 		while ((reg_group = ixgbevf_regs[g_ind++]))
7223a9643ea8Slogwang 			count += ixgbe_read_regs_group(dev, &data[count],
7224a9643ea8Slogwang 						      reg_group);
7225a9643ea8Slogwang 		return 0;
7226a9643ea8Slogwang 	}
7227a9643ea8Slogwang 
7228a9643ea8Slogwang 	return -ENOTSUP;
7229a9643ea8Slogwang }
7230a9643ea8Slogwang 
7231a9643ea8Slogwang static int
7232a9643ea8Slogwang ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
7233a9643ea8Slogwang {
7234a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7235a9643ea8Slogwang 
7236a9643ea8Slogwang 	/* Return unit is byte count */
7237a9643ea8Slogwang 	return hw->eeprom.word_size * 2;
7238a9643ea8Slogwang }
7239a9643ea8Slogwang 
7240a9643ea8Slogwang static int
7241a9643ea8Slogwang ixgbe_get_eeprom(struct rte_eth_dev *dev,
7242a9643ea8Slogwang 		struct rte_dev_eeprom_info *in_eeprom)
7243a9643ea8Slogwang {
7244a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7245a9643ea8Slogwang 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7246a9643ea8Slogwang 	uint16_t *data = in_eeprom->data;
7247a9643ea8Slogwang 	int first, length;
7248a9643ea8Slogwang 
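	/*
	 * The EEPROM is addressed in 16-bit words while the ethdev API works
	 * in bytes, hence the >> 1 below: e.g. a request for offset 0x20 and
	 * length 8 reads 4 words starting at word 0x10.
	 */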
7249a9643ea8Slogwang 	first = in_eeprom->offset >> 1;
7250a9643ea8Slogwang 	length = in_eeprom->length >> 1;
7251a9643ea8Slogwang 	if ((first > hw->eeprom.word_size) ||
7252a9643ea8Slogwang 	    ((first + length) > hw->eeprom.word_size))
7253a9643ea8Slogwang 		return -EINVAL;
7254a9643ea8Slogwang 
7255a9643ea8Slogwang 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7256a9643ea8Slogwang 
7257a9643ea8Slogwang 	return eeprom->ops.read_buffer(hw, first, length, data);
7258a9643ea8Slogwang }
7259a9643ea8Slogwang 
7260a9643ea8Slogwang static int
7261a9643ea8Slogwang ixgbe_set_eeprom(struct rte_eth_dev *dev,
7262a9643ea8Slogwang 		struct rte_dev_eeprom_info *in_eeprom)
7263a9643ea8Slogwang {
7264a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7265a9643ea8Slogwang 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
7266a9643ea8Slogwang 	uint16_t *data = in_eeprom->data;
7267a9643ea8Slogwang 	int first, length;
7268a9643ea8Slogwang 
7269a9643ea8Slogwang 	first = in_eeprom->offset >> 1;
7270a9643ea8Slogwang 	length = in_eeprom->length >> 1;
7271a9643ea8Slogwang 	if ((first > hw->eeprom.word_size) ||
7272a9643ea8Slogwang 	    ((first + length) > hw->eeprom.word_size))
7273a9643ea8Slogwang 		return -EINVAL;
7274a9643ea8Slogwang 
7275a9643ea8Slogwang 	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
7276a9643ea8Slogwang 
7277a9643ea8Slogwang 	return eeprom->ops.write_buffer(hw,  first, length, data);
7278a9643ea8Slogwang }
7279a9643ea8Slogwang 
7280d30ea906Sjfb8856606 static int
7281d30ea906Sjfb8856606 ixgbe_get_module_info(struct rte_eth_dev *dev,
7282d30ea906Sjfb8856606 		      struct rte_eth_dev_module_info *modinfo)
7283d30ea906Sjfb8856606 {
7284d30ea906Sjfb8856606 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7285d30ea906Sjfb8856606 	uint32_t status;
7286d30ea906Sjfb8856606 	uint8_t sff8472_rev, addr_mode;
7287d30ea906Sjfb8856606 	bool page_swap = false;
7288d30ea906Sjfb8856606 
7289d30ea906Sjfb8856606 	/* Check whether we support SFF-8472 or not */
7290d30ea906Sjfb8856606 	status = hw->phy.ops.read_i2c_eeprom(hw,
7291d30ea906Sjfb8856606 					     IXGBE_SFF_SFF_8472_COMP,
7292d30ea906Sjfb8856606 					     &sff8472_rev);
7293d30ea906Sjfb8856606 	if (status != 0)
7294d30ea906Sjfb8856606 		return -EIO;
7295d30ea906Sjfb8856606 
7296d30ea906Sjfb8856606 	/* Check whether the module requires an (unsupported) address change */
7297d30ea906Sjfb8856606 	status = hw->phy.ops.read_i2c_eeprom(hw,
7298d30ea906Sjfb8856606 					     IXGBE_SFF_SFF_8472_SWAP,
7299d30ea906Sjfb8856606 					     &addr_mode);
7300d30ea906Sjfb8856606 	if (status != 0)
7301d30ea906Sjfb8856606 		return -EIO;
7302d30ea906Sjfb8856606 
7303d30ea906Sjfb8856606 	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
7304d30ea906Sjfb8856606 		PMD_DRV_LOG(ERR,
7305d30ea906Sjfb8856606 			    "Address change required to access page 0xA2, "
7306d30ea906Sjfb8856606 			    "but not supported. Please report the module "
7307d30ea906Sjfb8856606 			    "type to the driver maintainers.");
7308d30ea906Sjfb8856606 		page_swap = true;
7309d30ea906Sjfb8856606 	}
7310d30ea906Sjfb8856606 
7311d30ea906Sjfb8856606 	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
7312d30ea906Sjfb8856606 		/* We have a SFP, but it does not support SFF-8472 */
7313d30ea906Sjfb8856606 		modinfo->type = RTE_ETH_MODULE_SFF_8079;
7314d30ea906Sjfb8856606 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
7315d30ea906Sjfb8856606 	} else {
7316d30ea906Sjfb8856606 		/* We have a SFP which supports a revision of SFF-8472. */
7317d30ea906Sjfb8856606 		modinfo->type = RTE_ETH_MODULE_SFF_8472;
7318d30ea906Sjfb8856606 		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
7319d30ea906Sjfb8856606 	}
7320d30ea906Sjfb8856606 
7321d30ea906Sjfb8856606 	return 0;
7322d30ea906Sjfb8856606 }
7323d30ea906Sjfb8856606 
7324d30ea906Sjfb8856606 static int
7325d30ea906Sjfb8856606 ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
7326d30ea906Sjfb8856606 			struct rte_dev_eeprom_info *info)
7327d30ea906Sjfb8856606 {
7328d30ea906Sjfb8856606 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7329d30ea906Sjfb8856606 	uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
7330d30ea906Sjfb8856606 	uint8_t databyte = 0xFF;
7331d30ea906Sjfb8856606 	uint8_t *data = info->data;
7332d30ea906Sjfb8856606 	uint32_t i = 0;
7333d30ea906Sjfb8856606 
7334d30ea906Sjfb8856606 	if (info->length == 0)
7335d30ea906Sjfb8856606 		return -EINVAL;
7336d30ea906Sjfb8856606 
7337d30ea906Sjfb8856606 	for (i = info->offset; i < info->offset + info->length; i++) {
7338d30ea906Sjfb8856606 		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
7339d30ea906Sjfb8856606 			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
7340d30ea906Sjfb8856606 		else
7341d30ea906Sjfb8856606 			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
7342d30ea906Sjfb8856606 
7343d30ea906Sjfb8856606 		if (status != 0)
7344d30ea906Sjfb8856606 			return -EIO;
7345d30ea906Sjfb8856606 
7346d30ea906Sjfb8856606 		data[i - info->offset] = databyte;
7347d30ea906Sjfb8856606 	}
7348d30ea906Sjfb8856606 
7349d30ea906Sjfb8856606 	return 0;
7350d30ea906Sjfb8856606 }
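/*
 * Module EEPROM contents are exposed through the generic API (sketch):
 *
 *	struct rte_eth_dev_module_info modinfo;
 *	rte_eth_dev_get_module_info(port_id, &modinfo);
 *
 * followed by rte_eth_dev_get_module_eeprom() with a buffer of
 * modinfo.eeprom_len bytes.
 */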
7351d30ea906Sjfb8856606 
7352a9643ea8Slogwang uint16_t
7353a9643ea8Slogwang ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
7354a9643ea8Slogwang 	switch (mac_type) {
7355a9643ea8Slogwang 	case ixgbe_mac_X550:
7356a9643ea8Slogwang 	case ixgbe_mac_X550EM_x:
7357a9643ea8Slogwang 	case ixgbe_mac_X550EM_a:
7358a9643ea8Slogwang 		return ETH_RSS_RETA_SIZE_512;
7359a9643ea8Slogwang 	case ixgbe_mac_X550_vf:
7360a9643ea8Slogwang 	case ixgbe_mac_X550EM_x_vf:
7361a9643ea8Slogwang 	case ixgbe_mac_X550EM_a_vf:
7362a9643ea8Slogwang 		return ETH_RSS_RETA_SIZE_64;
73634b05018fSfengbojiang 	case ixgbe_mac_X540_vf:
73644b05018fSfengbojiang 	case ixgbe_mac_82599_vf:
73654b05018fSfengbojiang 		return 0;
7366a9643ea8Slogwang 	default:
7367a9643ea8Slogwang 		return ETH_RSS_RETA_SIZE_128;
7368a9643ea8Slogwang 	}
7369a9643ea8Slogwang }
7370a9643ea8Slogwang 
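/*
 * Each 32-bit RETA register packs four 8-bit redirection entries, hence
 * the reta_idx >> 2 below; e.g. on X550 entry 130 lives in
 * IXGBE_ERETA((130 - 128) >> 2) = IXGBE_ERETA(0).
 */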
7371a9643ea8Slogwang uint32_t
7372a9643ea8Slogwang ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
7373a9643ea8Slogwang 	switch (mac_type) {
7374a9643ea8Slogwang 	case ixgbe_mac_X550:
7375a9643ea8Slogwang 	case ixgbe_mac_X550EM_x:
7376a9643ea8Slogwang 	case ixgbe_mac_X550EM_a:
7377a9643ea8Slogwang 		if (reta_idx < ETH_RSS_RETA_SIZE_128)
7378a9643ea8Slogwang 			return IXGBE_RETA(reta_idx >> 2);
7379a9643ea8Slogwang 		else
7380a9643ea8Slogwang 			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
7381a9643ea8Slogwang 	case ixgbe_mac_X550_vf:
7382a9643ea8Slogwang 	case ixgbe_mac_X550EM_x_vf:
7383a9643ea8Slogwang 	case ixgbe_mac_X550EM_a_vf:
7384a9643ea8Slogwang 		return IXGBE_VFRETA(reta_idx >> 2);
7385a9643ea8Slogwang 	default:
7386a9643ea8Slogwang 		return IXGBE_RETA(reta_idx >> 2);
7387a9643ea8Slogwang 	}
7388a9643ea8Slogwang }
7389a9643ea8Slogwang 
7390a9643ea8Slogwang uint32_t
7391a9643ea8Slogwang ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
7392a9643ea8Slogwang 	switch (mac_type) {
7393a9643ea8Slogwang 	case ixgbe_mac_X550_vf:
7394a9643ea8Slogwang 	case ixgbe_mac_X550EM_x_vf:
7395a9643ea8Slogwang 	case ixgbe_mac_X550EM_a_vf:
7396a9643ea8Slogwang 		return IXGBE_VFMRQC;
7397a9643ea8Slogwang 	default:
7398a9643ea8Slogwang 		return IXGBE_MRQC;
7399a9643ea8Slogwang 	}
7400a9643ea8Slogwang }
7401a9643ea8Slogwang 
7402a9643ea8Slogwang uint32_t
7403a9643ea8Slogwang ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
7404a9643ea8Slogwang 	switch (mac_type) {
7405a9643ea8Slogwang 	case ixgbe_mac_X550_vf:
7406a9643ea8Slogwang 	case ixgbe_mac_X550EM_x_vf:
7407a9643ea8Slogwang 	case ixgbe_mac_X550EM_a_vf:
7408a9643ea8Slogwang 		return IXGBE_VFRSSRK(i);
7409a9643ea8Slogwang 	default:
7410a9643ea8Slogwang 		return IXGBE_RSSRK(i);
7411a9643ea8Slogwang 	}
7412a9643ea8Slogwang }
7413a9643ea8Slogwang 
7414a9643ea8Slogwang bool
7415a9643ea8Slogwang ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
7416a9643ea8Slogwang 	switch (mac_type) {
7417a9643ea8Slogwang 	case ixgbe_mac_82599_vf:
7418a9643ea8Slogwang 	case ixgbe_mac_X540_vf:
7419a9643ea8Slogwang 		return 0;
7420a9643ea8Slogwang 	default:
7421a9643ea8Slogwang 		return 1;
7422a9643ea8Slogwang 	}
7423a9643ea8Slogwang }
7424a9643ea8Slogwang 
7425a9643ea8Slogwang static int
7426a9643ea8Slogwang ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
7427a9643ea8Slogwang 			struct rte_eth_dcb_info *dcb_info)
7428a9643ea8Slogwang {
7429a9643ea8Slogwang 	struct ixgbe_dcb_config *dcb_config =
7430a9643ea8Slogwang 			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
7431a9643ea8Slogwang 	struct ixgbe_dcb_tc_config *tc;
74322bfe3f2eSlogwang 	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
74332bfe3f2eSlogwang 	uint8_t nb_tcs;
7434a9643ea8Slogwang 	uint8_t i, j;
7435a9643ea8Slogwang 
7436a9643ea8Slogwang 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
7437a9643ea8Slogwang 		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
7438a9643ea8Slogwang 	else
7439a9643ea8Slogwang 		dcb_info->nb_tcs = 1;
7440a9643ea8Slogwang 
74412bfe3f2eSlogwang 	tc_queue = &dcb_info->tc_queue;
74422bfe3f2eSlogwang 	nb_tcs = dcb_info->nb_tcs;
74432bfe3f2eSlogwang 
7444a9643ea8Slogwang 	if (dcb_config->vt_mode) { /* vt is enabled*/
7445a9643ea8Slogwang 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
7446a9643ea8Slogwang 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
7447a9643ea8Slogwang 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7448a9643ea8Slogwang 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
74492bfe3f2eSlogwang 		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
74502bfe3f2eSlogwang 			for (j = 0; j < nb_tcs; j++) {
74512bfe3f2eSlogwang 				tc_queue->tc_rxq[0][j].base = j;
74522bfe3f2eSlogwang 				tc_queue->tc_rxq[0][j].nb_queue = 1;
74532bfe3f2eSlogwang 				tc_queue->tc_txq[0][j].base = j;
74542bfe3f2eSlogwang 				tc_queue->tc_txq[0][j].nb_queue = 1;
74552bfe3f2eSlogwang 			}
74562bfe3f2eSlogwang 		} else {
7457a9643ea8Slogwang 			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
74582bfe3f2eSlogwang 				for (j = 0; j < nb_tcs; j++) {
74592bfe3f2eSlogwang 					tc_queue->tc_rxq[i][j].base =
74602bfe3f2eSlogwang 						i * nb_tcs + j;
74612bfe3f2eSlogwang 					tc_queue->tc_rxq[i][j].nb_queue = 1;
74622bfe3f2eSlogwang 					tc_queue->tc_txq[i][j].base =
74632bfe3f2eSlogwang 						i * nb_tcs + j;
74642bfe3f2eSlogwang 					tc_queue->tc_txq[i][j].nb_queue = 1;
74652bfe3f2eSlogwang 				}
7466a9643ea8Slogwang 			}
7467a9643ea8Slogwang 		}
7468a9643ea8Slogwang 	} else { /* vt is disabled*/
7469a9643ea8Slogwang 		struct rte_eth_dcb_rx_conf *rx_conf =
7470a9643ea8Slogwang 				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
7471a9643ea8Slogwang 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
7472a9643ea8Slogwang 			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
7473a9643ea8Slogwang 		if (dcb_info->nb_tcs == ETH_4_TCS) {
7474a9643ea8Slogwang 			for (i = 0; i < dcb_info->nb_tcs; i++) {
7475a9643ea8Slogwang 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
7476a9643ea8Slogwang 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7477a9643ea8Slogwang 			}
7478a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
7479a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][1].base = 64;
7480a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][2].base = 96;
7481a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][3].base = 112;
7482a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
7483a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7484a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7485a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7486a9643ea8Slogwang 		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
7487a9643ea8Slogwang 			for (i = 0; i < dcb_info->nb_tcs; i++) {
7488a9643ea8Slogwang 				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
7489a9643ea8Slogwang 				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
7490a9643ea8Slogwang 			}
7491a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][0].base = 0;
7492a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][1].base = 32;
7493a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][2].base = 64;
7494a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][3].base = 80;
7495a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][4].base = 96;
7496a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][5].base = 104;
7497a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][6].base = 112;
7498a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][7].base = 120;
7499a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
7500a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
7501a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
7502a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
7503a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
7504a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
7505a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
7506a9643ea8Slogwang 			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
7507a9643ea8Slogwang 		}
7508a9643ea8Slogwang 	}
7509a9643ea8Slogwang 	for (i = 0; i < dcb_info->nb_tcs; i++) {
7510a9643ea8Slogwang 		tc = &dcb_config->tc_config[i];
7511a9643ea8Slogwang 		dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
7512a9643ea8Slogwang 	}
7513a9643ea8Slogwang 	return 0;
7514a9643ea8Slogwang }
7515a9643ea8Slogwang 
7516a9643ea8Slogwang /* Update e-tag ether type */
7517a9643ea8Slogwang static int
7518a9643ea8Slogwang ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
7519a9643ea8Slogwang 			    uint16_t ether_type)
7520a9643ea8Slogwang {
7521a9643ea8Slogwang 	uint32_t etag_etype;
7522a9643ea8Slogwang 
7523a9643ea8Slogwang 	if (hw->mac.type != ixgbe_mac_X550 &&
7524a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7525a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7526a9643ea8Slogwang 		return -ENOTSUP;
7527a9643ea8Slogwang 	}
7528a9643ea8Slogwang 
7529a9643ea8Slogwang 	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7530a9643ea8Slogwang 	etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
7531a9643ea8Slogwang 	etag_etype |= ether_type;
7532a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7533a9643ea8Slogwang 	IXGBE_WRITE_FLUSH(hw);
7534a9643ea8Slogwang 
7535a9643ea8Slogwang 	return 0;
7536a9643ea8Slogwang }
7537a9643ea8Slogwang 
7538a9643ea8Slogwang /* Enable e-tag tunnel */
7539a9643ea8Slogwang static int
7540a9643ea8Slogwang ixgbe_e_tag_enable(struct ixgbe_hw *hw)
7541a9643ea8Slogwang {
7542a9643ea8Slogwang 	uint32_t etag_etype;
7543a9643ea8Slogwang 
7544a9643ea8Slogwang 	if (hw->mac.type != ixgbe_mac_X550 &&
7545a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7546a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7547a9643ea8Slogwang 		return -ENOTSUP;
7548a9643ea8Slogwang 	}
7549a9643ea8Slogwang 
7550a9643ea8Slogwang 	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
7551a9643ea8Slogwang 	etag_etype |= IXGBE_ETAG_ETYPE_VALID;
7552a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
7553a9643ea8Slogwang 	IXGBE_WRITE_FLUSH(hw);
7554a9643ea8Slogwang 
7555a9643ea8Slogwang 	return 0;
7556a9643ea8Slogwang }
7557a9643ea8Slogwang 
7558a9643ea8Slogwang static int
7559a9643ea8Slogwang ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
7560*2d9fd380Sjfb8856606 		       struct ixgbe_l2_tunnel_conf *l2_tunnel)
7561a9643ea8Slogwang {
7562a9643ea8Slogwang 	int ret = 0;
7563a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7564a9643ea8Slogwang 	uint32_t i, rar_entries;
7565a9643ea8Slogwang 	uint32_t rar_low, rar_high;
7566a9643ea8Slogwang 
7567a9643ea8Slogwang 	if (hw->mac.type != ixgbe_mac_X550 &&
7568a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7569a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7570a9643ea8Slogwang 		return -ENOTSUP;
7571a9643ea8Slogwang 	}
7572a9643ea8Slogwang 
7573a9643ea8Slogwang 	rar_entries = ixgbe_get_num_rx_addrs(hw);
7574a9643ea8Slogwang 
7575a9643ea8Slogwang 	for (i = 1; i < rar_entries; i++) {
7576a9643ea8Slogwang 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7577a9643ea8Slogwang 		rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
7578a9643ea8Slogwang 		if ((rar_high & IXGBE_RAH_AV) &&
7579a9643ea8Slogwang 		    (rar_high & IXGBE_RAH_ADTYPE) &&
7580a9643ea8Slogwang 		    ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
7581a9643ea8Slogwang 		     l2_tunnel->tunnel_id)) {
7582a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
7583a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
7584a9643ea8Slogwang 
7585a9643ea8Slogwang 			ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
7586a9643ea8Slogwang 
7587a9643ea8Slogwang 			return ret;
7588a9643ea8Slogwang 		}
7589a9643ea8Slogwang 	}
7590a9643ea8Slogwang 
7591a9643ea8Slogwang 	return ret;
7592a9643ea8Slogwang }
7593a9643ea8Slogwang 
7594a9643ea8Slogwang static int
7595a9643ea8Slogwang ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
7596*2d9fd380Sjfb8856606 		       struct ixgbe_l2_tunnel_conf *l2_tunnel)
7597a9643ea8Slogwang {
7598a9643ea8Slogwang 	int ret = 0;
7599a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7600a9643ea8Slogwang 	uint32_t i, rar_entries;
7601a9643ea8Slogwang 	uint32_t rar_low, rar_high;
7602a9643ea8Slogwang 
7603a9643ea8Slogwang 	if (hw->mac.type != ixgbe_mac_X550 &&
7604a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7605a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7606a9643ea8Slogwang 		return -ENOTSUP;
7607a9643ea8Slogwang 	}
7608a9643ea8Slogwang 
7609a9643ea8Slogwang 	/* One entry per tunnel. Remove any existing entry first. */
7610a9643ea8Slogwang 	ixgbe_e_tag_filter_del(dev, l2_tunnel);
7611a9643ea8Slogwang 
7612a9643ea8Slogwang 	rar_entries = ixgbe_get_num_rx_addrs(hw);
7613a9643ea8Slogwang 
7614a9643ea8Slogwang 	for (i = 1; i < rar_entries; i++) {
7615a9643ea8Slogwang 		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
7616a9643ea8Slogwang 		if (rar_high & IXGBE_RAH_AV) {
7617a9643ea8Slogwang 			continue;
7618a9643ea8Slogwang 		} else {
7619a9643ea8Slogwang 			ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
7620a9643ea8Slogwang 			rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
7621a9643ea8Slogwang 			rar_low = l2_tunnel->tunnel_id;
7622a9643ea8Slogwang 
7623a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
7624a9643ea8Slogwang 			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
7625a9643ea8Slogwang 
7626a9643ea8Slogwang 			return ret;
7627a9643ea8Slogwang 		}
7628a9643ea8Slogwang 	}
7629a9643ea8Slogwang 
7630a9643ea8Slogwang 	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
7631a9643ea8Slogwang 		     " Please remove a rule before adding a new one.");
7632a9643ea8Slogwang 	return -EINVAL;
7633a9643ea8Slogwang }
7634a9643ea8Slogwang 
76352bfe3f2eSlogwang static inline struct ixgbe_l2_tn_filter *
76362bfe3f2eSlogwang ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
76372bfe3f2eSlogwang 			  struct ixgbe_l2_tn_key *key)
7638a9643ea8Slogwang {
76392bfe3f2eSlogwang 	int ret;
76402bfe3f2eSlogwang 
76412bfe3f2eSlogwang 	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
76422bfe3f2eSlogwang 	if (ret < 0)
76432bfe3f2eSlogwang 		return NULL;
76442bfe3f2eSlogwang 
76452bfe3f2eSlogwang 	return l2_tn_info->hash_map[ret];
76462bfe3f2eSlogwang }
76472bfe3f2eSlogwang 
76482bfe3f2eSlogwang static inline int
76492bfe3f2eSlogwang ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
76502bfe3f2eSlogwang 			  struct ixgbe_l2_tn_filter *l2_tn_filter)
76512bfe3f2eSlogwang {
76522bfe3f2eSlogwang 	int ret;
76532bfe3f2eSlogwang 
76542bfe3f2eSlogwang 	ret = rte_hash_add_key(l2_tn_info->hash_handle,
76552bfe3f2eSlogwang 			       &l2_tn_filter->key);
76562bfe3f2eSlogwang 
76572bfe3f2eSlogwang 	if (ret < 0) {
76582bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
76592bfe3f2eSlogwang 			    "Failed to insert L2 tunnel filter"
76602bfe3f2eSlogwang 			    " into hash table, error %d!",
76612bfe3f2eSlogwang 			    ret);
76622bfe3f2eSlogwang 		return ret;
76632bfe3f2eSlogwang 	}
76642bfe3f2eSlogwang 
76652bfe3f2eSlogwang 	l2_tn_info->hash_map[ret] = l2_tn_filter;
76662bfe3f2eSlogwang 
76672bfe3f2eSlogwang 	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
76682bfe3f2eSlogwang 
76692bfe3f2eSlogwang 	return 0;
76702bfe3f2eSlogwang }
76712bfe3f2eSlogwang 
76722bfe3f2eSlogwang static inline int
76732bfe3f2eSlogwang ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
76742bfe3f2eSlogwang 			  struct ixgbe_l2_tn_key *key)
76752bfe3f2eSlogwang {
76762bfe3f2eSlogwang 	int ret;
76772bfe3f2eSlogwang 	struct ixgbe_l2_tn_filter *l2_tn_filter;
76782bfe3f2eSlogwang 
76792bfe3f2eSlogwang 	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
76802bfe3f2eSlogwang 
76812bfe3f2eSlogwang 	if (ret < 0) {
76822bfe3f2eSlogwang 		PMD_DRV_LOG(ERR,
76832bfe3f2eSlogwang 			    "No such L2 tunnel filter to delete, error %d!",
76842bfe3f2eSlogwang 			    ret);
76852bfe3f2eSlogwang 		return ret;
76862bfe3f2eSlogwang 	}
76872bfe3f2eSlogwang 
76882bfe3f2eSlogwang 	l2_tn_filter = l2_tn_info->hash_map[ret];
76892bfe3f2eSlogwang 	l2_tn_info->hash_map[ret] = NULL;
76902bfe3f2eSlogwang 
76912bfe3f2eSlogwang 	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
76922bfe3f2eSlogwang 	rte_free(l2_tn_filter);
76932bfe3f2eSlogwang 
76942bfe3f2eSlogwang 	return 0;
76952bfe3f2eSlogwang }
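/* Editorial note on the pattern above: rte_hash_add_key(),
 * rte_hash_del_key() and rte_hash_lookup() all return the key's
 * positional index on success, so hash_map[] acts as a parallel array
 * mapping that index back to the filter node.  A minimal lookup
 * sketch mirroring the helpers above:
 *
 *	int pos = rte_hash_lookup(l2_tn_info->hash_handle, &key);
 *	struct ixgbe_l2_tn_filter *node =
 *		(pos < 0) ? NULL : l2_tn_info->hash_map[pos];
 */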
76962bfe3f2eSlogwang 
76972bfe3f2eSlogwang /* Add l2 tunnel filter */
76982bfe3f2eSlogwang int
76992bfe3f2eSlogwang ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
7700*2d9fd380Sjfb8856606 			       struct ixgbe_l2_tunnel_conf *l2_tunnel,
77012bfe3f2eSlogwang 			       bool restore)
77022bfe3f2eSlogwang {
77032bfe3f2eSlogwang 	int ret;
77042bfe3f2eSlogwang 	struct ixgbe_l2_tn_info *l2_tn_info =
77052bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
77062bfe3f2eSlogwang 	struct ixgbe_l2_tn_key key;
77072bfe3f2eSlogwang 	struct ixgbe_l2_tn_filter *node;
77082bfe3f2eSlogwang 
77092bfe3f2eSlogwang 	if (!restore) {
77102bfe3f2eSlogwang 		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
77112bfe3f2eSlogwang 		key.tn_id = l2_tunnel->tunnel_id;
77122bfe3f2eSlogwang 
77132bfe3f2eSlogwang 		node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
77142bfe3f2eSlogwang 
77152bfe3f2eSlogwang 		if (node) {
77162bfe3f2eSlogwang 			PMD_DRV_LOG(ERR,
77172bfe3f2eSlogwang 				    "The L2 tunnel filter already exists!");
77182bfe3f2eSlogwang 			return -EINVAL;
77192bfe3f2eSlogwang 		}
77202bfe3f2eSlogwang 
77212bfe3f2eSlogwang 		node = rte_zmalloc("ixgbe_l2_tn",
77222bfe3f2eSlogwang 				   sizeof(struct ixgbe_l2_tn_filter),
77232bfe3f2eSlogwang 				   0);
77242bfe3f2eSlogwang 		if (!node)
77252bfe3f2eSlogwang 			return -ENOMEM;
77262bfe3f2eSlogwang 
77272bfe3f2eSlogwang 		rte_memcpy(&node->key,
77282bfe3f2eSlogwang 				 &key,
77292bfe3f2eSlogwang 				 sizeof(struct ixgbe_l2_tn_key));
77302bfe3f2eSlogwang 		node->pool = l2_tunnel->pool;
77312bfe3f2eSlogwang 		ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
77322bfe3f2eSlogwang 		if (ret < 0) {
77332bfe3f2eSlogwang 			rte_free(node);
77342bfe3f2eSlogwang 			return ret;
77352bfe3f2eSlogwang 		}
77362bfe3f2eSlogwang 	}
7737a9643ea8Slogwang 
7738a9643ea8Slogwang 	switch (l2_tunnel->l2_tunnel_type) {
7739a9643ea8Slogwang 	case RTE_L2_TUNNEL_TYPE_E_TAG:
7740a9643ea8Slogwang 		ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
7741a9643ea8Slogwang 		break;
7742a9643ea8Slogwang 	default:
7743a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
7744a9643ea8Slogwang 		ret = -EINVAL;
7745a9643ea8Slogwang 		break;
7746a9643ea8Slogwang 	}
7747a9643ea8Slogwang 
77482bfe3f2eSlogwang 	if ((!restore) && (ret < 0))
77492bfe3f2eSlogwang 		(void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
77502bfe3f2eSlogwang 
7751a9643ea8Slogwang 	return ret;
7752a9643ea8Slogwang }
7753a9643ea8Slogwang 
7754a9643ea8Slogwang /* Delete l2 tunnel filter */
77552bfe3f2eSlogwang int
7756a9643ea8Slogwang ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
7757*2d9fd380Sjfb8856606 			       struct ixgbe_l2_tunnel_conf *l2_tunnel)
7758a9643ea8Slogwang {
77592bfe3f2eSlogwang 	int ret;
77602bfe3f2eSlogwang 	struct ixgbe_l2_tn_info *l2_tn_info =
77612bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
77622bfe3f2eSlogwang 	struct ixgbe_l2_tn_key key;
77632bfe3f2eSlogwang 
77642bfe3f2eSlogwang 	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
77652bfe3f2eSlogwang 	key.tn_id = l2_tunnel->tunnel_id;
77662bfe3f2eSlogwang 	ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
77672bfe3f2eSlogwang 	if (ret < 0)
77682bfe3f2eSlogwang 		return ret;
7769a9643ea8Slogwang 
7770a9643ea8Slogwang 	switch (l2_tunnel->l2_tunnel_type) {
7771a9643ea8Slogwang 	case RTE_L2_TUNNEL_TYPE_E_TAG:
7772a9643ea8Slogwang 		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
7773a9643ea8Slogwang 		break;
7774a9643ea8Slogwang 	default:
7775a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
7776a9643ea8Slogwang 		ret = -EINVAL;
7777a9643ea8Slogwang 		break;
7778a9643ea8Slogwang 	}
7779a9643ea8Slogwang 
7780a9643ea8Slogwang 	return ret;
7781a9643ea8Slogwang }
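/* Usage sketch (illustrative only; tunnel_id and pool are made-up
 * example values): adding and then removing an E-tag forwarding rule
 * through the two helpers above.
 *
 *	struct ixgbe_l2_tunnel_conf conf = {
 *		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *		.tunnel_id = 0x123,
 *		.pool = 1,
 *	};
 *
 *	ret = ixgbe_dev_l2_tunnel_filter_add(dev, &conf, FALSE);
 *	...
 *	ret = ixgbe_dev_l2_tunnel_filter_del(dev, &conf);
 */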
7782a9643ea8Slogwang 
7783a9643ea8Slogwang static int
7784a9643ea8Slogwang ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
7785a9643ea8Slogwang {
7786a9643ea8Slogwang 	int ret = 0;
7787a9643ea8Slogwang 	uint32_t ctrl;
7788a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7789a9643ea8Slogwang 
7790a9643ea8Slogwang 	if (hw->mac.type != ixgbe_mac_X550 &&
7791a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7792a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7793a9643ea8Slogwang 		return -ENOTSUP;
7794a9643ea8Slogwang 	}
7795a9643ea8Slogwang 
7796a9643ea8Slogwang 	ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
7797a9643ea8Slogwang 	ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
7798a9643ea8Slogwang 	if (en)
7799a9643ea8Slogwang 		ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
7800a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
7801a9643ea8Slogwang 
7802a9643ea8Slogwang 	return ret;
7803a9643ea8Slogwang }
7804a9643ea8Slogwang 
7805a9643ea8Slogwang static int
7806a9643ea8Slogwang ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
7807a9643ea8Slogwang 			uint16_t port)
7808a9643ea8Slogwang {
7809a9643ea8Slogwang 	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
7810a9643ea8Slogwang 	IXGBE_WRITE_FLUSH(hw);
7811a9643ea8Slogwang 
7812a9643ea8Slogwang 	return 0;
7813a9643ea8Slogwang }
7814a9643ea8Slogwang 
7815a9643ea8Slogwang /* There is only one register for the VxLAN UDP port, so several ports
7816a9643ea8Slogwang  * cannot coexist; adding a port simply overwrites the current value.
7817a9643ea8Slogwang  */
7818a9643ea8Slogwang static int
7819a9643ea8Slogwang ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
7820a9643ea8Slogwang 		     uint16_t port)
7821a9643ea8Slogwang {
7822a9643ea8Slogwang 	if (port == 0) {
7823a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
7824a9643ea8Slogwang 		return -EINVAL;
7825a9643ea8Slogwang 	}
7826a9643ea8Slogwang 
7827a9643ea8Slogwang 	return ixgbe_update_vxlan_port(hw, port);
7828a9643ea8Slogwang }
7829a9643ea8Slogwang 
7830a9643ea8Slogwang /* The VxLAN port cannot truly be deleted: since the VxLAN UDP port
7831a9643ea8Slogwang  * register must always hold a value, deleting a port resets the
7832a9643ea8Slogwang  * register to its original value of 0.
7833a9643ea8Slogwang  */
7834a9643ea8Slogwang static int
7835a9643ea8Slogwang ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
7836a9643ea8Slogwang 		     uint16_t port)
7837a9643ea8Slogwang {
7838a9643ea8Slogwang 	uint16_t cur_port;
7839a9643ea8Slogwang 
7840a9643ea8Slogwang 	cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
7841a9643ea8Slogwang 
7842a9643ea8Slogwang 	if (cur_port != port) {
7843a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
7844a9643ea8Slogwang 		return -EINVAL;
7845a9643ea8Slogwang 	}
7846a9643ea8Slogwang 
7847a9643ea8Slogwang 	return ixgbe_update_vxlan_port(hw, 0);
7848a9643ea8Slogwang }
7849a9643ea8Slogwang 
7850a9643ea8Slogwang /* Add UDP tunneling port */
7851a9643ea8Slogwang static int
7852a9643ea8Slogwang ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
7853a9643ea8Slogwang 			      struct rte_eth_udp_tunnel *udp_tunnel)
7854a9643ea8Slogwang {
7855a9643ea8Slogwang 	int ret = 0;
7856a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7857a9643ea8Slogwang 
7858a9643ea8Slogwang 	if (hw->mac.type != ixgbe_mac_X550 &&
7859a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7860a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7861a9643ea8Slogwang 		return -ENOTSUP;
7862a9643ea8Slogwang 	}
7863a9643ea8Slogwang 
7864a9643ea8Slogwang 	if (udp_tunnel == NULL)
7865a9643ea8Slogwang 		return -EINVAL;
7866a9643ea8Slogwang 
7867a9643ea8Slogwang 	switch (udp_tunnel->prot_type) {
7868a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_VXLAN:
7869a9643ea8Slogwang 		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
7870a9643ea8Slogwang 		break;
7871a9643ea8Slogwang 
7872a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_GENEVE:
7873a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_TEREDO:
7874a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7875a9643ea8Slogwang 		ret = -EINVAL;
7876a9643ea8Slogwang 		break;
7877a9643ea8Slogwang 
7878a9643ea8Slogwang 	default:
7879a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
7880a9643ea8Slogwang 		ret = -EINVAL;
7881a9643ea8Slogwang 		break;
7882a9643ea8Slogwang 	}
7883a9643ea8Slogwang 
7884a9643ea8Slogwang 	return ret;
7885a9643ea8Slogwang }
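/* Usage sketch (illustrative): applications reach the callback above
 * through the generic ethdev API, e.g. to register the IANA-assigned
 * VxLAN port:
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */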
7886a9643ea8Slogwang 
7887a9643ea8Slogwang /* Remove UDP tunneling port */
7888a9643ea8Slogwang static int
7889a9643ea8Slogwang ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
7890a9643ea8Slogwang 			      struct rte_eth_udp_tunnel *udp_tunnel)
7891a9643ea8Slogwang {
7892a9643ea8Slogwang 	int ret = 0;
7893a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7894a9643ea8Slogwang 
7895a9643ea8Slogwang 	if (hw->mac.type != ixgbe_mac_X550 &&
7896a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_x &&
7897a9643ea8Slogwang 	    hw->mac.type != ixgbe_mac_X550EM_a) {
7898a9643ea8Slogwang 		return -ENOTSUP;
7899a9643ea8Slogwang 	}
7900a9643ea8Slogwang 
7901a9643ea8Slogwang 	if (udp_tunnel == NULL)
7902a9643ea8Slogwang 		return -EINVAL;
7903a9643ea8Slogwang 
7904a9643ea8Slogwang 	switch (udp_tunnel->prot_type) {
7905a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_VXLAN:
7906a9643ea8Slogwang 		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
7907a9643ea8Slogwang 		break;
7908a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_GENEVE:
7909a9643ea8Slogwang 	case RTE_TUNNEL_TYPE_TEREDO:
7910a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
7911a9643ea8Slogwang 		ret = -EINVAL;
7912a9643ea8Slogwang 		break;
7913a9643ea8Slogwang 	default:
7914a9643ea8Slogwang 		PMD_DRV_LOG(ERR, "Invalid tunnel type");
7915a9643ea8Slogwang 		ret = -EINVAL;
7916a9643ea8Slogwang 		break;
7917a9643ea8Slogwang 	}
7918a9643ea8Slogwang 
7919a9643ea8Slogwang 	return ret;
7920a9643ea8Slogwang }
7921a9643ea8Slogwang 
79224418919fSjohnjiang static int
79234418919fSjohnjiang ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
79244418919fSjohnjiang {
79254418919fSjohnjiang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
79264418919fSjohnjiang 	int ret;
79274418919fSjohnjiang 
79284418919fSjohnjiang 	switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) {
79294418919fSjohnjiang 	case IXGBE_SUCCESS:
79304418919fSjohnjiang 		ret = 0;
79314418919fSjohnjiang 		break;
79324418919fSjohnjiang 	case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
79334418919fSjohnjiang 		ret = -ENOTSUP;
79344418919fSjohnjiang 		break;
79354418919fSjohnjiang 	default:
79364418919fSjohnjiang 		ret = -EAGAIN;
79374418919fSjohnjiang 		break;
79384418919fSjohnjiang 	}
79394418919fSjohnjiang 
79404418919fSjohnjiang 	return ret;
79414418919fSjohnjiang }
79424418919fSjohnjiang 
79434418919fSjohnjiang static int
79444418919fSjohnjiang ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
79454418919fSjohnjiang {
79464418919fSjohnjiang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
79474418919fSjohnjiang 	int ret;
79484418919fSjohnjiang 
79494418919fSjohnjiang 	switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) {
79504418919fSjohnjiang 	case IXGBE_SUCCESS:
79514418919fSjohnjiang 		ret = 0;
79524418919fSjohnjiang 		break;
79534418919fSjohnjiang 	case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
79544418919fSjohnjiang 		ret = -ENOTSUP;
79554418919fSjohnjiang 		break;
79564418919fSjohnjiang 	default:
79574418919fSjohnjiang 		ret = -EAGAIN;
79584418919fSjohnjiang 		break;
79594418919fSjohnjiang 	}
79604418919fSjohnjiang 
79614418919fSjohnjiang 	return ret;
79624418919fSjohnjiang }
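/* Editorial note: the two VF callbacks above back
 * rte_eth_promiscuous_enable()/rte_eth_promiscuous_disable(); the
 * -EAGAIN case appears to cover PF mailbox failures the caller may
 * retry, e.g.
 *
 *	ret = rte_eth_promiscuous_enable(port_id);
 */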
79634418919fSjohnjiang 
79644418919fSjohnjiang static int
7965a9643ea8Slogwang ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
7966a9643ea8Slogwang {
7967a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
79684418919fSjohnjiang 	int ret;
79694418919fSjohnjiang 	int mode = IXGBEVF_XCAST_MODE_ALLMULTI;
7970a9643ea8Slogwang 
79714418919fSjohnjiang 	switch (hw->mac.ops.update_xcast_mode(hw, mode)) {
79724418919fSjohnjiang 	case IXGBE_SUCCESS:
79734418919fSjohnjiang 		ret = 0;
79744418919fSjohnjiang 		break;
79754418919fSjohnjiang 	case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
79764418919fSjohnjiang 		ret = -ENOTSUP;
79774418919fSjohnjiang 		break;
79784418919fSjohnjiang 	default:
79794418919fSjohnjiang 		ret = -EAGAIN;
79804418919fSjohnjiang 		break;
7981a9643ea8Slogwang 	}
7982a9643ea8Slogwang 
79834418919fSjohnjiang 	return ret;
79844418919fSjohnjiang }
79854418919fSjohnjiang 
79864418919fSjohnjiang static int
7987a9643ea8Slogwang ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
7988a9643ea8Slogwang {
7989a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
79904418919fSjohnjiang 	int ret;
7991a9643ea8Slogwang 
79924418919fSjohnjiang 	switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) {
79934418919fSjohnjiang 	case IXGBE_SUCCESS:
79944418919fSjohnjiang 		ret = 0;
79954418919fSjohnjiang 		break;
79964418919fSjohnjiang 	case IXGBE_ERR_FEATURE_NOT_SUPPORTED:
79974418919fSjohnjiang 		ret = -ENOTSUP;
79984418919fSjohnjiang 		break;
79994418919fSjohnjiang 	default:
80004418919fSjohnjiang 		ret = -EAGAIN;
80014418919fSjohnjiang 		break;
80024418919fSjohnjiang 	}
80034418919fSjohnjiang 
80044418919fSjohnjiang 	return ret;
8005a9643ea8Slogwang }
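/* Usage sketch (illustrative): allmulticast mode is toggled through
 * the generic API, which lands in the two callbacks above on VF
 * ports:
 *
 *	ret = rte_eth_allmulticast_enable(port_id);
 *	...
 *	ret = rte_eth_allmulticast_disable(port_id);
 */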
8006a9643ea8Slogwang 
8007a9643ea8Slogwang static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
8008a9643ea8Slogwang {
8009a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8010a9643ea8Slogwang 	u32 in_msg = 0;
8011a9643ea8Slogwang 
8012d30ea906Sjfb8856606 	/* peek the message first */
8013d30ea906Sjfb8856606 	in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
80145af785ecSfengbojiang(姜凤波) 
80155af785ecSfengbojiang(姜凤波) 	/* PF reset VF event */
8016d30ea906Sjfb8856606 	if (in_msg == IXGBE_PF_CONTROL_MSG) {
8017d30ea906Sjfb8856606 		/* dummy mbx read to ack pf */
8018d30ea906Sjfb8856606 		if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
8019d30ea906Sjfb8856606 			return;
8020*2d9fd380Sjfb8856606 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
8021d30ea906Sjfb8856606 					     NULL);
8022d30ea906Sjfb8856606 	}
8023a9643ea8Slogwang }
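/* Usage sketch (illustrative; reset_cb is a hypothetical name): an
 * application can react to the PF reset event raised above by
 * registering a callback:
 *
 *	static int
 *	reset_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		 void *cb_arg, void *ret_param)
 *	{
 *		// stop, reconfigure and restart the port here
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *				      reset_cb, NULL);
 */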
8024a9643ea8Slogwang 
8025a9643ea8Slogwang static int
8026a9643ea8Slogwang ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
8027a9643ea8Slogwang {
8028a9643ea8Slogwang 	uint32_t eicr;
8029a9643ea8Slogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8030a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
8031a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8032d30ea906Sjfb8856606 	ixgbevf_intr_disable(dev);
8033a9643ea8Slogwang 
8034a9643ea8Slogwang 	/* read-on-clear nic registers here */
8035a9643ea8Slogwang 	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
8036a9643ea8Slogwang 	intr->flags = 0;
8037a9643ea8Slogwang 
8038a9643ea8Slogwang 	/* only one misc vector supported - mailbox */
8039a9643ea8Slogwang 	eicr &= IXGBE_VTEICR_MASK;
8040a9643ea8Slogwang 	if (eicr == IXGBE_MISC_VEC_ID)
8041a9643ea8Slogwang 		intr->flags |= IXGBE_FLAG_MAILBOX;
8042a9643ea8Slogwang 
8043a9643ea8Slogwang 	return 0;
8044a9643ea8Slogwang }
8045a9643ea8Slogwang 
8046a9643ea8Slogwang static int
8047a9643ea8Slogwang ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
8048a9643ea8Slogwang {
8049a9643ea8Slogwang 	struct ixgbe_interrupt *intr =
8050a9643ea8Slogwang 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
8051a9643ea8Slogwang 
8052a9643ea8Slogwang 	if (intr->flags & IXGBE_FLAG_MAILBOX) {
8053a9643ea8Slogwang 		ixgbevf_mbx_process(dev);
8054a9643ea8Slogwang 		intr->flags &= ~IXGBE_FLAG_MAILBOX;
8055a9643ea8Slogwang 	}
8056a9643ea8Slogwang 
8057d30ea906Sjfb8856606 	ixgbevf_intr_enable(dev);
8058a9643ea8Slogwang 
8059a9643ea8Slogwang 	return 0;
8060a9643ea8Slogwang }
8061a9643ea8Slogwang 
8062a9643ea8Slogwang static void
80632bfe3f2eSlogwang ixgbevf_dev_interrupt_handler(void *param)
8064a9643ea8Slogwang {
8065a9643ea8Slogwang 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
8066a9643ea8Slogwang 
8067a9643ea8Slogwang 	ixgbevf_dev_interrupt_get_status(dev);
8068a9643ea8Slogwang 	ixgbevf_dev_interrupt_action(dev);
8069a9643ea8Slogwang }
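/* Editorial note (assumption): this is the handler registered for the
 * VF interrupt source during device init, along the lines of
 *
 *	rte_intr_callback_register(intr_handle,
 *				   ixgbevf_dev_interrupt_handler, dev);
 */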
8070a9643ea8Slogwang 
80712bfe3f2eSlogwang /**
80722bfe3f2eSlogwang  *  ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
80732bfe3f2eSlogwang  *  @hw: pointer to hardware structure
80742bfe3f2eSlogwang  *
80752bfe3f2eSlogwang  *  Stops the transmit data path and waits for the HW to internally empty
80762bfe3f2eSlogwang  *  the Tx security block
80772bfe3f2eSlogwang  **/
80782bfe3f2eSlogwang int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
80792bfe3f2eSlogwang {
80802bfe3f2eSlogwang #define IXGBE_MAX_SECTX_POLL 40
8081a9643ea8Slogwang 
80822bfe3f2eSlogwang 	int i;
80832bfe3f2eSlogwang 	int sectxreg;
8084a9643ea8Slogwang 
80852bfe3f2eSlogwang 	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
80862bfe3f2eSlogwang 	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
80872bfe3f2eSlogwang 	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
80882bfe3f2eSlogwang 	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
80892bfe3f2eSlogwang 		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
80902bfe3f2eSlogwang 		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
80912bfe3f2eSlogwang 			break;
80922bfe3f2eSlogwang 		/* Use interrupt-safe sleep just in case */
80932bfe3f2eSlogwang 		usec_delay(1000);
80942bfe3f2eSlogwang 	}
80952bfe3f2eSlogwang 
80962bfe3f2eSlogwang 	/* For informational purposes only */
80972bfe3f2eSlogwang 	if (i >= IXGBE_MAX_SECTX_POLL)
80982bfe3f2eSlogwang 		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
80992bfe3f2eSlogwang 			 "path is fully disabled. Continuing with init.");
81002bfe3f2eSlogwang 
81012bfe3f2eSlogwang 	return IXGBE_SUCCESS;
81022bfe3f2eSlogwang }
81032bfe3f2eSlogwang 
81042bfe3f2eSlogwang /**
81052bfe3f2eSlogwang  *  ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
81062bfe3f2eSlogwang  *  @hw: pointer to hardware structure
81072bfe3f2eSlogwang  *
81082bfe3f2eSlogwang  *  Enables the transmit data path.
81092bfe3f2eSlogwang  **/
81102bfe3f2eSlogwang int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
81112bfe3f2eSlogwang {
81122bfe3f2eSlogwang 	uint32_t sectxreg;
81132bfe3f2eSlogwang 
81142bfe3f2eSlogwang 	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
81152bfe3f2eSlogwang 	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
81162bfe3f2eSlogwang 	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
81172bfe3f2eSlogwang 	IXGBE_WRITE_FLUSH(hw);
81182bfe3f2eSlogwang 
81192bfe3f2eSlogwang 	return IXGBE_SUCCESS;
81202bfe3f2eSlogwang }
81212bfe3f2eSlogwang 
81222bfe3f2eSlogwang /* restore n-tuple filter */
81232bfe3f2eSlogwang static inline void
81242bfe3f2eSlogwang ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
81252bfe3f2eSlogwang {
81262bfe3f2eSlogwang 	struct ixgbe_filter_info *filter_info =
81272bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
81282bfe3f2eSlogwang 	struct ixgbe_5tuple_filter *node;
81292bfe3f2eSlogwang 
81302bfe3f2eSlogwang 	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
81312bfe3f2eSlogwang 		ixgbe_inject_5tuple_filter(dev, node);
81322bfe3f2eSlogwang 	}
81332bfe3f2eSlogwang }
81342bfe3f2eSlogwang 
81352bfe3f2eSlogwang /* restore ethernet type filter */
81362bfe3f2eSlogwang static inline void
81372bfe3f2eSlogwang ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
81382bfe3f2eSlogwang {
81392bfe3f2eSlogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
81402bfe3f2eSlogwang 	struct ixgbe_filter_info *filter_info =
81412bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
81422bfe3f2eSlogwang 	int i;
81432bfe3f2eSlogwang 
81442bfe3f2eSlogwang 	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
81452bfe3f2eSlogwang 		if (filter_info->ethertype_mask & (1 << i)) {
81462bfe3f2eSlogwang 			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
81472bfe3f2eSlogwang 					filter_info->ethertype_filters[i].etqf);
81482bfe3f2eSlogwang 			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
81492bfe3f2eSlogwang 					filter_info->ethertype_filters[i].etqs);
81502bfe3f2eSlogwang 			IXGBE_WRITE_FLUSH(hw);
81512bfe3f2eSlogwang 		}
81522bfe3f2eSlogwang 	}
81532bfe3f2eSlogwang }
81542bfe3f2eSlogwang 
81552bfe3f2eSlogwang /* restore SYN filter */
81562bfe3f2eSlogwang static inline void
81572bfe3f2eSlogwang ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
81582bfe3f2eSlogwang {
81592bfe3f2eSlogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
81602bfe3f2eSlogwang 	struct ixgbe_filter_info *filter_info =
81612bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
81622bfe3f2eSlogwang 	uint32_t synqf;
81632bfe3f2eSlogwang 
81642bfe3f2eSlogwang 	synqf = filter_info->syn_info;
81652bfe3f2eSlogwang 
81662bfe3f2eSlogwang 	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
81672bfe3f2eSlogwang 		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
81682bfe3f2eSlogwang 		IXGBE_WRITE_FLUSH(hw);
81692bfe3f2eSlogwang 	}
81702bfe3f2eSlogwang }
81712bfe3f2eSlogwang 
81722bfe3f2eSlogwang /* restore L2 tunnel filter */
81732bfe3f2eSlogwang static inline void
81742bfe3f2eSlogwang ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
81752bfe3f2eSlogwang {
81762bfe3f2eSlogwang 	struct ixgbe_l2_tn_info *l2_tn_info =
81772bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
81782bfe3f2eSlogwang 	struct ixgbe_l2_tn_filter *node;
8179*2d9fd380Sjfb8856606 	struct ixgbe_l2_tunnel_conf l2_tn_conf;
81802bfe3f2eSlogwang 
81812bfe3f2eSlogwang 	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
81822bfe3f2eSlogwang 		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
81832bfe3f2eSlogwang 		l2_tn_conf.tunnel_id      = node->key.tn_id;
81842bfe3f2eSlogwang 		l2_tn_conf.pool           = node->pool;
81852bfe3f2eSlogwang 		(void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
81862bfe3f2eSlogwang 	}
81872bfe3f2eSlogwang }
81882bfe3f2eSlogwang 
8189d30ea906Sjfb8856606 /* restore rss filter */
8190d30ea906Sjfb8856606 static inline void
8191d30ea906Sjfb8856606 ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
8192d30ea906Sjfb8856606 {
8193d30ea906Sjfb8856606 	struct ixgbe_filter_info *filter_info =
8194d30ea906Sjfb8856606 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
8195d30ea906Sjfb8856606 
8196d30ea906Sjfb8856606 	if (filter_info->rss_info.conf.queue_num)
8197d30ea906Sjfb8856606 		ixgbe_config_rss_filter(dev,
8198d30ea906Sjfb8856606 			&filter_info->rss_info, TRUE);
8199d30ea906Sjfb8856606 }
8200d30ea906Sjfb8856606 
82012bfe3f2eSlogwang static int
82022bfe3f2eSlogwang ixgbe_filter_restore(struct rte_eth_dev *dev)
82032bfe3f2eSlogwang {
82042bfe3f2eSlogwang 	ixgbe_ntuple_filter_restore(dev);
82052bfe3f2eSlogwang 	ixgbe_ethertype_filter_restore(dev);
82062bfe3f2eSlogwang 	ixgbe_syn_filter_restore(dev);
82072bfe3f2eSlogwang 	ixgbe_fdir_filter_restore(dev);
82082bfe3f2eSlogwang 	ixgbe_l2_tn_filter_restore(dev);
8209d30ea906Sjfb8856606 	ixgbe_rss_filter_restore(dev);
82102bfe3f2eSlogwang 
82112bfe3f2eSlogwang 	return 0;
82122bfe3f2eSlogwang }
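/* Editorial note: the *_restore() helpers above re-program the NIC
 * from the software shadow state kept in dev_private, so
 * ixgbe_filter_restore() is intended to run when register contents
 * have been lost, e.g. after a device reset/start (assumption based
 * on the code above).
 */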
82132bfe3f2eSlogwang 
82142bfe3f2eSlogwang static void
82152bfe3f2eSlogwang ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
82162bfe3f2eSlogwang {
82172bfe3f2eSlogwang 	struct ixgbe_l2_tn_info *l2_tn_info =
82182bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
82192bfe3f2eSlogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
82202bfe3f2eSlogwang 
82212bfe3f2eSlogwang 	if (l2_tn_info->e_tag_en)
82222bfe3f2eSlogwang 		(void)ixgbe_e_tag_enable(hw);
82232bfe3f2eSlogwang 
82242bfe3f2eSlogwang 	if (l2_tn_info->e_tag_fwd_en)
82252bfe3f2eSlogwang 		(void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
82262bfe3f2eSlogwang 
82272bfe3f2eSlogwang 	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
82282bfe3f2eSlogwang }
82292bfe3f2eSlogwang 
82302bfe3f2eSlogwang /* remove all the n-tuple filters */
82312bfe3f2eSlogwang void
82322bfe3f2eSlogwang ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
82332bfe3f2eSlogwang {
82342bfe3f2eSlogwang 	struct ixgbe_filter_info *filter_info =
82352bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
82362bfe3f2eSlogwang 	struct ixgbe_5tuple_filter *p_5tuple;
82372bfe3f2eSlogwang 
82382bfe3f2eSlogwang 	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
82392bfe3f2eSlogwang 		ixgbe_remove_5tuple_filter(dev, p_5tuple);
82402bfe3f2eSlogwang }
82412bfe3f2eSlogwang 
82422bfe3f2eSlogwang /* remove all the ether type filters */
82432bfe3f2eSlogwang void
82442bfe3f2eSlogwang ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
82452bfe3f2eSlogwang {
82462bfe3f2eSlogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
82472bfe3f2eSlogwang 	struct ixgbe_filter_info *filter_info =
82482bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
82492bfe3f2eSlogwang 	int i;
82502bfe3f2eSlogwang 
82512bfe3f2eSlogwang 	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
82522bfe3f2eSlogwang 		if (filter_info->ethertype_mask & (1 << i) &&
82532bfe3f2eSlogwang 		    !filter_info->ethertype_filters[i].conf) {
82542bfe3f2eSlogwang 			(void)ixgbe_ethertype_filter_remove(filter_info,
82552bfe3f2eSlogwang 							    (uint8_t)i);
82562bfe3f2eSlogwang 			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
82572bfe3f2eSlogwang 			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
82582bfe3f2eSlogwang 			IXGBE_WRITE_FLUSH(hw);
82592bfe3f2eSlogwang 		}
82602bfe3f2eSlogwang 	}
82612bfe3f2eSlogwang }
82622bfe3f2eSlogwang 
82632bfe3f2eSlogwang /* remove the SYN filter */
82642bfe3f2eSlogwang void
82652bfe3f2eSlogwang ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
82662bfe3f2eSlogwang {
82672bfe3f2eSlogwang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
82682bfe3f2eSlogwang 	struct ixgbe_filter_info *filter_info =
82692bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
82702bfe3f2eSlogwang 
82712bfe3f2eSlogwang 	if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
82722bfe3f2eSlogwang 		filter_info->syn_info = 0;
82732bfe3f2eSlogwang 
82742bfe3f2eSlogwang 		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
82752bfe3f2eSlogwang 		IXGBE_WRITE_FLUSH(hw);
82762bfe3f2eSlogwang 	}
82772bfe3f2eSlogwang }
82782bfe3f2eSlogwang 
82792bfe3f2eSlogwang /* remove all the L2 tunnel filters */
82802bfe3f2eSlogwang int
82812bfe3f2eSlogwang ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
82822bfe3f2eSlogwang {
82832bfe3f2eSlogwang 	struct ixgbe_l2_tn_info *l2_tn_info =
82842bfe3f2eSlogwang 		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
82852bfe3f2eSlogwang 	struct ixgbe_l2_tn_filter *l2_tn_filter;
8286*2d9fd380Sjfb8856606 	struct ixgbe_l2_tunnel_conf l2_tn_conf;
82872bfe3f2eSlogwang 	int ret = 0;
82882bfe3f2eSlogwang 
82892bfe3f2eSlogwang 	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
82902bfe3f2eSlogwang 		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
82912bfe3f2eSlogwang 		l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
82922bfe3f2eSlogwang 		l2_tn_conf.pool           = l2_tn_filter->pool;
82932bfe3f2eSlogwang 		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
82942bfe3f2eSlogwang 		if (ret < 0)
82952bfe3f2eSlogwang 			return ret;
82962bfe3f2eSlogwang 	}
82972bfe3f2eSlogwang 
82982bfe3f2eSlogwang 	return 0;
82992bfe3f2eSlogwang }
83002bfe3f2eSlogwang 
83014418919fSjohnjiang void
83024418919fSjohnjiang ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev,
83034418919fSjohnjiang 				struct ixgbe_macsec_setting *macsec_setting)
83044418919fSjohnjiang {
83054418919fSjohnjiang 	struct ixgbe_macsec_setting *macsec =
83064418919fSjohnjiang 		IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
83074418919fSjohnjiang 
83084418919fSjohnjiang 	macsec->offload_en = macsec_setting->offload_en;
83094418919fSjohnjiang 	macsec->encrypt_en = macsec_setting->encrypt_en;
83104418919fSjohnjiang 	macsec->replayprotect_en = macsec_setting->replayprotect_en;
83114418919fSjohnjiang }
83124418919fSjohnjiang 
83134418919fSjohnjiang void
83144418919fSjohnjiang ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev)
83154418919fSjohnjiang {
83164418919fSjohnjiang 	struct ixgbe_macsec_setting *macsec =
83174418919fSjohnjiang 		IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private);
83184418919fSjohnjiang 
83194418919fSjohnjiang 	macsec->offload_en = 0;
83204418919fSjohnjiang 	macsec->encrypt_en = 0;
83214418919fSjohnjiang 	macsec->replayprotect_en = 0;
83224418919fSjohnjiang }
83234418919fSjohnjiang 
83244418919fSjohnjiang void
83254418919fSjohnjiang ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
83264418919fSjohnjiang 				struct ixgbe_macsec_setting *macsec_setting)
83274418919fSjohnjiang {
83284418919fSjohnjiang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
83294418919fSjohnjiang 	uint32_t ctrl;
83304418919fSjohnjiang 	uint8_t en = macsec_setting->encrypt_en;
83314418919fSjohnjiang 	uint8_t rp = macsec_setting->replayprotect_en;
83324418919fSjohnjiang 
83334418919fSjohnjiang 	/**
83344418919fSjohnjiang 	 * Workaround:
83354418919fSjohnjiang 	 * As no Tx equivalent of ixgbe_disable_sec_rx_path() is
83364418919fSjohnjiang 	 * implemented in the base code, and the base code must not
83374418919fSjohnjiang 	 * be modified in DPDK, just call the hand-written Tx
83384418919fSjohnjiang 	 * variant directly for now.
83394418919fSjohnjiang 	 * The hardware support has been checked by
83404418919fSjohnjiang 	 * ixgbe_disable_sec_rx_path().
83414418919fSjohnjiang 	 */
83424418919fSjohnjiang 	ixgbe_disable_sec_tx_path_generic(hw);
83434418919fSjohnjiang 
83444418919fSjohnjiang 	/* Enable Ethernet CRC (required by MACsec offload) */
83454418919fSjohnjiang 	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
83464418919fSjohnjiang 	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
83474418919fSjohnjiang 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
83484418919fSjohnjiang 
83494418919fSjohnjiang 	/* Enable the TX and RX crypto engines */
83504418919fSjohnjiang 	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
83514418919fSjohnjiang 	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
83524418919fSjohnjiang 	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
83534418919fSjohnjiang 
83544418919fSjohnjiang 	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
83554418919fSjohnjiang 	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
83564418919fSjohnjiang 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
83574418919fSjohnjiang 
83584418919fSjohnjiang 	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
83594418919fSjohnjiang 	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
83604418919fSjohnjiang 	ctrl |= 0x3;
83614418919fSjohnjiang 	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
83624418919fSjohnjiang 
83634418919fSjohnjiang 	/* Enable SA lookup */
83644418919fSjohnjiang 	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
83654418919fSjohnjiang 	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
83664418919fSjohnjiang 	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
83674418919fSjohnjiang 		     IXGBE_LSECTXCTRL_AUTH;
83684418919fSjohnjiang 	ctrl |= IXGBE_LSECTXCTRL_AISCI;
83694418919fSjohnjiang 	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
83704418919fSjohnjiang 	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
83714418919fSjohnjiang 	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
83724418919fSjohnjiang 
83734418919fSjohnjiang 	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
83744418919fSjohnjiang 	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
83754418919fSjohnjiang 	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
83764418919fSjohnjiang 	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
83774418919fSjohnjiang 	if (rp)
83784418919fSjohnjiang 		ctrl |= IXGBE_LSECRXCTRL_RP;
83794418919fSjohnjiang 	else
83804418919fSjohnjiang 		ctrl &= ~IXGBE_LSECRXCTRL_RP;
83814418919fSjohnjiang 	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
83824418919fSjohnjiang 
83834418919fSjohnjiang 	/* Start the data paths */
83844418919fSjohnjiang 	ixgbe_enable_sec_rx_path(hw);
83854418919fSjohnjiang 	/**
83864418919fSjohnjiang 	 * Workaround:
83874418919fSjohnjiang 	 * As no Tx equivalent of ixgbe_enable_sec_rx_path() is
83884418919fSjohnjiang 	 * implemented in the base code, and the base code must not
83894418919fSjohnjiang 	 * be modified in DPDK, just call the hand-written Tx
83904418919fSjohnjiang 	 * variant directly for now.
83914418919fSjohnjiang 	 */
83924418919fSjohnjiang 	ixgbe_enable_sec_tx_path_generic(hw);
83934418919fSjohnjiang }
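/* Call-flow sketch (assumption, not taken from this file): the MACsec
 * control path would typically record the requested settings first
 * and then program the registers:
 *
 *	struct ixgbe_macsec_setting m = {
 *		.offload_en = 1,
 *		.encrypt_en = 1,
 *		.replayprotect_en = 0,
 *	};
 *	ixgbe_dev_macsec_setting_save(dev, &m);
 *	ixgbe_dev_macsec_register_enable(dev, &m);
 */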
83944418919fSjohnjiang 
83954418919fSjohnjiang void
83964418919fSjohnjiang ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev)
83974418919fSjohnjiang {
83984418919fSjohnjiang 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
83994418919fSjohnjiang 	uint32_t ctrl;
84004418919fSjohnjiang 
84014418919fSjohnjiang 	/**
84024418919fSjohnjiang 	 * Workaround:
84034418919fSjohnjiang 	 * As no Tx equivalent of ixgbe_disable_sec_rx_path() is
84044418919fSjohnjiang 	 * implemented in the base code, and the base code must not
84054418919fSjohnjiang 	 * be modified in DPDK, just call the hand-written Tx
84064418919fSjohnjiang 	 * variant directly for now.
84074418919fSjohnjiang 	 * The hardware support has been checked by
84084418919fSjohnjiang 	 * ixgbe_disable_sec_rx_path().
84094418919fSjohnjiang 	 */
84104418919fSjohnjiang 	ixgbe_disable_sec_tx_path_generic(hw);
84114418919fSjohnjiang 
84124418919fSjohnjiang 	/* Disable the TX and RX crypto engines */
84134418919fSjohnjiang 	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
84144418919fSjohnjiang 	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
84154418919fSjohnjiang 	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
84164418919fSjohnjiang 
84174418919fSjohnjiang 	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
84184418919fSjohnjiang 	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
84194418919fSjohnjiang 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
84204418919fSjohnjiang 
84214418919fSjohnjiang 	/* Disable SA lookup */
84224418919fSjohnjiang 	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
84234418919fSjohnjiang 	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
84244418919fSjohnjiang 	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
84254418919fSjohnjiang 	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
84264418919fSjohnjiang 
84274418919fSjohnjiang 	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
84284418919fSjohnjiang 	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
84294418919fSjohnjiang 	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
84304418919fSjohnjiang 	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
84314418919fSjohnjiang 
84324418919fSjohnjiang 	/* Start the data paths */
84334418919fSjohnjiang 	ixgbe_enable_sec_rx_path(hw);
84344418919fSjohnjiang 	/**
84354418919fSjohnjiang 	 * Workaround:
84364418919fSjohnjiang 	 * As no Tx equivalent of ixgbe_enable_sec_rx_path() is
84374418919fSjohnjiang 	 * implemented in the base code, and the base code must not
84384418919fSjohnjiang 	 * be modified in DPDK, just call the hand-written Tx
84394418919fSjohnjiang 	 * variant directly for now.
84404418919fSjohnjiang 	 */
84414418919fSjohnjiang 	ixgbe_enable_sec_tx_path_generic(hw);
84424418919fSjohnjiang }
84434418919fSjohnjiang 
84442bfe3f2eSlogwang RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
84452bfe3f2eSlogwang RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
84462bfe3f2eSlogwang RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
84472bfe3f2eSlogwang RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
84482bfe3f2eSlogwang RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
84492bfe3f2eSlogwang RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
84504b05018fSfengbojiang RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
84514b05018fSfengbojiang 			      IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
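/* Devargs usage sketch (illustrative; the PCI address is made up):
 * pflink_fullchk is passed per device on the EAL command line, e.g.
 *
 *	dpdk-testpmd -a 0000:03:00.2,pflink_fullchk=1 -- -i
 */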
8452d30ea906Sjfb8856606 
8453*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ixgbe_logtype_init, pmd.net.ixgbe.init, NOTICE);
8454*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ixgbe_logtype_driver, pmd.net.ixgbe.driver, NOTICE);
8455*2d9fd380Sjfb8856606 
84564418919fSjohnjiang #ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
8457*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ixgbe_logtype_rx, pmd.net.ixgbe.rx, DEBUG);
84584418919fSjohnjiang #endif
84594418919fSjohnjiang #ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
8460*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ixgbe_logtype_tx, pmd.net.ixgbe.tx, DEBUG);
84614418919fSjohnjiang #endif
84624418919fSjohnjiang #ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
8463*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ixgbe_logtype_tx_free, pmd.net.ixgbe.tx_free, DEBUG);
84644418919fSjohnjiang #endif
8465