1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606 * Copyright(c) 2010-2016 Intel Corporation
3a9643ea8Slogwang */
4a9643ea8Slogwang
5a9643ea8Slogwang #include <sys/queue.h>
6a9643ea8Slogwang #include <stdio.h>
7a9643ea8Slogwang #include <errno.h>
8a9643ea8Slogwang #include <stdint.h>
9a9643ea8Slogwang #include <stdarg.h>
10a9643ea8Slogwang
114418919fSjohnjiang #include <rte_string_fns.h>
12a9643ea8Slogwang #include <rte_common.h>
13a9643ea8Slogwang #include <rte_interrupts.h>
14a9643ea8Slogwang #include <rte_byteorder.h>
15a9643ea8Slogwang #include <rte_log.h>
16a9643ea8Slogwang #include <rte_debug.h>
17a9643ea8Slogwang #include <rte_pci.h>
182bfe3f2eSlogwang #include <rte_bus_pci.h>
19a9643ea8Slogwang #include <rte_ether.h>
20d30ea906Sjfb8856606 #include <rte_ethdev_driver.h>
212bfe3f2eSlogwang #include <rte_ethdev_pci.h>
22a9643ea8Slogwang #include <rte_memory.h>
23a9643ea8Slogwang #include <rte_eal.h>
24a9643ea8Slogwang #include <rte_malloc.h>
25a9643ea8Slogwang #include <rte_dev.h>
26a9643ea8Slogwang
27a9643ea8Slogwang #include "e1000_logs.h"
28a9643ea8Slogwang #include "base/e1000_api.h"
29a9643ea8Slogwang #include "e1000_ethdev.h"
30a9643ea8Slogwang #include "igb_regs.h"
31a9643ea8Slogwang
32a9643ea8Slogwang /*
33a9643ea8Slogwang * Default values for port configuration
34a9643ea8Slogwang */
35a9643ea8Slogwang #define IGB_DEFAULT_RX_FREE_THRESH 32
36a9643ea8Slogwang
37a9643ea8Slogwang #define IGB_DEFAULT_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
38a9643ea8Slogwang #define IGB_DEFAULT_RX_HTHRESH 8
39a9643ea8Slogwang #define IGB_DEFAULT_RX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 4)
40a9643ea8Slogwang
41a9643ea8Slogwang #define IGB_DEFAULT_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
42a9643ea8Slogwang #define IGB_DEFAULT_TX_HTHRESH 1
43a9643ea8Slogwang #define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16)
44a9643ea8Slogwang
45a9643ea8Slogwang /* Bit shift and mask */
46a9643ea8Slogwang #define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
47a9643ea8Slogwang #define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
48a9643ea8Slogwang #define IGB_8_BIT_WIDTH CHAR_BIT
49a9643ea8Slogwang #define IGB_8_BIT_MASK UINT8_MAX
50a9643ea8Slogwang
51a9643ea8Slogwang /* Additional timesync values. */
52a9643ea8Slogwang #define E1000_CYCLECOUNTER_MASK 0xffffffffffffffffULL
53a9643ea8Slogwang #define E1000_ETQF_FILTER_1588 3
54a9643ea8Slogwang #define IGB_82576_TSYNC_SHIFT 16
55a9643ea8Slogwang #define E1000_INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
56a9643ea8Slogwang #define E1000_INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
57a9643ea8Slogwang #define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
58a9643ea8Slogwang
59a9643ea8Slogwang #define E1000_VTIVAR_MISC 0x01740
60a9643ea8Slogwang #define E1000_VTIVAR_MISC_MASK 0xFF
61a9643ea8Slogwang #define E1000_VTIVAR_VALID 0x80
62a9643ea8Slogwang #define E1000_VTIVAR_MISC_MAILBOX 0
63a9643ea8Slogwang #define E1000_VTIVAR_MISC_INTR_MASK 0x3
64a9643ea8Slogwang
65a9643ea8Slogwang /* External VLAN Enable bit mask */
66a9643ea8Slogwang #define E1000_CTRL_EXT_EXT_VLAN (1 << 26)
67a9643ea8Slogwang
68a9643ea8Slogwang /* External VLAN Ether Type bit mask and shift */
69a9643ea8Slogwang #define E1000_VET_VET_EXT 0xFFFF0000
70a9643ea8Slogwang #define E1000_VET_VET_EXT_SHIFT 16
71a9643ea8Slogwang
721646932aSjfb8856606 /* MSI-X other interrupt vector */
731646932aSjfb8856606 #define IGB_MSIX_OTHER_INTR_VEC 0
741646932aSjfb8856606
75a9643ea8Slogwang static int eth_igb_configure(struct rte_eth_dev *dev);
76a9643ea8Slogwang static int eth_igb_start(struct rte_eth_dev *dev);
77*2d9fd380Sjfb8856606 static int eth_igb_stop(struct rte_eth_dev *dev);
78a9643ea8Slogwang static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
79a9643ea8Slogwang static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
80*2d9fd380Sjfb8856606 static int eth_igb_close(struct rte_eth_dev *dev);
81d30ea906Sjfb8856606 static int eth_igb_reset(struct rte_eth_dev *dev);
824418919fSjohnjiang static int eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
834418919fSjohnjiang static int eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
844418919fSjohnjiang static int eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
854418919fSjohnjiang static int eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
86a9643ea8Slogwang static int eth_igb_link_update(struct rte_eth_dev *dev,
87a9643ea8Slogwang int wait_to_complete);
882bfe3f2eSlogwang static int eth_igb_stats_get(struct rte_eth_dev *dev,
89a9643ea8Slogwang struct rte_eth_stats *rte_stats);
90a9643ea8Slogwang static int eth_igb_xstats_get(struct rte_eth_dev *dev,
91a9643ea8Slogwang struct rte_eth_xstat *xstats, unsigned n);
922bfe3f2eSlogwang static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
932bfe3f2eSlogwang const uint64_t *ids,
942bfe3f2eSlogwang uint64_t *values, unsigned int n);
95a9643ea8Slogwang static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
96a9643ea8Slogwang struct rte_eth_xstat_name *xstats_names,
972bfe3f2eSlogwang unsigned int size);
982bfe3f2eSlogwang static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
992bfe3f2eSlogwang struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
1002bfe3f2eSlogwang unsigned int limit);
1014418919fSjohnjiang static int eth_igb_stats_reset(struct rte_eth_dev *dev);
1024418919fSjohnjiang static int eth_igb_xstats_reset(struct rte_eth_dev *dev);
1032bfe3f2eSlogwang static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
1042bfe3f2eSlogwang char *fw_version, size_t fw_size);
1054418919fSjohnjiang static int eth_igb_infos_get(struct rte_eth_dev *dev,
106a9643ea8Slogwang struct rte_eth_dev_info *dev_info);
107a9643ea8Slogwang static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
1084418919fSjohnjiang static int eth_igbvf_infos_get(struct rte_eth_dev *dev,
109a9643ea8Slogwang struct rte_eth_dev_info *dev_info);
110a9643ea8Slogwang static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
111a9643ea8Slogwang struct rte_eth_fc_conf *fc_conf);
112a9643ea8Slogwang static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
113a9643ea8Slogwang struct rte_eth_fc_conf *fc_conf);
1142bfe3f2eSlogwang static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
115a9643ea8Slogwang static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
116a9643ea8Slogwang static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
1172bfe3f2eSlogwang static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
1182bfe3f2eSlogwang struct rte_intr_handle *handle);
1192bfe3f2eSlogwang static void eth_igb_interrupt_handler(void *param);
120a9643ea8Slogwang static int igb_hardware_init(struct e1000_hw *hw);
121a9643ea8Slogwang static void igb_hw_control_acquire(struct e1000_hw *hw);
122a9643ea8Slogwang static void igb_hw_control_release(struct e1000_hw *hw);
123a9643ea8Slogwang static void igb_init_manageability(struct e1000_hw *hw);
124a9643ea8Slogwang static void igb_release_manageability(struct e1000_hw *hw);
125a9643ea8Slogwang
126a9643ea8Slogwang static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
127a9643ea8Slogwang
128a9643ea8Slogwang static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
129a9643ea8Slogwang uint16_t vlan_id, int on);
130a9643ea8Slogwang static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
131a9643ea8Slogwang enum rte_vlan_type vlan_type,
132a9643ea8Slogwang uint16_t tpid_id);
1332bfe3f2eSlogwang static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
134a9643ea8Slogwang
135a9643ea8Slogwang static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
136a9643ea8Slogwang static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
137a9643ea8Slogwang static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
138a9643ea8Slogwang static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
139a9643ea8Slogwang static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
140a9643ea8Slogwang static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
141a9643ea8Slogwang
142a9643ea8Slogwang static int eth_igb_led_on(struct rte_eth_dev *dev);
143a9643ea8Slogwang static int eth_igb_led_off(struct rte_eth_dev *dev);
144a9643ea8Slogwang
1451646932aSjfb8856606 static void igb_intr_disable(struct rte_eth_dev *dev);
146a9643ea8Slogwang static int igb_get_rx_buffer_size(struct e1000_hw *hw);
1472bfe3f2eSlogwang static int eth_igb_rar_set(struct rte_eth_dev *dev,
1484418919fSjohnjiang struct rte_ether_addr *mac_addr,
149a9643ea8Slogwang uint32_t index, uint32_t pool);
150a9643ea8Slogwang static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
151d30ea906Sjfb8856606 static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
1524418919fSjohnjiang struct rte_ether_addr *addr);
153a9643ea8Slogwang
154a9643ea8Slogwang static void igbvf_intr_disable(struct e1000_hw *hw);
155a9643ea8Slogwang static int igbvf_dev_configure(struct rte_eth_dev *dev);
156a9643ea8Slogwang static int igbvf_dev_start(struct rte_eth_dev *dev);
157*2d9fd380Sjfb8856606 static int igbvf_dev_stop(struct rte_eth_dev *dev);
158*2d9fd380Sjfb8856606 static int igbvf_dev_close(struct rte_eth_dev *dev);
1594418919fSjohnjiang static int igbvf_promiscuous_enable(struct rte_eth_dev *dev);
1604418919fSjohnjiang static int igbvf_promiscuous_disable(struct rte_eth_dev *dev);
1614418919fSjohnjiang static int igbvf_allmulticast_enable(struct rte_eth_dev *dev);
1624418919fSjohnjiang static int igbvf_allmulticast_disable(struct rte_eth_dev *dev);
163a9643ea8Slogwang static int eth_igbvf_link_update(struct e1000_hw *hw);
1642bfe3f2eSlogwang static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
165a9643ea8Slogwang struct rte_eth_stats *rte_stats);
166a9643ea8Slogwang static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
167a9643ea8Slogwang struct rte_eth_xstat *xstats, unsigned n);
168a9643ea8Slogwang static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
169a9643ea8Slogwang struct rte_eth_xstat_name *xstats_names,
170a9643ea8Slogwang unsigned limit);
1714418919fSjohnjiang static int eth_igbvf_stats_reset(struct rte_eth_dev *dev);
172a9643ea8Slogwang static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
173a9643ea8Slogwang uint16_t vlan_id, int on);
174a9643ea8Slogwang static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
175a9643ea8Slogwang static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
176d30ea906Sjfb8856606 static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
1774418919fSjohnjiang struct rte_ether_addr *addr);
178a9643ea8Slogwang static int igbvf_get_reg_length(struct rte_eth_dev *dev);
179a9643ea8Slogwang static int igbvf_get_regs(struct rte_eth_dev *dev,
180a9643ea8Slogwang struct rte_dev_reg_info *regs);
181a9643ea8Slogwang
182a9643ea8Slogwang static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
183a9643ea8Slogwang struct rte_eth_rss_reta_entry64 *reta_conf,
184a9643ea8Slogwang uint16_t reta_size);
185a9643ea8Slogwang static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
186a9643ea8Slogwang struct rte_eth_rss_reta_entry64 *reta_conf,
187a9643ea8Slogwang uint16_t reta_size);
188a9643ea8Slogwang
189a9643ea8Slogwang static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
190a9643ea8Slogwang struct rte_eth_ntuple_filter *ntuple_filter);
191a9643ea8Slogwang static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
192a9643ea8Slogwang struct rte_eth_ntuple_filter *ntuple_filter);
193a9643ea8Slogwang static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
194a9643ea8Slogwang struct rte_eth_ntuple_filter *ntuple_filter);
195a9643ea8Slogwang static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
196a9643ea8Slogwang struct rte_eth_ntuple_filter *ntuple_filter);
197a9643ea8Slogwang static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
198a9643ea8Slogwang enum rte_filter_type filter_type,
199a9643ea8Slogwang enum rte_filter_op filter_op,
200a9643ea8Slogwang void *arg);
201a9643ea8Slogwang static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
202a9643ea8Slogwang static int eth_igb_get_regs(struct rte_eth_dev *dev,
203a9643ea8Slogwang struct rte_dev_reg_info *regs);
204a9643ea8Slogwang static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
205a9643ea8Slogwang static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
206a9643ea8Slogwang struct rte_dev_eeprom_info *eeprom);
207a9643ea8Slogwang static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
208a9643ea8Slogwang struct rte_dev_eeprom_info *eeprom);
209d30ea906Sjfb8856606 static int eth_igb_get_module_info(struct rte_eth_dev *dev,
210d30ea906Sjfb8856606 struct rte_eth_dev_module_info *modinfo);
211d30ea906Sjfb8856606 static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
212d30ea906Sjfb8856606 struct rte_dev_eeprom_info *info);
213a9643ea8Slogwang static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
2144418919fSjohnjiang struct rte_ether_addr *mc_addr_set,
215a9643ea8Slogwang uint32_t nb_mc_addr);
216a9643ea8Slogwang static int igb_timesync_enable(struct rte_eth_dev *dev);
217a9643ea8Slogwang static int igb_timesync_disable(struct rte_eth_dev *dev);
218a9643ea8Slogwang static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
219a9643ea8Slogwang struct timespec *timestamp,
220a9643ea8Slogwang uint32_t flags);
221a9643ea8Slogwang static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
222a9643ea8Slogwang struct timespec *timestamp);
223a9643ea8Slogwang static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
224a9643ea8Slogwang static int igb_timesync_read_time(struct rte_eth_dev *dev,
225a9643ea8Slogwang struct timespec *timestamp);
226a9643ea8Slogwang static int igb_timesync_write_time(struct rte_eth_dev *dev,
227a9643ea8Slogwang const struct timespec *timestamp);
228a9643ea8Slogwang static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
229a9643ea8Slogwang uint16_t queue_id);
230a9643ea8Slogwang static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
231a9643ea8Slogwang uint16_t queue_id);
232a9643ea8Slogwang static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
233a9643ea8Slogwang uint8_t queue, uint8_t msix_vector);
234a9643ea8Slogwang static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
235a9643ea8Slogwang uint8_t index, uint8_t offset);
236a9643ea8Slogwang static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
2372bfe3f2eSlogwang static void eth_igbvf_interrupt_handler(void *param);
238a9643ea8Slogwang static void igbvf_mbx_process(struct rte_eth_dev *dev);
2392bfe3f2eSlogwang static int igb_filter_restore(struct rte_eth_dev *dev);
240a9643ea8Slogwang
241a9643ea8Slogwang /*
242a9643ea8Slogwang * Define VF Stats MACRO for Non "cleared on read" register
243a9643ea8Slogwang */
/*
 * Update a VF statistics counter from a hardware register that is NOT
 * "cleared on read".
 *
 * reg:  register to read (via E1000_READ_REG on the in-scope 'hw')
 * last: previous raw register value, updated in place
 * cur:  accumulated counter, incremented by the delta since 'last'
 *
 * The delta is masked with UINT_MAX so a 32-bit register wrap between two
 * reads still produces the correct increment.  The body is wrapped in
 * do { } while (0) so the macro expands to a single statement: the old
 * bare { } block plus the caller's trailing ';' made
 * "if (x) UPDATE_VF_STAT(...); else ..." a syntax error.
 */
#define UPDATE_VF_STAT(reg, last, cur)			\
do {							\
	u32 latest = E1000_READ_REG(hw, reg);		\
	cur += (latest - last) & UINT_MAX;		\
	last = latest;					\
} while (0)
250a9643ea8Slogwang
251a9643ea8Slogwang #define IGB_FC_PAUSE_TIME 0x0680
252a9643ea8Slogwang #define IGB_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
253a9643ea8Slogwang #define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
254a9643ea8Slogwang
255a9643ea8Slogwang #define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */
256a9643ea8Slogwang
257a9643ea8Slogwang static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
258a9643ea8Slogwang
/*
 * The set of PCI devices this driver supports (PF side):
 * 82575/82576/82580, I210/I211, I350/I354 and DH89xx SGMII/SerDes parts.
 * Terminated by a zero vendor_id sentinel entry.
 */
static const struct rte_pci_id pci_id_igb_map[] = {
	/* 82576 family */
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },

	/* 82575 family */
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },

	/* 82580 family */
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },

	/* I350, I210/I211, I354 and DH89xx parts */
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
306a9643ea8Slogwang
/*
 * The set of PCI devices this driver supports on the VF side
 * (82576 and I350 virtual functions, plus their Hyper-V variants).
 * Terminated by a zero vendor_id sentinel entry.
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};
317a9643ea8Slogwang
/* Rx descriptor ring limits reported to applications via dev_infos_get:
 * min/max ring size and required descriptor-count alignment. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};
323a9643ea8Slogwang
/* Tx descriptor ring limits reported to applications via dev_infos_get,
 * including per-packet segment limits used by tx_prepare. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	/* NOTE(review): reuses the Rx alignment constant — presumably Rx and
	 * Tx descriptors share the same size/alignment on this hardware;
	 * confirm against e1000_ethdev.h before changing. */
	.nb_align = IGB_RXD_ALIGN,
	.nb_seg_max = IGB_TX_MAX_SEG,
	.nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
};
331a9643ea8Slogwang
/*
 * Ethdev callback table for the PF (physical function) driver.
 * Registered with the rte_ethdev layer at probe time; each member maps a
 * generic ethdev operation to its igb-specific implementation.
 */
static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure = eth_igb_configure,
	.dev_start = eth_igb_start,
	.dev_stop = eth_igb_stop,
	.dev_set_link_up = eth_igb_dev_set_link_up,
	.dev_set_link_down = eth_igb_dev_set_link_down,
	.dev_close = eth_igb_close,
	.dev_reset = eth_igb_reset,
	.promiscuous_enable = eth_igb_promiscuous_enable,
	.promiscuous_disable = eth_igb_promiscuous_disable,
	.allmulticast_enable = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igb_stats_get,
	.xstats_get = eth_igb_xstats_get,
	.xstats_get_by_id = eth_igb_xstats_get_by_id,
	.xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
	.xstats_get_names = eth_igb_xstats_get_names,
	.stats_reset = eth_igb_stats_reset,
	.xstats_reset = eth_igb_xstats_reset,
	.fw_version_get = eth_igb_fw_version_get,
	.dev_infos_get = eth_igb_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.mtu_set = eth_igb_mtu_set,
	/* VLAN filtering / tagging */
	.vlan_filter_set = eth_igb_vlan_filter_set,
	.vlan_tpid_set = eth_igb_vlan_tpid_set,
	.vlan_offload_set = eth_igb_vlan_offload_set,
	/* Rx/Tx queue lifecycle (implemented in igb_rxtx.c) */
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
	.rx_queue_release = eth_igb_rx_queue_release,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.tx_done_cleanup = eth_igb_tx_done_cleanup,
	.dev_led_on = eth_igb_led_on,
	.dev_led_off = eth_igb_led_off,
	.flow_ctrl_get = eth_igb_flow_ctrl_get,
	.flow_ctrl_set = eth_igb_flow_ctrl_set,
	/* MAC address management (RAR = Receive Address Register) */
	.mac_addr_add = eth_igb_rar_set,
	.mac_addr_remove = eth_igb_rar_clear,
	.mac_addr_set = eth_igb_default_mac_addr_set,
	/* RSS redirection table and hash configuration */
	.reta_update = eth_igb_rss_reta_update,
	.reta_query = eth_igb_rss_reta_query,
	.rss_hash_update = eth_igb_rss_hash_update,
	.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
	.filter_ctrl = eth_igb_filter_ctrl,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	/* IEEE 1588 / PTP timesync */
	.timesync_enable = igb_timesync_enable,
	.timesync_disable = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
	/* Register dump and (module) EEPROM access */
	.get_reg = eth_igb_get_regs,
	.get_eeprom_length = eth_igb_get_eeprom_length,
	.get_eeprom = eth_igb_get_eeprom,
	.set_eeprom = eth_igb_set_eeprom,
	.get_module_info = eth_igb_get_module_info,
	.get_module_eeprom = eth_igb_get_module_eeprom,
	.timesync_adjust_time = igb_timesync_adjust_time,
	.timesync_read_time = igb_timesync_read_time,
	.timesync_write_time = igb_timesync_write_time,
};
395a9643ea8Slogwang
/*
 * Ethdev callback table for the VF (virtual function) driver.
 * Only the bare necessities for basic VF operation are implemented;
 * queue setup/teardown and several query ops are shared with the PF.
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure = igbvf_dev_configure,
	.dev_start = igbvf_dev_start,
	.dev_stop = igbvf_dev_stop,
	.dev_close = igbvf_dev_close,
	.promiscuous_enable = igbvf_promiscuous_enable,
	.promiscuous_disable = igbvf_promiscuous_disable,
	.allmulticast_enable = igbvf_allmulticast_enable,
	.allmulticast_disable = igbvf_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igbvf_stats_get,
	.xstats_get = eth_igbvf_xstats_get,
	.xstats_get_names = eth_igbvf_xstats_get_names,
	/* VF has no separate xstats reset; both reset ops clear the same
	 * software-maintained counters. */
	.stats_reset = eth_igbvf_stats_reset,
	.xstats_reset = eth_igbvf_stats_reset,
	.vlan_filter_set = igbvf_vlan_filter_set,
	.dev_infos_get = eth_igbvf_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	/* Queue lifecycle shared with the PF driver (igb_rxtx.c) */
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_release = eth_igb_rx_queue_release,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.tx_done_cleanup = eth_igb_tx_done_cleanup,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.mac_addr_set = igbvf_default_mac_addr_set,
	.get_reg = igbvf_get_regs,
};
429a9643ea8Slogwang
/* Maps one extended-statistic display name to the byte offset of its
 * counter inside the driver's statistics structure (looked up with
 * offsetof() in the tables below). */
struct rte_igb_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* name exposed to applications */
	unsigned offset;	/* counter offset within the stats struct */
};
435a9643ea8Slogwang
/*
 * PF extended statistics: xstat name -> offset of the counter within
 * struct e1000_hw_stats.  Field names follow the hardware register
 * mnemonics (e.g. crcerrs = CRC Error Count, prc64 = Packets Received
 * [64 bytes]).
 */
static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
	{"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
	{"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
	{"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
	{"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
		fcruc)},
	/* Rx size-histogram buckets */
	{"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
	/* Tx size-histogram buckets */
	{"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		ptc1023)},
	/* NOTE(review): the Rx side names the equivalent bucket
	 * "rx_size_1024_to_max_packets" (both map to the *1522 counters),
	 * so "1023" here looks like a typo.  Renaming would change the
	 * user-visible xstat name, so it is only flagged, not changed. */
	{"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
	{"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
	{"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
	{"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},

	{"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
};

/* Number of PF xstats entries in the table above. */
#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
		sizeof(rte_igb_stats_strings[0]))
499a9643ea8Slogwang
/* VF extended statistics: xstat name -> offset of the counter within
 * struct e1000_vf_stats (loopback counters track VF-to-VF traffic). */
static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
	{"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
	{"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
	{"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
	{"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
};

/* Number of VF xstats entries in the table above. */
#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
		sizeof(rte_igbvf_stats_strings[0]))
510a9643ea8Slogwang
511a9643ea8Slogwang
/* Enable the interrupt causes recorded in the per-device software mask,
 * plus the dedicated MSI-X "other" vector when link-state-change (LSC)
 * interrupts were requested.
 */
static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* With MSI-X multi-vector support and LSC enabled, unmask the
	 * vector reserved for "other" (non-queue) interrupts in EIMS.
	 */
	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc != 0) {
		E1000_WRITE_REG(hw, E1000_EIMS, 1 << IGB_MSIX_OTHER_INTR_VEC);
	}

	/* Arm the software-tracked causes and flush posted writes. */
	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}
530a9643ea8Slogwang
/* Mask all interrupt causes; the inverse of igb_intr_enable(). */
static void
igb_intr_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* Mask the MSI-X "other" vector first when it is in use for LSC. */
	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc != 0) {
		E1000_WRITE_REG(hw, E1000_EIMC, 1 << IGB_MSIX_OTHER_INTR_VEC);
	}

	/* Clear every cause bit in IMC and flush posted writes. */
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}
547a9643ea8Slogwang
/* Enable the VF mailbox interrupt — the only cause the VF driver uses. */
static inline void
igbvf_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* only for mailbox: auto-mask, auto-clear and set the mailbox bit */
	E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_FLUSH(hw);
}
560a9643ea8Slogwang
561a9643ea8Slogwang /* only for mailbox now. If RX/TX needed, should extend this function. */
562a9643ea8Slogwang static void
igbvf_set_ivar_map(struct e1000_hw * hw,uint8_t msix_vector)563a9643ea8Slogwang igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
564a9643ea8Slogwang {
565a9643ea8Slogwang uint32_t tmp = 0;
566a9643ea8Slogwang
567a9643ea8Slogwang /* mailbox */
568a9643ea8Slogwang tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
569a9643ea8Slogwang tmp |= E1000_VTIVAR_VALID;
570a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
571a9643ea8Slogwang }
572a9643ea8Slogwang
/* Configure MSI-X interrupt mapping for the VF. Currently this only maps
 * the "other cause" (mailbox) interrupt to its vector.
 */
static void
eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure VF other cause ivar */
	igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
}
582a9643ea8Slogwang
/* Reset the PF hardware and then raise the "PF Reset Done" flag so that
 * PF/VF mailbox operations can resume. Returns the status code of the
 * reset itself.
 */
static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}
599a9643ea8Slogwang
/* Copy the PCI identification of @pci_dev into the e1000 HW structure and
 * let the shared base code derive the MAC type from it.
 */
static void
igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);


	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;

	/* Sets hw->mac.type from the IDs filled in above. */
	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}
616a9643ea8Slogwang
/* Release any software/firmware synchronization locks left held by a
 * previous, improperly terminated application. Acquire-then-release is
 * used on each lock so a stuck lock is forcibly broken.
 * Returns E1000_SUCCESS, or the error from e1000_init_mac_params().
 */
static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is the case,
		 * it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		/* Functions above 1 use the second pair of PHY lock bits. */
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure that if
		 * lock can not be taken it is due to an improper lock of the
		 * semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}
672a9643ea8Slogwang
6732bfe3f2eSlogwang /* Remove all ntuple filters of the device */
igb_ntuple_filter_uninit(struct rte_eth_dev * eth_dev)6742bfe3f2eSlogwang static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
6752bfe3f2eSlogwang {
6762bfe3f2eSlogwang struct e1000_filter_info *filter_info =
6772bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
6782bfe3f2eSlogwang struct e1000_5tuple_filter *p_5tuple;
6792bfe3f2eSlogwang struct e1000_2tuple_filter *p_2tuple;
6802bfe3f2eSlogwang
6812bfe3f2eSlogwang while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
6822bfe3f2eSlogwang TAILQ_REMOVE(&filter_info->fivetuple_list,
6832bfe3f2eSlogwang p_5tuple, entries);
6842bfe3f2eSlogwang rte_free(p_5tuple);
6852bfe3f2eSlogwang }
6862bfe3f2eSlogwang filter_info->fivetuple_mask = 0;
6872bfe3f2eSlogwang while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
6882bfe3f2eSlogwang TAILQ_REMOVE(&filter_info->twotuple_list,
6892bfe3f2eSlogwang p_2tuple, entries);
6902bfe3f2eSlogwang rte_free(p_2tuple);
6912bfe3f2eSlogwang }
6922bfe3f2eSlogwang filter_info->twotuple_mask = 0;
6932bfe3f2eSlogwang
6942bfe3f2eSlogwang return 0;
6952bfe3f2eSlogwang }
6962bfe3f2eSlogwang
6972bfe3f2eSlogwang /* Remove all flex filters of the device */
igb_flex_filter_uninit(struct rte_eth_dev * eth_dev)6982bfe3f2eSlogwang static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
6992bfe3f2eSlogwang {
7002bfe3f2eSlogwang struct e1000_filter_info *filter_info =
7012bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
7022bfe3f2eSlogwang struct e1000_flex_filter *p_flex;
7032bfe3f2eSlogwang
7042bfe3f2eSlogwang while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
7052bfe3f2eSlogwang TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
7062bfe3f2eSlogwang rte_free(p_flex);
7072bfe3f2eSlogwang }
7082bfe3f2eSlogwang filter_info->flex_mask = 0;
7092bfe3f2eSlogwang
7102bfe3f2eSlogwang return 0;
7112bfe3f2eSlogwang }
7122bfe3f2eSlogwang
/* Initialize an igb physical-function ethdev port.
 *
 * Hooks up dev ops and fast-path burst functions, identifies and resets
 * the hardware, validates the EEPROM, reads the permanent MAC address,
 * brings up PF/VF support and interrupt handling, and initializes the
 * flow/filter bookkeeping lists.
 *
 * Returns 0 on success, a negative errno value on failure.
 */
static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	uint32_t ctrl_ext;

	/* Install control ops and the default (non-scattered) datapath. */
	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_queue_count = eth_igb_rx_queue_count;
	eth_dev->rx_descriptor_done = eth_igb_rx_descriptor_done;
	eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
	eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* BAR 0 holds the register space. */
	hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev, pci_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Default link settings: autonegotiate all speed/duplex modes. */
	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
		&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
			"SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		eth_dev->data->port_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
		eth_igb_interrupt_handler,
		(void *)eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igb_intr_enable(eth_dev);

	eth_igb_dev_set_link_down(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
		sizeof(struct e1000_filter_info));

	TAILQ_INIT(&filter_info->flex_list);
	TAILQ_INIT(&filter_info->twotuple_list);
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* Global flow API bookkeeping lists. */
	TAILQ_INIT(&igb_filter_ntuple_list);
	TAILQ_INIT(&igb_filter_ethertype_list);
	TAILQ_INIT(&igb_filter_syn_list);
	TAILQ_INIT(&igb_filter_flex_list);
	TAILQ_INIT(&igb_filter_rss_list);
	TAILQ_INIT(&igb_flow_list);

	return 0;

err_late:
	/* Give hardware control back to firmware on any failure. */
	igb_hw_control_release(hw);

	return error;
}
892a9643ea8Slogwang
893a9643ea8Slogwang static int
eth_igb_dev_uninit(struct rte_eth_dev * eth_dev)894a9643ea8Slogwang eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
895a9643ea8Slogwang {
896a9643ea8Slogwang PMD_INIT_FUNC_TRACE();
897a9643ea8Slogwang
898a9643ea8Slogwang if (rte_eal_process_type() != RTE_PROC_PRIMARY)
8990c6bd470Sfengbojiang return 0;
900a9643ea8Slogwang
901a9643ea8Slogwang eth_igb_close(eth_dev);
902a9643ea8Slogwang
903a9643ea8Slogwang return 0;
904a9643ea8Slogwang }
905a9643ea8Slogwang
/*
 * Virtual Function device init: install VF ops and datapath, initialize
 * the shared base code and mailbox, reset the VF, and set up its MAC
 * address (random if the PF assigned none).
 * Returns 0 on success, a negative errno value on failure.
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;
	struct rte_ether_addr *perm_addr =
		(struct rte_ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	/* VF-specific ops; burst functions are shared with the PF. */
	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_descriptor_done = eth_igb_rx_descriptor_done;
	eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
	eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	/* NOTE(review): reset result is not checked here — presumably a VF
	 * reset may legitimately fail while the PF is down; confirm.
	 */
	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (rte_is_zero_ether_addr(perm_addr)) {
		rte_eth_random_addr(perm_addr->addr_bytes);
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			"%02x:%02x:%02x:%02x:%02x:%02x",
			perm_addr->addr_bytes[0],
			perm_addr->addr_bytes[1],
			perm_addr->addr_bytes[2],
			perm_addr->addr_bytes[3],
			perm_addr->addr_bytes[4],
			perm_addr->addr_bytes[5]);
	}

	/* Program the (possibly random) address into RAR slot 0. */
	diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
	if (diag) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return diag;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
		&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		"mac.type=%s",
		eth_dev->data->port_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id, "igb_mac_82576_vf");

	intr_handle = &pci_dev->intr_handle;
	rte_intr_callback_register(intr_handle,
		eth_igbvf_interrupt_handler, eth_dev);

	return 0;
}
1012a9643ea8Slogwang
1013a9643ea8Slogwang static int
eth_igbvf_dev_uninit(struct rte_eth_dev * eth_dev)1014a9643ea8Slogwang eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
1015a9643ea8Slogwang {
1016a9643ea8Slogwang PMD_INIT_FUNC_TRACE();
1017a9643ea8Slogwang
1018a9643ea8Slogwang if (rte_eal_process_type() != RTE_PROC_PRIMARY)
10190c6bd470Sfengbojiang return 0;
1020a9643ea8Slogwang
1021a9643ea8Slogwang igbvf_dev_close(eth_dev);
1022a9643ea8Slogwang
1023a9643ea8Slogwang return 0;
1024a9643ea8Slogwang }
1025a9643ea8Slogwang
/* PCI probe callback for the PF driver: allocate an ethdev with an
 * e1000_adapter private area and run eth_igb_dev_init() on it.
 */
static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igb_dev_init);
}
10322bfe3f2eSlogwang
/* PCI remove callback for the PF driver: uninit and free the ethdev. */
static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
}
10372bfe3f2eSlogwang
/* PCI driver definition for the igb physical-function PMD. */
static struct rte_pci_driver rte_igb_pmd = {
	.id_table = pci_id_igb_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igb_pci_probe,
	.remove = eth_igb_pci_remove,
};
1044a9643ea8Slogwang
10452bfe3f2eSlogwang
/* PCI probe callback for the VF driver: allocate an ethdev with an
 * e1000_adapter private area and run eth_igbvf_dev_init() on it.
 */
static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igbvf_dev_init);
}
10522bfe3f2eSlogwang
/* PCI remove callback for the VF driver: uninit and free the ethdev. */
static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
}
10572bfe3f2eSlogwang
/*
 * PCI driver definition for the igb virtual-function PMD.
 */
static struct rte_pci_driver rte_igbvf_pmd = {
	.id_table = pci_id_igbvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_igbvf_pci_probe,
	.remove = eth_igbvf_pci_remove,
};
1067a9643ea8Slogwang
1068a9643ea8Slogwang static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev * dev)1069a9643ea8Slogwang igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1070a9643ea8Slogwang {
1071a9643ea8Slogwang struct e1000_hw *hw =
1072a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1073a9643ea8Slogwang /* RCTL: enable VLAN filter since VMDq always use VLAN filter */
1074a9643ea8Slogwang uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
1075a9643ea8Slogwang rctl |= E1000_RCTL_VFE;
1076a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1077a9643ea8Slogwang }
1078a9643ea8Slogwang
/* Validate the configured RX/TX multi-queue modes against what this
 * driver supports. DCB is rejected outright; with SR-IOV active only
 * VMDq-style single-queue operation is allowed. Invalid-but-harmless TX
 * modes are only warned about to avoid breaking existing applications.
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	/* DCB is not supported in any combination. */
	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
	    tx_mq_mode == ETH_MQ_TX_DCB ||
	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}
	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* Check multi-queue mode.
		 * To no break software we accept ETH_MQ_RX_NONE as this might
		 * be used to turn off VLAN filter.
		 */

		if (rx_mq_mode == ETH_MQ_RX_NONE ||
		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
		} else {
			/* Only support one queue on VFs.
			 * RSS together with SRIOV is not supported.
			 */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					rx_mq_mode);
			return -EINVAL;
		}
		/* TX mode is not used here, so mode might be ignored.*/
		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(WARNING, "SRIOV is active,"
					" TX mode %d is not supported. "
					" Driver will behave as %d mode.",
					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
		}

		/* check valid queue number */
		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" only support one queue on VFs.");
			return -EINVAL;
		}
	} else {
		/* To no break software that set invalid mode, only display
		 * warning if invalid mode is used.
		 */
		if (rx_mq_mode != ETH_MQ_RX_NONE &&
		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
		    rx_mq_mode != ETH_MQ_RX_RSS) {
			/* RSS together with VMDq not supported*/
			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				     rx_mq_mode);
			return -EINVAL;
		}

		if (tx_mq_mode != ETH_MQ_TX_NONE &&
		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
					" Due to txmode is meaningless in this"
					" driver, just ignore.",
					tx_mq_mode);
		}
	}
	return 0;
}
1150a9643ea8Slogwang
1151a9643ea8Slogwang static int
eth_igb_configure(struct rte_eth_dev * dev)1152a9643ea8Slogwang eth_igb_configure(struct rte_eth_dev *dev)
1153a9643ea8Slogwang {
1154a9643ea8Slogwang struct e1000_interrupt *intr =
1155a9643ea8Slogwang E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1156a9643ea8Slogwang int ret;
1157a9643ea8Slogwang
1158a9643ea8Slogwang PMD_INIT_FUNC_TRACE();
1159a9643ea8Slogwang
11604418919fSjohnjiang if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
11614418919fSjohnjiang dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
11624418919fSjohnjiang
1163a9643ea8Slogwang /* multipe queue mode checking */
1164a9643ea8Slogwang ret = igb_check_mq_mode(dev);
1165a9643ea8Slogwang if (ret != 0) {
1166a9643ea8Slogwang PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
1167a9643ea8Slogwang ret);
1168a9643ea8Slogwang return ret;
1169a9643ea8Slogwang }
1170a9643ea8Slogwang
1171a9643ea8Slogwang intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
1172a9643ea8Slogwang PMD_INIT_FUNC_TRACE();
1173a9643ea8Slogwang
1174a9643ea8Slogwang return 0;
1175a9643ea8Slogwang }
1176a9643ea8Slogwang
1177d30ea906Sjfb8856606 static void
eth_igb_rxtx_control(struct rte_eth_dev * dev,bool enable)1178d30ea906Sjfb8856606 eth_igb_rxtx_control(struct rte_eth_dev *dev,
1179d30ea906Sjfb8856606 bool enable)
1180d30ea906Sjfb8856606 {
1181d30ea906Sjfb8856606 struct e1000_hw *hw =
1182d30ea906Sjfb8856606 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1183d30ea906Sjfb8856606 uint32_t tctl, rctl;
1184d30ea906Sjfb8856606
1185d30ea906Sjfb8856606 tctl = E1000_READ_REG(hw, E1000_TCTL);
1186d30ea906Sjfb8856606 rctl = E1000_READ_REG(hw, E1000_RCTL);
1187d30ea906Sjfb8856606
1188d30ea906Sjfb8856606 if (enable) {
1189d30ea906Sjfb8856606 /* enable Tx/Rx */
1190d30ea906Sjfb8856606 tctl |= E1000_TCTL_EN;
1191d30ea906Sjfb8856606 rctl |= E1000_RCTL_EN;
1192d30ea906Sjfb8856606 } else {
1193d30ea906Sjfb8856606 /* disable Tx/Rx */
1194d30ea906Sjfb8856606 tctl &= ~E1000_TCTL_EN;
1195d30ea906Sjfb8856606 rctl &= ~E1000_RCTL_EN;
1196d30ea906Sjfb8856606 }
1197d30ea906Sjfb8856606 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1198d30ea906Sjfb8856606 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1199d30ea906Sjfb8856606 E1000_WRITE_FLUSH(hw);
1200d30ea906Sjfb8856606 }
1201d30ea906Sjfb8856606
1202a9643ea8Slogwang static int
eth_igb_start(struct rte_eth_dev * dev)1203a9643ea8Slogwang eth_igb_start(struct rte_eth_dev *dev)
1204a9643ea8Slogwang {
1205a9643ea8Slogwang struct e1000_hw *hw =
1206a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1207a9643ea8Slogwang struct e1000_adapter *adapter =
1208a9643ea8Slogwang E1000_DEV_PRIVATE(dev->data->dev_private);
12092bfe3f2eSlogwang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
12102bfe3f2eSlogwang struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1211a9643ea8Slogwang int ret, mask;
1212a9643ea8Slogwang uint32_t intr_vector = 0;
1213a9643ea8Slogwang uint32_t ctrl_ext;
1214a9643ea8Slogwang uint32_t *speeds;
1215a9643ea8Slogwang int num_speeds;
1216a9643ea8Slogwang bool autoneg;
1217a9643ea8Slogwang
1218a9643ea8Slogwang PMD_INIT_FUNC_TRACE();
1219a9643ea8Slogwang
1220a9643ea8Slogwang /* disable uio/vfio intr/eventfd mapping */
1221a9643ea8Slogwang rte_intr_disable(intr_handle);
1222a9643ea8Slogwang
1223a9643ea8Slogwang /* Power up the phy. Needed to make the link go Up */
1224a9643ea8Slogwang eth_igb_dev_set_link_up(dev);
1225a9643ea8Slogwang
1226a9643ea8Slogwang /*
1227a9643ea8Slogwang * Packet Buffer Allocation (PBA)
1228a9643ea8Slogwang * Writing PBA sets the receive portion of the buffer
1229a9643ea8Slogwang * the remainder is used for the transmit buffer.
1230a9643ea8Slogwang */
1231a9643ea8Slogwang if (hw->mac.type == e1000_82575) {
1232a9643ea8Slogwang uint32_t pba;
1233a9643ea8Slogwang
1234a9643ea8Slogwang pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1235a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_PBA, pba);
1236a9643ea8Slogwang }
1237a9643ea8Slogwang
1238a9643ea8Slogwang /* Put the address into the Receive Address Array */
1239a9643ea8Slogwang e1000_rar_set(hw, hw->mac.addr, 0);
1240a9643ea8Slogwang
1241a9643ea8Slogwang /* Initialize the hardware */
1242a9643ea8Slogwang if (igb_hardware_init(hw)) {
1243a9643ea8Slogwang PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
1244a9643ea8Slogwang return -EIO;
1245a9643ea8Slogwang }
1246a9643ea8Slogwang adapter->stopped = 0;
1247a9643ea8Slogwang
12484418919fSjohnjiang E1000_WRITE_REG(hw, E1000_VET,
12494418919fSjohnjiang RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
1250a9643ea8Slogwang
1251a9643ea8Slogwang ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1252a9643ea8Slogwang /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1253a9643ea8Slogwang ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
1254a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1255a9643ea8Slogwang E1000_WRITE_FLUSH(hw);
1256a9643ea8Slogwang
1257a9643ea8Slogwang /* configure PF module if SRIOV enabled */
1258a9643ea8Slogwang igb_pf_host_configure(dev);
1259a9643ea8Slogwang
1260a9643ea8Slogwang /* check and configure queue intr-vector mapping */
1261a9643ea8Slogwang if ((rte_intr_cap_multiple(intr_handle) ||
1262a9643ea8Slogwang !RTE_ETH_DEV_SRIOV(dev).active) &&
1263a9643ea8Slogwang dev->data->dev_conf.intr_conf.rxq != 0) {
1264a9643ea8Slogwang intr_vector = dev->data->nb_rx_queues;
1265a9643ea8Slogwang if (rte_intr_efd_enable(intr_handle, intr_vector))
1266a9643ea8Slogwang return -1;
1267a9643ea8Slogwang }
1268a9643ea8Slogwang
1269a9643ea8Slogwang if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1270a9643ea8Slogwang intr_handle->intr_vec =
1271a9643ea8Slogwang rte_zmalloc("intr_vec",
1272a9643ea8Slogwang dev->data->nb_rx_queues * sizeof(int), 0);
1273a9643ea8Slogwang if (intr_handle->intr_vec == NULL) {
1274a9643ea8Slogwang PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
12752bfe3f2eSlogwang " intr_vec", dev->data->nb_rx_queues);
1276a9643ea8Slogwang return -ENOMEM;
1277a9643ea8Slogwang }
1278a9643ea8Slogwang }
1279a9643ea8Slogwang
1280a9643ea8Slogwang /* confiugre msix for rx interrupt */
1281a9643ea8Slogwang eth_igb_configure_msix_intr(dev);
1282a9643ea8Slogwang
1283a9643ea8Slogwang /* Configure for OS presence */
1284a9643ea8Slogwang igb_init_manageability(hw);
1285a9643ea8Slogwang
1286a9643ea8Slogwang eth_igb_tx_init(dev);
1287a9643ea8Slogwang
1288a9643ea8Slogwang /* This can fail when allocating mbufs for descriptor rings */
1289a9643ea8Slogwang ret = eth_igb_rx_init(dev);
1290a9643ea8Slogwang if (ret) {
1291a9643ea8Slogwang PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1292a9643ea8Slogwang igb_dev_clear_queues(dev);
1293a9643ea8Slogwang return ret;
1294a9643ea8Slogwang }
1295a9643ea8Slogwang
1296a9643ea8Slogwang e1000_clear_hw_cntrs_base_generic(hw);
1297a9643ea8Slogwang
1298a9643ea8Slogwang /*
1299a9643ea8Slogwang * VLAN Offload Settings
1300a9643ea8Slogwang */
1301a9643ea8Slogwang mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
1302a9643ea8Slogwang ETH_VLAN_EXTEND_MASK;
13032bfe3f2eSlogwang ret = eth_igb_vlan_offload_set(dev, mask);
13042bfe3f2eSlogwang if (ret) {
13052bfe3f2eSlogwang PMD_INIT_LOG(ERR, "Unable to set vlan offload");
13062bfe3f2eSlogwang igb_dev_clear_queues(dev);
13072bfe3f2eSlogwang return ret;
13082bfe3f2eSlogwang }
1309a9643ea8Slogwang
1310a9643ea8Slogwang if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1311a9643ea8Slogwang /* Enable VLAN filter since VMDq always use VLAN filter */
1312a9643ea8Slogwang igb_vmdq_vlan_hw_filter_enable(dev);
1313a9643ea8Slogwang }
1314a9643ea8Slogwang
1315a9643ea8Slogwang if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
1316a9643ea8Slogwang (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
1317a9643ea8Slogwang (hw->mac.type == e1000_i211)) {
1318a9643ea8Slogwang /* Configure EITR with the maximum possible value (0xFFFF) */
1319a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
1320a9643ea8Slogwang }
1321a9643ea8Slogwang
1322a9643ea8Slogwang /* Setup link speed and duplex */
1323a9643ea8Slogwang speeds = &dev->data->dev_conf.link_speeds;
1324a9643ea8Slogwang if (*speeds == ETH_LINK_SPEED_AUTONEG) {
1325a9643ea8Slogwang hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
13262bfe3f2eSlogwang hw->mac.autoneg = 1;
1327a9643ea8Slogwang } else {
1328a9643ea8Slogwang num_speeds = 0;
1329a9643ea8Slogwang autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
1330a9643ea8Slogwang
1331a9643ea8Slogwang /* Reset */
1332a9643ea8Slogwang hw->phy.autoneg_advertised = 0;
1333a9643ea8Slogwang
1334a9643ea8Slogwang if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
1335a9643ea8Slogwang ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
1336a9643ea8Slogwang ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
1337a9643ea8Slogwang num_speeds = -1;
1338a9643ea8Slogwang goto error_invalid_config;
1339a9643ea8Slogwang }
1340a9643ea8Slogwang if (*speeds & ETH_LINK_SPEED_10M_HD) {
1341a9643ea8Slogwang hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
1342a9643ea8Slogwang num_speeds++;
1343a9643ea8Slogwang }
1344a9643ea8Slogwang if (*speeds & ETH_LINK_SPEED_10M) {
1345a9643ea8Slogwang hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
1346a9643ea8Slogwang num_speeds++;
1347a9643ea8Slogwang }
1348a9643ea8Slogwang if (*speeds & ETH_LINK_SPEED_100M_HD) {
1349a9643ea8Slogwang hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
1350a9643ea8Slogwang num_speeds++;
1351a9643ea8Slogwang }
1352a9643ea8Slogwang if (*speeds & ETH_LINK_SPEED_100M) {
1353a9643ea8Slogwang hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
1354a9643ea8Slogwang num_speeds++;
1355a9643ea8Slogwang }
1356a9643ea8Slogwang if (*speeds & ETH_LINK_SPEED_1G) {
1357a9643ea8Slogwang hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
1358a9643ea8Slogwang num_speeds++;
1359a9643ea8Slogwang }
1360a9643ea8Slogwang if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
1361a9643ea8Slogwang goto error_invalid_config;
13622bfe3f2eSlogwang
13632bfe3f2eSlogwang /* Set/reset the mac.autoneg based on the link speed,
13642bfe3f2eSlogwang * fixed or not
13652bfe3f2eSlogwang */
13662bfe3f2eSlogwang if (!autoneg) {
13672bfe3f2eSlogwang hw->mac.autoneg = 0;
13682bfe3f2eSlogwang hw->mac.forced_speed_duplex =
13692bfe3f2eSlogwang hw->phy.autoneg_advertised;
13702bfe3f2eSlogwang } else {
13712bfe3f2eSlogwang hw->mac.autoneg = 1;
13722bfe3f2eSlogwang }
1373a9643ea8Slogwang }
1374a9643ea8Slogwang
1375a9643ea8Slogwang e1000_setup_link(hw);
1376a9643ea8Slogwang
1377a9643ea8Slogwang if (rte_intr_allow_others(intr_handle)) {
1378a9643ea8Slogwang /* check if lsc interrupt is enabled */
1379a9643ea8Slogwang if (dev->data->dev_conf.intr_conf.lsc != 0)
13802bfe3f2eSlogwang eth_igb_lsc_interrupt_setup(dev, TRUE);
13812bfe3f2eSlogwang else
13822bfe3f2eSlogwang eth_igb_lsc_interrupt_setup(dev, FALSE);
1383a9643ea8Slogwang } else {
1384a9643ea8Slogwang rte_intr_callback_unregister(intr_handle,
1385a9643ea8Slogwang eth_igb_interrupt_handler,
1386a9643ea8Slogwang (void *)dev);
1387a9643ea8Slogwang if (dev->data->dev_conf.intr_conf.lsc != 0)
1388a9643ea8Slogwang PMD_INIT_LOG(INFO, "lsc won't enable because of"
13892bfe3f2eSlogwang " no intr multiplex");
1390a9643ea8Slogwang }
1391a9643ea8Slogwang
1392a9643ea8Slogwang /* check if rxq interrupt is enabled */
1393a9643ea8Slogwang if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1394a9643ea8Slogwang rte_intr_dp_is_en(intr_handle))
1395a9643ea8Slogwang eth_igb_rxq_interrupt_setup(dev);
1396a9643ea8Slogwang
1397a9643ea8Slogwang /* enable uio/vfio intr/eventfd mapping */
1398a9643ea8Slogwang rte_intr_enable(intr_handle);
1399a9643ea8Slogwang
1400a9643ea8Slogwang /* resume enabled intr since hw reset */
1401a9643ea8Slogwang igb_intr_enable(dev);
1402a9643ea8Slogwang
14032bfe3f2eSlogwang /* restore all types filter */
14042bfe3f2eSlogwang igb_filter_restore(dev);
14052bfe3f2eSlogwang
1406d30ea906Sjfb8856606 eth_igb_rxtx_control(dev, true);
1407d30ea906Sjfb8856606 eth_igb_link_update(dev, 0);
1408d30ea906Sjfb8856606
1409a9643ea8Slogwang PMD_INIT_LOG(DEBUG, "<<");
1410a9643ea8Slogwang
1411a9643ea8Slogwang return 0;
1412a9643ea8Slogwang
1413a9643ea8Slogwang error_invalid_config:
1414a9643ea8Slogwang PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
1415a9643ea8Slogwang dev->data->dev_conf.link_speeds, dev->data->port_id);
1416a9643ea8Slogwang igb_dev_clear_queues(dev);
1417a9643ea8Slogwang return -EINVAL;
1418a9643ea8Slogwang }
1419a9643ea8Slogwang
1420a9643ea8Slogwang /*********************************************************************
1421a9643ea8Slogwang *
1422a9643ea8Slogwang * This routine disables all traffic on the adapter by issuing a
1423a9643ea8Slogwang * global reset on the MAC.
1424a9643ea8Slogwang *
1425a9643ea8Slogwang **********************************************************************/
1426*2d9fd380Sjfb8856606 static int
eth_igb_stop(struct rte_eth_dev * dev)1427a9643ea8Slogwang eth_igb_stop(struct rte_eth_dev *dev)
1428a9643ea8Slogwang {
1429a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
14302bfe3f2eSlogwang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1431a9643ea8Slogwang struct rte_eth_link link;
14322bfe3f2eSlogwang struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
14334418919fSjohnjiang struct e1000_adapter *adapter =
14344418919fSjohnjiang E1000_DEV_PRIVATE(dev->data->dev_private);
14354418919fSjohnjiang
14364418919fSjohnjiang if (adapter->stopped)
1437*2d9fd380Sjfb8856606 return 0;
1438a9643ea8Slogwang
1439d30ea906Sjfb8856606 eth_igb_rxtx_control(dev, false);
1440d30ea906Sjfb8856606
14411646932aSjfb8856606 igb_intr_disable(dev);
1442a9643ea8Slogwang
1443a9643ea8Slogwang /* disable intr eventfd mapping */
1444a9643ea8Slogwang rte_intr_disable(intr_handle);
1445a9643ea8Slogwang
1446a9643ea8Slogwang igb_pf_reset_hw(hw);
1447a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_WUC, 0);
1448a9643ea8Slogwang
14494418919fSjohnjiang /* Set bit for Go Link disconnect if PHY reset is not blocked */
14504418919fSjohnjiang if (hw->mac.type >= e1000_82580 &&
14514418919fSjohnjiang (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
1452a9643ea8Slogwang uint32_t phpm_reg;
1453a9643ea8Slogwang
1454a9643ea8Slogwang phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1455a9643ea8Slogwang phpm_reg |= E1000_82580_PM_GO_LINKD;
1456a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1457a9643ea8Slogwang }
1458a9643ea8Slogwang
1459a9643ea8Slogwang /* Power down the phy. Needed to make the link go Down */
1460a9643ea8Slogwang eth_igb_dev_set_link_down(dev);
1461a9643ea8Slogwang
1462a9643ea8Slogwang igb_dev_clear_queues(dev);
1463a9643ea8Slogwang
1464a9643ea8Slogwang /* clear the recorded link status */
1465a9643ea8Slogwang memset(&link, 0, sizeof(link));
1466d30ea906Sjfb8856606 rte_eth_linkstatus_set(dev, &link);
1467a9643ea8Slogwang
1468a9643ea8Slogwang if (!rte_intr_allow_others(intr_handle))
1469a9643ea8Slogwang /* resume to the default handler */
1470a9643ea8Slogwang rte_intr_callback_register(intr_handle,
1471a9643ea8Slogwang eth_igb_interrupt_handler,
1472a9643ea8Slogwang (void *)dev);
1473a9643ea8Slogwang
1474a9643ea8Slogwang /* Clean datapath event and queue/vec mapping */
1475a9643ea8Slogwang rte_intr_efd_disable(intr_handle);
1476a9643ea8Slogwang if (intr_handle->intr_vec != NULL) {
1477a9643ea8Slogwang rte_free(intr_handle->intr_vec);
1478a9643ea8Slogwang intr_handle->intr_vec = NULL;
1479a9643ea8Slogwang }
14804418919fSjohnjiang
14814418919fSjohnjiang adapter->stopped = true;
1482*2d9fd380Sjfb8856606 dev->data->dev_started = 0;
1483*2d9fd380Sjfb8856606
1484*2d9fd380Sjfb8856606 return 0;
1485a9643ea8Slogwang }
1486a9643ea8Slogwang
1487a9643ea8Slogwang static int
eth_igb_dev_set_link_up(struct rte_eth_dev * dev)1488a9643ea8Slogwang eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
1489a9643ea8Slogwang {
1490a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1491a9643ea8Slogwang
1492a9643ea8Slogwang if (hw->phy.media_type == e1000_media_type_copper)
1493a9643ea8Slogwang e1000_power_up_phy(hw);
1494a9643ea8Slogwang else
1495a9643ea8Slogwang e1000_power_up_fiber_serdes_link(hw);
1496a9643ea8Slogwang
1497a9643ea8Slogwang return 0;
1498a9643ea8Slogwang }
1499a9643ea8Slogwang
1500a9643ea8Slogwang static int
eth_igb_dev_set_link_down(struct rte_eth_dev * dev)1501a9643ea8Slogwang eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
1502a9643ea8Slogwang {
1503a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1504a9643ea8Slogwang
1505a9643ea8Slogwang if (hw->phy.media_type == e1000_media_type_copper)
1506a9643ea8Slogwang e1000_power_down_phy(hw);
1507a9643ea8Slogwang else
1508a9643ea8Slogwang e1000_shutdown_fiber_serdes_link(hw);
1509a9643ea8Slogwang
1510a9643ea8Slogwang return 0;
1511a9643ea8Slogwang }
1512a9643ea8Slogwang
1513*2d9fd380Sjfb8856606 static int
eth_igb_close(struct rte_eth_dev * dev)1514a9643ea8Slogwang eth_igb_close(struct rte_eth_dev *dev)
1515a9643ea8Slogwang {
1516a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1517a9643ea8Slogwang struct rte_eth_link link;
15182bfe3f2eSlogwang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
15192bfe3f2eSlogwang struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
15204418919fSjohnjiang struct e1000_filter_info *filter_info =
15214418919fSjohnjiang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1522*2d9fd380Sjfb8856606 int ret;
1523a9643ea8Slogwang
1524*2d9fd380Sjfb8856606 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1525*2d9fd380Sjfb8856606 return 0;
1526*2d9fd380Sjfb8856606
1527*2d9fd380Sjfb8856606 ret = eth_igb_stop(dev);
1528a9643ea8Slogwang
1529a9643ea8Slogwang e1000_phy_hw_reset(hw);
1530a9643ea8Slogwang igb_release_manageability(hw);
1531a9643ea8Slogwang igb_hw_control_release(hw);
1532a9643ea8Slogwang
15334418919fSjohnjiang /* Clear bit for Go Link disconnect if PHY reset is not blocked */
15344418919fSjohnjiang if (hw->mac.type >= e1000_82580 &&
15354418919fSjohnjiang (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
1536a9643ea8Slogwang uint32_t phpm_reg;
1537a9643ea8Slogwang
1538a9643ea8Slogwang phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1539a9643ea8Slogwang phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1540a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1541a9643ea8Slogwang }
1542a9643ea8Slogwang
1543a9643ea8Slogwang igb_dev_free_queues(dev);
1544a9643ea8Slogwang
15452bfe3f2eSlogwang if (intr_handle->intr_vec) {
15462bfe3f2eSlogwang rte_free(intr_handle->intr_vec);
15472bfe3f2eSlogwang intr_handle->intr_vec = NULL;
1548a9643ea8Slogwang }
1549a9643ea8Slogwang
1550a9643ea8Slogwang memset(&link, 0, sizeof(link));
1551d30ea906Sjfb8856606 rte_eth_linkstatus_set(dev, &link);
15524418919fSjohnjiang
15534418919fSjohnjiang /* Reset any pending lock */
15544418919fSjohnjiang igb_reset_swfw_lock(hw);
15554418919fSjohnjiang
15564418919fSjohnjiang /* uninitialize PF if max_vfs not zero */
15574418919fSjohnjiang igb_pf_host_uninit(dev);
15584418919fSjohnjiang
15594418919fSjohnjiang rte_intr_callback_unregister(intr_handle,
15604418919fSjohnjiang eth_igb_interrupt_handler, dev);
15614418919fSjohnjiang
15624418919fSjohnjiang /* clear the SYN filter info */
15634418919fSjohnjiang filter_info->syn_info = 0;
15644418919fSjohnjiang
15654418919fSjohnjiang /* clear the ethertype filters info */
15664418919fSjohnjiang filter_info->ethertype_mask = 0;
15674418919fSjohnjiang memset(filter_info->ethertype_filters, 0,
15684418919fSjohnjiang E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
15694418919fSjohnjiang
15704418919fSjohnjiang /* clear the rss filter info */
15714418919fSjohnjiang memset(&filter_info->rss_info, 0,
15724418919fSjohnjiang sizeof(struct igb_rte_flow_rss_conf));
15734418919fSjohnjiang
15744418919fSjohnjiang /* remove all ntuple filters of the device */
15754418919fSjohnjiang igb_ntuple_filter_uninit(dev);
15764418919fSjohnjiang
15774418919fSjohnjiang /* remove all flex filters of the device */
15784418919fSjohnjiang igb_flex_filter_uninit(dev);
15794418919fSjohnjiang
15804418919fSjohnjiang /* clear all the filters list */
15814418919fSjohnjiang igb_filterlist_flush(dev);
1582*2d9fd380Sjfb8856606
1583*2d9fd380Sjfb8856606 return ret;
1584a9643ea8Slogwang }
1585a9643ea8Slogwang
1586d30ea906Sjfb8856606 /*
1587d30ea906Sjfb8856606 * Reset PF device.
1588d30ea906Sjfb8856606 */
1589d30ea906Sjfb8856606 static int
eth_igb_reset(struct rte_eth_dev * dev)1590d30ea906Sjfb8856606 eth_igb_reset(struct rte_eth_dev *dev)
1591d30ea906Sjfb8856606 {
1592d30ea906Sjfb8856606 int ret;
1593d30ea906Sjfb8856606
1594d30ea906Sjfb8856606 /* When a DPDK PMD PF begin to reset PF port, it should notify all
1595d30ea906Sjfb8856606 * its VF to make them align with it. The detailed notification
1596d30ea906Sjfb8856606 * mechanism is PMD specific and is currently not implemented.
1597d30ea906Sjfb8856606 * To avoid unexpected behavior in VF, currently reset of PF with
1598d30ea906Sjfb8856606 * SR-IOV activation is not supported. It might be supported later.
1599d30ea906Sjfb8856606 */
1600d30ea906Sjfb8856606 if (dev->data->sriov.active)
1601d30ea906Sjfb8856606 return -ENOTSUP;
1602d30ea906Sjfb8856606
1603d30ea906Sjfb8856606 ret = eth_igb_dev_uninit(dev);
1604d30ea906Sjfb8856606 if (ret)
1605d30ea906Sjfb8856606 return ret;
1606d30ea906Sjfb8856606
1607d30ea906Sjfb8856606 ret = eth_igb_dev_init(dev);
1608d30ea906Sjfb8856606
1609d30ea906Sjfb8856606 return ret;
1610d30ea906Sjfb8856606 }
1611d30ea906Sjfb8856606
1612d30ea906Sjfb8856606
1613a9643ea8Slogwang static int
igb_get_rx_buffer_size(struct e1000_hw * hw)1614a9643ea8Slogwang igb_get_rx_buffer_size(struct e1000_hw *hw)
1615a9643ea8Slogwang {
1616a9643ea8Slogwang uint32_t rx_buf_size;
1617a9643ea8Slogwang if (hw->mac.type == e1000_82576) {
1618a9643ea8Slogwang rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
1619a9643ea8Slogwang } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
1620a9643ea8Slogwang /* PBS needs to be translated according to a lookup table */
1621a9643ea8Slogwang rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
1622a9643ea8Slogwang rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
1623a9643ea8Slogwang rx_buf_size = (rx_buf_size << 10);
1624a9643ea8Slogwang } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
1625a9643ea8Slogwang rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
1626a9643ea8Slogwang } else {
1627a9643ea8Slogwang rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
1628a9643ea8Slogwang }
1629a9643ea8Slogwang
1630a9643ea8Slogwang return rx_buf_size;
1631a9643ea8Slogwang }
1632a9643ea8Slogwang
1633a9643ea8Slogwang /*********************************************************************
1634a9643ea8Slogwang *
1635a9643ea8Slogwang * Initialize the hardware
1636a9643ea8Slogwang *
1637a9643ea8Slogwang **********************************************************************/
1638a9643ea8Slogwang static int
igb_hardware_init(struct e1000_hw * hw)1639a9643ea8Slogwang igb_hardware_init(struct e1000_hw *hw)
1640a9643ea8Slogwang {
1641a9643ea8Slogwang uint32_t rx_buf_size;
1642a9643ea8Slogwang int diag;
1643a9643ea8Slogwang
1644a9643ea8Slogwang /* Let the firmware know the OS is in control */
1645a9643ea8Slogwang igb_hw_control_acquire(hw);
1646a9643ea8Slogwang
1647a9643ea8Slogwang /*
1648a9643ea8Slogwang * These parameters control the automatic generation (Tx) and
1649a9643ea8Slogwang * response (Rx) to Ethernet PAUSE frames.
1650a9643ea8Slogwang * - High water mark should allow for at least two standard size (1518)
1651a9643ea8Slogwang * frames to be received after sending an XOFF.
1652a9643ea8Slogwang * - Low water mark works best when it is very near the high water mark.
1653a9643ea8Slogwang * This allows the receiver to restart by sending XON when it has
1654a9643ea8Slogwang * drained a bit. Here we use an arbitrary value of 1500 which will
1655a9643ea8Slogwang * restart after one full frame is pulled from the buffer. There
1656a9643ea8Slogwang * could be several smaller frames in the buffer and if so they will
1657a9643ea8Slogwang * not trigger the XON until their total number reduces the buffer
1658a9643ea8Slogwang * by 1500.
1659a9643ea8Slogwang * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1660a9643ea8Slogwang */
1661a9643ea8Slogwang rx_buf_size = igb_get_rx_buffer_size(hw);
1662a9643ea8Slogwang
16634418919fSjohnjiang hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
1664a9643ea8Slogwang hw->fc.low_water = hw->fc.high_water - 1500;
1665a9643ea8Slogwang hw->fc.pause_time = IGB_FC_PAUSE_TIME;
1666a9643ea8Slogwang hw->fc.send_xon = 1;
1667a9643ea8Slogwang
1668a9643ea8Slogwang /* Set Flow control, use the tunable location if sane */
1669a9643ea8Slogwang if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
1670a9643ea8Slogwang hw->fc.requested_mode = igb_fc_setting;
1671a9643ea8Slogwang else
1672a9643ea8Slogwang hw->fc.requested_mode = e1000_fc_none;
1673a9643ea8Slogwang
1674a9643ea8Slogwang /* Issue a global reset */
1675a9643ea8Slogwang igb_pf_reset_hw(hw);
1676a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_WUC, 0);
1677a9643ea8Slogwang
1678a9643ea8Slogwang diag = e1000_init_hw(hw);
1679a9643ea8Slogwang if (diag < 0)
1680a9643ea8Slogwang return diag;
1681a9643ea8Slogwang
16824418919fSjohnjiang E1000_WRITE_REG(hw, E1000_VET,
16834418919fSjohnjiang RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
1684a9643ea8Slogwang e1000_get_phy_info(hw);
1685a9643ea8Slogwang e1000_check_for_link(hw);
1686a9643ea8Slogwang
1687a9643ea8Slogwang return 0;
1688a9643ea8Slogwang }
1689a9643ea8Slogwang
1690a9643ea8Slogwang /* This function is based on igb_update_stats_counters() in igb/if_igb.c */
1691a9643ea8Slogwang static void
igb_read_stats_registers(struct e1000_hw * hw,struct e1000_hw_stats * stats)1692a9643ea8Slogwang igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
1693a9643ea8Slogwang {
1694a9643ea8Slogwang int pause_frames;
1695a9643ea8Slogwang
1696a9643ea8Slogwang uint64_t old_gprc = stats->gprc;
1697a9643ea8Slogwang uint64_t old_gptc = stats->gptc;
1698a9643ea8Slogwang uint64_t old_tpr = stats->tpr;
1699a9643ea8Slogwang uint64_t old_tpt = stats->tpt;
1700a9643ea8Slogwang uint64_t old_rpthc = stats->rpthc;
1701a9643ea8Slogwang uint64_t old_hgptc = stats->hgptc;
1702a9643ea8Slogwang
1703a9643ea8Slogwang if(hw->phy.media_type == e1000_media_type_copper ||
1704a9643ea8Slogwang (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1705a9643ea8Slogwang stats->symerrs +=
1706a9643ea8Slogwang E1000_READ_REG(hw,E1000_SYMERRS);
1707a9643ea8Slogwang stats->sec += E1000_READ_REG(hw, E1000_SEC);
1708a9643ea8Slogwang }
1709a9643ea8Slogwang
1710a9643ea8Slogwang stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
1711a9643ea8Slogwang stats->mpc += E1000_READ_REG(hw, E1000_MPC);
1712a9643ea8Slogwang stats->scc += E1000_READ_REG(hw, E1000_SCC);
1713a9643ea8Slogwang stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
1714a9643ea8Slogwang
1715a9643ea8Slogwang stats->mcc += E1000_READ_REG(hw, E1000_MCC);
1716a9643ea8Slogwang stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
1717a9643ea8Slogwang stats->colc += E1000_READ_REG(hw, E1000_COLC);
1718a9643ea8Slogwang stats->dc += E1000_READ_REG(hw, E1000_DC);
1719a9643ea8Slogwang stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
1720a9643ea8Slogwang stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
1721a9643ea8Slogwang stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
1722a9643ea8Slogwang /*
1723a9643ea8Slogwang ** For watchdog management we need to know if we have been
1724a9643ea8Slogwang ** paused during the last interval, so capture that here.
1725a9643ea8Slogwang */
1726a9643ea8Slogwang pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
1727a9643ea8Slogwang stats->xoffrxc += pause_frames;
1728a9643ea8Slogwang stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
1729a9643ea8Slogwang stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
1730a9643ea8Slogwang stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
1731a9643ea8Slogwang stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
1732a9643ea8Slogwang stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
1733a9643ea8Slogwang stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
1734a9643ea8Slogwang stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
1735a9643ea8Slogwang stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
1736a9643ea8Slogwang stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
1737a9643ea8Slogwang stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
1738a9643ea8Slogwang stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
1739a9643ea8Slogwang stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
1740a9643ea8Slogwang
1741a9643ea8Slogwang /* For the 64-bit byte counters the low dword must be read first. */
1742a9643ea8Slogwang /* Both registers clear on the read of the high dword */
1743a9643ea8Slogwang
1744a9643ea8Slogwang /* Workaround CRC bytes included in size, take away 4 bytes/packet */
1745a9643ea8Slogwang stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
1746a9643ea8Slogwang stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
17474418919fSjohnjiang stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
1748a9643ea8Slogwang stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
1749a9643ea8Slogwang stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
17504418919fSjohnjiang stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;
1751a9643ea8Slogwang
1752a9643ea8Slogwang stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
1753a9643ea8Slogwang stats->ruc += E1000_READ_REG(hw, E1000_RUC);
1754a9643ea8Slogwang stats->rfc += E1000_READ_REG(hw, E1000_RFC);
1755a9643ea8Slogwang stats->roc += E1000_READ_REG(hw, E1000_ROC);
1756a9643ea8Slogwang stats->rjc += E1000_READ_REG(hw, E1000_RJC);
1757a9643ea8Slogwang
1758a9643ea8Slogwang stats->tpr += E1000_READ_REG(hw, E1000_TPR);
1759a9643ea8Slogwang stats->tpt += E1000_READ_REG(hw, E1000_TPT);
1760a9643ea8Slogwang
1761a9643ea8Slogwang stats->tor += E1000_READ_REG(hw, E1000_TORL);
1762a9643ea8Slogwang stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
17634418919fSjohnjiang stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
1764a9643ea8Slogwang stats->tot += E1000_READ_REG(hw, E1000_TOTL);
1765a9643ea8Slogwang stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
17664418919fSjohnjiang stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;
1767a9643ea8Slogwang
1768a9643ea8Slogwang stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
1769a9643ea8Slogwang stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
1770a9643ea8Slogwang stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
1771a9643ea8Slogwang stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
1772a9643ea8Slogwang stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
1773a9643ea8Slogwang stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
1774a9643ea8Slogwang stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
1775a9643ea8Slogwang stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
1776a9643ea8Slogwang
1777a9643ea8Slogwang /* Interrupt Counts */
1778a9643ea8Slogwang
1779a9643ea8Slogwang stats->iac += E1000_READ_REG(hw, E1000_IAC);
1780a9643ea8Slogwang stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
1781a9643ea8Slogwang stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
1782a9643ea8Slogwang stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
1783a9643ea8Slogwang stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
1784a9643ea8Slogwang stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
1785a9643ea8Slogwang stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
1786a9643ea8Slogwang stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
1787a9643ea8Slogwang stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
1788a9643ea8Slogwang
1789a9643ea8Slogwang /* Host to Card Statistics */
1790a9643ea8Slogwang
1791a9643ea8Slogwang stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
1792a9643ea8Slogwang stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
1793a9643ea8Slogwang stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
1794a9643ea8Slogwang stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
1795a9643ea8Slogwang stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
1796a9643ea8Slogwang stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
1797a9643ea8Slogwang stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
1798a9643ea8Slogwang stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
1799a9643ea8Slogwang stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
18004418919fSjohnjiang stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
1801a9643ea8Slogwang stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
1802a9643ea8Slogwang stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
18034418919fSjohnjiang stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
1804a9643ea8Slogwang stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
1805a9643ea8Slogwang stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
1806a9643ea8Slogwang stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
1807a9643ea8Slogwang
1808a9643ea8Slogwang stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
1809a9643ea8Slogwang stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
1810a9643ea8Slogwang stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
1811a9643ea8Slogwang stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
1812a9643ea8Slogwang stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
1813a9643ea8Slogwang stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
1814a9643ea8Slogwang }
1815a9643ea8Slogwang
18162bfe3f2eSlogwang static int
eth_igb_stats_get(struct rte_eth_dev * dev,struct rte_eth_stats * rte_stats)1817a9643ea8Slogwang eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1818a9643ea8Slogwang {
1819a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1820a9643ea8Slogwang struct e1000_hw_stats *stats =
1821a9643ea8Slogwang E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1822a9643ea8Slogwang
1823a9643ea8Slogwang igb_read_stats_registers(hw, stats);
1824a9643ea8Slogwang
1825a9643ea8Slogwang if (rte_stats == NULL)
18262bfe3f2eSlogwang return -EINVAL;
1827a9643ea8Slogwang
1828a9643ea8Slogwang /* Rx Errors */
1829a9643ea8Slogwang rte_stats->imissed = stats->mpc;
1830a9643ea8Slogwang rte_stats->ierrors = stats->crcerrs +
1831a9643ea8Slogwang stats->rlec + stats->ruc + stats->roc +
1832a9643ea8Slogwang stats->rxerrc + stats->algnerrc + stats->cexterr;
1833a9643ea8Slogwang
1834a9643ea8Slogwang /* Tx Errors */
1835a9643ea8Slogwang rte_stats->oerrors = stats->ecol + stats->latecol;
1836a9643ea8Slogwang
1837a9643ea8Slogwang rte_stats->ipackets = stats->gprc;
1838a9643ea8Slogwang rte_stats->opackets = stats->gptc;
1839a9643ea8Slogwang rte_stats->ibytes = stats->gorc;
1840a9643ea8Slogwang rte_stats->obytes = stats->gotc;
18412bfe3f2eSlogwang return 0;
1842a9643ea8Slogwang }
1843a9643ea8Slogwang
18444418919fSjohnjiang static int
eth_igb_stats_reset(struct rte_eth_dev * dev)1845a9643ea8Slogwang eth_igb_stats_reset(struct rte_eth_dev *dev)
1846a9643ea8Slogwang {
1847a9643ea8Slogwang struct e1000_hw_stats *hw_stats =
1848a9643ea8Slogwang E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1849a9643ea8Slogwang
1850a9643ea8Slogwang /* HW registers are cleared on read */
1851a9643ea8Slogwang eth_igb_stats_get(dev, NULL);
1852a9643ea8Slogwang
1853a9643ea8Slogwang /* Reset software totals */
1854a9643ea8Slogwang memset(hw_stats, 0, sizeof(*hw_stats));
18554418919fSjohnjiang
18564418919fSjohnjiang return 0;
1857a9643ea8Slogwang }
1858a9643ea8Slogwang
18594418919fSjohnjiang static int
eth_igb_xstats_reset(struct rte_eth_dev * dev)1860a9643ea8Slogwang eth_igb_xstats_reset(struct rte_eth_dev *dev)
1861a9643ea8Slogwang {
1862a9643ea8Slogwang struct e1000_hw_stats *stats =
1863a9643ea8Slogwang E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1864a9643ea8Slogwang
1865a9643ea8Slogwang /* HW registers are cleared on read */
1866a9643ea8Slogwang eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);
1867a9643ea8Slogwang
1868a9643ea8Slogwang /* Reset software totals */
1869a9643ea8Slogwang memset(stats, 0, sizeof(*stats));
18704418919fSjohnjiang
18714418919fSjohnjiang return 0;
1872a9643ea8Slogwang }
1873a9643ea8Slogwang
/*
 * Fill @xstats_names with the names of all extended statistics.
 * A NULL @xstats_names is a query for the number of entries.
 * Returns IGB_NB_XSTATS in both cases.
 */
static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	__rte_unused unsigned int size)
{
	unsigned int idx;

	if (xstats_names == NULL)
		return IGB_NB_XSTATS;

	/* Note: limit checked in rte_eth_xstats_names() */
	for (idx = 0; idx < IGB_NB_XSTATS; idx++)
		strlcpy(xstats_names[idx].name,
			rte_igb_stats_strings[idx].name,
			sizeof(xstats_names[idx].name));

	return IGB_NB_XSTATS;
}
1892a9643ea8Slogwang
eth_igb_xstats_get_names_by_id(struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,const uint64_t * ids,unsigned int limit)18932bfe3f2eSlogwang static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
18942bfe3f2eSlogwang struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
18952bfe3f2eSlogwang unsigned int limit)
18962bfe3f2eSlogwang {
18972bfe3f2eSlogwang unsigned int i;
18982bfe3f2eSlogwang
18992bfe3f2eSlogwang if (!ids) {
19002bfe3f2eSlogwang if (xstats_names == NULL)
19012bfe3f2eSlogwang return IGB_NB_XSTATS;
19022bfe3f2eSlogwang
19032bfe3f2eSlogwang for (i = 0; i < IGB_NB_XSTATS; i++)
19044418919fSjohnjiang strlcpy(xstats_names[i].name,
19054418919fSjohnjiang rte_igb_stats_strings[i].name,
19064418919fSjohnjiang sizeof(xstats_names[i].name));
19072bfe3f2eSlogwang
19082bfe3f2eSlogwang return IGB_NB_XSTATS;
19092bfe3f2eSlogwang
19102bfe3f2eSlogwang } else {
19112bfe3f2eSlogwang struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS];
19122bfe3f2eSlogwang
19132bfe3f2eSlogwang eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
19142bfe3f2eSlogwang IGB_NB_XSTATS);
19152bfe3f2eSlogwang
19162bfe3f2eSlogwang for (i = 0; i < limit; i++) {
19172bfe3f2eSlogwang if (ids[i] >= IGB_NB_XSTATS) {
19182bfe3f2eSlogwang PMD_INIT_LOG(ERR, "id value isn't valid");
19192bfe3f2eSlogwang return -1;
19202bfe3f2eSlogwang }
19212bfe3f2eSlogwang strcpy(xstats_names[i].name,
19222bfe3f2eSlogwang xstats_names_copy[ids[i]].name);
19232bfe3f2eSlogwang }
19242bfe3f2eSlogwang return limit;
19252bfe3f2eSlogwang }
19262bfe3f2eSlogwang }
19272bfe3f2eSlogwang
1928a9643ea8Slogwang static int
eth_igb_xstats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,unsigned n)1929a9643ea8Slogwang eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1930a9643ea8Slogwang unsigned n)
1931a9643ea8Slogwang {
1932a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1933a9643ea8Slogwang struct e1000_hw_stats *hw_stats =
1934a9643ea8Slogwang E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1935a9643ea8Slogwang unsigned i;
1936a9643ea8Slogwang
1937a9643ea8Slogwang if (n < IGB_NB_XSTATS)
1938a9643ea8Slogwang return IGB_NB_XSTATS;
1939a9643ea8Slogwang
1940a9643ea8Slogwang igb_read_stats_registers(hw, hw_stats);
1941a9643ea8Slogwang
1942a9643ea8Slogwang /* If this is a reset xstats is NULL, and we have cleared the
1943a9643ea8Slogwang * registers by reading them.
1944a9643ea8Slogwang */
1945a9643ea8Slogwang if (!xstats)
1946a9643ea8Slogwang return 0;
1947a9643ea8Slogwang
1948a9643ea8Slogwang /* Extended stats */
1949a9643ea8Slogwang for (i = 0; i < IGB_NB_XSTATS; i++) {
1950a9643ea8Slogwang xstats[i].id = i;
1951a9643ea8Slogwang xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1952a9643ea8Slogwang rte_igb_stats_strings[i].offset);
1953a9643ea8Slogwang }
1954a9643ea8Slogwang
1955a9643ea8Slogwang return IGB_NB_XSTATS;
1956a9643ea8Slogwang }
1957a9643ea8Slogwang
19582bfe3f2eSlogwang static int
eth_igb_xstats_get_by_id(struct rte_eth_dev * dev,const uint64_t * ids,uint64_t * values,unsigned int n)19592bfe3f2eSlogwang eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
19602bfe3f2eSlogwang uint64_t *values, unsigned int n)
19612bfe3f2eSlogwang {
19622bfe3f2eSlogwang unsigned int i;
19632bfe3f2eSlogwang
19642bfe3f2eSlogwang if (!ids) {
19652bfe3f2eSlogwang struct e1000_hw *hw =
19662bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
19672bfe3f2eSlogwang struct e1000_hw_stats *hw_stats =
19682bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
19692bfe3f2eSlogwang
19702bfe3f2eSlogwang if (n < IGB_NB_XSTATS)
19712bfe3f2eSlogwang return IGB_NB_XSTATS;
19722bfe3f2eSlogwang
19732bfe3f2eSlogwang igb_read_stats_registers(hw, hw_stats);
19742bfe3f2eSlogwang
19752bfe3f2eSlogwang /* If this is a reset xstats is NULL, and we have cleared the
19762bfe3f2eSlogwang * registers by reading them.
19772bfe3f2eSlogwang */
19782bfe3f2eSlogwang if (!values)
19792bfe3f2eSlogwang return 0;
19802bfe3f2eSlogwang
19812bfe3f2eSlogwang /* Extended stats */
19822bfe3f2eSlogwang for (i = 0; i < IGB_NB_XSTATS; i++)
19832bfe3f2eSlogwang values[i] = *(uint64_t *)(((char *)hw_stats) +
19842bfe3f2eSlogwang rte_igb_stats_strings[i].offset);
19852bfe3f2eSlogwang
19862bfe3f2eSlogwang return IGB_NB_XSTATS;
19872bfe3f2eSlogwang
19882bfe3f2eSlogwang } else {
19892bfe3f2eSlogwang uint64_t values_copy[IGB_NB_XSTATS];
19902bfe3f2eSlogwang
19912bfe3f2eSlogwang eth_igb_xstats_get_by_id(dev, NULL, values_copy,
19922bfe3f2eSlogwang IGB_NB_XSTATS);
19932bfe3f2eSlogwang
19942bfe3f2eSlogwang for (i = 0; i < n; i++) {
19952bfe3f2eSlogwang if (ids[i] >= IGB_NB_XSTATS) {
19962bfe3f2eSlogwang PMD_INIT_LOG(ERR, "id value isn't valid");
19972bfe3f2eSlogwang return -1;
19982bfe3f2eSlogwang }
19992bfe3f2eSlogwang values[i] = values_copy[ids[i]];
20002bfe3f2eSlogwang }
20012bfe3f2eSlogwang return n;
20022bfe3f2eSlogwang }
20032bfe3f2eSlogwang }
20042bfe3f2eSlogwang
2005a9643ea8Slogwang static void
igbvf_read_stats_registers(struct e1000_hw * hw,struct e1000_vf_stats * hw_stats)2006a9643ea8Slogwang igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
2007a9643ea8Slogwang {
2008a9643ea8Slogwang /* Good Rx packets, include VF loopback */
2009a9643ea8Slogwang UPDATE_VF_STAT(E1000_VFGPRC,
2010a9643ea8Slogwang hw_stats->last_gprc, hw_stats->gprc);
2011a9643ea8Slogwang
2012a9643ea8Slogwang /* Good Rx octets, include VF loopback */
2013a9643ea8Slogwang UPDATE_VF_STAT(E1000_VFGORC,
2014a9643ea8Slogwang hw_stats->last_gorc, hw_stats->gorc);
2015a9643ea8Slogwang
2016a9643ea8Slogwang /* Good Tx packets, include VF loopback */
2017a9643ea8Slogwang UPDATE_VF_STAT(E1000_VFGPTC,
2018a9643ea8Slogwang hw_stats->last_gptc, hw_stats->gptc);
2019a9643ea8Slogwang
2020a9643ea8Slogwang /* Good Tx octets, include VF loopback */
2021a9643ea8Slogwang UPDATE_VF_STAT(E1000_VFGOTC,
2022a9643ea8Slogwang hw_stats->last_gotc, hw_stats->gotc);
2023a9643ea8Slogwang
2024a9643ea8Slogwang /* Rx Multicst packets */
2025a9643ea8Slogwang UPDATE_VF_STAT(E1000_VFMPRC,
2026a9643ea8Slogwang hw_stats->last_mprc, hw_stats->mprc);
2027a9643ea8Slogwang
2028a9643ea8Slogwang /* Good Rx loopback packets */
2029a9643ea8Slogwang UPDATE_VF_STAT(E1000_VFGPRLBC,
2030a9643ea8Slogwang hw_stats->last_gprlbc, hw_stats->gprlbc);
2031a9643ea8Slogwang
2032a9643ea8Slogwang /* Good Rx loopback octets */
2033a9643ea8Slogwang UPDATE_VF_STAT(E1000_VFGORLBC,
2034a9643ea8Slogwang hw_stats->last_gorlbc, hw_stats->gorlbc);
2035a9643ea8Slogwang
2036a9643ea8Slogwang /* Good Tx loopback packets */
2037a9643ea8Slogwang UPDATE_VF_STAT(E1000_VFGPTLBC,
2038a9643ea8Slogwang hw_stats->last_gptlbc, hw_stats->gptlbc);
2039a9643ea8Slogwang
2040a9643ea8Slogwang /* Good Tx loopback octets */
2041a9643ea8Slogwang UPDATE_VF_STAT(E1000_VFGOTLBC,
2042a9643ea8Slogwang hw_stats->last_gotlbc, hw_stats->gotlbc);
2043a9643ea8Slogwang }
2044a9643ea8Slogwang
/*
 * Fill @xstats_names with the names of all VF extended statistics.
 * A NULL @xstats_names is a query for the number of entries.
 * Returns IGBVF_NB_XSTATS in both cases.
 */
static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	__rte_unused unsigned limit)
{
	unsigned int idx;

	if (xstats_names == NULL)
		return IGBVF_NB_XSTATS;

	for (idx = 0; idx < IGBVF_NB_XSTATS; idx++)
		strlcpy(xstats_names[idx].name,
			rte_igbvf_stats_strings[idx].name,
			sizeof(xstats_names[idx].name));

	return IGBVF_NB_XSTATS;
}
2059a9643ea8Slogwang
2060a9643ea8Slogwang static int
eth_igbvf_xstats_get(struct rte_eth_dev * dev,struct rte_eth_xstat * xstats,unsigned n)2061a9643ea8Slogwang eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2062a9643ea8Slogwang unsigned n)
2063a9643ea8Slogwang {
2064a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2065a9643ea8Slogwang struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
2066a9643ea8Slogwang E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2067a9643ea8Slogwang unsigned i;
2068a9643ea8Slogwang
2069a9643ea8Slogwang if (n < IGBVF_NB_XSTATS)
2070a9643ea8Slogwang return IGBVF_NB_XSTATS;
2071a9643ea8Slogwang
2072a9643ea8Slogwang igbvf_read_stats_registers(hw, hw_stats);
2073a9643ea8Slogwang
2074a9643ea8Slogwang if (!xstats)
2075a9643ea8Slogwang return 0;
2076a9643ea8Slogwang
2077a9643ea8Slogwang for (i = 0; i < IGBVF_NB_XSTATS; i++) {
2078a9643ea8Slogwang xstats[i].id = i;
2079a9643ea8Slogwang xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
2080a9643ea8Slogwang rte_igbvf_stats_strings[i].offset);
2081a9643ea8Slogwang }
2082a9643ea8Slogwang
2083a9643ea8Slogwang return IGBVF_NB_XSTATS;
2084a9643ea8Slogwang }
2085a9643ea8Slogwang
20862bfe3f2eSlogwang static int
eth_igbvf_stats_get(struct rte_eth_dev * dev,struct rte_eth_stats * rte_stats)2087a9643ea8Slogwang eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
2088a9643ea8Slogwang {
2089a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2090a9643ea8Slogwang struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
2091a9643ea8Slogwang E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2092a9643ea8Slogwang
2093a9643ea8Slogwang igbvf_read_stats_registers(hw, hw_stats);
2094a9643ea8Slogwang
2095a9643ea8Slogwang if (rte_stats == NULL)
20962bfe3f2eSlogwang return -EINVAL;
2097a9643ea8Slogwang
2098a9643ea8Slogwang rte_stats->ipackets = hw_stats->gprc;
2099a9643ea8Slogwang rte_stats->ibytes = hw_stats->gorc;
2100a9643ea8Slogwang rte_stats->opackets = hw_stats->gptc;
2101a9643ea8Slogwang rte_stats->obytes = hw_stats->gotc;
21022bfe3f2eSlogwang return 0;
2103a9643ea8Slogwang }
2104a9643ea8Slogwang
21054418919fSjohnjiang static int
eth_igbvf_stats_reset(struct rte_eth_dev * dev)2106a9643ea8Slogwang eth_igbvf_stats_reset(struct rte_eth_dev *dev)
2107a9643ea8Slogwang {
2108a9643ea8Slogwang struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
2109a9643ea8Slogwang E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2110a9643ea8Slogwang
2111a9643ea8Slogwang /* Sync HW register to the last stats */
2112a9643ea8Slogwang eth_igbvf_stats_get(dev, NULL);
2113a9643ea8Slogwang
2114a9643ea8Slogwang /* reset HW current stats*/
2115a9643ea8Slogwang memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
2116a9643ea8Slogwang offsetof(struct e1000_vf_stats, gprc));
21174418919fSjohnjiang
21184418919fSjohnjiang return 0;
2119a9643ea8Slogwang }
2120a9643ea8Slogwang
21212bfe3f2eSlogwang static int
eth_igb_fw_version_get(struct rte_eth_dev * dev,char * fw_version,size_t fw_size)21222bfe3f2eSlogwang eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
21232bfe3f2eSlogwang size_t fw_size)
21242bfe3f2eSlogwang {
21252bfe3f2eSlogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
21262bfe3f2eSlogwang struct e1000_fw_version fw;
21272bfe3f2eSlogwang int ret;
21282bfe3f2eSlogwang
21292bfe3f2eSlogwang e1000_get_fw_version(hw, &fw);
21302bfe3f2eSlogwang
21312bfe3f2eSlogwang switch (hw->mac.type) {
21322bfe3f2eSlogwang case e1000_i210:
21332bfe3f2eSlogwang case e1000_i211:
21342bfe3f2eSlogwang if (!(e1000_get_flash_presence_i210(hw))) {
21352bfe3f2eSlogwang ret = snprintf(fw_version, fw_size,
21362bfe3f2eSlogwang "%2d.%2d-%d",
21372bfe3f2eSlogwang fw.invm_major, fw.invm_minor,
21382bfe3f2eSlogwang fw.invm_img_type);
21392bfe3f2eSlogwang break;
21402bfe3f2eSlogwang }
21412bfe3f2eSlogwang /* fall through */
21422bfe3f2eSlogwang default:
21432bfe3f2eSlogwang /* if option rom is valid, display its version too */
21442bfe3f2eSlogwang if (fw.or_valid) {
21452bfe3f2eSlogwang ret = snprintf(fw_version, fw_size,
21462bfe3f2eSlogwang "%d.%d, 0x%08x, %d.%d.%d",
21472bfe3f2eSlogwang fw.eep_major, fw.eep_minor, fw.etrack_id,
21482bfe3f2eSlogwang fw.or_major, fw.or_build, fw.or_patch);
21492bfe3f2eSlogwang /* no option rom */
21502bfe3f2eSlogwang } else {
21512bfe3f2eSlogwang if (fw.etrack_id != 0X0000) {
21522bfe3f2eSlogwang ret = snprintf(fw_version, fw_size,
21532bfe3f2eSlogwang "%d.%d, 0x%08x",
21542bfe3f2eSlogwang fw.eep_major, fw.eep_minor,
21552bfe3f2eSlogwang fw.etrack_id);
21562bfe3f2eSlogwang } else {
21572bfe3f2eSlogwang ret = snprintf(fw_version, fw_size,
21582bfe3f2eSlogwang "%d.%d.%d",
21592bfe3f2eSlogwang fw.eep_major, fw.eep_minor,
21602bfe3f2eSlogwang fw.eep_build);
21612bfe3f2eSlogwang }
21622bfe3f2eSlogwang }
21632bfe3f2eSlogwang break;
21642bfe3f2eSlogwang }
21652bfe3f2eSlogwang
21662bfe3f2eSlogwang ret += 1; /* add the size of '\0' */
21672bfe3f2eSlogwang if (fw_size < (u32)ret)
21682bfe3f2eSlogwang return ret;
21692bfe3f2eSlogwang else
21702bfe3f2eSlogwang return 0;
21712bfe3f2eSlogwang }
21722bfe3f2eSlogwang
/*
 * Report device capabilities and default queue configuration for a
 * PF port. Queue counts and VMDq pool limits vary per MAC type.
 *
 * Returns 0 on success, -EINVAL for an unrecognized MAC type.
 */
static int
eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	/* Per-port capa is a superset of the per-queue capa */
	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
				    dev_info->tx_queue_offload_capa;

	switch (hw->mac.type) {
	case e1000_82575:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;

	case e1000_82576:
		dev_info->max_rx_queues = 16;
		dev_info->max_tx_queues = 16;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 16;
		break;

	case e1000_82580:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 8;
		break;

	case e1000_i350:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 8;
		break;

	case e1000_i354:
		/* NOTE(review): max_vmdq_pools is not set here, unlike the
		 * other 8-queue MACs — presumably dev_info arrives zeroed
		 * from the ethdev layer; confirm this is intentional.
		 */
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		break;

	case e1000_i210:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;

	case e1000_i211:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		dev_info->max_vmdq_pools = 0;
		break;

	default:
		/* Should not happen */
		return -EINVAL;
	}
	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;

	/* Default thresholds depend on the MAC type (see macros at top
	 * of file, which reference 'hw').
	 */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGB_DEFAULT_RX_PTHRESH,
			.hthresh = IGB_DEFAULT_RX_HTHRESH,
			.wthresh = IGB_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGB_DEFAULT_TX_PTHRESH,
			.hthresh = IGB_DEFAULT_TX_HTHRESH,
			.wthresh = IGB_DEFAULT_TX_WTHRESH,
		},
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G;

	dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

	return 0;
}
2273a9643ea8Slogwang
2274a9643ea8Slogwang static const uint32_t *
eth_igb_supported_ptypes_get(struct rte_eth_dev * dev)2275a9643ea8Slogwang eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
2276a9643ea8Slogwang {
2277a9643ea8Slogwang static const uint32_t ptypes[] = {
2278a9643ea8Slogwang /* refers to igb_rxd_pkt_info_to_pkt_type() */
2279a9643ea8Slogwang RTE_PTYPE_L2_ETHER,
2280a9643ea8Slogwang RTE_PTYPE_L3_IPV4,
2281a9643ea8Slogwang RTE_PTYPE_L3_IPV4_EXT,
2282a9643ea8Slogwang RTE_PTYPE_L3_IPV6,
2283a9643ea8Slogwang RTE_PTYPE_L3_IPV6_EXT,
2284a9643ea8Slogwang RTE_PTYPE_L4_TCP,
2285a9643ea8Slogwang RTE_PTYPE_L4_UDP,
2286a9643ea8Slogwang RTE_PTYPE_L4_SCTP,
2287a9643ea8Slogwang RTE_PTYPE_TUNNEL_IP,
2288a9643ea8Slogwang RTE_PTYPE_INNER_L3_IPV6,
2289a9643ea8Slogwang RTE_PTYPE_INNER_L3_IPV6_EXT,
2290a9643ea8Slogwang RTE_PTYPE_INNER_L4_TCP,
2291a9643ea8Slogwang RTE_PTYPE_INNER_L4_UDP,
2292a9643ea8Slogwang RTE_PTYPE_UNKNOWN
2293a9643ea8Slogwang };
2294a9643ea8Slogwang
2295a9643ea8Slogwang if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
2296a9643ea8Slogwang dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
2297a9643ea8Slogwang return ptypes;
2298a9643ea8Slogwang return NULL;
2299a9643ea8Slogwang }
2300a9643ea8Slogwang
/*
 * Report device capabilities and default queue configuration for a
 * VF port. Queue counts depend on the VF adapter type.
 *
 * Returns 0 on success, -EINVAL for an unrecognized MAC type.
 */
static int
eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	/* NOTE(review): this initial tx_offload_capa value is overwritten
	 * below by igb_get_tx_port_offloads_capa(); presumably redundant —
	 * confirm before removing.
	 */
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				DEV_TX_OFFLOAD_IPV4_CKSUM  |
				DEV_TX_OFFLOAD_UDP_CKSUM   |
				DEV_TX_OFFLOAD_TCP_CKSUM   |
				DEV_TX_OFFLOAD_SCTP_CKSUM  |
				DEV_TX_OFFLOAD_TCP_TSO;
	switch (hw->mac.type) {
	case e1000_vfadapt:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		break;
	case e1000_vfadapt_i350:
		dev_info->max_rx_queues = 1;
		dev_info->max_tx_queues = 1;
		break;
	default:
		/* Should not happen */
		return -EINVAL;
	}

	/* Per-port capa is a superset of the per-queue capa */
	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
				    dev_info->tx_queue_offload_capa;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGB_DEFAULT_RX_PTHRESH,
			.hthresh = IGB_DEFAULT_RX_HTHRESH,
			.wthresh = IGB_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGB_DEFAULT_TX_PTHRESH,
			.hthresh = IGB_DEFAULT_TX_HTHRESH,
			.wthresh = IGB_DEFAULT_TX_WTHRESH,
		},
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	return 0;
}
2361a9643ea8Slogwang
2362a9643ea8Slogwang /* return 0 means link status changed, -1 means not changed */
2363a9643ea8Slogwang static int
eth_igb_link_update(struct rte_eth_dev * dev,int wait_to_complete)2364a9643ea8Slogwang eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2365a9643ea8Slogwang {
2366a9643ea8Slogwang struct e1000_hw *hw =
2367a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2368d30ea906Sjfb8856606 struct rte_eth_link link;
2369a9643ea8Slogwang int link_check, count;
2370a9643ea8Slogwang
2371a9643ea8Slogwang link_check = 0;
2372a9643ea8Slogwang hw->mac.get_link_status = 1;
2373a9643ea8Slogwang
2374a9643ea8Slogwang /* possible wait-to-complete in up to 9 seconds */
2375a9643ea8Slogwang for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
2376a9643ea8Slogwang /* Read the real link status */
2377a9643ea8Slogwang switch (hw->phy.media_type) {
2378a9643ea8Slogwang case e1000_media_type_copper:
2379a9643ea8Slogwang /* Do the work to read phy */
2380a9643ea8Slogwang e1000_check_for_link(hw);
2381a9643ea8Slogwang link_check = !hw->mac.get_link_status;
2382a9643ea8Slogwang break;
2383a9643ea8Slogwang
2384a9643ea8Slogwang case e1000_media_type_fiber:
2385a9643ea8Slogwang e1000_check_for_link(hw);
2386a9643ea8Slogwang link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2387a9643ea8Slogwang E1000_STATUS_LU);
2388a9643ea8Slogwang break;
2389a9643ea8Slogwang
2390a9643ea8Slogwang case e1000_media_type_internal_serdes:
2391a9643ea8Slogwang e1000_check_for_link(hw);
2392a9643ea8Slogwang link_check = hw->mac.serdes_has_link;
2393a9643ea8Slogwang break;
2394a9643ea8Slogwang
2395a9643ea8Slogwang /* VF device is type_unknown */
2396a9643ea8Slogwang case e1000_media_type_unknown:
2397a9643ea8Slogwang eth_igbvf_link_update(hw);
2398a9643ea8Slogwang link_check = !hw->mac.get_link_status;
2399a9643ea8Slogwang break;
2400a9643ea8Slogwang
2401a9643ea8Slogwang default:
2402a9643ea8Slogwang break;
2403a9643ea8Slogwang }
2404a9643ea8Slogwang if (link_check || wait_to_complete == 0)
2405a9643ea8Slogwang break;
2406a9643ea8Slogwang rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
2407a9643ea8Slogwang }
2408a9643ea8Slogwang memset(&link, 0, sizeof(link));
2409a9643ea8Slogwang
2410a9643ea8Slogwang /* Now we check if a transition has happened */
2411a9643ea8Slogwang if (link_check) {
2412a9643ea8Slogwang uint16_t duplex, speed;
2413a9643ea8Slogwang hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
2414a9643ea8Slogwang link.link_duplex = (duplex == FULL_DUPLEX) ?
2415a9643ea8Slogwang ETH_LINK_FULL_DUPLEX :
2416a9643ea8Slogwang ETH_LINK_HALF_DUPLEX;
2417a9643ea8Slogwang link.link_speed = speed;
2418a9643ea8Slogwang link.link_status = ETH_LINK_UP;
2419a9643ea8Slogwang link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2420a9643ea8Slogwang ETH_LINK_SPEED_FIXED);
2421a9643ea8Slogwang } else if (!link_check) {
2422a9643ea8Slogwang link.link_speed = 0;
2423a9643ea8Slogwang link.link_duplex = ETH_LINK_HALF_DUPLEX;
2424a9643ea8Slogwang link.link_status = ETH_LINK_DOWN;
24252bfe3f2eSlogwang link.link_autoneg = ETH_LINK_FIXED;
2426a9643ea8Slogwang }
2427a9643ea8Slogwang
2428d30ea906Sjfb8856606 return rte_eth_linkstatus_set(dev, &link);
2429a9643ea8Slogwang }
2430a9643ea8Slogwang
2431a9643ea8Slogwang /*
2432a9643ea8Slogwang * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
2433a9643ea8Slogwang * For ASF and Pass Through versions of f/w this means
2434a9643ea8Slogwang * that the driver is loaded.
2435a9643ea8Slogwang */
2436a9643ea8Slogwang static void
igb_hw_control_acquire(struct e1000_hw * hw)2437a9643ea8Slogwang igb_hw_control_acquire(struct e1000_hw *hw)
2438a9643ea8Slogwang {
2439a9643ea8Slogwang uint32_t ctrl_ext;
2440a9643ea8Slogwang
2441a9643ea8Slogwang /* Let firmware know the driver has taken over */
2442a9643ea8Slogwang ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2443a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2444a9643ea8Slogwang }
2445a9643ea8Slogwang
2446a9643ea8Slogwang /*
2447a9643ea8Slogwang * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
2448a9643ea8Slogwang * For ASF and Pass Through versions of f/w this means that the
2449a9643ea8Slogwang * driver is no longer loaded.
2450a9643ea8Slogwang */
2451a9643ea8Slogwang static void
igb_hw_control_release(struct e1000_hw * hw)2452a9643ea8Slogwang igb_hw_control_release(struct e1000_hw *hw)
2453a9643ea8Slogwang {
2454a9643ea8Slogwang uint32_t ctrl_ext;
2455a9643ea8Slogwang
2456a9643ea8Slogwang /* Let firmware taken over control of h/w */
2457a9643ea8Slogwang ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2458a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_CTRL_EXT,
2459a9643ea8Slogwang ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2460a9643ea8Slogwang }
2461a9643ea8Slogwang
2462a9643ea8Slogwang /*
2463a9643ea8Slogwang * Bit of a misnomer, what this really means is
2464a9643ea8Slogwang * to enable OS management of the system... aka
2465a9643ea8Slogwang * to disable special hardware management features.
2466a9643ea8Slogwang */
2467a9643ea8Slogwang static void
igb_init_manageability(struct e1000_hw * hw)2468a9643ea8Slogwang igb_init_manageability(struct e1000_hw *hw)
2469a9643ea8Slogwang {
2470a9643ea8Slogwang if (e1000_enable_mng_pass_thru(hw)) {
2471a9643ea8Slogwang uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
2472a9643ea8Slogwang uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2473a9643ea8Slogwang
2474a9643ea8Slogwang /* disable hardware interception of ARP */
2475a9643ea8Slogwang manc &= ~(E1000_MANC_ARP_EN);
2476a9643ea8Slogwang
2477a9643ea8Slogwang /* enable receiving management packets to the host */
2478a9643ea8Slogwang manc |= E1000_MANC_EN_MNG2HOST;
2479a9643ea8Slogwang manc2h |= 1 << 5; /* Mng Port 623 */
2480a9643ea8Slogwang manc2h |= 1 << 6; /* Mng Port 664 */
2481a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
2482a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_MANC, manc);
2483a9643ea8Slogwang }
2484a9643ea8Slogwang }
2485a9643ea8Slogwang
2486a9643ea8Slogwang static void
igb_release_manageability(struct e1000_hw * hw)2487a9643ea8Slogwang igb_release_manageability(struct e1000_hw *hw)
2488a9643ea8Slogwang {
2489a9643ea8Slogwang if (e1000_enable_mng_pass_thru(hw)) {
2490a9643ea8Slogwang uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2491a9643ea8Slogwang
2492a9643ea8Slogwang manc |= E1000_MANC_ARP_EN;
2493a9643ea8Slogwang manc &= ~E1000_MANC_EN_MNG2HOST;
2494a9643ea8Slogwang
2495a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_MANC, manc);
2496a9643ea8Slogwang }
2497a9643ea8Slogwang }
2498a9643ea8Slogwang
24994418919fSjohnjiang static int
eth_igb_promiscuous_enable(struct rte_eth_dev * dev)2500a9643ea8Slogwang eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
2501a9643ea8Slogwang {
2502a9643ea8Slogwang struct e1000_hw *hw =
2503a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2504a9643ea8Slogwang uint32_t rctl;
2505a9643ea8Slogwang
2506a9643ea8Slogwang rctl = E1000_READ_REG(hw, E1000_RCTL);
2507a9643ea8Slogwang rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2508a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RCTL, rctl);
25094418919fSjohnjiang
25104418919fSjohnjiang return 0;
2511a9643ea8Slogwang }
2512a9643ea8Slogwang
25134418919fSjohnjiang static int
eth_igb_promiscuous_disable(struct rte_eth_dev * dev)2514a9643ea8Slogwang eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
2515a9643ea8Slogwang {
2516a9643ea8Slogwang struct e1000_hw *hw =
2517a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2518a9643ea8Slogwang uint32_t rctl;
2519a9643ea8Slogwang
2520a9643ea8Slogwang rctl = E1000_READ_REG(hw, E1000_RCTL);
2521a9643ea8Slogwang rctl &= (~E1000_RCTL_UPE);
2522a9643ea8Slogwang if (dev->data->all_multicast == 1)
2523a9643ea8Slogwang rctl |= E1000_RCTL_MPE;
2524a9643ea8Slogwang else
2525a9643ea8Slogwang rctl &= (~E1000_RCTL_MPE);
2526a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RCTL, rctl);
25274418919fSjohnjiang
25284418919fSjohnjiang return 0;
2529a9643ea8Slogwang }
2530a9643ea8Slogwang
25314418919fSjohnjiang static int
eth_igb_allmulticast_enable(struct rte_eth_dev * dev)2532a9643ea8Slogwang eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
2533a9643ea8Slogwang {
2534a9643ea8Slogwang struct e1000_hw *hw =
2535a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2536a9643ea8Slogwang uint32_t rctl;
2537a9643ea8Slogwang
2538a9643ea8Slogwang rctl = E1000_READ_REG(hw, E1000_RCTL);
2539a9643ea8Slogwang rctl |= E1000_RCTL_MPE;
2540a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RCTL, rctl);
25414418919fSjohnjiang
25424418919fSjohnjiang return 0;
2543a9643ea8Slogwang }
2544a9643ea8Slogwang
25454418919fSjohnjiang static int
eth_igb_allmulticast_disable(struct rte_eth_dev * dev)2546a9643ea8Slogwang eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
2547a9643ea8Slogwang {
2548a9643ea8Slogwang struct e1000_hw *hw =
2549a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2550a9643ea8Slogwang uint32_t rctl;
2551a9643ea8Slogwang
2552a9643ea8Slogwang if (dev->data->promiscuous == 1)
25534418919fSjohnjiang return 0; /* must remain in all_multicast mode */
2554a9643ea8Slogwang rctl = E1000_READ_REG(hw, E1000_RCTL);
2555a9643ea8Slogwang rctl &= (~E1000_RCTL_MPE);
2556a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RCTL, rctl);
25574418919fSjohnjiang
25584418919fSjohnjiang return 0;
2559a9643ea8Slogwang }
2560a9643ea8Slogwang
2561a9643ea8Slogwang static int
eth_igb_vlan_filter_set(struct rte_eth_dev * dev,uint16_t vlan_id,int on)2562a9643ea8Slogwang eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2563a9643ea8Slogwang {
2564a9643ea8Slogwang struct e1000_hw *hw =
2565a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2566a9643ea8Slogwang struct e1000_vfta * shadow_vfta =
2567a9643ea8Slogwang E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2568a9643ea8Slogwang uint32_t vfta;
2569a9643ea8Slogwang uint32_t vid_idx;
2570a9643ea8Slogwang uint32_t vid_bit;
2571a9643ea8Slogwang
2572a9643ea8Slogwang vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
2573a9643ea8Slogwang E1000_VFTA_ENTRY_MASK);
2574a9643ea8Slogwang vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
2575a9643ea8Slogwang vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
2576a9643ea8Slogwang if (on)
2577a9643ea8Slogwang vfta |= vid_bit;
2578a9643ea8Slogwang else
2579a9643ea8Slogwang vfta &= ~vid_bit;
2580a9643ea8Slogwang E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
2581a9643ea8Slogwang
2582a9643ea8Slogwang /* update local VFTA copy */
2583a9643ea8Slogwang shadow_vfta->vfta[vid_idx] = vfta;
2584a9643ea8Slogwang
2585a9643ea8Slogwang return 0;
2586a9643ea8Slogwang }
2587a9643ea8Slogwang
2588a9643ea8Slogwang static int
eth_igb_vlan_tpid_set(struct rte_eth_dev * dev,enum rte_vlan_type vlan_type,uint16_t tpid)2589a9643ea8Slogwang eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
2590a9643ea8Slogwang enum rte_vlan_type vlan_type,
2591a9643ea8Slogwang uint16_t tpid)
2592a9643ea8Slogwang {
2593a9643ea8Slogwang struct e1000_hw *hw =
2594a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2595a9643ea8Slogwang uint32_t reg, qinq;
2596a9643ea8Slogwang
2597a9643ea8Slogwang qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
2598a9643ea8Slogwang qinq &= E1000_CTRL_EXT_EXT_VLAN;
2599a9643ea8Slogwang
2600a9643ea8Slogwang /* only outer TPID of double VLAN can be configured*/
2601a9643ea8Slogwang if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
2602a9643ea8Slogwang reg = E1000_READ_REG(hw, E1000_VET);
2603a9643ea8Slogwang reg = (reg & (~E1000_VET_VET_EXT)) |
2604a9643ea8Slogwang ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
2605a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_VET, reg);
2606a9643ea8Slogwang
2607a9643ea8Slogwang return 0;
2608a9643ea8Slogwang }
2609a9643ea8Slogwang
2610a9643ea8Slogwang /* all other TPID values are read-only*/
2611a9643ea8Slogwang PMD_DRV_LOG(ERR, "Not supported");
2612a9643ea8Slogwang
2613a9643ea8Slogwang return -ENOTSUP;
2614a9643ea8Slogwang }
2615a9643ea8Slogwang
2616a9643ea8Slogwang static void
igb_vlan_hw_filter_disable(struct rte_eth_dev * dev)2617a9643ea8Slogwang igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
2618a9643ea8Slogwang {
2619a9643ea8Slogwang struct e1000_hw *hw =
2620a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2621a9643ea8Slogwang uint32_t reg;
2622a9643ea8Slogwang
2623a9643ea8Slogwang /* Filter Table Disable */
2624a9643ea8Slogwang reg = E1000_READ_REG(hw, E1000_RCTL);
2625a9643ea8Slogwang reg &= ~E1000_RCTL_CFIEN;
2626a9643ea8Slogwang reg &= ~E1000_RCTL_VFE;
2627a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RCTL, reg);
2628a9643ea8Slogwang }
2629a9643ea8Slogwang
2630a9643ea8Slogwang static void
igb_vlan_hw_filter_enable(struct rte_eth_dev * dev)2631a9643ea8Slogwang igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2632a9643ea8Slogwang {
2633a9643ea8Slogwang struct e1000_hw *hw =
2634a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2635a9643ea8Slogwang struct e1000_vfta * shadow_vfta =
2636a9643ea8Slogwang E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2637a9643ea8Slogwang uint32_t reg;
2638a9643ea8Slogwang int i;
2639a9643ea8Slogwang
2640a9643ea8Slogwang /* Filter Table Enable, CFI not used for packet acceptance */
2641a9643ea8Slogwang reg = E1000_READ_REG(hw, E1000_RCTL);
2642a9643ea8Slogwang reg &= ~E1000_RCTL_CFIEN;
2643a9643ea8Slogwang reg |= E1000_RCTL_VFE;
2644a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RCTL, reg);
2645a9643ea8Slogwang
2646a9643ea8Slogwang /* restore VFTA table */
2647a9643ea8Slogwang for (i = 0; i < IGB_VFTA_SIZE; i++)
2648a9643ea8Slogwang E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
2649a9643ea8Slogwang }
2650a9643ea8Slogwang
2651a9643ea8Slogwang static void
igb_vlan_hw_strip_disable(struct rte_eth_dev * dev)2652a9643ea8Slogwang igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
2653a9643ea8Slogwang {
2654a9643ea8Slogwang struct e1000_hw *hw =
2655a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2656a9643ea8Slogwang uint32_t reg;
2657a9643ea8Slogwang
2658a9643ea8Slogwang /* VLAN Mode Disable */
2659a9643ea8Slogwang reg = E1000_READ_REG(hw, E1000_CTRL);
2660a9643ea8Slogwang reg &= ~E1000_CTRL_VME;
2661a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_CTRL, reg);
2662a9643ea8Slogwang }
2663a9643ea8Slogwang
2664a9643ea8Slogwang static void
igb_vlan_hw_strip_enable(struct rte_eth_dev * dev)2665a9643ea8Slogwang igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
2666a9643ea8Slogwang {
2667a9643ea8Slogwang struct e1000_hw *hw =
2668a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2669a9643ea8Slogwang uint32_t reg;
2670a9643ea8Slogwang
2671a9643ea8Slogwang /* VLAN Mode Enable */
2672a9643ea8Slogwang reg = E1000_READ_REG(hw, E1000_CTRL);
2673a9643ea8Slogwang reg |= E1000_CTRL_VME;
2674a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_CTRL, reg);
2675a9643ea8Slogwang }
2676a9643ea8Slogwang
2677a9643ea8Slogwang static void
igb_vlan_hw_extend_disable(struct rte_eth_dev * dev)2678a9643ea8Slogwang igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2679a9643ea8Slogwang {
2680a9643ea8Slogwang struct e1000_hw *hw =
2681a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2682a9643ea8Slogwang uint32_t reg;
2683a9643ea8Slogwang
2684a9643ea8Slogwang /* CTRL_EXT: Extended VLAN */
2685a9643ea8Slogwang reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2686a9643ea8Slogwang reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
2687a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2688a9643ea8Slogwang
2689a9643ea8Slogwang /* Update maximum packet length */
2690d30ea906Sjfb8856606 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
2691a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RLPML,
2692a9643ea8Slogwang dev->data->dev_conf.rxmode.max_rx_pkt_len +
2693a9643ea8Slogwang VLAN_TAG_SIZE);
2694a9643ea8Slogwang }
2695a9643ea8Slogwang
2696a9643ea8Slogwang static void
igb_vlan_hw_extend_enable(struct rte_eth_dev * dev)2697a9643ea8Slogwang igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2698a9643ea8Slogwang {
2699a9643ea8Slogwang struct e1000_hw *hw =
2700a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2701a9643ea8Slogwang uint32_t reg;
2702a9643ea8Slogwang
2703a9643ea8Slogwang /* CTRL_EXT: Extended VLAN */
2704a9643ea8Slogwang reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2705a9643ea8Slogwang reg |= E1000_CTRL_EXT_EXTEND_VLAN;
2706a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2707a9643ea8Slogwang
2708a9643ea8Slogwang /* Update maximum packet length */
2709d30ea906Sjfb8856606 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
2710a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RLPML,
2711a9643ea8Slogwang dev->data->dev_conf.rxmode.max_rx_pkt_len +
2712a9643ea8Slogwang 2 * VLAN_TAG_SIZE);
2713a9643ea8Slogwang }
2714a9643ea8Slogwang
27152bfe3f2eSlogwang static int
eth_igb_vlan_offload_set(struct rte_eth_dev * dev,int mask)2716a9643ea8Slogwang eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2717a9643ea8Slogwang {
2718d30ea906Sjfb8856606 struct rte_eth_rxmode *rxmode;
2719d30ea906Sjfb8856606
2720d30ea906Sjfb8856606 rxmode = &dev->data->dev_conf.rxmode;
2721a9643ea8Slogwang if(mask & ETH_VLAN_STRIP_MASK){
2722d30ea906Sjfb8856606 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2723a9643ea8Slogwang igb_vlan_hw_strip_enable(dev);
2724a9643ea8Slogwang else
2725a9643ea8Slogwang igb_vlan_hw_strip_disable(dev);
2726a9643ea8Slogwang }
2727a9643ea8Slogwang
2728a9643ea8Slogwang if(mask & ETH_VLAN_FILTER_MASK){
2729d30ea906Sjfb8856606 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2730a9643ea8Slogwang igb_vlan_hw_filter_enable(dev);
2731a9643ea8Slogwang else
2732a9643ea8Slogwang igb_vlan_hw_filter_disable(dev);
2733a9643ea8Slogwang }
2734a9643ea8Slogwang
2735a9643ea8Slogwang if(mask & ETH_VLAN_EXTEND_MASK){
2736d30ea906Sjfb8856606 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2737a9643ea8Slogwang igb_vlan_hw_extend_enable(dev);
2738a9643ea8Slogwang else
2739a9643ea8Slogwang igb_vlan_hw_extend_disable(dev);
2740a9643ea8Slogwang }
27412bfe3f2eSlogwang
27422bfe3f2eSlogwang return 0;
2743a9643ea8Slogwang }
2744a9643ea8Slogwang
2745a9643ea8Slogwang
2746a9643ea8Slogwang /**
2747a9643ea8Slogwang * It enables the interrupt mask and then enable the interrupt.
2748a9643ea8Slogwang *
2749a9643ea8Slogwang * @param dev
2750a9643ea8Slogwang * Pointer to struct rte_eth_dev.
27512bfe3f2eSlogwang * @param on
27522bfe3f2eSlogwang * Enable or Disable
2753a9643ea8Slogwang *
2754a9643ea8Slogwang * @return
2755a9643ea8Slogwang * - On success, zero.
2756a9643ea8Slogwang * - On failure, a negative value.
2757a9643ea8Slogwang */
2758a9643ea8Slogwang static int
eth_igb_lsc_interrupt_setup(struct rte_eth_dev * dev,uint8_t on)27592bfe3f2eSlogwang eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2760a9643ea8Slogwang {
2761a9643ea8Slogwang struct e1000_interrupt *intr =
2762a9643ea8Slogwang E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2763a9643ea8Slogwang
27642bfe3f2eSlogwang if (on)
2765a9643ea8Slogwang intr->mask |= E1000_ICR_LSC;
27662bfe3f2eSlogwang else
27672bfe3f2eSlogwang intr->mask &= ~E1000_ICR_LSC;
2768a9643ea8Slogwang
2769a9643ea8Slogwang return 0;
2770a9643ea8Slogwang }
2771a9643ea8Slogwang
/* It clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
2774a9643ea8Slogwang *
2775a9643ea8Slogwang * @param dev
2776a9643ea8Slogwang * Pointer to struct rte_eth_dev.
2777a9643ea8Slogwang *
2778a9643ea8Slogwang * @return
2779a9643ea8Slogwang * - On success, zero.
2780a9643ea8Slogwang * - On failure, a negative value.
2781a9643ea8Slogwang */
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t mask, regval;
	int ret;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	/* When other (non-Rx) interrupts are allowed, vector 0 is kept for
	 * misc causes, so the per-queue bits start at position 1.
	 */
	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	ret = eth_igb_infos_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	/* Build a contiguous mask covering max_rx_queues bits, shifted past
	 * the misc vector, and OR it into EIMS to unmask those causes.
	 * NOTE(review): assumes 0 < max_rx_queues <= 32 — a value of 0 would
	 * shift by 32 (undefined); confirm eth_igb_infos_get guarantees this.
	 */
	mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift;
	regval = E1000_READ_REG(hw, E1000_EIMS);
	E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);

	return 0;
}
2804a9643ea8Slogwang
2805a9643ea8Slogwang /*
2806a9643ea8Slogwang * It reads ICR and gets interrupt causes, check it and set a bit flag
2807a9643ea8Slogwang * to update link status.
2808a9643ea8Slogwang *
2809a9643ea8Slogwang * @param dev
2810a9643ea8Slogwang * Pointer to struct rte_eth_dev.
2811a9643ea8Slogwang *
2812a9643ea8Slogwang * @return
2813a9643ea8Slogwang * - On success, zero.
2814a9643ea8Slogwang * - On failure, a negative value.
2815a9643ea8Slogwang */
2816a9643ea8Slogwang static int
eth_igb_interrupt_get_status(struct rte_eth_dev * dev)2817a9643ea8Slogwang eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2818a9643ea8Slogwang {
2819a9643ea8Slogwang uint32_t icr;
2820a9643ea8Slogwang struct e1000_hw *hw =
2821a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2822a9643ea8Slogwang struct e1000_interrupt *intr =
2823a9643ea8Slogwang E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2824a9643ea8Slogwang
28251646932aSjfb8856606 igb_intr_disable(dev);
2826a9643ea8Slogwang
2827a9643ea8Slogwang /* read-on-clear nic registers here */
2828a9643ea8Slogwang icr = E1000_READ_REG(hw, E1000_ICR);
2829a9643ea8Slogwang
2830a9643ea8Slogwang intr->flags = 0;
2831a9643ea8Slogwang if (icr & E1000_ICR_LSC) {
2832a9643ea8Slogwang intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2833a9643ea8Slogwang }
2834a9643ea8Slogwang
2835a9643ea8Slogwang if (icr & E1000_ICR_VMMB)
2836a9643ea8Slogwang intr->flags |= E1000_FLAG_MAILBOX;
2837a9643ea8Slogwang
2838a9643ea8Slogwang return 0;
2839a9643ea8Slogwang }
2840a9643ea8Slogwang
2841a9643ea8Slogwang /*
 * It executes link_update after knowing an interrupt is present.
2843a9643ea8Slogwang *
2844a9643ea8Slogwang * @param dev
2845a9643ea8Slogwang * Pointer to struct rte_eth_dev.
2846a9643ea8Slogwang *
2847a9643ea8Slogwang * @return
2848a9643ea8Slogwang * - On success, zero.
2849a9643ea8Slogwang * - On failure, a negative value.
2850a9643ea8Slogwang */
static int
eth_igb_interrupt_action(struct rte_eth_dev *dev,
			 struct rte_intr_handle *intr_handle)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	/* service a pending VF->PF mailbox request first */
	if (intr->flags & E1000_FLAG_MAILBOX) {
		igb_pf_mbx_process(dev);
		intr->flags &= ~E1000_FLAG_MAILBOX;
	}

	/* re-enable interrupts (get_status disabled them) and ack the fd */
	igb_intr_enable(dev);
	rte_intr_ack(intr_handle);

	if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		hw->mac.get_link_status = 1;
		ret = eth_igb_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return 0;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status) {
			PMD_INIT_LOG(INFO,
				     " Port %d: Link Up - speed %u Mbps - %s",
				     dev->data->port_id,
				     (unsigned)link.link_speed,
				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				     "full-duplex" : "half-duplex");
		} else {
			PMD_INIT_LOG(INFO, " Port %d: Link Down",
				     dev->data->port_id);
		}

		PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			     pci_dev->addr.domain,
			     pci_dev->addr.bus,
			     pci_dev->addr.devid,
			     pci_dev->addr.function);
		/* notify applications registered for link-state changes */
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}

	return 0;
}
2905a9643ea8Slogwang
2906a9643ea8Slogwang /**
2907a9643ea8Slogwang * Interrupt handler which shall be registered at first.
2908a9643ea8Slogwang *
2909a9643ea8Slogwang * @param handle
2910a9643ea8Slogwang * Pointer to interrupt handle.
2911a9643ea8Slogwang * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
2913a9643ea8Slogwang *
2914a9643ea8Slogwang * @return
2915a9643ea8Slogwang * void
2916a9643ea8Slogwang */
2917a9643ea8Slogwang static void
eth_igb_interrupt_handler(void * param)29182bfe3f2eSlogwang eth_igb_interrupt_handler(void *param)
2919a9643ea8Slogwang {
2920a9643ea8Slogwang struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2921a9643ea8Slogwang
2922a9643ea8Slogwang eth_igb_interrupt_get_status(dev);
29232bfe3f2eSlogwang eth_igb_interrupt_action(dev, dev->intr_handle);
2924a9643ea8Slogwang }
2925a9643ea8Slogwang
2926a9643ea8Slogwang static int
eth_igbvf_interrupt_get_status(struct rte_eth_dev * dev)2927a9643ea8Slogwang eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
2928a9643ea8Slogwang {
2929a9643ea8Slogwang uint32_t eicr;
2930a9643ea8Slogwang struct e1000_hw *hw =
2931a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2932a9643ea8Slogwang struct e1000_interrupt *intr =
2933a9643ea8Slogwang E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2934a9643ea8Slogwang
2935a9643ea8Slogwang igbvf_intr_disable(hw);
2936a9643ea8Slogwang
2937a9643ea8Slogwang /* read-on-clear nic registers here */
2938a9643ea8Slogwang eicr = E1000_READ_REG(hw, E1000_EICR);
2939a9643ea8Slogwang intr->flags = 0;
2940a9643ea8Slogwang
2941a9643ea8Slogwang if (eicr == E1000_VTIVAR_MISC_MAILBOX)
2942a9643ea8Slogwang intr->flags |= E1000_FLAG_MAILBOX;
2943a9643ea8Slogwang
2944a9643ea8Slogwang return 0;
2945a9643ea8Slogwang }
2946a9643ea8Slogwang
/* Poll the PF->VF mailbox and handle a pending PF control message. */
void igbvf_mbx_process(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_mbx_info *mbx = &hw->mbx;
	u32 in_msg = 0;

	/* peek the message first */
	in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));

	/* PF reset VF event */
	if (in_msg == E1000_PF_CONTROL_MSG) {
		/* dummy mbx read to ack pf */
		if (mbx->ops.read(hw, &in_msg, 1, 0))
			return;
		/* tell the application the PF reset this VF so it can
		 * restart the port and recover
		 */
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					     NULL);
	}
}
2966a9643ea8Slogwang
2967a9643ea8Slogwang static int
eth_igbvf_interrupt_action(struct rte_eth_dev * dev,struct rte_intr_handle * intr_handle)29682bfe3f2eSlogwang eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle)
2969a9643ea8Slogwang {
2970a9643ea8Slogwang struct e1000_interrupt *intr =
2971a9643ea8Slogwang E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2972a9643ea8Slogwang
2973a9643ea8Slogwang if (intr->flags & E1000_FLAG_MAILBOX) {
2974a9643ea8Slogwang igbvf_mbx_process(dev);
2975a9643ea8Slogwang intr->flags &= ~E1000_FLAG_MAILBOX;
2976a9643ea8Slogwang }
2977a9643ea8Slogwang
2978a9643ea8Slogwang igbvf_intr_enable(dev);
29794418919fSjohnjiang rte_intr_ack(intr_handle);
2980a9643ea8Slogwang
2981a9643ea8Slogwang return 0;
2982a9643ea8Slogwang }
2983a9643ea8Slogwang
2984a9643ea8Slogwang static void
eth_igbvf_interrupt_handler(void * param)29852bfe3f2eSlogwang eth_igbvf_interrupt_handler(void *param)
2986a9643ea8Slogwang {
2987a9643ea8Slogwang struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2988a9643ea8Slogwang
2989a9643ea8Slogwang eth_igbvf_interrupt_get_status(dev);
29902bfe3f2eSlogwang eth_igbvf_interrupt_action(dev, dev->intr_handle);
2991a9643ea8Slogwang }
2992a9643ea8Slogwang
2993a9643ea8Slogwang static int
eth_igb_led_on(struct rte_eth_dev * dev)2994a9643ea8Slogwang eth_igb_led_on(struct rte_eth_dev *dev)
2995a9643ea8Slogwang {
2996a9643ea8Slogwang struct e1000_hw *hw;
2997a9643ea8Slogwang
2998a9643ea8Slogwang hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2999a9643ea8Slogwang return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
3000a9643ea8Slogwang }
3001a9643ea8Slogwang
3002a9643ea8Slogwang static int
eth_igb_led_off(struct rte_eth_dev * dev)3003a9643ea8Slogwang eth_igb_led_off(struct rte_eth_dev *dev)
3004a9643ea8Slogwang {
3005a9643ea8Slogwang struct e1000_hw *hw;
3006a9643ea8Slogwang
3007a9643ea8Slogwang hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3008a9643ea8Slogwang return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
3009a9643ea8Slogwang }
3010a9643ea8Slogwang
3011a9643ea8Slogwang static int
eth_igb_flow_ctrl_get(struct rte_eth_dev * dev,struct rte_eth_fc_conf * fc_conf)3012a9643ea8Slogwang eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3013a9643ea8Slogwang {
3014a9643ea8Slogwang struct e1000_hw *hw;
3015a9643ea8Slogwang uint32_t ctrl;
3016a9643ea8Slogwang int tx_pause;
3017a9643ea8Slogwang int rx_pause;
3018a9643ea8Slogwang
3019a9643ea8Slogwang hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3020a9643ea8Slogwang fc_conf->pause_time = hw->fc.pause_time;
3021a9643ea8Slogwang fc_conf->high_water = hw->fc.high_water;
3022a9643ea8Slogwang fc_conf->low_water = hw->fc.low_water;
3023a9643ea8Slogwang fc_conf->send_xon = hw->fc.send_xon;
3024a9643ea8Slogwang fc_conf->autoneg = hw->mac.autoneg;
3025a9643ea8Slogwang
3026a9643ea8Slogwang /*
3027a9643ea8Slogwang * Return rx_pause and tx_pause status according to actual setting of
3028a9643ea8Slogwang * the TFCE and RFCE bits in the CTRL register.
3029a9643ea8Slogwang */
3030a9643ea8Slogwang ctrl = E1000_READ_REG(hw, E1000_CTRL);
3031a9643ea8Slogwang if (ctrl & E1000_CTRL_TFCE)
3032a9643ea8Slogwang tx_pause = 1;
3033a9643ea8Slogwang else
3034a9643ea8Slogwang tx_pause = 0;
3035a9643ea8Slogwang
3036a9643ea8Slogwang if (ctrl & E1000_CTRL_RFCE)
3037a9643ea8Slogwang rx_pause = 1;
3038a9643ea8Slogwang else
3039a9643ea8Slogwang rx_pause = 0;
3040a9643ea8Slogwang
3041a9643ea8Slogwang if (rx_pause && tx_pause)
3042a9643ea8Slogwang fc_conf->mode = RTE_FC_FULL;
3043a9643ea8Slogwang else if (rx_pause)
3044a9643ea8Slogwang fc_conf->mode = RTE_FC_RX_PAUSE;
3045a9643ea8Slogwang else if (tx_pause)
3046a9643ea8Slogwang fc_conf->mode = RTE_FC_TX_PAUSE;
3047a9643ea8Slogwang else
3048a9643ea8Slogwang fc_conf->mode = RTE_FC_NONE;
3049a9643ea8Slogwang
3050a9643ea8Slogwang return 0;
3051a9643ea8Slogwang }
3052a9643ea8Slogwang
static int
eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	int err;
	/* maps rte_eth_fc_mode (NONE/RX_PAUSE/TX_PAUSE/FULL) to the
	 * matching e1000_fc_mode; indexed directly by fc_conf->mode
	 */
	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
		e1000_fc_none,
		e1000_fc_rx_pause,
		e1000_fc_tx_pause,
		e1000_fc_full
	};
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* changing the autoneg setting here is not supported */
	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;
	rx_buf_size = igb_get_rx_buffer_size(hw);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if ((fc_conf->high_water > max_high_water) ||
	    (fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
		PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
		return -EINVAL;
	}

	/* stage the requested parameters; e1000_setup_link_generic applies
	 * them to the hardware
	 */
	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = e1000_setup_link_generic(hw);
	if (err == E1000_SUCCESS) {

		/* check if we want to forward MAC frames - driver doesn't have native
		 * capability to do that, so we'll write the registers ourselves */

		rctl = E1000_READ_REG(hw, E1000_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= E1000_RCTL_PMCF;
		else
			rctl &= ~E1000_RCTL_PMCF;

		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
	return -EIO;
}
3112a9643ea8Slogwang
3113a9643ea8Slogwang #define E1000_RAH_POOLSEL_SHIFT (18)
31142bfe3f2eSlogwang static int
eth_igb_rar_set(struct rte_eth_dev * dev,struct rte_ether_addr * mac_addr,uint32_t index,uint32_t pool)31154418919fSjohnjiang eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
31162bfe3f2eSlogwang uint32_t index, uint32_t pool)
3117a9643ea8Slogwang {
3118a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3119a9643ea8Slogwang uint32_t rah;
3120a9643ea8Slogwang
3121a9643ea8Slogwang e1000_rar_set(hw, mac_addr->addr_bytes, index);
3122a9643ea8Slogwang rah = E1000_READ_REG(hw, E1000_RAH(index));
3123a9643ea8Slogwang rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
3124a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RAH(index), rah);
31252bfe3f2eSlogwang return 0;
3126a9643ea8Slogwang }
3127a9643ea8Slogwang
3128a9643ea8Slogwang static void
eth_igb_rar_clear(struct rte_eth_dev * dev,uint32_t index)3129a9643ea8Slogwang eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
3130a9643ea8Slogwang {
31314418919fSjohnjiang uint8_t addr[RTE_ETHER_ADDR_LEN];
3132a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3133a9643ea8Slogwang
3134a9643ea8Slogwang memset(addr, 0, sizeof(addr));
3135a9643ea8Slogwang
3136a9643ea8Slogwang e1000_rar_set(hw, addr, index);
3137a9643ea8Slogwang }
3138a9643ea8Slogwang
static int
eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr)
{
	/* replace RAR[0], the default MAC filter, with the new address */
	eth_igb_rar_clear(dev, 0);
	eth_igb_rar_set(dev, (void *)addr, 0, 0);

	return 0;
}
3148a9643ea8Slogwang /*
3149a9643ea8Slogwang * Virtual Function operations
3150a9643ea8Slogwang */
/*
 * Mask all VF interrupt causes so no further interrupts are raised.
 * The flush forces the posted register write to reach the device
 * before the caller proceeds.
 */
static void
igbvf_intr_disable(struct e1000_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	/* Clear interrupt mask to stop from interrupts being generated */
	E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);

	E1000_WRITE_FLUSH(hw);
}
3161a9643ea8Slogwang
/*
 * Quiesce the VF datapath: mask interrupts, drain pending causes and
 * disable every TX and RX queue. The queue counts come from
 * eth_igbvf_infos_get(); if that query fails the function bails out
 * without touching the hardware.
 */
static void
igbvf_stop_adapter(struct rte_eth_dev *dev)
{
	u32 reg_val;
	u16 i;
	struct rte_eth_dev_info dev_info;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	memset(&dev_info, 0, sizeof(dev_info));
	ret = eth_igbvf_infos_get(dev, &dev_info);
	if (ret != 0)
		return;

	/* Clear interrupt mask to stop from interrupts being generated */
	igbvf_intr_disable(hw);

	/* Clear any pending interrupts, flush previous writes */
	E1000_READ_REG(hw, E1000_EICR);

	/* Disable the transmit unit. Each queue must be disabled. */
	for (i = 0; i < dev_info.max_tx_queues; i++)
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < dev_info.max_rx_queues; i++) {
		reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
		reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
		/* NOTE(review): busy-wait with no timeout; presumably the
		 * queue-enable bit always clears on this hardware — confirm
		 * against the 82576/i350 datasheet before relying on it.
		 */
		while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
			;
	}

	/* flush all queues disables */
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);
}
3199a9643ea8Slogwang
eth_igbvf_link_update(struct e1000_hw * hw)3200a9643ea8Slogwang static int eth_igbvf_link_update(struct e1000_hw *hw)
3201a9643ea8Slogwang {
3202a9643ea8Slogwang struct e1000_mbx_info *mbx = &hw->mbx;
3203a9643ea8Slogwang struct e1000_mac_info *mac = &hw->mac;
3204a9643ea8Slogwang int ret_val = E1000_SUCCESS;
3205a9643ea8Slogwang
3206a9643ea8Slogwang PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
3207a9643ea8Slogwang
3208a9643ea8Slogwang /*
3209a9643ea8Slogwang * We only want to run this if there has been a rst asserted.
3210a9643ea8Slogwang * in this case that could mean a link change, device reset,
3211a9643ea8Slogwang * or a virtual function reset
3212a9643ea8Slogwang */
3213a9643ea8Slogwang
3214a9643ea8Slogwang /* If we were hit with a reset or timeout drop the link */
3215a9643ea8Slogwang if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
3216a9643ea8Slogwang mac->get_link_status = TRUE;
3217a9643ea8Slogwang
3218a9643ea8Slogwang if (!mac->get_link_status)
3219a9643ea8Slogwang goto out;
3220a9643ea8Slogwang
3221a9643ea8Slogwang /* if link status is down no point in checking to see if pf is up */
3222a9643ea8Slogwang if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
3223a9643ea8Slogwang goto out;
3224a9643ea8Slogwang
3225a9643ea8Slogwang /* if we passed all the tests above then the link is up and we no
3226a9643ea8Slogwang * longer need to check for link */
3227a9643ea8Slogwang mac->get_link_status = FALSE;
3228a9643ea8Slogwang
3229a9643ea8Slogwang out:
3230a9643ea8Slogwang return ret_val;
3231a9643ea8Slogwang }
3232a9643ea8Slogwang
3233a9643ea8Slogwang
3234a9643ea8Slogwang static int
igbvf_dev_configure(struct rte_eth_dev * dev)3235a9643ea8Slogwang igbvf_dev_configure(struct rte_eth_dev *dev)
3236a9643ea8Slogwang {
3237a9643ea8Slogwang struct rte_eth_conf* conf = &dev->data->dev_conf;
3238a9643ea8Slogwang
3239a9643ea8Slogwang PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3240a9643ea8Slogwang dev->data->port_id);
3241a9643ea8Slogwang
32424418919fSjohnjiang if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
32434418919fSjohnjiang dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
32444418919fSjohnjiang
3245a9643ea8Slogwang /*
3246a9643ea8Slogwang * VF has no ability to enable/disable HW CRC
3247a9643ea8Slogwang * Keep the persistent behavior the same as Host PF
3248a9643ea8Slogwang */
3249a9643ea8Slogwang #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
3250d30ea906Sjfb8856606 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
3251a9643ea8Slogwang PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3252d30ea906Sjfb8856606 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
3253a9643ea8Slogwang }
3254a9643ea8Slogwang #else
3255d30ea906Sjfb8856606 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
3256a9643ea8Slogwang PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3257d30ea906Sjfb8856606 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
3258a9643ea8Slogwang }
3259a9643ea8Slogwang #endif
3260a9643ea8Slogwang
3261a9643ea8Slogwang return 0;
3262a9643ea8Slogwang }
3263a9643ea8Slogwang
/*
 * Start the VF port: reset the MAC, restore the shadow VLAN table,
 * initialize TX/RX rings, set up the RX queue interrupt-vector
 * mapping (when requested) and finally enable interrupts.
 *
 * Returns 0 on success; a negative errno from RX init, efd enable or
 * the intr_vec allocation otherwise.
 */
static int
igbvf_dev_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret;
	uint32_t intr_vector = 0;

	PMD_INIT_FUNC_TRACE();

	hw->mac.ops.reset_hw(hw);
	adapter->stopped = 0;

	/* Set all vfta: replay VLAN filters kept across the reset */
	igbvf_set_vfta_all(dev,1);

	eth_igbvf_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igbvf_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		intr_vector = dev->data->nb_rx_queues;
		ret = rte_intr_efd_enable(intr_handle, intr_vector);
		if (ret)
			return ret;
	}

	/* One vector slot per RX queue; allocated once and reused on
	 * subsequent starts (freed in igbvf_dev_stop).
	 */
	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	eth_igbvf_configure_msix_intr(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igbvf_intr_enable(dev);

	return 0;
}
3324a9643ea8Slogwang
/*
 * Stop the VF port. Idempotent: returns immediately when the adapter
 * is already stopped. Clears the hardware VLAN filters (the shadow
 * table is kept so igbvf_dev_start can restore them), releases the
 * queues and tears down the interrupt/eventfd mapping.
 *
 * Always returns 0.
 */
static int
igbvf_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);

	if (adapter->stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	igbvf_stop_adapter(dev);

	/*
	 * Clear what we set, but we still keep shadow_vfta to
	 * restore after device starts
	 */
	igbvf_set_vfta_all(dev,0);

	igb_dev_clear_queues(dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->stopped = true;
	dev->data->dev_started = 0;

	return 0;
}
3363a9643ea8Slogwang
/*
 * Close the VF port: reset the MAC, stop the port, free the queues,
 * zero RAR[0] so stray VF traffic falls back to the PF, and
 * unregister the interrupt callback. Secondary processes do nothing.
 *
 * Returns 0 on success or the error from igbvf_dev_stop().
 */
static int
igbvf_dev_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr addr;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Only the primary process owns the hardware. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	e1000_reset_hw(hw);

	ret = igbvf_dev_stop(dev);
	if (ret != 0)
		return ret;

	igb_dev_free_queues(dev);

	/**
	 * reprogram the RAR with a zero mac address,
	 * to ensure that the VF traffic goes to the PF
	 * after stop, close and detach of the VF.
	 **/

	memset(&addr, 0, sizeof(addr));
	igbvf_default_mac_addr_set(dev, &addr);

	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     eth_igbvf_interrupt_handler,
				     (void *)dev);

	return 0;
}
3400a9643ea8Slogwang
34014418919fSjohnjiang static int
igbvf_promiscuous_enable(struct rte_eth_dev * dev)3402a9643ea8Slogwang igbvf_promiscuous_enable(struct rte_eth_dev *dev)
3403a9643ea8Slogwang {
3404a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3405a9643ea8Slogwang
3406a9643ea8Slogwang /* Set both unicast and multicast promisc */
3407a9643ea8Slogwang e1000_promisc_set_vf(hw, e1000_promisc_enabled);
34084418919fSjohnjiang
34094418919fSjohnjiang return 0;
3410a9643ea8Slogwang }
3411a9643ea8Slogwang
34124418919fSjohnjiang static int
igbvf_promiscuous_disable(struct rte_eth_dev * dev)3413a9643ea8Slogwang igbvf_promiscuous_disable(struct rte_eth_dev *dev)
3414a9643ea8Slogwang {
3415a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3416a9643ea8Slogwang
3417a9643ea8Slogwang /* If in allmulticast mode leave multicast promisc */
3418a9643ea8Slogwang if (dev->data->all_multicast == 1)
3419a9643ea8Slogwang e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3420a9643ea8Slogwang else
3421a9643ea8Slogwang e1000_promisc_set_vf(hw, e1000_promisc_disabled);
34224418919fSjohnjiang
34234418919fSjohnjiang return 0;
3424a9643ea8Slogwang }
3425a9643ea8Slogwang
34264418919fSjohnjiang static int
igbvf_allmulticast_enable(struct rte_eth_dev * dev)3427a9643ea8Slogwang igbvf_allmulticast_enable(struct rte_eth_dev *dev)
3428a9643ea8Slogwang {
3429a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3430a9643ea8Slogwang
3431a9643ea8Slogwang /* In promiscuous mode multicast promisc already set */
3432a9643ea8Slogwang if (dev->data->promiscuous == 0)
3433a9643ea8Slogwang e1000_promisc_set_vf(hw, e1000_promisc_multicast);
34344418919fSjohnjiang
34354418919fSjohnjiang return 0;
3436a9643ea8Slogwang }
3437a9643ea8Slogwang
34384418919fSjohnjiang static int
igbvf_allmulticast_disable(struct rte_eth_dev * dev)3439a9643ea8Slogwang igbvf_allmulticast_disable(struct rte_eth_dev *dev)
3440a9643ea8Slogwang {
3441a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3442a9643ea8Slogwang
3443a9643ea8Slogwang /* In promiscuous mode leave multicast promisc enabled */
3444a9643ea8Slogwang if (dev->data->promiscuous == 0)
3445a9643ea8Slogwang e1000_promisc_set_vf(hw, e1000_promisc_disabled);
34464418919fSjohnjiang
34474418919fSjohnjiang return 0;
3448a9643ea8Slogwang }
3449a9643ea8Slogwang
/*
 * Ask the PF, over the mailbox, to add (on) or remove a VLAN id for
 * this VF. The request is a two-word message: opcode + vid.
 *
 * Returns 0 on success, a mailbox error code, or -EINVAL when the PF
 * NACKs the request.
 */
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	uint32_t msgbuf[2];
	s32 err;

	/* After set vlan, vlan strip will also be enabled in igb driver*/
	msgbuf[0] = E1000_VF_SET_VLAN;
	msgbuf[1] = vid;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	if (on)
		msgbuf[0] |= E1000_VF_SET_VLAN_ADD;

	err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
	if (err)
		goto mbx_err;

	/* Wait for the PF's reply to the request just posted. */
	err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
	if (err)
		goto mbx_err;

	/* Strip the CTS bit before checking for a NACK. */
	msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
		err = -EINVAL;

mbx_err:
	return err;
}
3478a9643ea8Slogwang
/*
 * Replay every VLAN id recorded in the shadow VFTA to the PF,
 * either adding (on) or removing them.
 *
 * Fix: the original walked each 32-bit word with a signed `int mask`
 * and `mask <<= 1`; shifting a set bit into the sign position
 * (bit 31) is signed-overflow undefined behavior, and storing the
 * uint32_t table word in an `int` could also misbehave for values
 * above INT_MAX. All bit manipulation now uses uint32_t.
 */
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta * shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	int i, j;
	uint32_t vfta;

	for (i = 0; i < IGB_VFTA_SIZE; i++) {
		vfta = shadow_vfta->vfta[i];
		if (vfta == 0)
			continue;
		for (j = 0; j < 32; j++) {
			if (vfta & ((uint32_t)1 << j))
				/* Each word covers 32 VLAN ids. */
				igbvf_set_vfta(hw,
					(uint16_t)((i << 5) + j), on);
		}
	}
}
3501a9643ea8Slogwang
3502a9643ea8Slogwang static int
igbvf_vlan_filter_set(struct rte_eth_dev * dev,uint16_t vlan_id,int on)3503a9643ea8Slogwang igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3504a9643ea8Slogwang {
3505a9643ea8Slogwang struct e1000_hw *hw =
3506a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3507a9643ea8Slogwang struct e1000_vfta * shadow_vfta =
3508a9643ea8Slogwang E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3509a9643ea8Slogwang uint32_t vid_idx = 0;
3510a9643ea8Slogwang uint32_t vid_bit = 0;
3511a9643ea8Slogwang int ret = 0;
3512a9643ea8Slogwang
3513a9643ea8Slogwang PMD_INIT_FUNC_TRACE();
3514a9643ea8Slogwang
3515a9643ea8Slogwang /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/
3516a9643ea8Slogwang ret = igbvf_set_vfta(hw, vlan_id, !!on);
3517a9643ea8Slogwang if(ret){
3518a9643ea8Slogwang PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3519a9643ea8Slogwang return ret;
3520a9643ea8Slogwang }
3521a9643ea8Slogwang vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3522a9643ea8Slogwang vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3523a9643ea8Slogwang
3524a9643ea8Slogwang /*Save what we set and retore it after device reset*/
3525a9643ea8Slogwang if (on)
3526a9643ea8Slogwang shadow_vfta->vfta[vid_idx] |= vid_bit;
3527a9643ea8Slogwang else
3528a9643ea8Slogwang shadow_vfta->vfta[vid_idx] &= ~vid_bit;
3529a9643ea8Slogwang
3530a9643ea8Slogwang return 0;
3531a9643ea8Slogwang }
3532a9643ea8Slogwang
3533d30ea906Sjfb8856606 static int
igbvf_default_mac_addr_set(struct rte_eth_dev * dev,struct rte_ether_addr * addr)35344418919fSjohnjiang igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
3535a9643ea8Slogwang {
3536a9643ea8Slogwang struct e1000_hw *hw =
3537a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3538a9643ea8Slogwang
3539a9643ea8Slogwang /* index is not used by rar_set() */
3540a9643ea8Slogwang hw->mac.ops.rar_set(hw, (void *)addr, 0);
3541d30ea906Sjfb8856606 return 0;
3542a9643ea8Slogwang }
3543a9643ea8Slogwang
3544a9643ea8Slogwang
/*
 * Update the RSS redirection table.
 *
 * Each 32-bit RETA register packs four 8-bit entries, so the loop
 * walks the table in groups of four (IGB_4_BIT_WIDTH) and uses a
 * 4-bit slice of the caller's mask to decide which entries within a
 * register to replace.
 *
 * Returns 0 on success, -EINVAL when reta_size is not the 128 entries
 * this hardware supports.
 */
static int
eth_igb_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta, r;
	uint16_t idx, shift;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can supported "
			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGB_4_BIT_MASK);
		if (!mask)
			continue;
		/* Skip the register read when all four entries are replaced */
		if (mask == IGB_4_BIT_MASK)
			r = 0;
		else
			r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
		/* Merge new entries with the preserved bytes of the old value */
		for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta |= reta_conf[idx].reta[shift + j] <<
					(CHAR_BIT * j);
			else
				reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
		}
		E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
	}

	return 0;
}
3585a9643ea8Slogwang
/*
 * Read back the RSS redirection table.
 *
 * Mirrors eth_igb_rss_reta_update(): four 8-bit entries per 32-bit
 * RETA register; only entries selected by the caller's mask are
 * filled into reta_conf.
 *
 * Returns 0 on success, -EINVAL when reta_size is not the 128 entries
 * this hardware supports.
 */
static int
eth_igb_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can supported "
			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGB_4_BIT_MASK);
		if (!mask)
			continue;
		reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
		/* Extract the selected bytes of the register */
		for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] =
					((reta >> (CHAR_BIT * j)) &
					IGB_8_BIT_MASK);
		}
	}

	return 0;
}
3621a9643ea8Slogwang
/*
 * Install (add) or remove the single TCP SYN filter supported by the
 * hardware, steering SYN packets to filter->queue. The programmed
 * SYNQF value is cached in filter_info->syn_info so the flow layer
 * can restore it after a reset.
 *
 * Returns 0 on success, -EINVAL for a bad queue or when a filter is
 * already installed, -ENOENT when removing a non-existent filter.
 */
int
eth_igb_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t synqf, rfctl;

	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
		return -EINVAL;

	synqf = E1000_READ_REG(hw, E1000_SYNQF(0));

	if (add) {
		/* Only one SYN filter slot exists; reject a second add. */
		if (synqf & E1000_SYN_FILTER_ENABLE)
			return -EINVAL;

		synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
			E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);

		/* SYNQFP selects high priority for the SYN filter. */
		rfctl = E1000_READ_REG(hw, E1000_RFCTL);
		if (filter->hig_pri)
			rfctl |= E1000_RFCTL_SYNQFP;
		else
			rfctl &= ~E1000_RFCTL_SYNQFP;

		E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
	} else {
		if (!(synqf & E1000_SYN_FILTER_ENABLE))
			return -ENOENT;
		synqf = 0;
	}

	filter_info->syn_info = synqf;
	E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
	E1000_WRITE_FLUSH(hw);
	return 0;
}
3662a9643ea8Slogwang
3663a9643ea8Slogwang /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
3664a9643ea8Slogwang static inline int
ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter * filter,struct e1000_2tuple_filter_info * filter_info)3665a9643ea8Slogwang ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
3666a9643ea8Slogwang struct e1000_2tuple_filter_info *filter_info)
3667a9643ea8Slogwang {
3668a9643ea8Slogwang if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3669a9643ea8Slogwang return -EINVAL;
3670a9643ea8Slogwang if (filter->priority > E1000_2TUPLE_MAX_PRI)
3671a9643ea8Slogwang return -EINVAL; /* filter index is out of range. */
36724418919fSjohnjiang if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
3673a9643ea8Slogwang return -EINVAL; /* flags is invalid. */
3674a9643ea8Slogwang
3675a9643ea8Slogwang switch (filter->dst_port_mask) {
3676a9643ea8Slogwang case UINT16_MAX:
3677a9643ea8Slogwang filter_info->dst_port_mask = 0;
3678a9643ea8Slogwang filter_info->dst_port = filter->dst_port;
3679a9643ea8Slogwang break;
3680a9643ea8Slogwang case 0:
3681a9643ea8Slogwang filter_info->dst_port_mask = 1;
3682a9643ea8Slogwang break;
3683a9643ea8Slogwang default:
3684a9643ea8Slogwang PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3685a9643ea8Slogwang return -EINVAL;
3686a9643ea8Slogwang }
3687a9643ea8Slogwang
3688a9643ea8Slogwang switch (filter->proto_mask) {
3689a9643ea8Slogwang case UINT8_MAX:
3690a9643ea8Slogwang filter_info->proto_mask = 0;
3691a9643ea8Slogwang filter_info->proto = filter->proto;
3692a9643ea8Slogwang break;
3693a9643ea8Slogwang case 0:
3694a9643ea8Slogwang filter_info->proto_mask = 1;
3695a9643ea8Slogwang break;
3696a9643ea8Slogwang default:
3697a9643ea8Slogwang PMD_DRV_LOG(ERR, "invalid protocol mask.");
3698a9643ea8Slogwang return -EINVAL;
3699a9643ea8Slogwang }
3700a9643ea8Slogwang
3701a9643ea8Slogwang filter_info->priority = (uint8_t)filter->priority;
3702a9643ea8Slogwang if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3703a9643ea8Slogwang filter_info->tcp_flags = filter->tcp_flags;
3704a9643ea8Slogwang else
3705a9643ea8Slogwang filter_info->tcp_flags = 0;
3706a9643ea8Slogwang
3707a9643ea8Slogwang return 0;
3708a9643ea8Slogwang }
3709a9643ea8Slogwang
3710a9643ea8Slogwang static inline struct e1000_2tuple_filter *
igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list * filter_list,struct e1000_2tuple_filter_info * key)3711a9643ea8Slogwang igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
3712a9643ea8Slogwang struct e1000_2tuple_filter_info *key)
3713a9643ea8Slogwang {
3714a9643ea8Slogwang struct e1000_2tuple_filter *it;
3715a9643ea8Slogwang
3716a9643ea8Slogwang TAILQ_FOREACH(it, filter_list, entries) {
3717a9643ea8Slogwang if (memcmp(key, &it->filter_info,
3718a9643ea8Slogwang sizeof(struct e1000_2tuple_filter_info)) == 0) {
3719a9643ea8Slogwang return it;
3720a9643ea8Slogwang }
3721a9643ea8Slogwang }
3722a9643ea8Slogwang return NULL;
3723a9643ea8Slogwang }
3724a9643ea8Slogwang
/* Inject an igb 2-tuple filter into the HW registers.
 *
 * Packs the software filter into the IMIR (destination port +
 * priority), TTQF (protocol + queue) and IMIREXT (TCP flag match)
 * registers for the slot chosen in filter->index.
 */
static inline void
igb_inject_2uple_filter(struct rte_eth_dev *dev,
		struct e1000_2tuple_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
	uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
	int i;

	i = filter->index;
	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
		imir |= E1000_IMIR_PORT_BP;
	else
		imir &= ~E1000_IMIR_PORT_BP;

	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;

	ttqf |= E1000_TTQF_QUEUE_ENABLE;
	ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
	ttqf |= (uint32_t)(filter->filter_info.proto &
		E1000_TTQF_PROTOCOL_MASK);
	/* proto_mask == 0 means the protocol field must match. */
	if (filter->filter_info.proto_mask == 0)
		ttqf &= ~E1000_TTQF_MASK_ENABLE;

	/* tcp flags bits setting. */
	if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
		if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_URG;
		if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_ACK;
		if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_PSH;
		if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_RST;
		if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_SYN;
		if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_FIN;
	} else {
		/* No TCP-flag matching: bypass the control-field compare. */
		imir_ext |= E1000_IMIREXT_CTRL_BP;
	}
	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
	E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
}
37722bfe3f2eSlogwang
3773a9643ea8Slogwang /*
3774a9643ea8Slogwang * igb_add_2tuple_filter - add a 2tuple filter
3775a9643ea8Slogwang *
3776a9643ea8Slogwang * @param
3777a9643ea8Slogwang * dev: Pointer to struct rte_eth_dev.
3778a9643ea8Slogwang * ntuple_filter: ponter to the filter that will be added.
3779a9643ea8Slogwang *
3780a9643ea8Slogwang * @return
3781a9643ea8Slogwang * - On success, zero.
3782a9643ea8Slogwang * - On failure, a negative value.
3783a9643ea8Slogwang */
3784a9643ea8Slogwang static int
igb_add_2tuple_filter(struct rte_eth_dev * dev,struct rte_eth_ntuple_filter * ntuple_filter)3785a9643ea8Slogwang igb_add_2tuple_filter(struct rte_eth_dev *dev,
3786a9643ea8Slogwang struct rte_eth_ntuple_filter *ntuple_filter)
3787a9643ea8Slogwang {
3788a9643ea8Slogwang struct e1000_filter_info *filter_info =
3789a9643ea8Slogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3790a9643ea8Slogwang struct e1000_2tuple_filter *filter;
3791a9643ea8Slogwang int i, ret;
3792a9643ea8Slogwang
3793a9643ea8Slogwang filter = rte_zmalloc("e1000_2tuple_filter",
3794a9643ea8Slogwang sizeof(struct e1000_2tuple_filter), 0);
3795a9643ea8Slogwang if (filter == NULL)
3796a9643ea8Slogwang return -ENOMEM;
3797a9643ea8Slogwang
3798a9643ea8Slogwang ret = ntuple_filter_to_2tuple(ntuple_filter,
3799a9643ea8Slogwang &filter->filter_info);
3800a9643ea8Slogwang if (ret < 0) {
3801a9643ea8Slogwang rte_free(filter);
3802a9643ea8Slogwang return ret;
3803a9643ea8Slogwang }
3804a9643ea8Slogwang if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3805a9643ea8Slogwang &filter->filter_info) != NULL) {
3806a9643ea8Slogwang PMD_DRV_LOG(ERR, "filter exists.");
3807a9643ea8Slogwang rte_free(filter);
3808a9643ea8Slogwang return -EEXIST;
3809a9643ea8Slogwang }
3810a9643ea8Slogwang filter->queue = ntuple_filter->queue;
3811a9643ea8Slogwang
3812a9643ea8Slogwang /*
3813a9643ea8Slogwang * look for an unused 2tuple filter index,
3814a9643ea8Slogwang * and insert the filter to list.
3815a9643ea8Slogwang */
3816a9643ea8Slogwang for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
3817a9643ea8Slogwang if (!(filter_info->twotuple_mask & (1 << i))) {
3818a9643ea8Slogwang filter_info->twotuple_mask |= 1 << i;
3819a9643ea8Slogwang filter->index = i;
3820a9643ea8Slogwang TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
3821a9643ea8Slogwang filter,
3822a9643ea8Slogwang entries);
3823a9643ea8Slogwang break;
3824a9643ea8Slogwang }
3825a9643ea8Slogwang }
3826a9643ea8Slogwang if (i >= E1000_MAX_TTQF_FILTERS) {
3827a9643ea8Slogwang PMD_DRV_LOG(ERR, "2tuple filters are full.");
3828a9643ea8Slogwang rte_free(filter);
3829a9643ea8Slogwang return -ENOSYS;
3830a9643ea8Slogwang }
3831a9643ea8Slogwang
38322bfe3f2eSlogwang igb_inject_2uple_filter(dev, filter);
38332bfe3f2eSlogwang return 0;
38342bfe3f2eSlogwang }
3835a9643ea8Slogwang
38362bfe3f2eSlogwang int
igb_delete_2tuple_filter(struct rte_eth_dev * dev,struct e1000_2tuple_filter * filter)38372bfe3f2eSlogwang igb_delete_2tuple_filter(struct rte_eth_dev *dev,
38382bfe3f2eSlogwang struct e1000_2tuple_filter *filter)
38392bfe3f2eSlogwang {
38402bfe3f2eSlogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
38412bfe3f2eSlogwang struct e1000_filter_info *filter_info =
38422bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3843a9643ea8Slogwang
38442bfe3f2eSlogwang filter_info->twotuple_mask &= ~(1 << filter->index);
38452bfe3f2eSlogwang TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
38462bfe3f2eSlogwang rte_free(filter);
3847a9643ea8Slogwang
38482bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
38492bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
38502bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3851a9643ea8Slogwang return 0;
3852a9643ea8Slogwang }
3853a9643ea8Slogwang
3854a9643ea8Slogwang /*
3855a9643ea8Slogwang * igb_remove_2tuple_filter - remove a 2tuple filter
3856a9643ea8Slogwang *
3857a9643ea8Slogwang * @param
3858a9643ea8Slogwang * dev: Pointer to struct rte_eth_dev.
3859a9643ea8Slogwang * ntuple_filter: ponter to the filter that will be removed.
3860a9643ea8Slogwang *
3861a9643ea8Slogwang * @return
3862a9643ea8Slogwang * - On success, zero.
3863a9643ea8Slogwang * - On failure, a negative value.
3864a9643ea8Slogwang */
3865a9643ea8Slogwang static int
igb_remove_2tuple_filter(struct rte_eth_dev * dev,struct rte_eth_ntuple_filter * ntuple_filter)3866a9643ea8Slogwang igb_remove_2tuple_filter(struct rte_eth_dev *dev,
3867a9643ea8Slogwang struct rte_eth_ntuple_filter *ntuple_filter)
3868a9643ea8Slogwang {
3869a9643ea8Slogwang struct e1000_filter_info *filter_info =
3870a9643ea8Slogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3871a9643ea8Slogwang struct e1000_2tuple_filter_info filter_2tuple;
3872a9643ea8Slogwang struct e1000_2tuple_filter *filter;
3873a9643ea8Slogwang int ret;
3874a9643ea8Slogwang
3875a9643ea8Slogwang memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
3876a9643ea8Slogwang ret = ntuple_filter_to_2tuple(ntuple_filter,
3877a9643ea8Slogwang &filter_2tuple);
3878a9643ea8Slogwang if (ret < 0)
3879a9643ea8Slogwang return ret;
3880a9643ea8Slogwang
3881a9643ea8Slogwang filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3882a9643ea8Slogwang &filter_2tuple);
3883a9643ea8Slogwang if (filter == NULL) {
3884a9643ea8Slogwang PMD_DRV_LOG(ERR, "filter doesn't exist.");
3885a9643ea8Slogwang return -ENOENT;
3886a9643ea8Slogwang }
3887a9643ea8Slogwang
38882bfe3f2eSlogwang igb_delete_2tuple_filter(dev, filter);
3889a9643ea8Slogwang
3890a9643ea8Slogwang return 0;
3891a9643ea8Slogwang }
3892a9643ea8Slogwang
38932bfe3f2eSlogwang /* inject a igb flex filter to HW */
38942bfe3f2eSlogwang static inline void
igb_inject_flex_filter(struct rte_eth_dev * dev,struct e1000_flex_filter * filter)38952bfe3f2eSlogwang igb_inject_flex_filter(struct rte_eth_dev *dev,
38962bfe3f2eSlogwang struct e1000_flex_filter *filter)
38972bfe3f2eSlogwang {
38982bfe3f2eSlogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
38992bfe3f2eSlogwang uint32_t wufc, queueing;
39002bfe3f2eSlogwang uint32_t reg_off;
39012bfe3f2eSlogwang uint8_t i, j = 0;
39022bfe3f2eSlogwang
39032bfe3f2eSlogwang wufc = E1000_READ_REG(hw, E1000_WUFC);
39042bfe3f2eSlogwang if (filter->index < E1000_MAX_FHFT)
39052bfe3f2eSlogwang reg_off = E1000_FHFT(filter->index);
39062bfe3f2eSlogwang else
39072bfe3f2eSlogwang reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
39082bfe3f2eSlogwang
39092bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
39102bfe3f2eSlogwang (E1000_WUFC_FLX0 << filter->index));
39112bfe3f2eSlogwang queueing = filter->filter_info.len |
39122bfe3f2eSlogwang (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
39132bfe3f2eSlogwang (filter->filter_info.priority <<
39142bfe3f2eSlogwang E1000_FHFT_QUEUEING_PRIO_SHIFT);
39152bfe3f2eSlogwang E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
39162bfe3f2eSlogwang queueing);
39172bfe3f2eSlogwang
39182bfe3f2eSlogwang for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
39192bfe3f2eSlogwang E1000_WRITE_REG(hw, reg_off,
39202bfe3f2eSlogwang filter->filter_info.dwords[j]);
39212bfe3f2eSlogwang reg_off += sizeof(uint32_t);
39222bfe3f2eSlogwang E1000_WRITE_REG(hw, reg_off,
39232bfe3f2eSlogwang filter->filter_info.dwords[++j]);
39242bfe3f2eSlogwang reg_off += sizeof(uint32_t);
39252bfe3f2eSlogwang E1000_WRITE_REG(hw, reg_off,
39262bfe3f2eSlogwang (uint32_t)filter->filter_info.mask[i]);
39272bfe3f2eSlogwang reg_off += sizeof(uint32_t) * 2;
39282bfe3f2eSlogwang ++j;
39292bfe3f2eSlogwang }
39302bfe3f2eSlogwang }
39312bfe3f2eSlogwang
3932a9643ea8Slogwang static inline struct e1000_flex_filter *
eth_igb_flex_filter_lookup(struct e1000_flex_filter_list * filter_list,struct e1000_flex_filter_info * key)3933a9643ea8Slogwang eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
3934a9643ea8Slogwang struct e1000_flex_filter_info *key)
3935a9643ea8Slogwang {
3936a9643ea8Slogwang struct e1000_flex_filter *it;
3937a9643ea8Slogwang
3938a9643ea8Slogwang TAILQ_FOREACH(it, filter_list, entries) {
3939a9643ea8Slogwang if (memcmp(key, &it->filter_info,
3940a9643ea8Slogwang sizeof(struct e1000_flex_filter_info)) == 0)
3941a9643ea8Slogwang return it;
3942a9643ea8Slogwang }
3943a9643ea8Slogwang
3944a9643ea8Slogwang return NULL;
3945a9643ea8Slogwang }
3946a9643ea8Slogwang
39472bfe3f2eSlogwang /* remove a flex byte filter
39482bfe3f2eSlogwang * @param
39492bfe3f2eSlogwang * dev: Pointer to struct rte_eth_dev.
39502bfe3f2eSlogwang * filter: the pointer of the filter will be removed.
39512bfe3f2eSlogwang */
39522bfe3f2eSlogwang void
igb_remove_flex_filter(struct rte_eth_dev * dev,struct e1000_flex_filter * filter)39532bfe3f2eSlogwang igb_remove_flex_filter(struct rte_eth_dev *dev,
39542bfe3f2eSlogwang struct e1000_flex_filter *filter)
39552bfe3f2eSlogwang {
39562bfe3f2eSlogwang struct e1000_filter_info *filter_info =
39572bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
39582bfe3f2eSlogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
39592bfe3f2eSlogwang uint32_t wufc, i;
39602bfe3f2eSlogwang uint32_t reg_off;
39612bfe3f2eSlogwang
39622bfe3f2eSlogwang wufc = E1000_READ_REG(hw, E1000_WUFC);
39632bfe3f2eSlogwang if (filter->index < E1000_MAX_FHFT)
39642bfe3f2eSlogwang reg_off = E1000_FHFT(filter->index);
39652bfe3f2eSlogwang else
39662bfe3f2eSlogwang reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
39672bfe3f2eSlogwang
39682bfe3f2eSlogwang for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
39692bfe3f2eSlogwang E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
39702bfe3f2eSlogwang
39712bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_WUFC, wufc &
39722bfe3f2eSlogwang (~(E1000_WUFC_FLX0 << filter->index)));
39732bfe3f2eSlogwang
39742bfe3f2eSlogwang filter_info->flex_mask &= ~(1 << filter->index);
39752bfe3f2eSlogwang TAILQ_REMOVE(&filter_info->flex_list, filter, entries);
39762bfe3f2eSlogwang rte_free(filter);
39772bfe3f2eSlogwang }
39782bfe3f2eSlogwang
39792bfe3f2eSlogwang int
eth_igb_add_del_flex_filter(struct rte_eth_dev * dev,struct igb_flex_filter * filter,bool add)3980a9643ea8Slogwang eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
3981*2d9fd380Sjfb8856606 struct igb_flex_filter *filter,
3982a9643ea8Slogwang bool add)
3983a9643ea8Slogwang {
3984a9643ea8Slogwang struct e1000_filter_info *filter_info =
3985a9643ea8Slogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3986a9643ea8Slogwang struct e1000_flex_filter *flex_filter, *it;
39872bfe3f2eSlogwang uint32_t mask;
39882bfe3f2eSlogwang uint8_t shift, i;
3989a9643ea8Slogwang
3990a9643ea8Slogwang flex_filter = rte_zmalloc("e1000_flex_filter",
3991a9643ea8Slogwang sizeof(struct e1000_flex_filter), 0);
3992a9643ea8Slogwang if (flex_filter == NULL)
3993a9643ea8Slogwang return -ENOMEM;
3994a9643ea8Slogwang
3995a9643ea8Slogwang flex_filter->filter_info.len = filter->len;
3996a9643ea8Slogwang flex_filter->filter_info.priority = filter->priority;
3997a9643ea8Slogwang memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
3998a9643ea8Slogwang for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
3999a9643ea8Slogwang mask = 0;
4000a9643ea8Slogwang /* reverse bits in flex filter's mask*/
4001a9643ea8Slogwang for (shift = 0; shift < CHAR_BIT; shift++) {
4002a9643ea8Slogwang if (filter->mask[i] & (0x01 << shift))
4003a9643ea8Slogwang mask |= (0x80 >> shift);
4004a9643ea8Slogwang }
4005a9643ea8Slogwang flex_filter->filter_info.mask[i] = mask;
4006a9643ea8Slogwang }
4007a9643ea8Slogwang
40082bfe3f2eSlogwang it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
40092bfe3f2eSlogwang &flex_filter->filter_info);
40102bfe3f2eSlogwang if (it == NULL && !add) {
40112bfe3f2eSlogwang PMD_DRV_LOG(ERR, "filter doesn't exist.");
40122bfe3f2eSlogwang rte_free(flex_filter);
40132bfe3f2eSlogwang return -ENOENT;
40142bfe3f2eSlogwang }
40152bfe3f2eSlogwang if (it != NULL && add) {
4016a9643ea8Slogwang PMD_DRV_LOG(ERR, "filter exists.");
4017a9643ea8Slogwang rte_free(flex_filter);
4018a9643ea8Slogwang return -EEXIST;
4019a9643ea8Slogwang }
40202bfe3f2eSlogwang
40212bfe3f2eSlogwang if (add) {
4022a9643ea8Slogwang flex_filter->queue = filter->queue;
4023a9643ea8Slogwang /*
4024a9643ea8Slogwang * look for an unused flex filter index
4025a9643ea8Slogwang * and insert the filter into the list.
4026a9643ea8Slogwang */
4027a9643ea8Slogwang for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
4028a9643ea8Slogwang if (!(filter_info->flex_mask & (1 << i))) {
4029a9643ea8Slogwang filter_info->flex_mask |= 1 << i;
4030a9643ea8Slogwang flex_filter->index = i;
4031a9643ea8Slogwang TAILQ_INSERT_TAIL(&filter_info->flex_list,
4032a9643ea8Slogwang flex_filter,
4033a9643ea8Slogwang entries);
4034a9643ea8Slogwang break;
4035a9643ea8Slogwang }
4036a9643ea8Slogwang }
4037a9643ea8Slogwang if (i >= E1000_MAX_FLEX_FILTERS) {
4038a9643ea8Slogwang PMD_DRV_LOG(ERR, "flex filters are full.");
4039a9643ea8Slogwang rte_free(flex_filter);
4040a9643ea8Slogwang return -ENOSYS;
4041a9643ea8Slogwang }
4042a9643ea8Slogwang
40432bfe3f2eSlogwang igb_inject_flex_filter(dev, flex_filter);
40442bfe3f2eSlogwang
4045a9643ea8Slogwang } else {
40462bfe3f2eSlogwang igb_remove_flex_filter(dev, it);
4047a9643ea8Slogwang rte_free(flex_filter);
4048a9643ea8Slogwang }
4049a9643ea8Slogwang
4050a9643ea8Slogwang return 0;
4051a9643ea8Slogwang }
4052a9643ea8Slogwang
4053a9643ea8Slogwang /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/
4054a9643ea8Slogwang static inline int
ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter * filter,struct e1000_5tuple_filter_info * filter_info)4055a9643ea8Slogwang ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
4056a9643ea8Slogwang struct e1000_5tuple_filter_info *filter_info)
4057a9643ea8Slogwang {
4058a9643ea8Slogwang if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
4059a9643ea8Slogwang return -EINVAL;
4060a9643ea8Slogwang if (filter->priority > E1000_2TUPLE_MAX_PRI)
4061a9643ea8Slogwang return -EINVAL; /* filter index is out of range. */
40624418919fSjohnjiang if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
4063a9643ea8Slogwang return -EINVAL; /* flags is invalid. */
4064a9643ea8Slogwang
4065a9643ea8Slogwang switch (filter->dst_ip_mask) {
4066a9643ea8Slogwang case UINT32_MAX:
4067a9643ea8Slogwang filter_info->dst_ip_mask = 0;
4068a9643ea8Slogwang filter_info->dst_ip = filter->dst_ip;
4069a9643ea8Slogwang break;
4070a9643ea8Slogwang case 0:
4071a9643ea8Slogwang filter_info->dst_ip_mask = 1;
4072a9643ea8Slogwang break;
4073a9643ea8Slogwang default:
4074a9643ea8Slogwang PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
4075a9643ea8Slogwang return -EINVAL;
4076a9643ea8Slogwang }
4077a9643ea8Slogwang
4078a9643ea8Slogwang switch (filter->src_ip_mask) {
4079a9643ea8Slogwang case UINT32_MAX:
4080a9643ea8Slogwang filter_info->src_ip_mask = 0;
4081a9643ea8Slogwang filter_info->src_ip = filter->src_ip;
4082a9643ea8Slogwang break;
4083a9643ea8Slogwang case 0:
4084a9643ea8Slogwang filter_info->src_ip_mask = 1;
4085a9643ea8Slogwang break;
4086a9643ea8Slogwang default:
4087a9643ea8Slogwang PMD_DRV_LOG(ERR, "invalid src_ip mask.");
4088a9643ea8Slogwang return -EINVAL;
4089a9643ea8Slogwang }
4090a9643ea8Slogwang
4091a9643ea8Slogwang switch (filter->dst_port_mask) {
4092a9643ea8Slogwang case UINT16_MAX:
4093a9643ea8Slogwang filter_info->dst_port_mask = 0;
4094a9643ea8Slogwang filter_info->dst_port = filter->dst_port;
4095a9643ea8Slogwang break;
4096a9643ea8Slogwang case 0:
4097a9643ea8Slogwang filter_info->dst_port_mask = 1;
4098a9643ea8Slogwang break;
4099a9643ea8Slogwang default:
4100a9643ea8Slogwang PMD_DRV_LOG(ERR, "invalid dst_port mask.");
4101a9643ea8Slogwang return -EINVAL;
4102a9643ea8Slogwang }
4103a9643ea8Slogwang
4104a9643ea8Slogwang switch (filter->src_port_mask) {
4105a9643ea8Slogwang case UINT16_MAX:
4106a9643ea8Slogwang filter_info->src_port_mask = 0;
4107a9643ea8Slogwang filter_info->src_port = filter->src_port;
4108a9643ea8Slogwang break;
4109a9643ea8Slogwang case 0:
4110a9643ea8Slogwang filter_info->src_port_mask = 1;
4111a9643ea8Slogwang break;
4112a9643ea8Slogwang default:
4113a9643ea8Slogwang PMD_DRV_LOG(ERR, "invalid src_port mask.");
4114a9643ea8Slogwang return -EINVAL;
4115a9643ea8Slogwang }
4116a9643ea8Slogwang
4117a9643ea8Slogwang switch (filter->proto_mask) {
4118a9643ea8Slogwang case UINT8_MAX:
4119a9643ea8Slogwang filter_info->proto_mask = 0;
4120a9643ea8Slogwang filter_info->proto = filter->proto;
4121a9643ea8Slogwang break;
4122a9643ea8Slogwang case 0:
4123a9643ea8Slogwang filter_info->proto_mask = 1;
4124a9643ea8Slogwang break;
4125a9643ea8Slogwang default:
4126a9643ea8Slogwang PMD_DRV_LOG(ERR, "invalid protocol mask.");
4127a9643ea8Slogwang return -EINVAL;
4128a9643ea8Slogwang }
4129a9643ea8Slogwang
4130a9643ea8Slogwang filter_info->priority = (uint8_t)filter->priority;
4131a9643ea8Slogwang if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
4132a9643ea8Slogwang filter_info->tcp_flags = filter->tcp_flags;
4133a9643ea8Slogwang else
4134a9643ea8Slogwang filter_info->tcp_flags = 0;
4135a9643ea8Slogwang
4136a9643ea8Slogwang return 0;
4137a9643ea8Slogwang }
4138a9643ea8Slogwang
4139a9643ea8Slogwang static inline struct e1000_5tuple_filter *
igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list * filter_list,struct e1000_5tuple_filter_info * key)4140a9643ea8Slogwang igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
4141a9643ea8Slogwang struct e1000_5tuple_filter_info *key)
4142a9643ea8Slogwang {
4143a9643ea8Slogwang struct e1000_5tuple_filter *it;
4144a9643ea8Slogwang
4145a9643ea8Slogwang TAILQ_FOREACH(it, filter_list, entries) {
4146a9643ea8Slogwang if (memcmp(key, &it->filter_info,
4147a9643ea8Slogwang sizeof(struct e1000_5tuple_filter_info)) == 0) {
4148a9643ea8Slogwang return it;
4149a9643ea8Slogwang }
4150a9643ea8Slogwang }
4151a9643ea8Slogwang return NULL;
4152a9643ea8Slogwang }
4153a9643ea8Slogwang
/* Inject an igb 5-tuple filter into HW: programs FTQF (match enables,
 * protocol, queue), DAQF/SAQF (dst/src IP), SPQF (src port) and
 * IMIR/IMIREXT (dst port, priority, TCP flags) for the filter's index.
 * Caller guarantees filter->index is a valid FTQF slot (82576 only).
 */
static inline void
igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
			       struct e1000_5tuple_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Start from "all comparisons bypassed", then clear bypass bits
	 * for each field that should actually be compared.
	 */
	uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
	uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
	uint8_t i;

	i = filter->index;
	ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
	if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
		ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
	if (filter->filter_info.dst_ip_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
	if (filter->filter_info.src_port_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
	if (filter->filter_info.proto_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
	ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
		E1000_FTQF_QUEUE_MASK;
	ftqf |= E1000_FTQF_QUEUE_ENABLE;
	E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
	E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
	E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);

	spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
	E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);

	/* Destination port match lives in IMIR, not FTQF. */
	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
		imir |= E1000_IMIR_PORT_BP;
	else
		imir &= ~E1000_IMIR_PORT_BP;
	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;

	/* tcp flags bits setting: mirror each requested TCP control bit
	 * into IMIREXT; with no flags requested, bypass the flag check.
	 */
	if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
		if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_URG;
		if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_ACK;
		if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_PSH;
		if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_RST;
		if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_SYN;
		if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_FIN;
	} else {
		imir_ext |= E1000_IMIREXT_CTRL_BP;
	}
	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
}
42112bfe3f2eSlogwang
4212a9643ea8Slogwang /*
4213a9643ea8Slogwang * igb_add_5tuple_filter_82576 - add a 5tuple filter
4214a9643ea8Slogwang *
4215a9643ea8Slogwang * @param
4216a9643ea8Slogwang * dev: Pointer to struct rte_eth_dev.
4217a9643ea8Slogwang * ntuple_filter: ponter to the filter that will be added.
4218a9643ea8Slogwang *
4219a9643ea8Slogwang * @return
4220a9643ea8Slogwang * - On success, zero.
4221a9643ea8Slogwang * - On failure, a negative value.
4222a9643ea8Slogwang */
4223a9643ea8Slogwang static int
igb_add_5tuple_filter_82576(struct rte_eth_dev * dev,struct rte_eth_ntuple_filter * ntuple_filter)4224a9643ea8Slogwang igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
4225a9643ea8Slogwang struct rte_eth_ntuple_filter *ntuple_filter)
4226a9643ea8Slogwang {
4227a9643ea8Slogwang struct e1000_filter_info *filter_info =
4228a9643ea8Slogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4229a9643ea8Slogwang struct e1000_5tuple_filter *filter;
4230a9643ea8Slogwang uint8_t i;
4231a9643ea8Slogwang int ret;
4232a9643ea8Slogwang
4233a9643ea8Slogwang filter = rte_zmalloc("e1000_5tuple_filter",
4234a9643ea8Slogwang sizeof(struct e1000_5tuple_filter), 0);
4235a9643ea8Slogwang if (filter == NULL)
4236a9643ea8Slogwang return -ENOMEM;
4237a9643ea8Slogwang
4238a9643ea8Slogwang ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4239a9643ea8Slogwang &filter->filter_info);
4240a9643ea8Slogwang if (ret < 0) {
4241a9643ea8Slogwang rte_free(filter);
4242a9643ea8Slogwang return ret;
4243a9643ea8Slogwang }
4244a9643ea8Slogwang
4245a9643ea8Slogwang if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4246a9643ea8Slogwang &filter->filter_info) != NULL) {
4247a9643ea8Slogwang PMD_DRV_LOG(ERR, "filter exists.");
4248a9643ea8Slogwang rte_free(filter);
4249a9643ea8Slogwang return -EEXIST;
4250a9643ea8Slogwang }
4251a9643ea8Slogwang filter->queue = ntuple_filter->queue;
4252a9643ea8Slogwang
4253a9643ea8Slogwang /*
4254a9643ea8Slogwang * look for an unused 5tuple filter index,
4255a9643ea8Slogwang * and insert the filter to list.
4256a9643ea8Slogwang */
4257a9643ea8Slogwang for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
4258a9643ea8Slogwang if (!(filter_info->fivetuple_mask & (1 << i))) {
4259a9643ea8Slogwang filter_info->fivetuple_mask |= 1 << i;
4260a9643ea8Slogwang filter->index = i;
4261a9643ea8Slogwang TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
4262a9643ea8Slogwang filter,
4263a9643ea8Slogwang entries);
4264a9643ea8Slogwang break;
4265a9643ea8Slogwang }
4266a9643ea8Slogwang }
4267a9643ea8Slogwang if (i >= E1000_MAX_FTQF_FILTERS) {
4268a9643ea8Slogwang PMD_DRV_LOG(ERR, "5tuple filters are full.");
4269a9643ea8Slogwang rte_free(filter);
4270a9643ea8Slogwang return -ENOSYS;
4271a9643ea8Slogwang }
4272a9643ea8Slogwang
42732bfe3f2eSlogwang igb_inject_5tuple_filter_82576(dev, filter);
42742bfe3f2eSlogwang return 0;
42752bfe3f2eSlogwang }
4276a9643ea8Slogwang
42772bfe3f2eSlogwang int
igb_delete_5tuple_filter_82576(struct rte_eth_dev * dev,struct e1000_5tuple_filter * filter)42782bfe3f2eSlogwang igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
42792bfe3f2eSlogwang struct e1000_5tuple_filter *filter)
42802bfe3f2eSlogwang {
42812bfe3f2eSlogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
42822bfe3f2eSlogwang struct e1000_filter_info *filter_info =
42832bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4284a9643ea8Slogwang
42852bfe3f2eSlogwang filter_info->fivetuple_mask &= ~(1 << filter->index);
42862bfe3f2eSlogwang TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
42872bfe3f2eSlogwang rte_free(filter);
4288a9643ea8Slogwang
42892bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
42902bfe3f2eSlogwang E1000_FTQF_VF_BP | E1000_FTQF_MASK);
42912bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
42922bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
42932bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
42942bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
42952bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
4296a9643ea8Slogwang return 0;
4297a9643ea8Slogwang }
4298a9643ea8Slogwang
4299a9643ea8Slogwang /*
4300a9643ea8Slogwang * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
4301a9643ea8Slogwang *
4302a9643ea8Slogwang * @param
4303a9643ea8Slogwang * dev: Pointer to struct rte_eth_dev.
4304a9643ea8Slogwang * ntuple_filter: ponter to the filter that will be removed.
4305a9643ea8Slogwang *
4306a9643ea8Slogwang * @return
4307a9643ea8Slogwang * - On success, zero.
4308a9643ea8Slogwang * - On failure, a negative value.
4309a9643ea8Slogwang */
4310a9643ea8Slogwang static int
igb_remove_5tuple_filter_82576(struct rte_eth_dev * dev,struct rte_eth_ntuple_filter * ntuple_filter)4311a9643ea8Slogwang igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
4312a9643ea8Slogwang struct rte_eth_ntuple_filter *ntuple_filter)
4313a9643ea8Slogwang {
4314a9643ea8Slogwang struct e1000_filter_info *filter_info =
4315a9643ea8Slogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4316a9643ea8Slogwang struct e1000_5tuple_filter_info filter_5tuple;
4317a9643ea8Slogwang struct e1000_5tuple_filter *filter;
4318a9643ea8Slogwang int ret;
4319a9643ea8Slogwang
4320a9643ea8Slogwang memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
4321a9643ea8Slogwang ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4322a9643ea8Slogwang &filter_5tuple);
4323a9643ea8Slogwang if (ret < 0)
4324a9643ea8Slogwang return ret;
4325a9643ea8Slogwang
4326a9643ea8Slogwang filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4327a9643ea8Slogwang &filter_5tuple);
4328a9643ea8Slogwang if (filter == NULL) {
4329a9643ea8Slogwang PMD_DRV_LOG(ERR, "filter doesn't exist.");
4330a9643ea8Slogwang return -ENOENT;
4331a9643ea8Slogwang }
4332a9643ea8Slogwang
43332bfe3f2eSlogwang igb_delete_5tuple_filter_82576(dev, filter);
4334a9643ea8Slogwang
4335a9643ea8Slogwang return 0;
4336a9643ea8Slogwang }
4337a9643ea8Slogwang
4338a9643ea8Slogwang static int
eth_igb_mtu_set(struct rte_eth_dev * dev,uint16_t mtu)4339a9643ea8Slogwang eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4340a9643ea8Slogwang {
4341a9643ea8Slogwang uint32_t rctl;
4342a9643ea8Slogwang struct e1000_hw *hw;
4343a9643ea8Slogwang struct rte_eth_dev_info dev_info;
43444418919fSjohnjiang uint32_t frame_size = mtu + E1000_ETH_OVERHEAD;
43454418919fSjohnjiang int ret;
4346a9643ea8Slogwang
4347a9643ea8Slogwang hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4348a9643ea8Slogwang
4349a9643ea8Slogwang #ifdef RTE_LIBRTE_82571_SUPPORT
4350a9643ea8Slogwang /* XXX: not bigger than max_rx_pktlen */
4351a9643ea8Slogwang if (hw->mac.type == e1000_82571)
4352a9643ea8Slogwang return -ENOTSUP;
4353a9643ea8Slogwang #endif
43544418919fSjohnjiang ret = eth_igb_infos_get(dev, &dev_info);
43554418919fSjohnjiang if (ret != 0)
43564418919fSjohnjiang return ret;
4357a9643ea8Slogwang
4358a9643ea8Slogwang /* check that mtu is within the allowed range */
43594418919fSjohnjiang if (mtu < RTE_ETHER_MIN_MTU ||
43604418919fSjohnjiang frame_size > dev_info.max_rx_pktlen)
4361a9643ea8Slogwang return -EINVAL;
4362a9643ea8Slogwang
4363a9643ea8Slogwang /* refuse mtu that requires the support of scattered packets when this
4364a9643ea8Slogwang * feature has not been enabled before. */
4365a9643ea8Slogwang if (!dev->data->scattered_rx &&
4366a9643ea8Slogwang frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
4367a9643ea8Slogwang return -EINVAL;
4368a9643ea8Slogwang
4369a9643ea8Slogwang rctl = E1000_READ_REG(hw, E1000_RCTL);
4370a9643ea8Slogwang
4371a9643ea8Slogwang /* switch to jumbo mode if needed */
43724418919fSjohnjiang if (frame_size > RTE_ETHER_MAX_LEN) {
4373d30ea906Sjfb8856606 dev->data->dev_conf.rxmode.offloads |=
4374d30ea906Sjfb8856606 DEV_RX_OFFLOAD_JUMBO_FRAME;
4375a9643ea8Slogwang rctl |= E1000_RCTL_LPE;
4376a9643ea8Slogwang } else {
4377d30ea906Sjfb8856606 dev->data->dev_conf.rxmode.offloads &=
4378d30ea906Sjfb8856606 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
4379a9643ea8Slogwang rctl &= ~E1000_RCTL_LPE;
4380a9643ea8Slogwang }
4381a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RCTL, rctl);
4382a9643ea8Slogwang
4383a9643ea8Slogwang /* update max frame size */
4384a9643ea8Slogwang dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4385a9643ea8Slogwang
4386a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_RLPML,
4387a9643ea8Slogwang dev->data->dev_conf.rxmode.max_rx_pkt_len);
4388a9643ea8Slogwang
4389a9643ea8Slogwang return 0;
4390a9643ea8Slogwang }
4391a9643ea8Slogwang
4392a9643ea8Slogwang /*
4393a9643ea8Slogwang * igb_add_del_ntuple_filter - add or delete a ntuple filter
4394a9643ea8Slogwang *
4395a9643ea8Slogwang * @param
4396a9643ea8Slogwang * dev: Pointer to struct rte_eth_dev.
4397a9643ea8Slogwang * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4398a9643ea8Slogwang * add: if true, add filter, if false, remove filter
4399a9643ea8Slogwang *
4400a9643ea8Slogwang * @return
4401a9643ea8Slogwang * - On success, zero.
4402a9643ea8Slogwang * - On failure, a negative value.
4403a9643ea8Slogwang */
44042bfe3f2eSlogwang int
igb_add_del_ntuple_filter(struct rte_eth_dev * dev,struct rte_eth_ntuple_filter * ntuple_filter,bool add)4405a9643ea8Slogwang igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
4406a9643ea8Slogwang struct rte_eth_ntuple_filter *ntuple_filter,
4407a9643ea8Slogwang bool add)
4408a9643ea8Slogwang {
4409a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4410a9643ea8Slogwang int ret;
4411a9643ea8Slogwang
4412a9643ea8Slogwang switch (ntuple_filter->flags) {
4413a9643ea8Slogwang case RTE_5TUPLE_FLAGS:
4414a9643ea8Slogwang case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4415a9643ea8Slogwang if (hw->mac.type != e1000_82576)
4416a9643ea8Slogwang return -ENOTSUP;
4417a9643ea8Slogwang if (add)
4418a9643ea8Slogwang ret = igb_add_5tuple_filter_82576(dev,
4419a9643ea8Slogwang ntuple_filter);
4420a9643ea8Slogwang else
4421a9643ea8Slogwang ret = igb_remove_5tuple_filter_82576(dev,
4422a9643ea8Slogwang ntuple_filter);
4423a9643ea8Slogwang break;
4424a9643ea8Slogwang case RTE_2TUPLE_FLAGS:
4425a9643ea8Slogwang case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
44262bfe3f2eSlogwang if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
44272bfe3f2eSlogwang hw->mac.type != e1000_i210 &&
44282bfe3f2eSlogwang hw->mac.type != e1000_i211)
4429a9643ea8Slogwang return -ENOTSUP;
4430a9643ea8Slogwang if (add)
4431a9643ea8Slogwang ret = igb_add_2tuple_filter(dev, ntuple_filter);
4432a9643ea8Slogwang else
4433a9643ea8Slogwang ret = igb_remove_2tuple_filter(dev, ntuple_filter);
4434a9643ea8Slogwang break;
4435a9643ea8Slogwang default:
4436a9643ea8Slogwang ret = -EINVAL;
4437a9643ea8Slogwang break;
4438a9643ea8Slogwang }
4439a9643ea8Slogwang
4440a9643ea8Slogwang return ret;
4441a9643ea8Slogwang }
4442a9643ea8Slogwang
4443a9643ea8Slogwang static inline int
igb_ethertype_filter_lookup(struct e1000_filter_info * filter_info,uint16_t ethertype)4444a9643ea8Slogwang igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
4445a9643ea8Slogwang uint16_t ethertype)
4446a9643ea8Slogwang {
4447a9643ea8Slogwang int i;
4448a9643ea8Slogwang
4449a9643ea8Slogwang for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
44502bfe3f2eSlogwang if (filter_info->ethertype_filters[i].ethertype == ethertype &&
4451a9643ea8Slogwang (filter_info->ethertype_mask & (1 << i)))
4452a9643ea8Slogwang return i;
4453a9643ea8Slogwang }
4454a9643ea8Slogwang return -1;
4455a9643ea8Slogwang }
4456a9643ea8Slogwang
4457a9643ea8Slogwang static inline int
igb_ethertype_filter_insert(struct e1000_filter_info * filter_info,uint16_t ethertype,uint32_t etqf)4458a9643ea8Slogwang igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
44592bfe3f2eSlogwang uint16_t ethertype, uint32_t etqf)
4460a9643ea8Slogwang {
4461a9643ea8Slogwang int i;
4462a9643ea8Slogwang
4463a9643ea8Slogwang for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4464a9643ea8Slogwang if (!(filter_info->ethertype_mask & (1 << i))) {
4465a9643ea8Slogwang filter_info->ethertype_mask |= 1 << i;
44662bfe3f2eSlogwang filter_info->ethertype_filters[i].ethertype = ethertype;
44672bfe3f2eSlogwang filter_info->ethertype_filters[i].etqf = etqf;
4468a9643ea8Slogwang return i;
4469a9643ea8Slogwang }
4470a9643ea8Slogwang }
4471a9643ea8Slogwang return -1;
4472a9643ea8Slogwang }
4473a9643ea8Slogwang
44742bfe3f2eSlogwang int
igb_ethertype_filter_remove(struct e1000_filter_info * filter_info,uint8_t idx)4475a9643ea8Slogwang igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
4476a9643ea8Slogwang uint8_t idx)
4477a9643ea8Slogwang {
4478a9643ea8Slogwang if (idx >= E1000_MAX_ETQF_FILTERS)
4479a9643ea8Slogwang return -1;
4480a9643ea8Slogwang filter_info->ethertype_mask &= ~(1 << idx);
44812bfe3f2eSlogwang filter_info->ethertype_filters[idx].ethertype = 0;
44822bfe3f2eSlogwang filter_info->ethertype_filters[idx].etqf = 0;
4483a9643ea8Slogwang return idx;
4484a9643ea8Slogwang }
4485a9643ea8Slogwang
4486a9643ea8Slogwang
44872bfe3f2eSlogwang int
igb_add_del_ethertype_filter(struct rte_eth_dev * dev,struct rte_eth_ethertype_filter * filter,bool add)4488a9643ea8Slogwang igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
4489a9643ea8Slogwang struct rte_eth_ethertype_filter *filter,
4490a9643ea8Slogwang bool add)
4491a9643ea8Slogwang {
4492a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4493a9643ea8Slogwang struct e1000_filter_info *filter_info =
4494a9643ea8Slogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4495a9643ea8Slogwang uint32_t etqf = 0;
4496a9643ea8Slogwang int ret;
4497a9643ea8Slogwang
44984418919fSjohnjiang if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
44994418919fSjohnjiang filter->ether_type == RTE_ETHER_TYPE_IPV6) {
4500a9643ea8Slogwang PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4501a9643ea8Slogwang " ethertype filter.", filter->ether_type);
4502a9643ea8Slogwang return -EINVAL;
4503a9643ea8Slogwang }
4504a9643ea8Slogwang
4505a9643ea8Slogwang if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4506a9643ea8Slogwang PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4507a9643ea8Slogwang return -EINVAL;
4508a9643ea8Slogwang }
4509a9643ea8Slogwang if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4510a9643ea8Slogwang PMD_DRV_LOG(ERR, "drop option is unsupported.");
4511a9643ea8Slogwang return -EINVAL;
4512a9643ea8Slogwang }
4513a9643ea8Slogwang
4514a9643ea8Slogwang ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
4515a9643ea8Slogwang if (ret >= 0 && add) {
4516a9643ea8Slogwang PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4517a9643ea8Slogwang filter->ether_type);
4518a9643ea8Slogwang return -EEXIST;
4519a9643ea8Slogwang }
4520a9643ea8Slogwang if (ret < 0 && !add) {
4521a9643ea8Slogwang PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4522a9643ea8Slogwang filter->ether_type);
4523a9643ea8Slogwang return -ENOENT;
4524a9643ea8Slogwang }
4525a9643ea8Slogwang
4526a9643ea8Slogwang if (add) {
45272bfe3f2eSlogwang etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
45282bfe3f2eSlogwang etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
45292bfe3f2eSlogwang etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
4530a9643ea8Slogwang ret = igb_ethertype_filter_insert(filter_info,
45312bfe3f2eSlogwang filter->ether_type, etqf);
4532a9643ea8Slogwang if (ret < 0) {
4533a9643ea8Slogwang PMD_DRV_LOG(ERR, "ethertype filters are full.");
4534a9643ea8Slogwang return -ENOSYS;
4535a9643ea8Slogwang }
4536a9643ea8Slogwang } else {
4537a9643ea8Slogwang ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
4538a9643ea8Slogwang if (ret < 0)
4539a9643ea8Slogwang return -ENOSYS;
4540a9643ea8Slogwang }
4541a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
4542a9643ea8Slogwang E1000_WRITE_FLUSH(hw);
4543a9643ea8Slogwang
4544a9643ea8Slogwang return 0;
4545a9643ea8Slogwang }
4546a9643ea8Slogwang
4547a9643ea8Slogwang static int
eth_igb_filter_ctrl(struct rte_eth_dev * dev __rte_unused,enum rte_filter_type filter_type,enum rte_filter_op filter_op,void * arg)4548*2d9fd380Sjfb8856606 eth_igb_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
4549a9643ea8Slogwang enum rte_filter_type filter_type,
4550a9643ea8Slogwang enum rte_filter_op filter_op,
4551a9643ea8Slogwang void *arg)
4552a9643ea8Slogwang {
45532bfe3f2eSlogwang int ret = 0;
4554a9643ea8Slogwang
4555a9643ea8Slogwang switch (filter_type) {
45562bfe3f2eSlogwang case RTE_ETH_FILTER_GENERIC:
45572bfe3f2eSlogwang if (filter_op != RTE_ETH_FILTER_GET)
45582bfe3f2eSlogwang return -EINVAL;
45592bfe3f2eSlogwang *(const void **)arg = &igb_flow_ops;
45602bfe3f2eSlogwang break;
4561a9643ea8Slogwang default:
4562a9643ea8Slogwang PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4563a9643ea8Slogwang filter_type);
4564a9643ea8Slogwang break;
4565a9643ea8Slogwang }
4566a9643ea8Slogwang
4567a9643ea8Slogwang return ret;
4568a9643ea8Slogwang }
4569a9643ea8Slogwang
4570a9643ea8Slogwang static int
eth_igb_set_mc_addr_list(struct rte_eth_dev * dev,struct rte_ether_addr * mc_addr_set,uint32_t nb_mc_addr)4571a9643ea8Slogwang eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
45724418919fSjohnjiang struct rte_ether_addr *mc_addr_set,
4573a9643ea8Slogwang uint32_t nb_mc_addr)
4574a9643ea8Slogwang {
4575a9643ea8Slogwang struct e1000_hw *hw;
4576a9643ea8Slogwang
4577a9643ea8Slogwang hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4578a9643ea8Slogwang e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
4579a9643ea8Slogwang return 0;
4580a9643ea8Slogwang }
4581a9643ea8Slogwang
/*
 * Read the raw free-running SYSTIM counter as a 64-bit cycle value.
 *
 * The register layout and the required read sequence differ per MAC
 * type; where SYSTIMR exists it must be read first to latch the other
 * two registers.
 */
static uint64_t
igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t systime_cycles;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		/*
		 * Need to read System Time Residue Register to be able
		 * to read the other two registers.
		 */
		E1000_READ_REG(hw, E1000_SYSTIMR);
		/* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
		systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
				* NSEC_PER_SEC;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		/*
		 * Need to read System Time Residue Register to be able
		 * to read the other two registers.
		 */
		E1000_READ_REG(hw, E1000_SYSTIMR);
		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
		/* Only the 8 LSB are valid. */
		systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
				& 0xff) << 32;
		break;
	default:
		/* Other MACs: a plain 64-bit value split across L/H. */
		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
		systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
				<< 32;
		break;
	}

	return systime_cycles;
}
4623a9643ea8Slogwang
/*
 * Read the latched Rx packet timestamp (RXSTMPL/RXSTMPH) as a raw
 * 64-bit cycle value; the layout of the two registers depends on
 * the MAC type.
 */
static uint64_t
igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rx_tstamp_cycles;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
		rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
				* NSEC_PER_SEC;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
		/* Only the 8 LSB are valid. */
		rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH)
				& 0xff) << 32;
		break;
	default:
		/* Other MACs: a plain 64-bit value split across L/H. */
		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
		rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
				<< 32;
		break;
	}

	return rx_tstamp_cycles;
}
4655a9643ea8Slogwang
/*
 * Read the latched Tx packet timestamp (TXSTMPL/TXSTMPH) as a raw
 * 64-bit cycle value; the layout of the two registers depends on
 * the MAC type.
 */
static uint64_t
igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t tx_tstamp_cycles;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
		tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
				* NSEC_PER_SEC;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
		/* Only the 8 LSB are valid. */
		tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH)
				& 0xff) << 32;
		break;
	default:
		/* Other MACs: a plain 64-bit value split across L/H. */
		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
		tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
				<< 32;
		break;
	}

	return tx_tstamp_cycles;
}
4687a9643ea8Slogwang
/*
 * Program TIMINCA so SYSTIM starts advancing and (re)initialize the
 * three software timecounters (system time, Rx and Tx timestamps).
 *
 * incval, shift and the counter mask depend on the MAC type; on MACs
 * without timesync support the timecounters are left untouched.
 */
static void
igb_start_timecounters(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter = dev->data->dev_private;
	uint32_t incval = 1;
	uint32_t shift = 0;
	uint64_t mask = E1000_CYCLECOUNTER_MASK;

	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		/* 32 LSB bits + 8 MSB bits = 40 bits */
		mask = (1ULL << 40) - 1;
		/* fall-through */
	case e1000_i210:
	case e1000_i211:
		/*
		 * Start incrementing the register
		 * used to timestamp PTP packets.
		 */
		E1000_WRITE_REG(hw, E1000_TIMINCA, incval);
		break;
	case e1000_82576:
		/* 82576 applies a shift (IGB_82576_TSYNC_SHIFT) to cycles. */
		incval = E1000_INCVALUE_82576;
		shift = IGB_82576_TSYNC_SHIFT;
		E1000_WRITE_REG(hw, E1000_TIMINCA,
				E1000_INCPERIOD_82576 | incval);
		break;
	default:
		/* Not supported */
		return;
	}

	/* Zero all three timecounters before configuring them. */
	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

	adapter->systime_tc.cc_mask = mask;
	adapter->systime_tc.cc_shift = shift;
	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->rx_tstamp_tc.cc_mask = mask;
	adapter->rx_tstamp_tc.cc_shift = shift;
	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->tx_tstamp_tc.cc_mask = mask;
	adapter->tx_tstamp_tc.cc_shift = shift;
	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
}
4739a9643ea8Slogwang
4740a9643ea8Slogwang static int
igb_timesync_adjust_time(struct rte_eth_dev * dev,int64_t delta)4741a9643ea8Slogwang igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
4742a9643ea8Slogwang {
47434b05018fSfengbojiang struct e1000_adapter *adapter = dev->data->dev_private;
4744a9643ea8Slogwang
4745a9643ea8Slogwang adapter->systime_tc.nsec += delta;
4746a9643ea8Slogwang adapter->rx_tstamp_tc.nsec += delta;
4747a9643ea8Slogwang adapter->tx_tstamp_tc.nsec += delta;
4748a9643ea8Slogwang
4749a9643ea8Slogwang return 0;
4750a9643ea8Slogwang }
4751a9643ea8Slogwang
4752a9643ea8Slogwang static int
igb_timesync_write_time(struct rte_eth_dev * dev,const struct timespec * ts)4753a9643ea8Slogwang igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
4754a9643ea8Slogwang {
4755a9643ea8Slogwang uint64_t ns;
47564b05018fSfengbojiang struct e1000_adapter *adapter = dev->data->dev_private;
4757a9643ea8Slogwang
4758a9643ea8Slogwang ns = rte_timespec_to_ns(ts);
4759a9643ea8Slogwang
4760a9643ea8Slogwang /* Set the timecounters to a new value. */
4761a9643ea8Slogwang adapter->systime_tc.nsec = ns;
4762a9643ea8Slogwang adapter->rx_tstamp_tc.nsec = ns;
4763a9643ea8Slogwang adapter->tx_tstamp_tc.nsec = ns;
4764a9643ea8Slogwang
4765a9643ea8Slogwang return 0;
4766a9643ea8Slogwang }
4767a9643ea8Slogwang
4768a9643ea8Slogwang static int
igb_timesync_read_time(struct rte_eth_dev * dev,struct timespec * ts)4769a9643ea8Slogwang igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
4770a9643ea8Slogwang {
4771a9643ea8Slogwang uint64_t ns, systime_cycles;
47724b05018fSfengbojiang struct e1000_adapter *adapter = dev->data->dev_private;
4773a9643ea8Slogwang
4774a9643ea8Slogwang systime_cycles = igb_read_systime_cyclecounter(dev);
4775a9643ea8Slogwang ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
4776a9643ea8Slogwang *ts = rte_ns_to_timespec(ns);
4777a9643ea8Slogwang
4778a9643ea8Slogwang return 0;
4779a9643ea8Slogwang }
4780a9643ea8Slogwang
/*
 * Enable IEEE 1588 (PTP) support: reset and restart the SYSTIM clock,
 * install the 1588 L2 ethertype filter and turn on Rx/Tx packet
 * timestamping.  Returns -ENOTSUP on MAC types without timesync
 * registers.
 */
static int
igb_timesync_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;
	uint32_t tsauxc;

	/* Stop the timesync system time. */
	E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0);
	/* Reset the timesync system time value. */
	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* These MACs additionally have the residue register. */
		E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
		/* fall-through */
	case e1000_82576:
		E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
		E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
		break;
	default:
		/* Not supported. */
		return -ENOTSUP;
	}

	/* Enable system time for it isn't on by default. */
	tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
	tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
	E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);

	/* (Re)initialize the software timecounters and start SYSTIM. */
	igb_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
			(RTE_ETHER_TYPE_1588 |
			 E1000_ETQF_FILTER_ENABLE |
			 E1000_ETQF_1588));

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

	/* Enable Timestamping of transmitted PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

	return 0;
}
4833a9643ea8Slogwang
/*
 * Disable IEEE 1588 (PTP) support: stop Rx/Tx timestamping, remove the
 * 1588 L2 ethertype filter and freeze the SYSTIM clock.
 */
static int
igb_timesync_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);

	/* Stop incrementing the System Time registers. */
	E1000_WRITE_REG(hw, E1000_TIMINCA, 0);

	return 0;
}
4858a9643ea8Slogwang
4859a9643ea8Slogwang static int
igb_timesync_read_rx_timestamp(struct rte_eth_dev * dev,struct timespec * timestamp,uint32_t flags __rte_unused)4860a9643ea8Slogwang igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
4861a9643ea8Slogwang struct timespec *timestamp,
4862a9643ea8Slogwang uint32_t flags __rte_unused)
4863a9643ea8Slogwang {
4864a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
48654b05018fSfengbojiang struct e1000_adapter *adapter = dev->data->dev_private;
4866a9643ea8Slogwang uint32_t tsync_rxctl;
4867a9643ea8Slogwang uint64_t rx_tstamp_cycles;
4868a9643ea8Slogwang uint64_t ns;
4869a9643ea8Slogwang
4870a9643ea8Slogwang tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
4871a9643ea8Slogwang if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
4872a9643ea8Slogwang return -EINVAL;
4873a9643ea8Slogwang
4874a9643ea8Slogwang rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev);
4875a9643ea8Slogwang ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
4876a9643ea8Slogwang *timestamp = rte_ns_to_timespec(ns);
4877a9643ea8Slogwang
4878a9643ea8Slogwang return 0;
4879a9643ea8Slogwang }
4880a9643ea8Slogwang
4881a9643ea8Slogwang static int
igb_timesync_read_tx_timestamp(struct rte_eth_dev * dev,struct timespec * timestamp)4882a9643ea8Slogwang igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
4883a9643ea8Slogwang struct timespec *timestamp)
4884a9643ea8Slogwang {
4885a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
48864b05018fSfengbojiang struct e1000_adapter *adapter = dev->data->dev_private;
4887a9643ea8Slogwang uint32_t tsync_txctl;
4888a9643ea8Slogwang uint64_t tx_tstamp_cycles;
4889a9643ea8Slogwang uint64_t ns;
4890a9643ea8Slogwang
4891a9643ea8Slogwang tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
4892a9643ea8Slogwang if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
4893a9643ea8Slogwang return -EINVAL;
4894a9643ea8Slogwang
4895a9643ea8Slogwang tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev);
4896a9643ea8Slogwang ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
4897a9643ea8Slogwang *timestamp = rte_ns_to_timespec(ns);
4898a9643ea8Slogwang
4899a9643ea8Slogwang return 0;
4900a9643ea8Slogwang }
4901a9643ea8Slogwang
4902a9643ea8Slogwang static int
eth_igb_get_reg_length(struct rte_eth_dev * dev __rte_unused)4903a9643ea8Slogwang eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4904a9643ea8Slogwang {
4905a9643ea8Slogwang int count = 0;
4906a9643ea8Slogwang int g_ind = 0;
4907a9643ea8Slogwang const struct reg_info *reg_group;
4908a9643ea8Slogwang
4909a9643ea8Slogwang while ((reg_group = igb_regs[g_ind++]))
4910a9643ea8Slogwang count += igb_reg_group_count(reg_group);
4911a9643ea8Slogwang
4912a9643ea8Slogwang return count;
4913a9643ea8Slogwang }
4914a9643ea8Slogwang
4915a9643ea8Slogwang static int
igbvf_get_reg_length(struct rte_eth_dev * dev __rte_unused)4916a9643ea8Slogwang igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
4917a9643ea8Slogwang {
4918a9643ea8Slogwang int count = 0;
4919a9643ea8Slogwang int g_ind = 0;
4920a9643ea8Slogwang const struct reg_info *reg_group;
4921a9643ea8Slogwang
4922a9643ea8Slogwang while ((reg_group = igbvf_regs[g_ind++]))
4923a9643ea8Slogwang count += igb_reg_group_count(reg_group);
4924a9643ea8Slogwang
4925a9643ea8Slogwang return count;
4926a9643ea8Slogwang }
4927a9643ea8Slogwang
4928a9643ea8Slogwang static int
eth_igb_get_regs(struct rte_eth_dev * dev,struct rte_dev_reg_info * regs)4929a9643ea8Slogwang eth_igb_get_regs(struct rte_eth_dev *dev,
4930a9643ea8Slogwang struct rte_dev_reg_info *regs)
4931a9643ea8Slogwang {
4932a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4933a9643ea8Slogwang uint32_t *data = regs->data;
4934a9643ea8Slogwang int g_ind = 0;
4935a9643ea8Slogwang int count = 0;
4936a9643ea8Slogwang const struct reg_info *reg_group;
4937a9643ea8Slogwang
4938a9643ea8Slogwang if (data == NULL) {
4939a9643ea8Slogwang regs->length = eth_igb_get_reg_length(dev);
4940a9643ea8Slogwang regs->width = sizeof(uint32_t);
4941a9643ea8Slogwang return 0;
4942a9643ea8Slogwang }
4943a9643ea8Slogwang
4944a9643ea8Slogwang /* Support only full register dump */
4945a9643ea8Slogwang if ((regs->length == 0) ||
4946a9643ea8Slogwang (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
4947a9643ea8Slogwang regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4948a9643ea8Slogwang hw->device_id;
4949a9643ea8Slogwang while ((reg_group = igb_regs[g_ind++]))
4950a9643ea8Slogwang count += igb_read_regs_group(dev, &data[count],
4951a9643ea8Slogwang reg_group);
4952a9643ea8Slogwang return 0;
4953a9643ea8Slogwang }
4954a9643ea8Slogwang
4955a9643ea8Slogwang return -ENOTSUP;
4956a9643ea8Slogwang }
4957a9643ea8Slogwang
4958a9643ea8Slogwang static int
igbvf_get_regs(struct rte_eth_dev * dev,struct rte_dev_reg_info * regs)4959a9643ea8Slogwang igbvf_get_regs(struct rte_eth_dev *dev,
4960a9643ea8Slogwang struct rte_dev_reg_info *regs)
4961a9643ea8Slogwang {
4962a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4963a9643ea8Slogwang uint32_t *data = regs->data;
4964a9643ea8Slogwang int g_ind = 0;
4965a9643ea8Slogwang int count = 0;
4966a9643ea8Slogwang const struct reg_info *reg_group;
4967a9643ea8Slogwang
4968a9643ea8Slogwang if (data == NULL) {
4969a9643ea8Slogwang regs->length = igbvf_get_reg_length(dev);
4970a9643ea8Slogwang regs->width = sizeof(uint32_t);
4971a9643ea8Slogwang return 0;
4972a9643ea8Slogwang }
4973a9643ea8Slogwang
4974a9643ea8Slogwang /* Support only full register dump */
4975a9643ea8Slogwang if ((regs->length == 0) ||
4976a9643ea8Slogwang (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
4977a9643ea8Slogwang regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
4978a9643ea8Slogwang hw->device_id;
4979a9643ea8Slogwang while ((reg_group = igbvf_regs[g_ind++]))
4980a9643ea8Slogwang count += igb_read_regs_group(dev, &data[count],
4981a9643ea8Slogwang reg_group);
4982a9643ea8Slogwang return 0;
4983a9643ea8Slogwang }
4984a9643ea8Slogwang
4985a9643ea8Slogwang return -ENOTSUP;
4986a9643ea8Slogwang }
4987a9643ea8Slogwang
4988a9643ea8Slogwang static int
eth_igb_get_eeprom_length(struct rte_eth_dev * dev)4989a9643ea8Slogwang eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
4990a9643ea8Slogwang {
4991a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4992a9643ea8Slogwang
4993a9643ea8Slogwang /* Return unit is byte count */
4994a9643ea8Slogwang return hw->nvm.word_size * 2;
4995a9643ea8Slogwang }
4996a9643ea8Slogwang
4997a9643ea8Slogwang static int
eth_igb_get_eeprom(struct rte_eth_dev * dev,struct rte_dev_eeprom_info * in_eeprom)4998a9643ea8Slogwang eth_igb_get_eeprom(struct rte_eth_dev *dev,
4999a9643ea8Slogwang struct rte_dev_eeprom_info *in_eeprom)
5000a9643ea8Slogwang {
5001a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5002a9643ea8Slogwang struct e1000_nvm_info *nvm = &hw->nvm;
5003a9643ea8Slogwang uint16_t *data = in_eeprom->data;
5004a9643ea8Slogwang int first, length;
5005a9643ea8Slogwang
5006a9643ea8Slogwang first = in_eeprom->offset >> 1;
5007a9643ea8Slogwang length = in_eeprom->length >> 1;
5008a9643ea8Slogwang if ((first >= hw->nvm.word_size) ||
5009a9643ea8Slogwang ((first + length) >= hw->nvm.word_size))
5010a9643ea8Slogwang return -EINVAL;
5011a9643ea8Slogwang
5012a9643ea8Slogwang in_eeprom->magic = hw->vendor_id |
5013a9643ea8Slogwang ((uint32_t)hw->device_id << 16);
5014a9643ea8Slogwang
5015a9643ea8Slogwang if ((nvm->ops.read) == NULL)
5016a9643ea8Slogwang return -ENOTSUP;
5017a9643ea8Slogwang
5018a9643ea8Slogwang return nvm->ops.read(hw, first, length, data);
5019a9643ea8Slogwang }
5020a9643ea8Slogwang
5021a9643ea8Slogwang static int
eth_igb_set_eeprom(struct rte_eth_dev * dev,struct rte_dev_eeprom_info * in_eeprom)5022a9643ea8Slogwang eth_igb_set_eeprom(struct rte_eth_dev *dev,
5023a9643ea8Slogwang struct rte_dev_eeprom_info *in_eeprom)
5024a9643ea8Slogwang {
5025a9643ea8Slogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5026a9643ea8Slogwang struct e1000_nvm_info *nvm = &hw->nvm;
5027a9643ea8Slogwang uint16_t *data = in_eeprom->data;
5028a9643ea8Slogwang int first, length;
5029a9643ea8Slogwang
5030a9643ea8Slogwang first = in_eeprom->offset >> 1;
5031a9643ea8Slogwang length = in_eeprom->length >> 1;
5032a9643ea8Slogwang if ((first >= hw->nvm.word_size) ||
5033a9643ea8Slogwang ((first + length) >= hw->nvm.word_size))
5034a9643ea8Slogwang return -EINVAL;
5035a9643ea8Slogwang
5036a9643ea8Slogwang in_eeprom->magic = (uint32_t)hw->vendor_id |
5037a9643ea8Slogwang ((uint32_t)hw->device_id << 16);
5038a9643ea8Slogwang
5039a9643ea8Slogwang if ((nvm->ops.write) == NULL)
5040a9643ea8Slogwang return -ENOTSUP;
5041a9643ea8Slogwang return nvm->ops.write(hw, first, length, data);
5042a9643ea8Slogwang }
5043a9643ea8Slogwang
5044a9643ea8Slogwang static int
eth_igb_get_module_info(struct rte_eth_dev * dev,struct rte_eth_dev_module_info * modinfo)5045d30ea906Sjfb8856606 eth_igb_get_module_info(struct rte_eth_dev *dev,
5046d30ea906Sjfb8856606 struct rte_eth_dev_module_info *modinfo)
5047d30ea906Sjfb8856606 {
5048d30ea906Sjfb8856606 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5049d30ea906Sjfb8856606
5050d30ea906Sjfb8856606 uint32_t status = 0;
5051d30ea906Sjfb8856606 uint16_t sff8472_rev, addr_mode;
5052d30ea906Sjfb8856606 bool page_swap = false;
5053d30ea906Sjfb8856606
5054d30ea906Sjfb8856606 if (hw->phy.media_type == e1000_media_type_copper ||
5055d30ea906Sjfb8856606 hw->phy.media_type == e1000_media_type_unknown)
5056d30ea906Sjfb8856606 return -EOPNOTSUPP;
5057d30ea906Sjfb8856606
5058d30ea906Sjfb8856606 /* Check whether we support SFF-8472 or not */
5059d30ea906Sjfb8856606 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
5060d30ea906Sjfb8856606 if (status)
5061d30ea906Sjfb8856606 return -EIO;
5062d30ea906Sjfb8856606
5063d30ea906Sjfb8856606 /* addressing mode is not supported */
5064d30ea906Sjfb8856606 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
5065d30ea906Sjfb8856606 if (status)
5066d30ea906Sjfb8856606 return -EIO;
5067d30ea906Sjfb8856606
5068d30ea906Sjfb8856606 /* addressing mode is not supported */
5069d30ea906Sjfb8856606 if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
5070d30ea906Sjfb8856606 PMD_DRV_LOG(ERR,
5071d30ea906Sjfb8856606 "Address change required to access page 0xA2, "
5072d30ea906Sjfb8856606 "but not supported. Please report the module "
5073d30ea906Sjfb8856606 "type to the driver maintainers.\n");
5074d30ea906Sjfb8856606 page_swap = true;
5075d30ea906Sjfb8856606 }
5076d30ea906Sjfb8856606
5077d30ea906Sjfb8856606 if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
5078d30ea906Sjfb8856606 /* We have an SFP, but it does not support SFF-8472 */
5079d30ea906Sjfb8856606 modinfo->type = RTE_ETH_MODULE_SFF_8079;
5080d30ea906Sjfb8856606 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
5081d30ea906Sjfb8856606 } else {
5082d30ea906Sjfb8856606 /* We have an SFP which supports a revision of SFF-8472 */
5083d30ea906Sjfb8856606 modinfo->type = RTE_ETH_MODULE_SFF_8472;
5084d30ea906Sjfb8856606 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
5085d30ea906Sjfb8856606 }
5086d30ea906Sjfb8856606
5087d30ea906Sjfb8856606 return 0;
5088d30ea906Sjfb8856606 }
5089d30ea906Sjfb8856606
5090d30ea906Sjfb8856606 static int
eth_igb_get_module_eeprom(struct rte_eth_dev * dev,struct rte_dev_eeprom_info * info)5091d30ea906Sjfb8856606 eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
5092d30ea906Sjfb8856606 struct rte_dev_eeprom_info *info)
5093d30ea906Sjfb8856606 {
5094d30ea906Sjfb8856606 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5095d30ea906Sjfb8856606
5096d30ea906Sjfb8856606 uint32_t status = 0;
5097d30ea906Sjfb8856606 uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
5098d30ea906Sjfb8856606 u16 first_word, last_word;
5099d30ea906Sjfb8856606 int i = 0;
5100d30ea906Sjfb8856606
5101d30ea906Sjfb8856606 if (info->length == 0)
5102d30ea906Sjfb8856606 return -EINVAL;
5103d30ea906Sjfb8856606
5104d30ea906Sjfb8856606 first_word = info->offset >> 1;
5105d30ea906Sjfb8856606 last_word = (info->offset + info->length - 1) >> 1;
5106d30ea906Sjfb8856606
5107d30ea906Sjfb8856606 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
5108d30ea906Sjfb8856606 for (i = 0; i < last_word - first_word + 1; i++) {
5109d30ea906Sjfb8856606 status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
5110d30ea906Sjfb8856606 &dataword[i]);
5111d30ea906Sjfb8856606 if (status) {
5112d30ea906Sjfb8856606 /* Error occurred while reading module */
5113d30ea906Sjfb8856606 return -EIO;
5114d30ea906Sjfb8856606 }
5115d30ea906Sjfb8856606
5116d30ea906Sjfb8856606 dataword[i] = rte_be_to_cpu_16(dataword[i]);
5117d30ea906Sjfb8856606 }
5118d30ea906Sjfb8856606
5119d30ea906Sjfb8856606 memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);
5120d30ea906Sjfb8856606
5121d30ea906Sjfb8856606 return 0;
5122d30ea906Sjfb8856606 }
5123d30ea906Sjfb8856606
5124d30ea906Sjfb8856606 static int
eth_igb_rx_queue_intr_disable(struct rte_eth_dev * dev,uint16_t queue_id)5125a9643ea8Slogwang eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5126a9643ea8Slogwang {
5127a9643ea8Slogwang struct e1000_hw *hw =
5128a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
51292bfe3f2eSlogwang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
51302bfe3f2eSlogwang struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
51312bfe3f2eSlogwang uint32_t vec = E1000_MISC_VEC_ID;
51322bfe3f2eSlogwang
51332bfe3f2eSlogwang if (rte_intr_allow_others(intr_handle))
51342bfe3f2eSlogwang vec = E1000_RX_VEC_START;
51352bfe3f2eSlogwang
51362bfe3f2eSlogwang uint32_t mask = 1 << (queue_id + vec);
5137a9643ea8Slogwang
5138a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_EIMC, mask);
5139a9643ea8Slogwang E1000_WRITE_FLUSH(hw);
5140a9643ea8Slogwang
5141a9643ea8Slogwang return 0;
5142a9643ea8Slogwang }
5143a9643ea8Slogwang
5144a9643ea8Slogwang static int
eth_igb_rx_queue_intr_enable(struct rte_eth_dev * dev,uint16_t queue_id)5145a9643ea8Slogwang eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5146a9643ea8Slogwang {
5147a9643ea8Slogwang struct e1000_hw *hw =
5148a9643ea8Slogwang E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
51492bfe3f2eSlogwang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
51502bfe3f2eSlogwang struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
51512bfe3f2eSlogwang uint32_t vec = E1000_MISC_VEC_ID;
51522bfe3f2eSlogwang
51532bfe3f2eSlogwang if (rte_intr_allow_others(intr_handle))
51542bfe3f2eSlogwang vec = E1000_RX_VEC_START;
51552bfe3f2eSlogwang
51562bfe3f2eSlogwang uint32_t mask = 1 << (queue_id + vec);
5157a9643ea8Slogwang uint32_t regval;
5158a9643ea8Slogwang
5159a9643ea8Slogwang regval = E1000_READ_REG(hw, E1000_EIMS);
5160a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
5161a9643ea8Slogwang E1000_WRITE_FLUSH(hw);
5162a9643ea8Slogwang
51634418919fSjohnjiang rte_intr_ack(intr_handle);
5164a9643ea8Slogwang
5165a9643ea8Slogwang return 0;
5166a9643ea8Slogwang }
5167a9643ea8Slogwang
5168a9643ea8Slogwang static void
eth_igb_write_ivar(struct e1000_hw * hw,uint8_t msix_vector,uint8_t index,uint8_t offset)5169a9643ea8Slogwang eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
5170a9643ea8Slogwang uint8_t index, uint8_t offset)
5171a9643ea8Slogwang {
5172a9643ea8Slogwang uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
5173a9643ea8Slogwang
5174a9643ea8Slogwang /* clear bits */
5175a9643ea8Slogwang val &= ~((uint32_t)0xFF << offset);
5176a9643ea8Slogwang
5177a9643ea8Slogwang /* write vector and valid bit */
5178a9643ea8Slogwang val |= (msix_vector | E1000_IVAR_VALID) << offset;
5179a9643ea8Slogwang
5180a9643ea8Slogwang E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
5181a9643ea8Slogwang }
5182a9643ea8Slogwang
5183a9643ea8Slogwang static void
eth_igb_assign_msix_vector(struct e1000_hw * hw,int8_t direction,uint8_t queue,uint8_t msix_vector)5184a9643ea8Slogwang eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
5185a9643ea8Slogwang uint8_t queue, uint8_t msix_vector)
5186a9643ea8Slogwang {
5187a9643ea8Slogwang uint32_t tmp = 0;
5188a9643ea8Slogwang
5189a9643ea8Slogwang if (hw->mac.type == e1000_82575) {
5190a9643ea8Slogwang if (direction == 0)
5191a9643ea8Slogwang tmp = E1000_EICR_RX_QUEUE0 << queue;
5192a9643ea8Slogwang else if (direction == 1)
5193a9643ea8Slogwang tmp = E1000_EICR_TX_QUEUE0 << queue;
5194a9643ea8Slogwang E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
5195a9643ea8Slogwang } else if (hw->mac.type == e1000_82576) {
5196a9643ea8Slogwang if ((direction == 0) || (direction == 1))
5197a9643ea8Slogwang eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
5198a9643ea8Slogwang ((queue & 0x8) << 1) +
5199a9643ea8Slogwang 8 * direction);
5200a9643ea8Slogwang } else if ((hw->mac.type == e1000_82580) ||
5201a9643ea8Slogwang (hw->mac.type == e1000_i350) ||
5202a9643ea8Slogwang (hw->mac.type == e1000_i354) ||
5203a9643ea8Slogwang (hw->mac.type == e1000_i210) ||
5204a9643ea8Slogwang (hw->mac.type == e1000_i211)) {
5205a9643ea8Slogwang if ((direction == 0) || (direction == 1))
5206a9643ea8Slogwang eth_igb_write_ivar(hw, msix_vector,
5207a9643ea8Slogwang queue >> 1,
5208a9643ea8Slogwang ((queue & 0x1) << 4) +
5209a9643ea8Slogwang 8 * direction);
5210a9643ea8Slogwang }
5211a9643ea8Slogwang }
5212a9643ea8Slogwang
/* Sets up the hardware to generate MSI-X interrupts properly
 * @hw
 * board private structure
 */
static void
eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
{
	int queue_id;
	uint32_t tmpval, regval, intr_mask;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vec = E1000_MISC_VEC_ID;
	uint32_t base = E1000_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		/* Vector 0 is kept for misc/link events, so queue vectors
		 * start at E1000_RX_VEC_START and queue mask bits shift up
		 * by one.
		 */
		vec = base = E1000_RX_VEC_START;
		misc_shift = 1;
	}

	/* set interrupt vector for other causes */
	if (hw->mac.type == e1000_82575) {
		tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmpval |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read */
		tmpval |= E1000_CTRL_EXT_EIAME;
		tmpval |= E1000_CTRL_EXT_IRCA;

		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);

		/* enable msix_other interrupt */
		E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAM);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
	} else if ((hw->mac.type == e1000_82576) ||
			(hw->mac.type == e1000_82580) ||
			(hw->mac.type == e1000_i350) ||
			(hw->mac.type == e1000_i354) ||
			(hw->mac.type == e1000_i210) ||
			(hw->mac.type == e1000_i211)) {
		/* turn on MSI-X capability first */
		E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
					E1000_GPIE_PBA | E1000_GPIE_EIAME |
					E1000_GPIE_NSICR);
		/* One mask bit per configured event fd, shifted past the
		 * misc vector when one is reserved.
		 */
		intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
			misc_shift;

		/* Include the "other" vector when link-status interrupts
		 * are requested.
		 */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);

		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);

		/* enable msix_other interrupt */
		regval = E1000_READ_REG(hw, E1000_EIMS);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
		/* Route misc causes to the "other" vector via IVAR_MISC
		 * (vector number in byte 1, valid bit set).
		 */
		tmpval = (IGB_MSIX_OTHER_INTR_VEC | E1000_IVAR_VALID) << 8;
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
	}

	/* use EIAM to auto-mask when MSI-X interrupt
	 * is asserted, this saves a register write for every interrupt
	 */
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
		misc_shift;

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);

	regval = E1000_READ_REG(hw, E1000_EIAM);
	E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);

	/* Map each RX queue to a vector; once the event fds run out, the
	 * remaining queues share the last vector.
	 */
	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
		eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	E1000_WRITE_FLUSH(hw);
}
5306a9643ea8Slogwang
53072bfe3f2eSlogwang /* restore n-tuple filter */
53082bfe3f2eSlogwang static inline void
igb_ntuple_filter_restore(struct rte_eth_dev * dev)53092bfe3f2eSlogwang igb_ntuple_filter_restore(struct rte_eth_dev *dev)
53102bfe3f2eSlogwang {
53112bfe3f2eSlogwang struct e1000_filter_info *filter_info =
53122bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
53132bfe3f2eSlogwang struct e1000_5tuple_filter *p_5tuple;
53142bfe3f2eSlogwang struct e1000_2tuple_filter *p_2tuple;
53152bfe3f2eSlogwang
53162bfe3f2eSlogwang TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
53172bfe3f2eSlogwang igb_inject_5tuple_filter_82576(dev, p_5tuple);
53182bfe3f2eSlogwang }
53192bfe3f2eSlogwang
53202bfe3f2eSlogwang TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
53212bfe3f2eSlogwang igb_inject_2uple_filter(dev, p_2tuple);
53222bfe3f2eSlogwang }
53232bfe3f2eSlogwang }
53242bfe3f2eSlogwang
53252bfe3f2eSlogwang /* restore SYN filter */
53262bfe3f2eSlogwang static inline void
igb_syn_filter_restore(struct rte_eth_dev * dev)53272bfe3f2eSlogwang igb_syn_filter_restore(struct rte_eth_dev *dev)
53282bfe3f2eSlogwang {
53292bfe3f2eSlogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
53302bfe3f2eSlogwang struct e1000_filter_info *filter_info =
53312bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
53322bfe3f2eSlogwang uint32_t synqf;
53332bfe3f2eSlogwang
53342bfe3f2eSlogwang synqf = filter_info->syn_info;
53352bfe3f2eSlogwang
53362bfe3f2eSlogwang if (synqf & E1000_SYN_FILTER_ENABLE) {
53372bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
53382bfe3f2eSlogwang E1000_WRITE_FLUSH(hw);
53392bfe3f2eSlogwang }
53402bfe3f2eSlogwang }
53412bfe3f2eSlogwang
53422bfe3f2eSlogwang /* restore ethernet type filter */
53432bfe3f2eSlogwang static inline void
igb_ethertype_filter_restore(struct rte_eth_dev * dev)53442bfe3f2eSlogwang igb_ethertype_filter_restore(struct rte_eth_dev *dev)
53452bfe3f2eSlogwang {
53462bfe3f2eSlogwang struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
53472bfe3f2eSlogwang struct e1000_filter_info *filter_info =
53482bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
53492bfe3f2eSlogwang int i;
53502bfe3f2eSlogwang
53512bfe3f2eSlogwang for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
53522bfe3f2eSlogwang if (filter_info->ethertype_mask & (1 << i)) {
53532bfe3f2eSlogwang E1000_WRITE_REG(hw, E1000_ETQF(i),
53542bfe3f2eSlogwang filter_info->ethertype_filters[i].etqf);
53552bfe3f2eSlogwang E1000_WRITE_FLUSH(hw);
53562bfe3f2eSlogwang }
53572bfe3f2eSlogwang }
53582bfe3f2eSlogwang }
53592bfe3f2eSlogwang
53602bfe3f2eSlogwang /* restore flex byte filter */
53612bfe3f2eSlogwang static inline void
igb_flex_filter_restore(struct rte_eth_dev * dev)53622bfe3f2eSlogwang igb_flex_filter_restore(struct rte_eth_dev *dev)
53632bfe3f2eSlogwang {
53642bfe3f2eSlogwang struct e1000_filter_info *filter_info =
53652bfe3f2eSlogwang E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
53662bfe3f2eSlogwang struct e1000_flex_filter *flex_filter;
53672bfe3f2eSlogwang
53682bfe3f2eSlogwang TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
53692bfe3f2eSlogwang igb_inject_flex_filter(dev, flex_filter);
53702bfe3f2eSlogwang }
53712bfe3f2eSlogwang }
53722bfe3f2eSlogwang
5373d30ea906Sjfb8856606 /* restore rss filter */
5374d30ea906Sjfb8856606 static inline void
igb_rss_filter_restore(struct rte_eth_dev * dev)5375d30ea906Sjfb8856606 igb_rss_filter_restore(struct rte_eth_dev *dev)
5376d30ea906Sjfb8856606 {
5377d30ea906Sjfb8856606 struct e1000_filter_info *filter_info =
5378d30ea906Sjfb8856606 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5379d30ea906Sjfb8856606
5380d30ea906Sjfb8856606 if (filter_info->rss_info.conf.queue_num)
5381d30ea906Sjfb8856606 igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
5382d30ea906Sjfb8856606 }
5383d30ea906Sjfb8856606
/* restore all types filter
 *
 * Re-programs every filter kind cached in the private filter info
 * (n-tuple, ethertype, SYN, flex, RSS). Each helper checks its own
 * cached state and does nothing when no filters of its kind are stored.
 * Always returns 0.
 */
static int
igb_filter_restore(struct rte_eth_dev *dev)
{
	igb_ntuple_filter_restore(dev);
	igb_ethertype_filter_restore(dev);
	igb_syn_filter_restore(dev);
	igb_flex_filter_restore(dev);
	igb_rss_filter_restore(dev);

	return 0;
}
53962bfe3f2eSlogwang
/* Register the PF and VF drivers with the PCI bus layer, together with
 * their PCI device ID tables and the kernel module dependencies probing
 * requires.
 */
RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");

/* see e1000_logs.c */
RTE_INIT(e1000_init_log)
{
	e1000_igb_init_log();
}
5409