/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#include "igb_regs.h"

/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH	32

#define IGB_DEFAULT_RX_PTHRESH		((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_DEFAULT_RX_HTHRESH		8
#define IGB_DEFAULT_RX_WTHRESH		((hw->mac.type == e1000_82576) ? 1 : 4)

#define IGB_DEFAULT_TX_PTHRESH		((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_DEFAULT_TX_HTHRESH		1
#define IGB_DEFAULT_TX_WTHRESH		((hw->mac.type == e1000_82576) ? 1 : 16)

/* Bit shift and mask */
#define IGB_4_BIT_WIDTH		(CHAR_BIT / 2)
#define IGB_4_BIT_MASK		RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH		CHAR_BIT
#define IGB_8_BIT_MASK		UINT8_MAX

/* Additional timesync values. */
#define E1000_CYCLECOUNTER_MASK		0xffffffffffffffffULL
#define E1000_ETQF_FILTER_1588		3
#define IGB_82576_TSYNC_SHIFT		16
#define E1000_INCPERIOD_82576		(1 << E1000_TIMINCA_16NS_SHIFT)
#define E1000_INCVALUE_82576		(16 << IGB_82576_TSYNC_SHIFT)
#define E1000_TSAUXC_DISABLE_SYSTIME	0x80000000

#define E1000_VTIVAR_MISC		0x01740
#define E1000_VTIVAR_MISC_MASK		0xFF
#define E1000_VTIVAR_VALID		0x80
#define E1000_VTIVAR_MISC_MAILBOX	0
#define E1000_VTIVAR_MISC_INTR_MASK	0x3

/* External VLAN Enable bit mask */
#define E1000_CTRL_EXT_EXT_VLAN		(1 << 26)

/* External VLAN Ether Type bit mask and shift */
#define E1000_VET_VET_EXT		0xFFFF0000
#define E1000_VET_VET_EXT_SHIFT		16

/* MSI-X other interrupt vector */
#define IGB_MSIX_OTHER_INTR_VEC		0

static int eth_igb_configure(struct rte_eth_dev *dev);
static int eth_igb_start(struct rte_eth_dev *dev);
static int eth_igb_stop(struct rte_eth_dev *dev);
static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static int eth_igb_close(struct rte_eth_dev *dev);
static int eth_igb_reset(struct rte_eth_dev *dev);
static int eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values, unsigned int n);
static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int size);
static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
static int eth_igb_stats_reset(struct rte_eth_dev *dev);
static int eth_igb_xstats_reset(struct rte_eth_dev *dev);
static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static int eth_igb_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igbvf_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
				struct rte_intr_handle *handle);
static void eth_igb_interrupt_handler(void *param);
static int igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id, int on);
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
				enum rte_vlan_type vlan_type,
				uint16_t tpid_id);
static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct rte_eth_dev *dev);
static int igb_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_igb_rar_set(struct rte_eth_dev *dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
				struct rte_ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static int igbvf_dev_stop(struct rte_eth_dev *dev);
static int igbvf_dev_close(struct rte_eth_dev *dev);
static int igbvf_promiscuous_enable(struct rte_eth_dev *dev);
static int igbvf_promiscuous_disable(struct rte_eth_dev *dev);
static int igbvf_allmulticast_enable(struct rte_eth_dev *dev);
static int igbvf_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int
eth_igbvf_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned limit);
static int eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
				struct rte_ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
				struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
				struct rte_eth_rss_reta_entry64 *reta_conf,
				uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
				struct rte_eth_rss_reta_entry64 *reta_conf,
				uint16_t reta_size);

static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
				struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
				struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
				struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
				struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_flow_ops_get(struct rte_eth_dev *dev,
				const struct rte_flow_ops **ops);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
				struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *eeprom);
static int eth_igb_get_module_info(struct rte_eth_dev *dev,
				struct rte_eth_dev_module_info *modinfo);
static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
				struct rte_dev_eeprom_info *info);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
				struct rte_ether_addr *mc_addr_set,
				uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				struct timespec *timestamp,
				uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				struct timespec *timestamp);
static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int igb_timesync_read_time(struct rte_eth_dev *dev,
				struct timespec *timestamp);
static int igb_timesync_write_time(struct rte_eth_dev *dev,
				const struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
				uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
				uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
				uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
				uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
static int igb_filter_restore(struct rte_eth_dev *dev);

/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)           \
{                                                \
	u32 latest = E1000_READ_REG(hw, reg);    \
	cur += (latest - last) & UINT_MAX;       \
	last = latest;                           \
}

#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
	.nb_seg_max = IGB_TX_MAX_SEG,
	.nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
};

static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure = eth_igb_configure,
	.dev_start = eth_igb_start,
	.dev_stop = eth_igb_stop,
	.dev_set_link_up = eth_igb_dev_set_link_up,
	.dev_set_link_down = eth_igb_dev_set_link_down,
	.dev_close = eth_igb_close,
	.dev_reset = eth_igb_reset,
	.promiscuous_enable = eth_igb_promiscuous_enable,
	.promiscuous_disable = eth_igb_promiscuous_disable,
	.allmulticast_enable = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igb_stats_get,
	.xstats_get = eth_igb_xstats_get,
	.xstats_get_by_id = eth_igb_xstats_get_by_id,
	.xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
	.xstats_get_names = eth_igb_xstats_get_names,
	.stats_reset = eth_igb_stats_reset,
	.xstats_reset = eth_igb_xstats_reset,
	.fw_version_get = eth_igb_fw_version_get,
	.dev_infos_get = eth_igb_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.mtu_set = eth_igb_mtu_set,
	.vlan_filter_set = eth_igb_vlan_filter_set,
	.vlan_tpid_set = eth_igb_vlan_tpid_set,
	.vlan_offload_set = eth_igb_vlan_offload_set,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
	.rx_queue_release = eth_igb_rx_queue_release,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.tx_done_cleanup = eth_igb_tx_done_cleanup,
	.dev_led_on = eth_igb_led_on,
	.dev_led_off = eth_igb_led_off,
	.flow_ctrl_get = eth_igb_flow_ctrl_get,
	.flow_ctrl_set = eth_igb_flow_ctrl_set,
	.mac_addr_add = eth_igb_rar_set,
	.mac_addr_remove = eth_igb_rar_clear,
	.mac_addr_set = eth_igb_default_mac_addr_set,
	.reta_update = eth_igb_rss_reta_update,
	.reta_query = eth_igb_rss_reta_query,
	.rss_hash_update = eth_igb_rss_hash_update,
	.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
	.flow_ops_get = eth_igb_flow_ops_get,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.timesync_enable = igb_timesync_enable,
	.timesync_disable = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
	.get_reg =
eth_igb_get_regs,
	.get_eeprom_length = eth_igb_get_eeprom_length,
	.get_eeprom = eth_igb_get_eeprom,
	.set_eeprom = eth_igb_set_eeprom,
	.get_module_info = eth_igb_get_module_info,
	.get_module_eeprom = eth_igb_get_module_eeprom,
	.timesync_adjust_time = igb_timesync_adjust_time,
	.timesync_read_time = igb_timesync_read_time,
	.timesync_write_time = igb_timesync_write_time,
};

/*
 * dev_ops for virtual function; only the bare necessities for basic VF
 * operation are implemented.
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure = igbvf_dev_configure,
	.dev_start = igbvf_dev_start,
	.dev_stop = igbvf_dev_stop,
	.dev_close = igbvf_dev_close,
	.promiscuous_enable = igbvf_promiscuous_enable,
	.promiscuous_disable = igbvf_promiscuous_disable,
	.allmulticast_enable = igbvf_allmulticast_enable,
	.allmulticast_disable = igbvf_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igbvf_stats_get,
	.xstats_get = eth_igbvf_xstats_get,
	.xstats_get_names = eth_igbvf_xstats_get_names,
	.stats_reset = eth_igbvf_stats_reset,
	.xstats_reset = eth_igbvf_stats_reset,
	.vlan_filter_set = igbvf_vlan_filter_set,
	.dev_infos_get = eth_igbvf_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_release = eth_igb_rx_queue_release,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.tx_done_cleanup = eth_igb_tx_done_cleanup,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.mac_addr_set = igbvf_default_mac_addr_set,
	.get_reg = igbvf_get_regs,
};

/* store statistics names and their offsets in the stats structure */
struct rte_igb_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
	{"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
	{"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
	{"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
	{"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		ptc1023)},
	{"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
	{"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
	{"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
	{"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},

	{"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
};

#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
		sizeof(rte_igb_stats_strings[0]))

static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
	{"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
	{"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
	{"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
	{"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
};

#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
		sizeof(rte_igbvf_stats_strings[0]))


static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
	    dev->data->dev_conf.intr_conf.lsc != 0) {
		E1000_WRITE_REG(hw, E1000_EIMS, 1 << IGB_MSIX_OTHER_INTR_VEC);
	}

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
	    dev->data->dev_conf.intr_conf.lsc != 0) {
		E1000_WRITE_REG(hw, E1000_EIMC, 1 << IGB_MSIX_OTHER_INTR_VEC);
	}

	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}

static inline void
igbvf_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* only for mailbox */
	E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_FLUSH(hw);
}

/* only for mailbox now. If RX/TX needed, should extend this function. */
static void
igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	/* mailbox */
	tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
	tmp |= E1000_VTIVAR_VALID;
	E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
}

static void
eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure VF other cause ivar */
	igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
}

static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}

static void
igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);


	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}

static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is
		 * the case, it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}

/* Remove all ntuple filters of the device */
static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
		TAILQ_REMOVE(&filter_info->twotuple_list,
			     p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;

	return 0;
}

/* Remove all flex filters of the device */
static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_flex_filter *p_flex;

	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	return 0;
}

static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	uint32_t ctrl_ext;

	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_queue_count = eth_igb_rx_queue_count;
	eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
	eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 * Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev, pci_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it is a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			"SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);
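	/*
	 * The link-state interrupt callback below is registered before
	 * rte_intr_enable() and igb_intr_enable(), so the handler is already
	 * in place by the time the uio/vfio eventfd mapping is enabled and
	 * the interrupt causes are unmasked.
	 */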
	rte_intr_callback_register(&pci_dev->intr_handle,
				   eth_igb_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igb_intr_enable(eth_dev);

	eth_igb_dev_set_link_down(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct e1000_filter_info));

	TAILQ_INIT(&filter_info->flex_list);
	TAILQ_INIT(&filter_info->twotuple_list);
	TAILQ_INIT(&filter_info->fivetuple_list);

	TAILQ_INIT(&igb_filter_ntuple_list);
	TAILQ_INIT(&igb_filter_ethertype_list);
	TAILQ_INIT(&igb_filter_syn_list);
	TAILQ_INIT(&igb_filter_flex_list);
	TAILQ_INIT(&igb_filter_rss_list);
	TAILQ_INIT(&igb_flow_list);

	return 0;

err_late:
	igb_hw_control_release(hw);

	return error;
}

static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_igb_close(eth_dev);

	return 0;
}

/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;
	struct rte_ether_addr *perm_addr =
		(struct rte_ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
	eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (rte_is_zero_ether_addr(perm_addr)) {
		rte_eth_random_addr(perm_addr->addr_bytes);
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     RTE_ETHER_ADDR_PRT_FMT,
			     RTE_ETHER_ADDR_BYTES(perm_addr));
	}

	diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
	if (diag) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return diag;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		     "mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");

	intr_handle = &pci_dev->intr_handle;
	rte_intr_callback_register(intr_handle,
				   eth_igbvf_interrupt_handler, eth_dev);

	return 0;
}

static int
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	igbvf_dev_close(eth_dev);

	return 0;
}

static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igb_dev_init);
}

static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
}

static struct rte_pci_driver rte_igb_pmd = {
	.id_table = pci_id_igb_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igb_pci_probe,
	.remove = eth_igb_pci_remove,
};


static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igbvf_dev_init);
}

static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
}

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_igbvf_pmd = {
	.id_table = pci_id_igbvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_igbvf_pci_probe,
	.remove = eth_igbvf_pci_remove,
};

static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always use VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
	    tx_mq_mode == ETH_MQ_TX_DCB ||
	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}
	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* Check multi-queue mode.
		 * To not break software we accept ETH_MQ_RX_NONE as this
		 * might be used to turn off VLAN filter.
		 */

		if (rx_mq_mode == ETH_MQ_RX_NONE ||
		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
		} else {
			/* Only support one queue on VFs.
			 * RSS together with SRIOV is not supported.
			 */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					rx_mq_mode);
			return -EINVAL;
		}
		/* TX mode is not used here, so mode might be ignored.*/
		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(WARNING, "SRIOV is active,"
					" TX mode %d is not supported. "
					" Driver will behave as %d mode.",
					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
		}

		/* check valid queue number */
		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" only support one queue on VFs.");
			return -EINVAL;
		}
	} else {
		/* To not break software that sets an invalid mode, only
		 * display a warning if an invalid mode is used.
		 */
		if (rx_mq_mode != ETH_MQ_RX_NONE &&
		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
		    rx_mq_mode != ETH_MQ_RX_RSS) {
			/* RSS together with VMDq not supported*/
			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				     rx_mq_mode);
			return -EINVAL;
		}

		if (tx_mq_mode != ETH_MQ_TX_NONE &&
		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
					" Since txmode is meaningless in this"
					" driver, just ignore it.",
					tx_mq_mode);
		}
	}
	return 0;
}

static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = igb_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
eth_igb_rxtx_control(struct rte_eth_dev *dev,
		     bool enable)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tctl, rctl;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	rctl = E1000_READ_REG(hw, E1000_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= E1000_TCTL_EN;
		rctl |= E1000_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~E1000_TCTL_EN;
		rctl &= ~E1000_RCTL_EN;
	}
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);
}

static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t ctrl_ext;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();
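	/*
	 * Start sequence below: interrupt delivery is disabled first and the
	 * PHY powered up, the MAC is re-initialised via igb_hardware_init(),
	 * MSI-X vectors are mapped to the RX queues, the TX/RX rings are
	 * programmed, VLAN offloads and the advertised link speeds are
	 * applied, and finally interrupts are re-enabled and the flow
	 * filters restored.
	 */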

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igb_dev_set_link_up(dev);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	E1000_WRITE_REG(hw, E1000_VET,
			RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for rx interrupt */
	eth_igb_configure_msix_intr(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
	       ETH_VLAN_EXTEND_MASK;
	ret = eth_igb_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
		igb_dev_clear_queues(dev);
		return ret;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always use VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
	    (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211)) {
		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		hw->mac.autoneg = 1;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
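		/*
		 * Clear the advertisement mask, then translate each
		 * ETH_LINK_SPEED_* bit requested in link_speeds into the
		 * matching ADVERTISE_* PHY bit; a fixed-speed (non-autoneg)
		 * configuration is only accepted when exactly one speed bit
		 * is set.
		 */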
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			eth_igb_lsc_interrupt_setup(dev, TRUE);
		else
			eth_igb_lsc_interrupt_setup(dev, FALSE);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     eth_igb_interrupt_handler,
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		eth_igb_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	/* restore all types filter */
	igb_filter_restore(dev);

	eth_igb_rxtx_control(dev, true);
	eth_igb_link_update(dev, 0);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return -EINVAL;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static int
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);

	if (adapter->stopped)
		return 0;

	eth_igb_rxtx_control(dev, false);

	igb_intr_disable(dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect if PHY reset is not blocked */
	if (hw->mac.type >= e1000_82580 &&
	    (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	eth_igb_dev_set_link_down(dev);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_igb_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

static int
eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_up_phy(hw);
	else
		e1000_power_up_fiber_serdes_link(hw);

	return 0;
}

static int
eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	return 0;
}

static int
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = eth_igb_stop(dev);

	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect if PHY reset is not blocked */
	if (hw->mac.type >= e1000_82580 &&
	    (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_free_queues(dev);

	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	/* Reset any pending lock */
	igb_reset_swfw_lock(hw);

	/* uninitialize PF if max_vfs not zero */
	igb_pf_host_uninit(dev);

	rte_intr_callback_unregister(intr_handle,
				     eth_igb_interrupt_handler, dev);

	/* clear the SYN filter info */
	filter_info->syn_info = 0;

	/* clear the ethertype filters info */
	filter_info->ethertype_mask = 0;
	memset(filter_info->ethertype_filters, 0,
	       E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));

	/* clear the rss filter info */
	memset(&filter_info->rss_info, 0,
	       sizeof(struct igb_rte_flow_rss_conf));

	/* remove all ntuple filters of the device */
	igb_ntuple_filter_uninit(dev);

	/* remove all flex filters of the device */
	igb_flex_filter_uninit(dev);

	/* clear all the filters list */
	igb_filterlist_flush(dev);

	return ret;
}

/*
 * Reset PF device.
 */
static int
eth_igb_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset the PF port, it should notify
	 * all its VFs so that they stay aligned with it. The detailed
	 * notification mechanism is PMD specific and is currently not
	 * implemented. To avoid unexpected behavior in the VFs, resetting
	 * the PF while SR-IOV is active is therefore not supported. It might
	 * be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_igb_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_igb_dev_init(dev);

	return ret;
}


static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	if (hw->mac.type == e1000_82576) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
	} else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
		/* PBS needs to be translated according to a lookup table */
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
		rx_buf_size = (uint32_t)e1000_rxpbs_adjust_82580(rx_buf_size);
		rx_buf_size = (rx_buf_size << 10);
	} else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
	} else {
		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
	}

	return rx_buf_size;
}

/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igb_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.
Here we use an arbitrary value of 1500 which will 1646 * restart after one full frame is pulled from the buffer. There 1647 * could be several smaller frames in the buffer and if so they will 1648 * not trigger the XON until their total number reduces the buffer 1649 * by 1500. 1650 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 1651 */ 1652 rx_buf_size = igb_get_rx_buffer_size(hw); 1653 1654 hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2); 1655 hw->fc.low_water = hw->fc.high_water - 1500; 1656 hw->fc.pause_time = IGB_FC_PAUSE_TIME; 1657 hw->fc.send_xon = 1; 1658 1659 /* Set Flow control, use the tunable location if sane */ 1660 if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4)) 1661 hw->fc.requested_mode = igb_fc_setting; 1662 else 1663 hw->fc.requested_mode = e1000_fc_none; 1664 1665 /* Issue a global reset */ 1666 igb_pf_reset_hw(hw); 1667 E1000_WRITE_REG(hw, E1000_WUC, 0); 1668 1669 diag = e1000_init_hw(hw); 1670 if (diag < 0) 1671 return diag; 1672 1673 E1000_WRITE_REG(hw, E1000_VET, 1674 RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN); 1675 e1000_get_phy_info(hw); 1676 e1000_check_for_link(hw); 1677 1678 return 0; 1679 } 1680 1681 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */ 1682 static void 1683 igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats) 1684 { 1685 int pause_frames; 1686 1687 uint64_t old_gprc = stats->gprc; 1688 uint64_t old_gptc = stats->gptc; 1689 uint64_t old_tpr = stats->tpr; 1690 uint64_t old_tpt = stats->tpt; 1691 uint64_t old_rpthc = stats->rpthc; 1692 uint64_t old_hgptc = stats->hgptc; 1693 1694 if(hw->phy.media_type == e1000_media_type_copper || 1695 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 1696 stats->symerrs += 1697 E1000_READ_REG(hw,E1000_SYMERRS); 1698 stats->sec += E1000_READ_REG(hw, E1000_SEC); 1699 } 1700 1701 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 1702 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 1703 stats->scc += E1000_READ_REG(hw, E1000_SCC); 1704 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 1705 1706 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 1707 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 1708 stats->colc += E1000_READ_REG(hw, E1000_COLC); 1709 stats->dc += E1000_READ_REG(hw, E1000_DC); 1710 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 1711 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 1712 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 1713 /* 1714 ** For watchdog management we need to know if we have been 1715 ** paused during the last interval, so capture that here. 1716 */ 1717 pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 1718 stats->xoffrxc += pause_frames; 1719 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 1720 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 1721 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 1722 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 1723 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 1724 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 1725 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 1726 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 1727 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 1728 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 1729 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 1730 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 1731 1732 /* For the 64-bit byte counters the low dword must be read first. 
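In practice the low/high pair forms a single wide hardware counter: the low dword is accumulated as-is and the high dword is shifted left by 32 bits before being added. The CRC workaround below then subtracts RTE_ETHER_CRC_LEN (4 bytes) for each packet counted since the previous read, so, for example, 1000 newly counted packets remove 4000 bytes from the byte totals.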
*/ 1733 /* Both registers clear on the read of the high dword */ 1734 1735 /* Workaround CRC bytes included in size, take away 4 bytes/packet */ 1736 stats->gorc += E1000_READ_REG(hw, E1000_GORCL); 1737 stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 1738 stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN; 1739 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); 1740 stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 1741 stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN; 1742 1743 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 1744 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 1745 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 1746 stats->roc += E1000_READ_REG(hw, E1000_ROC); 1747 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 1748 1749 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 1750 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 1751 1752 stats->tor += E1000_READ_REG(hw, E1000_TORL); 1753 stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32); 1754 stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 1755 stats->tot += E1000_READ_REG(hw, E1000_TOTL); 1756 stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32); 1757 stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN; 1758 1759 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 1760 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 1761 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 1762 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 1763 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 1764 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 1765 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 1766 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 1767 1768 /* Interrupt Counts */ 1769 1770 stats->iac += E1000_READ_REG(hw, E1000_IAC); 1771 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 1772 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 1773 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 1774 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 1775 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 1776 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 1777 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 1778 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 1779 1780 /* Host to Card Statistics */ 1781 1782 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 1783 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 1784 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 1785 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 1786 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 1787 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 1788 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 1789 stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL); 1790 stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32); 1791 stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN; 1792 stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL); 1793 stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32); 1794 stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN; 1795 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 1796 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 1797 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 1798 1799 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 1800 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 1801 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 1802 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 1803 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 1804 
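/* Like the counters above, these registers are clear-on-read, so each sweep of this function adds only the delta accumulated since the previous call; eth_igb_stats_reset() relies on that by doing a throwaway read and then zeroing the software totals. */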
stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 1805 } 1806 1807 static int 1808 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 1809 { 1810 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1811 struct e1000_hw_stats *stats = 1812 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1813 1814 igb_read_stats_registers(hw, stats); 1815 1816 if (rte_stats == NULL) 1817 return -EINVAL; 1818 1819 /* Rx Errors */ 1820 rte_stats->imissed = stats->mpc; 1821 rte_stats->ierrors = stats->crcerrs + stats->rlec + 1822 stats->rxerrc + stats->algnerrc + stats->cexterr; 1823 1824 /* Tx Errors */ 1825 rte_stats->oerrors = stats->ecol + stats->latecol; 1826 1827 rte_stats->ipackets = stats->gprc; 1828 rte_stats->opackets = stats->gptc; 1829 rte_stats->ibytes = stats->gorc; 1830 rte_stats->obytes = stats->gotc; 1831 return 0; 1832 } 1833 1834 static int 1835 eth_igb_stats_reset(struct rte_eth_dev *dev) 1836 { 1837 struct e1000_hw_stats *hw_stats = 1838 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1839 1840 /* HW registers are cleared on read */ 1841 eth_igb_stats_get(dev, NULL); 1842 1843 /* Reset software totals */ 1844 memset(hw_stats, 0, sizeof(*hw_stats)); 1845 1846 return 0; 1847 } 1848 1849 static int 1850 eth_igb_xstats_reset(struct rte_eth_dev *dev) 1851 { 1852 struct e1000_hw_stats *stats = 1853 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1854 1855 /* HW registers are cleared on read */ 1856 eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS); 1857 1858 /* Reset software totals */ 1859 memset(stats, 0, sizeof(*stats)); 1860 1861 return 0; 1862 } 1863 1864 static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1865 struct rte_eth_xstat_name *xstats_names, 1866 __rte_unused unsigned int size) 1867 { 1868 unsigned i; 1869 1870 if (xstats_names == NULL) 1871 return IGB_NB_XSTATS; 1872 1873 /* Note: limit checked in rte_eth_xstats_names() */ 1874 1875 for (i = 0; i < IGB_NB_XSTATS; i++) { 1876 strlcpy(xstats_names[i].name, rte_igb_stats_strings[i].name, 1877 sizeof(xstats_names[i].name)); 1878 } 1879 1880 return IGB_NB_XSTATS; 1881 } 1882 1883 static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev, 1884 const uint64_t *ids, struct rte_eth_xstat_name *xstats_names, 1885 unsigned int limit) 1886 { 1887 unsigned int i; 1888 1889 if (!ids) { 1890 if (xstats_names == NULL) 1891 return IGB_NB_XSTATS; 1892 1893 for (i = 0; i < IGB_NB_XSTATS; i++) 1894 strlcpy(xstats_names[i].name, 1895 rte_igb_stats_strings[i].name, 1896 sizeof(xstats_names[i].name)); 1897 1898 return IGB_NB_XSTATS; 1899 1900 } else { 1901 struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS]; 1902 1903 eth_igb_xstats_get_names_by_id(dev, NULL, xstats_names_copy, 1904 IGB_NB_XSTATS); 1905 1906 for (i = 0; i < limit; i++) { 1907 if (ids[i] >= IGB_NB_XSTATS) { 1908 PMD_INIT_LOG(ERR, "id value isn't valid"); 1909 return -1; 1910 } 1911 strcpy(xstats_names[i].name, 1912 xstats_names_copy[ids[i]].name); 1913 } 1914 return limit; 1915 } 1916 } 1917 1918 static int 1919 eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1920 unsigned n) 1921 { 1922 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1923 struct e1000_hw_stats *hw_stats = 1924 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1925 unsigned i; 1926 1927 if (n < IGB_NB_XSTATS) 1928 return IGB_NB_XSTATS; 1929 1930 igb_read_stats_registers(hw, hw_stats); 1931 1932 /* If this is a reset xstats is NULL, and we have cleared the 1933 * registers by reading 
them. 1934 */ 1935 if (!xstats) 1936 return 0; 1937 1938 /* Extended stats */ 1939 for (i = 0; i < IGB_NB_XSTATS; i++) { 1940 xstats[i].id = i; 1941 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 1942 rte_igb_stats_strings[i].offset); 1943 } 1944 1945 return IGB_NB_XSTATS; 1946 } 1947 1948 static int 1949 eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1950 uint64_t *values, unsigned int n) 1951 { 1952 unsigned int i; 1953 1954 if (!ids) { 1955 struct e1000_hw *hw = 1956 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1957 struct e1000_hw_stats *hw_stats = 1958 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1959 1960 if (n < IGB_NB_XSTATS) 1961 return IGB_NB_XSTATS; 1962 1963 igb_read_stats_registers(hw, hw_stats); 1964 1965 /* If this is a reset xstats is NULL, and we have cleared the 1966 * registers by reading them. 1967 */ 1968 if (!values) 1969 return 0; 1970 1971 /* Extended stats */ 1972 for (i = 0; i < IGB_NB_XSTATS; i++) 1973 values[i] = *(uint64_t *)(((char *)hw_stats) + 1974 rte_igb_stats_strings[i].offset); 1975 1976 return IGB_NB_XSTATS; 1977 1978 } else { 1979 uint64_t values_copy[IGB_NB_XSTATS]; 1980 1981 eth_igb_xstats_get_by_id(dev, NULL, values_copy, 1982 IGB_NB_XSTATS); 1983 1984 for (i = 0; i < n; i++) { 1985 if (ids[i] >= IGB_NB_XSTATS) { 1986 PMD_INIT_LOG(ERR, "id value isn't valid"); 1987 return -1; 1988 } 1989 values[i] = values_copy[ids[i]]; 1990 } 1991 return n; 1992 } 1993 } 1994 1995 static void 1996 igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats) 1997 { 1998 /* Good Rx packets, include VF loopback */ 1999 UPDATE_VF_STAT(E1000_VFGPRC, 2000 hw_stats->last_gprc, hw_stats->gprc); 2001 2002 /* Good Rx octets, include VF loopback */ 2003 UPDATE_VF_STAT(E1000_VFGORC, 2004 hw_stats->last_gorc, hw_stats->gorc); 2005 2006 /* Good Tx packets, include VF loopback */ 2007 UPDATE_VF_STAT(E1000_VFGPTC, 2008 hw_stats->last_gptc, hw_stats->gptc); 2009 2010 /* Good Tx octets, include VF loopback */ 2011 UPDATE_VF_STAT(E1000_VFGOTC, 2012 hw_stats->last_gotc, hw_stats->gotc); 2013 2014 /* Rx Multicst packets */ 2015 UPDATE_VF_STAT(E1000_VFMPRC, 2016 hw_stats->last_mprc, hw_stats->mprc); 2017 2018 /* Good Rx loopback packets */ 2019 UPDATE_VF_STAT(E1000_VFGPRLBC, 2020 hw_stats->last_gprlbc, hw_stats->gprlbc); 2021 2022 /* Good Rx loopback octets */ 2023 UPDATE_VF_STAT(E1000_VFGORLBC, 2024 hw_stats->last_gorlbc, hw_stats->gorlbc); 2025 2026 /* Good Tx loopback packets */ 2027 UPDATE_VF_STAT(E1000_VFGPTLBC, 2028 hw_stats->last_gptlbc, hw_stats->gptlbc); 2029 2030 /* Good Tx loopback octets */ 2031 UPDATE_VF_STAT(E1000_VFGOTLBC, 2032 hw_stats->last_gotlbc, hw_stats->gotlbc); 2033 } 2034 2035 static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 2036 struct rte_eth_xstat_name *xstats_names, 2037 __rte_unused unsigned limit) 2038 { 2039 unsigned i; 2040 2041 if (xstats_names != NULL) 2042 for (i = 0; i < IGBVF_NB_XSTATS; i++) { 2043 strlcpy(xstats_names[i].name, 2044 rte_igbvf_stats_strings[i].name, 2045 sizeof(xstats_names[i].name)); 2046 } 2047 return IGBVF_NB_XSTATS; 2048 } 2049 2050 static int 2051 eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 2052 unsigned n) 2053 { 2054 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2055 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) 2056 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2057 unsigned i; 2058 2059 if (n < IGBVF_NB_XSTATS) 2060 return IGBVF_NB_XSTATS; 2061 2062 
igbvf_read_stats_registers(hw, hw_stats); 2063 2064 if (!xstats) 2065 return 0; 2066 2067 for (i = 0; i < IGBVF_NB_XSTATS; i++) { 2068 xstats[i].id = i; 2069 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 2070 rte_igbvf_stats_strings[i].offset); 2071 } 2072 2073 return IGBVF_NB_XSTATS; 2074 } 2075 2076 static int 2077 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 2078 { 2079 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2080 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) 2081 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2082 2083 igbvf_read_stats_registers(hw, hw_stats); 2084 2085 if (rte_stats == NULL) 2086 return -EINVAL; 2087 2088 rte_stats->ipackets = hw_stats->gprc; 2089 rte_stats->ibytes = hw_stats->gorc; 2090 rte_stats->opackets = hw_stats->gptc; 2091 rte_stats->obytes = hw_stats->gotc; 2092 return 0; 2093 } 2094 2095 static int 2096 eth_igbvf_stats_reset(struct rte_eth_dev *dev) 2097 { 2098 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*) 2099 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2100 2101 /* Sync HW register to the last stats */ 2102 eth_igbvf_stats_get(dev, NULL); 2103 2104 /* reset HW current stats*/ 2105 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) - 2106 offsetof(struct e1000_vf_stats, gprc)); 2107 2108 return 0; 2109 } 2110 2111 static int 2112 eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 2113 size_t fw_size) 2114 { 2115 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2116 struct e1000_fw_version fw; 2117 int ret; 2118 2119 e1000_get_fw_version(hw, &fw); 2120 2121 switch (hw->mac.type) { 2122 case e1000_i210: 2123 case e1000_i211: 2124 if (!(e1000_get_flash_presence_i210(hw))) { 2125 ret = snprintf(fw_version, fw_size, 2126 "%2d.%2d-%d", 2127 fw.invm_major, fw.invm_minor, 2128 fw.invm_img_type); 2129 break; 2130 } 2131 /* fall through */ 2132 default: 2133 /* if option rom is valid, display its version too */ 2134 if (fw.or_valid) { 2135 ret = snprintf(fw_version, fw_size, 2136 "%d.%d, 0x%08x, %d.%d.%d", 2137 fw.eep_major, fw.eep_minor, fw.etrack_id, 2138 fw.or_major, fw.or_build, fw.or_patch); 2139 /* no option rom */ 2140 } else { 2141 if (fw.etrack_id != 0X0000) { 2142 ret = snprintf(fw_version, fw_size, 2143 "%d.%d, 0x%08x", 2144 fw.eep_major, fw.eep_minor, 2145 fw.etrack_id); 2146 } else { 2147 ret = snprintf(fw_version, fw_size, 2148 "%d.%d.%d", 2149 fw.eep_major, fw.eep_minor, 2150 fw.eep_build); 2151 } 2152 } 2153 break; 2154 } 2155 if (ret < 0) 2156 return -EINVAL; 2157 2158 ret += 1; /* add the size of '\0' */ 2159 if (fw_size < (size_t)ret) 2160 return ret; 2161 else 2162 return 0; 2163 } 2164 2165 static int 2166 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 2167 { 2168 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2169 2170 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 2171 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. 
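RLPML bounds the longest frame the MAC will accept; 0x3FFF corresponds to 16383 bytes.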
*/ 2172 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 2173 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); 2174 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | 2175 dev_info->rx_queue_offload_capa; 2176 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); 2177 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | 2178 dev_info->tx_queue_offload_capa; 2179 2180 switch (hw->mac.type) { 2181 case e1000_82575: 2182 dev_info->max_rx_queues = 4; 2183 dev_info->max_tx_queues = 4; 2184 dev_info->max_vmdq_pools = 0; 2185 break; 2186 2187 case e1000_82576: 2188 dev_info->max_rx_queues = 16; 2189 dev_info->max_tx_queues = 16; 2190 dev_info->max_vmdq_pools = ETH_8_POOLS; 2191 dev_info->vmdq_queue_num = 16; 2192 break; 2193 2194 case e1000_82580: 2195 dev_info->max_rx_queues = 8; 2196 dev_info->max_tx_queues = 8; 2197 dev_info->max_vmdq_pools = ETH_8_POOLS; 2198 dev_info->vmdq_queue_num = 8; 2199 break; 2200 2201 case e1000_i350: 2202 dev_info->max_rx_queues = 8; 2203 dev_info->max_tx_queues = 8; 2204 dev_info->max_vmdq_pools = ETH_8_POOLS; 2205 dev_info->vmdq_queue_num = 8; 2206 break; 2207 2208 case e1000_i354: 2209 dev_info->max_rx_queues = 8; 2210 dev_info->max_tx_queues = 8; 2211 break; 2212 2213 case e1000_i210: 2214 dev_info->max_rx_queues = 4; 2215 dev_info->max_tx_queues = 4; 2216 dev_info->max_vmdq_pools = 0; 2217 break; 2218 2219 case e1000_i211: 2220 dev_info->max_rx_queues = 2; 2221 dev_info->max_tx_queues = 2; 2222 dev_info->max_vmdq_pools = 0; 2223 break; 2224 2225 default: 2226 /* Should not happen */ 2227 return -EINVAL; 2228 } 2229 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t); 2230 dev_info->reta_size = ETH_RSS_RETA_SIZE_128; 2231 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL; 2232 2233 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2234 .rx_thresh = { 2235 .pthresh = IGB_DEFAULT_RX_PTHRESH, 2236 .hthresh = IGB_DEFAULT_RX_HTHRESH, 2237 .wthresh = IGB_DEFAULT_RX_WTHRESH, 2238 }, 2239 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, 2240 .rx_drop_en = 0, 2241 .offloads = 0, 2242 }; 2243 2244 dev_info->default_txconf = (struct rte_eth_txconf) { 2245 .tx_thresh = { 2246 .pthresh = IGB_DEFAULT_TX_PTHRESH, 2247 .hthresh = IGB_DEFAULT_TX_HTHRESH, 2248 .wthresh = IGB_DEFAULT_TX_WTHRESH, 2249 }, 2250 .offloads = 0, 2251 }; 2252 2253 dev_info->rx_desc_lim = rx_desc_lim; 2254 dev_info->tx_desc_lim = tx_desc_lim; 2255 2256 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | 2257 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | 2258 ETH_LINK_SPEED_1G; 2259 2260 dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD; 2261 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 2262 2263 return 0; 2264 } 2265 2266 static const uint32_t * 2267 eth_igb_supported_ptypes_get(struct rte_eth_dev *dev) 2268 { 2269 static const uint32_t ptypes[] = { 2270 /* refers to igb_rxd_pkt_info_to_pkt_type() */ 2271 RTE_PTYPE_L2_ETHER, 2272 RTE_PTYPE_L3_IPV4, 2273 RTE_PTYPE_L3_IPV4_EXT, 2274 RTE_PTYPE_L3_IPV6, 2275 RTE_PTYPE_L3_IPV6_EXT, 2276 RTE_PTYPE_L4_TCP, 2277 RTE_PTYPE_L4_UDP, 2278 RTE_PTYPE_L4_SCTP, 2279 RTE_PTYPE_TUNNEL_IP, 2280 RTE_PTYPE_INNER_L3_IPV6, 2281 RTE_PTYPE_INNER_L3_IPV6_EXT, 2282 RTE_PTYPE_INNER_L4_TCP, 2283 RTE_PTYPE_INNER_L4_UDP, 2284 RTE_PTYPE_UNKNOWN 2285 }; 2286 2287 if (dev->rx_pkt_burst == eth_igb_recv_pkts || 2288 dev->rx_pkt_burst == eth_igb_recv_scattered_pkts) 2289 return ptypes; 2290 return NULL; 2291 } 2292 2293 static int 2294 eth_igbvf_infos_get(struct rte_eth_dev *dev, 
struct rte_eth_dev_info *dev_info) 2295 { 2296 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2297 2298 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 2299 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ 2300 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 2301 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | 2302 DEV_TX_OFFLOAD_IPV4_CKSUM | 2303 DEV_TX_OFFLOAD_UDP_CKSUM | 2304 DEV_TX_OFFLOAD_TCP_CKSUM | 2305 DEV_TX_OFFLOAD_SCTP_CKSUM | 2306 DEV_TX_OFFLOAD_TCP_TSO; 2307 switch (hw->mac.type) { 2308 case e1000_vfadapt: 2309 dev_info->max_rx_queues = 2; 2310 dev_info->max_tx_queues = 2; 2311 break; 2312 case e1000_vfadapt_i350: 2313 dev_info->max_rx_queues = 1; 2314 dev_info->max_tx_queues = 1; 2315 break; 2316 default: 2317 /* Should not happen */ 2318 return -EINVAL; 2319 } 2320 2321 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); 2322 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | 2323 dev_info->rx_queue_offload_capa; 2324 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); 2325 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | 2326 dev_info->tx_queue_offload_capa; 2327 2328 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2329 .rx_thresh = { 2330 .pthresh = IGB_DEFAULT_RX_PTHRESH, 2331 .hthresh = IGB_DEFAULT_RX_HTHRESH, 2332 .wthresh = IGB_DEFAULT_RX_WTHRESH, 2333 }, 2334 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, 2335 .rx_drop_en = 0, 2336 .offloads = 0, 2337 }; 2338 2339 dev_info->default_txconf = (struct rte_eth_txconf) { 2340 .tx_thresh = { 2341 .pthresh = IGB_DEFAULT_TX_PTHRESH, 2342 .hthresh = IGB_DEFAULT_TX_HTHRESH, 2343 .wthresh = IGB_DEFAULT_TX_WTHRESH, 2344 }, 2345 .offloads = 0, 2346 }; 2347 2348 dev_info->rx_desc_lim = rx_desc_lim; 2349 dev_info->tx_desc_lim = tx_desc_lim; 2350 2351 return 0; 2352 } 2353 2354 /* return 0 means link status changed, -1 means not changed */ 2355 static int 2356 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) 2357 { 2358 struct e1000_hw *hw = 2359 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2360 struct rte_eth_link link; 2361 int link_check, count; 2362 2363 link_check = 0; 2364 hw->mac.get_link_status = 1; 2365 2366 /* possible wait-to-complete in up to 9 seconds */ 2367 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) { 2368 /* Read the real link status */ 2369 switch (hw->phy.media_type) { 2370 case e1000_media_type_copper: 2371 /* Do the work to read phy */ 2372 e1000_check_for_link(hw); 2373 link_check = !hw->mac.get_link_status; 2374 break; 2375 2376 case e1000_media_type_fiber: 2377 e1000_check_for_link(hw); 2378 link_check = (E1000_READ_REG(hw, E1000_STATUS) & 2379 E1000_STATUS_LU); 2380 break; 2381 2382 case e1000_media_type_internal_serdes: 2383 e1000_check_for_link(hw); 2384 link_check = hw->mac.serdes_has_link; 2385 break; 2386 2387 /* VF device is type_unknown */ 2388 case e1000_media_type_unknown: 2389 eth_igbvf_link_update(hw); 2390 link_check = !hw->mac.get_link_status; 2391 break; 2392 2393 default: 2394 break; 2395 } 2396 if (link_check || wait_to_complete == 0) 2397 break; 2398 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL); 2399 } 2400 memset(&link, 0, sizeof(link)); 2401 2402 /* Now we check if a transition has happened */ 2403 if (link_check) { 2404 uint16_t duplex, speed; 2405 hw->mac.ops.get_link_up_info(hw, &speed, &duplex); 2406 link.link_duplex = (duplex == FULL_DUPLEX) ? 
2407 ETH_LINK_FULL_DUPLEX : 2408 ETH_LINK_HALF_DUPLEX; 2409 link.link_speed = speed; 2410 link.link_status = ETH_LINK_UP; 2411 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 2412 ETH_LINK_SPEED_FIXED); 2413 } else if (!link_check) { 2414 link.link_speed = 0; 2415 link.link_duplex = ETH_LINK_HALF_DUPLEX; 2416 link.link_status = ETH_LINK_DOWN; 2417 link.link_autoneg = ETH_LINK_FIXED; 2418 } 2419 2420 return rte_eth_linkstatus_set(dev, &link); 2421 } 2422 2423 /* 2424 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. 2425 * For ASF and Pass Through versions of f/w this means 2426 * that the driver is loaded. 2427 */ 2428 static void 2429 igb_hw_control_acquire(struct e1000_hw *hw) 2430 { 2431 uint32_t ctrl_ext; 2432 2433 /* Let firmware know the driver has taken over */ 2434 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2435 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2436 } 2437 2438 /* 2439 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit. 2440 * For ASF and Pass Through versions of f/w this means that the 2441 * driver is no longer loaded. 2442 */ 2443 static void 2444 igb_hw_control_release(struct e1000_hw *hw) 2445 { 2446 uint32_t ctrl_ext; 2447 2448 /* Let firmware taken over control of h/w */ 2449 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2450 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 2451 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2452 } 2453 2454 /* 2455 * Bit of a misnomer, what this really means is 2456 * to enable OS management of the system... aka 2457 * to disable special hardware management features. 2458 */ 2459 static void 2460 igb_init_manageability(struct e1000_hw *hw) 2461 { 2462 if (e1000_enable_mng_pass_thru(hw)) { 2463 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); 2464 uint32_t manc = E1000_READ_REG(hw, E1000_MANC); 2465 2466 /* disable hardware interception of ARP */ 2467 manc &= ~(E1000_MANC_ARP_EN); 2468 2469 /* enable receiving management packets to the host */ 2470 manc |= E1000_MANC_EN_MNG2HOST; 2471 manc2h |= 1 << 5; /* Mng Port 623 */ 2472 manc2h |= 1 << 6; /* Mng Port 664 */ 2473 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); 2474 E1000_WRITE_REG(hw, E1000_MANC, manc); 2475 } 2476 } 2477 2478 static void 2479 igb_release_manageability(struct e1000_hw *hw) 2480 { 2481 if (e1000_enable_mng_pass_thru(hw)) { 2482 uint32_t manc = E1000_READ_REG(hw, E1000_MANC); 2483 2484 manc |= E1000_MANC_ARP_EN; 2485 manc &= ~E1000_MANC_EN_MNG2HOST; 2486 2487 E1000_WRITE_REG(hw, E1000_MANC, manc); 2488 } 2489 } 2490 2491 static int 2492 eth_igb_promiscuous_enable(struct rte_eth_dev *dev) 2493 { 2494 struct e1000_hw *hw = 2495 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2496 uint32_t rctl; 2497 2498 rctl = E1000_READ_REG(hw, E1000_RCTL); 2499 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2500 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2501 2502 return 0; 2503 } 2504 2505 static int 2506 eth_igb_promiscuous_disable(struct rte_eth_dev *dev) 2507 { 2508 struct e1000_hw *hw = 2509 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2510 uint32_t rctl; 2511 2512 rctl = E1000_READ_REG(hw, E1000_RCTL); 2513 rctl &= (~E1000_RCTL_UPE); 2514 if (dev->data->all_multicast == 1) 2515 rctl |= E1000_RCTL_MPE; 2516 else 2517 rctl &= (~E1000_RCTL_MPE); 2518 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2519 2520 return 0; 2521 } 2522 2523 static int 2524 eth_igb_allmulticast_enable(struct rte_eth_dev *dev) 2525 { 2526 struct e1000_hw *hw = 2527 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2528 uint32_t rctl; 2529 2530 rctl = E1000_READ_REG(hw, E1000_RCTL); 2531 
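/* MPE (multicast promiscuous enable) makes the MAC accept all multicast frames regardless of the multicast table entries; unicast promiscuous (UPE) is left as previously configured. */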
rctl |= E1000_RCTL_MPE; 2532 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2533 2534 return 0; 2535 } 2536 2537 static int 2538 eth_igb_allmulticast_disable(struct rte_eth_dev *dev) 2539 { 2540 struct e1000_hw *hw = 2541 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2542 uint32_t rctl; 2543 2544 if (dev->data->promiscuous == 1) 2545 return 0; /* must remain in all_multicast mode */ 2546 rctl = E1000_READ_REG(hw, E1000_RCTL); 2547 rctl &= (~E1000_RCTL_MPE); 2548 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2549 2550 return 0; 2551 } 2552 2553 static int 2554 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 2555 { 2556 struct e1000_hw *hw = 2557 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2558 struct e1000_vfta * shadow_vfta = 2559 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 2560 uint32_t vfta; 2561 uint32_t vid_idx; 2562 uint32_t vid_bit; 2563 2564 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & 2565 E1000_VFTA_ENTRY_MASK); 2566 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); 2567 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); 2568 if (on) 2569 vfta |= vid_bit; 2570 else 2571 vfta &= ~vid_bit; 2572 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); 2573 2574 /* update local VFTA copy */ 2575 shadow_vfta->vfta[vid_idx] = vfta; 2576 2577 return 0; 2578 } 2579 2580 static int 2581 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, 2582 enum rte_vlan_type vlan_type, 2583 uint16_t tpid) 2584 { 2585 struct e1000_hw *hw = 2586 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2587 uint32_t reg, qinq; 2588 2589 qinq = E1000_READ_REG(hw, E1000_CTRL_EXT); 2590 qinq &= E1000_CTRL_EXT_EXT_VLAN; 2591 2592 /* only outer TPID of double VLAN can be configured*/ 2593 if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) { 2594 reg = E1000_READ_REG(hw, E1000_VET); 2595 reg = (reg & (~E1000_VET_VET_EXT)) | 2596 ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT); 2597 E1000_WRITE_REG(hw, E1000_VET, reg); 2598 2599 return 0; 2600 } 2601 2602 /* all other TPID values are read-only*/ 2603 PMD_DRV_LOG(ERR, "Not supported"); 2604 2605 return -ENOTSUP; 2606 } 2607 2608 static void 2609 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev) 2610 { 2611 struct e1000_hw *hw = 2612 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2613 uint32_t reg; 2614 2615 /* Filter Table Disable */ 2616 reg = E1000_READ_REG(hw, E1000_RCTL); 2617 reg &= ~E1000_RCTL_CFIEN; 2618 reg &= ~E1000_RCTL_VFE; 2619 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2620 } 2621 2622 static void 2623 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2624 { 2625 struct e1000_hw *hw = 2626 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2627 struct e1000_vfta * shadow_vfta = 2628 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 2629 uint32_t reg; 2630 int i; 2631 2632 /* Filter Table Enable, CFI not used for packet acceptance */ 2633 reg = E1000_READ_REG(hw, E1000_RCTL); 2634 reg &= ~E1000_RCTL_CFIEN; 2635 reg |= E1000_RCTL_VFE; 2636 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2637 2638 /* restore VFTA table */ 2639 for (i = 0; i < IGB_VFTA_SIZE; i++) 2640 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); 2641 } 2642 2643 static void 2644 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev) 2645 { 2646 struct e1000_hw *hw = 2647 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2648 uint32_t reg; 2649 2650 /* VLAN Mode Disable */ 2651 reg = E1000_READ_REG(hw, E1000_CTRL); 2652 reg &= ~E1000_CTRL_VME; 2653 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2654 } 2655 2656 static void 2657 
igb_vlan_hw_strip_enable(struct rte_eth_dev *dev) 2658 { 2659 struct e1000_hw *hw = 2660 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2661 uint32_t reg; 2662 2663 /* VLAN Mode Enable */ 2664 reg = E1000_READ_REG(hw, E1000_CTRL); 2665 reg |= E1000_CTRL_VME; 2666 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2667 } 2668 2669 static void 2670 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2671 { 2672 struct e1000_hw *hw = 2673 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2674 uint32_t reg; 2675 2676 /* CTRL_EXT: Extended VLAN */ 2677 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2678 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN; 2679 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2680 2681 /* Update maximum packet length */ 2682 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 2683 E1000_WRITE_REG(hw, E1000_RLPML, 2684 dev->data->dev_conf.rxmode.max_rx_pkt_len); 2685 } 2686 2687 static void 2688 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2689 { 2690 struct e1000_hw *hw = 2691 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2692 uint32_t reg; 2693 2694 /* CTRL_EXT: Extended VLAN */ 2695 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2696 reg |= E1000_CTRL_EXT_EXTEND_VLAN; 2697 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2698 2699 /* Update maximum packet length */ 2700 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 2701 E1000_WRITE_REG(hw, E1000_RLPML, 2702 dev->data->dev_conf.rxmode.max_rx_pkt_len + 2703 VLAN_TAG_SIZE); 2704 } 2705 2706 static int 2707 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2708 { 2709 struct rte_eth_rxmode *rxmode; 2710 2711 rxmode = &dev->data->dev_conf.rxmode; 2712 if(mask & ETH_VLAN_STRIP_MASK){ 2713 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2714 igb_vlan_hw_strip_enable(dev); 2715 else 2716 igb_vlan_hw_strip_disable(dev); 2717 } 2718 2719 if(mask & ETH_VLAN_FILTER_MASK){ 2720 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 2721 igb_vlan_hw_filter_enable(dev); 2722 else 2723 igb_vlan_hw_filter_disable(dev); 2724 } 2725 2726 if(mask & ETH_VLAN_EXTEND_MASK){ 2727 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2728 igb_vlan_hw_extend_enable(dev); 2729 else 2730 igb_vlan_hw_extend_disable(dev); 2731 } 2732 2733 return 0; 2734 } 2735 2736 2737 /** 2738 * It enables the interrupt mask and then enable the interrupt. 2739 * 2740 * @param dev 2741 * Pointer to struct rte_eth_dev. 2742 * @param on 2743 * Enable or Disable 2744 * 2745 * @return 2746 * - On success, zero. 2747 * - On failure, a negative value. 2748 */ 2749 static int 2750 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 2751 { 2752 struct e1000_interrupt *intr = 2753 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2754 2755 if (on) 2756 intr->mask |= E1000_ICR_LSC; 2757 else 2758 intr->mask &= ~E1000_ICR_LSC; 2759 2760 return 0; 2761 } 2762 2763 /* It clears the interrupt causes and enables the interrupt. 2764 * It will be called once only during nic initialized. 2765 * 2766 * @param dev 2767 * Pointer to struct rte_eth_dev. 2768 * 2769 * @return 2770 * - On success, zero. 2771 * - On failure, a negative value. 2772 */ 2773 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev) 2774 { 2775 uint32_t mask, regval; 2776 int ret; 2777 struct e1000_hw *hw = 2778 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2779 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2780 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2781 int misc_shift = rte_intr_allow_others(intr_handle) ? 
1 : 0; 2782 struct rte_eth_dev_info dev_info; 2783 2784 memset(&dev_info, 0, sizeof(dev_info)); 2785 ret = eth_igb_infos_get(dev, &dev_info); 2786 if (ret != 0) 2787 return ret; 2788 2789 mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift; 2790 regval = E1000_READ_REG(hw, E1000_EIMS); 2791 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); 2792 2793 return 0; 2794 } 2795 2796 /* 2797 * It reads ICR and gets interrupt causes, check it and set a bit flag 2798 * to update link status. 2799 * 2800 * @param dev 2801 * Pointer to struct rte_eth_dev. 2802 * 2803 * @return 2804 * - On success, zero. 2805 * - On failure, a negative value. 2806 */ 2807 static int 2808 eth_igb_interrupt_get_status(struct rte_eth_dev *dev) 2809 { 2810 uint32_t icr; 2811 struct e1000_hw *hw = 2812 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2813 struct e1000_interrupt *intr = 2814 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2815 2816 igb_intr_disable(dev); 2817 2818 /* read-on-clear nic registers here */ 2819 icr = E1000_READ_REG(hw, E1000_ICR); 2820 2821 intr->flags = 0; 2822 if (icr & E1000_ICR_LSC) { 2823 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; 2824 } 2825 2826 if (icr & E1000_ICR_VMMB) 2827 intr->flags |= E1000_FLAG_MAILBOX; 2828 2829 return 0; 2830 } 2831 2832 /* 2833 * It executes link_update after knowing an interrupt is prsent. 2834 * 2835 * @param dev 2836 * Pointer to struct rte_eth_dev. 2837 * 2838 * @return 2839 * - On success, zero. 2840 * - On failure, a negative value. 2841 */ 2842 static int 2843 eth_igb_interrupt_action(struct rte_eth_dev *dev, 2844 struct rte_intr_handle *intr_handle) 2845 { 2846 struct e1000_hw *hw = 2847 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2848 struct e1000_interrupt *intr = 2849 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2850 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2851 struct rte_eth_link link; 2852 int ret; 2853 2854 if (intr->flags & E1000_FLAG_MAILBOX) { 2855 igb_pf_mbx_process(dev); 2856 intr->flags &= ~E1000_FLAG_MAILBOX; 2857 } 2858 2859 igb_intr_enable(dev); 2860 rte_intr_ack(intr_handle); 2861 2862 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) { 2863 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; 2864 2865 /* set get_link_status to check register later */ 2866 hw->mac.get_link_status = 1; 2867 ret = eth_igb_link_update(dev, 0); 2868 2869 /* check if link has changed */ 2870 if (ret < 0) 2871 return 0; 2872 2873 rte_eth_linkstatus_get(dev, &link); 2874 if (link.link_status) { 2875 PMD_INIT_LOG(INFO, 2876 " Port %d: Link Up - speed %u Mbps - %s", 2877 dev->data->port_id, 2878 (unsigned)link.link_speed, 2879 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 2880 "full-duplex" : "half-duplex"); 2881 } else { 2882 PMD_INIT_LOG(INFO, " Port %d: Link Down", 2883 dev->data->port_id); 2884 } 2885 2886 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 2887 pci_dev->addr.domain, 2888 pci_dev->addr.bus, 2889 pci_dev->addr.devid, 2890 pci_dev->addr.function); 2891 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 2892 } 2893 2894 return 0; 2895 } 2896 2897 /** 2898 * Interrupt handler which shall be registered at first. 2899 * 2900 * @param handle 2901 * Pointer to interrupt handle. 2902 * @param param 2903 * The address of parameter (struct rte_eth_dev *) regsitered before. 
2904 * 2905 * @return 2906 * void 2907 */ 2908 static void 2909 eth_igb_interrupt_handler(void *param) 2910 { 2911 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2912 2913 eth_igb_interrupt_get_status(dev); 2914 eth_igb_interrupt_action(dev, dev->intr_handle); 2915 } 2916 2917 static int 2918 eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev) 2919 { 2920 uint32_t eicr; 2921 struct e1000_hw *hw = 2922 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2923 struct e1000_interrupt *intr = 2924 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2925 2926 igbvf_intr_disable(hw); 2927 2928 /* read-on-clear nic registers here */ 2929 eicr = E1000_READ_REG(hw, E1000_EICR); 2930 intr->flags = 0; 2931 2932 if (eicr == E1000_VTIVAR_MISC_MAILBOX) 2933 intr->flags |= E1000_FLAG_MAILBOX; 2934 2935 return 0; 2936 } 2937 2938 void igbvf_mbx_process(struct rte_eth_dev *dev) 2939 { 2940 struct e1000_hw *hw = 2941 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2942 struct e1000_mbx_info *mbx = &hw->mbx; 2943 u32 in_msg = 0; 2944 2945 /* peek the message first */ 2946 in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0)); 2947 2948 /* PF reset VF event */ 2949 if (in_msg == E1000_PF_CONTROL_MSG) { 2950 /* dummy mbx read to ack pf */ 2951 if (mbx->ops.read(hw, &in_msg, 1, 0)) 2952 return; 2953 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 2954 NULL); 2955 } 2956 } 2957 2958 static int 2959 eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle) 2960 { 2961 struct e1000_interrupt *intr = 2962 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2963 2964 if (intr->flags & E1000_FLAG_MAILBOX) { 2965 igbvf_mbx_process(dev); 2966 intr->flags &= ~E1000_FLAG_MAILBOX; 2967 } 2968 2969 igbvf_intr_enable(dev); 2970 rte_intr_ack(intr_handle); 2971 2972 return 0; 2973 } 2974 2975 static void 2976 eth_igbvf_interrupt_handler(void *param) 2977 { 2978 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2979 2980 eth_igbvf_interrupt_get_status(dev); 2981 eth_igbvf_interrupt_action(dev, dev->intr_handle); 2982 } 2983 2984 static int 2985 eth_igb_led_on(struct rte_eth_dev *dev) 2986 { 2987 struct e1000_hw *hw; 2988 2989 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2990 return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; 2991 } 2992 2993 static int 2994 eth_igb_led_off(struct rte_eth_dev *dev) 2995 { 2996 struct e1000_hw *hw; 2997 2998 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2999 return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; 3000 } 3001 3002 static int 3003 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3004 { 3005 struct e1000_hw *hw; 3006 uint32_t ctrl; 3007 int tx_pause; 3008 int rx_pause; 3009 3010 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3011 fc_conf->pause_time = hw->fc.pause_time; 3012 fc_conf->high_water = hw->fc.high_water; 3013 fc_conf->low_water = hw->fc.low_water; 3014 fc_conf->send_xon = hw->fc.send_xon; 3015 fc_conf->autoneg = hw->mac.autoneg; 3016 3017 /* 3018 * Return rx_pause and tx_pause status according to actual setting of 3019 * the TFCE and RFCE bits in the CTRL register. 
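* Both bits set maps to RTE_FC_FULL, RFCE alone to RTE_FC_RX_PAUSE, TFCE alone to RTE_FC_TX_PAUSE, and neither bit to RTE_FC_NONE.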
3020 */ 3021 ctrl = E1000_READ_REG(hw, E1000_CTRL); 3022 if (ctrl & E1000_CTRL_TFCE) 3023 tx_pause = 1; 3024 else 3025 tx_pause = 0; 3026 3027 if (ctrl & E1000_CTRL_RFCE) 3028 rx_pause = 1; 3029 else 3030 rx_pause = 0; 3031 3032 if (rx_pause && tx_pause) 3033 fc_conf->mode = RTE_FC_FULL; 3034 else if (rx_pause) 3035 fc_conf->mode = RTE_FC_RX_PAUSE; 3036 else if (tx_pause) 3037 fc_conf->mode = RTE_FC_TX_PAUSE; 3038 else 3039 fc_conf->mode = RTE_FC_NONE; 3040 3041 return 0; 3042 } 3043 3044 static int 3045 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3046 { 3047 struct e1000_hw *hw; 3048 int err; 3049 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { 3050 e1000_fc_none, 3051 e1000_fc_rx_pause, 3052 e1000_fc_tx_pause, 3053 e1000_fc_full 3054 }; 3055 uint32_t rx_buf_size; 3056 uint32_t max_high_water; 3057 uint32_t rctl; 3058 uint32_t ctrl; 3059 3060 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3061 if (fc_conf->autoneg != hw->mac.autoneg) 3062 return -ENOTSUP; 3063 rx_buf_size = igb_get_rx_buffer_size(hw); 3064 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 3065 3066 /* At least reserve one Ethernet frame for watermark */ 3067 max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN; 3068 if ((fc_conf->high_water > max_high_water) || 3069 (fc_conf->high_water < fc_conf->low_water)) { 3070 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); 3071 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); 3072 return -EINVAL; 3073 } 3074 3075 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; 3076 hw->fc.pause_time = fc_conf->pause_time; 3077 hw->fc.high_water = fc_conf->high_water; 3078 hw->fc.low_water = fc_conf->low_water; 3079 hw->fc.send_xon = fc_conf->send_xon; 3080 3081 err = e1000_setup_link_generic(hw); 3082 if (err == E1000_SUCCESS) { 3083 3084 /* check if we want to forward MAC frames - driver doesn't have native 3085 * capability to do that, so we'll write the registers ourselves */ 3086 3087 rctl = E1000_READ_REG(hw, E1000_RCTL); 3088 3089 /* set or clear MFLCN.PMCF bit depending on configuration */ 3090 if (fc_conf->mac_ctrl_frame_fwd != 0) 3091 rctl |= E1000_RCTL_PMCF; 3092 else 3093 rctl &= ~E1000_RCTL_PMCF; 3094 3095 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 3096 3097 /* 3098 * check if we want to change flow control mode - driver doesn't have native 3099 * capability to do that, so we'll write the registers ourselves 3100 */ 3101 ctrl = E1000_READ_REG(hw, E1000_CTRL); 3102 3103 /* 3104 * set or clear E1000_CTRL_RFCE and E1000_CTRL_TFCE bits depending 3105 * on configuration 3106 */ 3107 switch (fc_conf->mode) { 3108 case RTE_FC_NONE: 3109 ctrl &= ~E1000_CTRL_RFCE & ~E1000_CTRL_TFCE; 3110 break; 3111 case RTE_FC_RX_PAUSE: 3112 ctrl |= E1000_CTRL_RFCE; 3113 ctrl &= ~E1000_CTRL_TFCE; 3114 break; 3115 case RTE_FC_TX_PAUSE: 3116 ctrl |= E1000_CTRL_TFCE; 3117 ctrl &= ~E1000_CTRL_RFCE; 3118 break; 3119 case RTE_FC_FULL: 3120 ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE; 3121 break; 3122 default: 3123 PMD_INIT_LOG(ERR, "invalid flow control mode"); 3124 return -EINVAL; 3125 } 3126 3127 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 3128 3129 E1000_WRITE_FLUSH(hw); 3130 3131 return 0; 3132 } 3133 3134 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); 3135 return -EIO; 3136 } 3137 3138 #define E1000_RAH_POOLSEL_SHIFT (18) 3139 static int 3140 eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 3141 uint32_t index, uint32_t pool) 3142 { 3143 struct e1000_hw *hw = 
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3144 uint32_t rah; 3145 3146 e1000_rar_set(hw, mac_addr->addr_bytes, index); 3147 rah = E1000_READ_REG(hw, E1000_RAH(index)); 3148 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool)); 3149 E1000_WRITE_REG(hw, E1000_RAH(index), rah); 3150 return 0; 3151 } 3152 3153 static void 3154 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) 3155 { 3156 uint8_t addr[RTE_ETHER_ADDR_LEN]; 3157 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3158 3159 memset(addr, 0, sizeof(addr)); 3160 3161 e1000_rar_set(hw, addr, index); 3162 } 3163 3164 static int 3165 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, 3166 struct rte_ether_addr *addr) 3167 { 3168 eth_igb_rar_clear(dev, 0); 3169 eth_igb_rar_set(dev, (void *)addr, 0, 0); 3170 3171 return 0; 3172 } 3173 /* 3174 * Virtual Function operations 3175 */ 3176 static void 3177 igbvf_intr_disable(struct e1000_hw *hw) 3178 { 3179 PMD_INIT_FUNC_TRACE(); 3180 3181 /* Clear interrupt mask to stop from interrupts being generated */ 3182 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF); 3183 3184 E1000_WRITE_FLUSH(hw); 3185 } 3186 3187 static void 3188 igbvf_stop_adapter(struct rte_eth_dev *dev) 3189 { 3190 u32 reg_val; 3191 u16 i; 3192 struct rte_eth_dev_info dev_info; 3193 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3194 int ret; 3195 3196 memset(&dev_info, 0, sizeof(dev_info)); 3197 ret = eth_igbvf_infos_get(dev, &dev_info); 3198 if (ret != 0) 3199 return; 3200 3201 /* Clear interrupt mask to stop from interrupts being generated */ 3202 igbvf_intr_disable(hw); 3203 3204 /* Clear any pending interrupts, flush previous writes */ 3205 E1000_READ_REG(hw, E1000_EICR); 3206 3207 /* Disable the transmit unit. Each queue must be disabled. */ 3208 for (i = 0; i < dev_info.max_tx_queues; i++) 3209 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH); 3210 3211 /* Disable the receive unit by stopping each queue */ 3212 for (i = 0; i < dev_info.max_rx_queues; i++) { 3213 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i)); 3214 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE; 3215 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val); 3216 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE) 3217 ; 3218 } 3219 3220 /* flush all queues disables */ 3221 E1000_WRITE_FLUSH(hw); 3222 msec_delay(2); 3223 } 3224 3225 static int eth_igbvf_link_update(struct e1000_hw *hw) 3226 { 3227 struct e1000_mbx_info *mbx = &hw->mbx; 3228 struct e1000_mac_info *mac = &hw->mac; 3229 int ret_val = E1000_SUCCESS; 3230 3231 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf"); 3232 3233 /* 3234 * We only want to run this if there has been a rst asserted. 
3235 * in this case that could mean a link change, device reset, 3236 * or a virtual function reset 3237 */ 3238 3239 /* If we were hit with a reset or timeout drop the link */ 3240 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout) 3241 mac->get_link_status = TRUE; 3242 3243 if (!mac->get_link_status) 3244 goto out; 3245 3246 /* if link status is down no point in checking to see if pf is up */ 3247 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) 3248 goto out; 3249 3250 /* if we passed all the tests above then the link is up and we no 3251 * longer need to check for link */ 3252 mac->get_link_status = FALSE; 3253 3254 out: 3255 return ret_val; 3256 } 3257 3258 3259 static int 3260 igbvf_dev_configure(struct rte_eth_dev *dev) 3261 { 3262 struct rte_eth_conf* conf = &dev->data->dev_conf; 3263 3264 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 3265 dev->data->port_id); 3266 3267 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 3268 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 3269 3270 /* 3271 * VF has no ability to enable/disable HW CRC 3272 * Keep the persistent behavior the same as Host PF 3273 */ 3274 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC 3275 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { 3276 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 3277 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; 3278 } 3279 #else 3280 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { 3281 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 3282 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; 3283 } 3284 #endif 3285 3286 return 0; 3287 } 3288 3289 static int 3290 igbvf_dev_start(struct rte_eth_dev *dev) 3291 { 3292 struct e1000_hw *hw = 3293 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3294 struct e1000_adapter *adapter = 3295 E1000_DEV_PRIVATE(dev->data->dev_private); 3296 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3297 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3298 int ret; 3299 uint32_t intr_vector = 0; 3300 3301 PMD_INIT_FUNC_TRACE(); 3302 3303 hw->mac.ops.reset_hw(hw); 3304 adapter->stopped = 0; 3305 3306 /* Set all vfta */ 3307 igbvf_set_vfta_all(dev,1); 3308 3309 eth_igbvf_tx_init(dev); 3310 3311 /* This can fail when allocating mbufs for descriptor rings */ 3312 ret = eth_igbvf_rx_init(dev); 3313 if (ret) { 3314 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 3315 igb_dev_clear_queues(dev); 3316 return ret; 3317 } 3318 3319 /* check and configure queue intr-vector mapping */ 3320 if (rte_intr_cap_multiple(intr_handle) && 3321 dev->data->dev_conf.intr_conf.rxq) { 3322 intr_vector = dev->data->nb_rx_queues; 3323 ret = rte_intr_efd_enable(intr_handle, intr_vector); 3324 if (ret) 3325 return ret; 3326 } 3327 3328 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 3329 intr_handle->intr_vec = 3330 rte_zmalloc("intr_vec", 3331 dev->data->nb_rx_queues * sizeof(int), 0); 3332 if (!intr_handle->intr_vec) { 3333 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 3334 " intr_vec", dev->data->nb_rx_queues); 3335 return -ENOMEM; 3336 } 3337 } 3338 3339 eth_igbvf_configure_msix_intr(dev); 3340 3341 /* enable uio/vfio intr/eventfd mapping */ 3342 rte_intr_enable(intr_handle); 3343 3344 /* resume enabled intr since hw reset */ 3345 igbvf_intr_enable(dev); 3346 3347 return 0; 3348 } 3349 3350 static int 3351 igbvf_dev_stop(struct rte_eth_dev *dev) 3352 { 3353 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3354 struct rte_intr_handle *intr_handle = 
&pci_dev->intr_handle; 3355 struct e1000_adapter *adapter = 3356 E1000_DEV_PRIVATE(dev->data->dev_private); 3357 3358 if (adapter->stopped) 3359 return 0; 3360 3361 PMD_INIT_FUNC_TRACE(); 3362 3363 igbvf_stop_adapter(dev); 3364 3365 /* 3366 * Clear what we set, but we still keep shadow_vfta to 3367 * restore after device starts 3368 */ 3369 igbvf_set_vfta_all(dev,0); 3370 3371 igb_dev_clear_queues(dev); 3372 3373 /* disable intr eventfd mapping */ 3374 rte_intr_disable(intr_handle); 3375 3376 /* Clean datapath event and queue/vec mapping */ 3377 rte_intr_efd_disable(intr_handle); 3378 if (intr_handle->intr_vec) { 3379 rte_free(intr_handle->intr_vec); 3380 intr_handle->intr_vec = NULL; 3381 } 3382 3383 adapter->stopped = true; 3384 dev->data->dev_started = 0; 3385 3386 return 0; 3387 } 3388 3389 static int 3390 igbvf_dev_close(struct rte_eth_dev *dev) 3391 { 3392 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3393 struct rte_ether_addr addr; 3394 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3395 int ret; 3396 3397 PMD_INIT_FUNC_TRACE(); 3398 3399 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 3400 return 0; 3401 3402 e1000_reset_hw(hw); 3403 3404 ret = igbvf_dev_stop(dev); 3405 if (ret != 0) 3406 return ret; 3407 3408 igb_dev_free_queues(dev); 3409 3410 /** 3411 * reprogram the RAR with a zero mac address, 3412 * to ensure that the VF traffic goes to the PF 3413 * after stop, close and detach of the VF. 3414 **/ 3415 3416 memset(&addr, 0, sizeof(addr)); 3417 igbvf_default_mac_addr_set(dev, &addr); 3418 3419 rte_intr_callback_unregister(&pci_dev->intr_handle, 3420 eth_igbvf_interrupt_handler, 3421 (void *)dev); 3422 3423 return 0; 3424 } 3425 3426 static int 3427 igbvf_promiscuous_enable(struct rte_eth_dev *dev) 3428 { 3429 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3430 3431 /* Set both unicast and multicast promisc */ 3432 e1000_promisc_set_vf(hw, e1000_promisc_enabled); 3433 3434 return 0; 3435 } 3436 3437 static int 3438 igbvf_promiscuous_disable(struct rte_eth_dev *dev) 3439 { 3440 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3441 3442 /* If in allmulticast mode leave multicast promisc */ 3443 if (dev->data->all_multicast == 1) 3444 e1000_promisc_set_vf(hw, e1000_promisc_multicast); 3445 else 3446 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 3447 3448 return 0; 3449 } 3450 3451 static int 3452 igbvf_allmulticast_enable(struct rte_eth_dev *dev) 3453 { 3454 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3455 3456 /* In promiscuous mode multicast promisc already set */ 3457 if (dev->data->promiscuous == 0) 3458 e1000_promisc_set_vf(hw, e1000_promisc_multicast); 3459 3460 return 0; 3461 } 3462 3463 static int 3464 igbvf_allmulticast_disable(struct rte_eth_dev *dev) 3465 { 3466 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3467 3468 /* In promiscuous mode leave multicast promisc enabled */ 3469 if (dev->data->promiscuous == 0) 3470 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 3471 3472 return 0; 3473 } 3474 3475 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on) 3476 { 3477 struct e1000_mbx_info *mbx = &hw->mbx; 3478 uint32_t msgbuf[2]; 3479 s32 err; 3480 3481 /* After set vlan, vlan strip will also be enabled in igb driver*/ 3482 msgbuf[0] = E1000_VF_SET_VLAN; 3483 msgbuf[1] = vid; 3484 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ 3485 if (on) 3486 msgbuf[0] |= E1000_VF_SET_VLAN_ADD; 3487 3488 err = 
mbx->ops.write_posted(hw, msgbuf, 2, 0); 3489 if (err) 3490 goto mbx_err; 3491 3492 err = mbx->ops.read_posted(hw, msgbuf, 2, 0); 3493 if (err) 3494 goto mbx_err; 3495 3496 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; 3497 if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)) 3498 err = -EINVAL; 3499 3500 mbx_err: 3501 return err; 3502 } 3503 3504 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on) 3505 { 3506 struct e1000_hw *hw = 3507 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3508 struct e1000_vfta * shadow_vfta = 3509 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 3510 int i = 0, j = 0, vfta = 0, mask = 1; 3511 3512 for (i = 0; i < IGB_VFTA_SIZE; i++){ 3513 vfta = shadow_vfta->vfta[i]; 3514 if(vfta){ 3515 mask = 1; 3516 for (j = 0; j < 32; j++){ 3517 if(vfta & mask) 3518 igbvf_set_vfta(hw, 3519 (uint16_t)((i<<5)+j), on); 3520 mask<<=1; 3521 } 3522 } 3523 } 3524 3525 } 3526 3527 static int 3528 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 3529 { 3530 struct e1000_hw *hw = 3531 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3532 struct e1000_vfta * shadow_vfta = 3533 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 3534 uint32_t vid_idx = 0; 3535 uint32_t vid_bit = 0; 3536 int ret = 0; 3537 3538 PMD_INIT_FUNC_TRACE(); 3539 3540 /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/ 3541 ret = igbvf_set_vfta(hw, vlan_id, !!on); 3542 if(ret){ 3543 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 3544 return ret; 3545 } 3546 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 3547 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 3548 3549 /*Save what we set and retore it after device reset*/ 3550 if (on) 3551 shadow_vfta->vfta[vid_idx] |= vid_bit; 3552 else 3553 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 3554 3555 return 0; 3556 } 3557 3558 static int 3559 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 3560 { 3561 struct e1000_hw *hw = 3562 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3563 3564 /* index is not used by rar_set() */ 3565 hw->mac.ops.rar_set(hw, (void *)addr, 0); 3566 return 0; 3567 } 3568 3569 3570 static int 3571 eth_igb_rss_reta_update(struct rte_eth_dev *dev, 3572 struct rte_eth_rss_reta_entry64 *reta_conf, 3573 uint16_t reta_size) 3574 { 3575 uint8_t i, j, mask; 3576 uint32_t reta, r; 3577 uint16_t idx, shift; 3578 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3579 3580 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3581 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3582 "(%d) doesn't match the number hardware can supported " 3583 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); 3584 return -EINVAL; 3585 } 3586 3587 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { 3588 idx = i / RTE_RETA_GROUP_SIZE; 3589 shift = i % RTE_RETA_GROUP_SIZE; 3590 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3591 IGB_4_BIT_MASK); 3592 if (!mask) 3593 continue; 3594 if (mask == IGB_4_BIT_MASK) 3595 r = 0; 3596 else 3597 r = E1000_READ_REG(hw, E1000_RETA(i >> 2)); 3598 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) { 3599 if (mask & (0x1 << j)) 3600 reta |= reta_conf[idx].reta[shift + j] << 3601 (CHAR_BIT * j); 3602 else 3603 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j)); 3604 } 3605 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta); 3606 } 3607 3608 return 0; 3609 } 3610 3611 static int 3612 eth_igb_rss_reta_query(struct rte_eth_dev *dev, 3613 struct rte_eth_rss_reta_entry64 *reta_conf, 3614 uint16_t reta_size) 3615 { 3616 uint8_t i, j, mask; 3617 uint32_t reta; 3618 
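/* As in the update path above, the 128-entry redirection table is spread across RETA registers that each hold four 8-bit entries, and reta_conf[].mask selects which entries within every group of four are read back. */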
uint16_t idx, shift; 3619 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3620 3621 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3622 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3623 "(%d) doesn't match the number hardware can supported " 3624 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); 3625 return -EINVAL; 3626 } 3627 3628 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { 3629 idx = i / RTE_RETA_GROUP_SIZE; 3630 shift = i % RTE_RETA_GROUP_SIZE; 3631 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3632 IGB_4_BIT_MASK); 3633 if (!mask) 3634 continue; 3635 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2)); 3636 for (j = 0; j < IGB_4_BIT_WIDTH; j++) { 3637 if (mask & (0x1 << j)) 3638 reta_conf[idx].reta[shift + j] = 3639 ((reta >> (CHAR_BIT * j)) & 3640 IGB_8_BIT_MASK); 3641 } 3642 } 3643 3644 return 0; 3645 } 3646 3647 int 3648 eth_igb_syn_filter_set(struct rte_eth_dev *dev, 3649 struct rte_eth_syn_filter *filter, 3650 bool add) 3651 { 3652 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3653 struct e1000_filter_info *filter_info = 3654 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3655 uint32_t synqf, rfctl; 3656 3657 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) 3658 return -EINVAL; 3659 3660 synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); 3661 3662 if (add) { 3663 if (synqf & E1000_SYN_FILTER_ENABLE) 3664 return -EINVAL; 3665 3666 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) & 3667 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE); 3668 3669 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 3670 if (filter->hig_pri) 3671 rfctl |= E1000_RFCTL_SYNQFP; 3672 else 3673 rfctl &= ~E1000_RFCTL_SYNQFP; 3674 3675 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); 3676 } else { 3677 if (!(synqf & E1000_SYN_FILTER_ENABLE)) 3678 return -ENOENT; 3679 synqf = 0; 3680 } 3681 3682 filter_info->syn_info = synqf; 3683 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); 3684 E1000_WRITE_FLUSH(hw); 3685 return 0; 3686 } 3687 3688 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ 3689 static inline int 3690 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, 3691 struct e1000_2tuple_filter_info *filter_info) 3692 { 3693 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) 3694 return -EINVAL; 3695 if (filter->priority > E1000_2TUPLE_MAX_PRI) 3696 return -EINVAL; /* filter index is out of range. */ 3697 if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK) 3698 return -EINVAL; /* flags is invalid. 
*/ 3699 3700 switch (filter->dst_port_mask) { 3701 case UINT16_MAX: 3702 filter_info->dst_port_mask = 0; 3703 filter_info->dst_port = filter->dst_port; 3704 break; 3705 case 0: 3706 filter_info->dst_port_mask = 1; 3707 break; 3708 default: 3709 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 3710 return -EINVAL; 3711 } 3712 3713 switch (filter->proto_mask) { 3714 case UINT8_MAX: 3715 filter_info->proto_mask = 0; 3716 filter_info->proto = filter->proto; 3717 break; 3718 case 0: 3719 filter_info->proto_mask = 1; 3720 break; 3721 default: 3722 PMD_DRV_LOG(ERR, "invalid protocol mask."); 3723 return -EINVAL; 3724 } 3725 3726 filter_info->priority = (uint8_t)filter->priority; 3727 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) 3728 filter_info->tcp_flags = filter->tcp_flags; 3729 else 3730 filter_info->tcp_flags = 0; 3731 3732 return 0; 3733 } 3734 3735 static inline struct e1000_2tuple_filter * 3736 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list, 3737 struct e1000_2tuple_filter_info *key) 3738 { 3739 struct e1000_2tuple_filter *it; 3740 3741 TAILQ_FOREACH(it, filter_list, entries) { 3742 if (memcmp(key, &it->filter_info, 3743 sizeof(struct e1000_2tuple_filter_info)) == 0) { 3744 return it; 3745 } 3746 } 3747 return NULL; 3748 } 3749 3750 /* inject a igb 2tuple filter to HW */ 3751 static inline void 3752 igb_inject_2uple_filter(struct rte_eth_dev *dev, 3753 struct e1000_2tuple_filter *filter) 3754 { 3755 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3756 uint32_t ttqf = E1000_TTQF_DISABLE_MASK; 3757 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP; 3758 int i; 3759 3760 i = filter->index; 3761 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); 3762 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ 3763 imir |= E1000_IMIR_PORT_BP; 3764 else 3765 imir &= ~E1000_IMIR_PORT_BP; 3766 3767 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; 3768 3769 ttqf |= E1000_TTQF_QUEUE_ENABLE; 3770 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT); 3771 ttqf |= (uint32_t)(filter->filter_info.proto & 3772 E1000_TTQF_PROTOCOL_MASK); 3773 if (filter->filter_info.proto_mask == 0) 3774 ttqf &= ~E1000_TTQF_MASK_ENABLE; 3775 3776 /* tcp flags bits setting. */ 3777 if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { 3778 if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG) 3779 imir_ext |= E1000_IMIREXT_CTRL_URG; 3780 if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG) 3781 imir_ext |= E1000_IMIREXT_CTRL_ACK; 3782 if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG) 3783 imir_ext |= E1000_IMIREXT_CTRL_PSH; 3784 if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG) 3785 imir_ext |= E1000_IMIREXT_CTRL_RST; 3786 if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG) 3787 imir_ext |= E1000_IMIREXT_CTRL_SYN; 3788 if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG) 3789 imir_ext |= E1000_IMIREXT_CTRL_FIN; 3790 } else { 3791 imir_ext |= E1000_IMIREXT_CTRL_BP; 3792 } 3793 E1000_WRITE_REG(hw, E1000_IMIR(i), imir); 3794 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf); 3795 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); 3796 } 3797 3798 /* 3799 * igb_add_2tuple_filter - add a 2tuple filter 3800 * 3801 * @param 3802 * dev: Pointer to struct rte_eth_dev. 3803 * ntuple_filter: ponter to the filter that will be added. 3804 * 3805 * @return 3806 * - On success, zero. 3807 * - On failure, a negative value. 
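 *   (-ENOMEM when the filter node cannot be allocated, -EEXIST if an
 *   identical filter is already configured, -ENOSYS when every 2tuple
 *   slot is in use).  On success the filter is kept on
 *   filter_info->twotuple_list so igb_ntuple_filter_restore() can
 *   re-inject it into hardware after a restart.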
3808 */ 3809 static int 3810 igb_add_2tuple_filter(struct rte_eth_dev *dev, 3811 struct rte_eth_ntuple_filter *ntuple_filter) 3812 { 3813 struct e1000_filter_info *filter_info = 3814 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3815 struct e1000_2tuple_filter *filter; 3816 int i, ret; 3817 3818 filter = rte_zmalloc("e1000_2tuple_filter", 3819 sizeof(struct e1000_2tuple_filter), 0); 3820 if (filter == NULL) 3821 return -ENOMEM; 3822 3823 ret = ntuple_filter_to_2tuple(ntuple_filter, 3824 &filter->filter_info); 3825 if (ret < 0) { 3826 rte_free(filter); 3827 return ret; 3828 } 3829 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list, 3830 &filter->filter_info) != NULL) { 3831 PMD_DRV_LOG(ERR, "filter exists."); 3832 rte_free(filter); 3833 return -EEXIST; 3834 } 3835 filter->queue = ntuple_filter->queue; 3836 3837 /* 3838 * look for an unused 2tuple filter index, 3839 * and insert the filter to list. 3840 */ 3841 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) { 3842 if (!(filter_info->twotuple_mask & (1 << i))) { 3843 filter_info->twotuple_mask |= 1 << i; 3844 filter->index = i; 3845 TAILQ_INSERT_TAIL(&filter_info->twotuple_list, 3846 filter, 3847 entries); 3848 break; 3849 } 3850 } 3851 if (i >= E1000_MAX_TTQF_FILTERS) { 3852 PMD_DRV_LOG(ERR, "2tuple filters are full."); 3853 rte_free(filter); 3854 return -ENOSYS; 3855 } 3856 3857 igb_inject_2uple_filter(dev, filter); 3858 return 0; 3859 } 3860 3861 int 3862 igb_delete_2tuple_filter(struct rte_eth_dev *dev, 3863 struct e1000_2tuple_filter *filter) 3864 { 3865 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3866 struct e1000_filter_info *filter_info = 3867 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3868 3869 filter_info->twotuple_mask &= ~(1 << filter->index); 3870 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); 3871 rte_free(filter); 3872 3873 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); 3874 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); 3875 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); 3876 return 0; 3877 } 3878 3879 /* 3880 * igb_remove_2tuple_filter - remove a 2tuple filter 3881 * 3882 * @param 3883 * dev: Pointer to struct rte_eth_dev. 3884 * ntuple_filter: ponter to the filter that will be removed. 3885 * 3886 * @return 3887 * - On success, zero. 3888 * - On failure, a negative value. 
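 *   (-EINVAL for a malformed filter, -ENOENT when no matching entry is
 *   found).  On success the entry is removed through
 *   igb_delete_2tuple_filter(), which also clears the TTQF/IMIR/IMIREXT
 *   registers for that filter index.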
3889 */ 3890 static int 3891 igb_remove_2tuple_filter(struct rte_eth_dev *dev, 3892 struct rte_eth_ntuple_filter *ntuple_filter) 3893 { 3894 struct e1000_filter_info *filter_info = 3895 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3896 struct e1000_2tuple_filter_info filter_2tuple; 3897 struct e1000_2tuple_filter *filter; 3898 int ret; 3899 3900 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info)); 3901 ret = ntuple_filter_to_2tuple(ntuple_filter, 3902 &filter_2tuple); 3903 if (ret < 0) 3904 return ret; 3905 3906 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list, 3907 &filter_2tuple); 3908 if (filter == NULL) { 3909 PMD_DRV_LOG(ERR, "filter doesn't exist."); 3910 return -ENOENT; 3911 } 3912 3913 igb_delete_2tuple_filter(dev, filter); 3914 3915 return 0; 3916 } 3917 3918 /* inject a igb flex filter to HW */ 3919 static inline void 3920 igb_inject_flex_filter(struct rte_eth_dev *dev, 3921 struct e1000_flex_filter *filter) 3922 { 3923 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3924 uint32_t wufc, queueing; 3925 uint32_t reg_off; 3926 uint8_t i, j = 0; 3927 3928 wufc = E1000_READ_REG(hw, E1000_WUFC); 3929 if (filter->index < E1000_MAX_FHFT) 3930 reg_off = E1000_FHFT(filter->index); 3931 else 3932 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); 3933 3934 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ | 3935 (E1000_WUFC_FLX0 << filter->index)); 3936 queueing = filter->filter_info.len | 3937 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) | 3938 (filter->filter_info.priority << 3939 E1000_FHFT_QUEUEING_PRIO_SHIFT); 3940 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, 3941 queueing); 3942 3943 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) { 3944 E1000_WRITE_REG(hw, reg_off, 3945 filter->filter_info.dwords[j]); 3946 reg_off += sizeof(uint32_t); 3947 E1000_WRITE_REG(hw, reg_off, 3948 filter->filter_info.dwords[++j]); 3949 reg_off += sizeof(uint32_t); 3950 E1000_WRITE_REG(hw, reg_off, 3951 (uint32_t)filter->filter_info.mask[i]); 3952 reg_off += sizeof(uint32_t) * 2; 3953 ++j; 3954 } 3955 } 3956 3957 static inline struct e1000_flex_filter * 3958 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list, 3959 struct e1000_flex_filter_info *key) 3960 { 3961 struct e1000_flex_filter *it; 3962 3963 TAILQ_FOREACH(it, filter_list, entries) { 3964 if (memcmp(key, &it->filter_info, 3965 sizeof(struct e1000_flex_filter_info)) == 0) 3966 return it; 3967 } 3968 3969 return NULL; 3970 } 3971 3972 /* remove a flex byte filter 3973 * @param 3974 * dev: Pointer to struct rte_eth_dev. 3975 * filter: the pointer of the filter will be removed. 
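 *
 * The FHFT/FHFT_EXT register block backing the filter index is zeroed,
 * the corresponding E1000_WUFC_FLX0 << index enable bit is cleared and
 * the node is unlinked from filter_info->flex_list and freed.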
3976 */ 3977 void 3978 igb_remove_flex_filter(struct rte_eth_dev *dev, 3979 struct e1000_flex_filter *filter) 3980 { 3981 struct e1000_filter_info *filter_info = 3982 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3983 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3984 uint32_t wufc, i; 3985 uint32_t reg_off; 3986 3987 wufc = E1000_READ_REG(hw, E1000_WUFC); 3988 if (filter->index < E1000_MAX_FHFT) 3989 reg_off = E1000_FHFT(filter->index); 3990 else 3991 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); 3992 3993 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++) 3994 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0); 3995 3996 E1000_WRITE_REG(hw, E1000_WUFC, wufc & 3997 (~(E1000_WUFC_FLX0 << filter->index))); 3998 3999 filter_info->flex_mask &= ~(1 << filter->index); 4000 TAILQ_REMOVE(&filter_info->flex_list, filter, entries); 4001 rte_free(filter); 4002 } 4003 4004 int 4005 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, 4006 struct igb_flex_filter *filter, 4007 bool add) 4008 { 4009 struct e1000_filter_info *filter_info = 4010 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4011 struct e1000_flex_filter *flex_filter, *it; 4012 uint32_t mask; 4013 uint8_t shift, i; 4014 4015 flex_filter = rte_zmalloc("e1000_flex_filter", 4016 sizeof(struct e1000_flex_filter), 0); 4017 if (flex_filter == NULL) 4018 return -ENOMEM; 4019 4020 flex_filter->filter_info.len = filter->len; 4021 flex_filter->filter_info.priority = filter->priority; 4022 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len); 4023 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) { 4024 mask = 0; 4025 /* reverse bits in flex filter's mask*/ 4026 for (shift = 0; shift < CHAR_BIT; shift++) { 4027 if (filter->mask[i] & (0x01 << shift)) 4028 mask |= (0x80 >> shift); 4029 } 4030 flex_filter->filter_info.mask[i] = mask; 4031 } 4032 4033 it = eth_igb_flex_filter_lookup(&filter_info->flex_list, 4034 &flex_filter->filter_info); 4035 if (it == NULL && !add) { 4036 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4037 rte_free(flex_filter); 4038 return -ENOENT; 4039 } 4040 if (it != NULL && add) { 4041 PMD_DRV_LOG(ERR, "filter exists."); 4042 rte_free(flex_filter); 4043 return -EEXIST; 4044 } 4045 4046 if (add) { 4047 flex_filter->queue = filter->queue; 4048 /* 4049 * look for an unused flex filter index 4050 * and insert the filter into the list. 4051 */ 4052 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) { 4053 if (!(filter_info->flex_mask & (1 << i))) { 4054 filter_info->flex_mask |= 1 << i; 4055 flex_filter->index = i; 4056 TAILQ_INSERT_TAIL(&filter_info->flex_list, 4057 flex_filter, 4058 entries); 4059 break; 4060 } 4061 } 4062 if (i >= E1000_MAX_FLEX_FILTERS) { 4063 PMD_DRV_LOG(ERR, "flex filters are full."); 4064 rte_free(flex_filter); 4065 return -ENOSYS; 4066 } 4067 4068 igb_inject_flex_filter(dev, flex_filter); 4069 4070 } else { 4071 igb_remove_flex_filter(dev, it); 4072 rte_free(flex_filter); 4073 } 4074 4075 return 0; 4076 } 4077 4078 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/ 4079 static inline int 4080 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter, 4081 struct e1000_5tuple_filter_info *filter_info) 4082 { 4083 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) 4084 return -EINVAL; 4085 if (filter->priority > E1000_2TUPLE_MAX_PRI) 4086 return -EINVAL; /* filter index is out of range. */ 4087 if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK) 4088 return -EINVAL; /* flags is invalid. 
*/ 4089 4090 switch (filter->dst_ip_mask) { 4091 case UINT32_MAX: 4092 filter_info->dst_ip_mask = 0; 4093 filter_info->dst_ip = filter->dst_ip; 4094 break; 4095 case 0: 4096 filter_info->dst_ip_mask = 1; 4097 break; 4098 default: 4099 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 4100 return -EINVAL; 4101 } 4102 4103 switch (filter->src_ip_mask) { 4104 case UINT32_MAX: 4105 filter_info->src_ip_mask = 0; 4106 filter_info->src_ip = filter->src_ip; 4107 break; 4108 case 0: 4109 filter_info->src_ip_mask = 1; 4110 break; 4111 default: 4112 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 4113 return -EINVAL; 4114 } 4115 4116 switch (filter->dst_port_mask) { 4117 case UINT16_MAX: 4118 filter_info->dst_port_mask = 0; 4119 filter_info->dst_port = filter->dst_port; 4120 break; 4121 case 0: 4122 filter_info->dst_port_mask = 1; 4123 break; 4124 default: 4125 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 4126 return -EINVAL; 4127 } 4128 4129 switch (filter->src_port_mask) { 4130 case UINT16_MAX: 4131 filter_info->src_port_mask = 0; 4132 filter_info->src_port = filter->src_port; 4133 break; 4134 case 0: 4135 filter_info->src_port_mask = 1; 4136 break; 4137 default: 4138 PMD_DRV_LOG(ERR, "invalid src_port mask."); 4139 return -EINVAL; 4140 } 4141 4142 switch (filter->proto_mask) { 4143 case UINT8_MAX: 4144 filter_info->proto_mask = 0; 4145 filter_info->proto = filter->proto; 4146 break; 4147 case 0: 4148 filter_info->proto_mask = 1; 4149 break; 4150 default: 4151 PMD_DRV_LOG(ERR, "invalid protocol mask."); 4152 return -EINVAL; 4153 } 4154 4155 filter_info->priority = (uint8_t)filter->priority; 4156 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) 4157 filter_info->tcp_flags = filter->tcp_flags; 4158 else 4159 filter_info->tcp_flags = 0; 4160 4161 return 0; 4162 } 4163 4164 static inline struct e1000_5tuple_filter * 4165 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list, 4166 struct e1000_5tuple_filter_info *key) 4167 { 4168 struct e1000_5tuple_filter *it; 4169 4170 TAILQ_FOREACH(it, filter_list, entries) { 4171 if (memcmp(key, &it->filter_info, 4172 sizeof(struct e1000_5tuple_filter_info)) == 0) { 4173 return it; 4174 } 4175 } 4176 return NULL; 4177 } 4178 4179 /* inject a igb 5-tuple filter to HW */ 4180 static inline void 4181 igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev, 4182 struct e1000_5tuple_filter *filter) 4183 { 4184 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4185 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK; 4186 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP; 4187 uint8_t i; 4188 4189 i = filter->index; 4190 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK; 4191 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. 
*/ 4192 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP; 4193 if (filter->filter_info.dst_ip_mask == 0) 4194 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP; 4195 if (filter->filter_info.src_port_mask == 0) 4196 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; 4197 if (filter->filter_info.proto_mask == 0) 4198 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; 4199 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) & 4200 E1000_FTQF_QUEUE_MASK; 4201 ftqf |= E1000_FTQF_QUEUE_ENABLE; 4202 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf); 4203 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip); 4204 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip); 4205 4206 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT; 4207 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf); 4208 4209 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); 4210 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ 4211 imir |= E1000_IMIR_PORT_BP; 4212 else 4213 imir &= ~E1000_IMIR_PORT_BP; 4214 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; 4215 4216 /* tcp flags bits setting. */ 4217 if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { 4218 if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG) 4219 imir_ext |= E1000_IMIREXT_CTRL_URG; 4220 if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG) 4221 imir_ext |= E1000_IMIREXT_CTRL_ACK; 4222 if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG) 4223 imir_ext |= E1000_IMIREXT_CTRL_PSH; 4224 if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG) 4225 imir_ext |= E1000_IMIREXT_CTRL_RST; 4226 if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG) 4227 imir_ext |= E1000_IMIREXT_CTRL_SYN; 4228 if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG) 4229 imir_ext |= E1000_IMIREXT_CTRL_FIN; 4230 } else { 4231 imir_ext |= E1000_IMIREXT_CTRL_BP; 4232 } 4233 E1000_WRITE_REG(hw, E1000_IMIR(i), imir); 4234 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); 4235 } 4236 4237 /* 4238 * igb_add_5tuple_filter_82576 - add a 5tuple filter 4239 * 4240 * @param 4241 * dev: Pointer to struct rte_eth_dev. 4242 * ntuple_filter: ponter to the filter that will be added. 4243 * 4244 * @return 4245 * - On success, zero. 4246 * - On failure, a negative value. 4247 */ 4248 static int 4249 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, 4250 struct rte_eth_ntuple_filter *ntuple_filter) 4251 { 4252 struct e1000_filter_info *filter_info = 4253 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4254 struct e1000_5tuple_filter *filter; 4255 uint8_t i; 4256 int ret; 4257 4258 filter = rte_zmalloc("e1000_5tuple_filter", 4259 sizeof(struct e1000_5tuple_filter), 0); 4260 if (filter == NULL) 4261 return -ENOMEM; 4262 4263 ret = ntuple_filter_to_5tuple_82576(ntuple_filter, 4264 &filter->filter_info); 4265 if (ret < 0) { 4266 rte_free(filter); 4267 return ret; 4268 } 4269 4270 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, 4271 &filter->filter_info) != NULL) { 4272 PMD_DRV_LOG(ERR, "filter exists."); 4273 rte_free(filter); 4274 return -EEXIST; 4275 } 4276 filter->queue = ntuple_filter->queue; 4277 4278 /* 4279 * look for an unused 5tuple filter index, 4280 * and insert the filter to list. 
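 * If all E1000_MAX_FTQF_FILTERS slots are already taken the filter is
 * freed again and -ENOSYS is returned.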
4281 */ 4282 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) { 4283 if (!(filter_info->fivetuple_mask & (1 << i))) { 4284 filter_info->fivetuple_mask |= 1 << i; 4285 filter->index = i; 4286 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 4287 filter, 4288 entries); 4289 break; 4290 } 4291 } 4292 if (i >= E1000_MAX_FTQF_FILTERS) { 4293 PMD_DRV_LOG(ERR, "5tuple filters are full."); 4294 rte_free(filter); 4295 return -ENOSYS; 4296 } 4297 4298 igb_inject_5tuple_filter_82576(dev, filter); 4299 return 0; 4300 } 4301 4302 int 4303 igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, 4304 struct e1000_5tuple_filter *filter) 4305 { 4306 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4307 struct e1000_filter_info *filter_info = 4308 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4309 4310 filter_info->fivetuple_mask &= ~(1 << filter->index); 4311 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 4312 rte_free(filter); 4313 4314 E1000_WRITE_REG(hw, E1000_FTQF(filter->index), 4315 E1000_FTQF_VF_BP | E1000_FTQF_MASK); 4316 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0); 4317 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0); 4318 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0); 4319 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); 4320 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); 4321 return 0; 4322 } 4323 4324 /* 4325 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter 4326 * 4327 * @param 4328 * dev: Pointer to struct rte_eth_dev. 4329 * ntuple_filter: ponter to the filter that will be removed. 4330 * 4331 * @return 4332 * - On success, zero. 4333 * - On failure, a negative value. 4334 */ 4335 static int 4336 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, 4337 struct rte_eth_ntuple_filter *ntuple_filter) 4338 { 4339 struct e1000_filter_info *filter_info = 4340 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4341 struct e1000_5tuple_filter_info filter_5tuple; 4342 struct e1000_5tuple_filter *filter; 4343 int ret; 4344 4345 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info)); 4346 ret = ntuple_filter_to_5tuple_82576(ntuple_filter, 4347 &filter_5tuple); 4348 if (ret < 0) 4349 return ret; 4350 4351 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, 4352 &filter_5tuple); 4353 if (filter == NULL) { 4354 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4355 return -ENOENT; 4356 } 4357 4358 igb_delete_5tuple_filter_82576(dev, filter); 4359 4360 return 0; 4361 } 4362 4363 static int 4364 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 4365 { 4366 uint32_t rctl; 4367 struct e1000_hw *hw; 4368 struct rte_eth_dev_info dev_info; 4369 uint32_t frame_size = mtu + E1000_ETH_OVERHEAD; 4370 int ret; 4371 4372 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4373 4374 #ifdef RTE_LIBRTE_82571_SUPPORT 4375 /* XXX: not bigger than max_rx_pktlen */ 4376 if (hw->mac.type == e1000_82571) 4377 return -ENOTSUP; 4378 #endif 4379 ret = eth_igb_infos_get(dev, &dev_info); 4380 if (ret != 0) 4381 return ret; 4382 4383 /* check that mtu is within the allowed range */ 4384 if (mtu < RTE_ETHER_MIN_MTU || 4385 frame_size > dev_info.max_rx_pktlen) 4386 return -EINVAL; 4387 4388 /* 4389 * If device is started, refuse mtu that requires the support of 4390 * scattered packets when this feature has not been enabled before. 
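 * The check compares frame_size (mtu + E1000_ETH_OVERHEAD) against the
 * mbuf data room, i.e. min_rx_buf_size - RTE_PKTMBUF_HEADROOM.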
4391 */ 4392 if (dev->data->dev_started && !dev->data->scattered_rx && 4393 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 4394 PMD_INIT_LOG(ERR, "Stop port first."); 4395 return -EINVAL; 4396 } 4397 4398 rctl = E1000_READ_REG(hw, E1000_RCTL); 4399 4400 /* switch to jumbo mode if needed */ 4401 if (frame_size > E1000_ETH_MAX_LEN) { 4402 dev->data->dev_conf.rxmode.offloads |= 4403 DEV_RX_OFFLOAD_JUMBO_FRAME; 4404 rctl |= E1000_RCTL_LPE; 4405 } else { 4406 dev->data->dev_conf.rxmode.offloads &= 4407 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 4408 rctl &= ~E1000_RCTL_LPE; 4409 } 4410 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 4411 4412 /* update max frame size */ 4413 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 4414 4415 E1000_WRITE_REG(hw, E1000_RLPML, 4416 dev->data->dev_conf.rxmode.max_rx_pkt_len); 4417 4418 return 0; 4419 } 4420 4421 /* 4422 * igb_add_del_ntuple_filter - add or delete a ntuple filter 4423 * 4424 * @param 4425 * dev: Pointer to struct rte_eth_dev. 4426 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 4427 * add: if true, add filter, if false, remove filter 4428 * 4429 * @return 4430 * - On success, zero. 4431 * - On failure, a negative value. 4432 */ 4433 int 4434 igb_add_del_ntuple_filter(struct rte_eth_dev *dev, 4435 struct rte_eth_ntuple_filter *ntuple_filter, 4436 bool add) 4437 { 4438 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4439 int ret; 4440 4441 switch (ntuple_filter->flags) { 4442 case RTE_5TUPLE_FLAGS: 4443 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4444 if (hw->mac.type != e1000_82576) 4445 return -ENOTSUP; 4446 if (add) 4447 ret = igb_add_5tuple_filter_82576(dev, 4448 ntuple_filter); 4449 else 4450 ret = igb_remove_5tuple_filter_82576(dev, 4451 ntuple_filter); 4452 break; 4453 case RTE_2TUPLE_FLAGS: 4454 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4455 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 && 4456 hw->mac.type != e1000_i210 && 4457 hw->mac.type != e1000_i211) 4458 return -ENOTSUP; 4459 if (add) 4460 ret = igb_add_2tuple_filter(dev, ntuple_filter); 4461 else 4462 ret = igb_remove_2tuple_filter(dev, ntuple_filter); 4463 break; 4464 default: 4465 ret = -EINVAL; 4466 break; 4467 } 4468 4469 return ret; 4470 } 4471 4472 static inline int 4473 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info, 4474 uint16_t ethertype) 4475 { 4476 int i; 4477 4478 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 4479 if (filter_info->ethertype_filters[i].ethertype == ethertype && 4480 (filter_info->ethertype_mask & (1 << i))) 4481 return i; 4482 } 4483 return -1; 4484 } 4485 4486 static inline int 4487 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info, 4488 uint16_t ethertype, uint32_t etqf) 4489 { 4490 int i; 4491 4492 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 4493 if (!(filter_info->ethertype_mask & (1 << i))) { 4494 filter_info->ethertype_mask |= 1 << i; 4495 filter_info->ethertype_filters[i].ethertype = ethertype; 4496 filter_info->ethertype_filters[i].etqf = etqf; 4497 return i; 4498 } 4499 } 4500 return -1; 4501 } 4502 4503 int 4504 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, 4505 uint8_t idx) 4506 { 4507 if (idx >= E1000_MAX_ETQF_FILTERS) 4508 return -1; 4509 filter_info->ethertype_mask &= ~(1 << idx); 4510 filter_info->ethertype_filters[idx].ethertype = 0; 4511 filter_info->ethertype_filters[idx].etqf = 0; 4512 return idx; 4513 } 4514 4515 4516 int 4517 igb_add_del_ethertype_filter(struct rte_eth_dev *dev, 4518 struct 
rte_eth_ethertype_filter *filter, 4519 bool add) 4520 { 4521 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4522 struct e1000_filter_info *filter_info = 4523 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4524 uint32_t etqf = 0; 4525 int ret; 4526 4527 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 4528 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 4529 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 4530 " ethertype filter.", filter->ether_type); 4531 return -EINVAL; 4532 } 4533 4534 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 4535 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 4536 return -EINVAL; 4537 } 4538 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 4539 PMD_DRV_LOG(ERR, "drop option is unsupported."); 4540 return -EINVAL; 4541 } 4542 4543 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); 4544 if (ret >= 0 && add) { 4545 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 4546 filter->ether_type); 4547 return -EEXIST; 4548 } 4549 if (ret < 0 && !add) { 4550 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4551 filter->ether_type); 4552 return -ENOENT; 4553 } 4554 4555 if (add) { 4556 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE; 4557 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE); 4558 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT; 4559 ret = igb_ethertype_filter_insert(filter_info, 4560 filter->ether_type, etqf); 4561 if (ret < 0) { 4562 PMD_DRV_LOG(ERR, "ethertype filters are full."); 4563 return -ENOSYS; 4564 } 4565 } else { 4566 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret); 4567 if (ret < 0) 4568 return -ENOSYS; 4569 } 4570 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf); 4571 E1000_WRITE_FLUSH(hw); 4572 4573 return 0; 4574 } 4575 4576 static int 4577 eth_igb_flow_ops_get(struct rte_eth_dev *dev __rte_unused, 4578 const struct rte_flow_ops **ops) 4579 { 4580 *ops = &igb_flow_ops; 4581 return 0; 4582 } 4583 4584 static int 4585 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev, 4586 struct rte_ether_addr *mc_addr_set, 4587 uint32_t nb_mc_addr) 4588 { 4589 struct e1000_hw *hw; 4590 4591 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4592 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); 4593 return 0; 4594 } 4595 4596 static uint64_t 4597 igb_read_systime_cyclecounter(struct rte_eth_dev *dev) 4598 { 4599 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4600 uint64_t systime_cycles; 4601 4602 switch (hw->mac.type) { 4603 case e1000_i210: 4604 case e1000_i211: 4605 /* 4606 * Need to read System Time Residue Register to be able 4607 * to read the other two registers. 4608 */ 4609 E1000_READ_REG(hw, E1000_SYSTIMR); 4610 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 4611 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4612 systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) 4613 * NSEC_PER_SEC; 4614 break; 4615 case e1000_82580: 4616 case e1000_i350: 4617 case e1000_i354: 4618 /* 4619 * Need to read System Time Residue Register to be able 4620 * to read the other two registers. 4621 */ 4622 E1000_READ_REG(hw, E1000_SYSTIMR); 4623 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4624 /* Only the 8 LSB are valid. 
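 * SYSTIMH supplies just 8 extra bits here, so the counter is 40 bits
 * wide, matching the (1ULL << 40) - 1 mask programmed by
 * igb_start_timecounters() for these MAC types.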
*/ 4625 systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH) 4626 & 0xff) << 32; 4627 break; 4628 default: 4629 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4630 systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) 4631 << 32; 4632 break; 4633 } 4634 4635 return systime_cycles; 4636 } 4637 4638 static uint64_t 4639 igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 4640 { 4641 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4642 uint64_t rx_tstamp_cycles; 4643 4644 switch (hw->mac.type) { 4645 case e1000_i210: 4646 case e1000_i211: 4647 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 4648 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 4649 rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) 4650 * NSEC_PER_SEC; 4651 break; 4652 case e1000_82580: 4653 case e1000_i350: 4654 case e1000_i354: 4655 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 4656 /* Only the 8 LSB are valid. */ 4657 rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH) 4658 & 0xff) << 32; 4659 break; 4660 default: 4661 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 4662 rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) 4663 << 32; 4664 break; 4665 } 4666 4667 return rx_tstamp_cycles; 4668 } 4669 4670 static uint64_t 4671 igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 4672 { 4673 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4674 uint64_t tx_tstamp_cycles; 4675 4676 switch (hw->mac.type) { 4677 case e1000_i210: 4678 case e1000_i211: 4679 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 4680 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 4681 tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) 4682 * NSEC_PER_SEC; 4683 break; 4684 case e1000_82580: 4685 case e1000_i350: 4686 case e1000_i354: 4687 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 4688 /* Only the 8 LSB are valid. */ 4689 tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH) 4690 & 0xff) << 32; 4691 break; 4692 default: 4693 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 4694 tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) 4695 << 32; 4696 break; 4697 } 4698 4699 return tx_tstamp_cycles; 4700 } 4701 4702 static void 4703 igb_start_timecounters(struct rte_eth_dev *dev) 4704 { 4705 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4706 struct e1000_adapter *adapter = dev->data->dev_private; 4707 uint32_t incval = 1; 4708 uint32_t shift = 0; 4709 uint64_t mask = E1000_CYCLECOUNTER_MASK; 4710 4711 switch (hw->mac.type) { 4712 case e1000_82580: 4713 case e1000_i350: 4714 case e1000_i354: 4715 /* 32 LSB bits + 8 MSB bits = 40 bits */ 4716 mask = (1ULL << 40) - 1; 4717 /* fall-through */ 4718 case e1000_i210: 4719 case e1000_i211: 4720 /* 4721 * Start incrementing the register 4722 * used to timestamp PTP packets. 
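 * i210/i211 use the default increment value of 1; the 82576 case below
 * instead programs E1000_INCPERIOD_82576 | E1000_INCVALUE_82576 and a
 * non-zero timecounter shift (IGB_82576_TSYNC_SHIFT).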
4723 */ 4724 E1000_WRITE_REG(hw, E1000_TIMINCA, incval); 4725 break; 4726 case e1000_82576: 4727 incval = E1000_INCVALUE_82576; 4728 shift = IGB_82576_TSYNC_SHIFT; 4729 E1000_WRITE_REG(hw, E1000_TIMINCA, 4730 E1000_INCPERIOD_82576 | incval); 4731 break; 4732 default: 4733 /* Not supported */ 4734 return; 4735 } 4736 4737 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 4738 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4739 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4740 4741 adapter->systime_tc.cc_mask = mask; 4742 adapter->systime_tc.cc_shift = shift; 4743 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 4744 4745 adapter->rx_tstamp_tc.cc_mask = mask; 4746 adapter->rx_tstamp_tc.cc_shift = shift; 4747 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4748 4749 adapter->tx_tstamp_tc.cc_mask = mask; 4750 adapter->tx_tstamp_tc.cc_shift = shift; 4751 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4752 } 4753 4754 static int 4755 igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 4756 { 4757 struct e1000_adapter *adapter = dev->data->dev_private; 4758 4759 adapter->systime_tc.nsec += delta; 4760 adapter->rx_tstamp_tc.nsec += delta; 4761 adapter->tx_tstamp_tc.nsec += delta; 4762 4763 return 0; 4764 } 4765 4766 static int 4767 igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 4768 { 4769 uint64_t ns; 4770 struct e1000_adapter *adapter = dev->data->dev_private; 4771 4772 ns = rte_timespec_to_ns(ts); 4773 4774 /* Set the timecounters to a new value. */ 4775 adapter->systime_tc.nsec = ns; 4776 adapter->rx_tstamp_tc.nsec = ns; 4777 adapter->tx_tstamp_tc.nsec = ns; 4778 4779 return 0; 4780 } 4781 4782 static int 4783 igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 4784 { 4785 uint64_t ns, systime_cycles; 4786 struct e1000_adapter *adapter = dev->data->dev_private; 4787 4788 systime_cycles = igb_read_systime_cyclecounter(dev); 4789 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 4790 *ts = rte_ns_to_timespec(ns); 4791 4792 return 0; 4793 } 4794 4795 static int 4796 igb_timesync_enable(struct rte_eth_dev *dev) 4797 { 4798 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4799 uint32_t tsync_ctl; 4800 uint32_t tsauxc; 4801 4802 /* Stop the timesync system time. */ 4803 E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0); 4804 /* Reset the timesync system time value. */ 4805 switch (hw->mac.type) { 4806 case e1000_82580: 4807 case e1000_i350: 4808 case e1000_i354: 4809 case e1000_i210: 4810 case e1000_i211: 4811 E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0); 4812 /* fall-through */ 4813 case e1000_82576: 4814 E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0); 4815 E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0); 4816 break; 4817 default: 4818 /* Not supported. */ 4819 return -ENOTSUP; 4820 } 4821 4822 /* Enable system time for it isn't on by default. */ 4823 tsauxc = E1000_READ_REG(hw, E1000_TSAUXC); 4824 tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME; 4825 E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc); 4826 4827 igb_start_timecounters(dev); 4828 4829 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 4830 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 4831 (RTE_ETHER_TYPE_1588 | 4832 E1000_ETQF_FILTER_ENABLE | 4833 E1000_ETQF_1588)); 4834 4835 /* Enable timestamping of received PTP packets. 
*/ 4836 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); 4837 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED; 4838 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl); 4839 4840 /* Enable Timestamping of transmitted PTP packets. */ 4841 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); 4842 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED; 4843 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl); 4844 4845 return 0; 4846 } 4847 4848 static int 4849 igb_timesync_disable(struct rte_eth_dev *dev) 4850 { 4851 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4852 uint32_t tsync_ctl; 4853 4854 /* Disable timestamping of transmitted PTP packets. */ 4855 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); 4856 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED; 4857 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl); 4858 4859 /* Disable timestamping of received PTP packets. */ 4860 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); 4861 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED; 4862 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl); 4863 4864 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 4865 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0); 4866 4867 /* Stop incrementating the System Time registers. */ 4868 E1000_WRITE_REG(hw, E1000_TIMINCA, 0); 4869 4870 return 0; 4871 } 4872 4873 static int 4874 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 4875 struct timespec *timestamp, 4876 uint32_t flags __rte_unused) 4877 { 4878 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4879 struct e1000_adapter *adapter = dev->data->dev_private; 4880 uint32_t tsync_rxctl; 4881 uint64_t rx_tstamp_cycles; 4882 uint64_t ns; 4883 4884 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); 4885 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0) 4886 return -EINVAL; 4887 4888 rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev); 4889 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 4890 *timestamp = rte_ns_to_timespec(ns); 4891 4892 return 0; 4893 } 4894 4895 static int 4896 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 4897 struct timespec *timestamp) 4898 { 4899 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4900 struct e1000_adapter *adapter = dev->data->dev_private; 4901 uint32_t tsync_txctl; 4902 uint64_t tx_tstamp_cycles; 4903 uint64_t ns; 4904 4905 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); 4906 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0) 4907 return -EINVAL; 4908 4909 tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev); 4910 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 4911 *timestamp = rte_ns_to_timespec(ns); 4912 4913 return 0; 4914 } 4915 4916 static int 4917 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused) 4918 { 4919 int count = 0; 4920 int g_ind = 0; 4921 const struct reg_info *reg_group; 4922 4923 while ((reg_group = igb_regs[g_ind++])) 4924 count += igb_reg_group_count(reg_group); 4925 4926 return count; 4927 } 4928 4929 static int 4930 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 4931 { 4932 int count = 0; 4933 int g_ind = 0; 4934 const struct reg_info *reg_group; 4935 4936 while ((reg_group = igbvf_regs[g_ind++])) 4937 count += igb_reg_group_count(reg_group); 4938 4939 return count; 4940 } 4941 4942 static int 4943 eth_igb_get_regs(struct rte_eth_dev *dev, 4944 struct rte_dev_reg_info *regs) 4945 { 4946 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4947 uint32_t *data = regs->data; 4948 int g_ind = 0; 4949 int 
count = 0; 4950 const struct reg_info *reg_group; 4951 4952 if (data == NULL) { 4953 regs->length = eth_igb_get_reg_length(dev); 4954 regs->width = sizeof(uint32_t); 4955 return 0; 4956 } 4957 4958 /* Support only full register dump */ 4959 if ((regs->length == 0) || 4960 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) { 4961 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 4962 hw->device_id; 4963 while ((reg_group = igb_regs[g_ind++])) 4964 count += igb_read_regs_group(dev, &data[count], 4965 reg_group); 4966 return 0; 4967 } 4968 4969 return -ENOTSUP; 4970 } 4971 4972 static int 4973 igbvf_get_regs(struct rte_eth_dev *dev, 4974 struct rte_dev_reg_info *regs) 4975 { 4976 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4977 uint32_t *data = regs->data; 4978 int g_ind = 0; 4979 int count = 0; 4980 const struct reg_info *reg_group; 4981 4982 if (data == NULL) { 4983 regs->length = igbvf_get_reg_length(dev); 4984 regs->width = sizeof(uint32_t); 4985 return 0; 4986 } 4987 4988 /* Support only full register dump */ 4989 if ((regs->length == 0) || 4990 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) { 4991 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 4992 hw->device_id; 4993 while ((reg_group = igbvf_regs[g_ind++])) 4994 count += igb_read_regs_group(dev, &data[count], 4995 reg_group); 4996 return 0; 4997 } 4998 4999 return -ENOTSUP; 5000 } 5001 5002 static int 5003 eth_igb_get_eeprom_length(struct rte_eth_dev *dev) 5004 { 5005 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5006 5007 /* Return unit is byte count */ 5008 return hw->nvm.word_size * 2; 5009 } 5010 5011 static int 5012 eth_igb_get_eeprom(struct rte_eth_dev *dev, 5013 struct rte_dev_eeprom_info *in_eeprom) 5014 { 5015 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5016 struct e1000_nvm_info *nvm = &hw->nvm; 5017 uint16_t *data = in_eeprom->data; 5018 int first, length; 5019 5020 first = in_eeprom->offset >> 1; 5021 length = in_eeprom->length >> 1; 5022 if ((first >= hw->nvm.word_size) || 5023 ((first + length) >= hw->nvm.word_size)) 5024 return -EINVAL; 5025 5026 in_eeprom->magic = hw->vendor_id | 5027 ((uint32_t)hw->device_id << 16); 5028 5029 if ((nvm->ops.read) == NULL) 5030 return -ENOTSUP; 5031 5032 return nvm->ops.read(hw, first, length, data); 5033 } 5034 5035 static int 5036 eth_igb_set_eeprom(struct rte_eth_dev *dev, 5037 struct rte_dev_eeprom_info *in_eeprom) 5038 { 5039 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5040 struct e1000_nvm_info *nvm = &hw->nvm; 5041 uint16_t *data = in_eeprom->data; 5042 int first, length; 5043 5044 first = in_eeprom->offset >> 1; 5045 length = in_eeprom->length >> 1; 5046 if ((first >= hw->nvm.word_size) || 5047 ((first + length) >= hw->nvm.word_size)) 5048 return -EINVAL; 5049 5050 in_eeprom->magic = (uint32_t)hw->vendor_id | 5051 ((uint32_t)hw->device_id << 16); 5052 5053 if ((nvm->ops.write) == NULL) 5054 return -ENOTSUP; 5055 return nvm->ops.write(hw, first, length, data); 5056 } 5057 5058 static int 5059 eth_igb_get_module_info(struct rte_eth_dev *dev, 5060 struct rte_eth_dev_module_info *modinfo) 5061 { 5062 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5063 5064 uint32_t status = 0; 5065 uint16_t sff8472_rev, addr_mode; 5066 bool page_swap = false; 5067 5068 if (hw->phy.media_type == e1000_media_type_copper || 5069 hw->phy.media_type == e1000_media_type_unknown) 5070 return -EOPNOTSUPP; 5071 5072 /* Check whether we 
support SFF-8472 or not */ 5073 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); 5074 if (status) 5075 return -EIO; 5076 5077 /* addressing mode is not supported */ 5078 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); 5079 if (status) 5080 return -EIO; 5081 5082 /* addressing mode is not supported */ 5083 if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { 5084 PMD_DRV_LOG(ERR, 5085 "Address change required to access page 0xA2, " 5086 "but not supported. Please report the module " 5087 "type to the driver maintainers.\n"); 5088 page_swap = true; 5089 } 5090 5091 if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { 5092 /* We have an SFP, but it does not support SFF-8472 */ 5093 modinfo->type = RTE_ETH_MODULE_SFF_8079; 5094 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 5095 } else { 5096 /* We have an SFP which supports a revision of SFF-8472 */ 5097 modinfo->type = RTE_ETH_MODULE_SFF_8472; 5098 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 5099 } 5100 5101 return 0; 5102 } 5103 5104 static int 5105 eth_igb_get_module_eeprom(struct rte_eth_dev *dev, 5106 struct rte_dev_eeprom_info *info) 5107 { 5108 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5109 5110 uint32_t status = 0; 5111 uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1]; 5112 u16 first_word, last_word; 5113 int i = 0; 5114 5115 first_word = info->offset >> 1; 5116 last_word = (info->offset + info->length - 1) >> 1; 5117 5118 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ 5119 for (i = 0; i < last_word - first_word + 1; i++) { 5120 status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2, 5121 &dataword[i]); 5122 if (status) { 5123 /* Error occurred while reading module */ 5124 return -EIO; 5125 } 5126 5127 dataword[i] = rte_be_to_cpu_16(dataword[i]); 5128 } 5129 5130 memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length); 5131 5132 return 0; 5133 } 5134 5135 static int 5136 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5137 { 5138 struct e1000_hw *hw = 5139 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5140 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5141 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5142 uint32_t vec = E1000_MISC_VEC_ID; 5143 5144 if (rte_intr_allow_others(intr_handle)) 5145 vec = E1000_RX_VEC_START; 5146 5147 uint32_t mask = 1 << (queue_id + vec); 5148 5149 E1000_WRITE_REG(hw, E1000_EIMC, mask); 5150 E1000_WRITE_FLUSH(hw); 5151 5152 return 0; 5153 } 5154 5155 static int 5156 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5157 { 5158 struct e1000_hw *hw = 5159 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5160 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5161 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5162 uint32_t vec = E1000_MISC_VEC_ID; 5163 5164 if (rte_intr_allow_others(intr_handle)) 5165 vec = E1000_RX_VEC_START; 5166 5167 uint32_t mask = 1 << (queue_id + vec); 5168 uint32_t regval; 5169 5170 regval = E1000_READ_REG(hw, E1000_EIMS); 5171 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); 5172 E1000_WRITE_FLUSH(hw); 5173 5174 rte_intr_ack(intr_handle); 5175 5176 return 0; 5177 } 5178 5179 static void 5180 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, 5181 uint8_t index, uint8_t offset) 5182 { 5183 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 5184 5185 /* clear bits */ 5186 val &= ~((uint32_t)0xFF << offset); 5187 5188 /* write vector and valid bit 
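 * Each 32-bit IVAR register packs four 8-bit entries; 'offset' selects
 * the byte used for this queue/direction and E1000_IVAR_VALID marks the
 * entry as enabled.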
*/ 5189 val |= (msix_vector | E1000_IVAR_VALID) << offset; 5190 5191 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val); 5192 } 5193 5194 static void 5195 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction, 5196 uint8_t queue, uint8_t msix_vector) 5197 { 5198 uint32_t tmp = 0; 5199 5200 if (hw->mac.type == e1000_82575) { 5201 if (direction == 0) 5202 tmp = E1000_EICR_RX_QUEUE0 << queue; 5203 else if (direction == 1) 5204 tmp = E1000_EICR_TX_QUEUE0 << queue; 5205 E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp); 5206 } else if (hw->mac.type == e1000_82576) { 5207 if ((direction == 0) || (direction == 1)) 5208 eth_igb_write_ivar(hw, msix_vector, queue & 0x7, 5209 ((queue & 0x8) << 1) + 5210 8 * direction); 5211 } else if ((hw->mac.type == e1000_82580) || 5212 (hw->mac.type == e1000_i350) || 5213 (hw->mac.type == e1000_i354) || 5214 (hw->mac.type == e1000_i210) || 5215 (hw->mac.type == e1000_i211)) { 5216 if ((direction == 0) || (direction == 1)) 5217 eth_igb_write_ivar(hw, msix_vector, 5218 queue >> 1, 5219 ((queue & 0x1) << 4) + 5220 8 * direction); 5221 } 5222 } 5223 5224 /* Sets up the hardware to generate MSI-X interrupts properly 5225 * @hw 5226 * board private structure 5227 */ 5228 static void 5229 eth_igb_configure_msix_intr(struct rte_eth_dev *dev) 5230 { 5231 int queue_id; 5232 uint32_t tmpval, regval, intr_mask; 5233 struct e1000_hw *hw = 5234 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5235 uint32_t vec = E1000_MISC_VEC_ID; 5236 uint32_t base = E1000_MISC_VEC_ID; 5237 uint32_t misc_shift = 0; 5238 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5239 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5240 5241 /* won't configure msix register if no mapping is done 5242 * between intr vector and event fd 5243 */ 5244 if (!rte_intr_dp_is_en(intr_handle)) 5245 return; 5246 5247 if (rte_intr_allow_others(intr_handle)) { 5248 vec = base = E1000_RX_VEC_START; 5249 misc_shift = 1; 5250 } 5251 5252 /* set interrupt vector for other causes */ 5253 if (hw->mac.type == e1000_82575) { 5254 tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT); 5255 /* enable MSI-X PBA support */ 5256 tmpval |= E1000_CTRL_EXT_PBA_CLR; 5257 5258 /* Auto-Mask interrupts upon ICR read */ 5259 tmpval |= E1000_CTRL_EXT_EIAME; 5260 tmpval |= E1000_CTRL_EXT_IRCA; 5261 5262 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval); 5263 5264 /* enable msix_other interrupt */ 5265 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER); 5266 regval = E1000_READ_REG(hw, E1000_EIAC); 5267 E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER); 5268 regval = E1000_READ_REG(hw, E1000_EIAM); 5269 E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER); 5270 } else if ((hw->mac.type == e1000_82576) || 5271 (hw->mac.type == e1000_82580) || 5272 (hw->mac.type == e1000_i350) || 5273 (hw->mac.type == e1000_i354) || 5274 (hw->mac.type == e1000_i210) || 5275 (hw->mac.type == e1000_i211)) { 5276 /* turn on MSI-X capability first */ 5277 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | 5278 E1000_GPIE_PBA | E1000_GPIE_EIAME | 5279 E1000_GPIE_NSICR); 5280 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << 5281 misc_shift; 5282 5283 if (dev->data->dev_conf.intr_conf.lsc != 0) 5284 intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC); 5285 5286 regval = E1000_READ_REG(hw, E1000_EIAC); 5287 E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask); 5288 5289 /* enable msix_other interrupt */ 5290 regval = E1000_READ_REG(hw, E1000_EIMS); 5291 E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask); 5292 tmpval 
= (IGB_MSIX_OTHER_INTR_VEC | E1000_IVAR_VALID) << 8; 5293 E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval); 5294 } 5295 5296 /* use EIAM to auto-mask when MSI-X interrupt 5297 * is asserted, this saves a register write for every interrupt 5298 */ 5299 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << 5300 misc_shift; 5301 5302 if (dev->data->dev_conf.intr_conf.lsc != 0) 5303 intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC); 5304 5305 regval = E1000_READ_REG(hw, E1000_EIAM); 5306 E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask); 5307 5308 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) { 5309 eth_igb_assign_msix_vector(hw, 0, queue_id, vec); 5310 intr_handle->intr_vec[queue_id] = vec; 5311 if (vec < base + intr_handle->nb_efd - 1) 5312 vec++; 5313 } 5314 5315 E1000_WRITE_FLUSH(hw); 5316 } 5317 5318 /* restore n-tuple filter */ 5319 static inline void 5320 igb_ntuple_filter_restore(struct rte_eth_dev *dev) 5321 { 5322 struct e1000_filter_info *filter_info = 5323 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5324 struct e1000_5tuple_filter *p_5tuple; 5325 struct e1000_2tuple_filter *p_2tuple; 5326 5327 TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) { 5328 igb_inject_5tuple_filter_82576(dev, p_5tuple); 5329 } 5330 5331 TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) { 5332 igb_inject_2uple_filter(dev, p_2tuple); 5333 } 5334 } 5335 5336 /* restore SYN filter */ 5337 static inline void 5338 igb_syn_filter_restore(struct rte_eth_dev *dev) 5339 { 5340 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5341 struct e1000_filter_info *filter_info = 5342 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5343 uint32_t synqf; 5344 5345 synqf = filter_info->syn_info; 5346 5347 if (synqf & E1000_SYN_FILTER_ENABLE) { 5348 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); 5349 E1000_WRITE_FLUSH(hw); 5350 } 5351 } 5352 5353 /* restore ethernet type filter */ 5354 static inline void 5355 igb_ethertype_filter_restore(struct rte_eth_dev *dev) 5356 { 5357 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5358 struct e1000_filter_info *filter_info = 5359 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5360 int i; 5361 5362 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 5363 if (filter_info->ethertype_mask & (1 << i)) { 5364 E1000_WRITE_REG(hw, E1000_ETQF(i), 5365 filter_info->ethertype_filters[i].etqf); 5366 E1000_WRITE_FLUSH(hw); 5367 } 5368 } 5369 } 5370 5371 /* restore flex byte filter */ 5372 static inline void 5373 igb_flex_filter_restore(struct rte_eth_dev *dev) 5374 { 5375 struct e1000_filter_info *filter_info = 5376 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5377 struct e1000_flex_filter *flex_filter; 5378 5379 TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) { 5380 igb_inject_flex_filter(dev, flex_filter); 5381 } 5382 } 5383 5384 /* restore rss filter */ 5385 static inline void 5386 igb_rss_filter_restore(struct rte_eth_dev *dev) 5387 { 5388 struct e1000_filter_info *filter_info = 5389 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5390 5391 if (filter_info->rss_info.conf.queue_num) 5392 igb_config_rss_filter(dev, &filter_info->rss_info, TRUE); 5393 } 5394 5395 /* restore all types filter */ 5396 static int 5397 igb_filter_restore(struct rte_eth_dev *dev) 5398 { 5399 igb_ntuple_filter_restore(dev); 5400 igb_ethertype_filter_restore(dev); 5401 igb_syn_filter_restore(dev); 5402 igb_flex_filter_restore(dev); 5403 igb_rss_filter_restore(dev); 5404 5405 
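	/*
	 * Each helper above re-injects its filter type from the shadow
	 * copies held in e1000_filter_info, so the hardware filter state
	 * is rebuilt from software after the device is restarted.
	 */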
return 0; 5406 } 5407 5408 RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd); 5409 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map); 5410 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci"); 5411 RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd); 5412 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map); 5413 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci"); 5414
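/*
 * Illustrative sketch only, not part of the driver: how an application
 * would normally reach eth_igb_rss_reta_update() through the public
 * ethdev API.  port_id and nb_rx_queues are assumed to come from the
 * application; the spread of queues across the 128-entry table is an
 * arbitrary example.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
 *						   RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *	int ret;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *		reta_conf[idx].mask |= 1ULL << shift;
 *		reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf,
 *					  ETH_RSS_RETA_SIZE_128);
 */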