/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#include "igb_regs.h"

/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH	32

#define IGB_DEFAULT_RX_PTHRESH	((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_DEFAULT_RX_HTHRESH	8
#define IGB_DEFAULT_RX_WTHRESH	((hw->mac.type == e1000_82576) ? 1 : 4)

#define IGB_DEFAULT_TX_PTHRESH	((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_DEFAULT_TX_HTHRESH	1
#define IGB_DEFAULT_TX_WTHRESH	((hw->mac.type == e1000_82576) ? 1 : 16)

/* Bit shift and mask */
#define IGB_4_BIT_WIDTH	(CHAR_BIT / 2)
#define IGB_4_BIT_MASK	RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH	CHAR_BIT
#define IGB_8_BIT_MASK	UINT8_MAX

/* Additional timesync values. */
#define E1000_CYCLECOUNTER_MASK		0xffffffffffffffffULL
#define E1000_ETQF_FILTER_1588		3
#define IGB_82576_TSYNC_SHIFT		16
#define E1000_INCPERIOD_82576		(1 << E1000_TIMINCA_16NS_SHIFT)
#define E1000_INCVALUE_82576		(16 << IGB_82576_TSYNC_SHIFT)
#define E1000_TSAUXC_DISABLE_SYSTIME	0x80000000

#define E1000_VTIVAR_MISC		0x01740
#define E1000_VTIVAR_MISC_MASK		0xFF
#define E1000_VTIVAR_VALID		0x80
#define E1000_VTIVAR_MISC_MAILBOX	0
#define E1000_VTIVAR_MISC_INTR_MASK	0x3

/* External VLAN Enable bit mask */
#define E1000_CTRL_EXT_EXT_VLAN	(1 << 26)

/* External VLAN Ether Type bit mask and shift */
#define E1000_VET_VET_EXT		0xFFFF0000
#define E1000_VET_VET_EXT_SHIFT	16

/* MSI-X other interrupt vector */
#define IGB_MSIX_OTHER_INTR_VEC	0

static int eth_igb_configure(struct rte_eth_dev *dev);
static int eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static int eth_igb_reset(struct rte_eth_dev *dev);
static int eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igb_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int eth_igb_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned n);
static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
		unsigned int limit);
static int eth_igb_stats_reset(struct rte_eth_dev *dev);
static int eth_igb_xstats_reset(struct rte_eth_dev *dev);
static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
		char *fw_version, size_t fw_size);
static int eth_igb_infos_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igbvf_infos_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
		struct rte_intr_handle *handle);
static void eth_igb_interrupt_handler(void *param);
static int igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid_id);
static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct rte_eth_dev *dev);
static int igb_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_igb_rar_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static int igbvf_promiscuous_enable(struct rte_eth_dev *dev);
static int igbvf_promiscuous_disable(struct rte_eth_dev *dev);
static int igbvf_allmulticast_enable(struct rte_eth_dev *dev);
static int igbvf_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned n);
static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned limit);
static int eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);

static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
		struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
		struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
		struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type filter_type,
		enum rte_filter_op filter_op,
		void *arg);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_get_module_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_module_info *modinfo);
static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *info);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp,
		uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp);
static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int igb_timesync_read_time(struct rte_eth_dev *dev,
		struct timespec *timestamp);
static int igb_timesync_write_time(struct rte_eth_dev *dev,
		const struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
		uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
		uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
static int igb_filter_restore(struct rte_eth_dev *dev);

/*
 * Macro to update VF statistics for registers that are not "cleared on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)			\
{							\
	u32 latest = E1000_READ_REG(hw, reg);		\
	cur += (latest - last) & UINT_MAX;		\
	last = latest;					\
}
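/*
 * Illustration (added note, not from the original sources): because "latest"
 * and "last" are 32-bit values, the subtraction above is performed modulo
 * 2^32, so a hardware counter wrap is handled transparently. For example,
 * with last = 0xFFFFFFF0 and a new reading of latest = 0x00000010,
 * (latest - last) & UINT_MAX yields 0x20, i.e. the 32 events counted since
 * the previous read are accumulated into "cur".
 */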
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82576 & I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
	.nb_seg_max = IGB_TX_MAX_SEG,
	.nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
};

static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure = eth_igb_configure,
	.dev_start = eth_igb_start,
	.dev_stop = eth_igb_stop,
	.dev_set_link_up = eth_igb_dev_set_link_up,
	.dev_set_link_down = eth_igb_dev_set_link_down,
	.dev_close = eth_igb_close,
	.dev_reset = eth_igb_reset,
	.promiscuous_enable = eth_igb_promiscuous_enable,
	.promiscuous_disable = eth_igb_promiscuous_disable,
	.allmulticast_enable = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igb_stats_get,
	.xstats_get = eth_igb_xstats_get,
	.xstats_get_by_id = eth_igb_xstats_get_by_id,
	.xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
	.xstats_get_names = eth_igb_xstats_get_names,
	.stats_reset = eth_igb_stats_reset,
	.xstats_reset = eth_igb_xstats_reset,
	.fw_version_get = eth_igb_fw_version_get,
	.dev_infos_get = eth_igb_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.mtu_set = eth_igb_mtu_set,
	.vlan_filter_set = eth_igb_vlan_filter_set,
	.vlan_tpid_set = eth_igb_vlan_tpid_set,
	.vlan_offload_set = eth_igb_vlan_offload_set,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
	.rx_queue_release = eth_igb_rx_queue_release,
	.rx_queue_count = eth_igb_rx_queue_count,
	.rx_descriptor_done = eth_igb_rx_descriptor_done,
	.rx_descriptor_status = eth_igb_rx_descriptor_status,
	.tx_descriptor_status = eth_igb_tx_descriptor_status,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.tx_done_cleanup = eth_igb_tx_done_cleanup,
	.dev_led_on = eth_igb_led_on,
	.dev_led_off = eth_igb_led_off,
	.flow_ctrl_get = eth_igb_flow_ctrl_get,
	.flow_ctrl_set = eth_igb_flow_ctrl_set,
	.mac_addr_add = eth_igb_rar_set,
	.mac_addr_remove = eth_igb_rar_clear,
	.mac_addr_set = eth_igb_default_mac_addr_set,
	.reta_update = eth_igb_rss_reta_update,
	.reta_query = eth_igb_rss_reta_query,
	.rss_hash_update = eth_igb_rss_hash_update,
	.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
	.filter_ctrl = eth_igb_filter_ctrl,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.timesync_enable = igb_timesync_enable,
	.timesync_disable = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
	.get_reg = eth_igb_get_regs,
	.get_eeprom_length = eth_igb_get_eeprom_length,
	.get_eeprom = eth_igb_get_eeprom,
	.set_eeprom = eth_igb_set_eeprom,
	.get_module_info = eth_igb_get_module_info,
	.get_module_eeprom = eth_igb_get_module_eeprom,
	.timesync_adjust_time = igb_timesync_adjust_time,
	.timesync_read_time = igb_timesync_read_time,
	.timesync_write_time = igb_timesync_write_time,
};

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure = igbvf_dev_configure,
	.dev_start = igbvf_dev_start,
	.dev_stop = igbvf_dev_stop,
	.dev_close = igbvf_dev_close,
	.promiscuous_enable = igbvf_promiscuous_enable,
	.promiscuous_disable = igbvf_promiscuous_disable,
	.allmulticast_enable = igbvf_allmulticast_enable,
	.allmulticast_disable = igbvf_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igbvf_stats_get,
	.xstats_get = eth_igbvf_xstats_get,
	.xstats_get_names = eth_igbvf_xstats_get_names,
	.stats_reset = eth_igbvf_stats_reset,
	.xstats_reset = eth_igbvf_stats_reset,
	.vlan_filter_set = igbvf_vlan_filter_set,
	.dev_infos_get = eth_igbvf_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_release = eth_igb_rx_queue_release,
	.rx_descriptor_done = eth_igb_rx_descriptor_done,
	.rx_descriptor_status = eth_igb_rx_descriptor_status,
	.tx_descriptor_status = eth_igb_tx_descriptor_status,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.mac_addr_set = igbvf_default_mac_addr_set,
	.get_reg = igbvf_get_regs,
};

/* store statistics names and their offsets in the stats structure */
struct rte_igb_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
	{"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
	{"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
	{"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
	{"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
	{"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
	{"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
	{"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},

	{"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
};

#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
		sizeof(rte_igb_stats_strings[0]))
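/*
 * Illustrative use of the name/offset pairs above (a sketch added for
 * clarity, not part of the original code): each entry ties an xstat name to
 * the corresponding field inside struct e1000_hw_stats, so a counter can be
 * fetched generically, e.g.
 *
 *	const struct rte_igb_xstats_name_off *e = &rte_igb_stats_strings[i];
 *	uint64_t value = *(uint64_t *)(((char *)hw_stats) + e->offset);
 *
 * where "hw_stats" is assumed to point at the per-port e1000_hw_stats area.
 */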
static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
	{"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
	{"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
	{"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
	{"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
};

#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
		sizeof(rte_igbvf_stats_strings[0]))

static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
	    dev->data->dev_conf.intr_conf.lsc != 0) {
		E1000_WRITE_REG(hw, E1000_EIMS, 1 << IGB_MSIX_OTHER_INTR_VEC);
	}

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
	    dev->data->dev_conf.intr_conf.lsc != 0) {
		E1000_WRITE_REG(hw, E1000_EIMC, 1 << IGB_MSIX_OTHER_INTR_VEC);
	}

	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}

static inline void
igbvf_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* only for mailbox */
	E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_FLUSH(hw);
}
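/*
 * Added note: EIAM, EIAC and EIMS are the extended interrupt auto-mask,
 * auto-clear and mask-set registers. Bit 0, used above, corresponds to the
 * mailbox ("other cause") vector, which is the only interrupt the VF relies
 * on here since RX/TX are handled by polling.
 */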
/* only for mailbox now. If RX/TX needed, should extend this function. */
static void
igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	/* mailbox */
	tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
	tmp |= E1000_VTIVAR_VALID;
	E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
}

static void
eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure VF other cause ivar */
	igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
}

static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}

static void
igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}

static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is the
		 * case, it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if the lock cannot be taken it is due to an improper
		 * lock of the semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}

/* Remove all ntuple filters of the device */
static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
		TAILQ_REMOVE(&filter_info->twotuple_list,
			p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;

	return 0;
}

/* Remove all flex filters of the device */
static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_flex_filter *p_flex;

	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	return 0;
}

static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	uint32_t ctrl_ext;

	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 * Only check we don't need a different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev, pci_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR,
			"PHY reset is blocked due to SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		eth_dev->data->port_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
		eth_igb_interrupt_handler,
		(void *)eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igb_intr_enable(eth_dev);

	eth_igb_dev_set_link_down(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
		sizeof(struct e1000_filter_info));

	TAILQ_INIT(&filter_info->flex_list);
	TAILQ_INIT(&filter_info->twotuple_list);
	TAILQ_INIT(&filter_info->fivetuple_list);

	TAILQ_INIT(&igb_filter_ntuple_list);
	TAILQ_INIT(&igb_filter_ethertype_list);
	TAILQ_INIT(&igb_filter_syn_list);
	TAILQ_INIT(&igb_filter_flex_list);
	TAILQ_INIT(&igb_filter_rss_list);
	TAILQ_INIT(&igb_flow_list);

	return 0;

err_late:
	igb_hw_control_release(hw);

	return error;
}

static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	eth_igb_close(eth_dev);

	return 0;
}

/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;
	struct rte_ether_addr *perm_addr =
		(struct rte_ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 * Only check we don't need a different RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Generate a random MAC address, if none was assigned by PF. */
	if (rte_is_zero_ether_addr(perm_addr)) {
		rte_eth_random_addr(perm_addr->addr_bytes);
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			"%02x:%02x:%02x:%02x:%02x:%02x",
			perm_addr->addr_bytes[0],
			perm_addr->addr_bytes[1],
			perm_addr->addr_bytes[2],
			perm_addr->addr_bytes[3],
			perm_addr->addr_bytes[4],
			perm_addr->addr_bytes[5]);
	}

	diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
	if (diag) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return diag;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		"mac.type=%s",
		eth_dev->data->port_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id, "igb_mac_82576_vf");

	intr_handle = &pci_dev->intr_handle;
	rte_intr_callback_register(intr_handle,
		eth_igbvf_interrupt_handler, eth_dev);

	return 0;
}

static int
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	igbvf_dev_close(eth_dev);

	return 0;
}

static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igb_dev_init);
}

static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
}

static struct rte_pci_driver rte_igb_pmd = {
	.id_table = pci_id_igb_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igb_pci_probe,
	.remove = eth_igb_pci_remove,
};
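/*
 * Registration sketch (an assumption based on the usual DPDK PMD layout; the
 * registration is not shown in this excerpt): the driver structure above is
 * typically registered with the PCI bus elsewhere in this file, e.g.
 *
 *	RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
 *	RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
 */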
static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igbvf_dev_init);
}

static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
}

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_igbvf_pmd = {
	.id_table = pci_id_igbvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_igbvf_pci_probe,
	.remove = eth_igbvf_pci_remove,
};

static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always uses VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
	    tx_mq_mode == ETH_MQ_TX_DCB ||
	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}
	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* Check multi-queue mode.
		 * To not break existing software, we accept ETH_MQ_RX_NONE as
		 * this might be used to turn off VLAN filtering.
		 */

		if (rx_mq_mode == ETH_MQ_RX_NONE ||
		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
		} else {
			/* Only support one queue on VFs.
			 * RSS together with SRIOV is not supported.
			 */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					rx_mq_mode);
			return -EINVAL;
		}
		/* TX mode is not used here, so the mode might be ignored. */
		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(WARNING, "SRIOV is active,"
					" TX mode %d is not supported. "
					" Driver will behave as %d mode.",
					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
		}

		/* check valid queue number */
		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" only support one queue on VFs.");
			return -EINVAL;
		}
	} else {
		/* To not break software that sets an invalid mode, only
		 * display a warning if an invalid mode is used.
		 */
		if (rx_mq_mode != ETH_MQ_RX_NONE &&
		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
		    rx_mq_mode != ETH_MQ_RX_RSS) {
			/* RSS together with VMDq not supported */
			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				rx_mq_mode);
			return -EINVAL;
		}

		if (tx_mq_mode != ETH_MQ_TX_NONE &&
		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
					" Since the TX mode is meaningless in"
					" this driver, it is ignored.",
					tx_mq_mode);
		}
	}
	return 0;
}

static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = igb_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
			ret);
		return ret;
	}

	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}
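/*
 * Illustrative application-side usage (a sketch added for clarity, not part
 * of the driver; the port id and queue counts are assumptions): a single-port
 * RSS configuration validated by igb_check_mq_mode() above could look like
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		.rx_adv_conf.rss_conf = { .rss_hf = ETH_RSS_IP },
 *	};
 *	ret = rte_eth_dev_configure(port_id, 4, 4, &conf);
 *
 * which reaches eth_igb_configure() through the generic ethdev layer.
 */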
static void
eth_igb_rxtx_control(struct rte_eth_dev *dev,
		bool enable)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tctl, rctl;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	rctl = E1000_READ_REG(hw, E1000_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= E1000_TCTL_EN;
		rctl |= E1000_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~E1000_TCTL_EN;
		rctl &= ~E1000_RCTL_EN;
	}
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);
}

static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t ctrl_ext;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igb_dev_set_link_up(dev);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	E1000_WRITE_REG(hw, E1000_VET,
			RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				" intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for RX interrupts */
	eth_igb_configure_msix_intr(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
			ETH_VLAN_EXTEND_MASK;
	ret = eth_igb_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
		igb_dev_clear_queues(dev);
		return ret;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always uses VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
	    (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211)) {
		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		hw->mac.autoneg = 1;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			eth_igb_lsc_interrupt_setup(dev, TRUE);
		else
			eth_igb_lsc_interrupt_setup(dev, FALSE);
	} else {
		rte_intr_callback_unregister(intr_handle,
				eth_igb_interrupt_handler,
				(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				"LSC interrupt cannot be enabled:"
				" no interrupt multiplexing support");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		eth_igb_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	/* restore all types filter */
	igb_filter_restore(dev);

	eth_igb_rxtx_control(dev, true);
	eth_igb_link_update(dev, 0);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		dev->data->dev_conf.link_speeds, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return -EINVAL;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);

	if (adapter->stopped)
		return;

	eth_igb_rxtx_control(dev, false);

	igb_intr_disable(dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect if PHY reset is not blocked */
	if (hw->mac.type >= e1000_82580 &&
	    (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	eth_igb_dev_set_link_down(dev);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
				eth_igb_interrupt_handler,
				(void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->stopped = true;
}

static int
eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_up_phy(hw);
	else
		e1000_power_up_fiber_serdes_link(hw);

	return 0;
}

static int
eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	return 0;
}

static void
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	eth_igb_stop(dev);

	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect if PHY reset is not blocked */
	if (hw->mac.type >= e1000_82580 &&
	    (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_free_queues(dev);

	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* Reset any pending lock */
	igb_reset_swfw_lock(hw);

	/* uninitialize PF if max_vfs not zero */
	igb_pf_host_uninit(dev);

	rte_intr_callback_unregister(intr_handle,
			eth_igb_interrupt_handler, dev);

	/* clear the SYN filter info */
	filter_info->syn_info = 0;

	/* clear the ethertype filters info */
	filter_info->ethertype_mask = 0;
	memset(filter_info->ethertype_filters, 0,
		E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));

	/* clear the rss filter info */
	memset(&filter_info->rss_info, 0,
		sizeof(struct igb_rte_flow_rss_conf));

	/* remove all ntuple filters of the device */
	igb_ntuple_filter_uninit(dev);

	/* remove all flex filters of the device */
	igb_flex_filter_uninit(dev);

	/* clear all the filters list */
	igb_filterlist_flush(dev);
}
1610 */ 1611 static int 1612 eth_igb_reset(struct rte_eth_dev *dev) 1613 { 1614 int ret; 1615 1616 /* When a DPDK PMD PF begin to reset PF port, it should notify all 1617 * its VF to make them align with it. The detailed notification 1618 * mechanism is PMD specific and is currently not implemented. 1619 * To avoid unexpected behavior in VF, currently reset of PF with 1620 * SR-IOV activation is not supported. It might be supported later. 1621 */ 1622 if (dev->data->sriov.active) 1623 return -ENOTSUP; 1624 1625 ret = eth_igb_dev_uninit(dev); 1626 if (ret) 1627 return ret; 1628 1629 ret = eth_igb_dev_init(dev); 1630 1631 return ret; 1632 } 1633 1634 1635 static int 1636 igb_get_rx_buffer_size(struct e1000_hw *hw) 1637 { 1638 uint32_t rx_buf_size; 1639 if (hw->mac.type == e1000_82576) { 1640 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10; 1641 } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) { 1642 /* PBS needs to be translated according to a lookup table */ 1643 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf); 1644 rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size); 1645 rx_buf_size = (rx_buf_size << 10); 1646 } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { 1647 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10; 1648 } else { 1649 rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10; 1650 } 1651 1652 return rx_buf_size; 1653 } 1654 1655 /********************************************************************* 1656 * 1657 * Initialize the hardware 1658 * 1659 **********************************************************************/ 1660 static int 1661 igb_hardware_init(struct e1000_hw *hw) 1662 { 1663 uint32_t rx_buf_size; 1664 int diag; 1665 1666 /* Let the firmware know the OS is in control */ 1667 igb_hw_control_acquire(hw); 1668 1669 /* 1670 * These parameters control the automatic generation (Tx) and 1671 * response (Rx) to Ethernet PAUSE frames. 1672 * - High water mark should allow for at least two standard size (1518) 1673 * frames to be received after sending an XOFF. 1674 * - Low water mark works best when it is very near the high water mark. 1675 * This allows the receiver to restart by sending XON when it has 1676 * drained a bit. Here we use an arbitrary value of 1500 which will 1677 * restart after one full frame is pulled from the buffer. There 1678 * could be several smaller frames in the buffer and if so they will 1679 * not trigger the XON until their total number reduces the buffer 1680 * by 1500. 1681 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
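 *   For example, with a 64 kB Rx packet buffer the code below yields
 *   high_water = 65536 - 2 * 1518 = 62500 bytes and
 *   low_water  = 62500 - 1500 = 61000 bytes.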
1682 */ 1683 rx_buf_size = igb_get_rx_buffer_size(hw); 1684 1685 hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2); 1686 hw->fc.low_water = hw->fc.high_water - 1500; 1687 hw->fc.pause_time = IGB_FC_PAUSE_TIME; 1688 hw->fc.send_xon = 1; 1689 1690 /* Set Flow control, use the tunable location if sane */ 1691 if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4)) 1692 hw->fc.requested_mode = igb_fc_setting; 1693 else 1694 hw->fc.requested_mode = e1000_fc_none; 1695 1696 /* Issue a global reset */ 1697 igb_pf_reset_hw(hw); 1698 E1000_WRITE_REG(hw, E1000_WUC, 0); 1699 1700 diag = e1000_init_hw(hw); 1701 if (diag < 0) 1702 return diag; 1703 1704 E1000_WRITE_REG(hw, E1000_VET, 1705 RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN); 1706 e1000_get_phy_info(hw); 1707 e1000_check_for_link(hw); 1708 1709 return 0; 1710 } 1711 1712 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */ 1713 static void 1714 igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats) 1715 { 1716 int pause_frames; 1717 1718 uint64_t old_gprc = stats->gprc; 1719 uint64_t old_gptc = stats->gptc; 1720 uint64_t old_tpr = stats->tpr; 1721 uint64_t old_tpt = stats->tpt; 1722 uint64_t old_rpthc = stats->rpthc; 1723 uint64_t old_hgptc = stats->hgptc; 1724 1725 if(hw->phy.media_type == e1000_media_type_copper || 1726 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 1727 stats->symerrs += 1728 E1000_READ_REG(hw,E1000_SYMERRS); 1729 stats->sec += E1000_READ_REG(hw, E1000_SEC); 1730 } 1731 1732 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 1733 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 1734 stats->scc += E1000_READ_REG(hw, E1000_SCC); 1735 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 1736 1737 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 1738 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 1739 stats->colc += E1000_READ_REG(hw, E1000_COLC); 1740 stats->dc += E1000_READ_REG(hw, E1000_DC); 1741 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 1742 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 1743 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 1744 /* 1745 ** For watchdog management we need to know if we have been 1746 ** paused during the last interval, so capture that here. 1747 */ 1748 pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 1749 stats->xoffrxc += pause_frames; 1750 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 1751 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 1752 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 1753 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 1754 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 1755 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 1756 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 1757 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 1758 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 1759 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 1760 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 1761 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 1762 1763 /* For the 64-bit byte counters the low dword must be read first. 
*/ 1764 /* Both registers clear on the read of the high dword */ 1765 1766 /* Workaround CRC bytes included in size, take away 4 bytes/packet */ 1767 stats->gorc += E1000_READ_REG(hw, E1000_GORCL); 1768 stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 1769 stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN; 1770 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); 1771 stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 1772 stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN; 1773 1774 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 1775 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 1776 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 1777 stats->roc += E1000_READ_REG(hw, E1000_ROC); 1778 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 1779 1780 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 1781 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 1782 1783 stats->tor += E1000_READ_REG(hw, E1000_TORL); 1784 stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32); 1785 stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 1786 stats->tot += E1000_READ_REG(hw, E1000_TOTL); 1787 stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32); 1788 stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN; 1789 1790 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 1791 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 1792 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 1793 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 1794 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 1795 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 1796 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 1797 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 1798 1799 /* Interrupt Counts */ 1800 1801 stats->iac += E1000_READ_REG(hw, E1000_IAC); 1802 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 1803 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 1804 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 1805 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 1806 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 1807 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 1808 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 1809 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 1810 1811 /* Host to Card Statistics */ 1812 1813 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 1814 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 1815 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 1816 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 1817 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 1818 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 1819 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 1820 stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL); 1821 stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32); 1822 stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN; 1823 stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL); 1824 stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32); 1825 stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN; 1826 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 1827 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 1828 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 1829 1830 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 1831 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 1832 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 1833 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 1834 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 1835 
stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 1836 } 1837 1838 static int 1839 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 1840 { 1841 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1842 struct e1000_hw_stats *stats = 1843 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1844 1845 igb_read_stats_registers(hw, stats); 1846 1847 if (rte_stats == NULL) 1848 return -EINVAL; 1849 1850 /* Rx Errors */ 1851 rte_stats->imissed = stats->mpc; 1852 rte_stats->ierrors = stats->crcerrs + 1853 stats->rlec + stats->ruc + stats->roc + 1854 stats->rxerrc + stats->algnerrc + stats->cexterr; 1855 1856 /* Tx Errors */ 1857 rte_stats->oerrors = stats->ecol + stats->latecol; 1858 1859 rte_stats->ipackets = stats->gprc; 1860 rte_stats->opackets = stats->gptc; 1861 rte_stats->ibytes = stats->gorc; 1862 rte_stats->obytes = stats->gotc; 1863 return 0; 1864 } 1865 1866 static int 1867 eth_igb_stats_reset(struct rte_eth_dev *dev) 1868 { 1869 struct e1000_hw_stats *hw_stats = 1870 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1871 1872 /* HW registers are cleared on read */ 1873 eth_igb_stats_get(dev, NULL); 1874 1875 /* Reset software totals */ 1876 memset(hw_stats, 0, sizeof(*hw_stats)); 1877 1878 return 0; 1879 } 1880 1881 static int 1882 eth_igb_xstats_reset(struct rte_eth_dev *dev) 1883 { 1884 struct e1000_hw_stats *stats = 1885 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1886 1887 /* HW registers are cleared on read */ 1888 eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS); 1889 1890 /* Reset software totals */ 1891 memset(stats, 0, sizeof(*stats)); 1892 1893 return 0; 1894 } 1895 1896 static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1897 struct rte_eth_xstat_name *xstats_names, 1898 __rte_unused unsigned int size) 1899 { 1900 unsigned i; 1901 1902 if (xstats_names == NULL) 1903 return IGB_NB_XSTATS; 1904 1905 /* Note: limit checked in rte_eth_xstats_names() */ 1906 1907 for (i = 0; i < IGB_NB_XSTATS; i++) { 1908 strlcpy(xstats_names[i].name, rte_igb_stats_strings[i].name, 1909 sizeof(xstats_names[i].name)); 1910 } 1911 1912 return IGB_NB_XSTATS; 1913 } 1914 1915 static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev, 1916 struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, 1917 unsigned int limit) 1918 { 1919 unsigned int i; 1920 1921 if (!ids) { 1922 if (xstats_names == NULL) 1923 return IGB_NB_XSTATS; 1924 1925 for (i = 0; i < IGB_NB_XSTATS; i++) 1926 strlcpy(xstats_names[i].name, 1927 rte_igb_stats_strings[i].name, 1928 sizeof(xstats_names[i].name)); 1929 1930 return IGB_NB_XSTATS; 1931 1932 } else { 1933 struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS]; 1934 1935 eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL, 1936 IGB_NB_XSTATS); 1937 1938 for (i = 0; i < limit; i++) { 1939 if (ids[i] >= IGB_NB_XSTATS) { 1940 PMD_INIT_LOG(ERR, "id value isn't valid"); 1941 return -1; 1942 } 1943 strcpy(xstats_names[i].name, 1944 xstats_names_copy[ids[i]].name); 1945 } 1946 return limit; 1947 } 1948 } 1949 1950 static int 1951 eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1952 unsigned n) 1953 { 1954 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1955 struct e1000_hw_stats *hw_stats = 1956 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1957 unsigned i; 1958 1959 if (n < IGB_NB_XSTATS) 1960 return IGB_NB_XSTATS; 1961 1962 igb_read_stats_registers(hw, hw_stats); 1963 1964 /* If this is a reset xstats is NULL, and we have cleared 
the 1965 * registers by reading them. 1966 */ 1967 if (!xstats) 1968 return 0; 1969 1970 /* Extended stats */ 1971 for (i = 0; i < IGB_NB_XSTATS; i++) { 1972 xstats[i].id = i; 1973 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 1974 rte_igb_stats_strings[i].offset); 1975 } 1976 1977 return IGB_NB_XSTATS; 1978 } 1979 1980 static int 1981 eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1982 uint64_t *values, unsigned int n) 1983 { 1984 unsigned int i; 1985 1986 if (!ids) { 1987 struct e1000_hw *hw = 1988 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1989 struct e1000_hw_stats *hw_stats = 1990 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1991 1992 if (n < IGB_NB_XSTATS) 1993 return IGB_NB_XSTATS; 1994 1995 igb_read_stats_registers(hw, hw_stats); 1996 1997 /* If this is a reset xstats is NULL, and we have cleared the 1998 * registers by reading them. 1999 */ 2000 if (!values) 2001 return 0; 2002 2003 /* Extended stats */ 2004 for (i = 0; i < IGB_NB_XSTATS; i++) 2005 values[i] = *(uint64_t *)(((char *)hw_stats) + 2006 rte_igb_stats_strings[i].offset); 2007 2008 return IGB_NB_XSTATS; 2009 2010 } else { 2011 uint64_t values_copy[IGB_NB_XSTATS]; 2012 2013 eth_igb_xstats_get_by_id(dev, NULL, values_copy, 2014 IGB_NB_XSTATS); 2015 2016 for (i = 0; i < n; i++) { 2017 if (ids[i] >= IGB_NB_XSTATS) { 2018 PMD_INIT_LOG(ERR, "id value isn't valid"); 2019 return -1; 2020 } 2021 values[i] = values_copy[ids[i]]; 2022 } 2023 return n; 2024 } 2025 } 2026 2027 static void 2028 igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats) 2029 { 2030 /* Good Rx packets, include VF loopback */ 2031 UPDATE_VF_STAT(E1000_VFGPRC, 2032 hw_stats->last_gprc, hw_stats->gprc); 2033 2034 /* Good Rx octets, include VF loopback */ 2035 UPDATE_VF_STAT(E1000_VFGORC, 2036 hw_stats->last_gorc, hw_stats->gorc); 2037 2038 /* Good Tx packets, include VF loopback */ 2039 UPDATE_VF_STAT(E1000_VFGPTC, 2040 hw_stats->last_gptc, hw_stats->gptc); 2041 2042 /* Good Tx octets, include VF loopback */ 2043 UPDATE_VF_STAT(E1000_VFGOTC, 2044 hw_stats->last_gotc, hw_stats->gotc); 2045 2046 /* Rx Multicst packets */ 2047 UPDATE_VF_STAT(E1000_VFMPRC, 2048 hw_stats->last_mprc, hw_stats->mprc); 2049 2050 /* Good Rx loopback packets */ 2051 UPDATE_VF_STAT(E1000_VFGPRLBC, 2052 hw_stats->last_gprlbc, hw_stats->gprlbc); 2053 2054 /* Good Rx loopback octets */ 2055 UPDATE_VF_STAT(E1000_VFGORLBC, 2056 hw_stats->last_gorlbc, hw_stats->gorlbc); 2057 2058 /* Good Tx loopback packets */ 2059 UPDATE_VF_STAT(E1000_VFGPTLBC, 2060 hw_stats->last_gptlbc, hw_stats->gptlbc); 2061 2062 /* Good Tx loopback octets */ 2063 UPDATE_VF_STAT(E1000_VFGOTLBC, 2064 hw_stats->last_gotlbc, hw_stats->gotlbc); 2065 } 2066 2067 static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 2068 struct rte_eth_xstat_name *xstats_names, 2069 __rte_unused unsigned limit) 2070 { 2071 unsigned i; 2072 2073 if (xstats_names != NULL) 2074 for (i = 0; i < IGBVF_NB_XSTATS; i++) { 2075 strlcpy(xstats_names[i].name, 2076 rte_igbvf_stats_strings[i].name, 2077 sizeof(xstats_names[i].name)); 2078 } 2079 return IGBVF_NB_XSTATS; 2080 } 2081 2082 static int 2083 eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 2084 unsigned n) 2085 { 2086 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2087 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) 2088 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2089 unsigned i; 2090 2091 if (n < IGBVF_NB_XSTATS) 2092 return 
IGBVF_NB_XSTATS; 2093 2094 igbvf_read_stats_registers(hw, hw_stats); 2095 2096 if (!xstats) 2097 return 0; 2098 2099 for (i = 0; i < IGBVF_NB_XSTATS; i++) { 2100 xstats[i].id = i; 2101 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 2102 rte_igbvf_stats_strings[i].offset); 2103 } 2104 2105 return IGBVF_NB_XSTATS; 2106 } 2107 2108 static int 2109 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 2110 { 2111 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2112 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) 2113 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2114 2115 igbvf_read_stats_registers(hw, hw_stats); 2116 2117 if (rte_stats == NULL) 2118 return -EINVAL; 2119 2120 rte_stats->ipackets = hw_stats->gprc; 2121 rte_stats->ibytes = hw_stats->gorc; 2122 rte_stats->opackets = hw_stats->gptc; 2123 rte_stats->obytes = hw_stats->gotc; 2124 return 0; 2125 } 2126 2127 static int 2128 eth_igbvf_stats_reset(struct rte_eth_dev *dev) 2129 { 2130 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*) 2131 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2132 2133 /* Sync HW register to the last stats */ 2134 eth_igbvf_stats_get(dev, NULL); 2135 2136 /* reset HW current stats*/ 2137 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) - 2138 offsetof(struct e1000_vf_stats, gprc)); 2139 2140 return 0; 2141 } 2142 2143 static int 2144 eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 2145 size_t fw_size) 2146 { 2147 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2148 struct e1000_fw_version fw; 2149 int ret; 2150 2151 e1000_get_fw_version(hw, &fw); 2152 2153 switch (hw->mac.type) { 2154 case e1000_i210: 2155 case e1000_i211: 2156 if (!(e1000_get_flash_presence_i210(hw))) { 2157 ret = snprintf(fw_version, fw_size, 2158 "%2d.%2d-%d", 2159 fw.invm_major, fw.invm_minor, 2160 fw.invm_img_type); 2161 break; 2162 } 2163 /* fall through */ 2164 default: 2165 /* if option rom is valid, display its version too */ 2166 if (fw.or_valid) { 2167 ret = snprintf(fw_version, fw_size, 2168 "%d.%d, 0x%08x, %d.%d.%d", 2169 fw.eep_major, fw.eep_minor, fw.etrack_id, 2170 fw.or_major, fw.or_build, fw.or_patch); 2171 /* no option rom */ 2172 } else { 2173 if (fw.etrack_id != 0X0000) { 2174 ret = snprintf(fw_version, fw_size, 2175 "%d.%d, 0x%08x", 2176 fw.eep_major, fw.eep_minor, 2177 fw.etrack_id); 2178 } else { 2179 ret = snprintf(fw_version, fw_size, 2180 "%d.%d.%d", 2181 fw.eep_major, fw.eep_minor, 2182 fw.eep_build); 2183 } 2184 } 2185 break; 2186 } 2187 2188 ret += 1; /* add the size of '\0' */ 2189 if (fw_size < (u32)ret) 2190 return ret; 2191 else 2192 return 0; 2193 } 2194 2195 static int 2196 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 2197 { 2198 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2199 2200 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 2201 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. 
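 * (0x3FFF is 16383 bytes; the max_mtu reported below is derived from it
 * by subtracting E1000_ETH_OVERHEAD.)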
*/ 2202 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 2203 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); 2204 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | 2205 dev_info->rx_queue_offload_capa; 2206 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); 2207 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | 2208 dev_info->tx_queue_offload_capa; 2209 2210 switch (hw->mac.type) { 2211 case e1000_82575: 2212 dev_info->max_rx_queues = 4; 2213 dev_info->max_tx_queues = 4; 2214 dev_info->max_vmdq_pools = 0; 2215 break; 2216 2217 case e1000_82576: 2218 dev_info->max_rx_queues = 16; 2219 dev_info->max_tx_queues = 16; 2220 dev_info->max_vmdq_pools = ETH_8_POOLS; 2221 dev_info->vmdq_queue_num = 16; 2222 break; 2223 2224 case e1000_82580: 2225 dev_info->max_rx_queues = 8; 2226 dev_info->max_tx_queues = 8; 2227 dev_info->max_vmdq_pools = ETH_8_POOLS; 2228 dev_info->vmdq_queue_num = 8; 2229 break; 2230 2231 case e1000_i350: 2232 dev_info->max_rx_queues = 8; 2233 dev_info->max_tx_queues = 8; 2234 dev_info->max_vmdq_pools = ETH_8_POOLS; 2235 dev_info->vmdq_queue_num = 8; 2236 break; 2237 2238 case e1000_i354: 2239 dev_info->max_rx_queues = 8; 2240 dev_info->max_tx_queues = 8; 2241 break; 2242 2243 case e1000_i210: 2244 dev_info->max_rx_queues = 4; 2245 dev_info->max_tx_queues = 4; 2246 dev_info->max_vmdq_pools = 0; 2247 break; 2248 2249 case e1000_i211: 2250 dev_info->max_rx_queues = 2; 2251 dev_info->max_tx_queues = 2; 2252 dev_info->max_vmdq_pools = 0; 2253 break; 2254 2255 default: 2256 /* Should not happen */ 2257 return -EINVAL; 2258 } 2259 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t); 2260 dev_info->reta_size = ETH_RSS_RETA_SIZE_128; 2261 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL; 2262 2263 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2264 .rx_thresh = { 2265 .pthresh = IGB_DEFAULT_RX_PTHRESH, 2266 .hthresh = IGB_DEFAULT_RX_HTHRESH, 2267 .wthresh = IGB_DEFAULT_RX_WTHRESH, 2268 }, 2269 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, 2270 .rx_drop_en = 0, 2271 .offloads = 0, 2272 }; 2273 2274 dev_info->default_txconf = (struct rte_eth_txconf) { 2275 .tx_thresh = { 2276 .pthresh = IGB_DEFAULT_TX_PTHRESH, 2277 .hthresh = IGB_DEFAULT_TX_HTHRESH, 2278 .wthresh = IGB_DEFAULT_TX_WTHRESH, 2279 }, 2280 .offloads = 0, 2281 }; 2282 2283 dev_info->rx_desc_lim = rx_desc_lim; 2284 dev_info->tx_desc_lim = tx_desc_lim; 2285 2286 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | 2287 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | 2288 ETH_LINK_SPEED_1G; 2289 2290 dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD; 2291 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 2292 2293 return 0; 2294 } 2295 2296 static const uint32_t * 2297 eth_igb_supported_ptypes_get(struct rte_eth_dev *dev) 2298 { 2299 static const uint32_t ptypes[] = { 2300 /* refers to igb_rxd_pkt_info_to_pkt_type() */ 2301 RTE_PTYPE_L2_ETHER, 2302 RTE_PTYPE_L3_IPV4, 2303 RTE_PTYPE_L3_IPV4_EXT, 2304 RTE_PTYPE_L3_IPV6, 2305 RTE_PTYPE_L3_IPV6_EXT, 2306 RTE_PTYPE_L4_TCP, 2307 RTE_PTYPE_L4_UDP, 2308 RTE_PTYPE_L4_SCTP, 2309 RTE_PTYPE_TUNNEL_IP, 2310 RTE_PTYPE_INNER_L3_IPV6, 2311 RTE_PTYPE_INNER_L3_IPV6_EXT, 2312 RTE_PTYPE_INNER_L4_TCP, 2313 RTE_PTYPE_INNER_L4_UDP, 2314 RTE_PTYPE_UNKNOWN 2315 }; 2316 2317 if (dev->rx_pkt_burst == eth_igb_recv_pkts || 2318 dev->rx_pkt_burst == eth_igb_recv_scattered_pkts) 2319 return ptypes; 2320 return NULL; 2321 } 2322 2323 static int 2324 eth_igbvf_infos_get(struct rte_eth_dev *dev, 
struct rte_eth_dev_info *dev_info) 2325 { 2326 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2327 2328 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 2329 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ 2330 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 2331 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | 2332 DEV_TX_OFFLOAD_IPV4_CKSUM | 2333 DEV_TX_OFFLOAD_UDP_CKSUM | 2334 DEV_TX_OFFLOAD_TCP_CKSUM | 2335 DEV_TX_OFFLOAD_SCTP_CKSUM | 2336 DEV_TX_OFFLOAD_TCP_TSO; 2337 switch (hw->mac.type) { 2338 case e1000_vfadapt: 2339 dev_info->max_rx_queues = 2; 2340 dev_info->max_tx_queues = 2; 2341 break; 2342 case e1000_vfadapt_i350: 2343 dev_info->max_rx_queues = 1; 2344 dev_info->max_tx_queues = 1; 2345 break; 2346 default: 2347 /* Should not happen */ 2348 return -EINVAL; 2349 } 2350 2351 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); 2352 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | 2353 dev_info->rx_queue_offload_capa; 2354 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); 2355 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | 2356 dev_info->tx_queue_offload_capa; 2357 2358 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2359 .rx_thresh = { 2360 .pthresh = IGB_DEFAULT_RX_PTHRESH, 2361 .hthresh = IGB_DEFAULT_RX_HTHRESH, 2362 .wthresh = IGB_DEFAULT_RX_WTHRESH, 2363 }, 2364 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, 2365 .rx_drop_en = 0, 2366 .offloads = 0, 2367 }; 2368 2369 dev_info->default_txconf = (struct rte_eth_txconf) { 2370 .tx_thresh = { 2371 .pthresh = IGB_DEFAULT_TX_PTHRESH, 2372 .hthresh = IGB_DEFAULT_TX_HTHRESH, 2373 .wthresh = IGB_DEFAULT_TX_WTHRESH, 2374 }, 2375 .offloads = 0, 2376 }; 2377 2378 dev_info->rx_desc_lim = rx_desc_lim; 2379 dev_info->tx_desc_lim = tx_desc_lim; 2380 2381 return 0; 2382 } 2383 2384 /* return 0 means link status changed, -1 means not changed */ 2385 static int 2386 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) 2387 { 2388 struct e1000_hw *hw = 2389 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2390 struct rte_eth_link link; 2391 int link_check, count; 2392 2393 link_check = 0; 2394 hw->mac.get_link_status = 1; 2395 2396 /* possible wait-to-complete in up to 9 seconds */ 2397 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) { 2398 /* Read the real link status */ 2399 switch (hw->phy.media_type) { 2400 case e1000_media_type_copper: 2401 /* Do the work to read phy */ 2402 e1000_check_for_link(hw); 2403 link_check = !hw->mac.get_link_status; 2404 break; 2405 2406 case e1000_media_type_fiber: 2407 e1000_check_for_link(hw); 2408 link_check = (E1000_READ_REG(hw, E1000_STATUS) & 2409 E1000_STATUS_LU); 2410 break; 2411 2412 case e1000_media_type_internal_serdes: 2413 e1000_check_for_link(hw); 2414 link_check = hw->mac.serdes_has_link; 2415 break; 2416 2417 /* VF device is type_unknown */ 2418 case e1000_media_type_unknown: 2419 eth_igbvf_link_update(hw); 2420 link_check = !hw->mac.get_link_status; 2421 break; 2422 2423 default: 2424 break; 2425 } 2426 if (link_check || wait_to_complete == 0) 2427 break; 2428 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL); 2429 } 2430 memset(&link, 0, sizeof(link)); 2431 2432 /* Now we check if a transition has happened */ 2433 if (link_check) { 2434 uint16_t duplex, speed; 2435 hw->mac.ops.get_link_up_info(hw, &speed, &duplex); 2436 link.link_duplex = (duplex == FULL_DUPLEX) ? 
2437 ETH_LINK_FULL_DUPLEX : 2438 ETH_LINK_HALF_DUPLEX; 2439 link.link_speed = speed; 2440 link.link_status = ETH_LINK_UP; 2441 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 2442 ETH_LINK_SPEED_FIXED); 2443 } else if (!link_check) { 2444 link.link_speed = 0; 2445 link.link_duplex = ETH_LINK_HALF_DUPLEX; 2446 link.link_status = ETH_LINK_DOWN; 2447 link.link_autoneg = ETH_LINK_FIXED; 2448 } 2449 2450 return rte_eth_linkstatus_set(dev, &link); 2451 } 2452 2453 /* 2454 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. 2455 * For ASF and Pass Through versions of f/w this means 2456 * that the driver is loaded. 2457 */ 2458 static void 2459 igb_hw_control_acquire(struct e1000_hw *hw) 2460 { 2461 uint32_t ctrl_ext; 2462 2463 /* Let firmware know the driver has taken over */ 2464 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2465 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2466 } 2467 2468 /* 2469 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit. 2470 * For ASF and Pass Through versions of f/w this means that the 2471 * driver is no longer loaded. 2472 */ 2473 static void 2474 igb_hw_control_release(struct e1000_hw *hw) 2475 { 2476 uint32_t ctrl_ext; 2477 2478 /* Let firmware taken over control of h/w */ 2479 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2480 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 2481 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2482 } 2483 2484 /* 2485 * Bit of a misnomer, what this really means is 2486 * to enable OS management of the system... aka 2487 * to disable special hardware management features. 2488 */ 2489 static void 2490 igb_init_manageability(struct e1000_hw *hw) 2491 { 2492 if (e1000_enable_mng_pass_thru(hw)) { 2493 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); 2494 uint32_t manc = E1000_READ_REG(hw, E1000_MANC); 2495 2496 /* disable hardware interception of ARP */ 2497 manc &= ~(E1000_MANC_ARP_EN); 2498 2499 /* enable receiving management packets to the host */ 2500 manc |= E1000_MANC_EN_MNG2HOST; 2501 manc2h |= 1 << 5; /* Mng Port 623 */ 2502 manc2h |= 1 << 6; /* Mng Port 664 */ 2503 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); 2504 E1000_WRITE_REG(hw, E1000_MANC, manc); 2505 } 2506 } 2507 2508 static void 2509 igb_release_manageability(struct e1000_hw *hw) 2510 { 2511 if (e1000_enable_mng_pass_thru(hw)) { 2512 uint32_t manc = E1000_READ_REG(hw, E1000_MANC); 2513 2514 manc |= E1000_MANC_ARP_EN; 2515 manc &= ~E1000_MANC_EN_MNG2HOST; 2516 2517 E1000_WRITE_REG(hw, E1000_MANC, manc); 2518 } 2519 } 2520 2521 static int 2522 eth_igb_promiscuous_enable(struct rte_eth_dev *dev) 2523 { 2524 struct e1000_hw *hw = 2525 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2526 uint32_t rctl; 2527 2528 rctl = E1000_READ_REG(hw, E1000_RCTL); 2529 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2530 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2531 2532 return 0; 2533 } 2534 2535 static int 2536 eth_igb_promiscuous_disable(struct rte_eth_dev *dev) 2537 { 2538 struct e1000_hw *hw = 2539 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2540 uint32_t rctl; 2541 2542 rctl = E1000_READ_REG(hw, E1000_RCTL); 2543 rctl &= (~E1000_RCTL_UPE); 2544 if (dev->data->all_multicast == 1) 2545 rctl |= E1000_RCTL_MPE; 2546 else 2547 rctl &= (~E1000_RCTL_MPE); 2548 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2549 2550 return 0; 2551 } 2552 2553 static int 2554 eth_igb_allmulticast_enable(struct rte_eth_dev *dev) 2555 { 2556 struct e1000_hw *hw = 2557 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2558 uint32_t rctl; 2559 2560 rctl = E1000_READ_REG(hw, E1000_RCTL); 2561 
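/* Read-modify-write of RCTL: setting the Multicast Promiscuous Enable
 * (MPE) bit makes the MAC accept all multicast frames regardless of
 * the multicast table array.
 */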
rctl |= E1000_RCTL_MPE; 2562 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2563 2564 return 0; 2565 } 2566 2567 static int 2568 eth_igb_allmulticast_disable(struct rte_eth_dev *dev) 2569 { 2570 struct e1000_hw *hw = 2571 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2572 uint32_t rctl; 2573 2574 if (dev->data->promiscuous == 1) 2575 return 0; /* must remain in all_multicast mode */ 2576 rctl = E1000_READ_REG(hw, E1000_RCTL); 2577 rctl &= (~E1000_RCTL_MPE); 2578 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2579 2580 return 0; 2581 } 2582 2583 static int 2584 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 2585 { 2586 struct e1000_hw *hw = 2587 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2588 struct e1000_vfta * shadow_vfta = 2589 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 2590 uint32_t vfta; 2591 uint32_t vid_idx; 2592 uint32_t vid_bit; 2593 2594 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & 2595 E1000_VFTA_ENTRY_MASK); 2596 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); 2597 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); 2598 if (on) 2599 vfta |= vid_bit; 2600 else 2601 vfta &= ~vid_bit; 2602 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); 2603 2604 /* update local VFTA copy */ 2605 shadow_vfta->vfta[vid_idx] = vfta; 2606 2607 return 0; 2608 } 2609 2610 static int 2611 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, 2612 enum rte_vlan_type vlan_type, 2613 uint16_t tpid) 2614 { 2615 struct e1000_hw *hw = 2616 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2617 uint32_t reg, qinq; 2618 2619 qinq = E1000_READ_REG(hw, E1000_CTRL_EXT); 2620 qinq &= E1000_CTRL_EXT_EXT_VLAN; 2621 2622 /* only outer TPID of double VLAN can be configured*/ 2623 if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) { 2624 reg = E1000_READ_REG(hw, E1000_VET); 2625 reg = (reg & (~E1000_VET_VET_EXT)) | 2626 ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT); 2627 E1000_WRITE_REG(hw, E1000_VET, reg); 2628 2629 return 0; 2630 } 2631 2632 /* all other TPID values are read-only*/ 2633 PMD_DRV_LOG(ERR, "Not supported"); 2634 2635 return -ENOTSUP; 2636 } 2637 2638 static void 2639 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev) 2640 { 2641 struct e1000_hw *hw = 2642 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2643 uint32_t reg; 2644 2645 /* Filter Table Disable */ 2646 reg = E1000_READ_REG(hw, E1000_RCTL); 2647 reg &= ~E1000_RCTL_CFIEN; 2648 reg &= ~E1000_RCTL_VFE; 2649 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2650 } 2651 2652 static void 2653 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2654 { 2655 struct e1000_hw *hw = 2656 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2657 struct e1000_vfta * shadow_vfta = 2658 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 2659 uint32_t reg; 2660 int i; 2661 2662 /* Filter Table Enable, CFI not used for packet acceptance */ 2663 reg = E1000_READ_REG(hw, E1000_RCTL); 2664 reg &= ~E1000_RCTL_CFIEN; 2665 reg |= E1000_RCTL_VFE; 2666 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2667 2668 /* restore VFTA table */ 2669 for (i = 0; i < IGB_VFTA_SIZE; i++) 2670 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); 2671 } 2672 2673 static void 2674 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev) 2675 { 2676 struct e1000_hw *hw = 2677 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2678 uint32_t reg; 2679 2680 /* VLAN Mode Disable */ 2681 reg = E1000_READ_REG(hw, E1000_CTRL); 2682 reg &= ~E1000_CTRL_VME; 2683 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2684 } 2685 2686 static void 2687 
igb_vlan_hw_strip_enable(struct rte_eth_dev *dev) 2688 { 2689 struct e1000_hw *hw = 2690 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2691 uint32_t reg; 2692 2693 /* VLAN Mode Enable */ 2694 reg = E1000_READ_REG(hw, E1000_CTRL); 2695 reg |= E1000_CTRL_VME; 2696 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2697 } 2698 2699 static void 2700 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2701 { 2702 struct e1000_hw *hw = 2703 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2704 uint32_t reg; 2705 2706 /* CTRL_EXT: Extended VLAN */ 2707 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2708 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN; 2709 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2710 2711 /* Update maximum packet length */ 2712 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 2713 E1000_WRITE_REG(hw, E1000_RLPML, 2714 dev->data->dev_conf.rxmode.max_rx_pkt_len + 2715 VLAN_TAG_SIZE); 2716 } 2717 2718 static void 2719 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2720 { 2721 struct e1000_hw *hw = 2722 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2723 uint32_t reg; 2724 2725 /* CTRL_EXT: Extended VLAN */ 2726 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2727 reg |= E1000_CTRL_EXT_EXTEND_VLAN; 2728 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2729 2730 /* Update maximum packet length */ 2731 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 2732 E1000_WRITE_REG(hw, E1000_RLPML, 2733 dev->data->dev_conf.rxmode.max_rx_pkt_len + 2734 2 * VLAN_TAG_SIZE); 2735 } 2736 2737 static int 2738 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2739 { 2740 struct rte_eth_rxmode *rxmode; 2741 2742 rxmode = &dev->data->dev_conf.rxmode; 2743 if(mask & ETH_VLAN_STRIP_MASK){ 2744 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2745 igb_vlan_hw_strip_enable(dev); 2746 else 2747 igb_vlan_hw_strip_disable(dev); 2748 } 2749 2750 if(mask & ETH_VLAN_FILTER_MASK){ 2751 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 2752 igb_vlan_hw_filter_enable(dev); 2753 else 2754 igb_vlan_hw_filter_disable(dev); 2755 } 2756 2757 if(mask & ETH_VLAN_EXTEND_MASK){ 2758 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2759 igb_vlan_hw_extend_enable(dev); 2760 else 2761 igb_vlan_hw_extend_disable(dev); 2762 } 2763 2764 return 0; 2765 } 2766 2767 2768 /** 2769 * It enables the interrupt mask and then enable the interrupt. 2770 * 2771 * @param dev 2772 * Pointer to struct rte_eth_dev. 2773 * @param on 2774 * Enable or Disable 2775 * 2776 * @return 2777 * - On success, zero. 2778 * - On failure, a negative value. 2779 */ 2780 static int 2781 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 2782 { 2783 struct e1000_interrupt *intr = 2784 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2785 2786 if (on) 2787 intr->mask |= E1000_ICR_LSC; 2788 else 2789 intr->mask &= ~E1000_ICR_LSC; 2790 2791 return 0; 2792 } 2793 2794 /* It clears the interrupt causes and enables the interrupt. 2795 * It will be called once only during nic initialized. 2796 * 2797 * @param dev 2798 * Pointer to struct rte_eth_dev. 2799 * 2800 * @return 2801 * - On success, zero. 2802 * - On failure, a negative value. 2803 */ 2804 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev) 2805 { 2806 uint32_t mask, regval; 2807 int ret; 2808 struct e1000_hw *hw = 2809 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2810 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2811 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2812 int misc_shift = rte_intr_allow_others(intr_handle) ? 
1 : 0;
2813 struct rte_eth_dev_info dev_info;
2814
2815 memset(&dev_info, 0, sizeof(dev_info));
2816 ret = eth_igb_infos_get(dev, &dev_info);
2817 if (ret != 0)
2818 return ret;
2819
2820 mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift;
2821 regval = E1000_READ_REG(hw, E1000_EIMS);
2822 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
2823
2824 return 0;
2825 }
2826
2827 /*
2828 * It reads ICR and gets interrupt causes, checks them and sets a bit flag
2829 * to update link status.
2830 *
2831 * @param dev
2832 * Pointer to struct rte_eth_dev.
2833 *
2834 * @return
2835 * - On success, zero.
2836 * - On failure, a negative value.
2837 */
2838 static int
2839 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2840 {
2841 uint32_t icr;
2842 struct e1000_hw *hw =
2843 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2844 struct e1000_interrupt *intr =
2845 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2846
2847 igb_intr_disable(dev);
2848
2849 /* read-on-clear nic registers here */
2850 icr = E1000_READ_REG(hw, E1000_ICR);
2851
2852 intr->flags = 0;
2853 if (icr & E1000_ICR_LSC) {
2854 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2855 }
2856
2857 if (icr & E1000_ICR_VMMB)
2858 intr->flags |= E1000_FLAG_MAILBOX;
2859
2860 return 0;
2861 }
2862
2863 /*
2864 * It executes link_update after knowing an interrupt is present.
2865 *
2866 * @param dev
2867 * Pointer to struct rte_eth_dev.
2868 *
2869 * @return
2870 * - On success, zero.
2871 * - On failure, a negative value.
2872 */
2873 static int
2874 eth_igb_interrupt_action(struct rte_eth_dev *dev,
2875 struct rte_intr_handle *intr_handle)
2876 {
2877 struct e1000_hw *hw =
2878 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2879 struct e1000_interrupt *intr =
2880 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2881 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2882 struct rte_eth_link link;
2883 int ret;
2884
2885 if (intr->flags & E1000_FLAG_MAILBOX) {
2886 igb_pf_mbx_process(dev);
2887 intr->flags &= ~E1000_FLAG_MAILBOX;
2888 }
2889
2890 igb_intr_enable(dev);
2891 rte_intr_ack(intr_handle);
2892
2893 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2894 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2895
2896 /* set get_link_status to check register later */
2897 hw->mac.get_link_status = 1;
2898 ret = eth_igb_link_update(dev, 0);
2899
2900 /* check if link has changed */
2901 if (ret < 0)
2902 return 0;
2903
2904 rte_eth_linkstatus_get(dev, &link);
2905 if (link.link_status) {
2906 PMD_INIT_LOG(INFO,
2907 " Port %d: Link Up - speed %u Mbps - %s",
2908 dev->data->port_id,
2909 (unsigned)link.link_speed,
2910 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2911 "full-duplex" : "half-duplex");
2912 } else {
2913 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2914 dev->data->port_id);
2915 }
2916
2917 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
2918 pci_dev->addr.domain,
2919 pci_dev->addr.bus,
2920 pci_dev->addr.devid,
2921 pci_dev->addr.function);
2922 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2923 NULL);
2924 }
2925
2926 return 0;
2927 }
2928
2929 /**
2930 * Interrupt handler which shall be registered at first.
2931 *
2932 * @param handle
2933 * Pointer to interrupt handle.
2934 * @param param
2935 * The address of parameter (struct rte_eth_dev *) registered before.
2936 * 2937 * @return 2938 * void 2939 */ 2940 static void 2941 eth_igb_interrupt_handler(void *param) 2942 { 2943 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2944 2945 eth_igb_interrupt_get_status(dev); 2946 eth_igb_interrupt_action(dev, dev->intr_handle); 2947 } 2948 2949 static int 2950 eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev) 2951 { 2952 uint32_t eicr; 2953 struct e1000_hw *hw = 2954 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2955 struct e1000_interrupt *intr = 2956 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2957 2958 igbvf_intr_disable(hw); 2959 2960 /* read-on-clear nic registers here */ 2961 eicr = E1000_READ_REG(hw, E1000_EICR); 2962 intr->flags = 0; 2963 2964 if (eicr == E1000_VTIVAR_MISC_MAILBOX) 2965 intr->flags |= E1000_FLAG_MAILBOX; 2966 2967 return 0; 2968 } 2969 2970 void igbvf_mbx_process(struct rte_eth_dev *dev) 2971 { 2972 struct e1000_hw *hw = 2973 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2974 struct e1000_mbx_info *mbx = &hw->mbx; 2975 u32 in_msg = 0; 2976 2977 /* peek the message first */ 2978 in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0)); 2979 2980 /* PF reset VF event */ 2981 if (in_msg == E1000_PF_CONTROL_MSG) { 2982 /* dummy mbx read to ack pf */ 2983 if (mbx->ops.read(hw, &in_msg, 1, 0)) 2984 return; 2985 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 2986 NULL); 2987 } 2988 } 2989 2990 static int 2991 eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle) 2992 { 2993 struct e1000_interrupt *intr = 2994 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2995 2996 if (intr->flags & E1000_FLAG_MAILBOX) { 2997 igbvf_mbx_process(dev); 2998 intr->flags &= ~E1000_FLAG_MAILBOX; 2999 } 3000 3001 igbvf_intr_enable(dev); 3002 rte_intr_ack(intr_handle); 3003 3004 return 0; 3005 } 3006 3007 static void 3008 eth_igbvf_interrupt_handler(void *param) 3009 { 3010 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 3011 3012 eth_igbvf_interrupt_get_status(dev); 3013 eth_igbvf_interrupt_action(dev, dev->intr_handle); 3014 } 3015 3016 static int 3017 eth_igb_led_on(struct rte_eth_dev *dev) 3018 { 3019 struct e1000_hw *hw; 3020 3021 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3022 return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; 3023 } 3024 3025 static int 3026 eth_igb_led_off(struct rte_eth_dev *dev) 3027 { 3028 struct e1000_hw *hw; 3029 3030 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3031 return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; 3032 } 3033 3034 static int 3035 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3036 { 3037 struct e1000_hw *hw; 3038 uint32_t ctrl; 3039 int tx_pause; 3040 int rx_pause; 3041 3042 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3043 fc_conf->pause_time = hw->fc.pause_time; 3044 fc_conf->high_water = hw->fc.high_water; 3045 fc_conf->low_water = hw->fc.low_water; 3046 fc_conf->send_xon = hw->fc.send_xon; 3047 fc_conf->autoneg = hw->mac.autoneg; 3048 3049 /* 3050 * Return rx_pause and tx_pause status according to actual setting of 3051 * the TFCE and RFCE bits in the CTRL register. 
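 * (TFCE enables generation of PAUSE frames on transmit; RFCE enables
 * reacting to PAUSE frames received from the link partner.)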
3052 */ 3053 ctrl = E1000_READ_REG(hw, E1000_CTRL); 3054 if (ctrl & E1000_CTRL_TFCE) 3055 tx_pause = 1; 3056 else 3057 tx_pause = 0; 3058 3059 if (ctrl & E1000_CTRL_RFCE) 3060 rx_pause = 1; 3061 else 3062 rx_pause = 0; 3063 3064 if (rx_pause && tx_pause) 3065 fc_conf->mode = RTE_FC_FULL; 3066 else if (rx_pause) 3067 fc_conf->mode = RTE_FC_RX_PAUSE; 3068 else if (tx_pause) 3069 fc_conf->mode = RTE_FC_TX_PAUSE; 3070 else 3071 fc_conf->mode = RTE_FC_NONE; 3072 3073 return 0; 3074 } 3075 3076 static int 3077 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3078 { 3079 struct e1000_hw *hw; 3080 int err; 3081 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { 3082 e1000_fc_none, 3083 e1000_fc_rx_pause, 3084 e1000_fc_tx_pause, 3085 e1000_fc_full 3086 }; 3087 uint32_t rx_buf_size; 3088 uint32_t max_high_water; 3089 uint32_t rctl; 3090 3091 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3092 if (fc_conf->autoneg != hw->mac.autoneg) 3093 return -ENOTSUP; 3094 rx_buf_size = igb_get_rx_buffer_size(hw); 3095 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 3096 3097 /* At least reserve one Ethernet frame for watermark */ 3098 max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN; 3099 if ((fc_conf->high_water > max_high_water) || 3100 (fc_conf->high_water < fc_conf->low_water)) { 3101 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); 3102 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); 3103 return -EINVAL; 3104 } 3105 3106 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; 3107 hw->fc.pause_time = fc_conf->pause_time; 3108 hw->fc.high_water = fc_conf->high_water; 3109 hw->fc.low_water = fc_conf->low_water; 3110 hw->fc.send_xon = fc_conf->send_xon; 3111 3112 err = e1000_setup_link_generic(hw); 3113 if (err == E1000_SUCCESS) { 3114 3115 /* check if we want to forward MAC frames - driver doesn't have native 3116 * capability to do that, so we'll write the registers ourselves */ 3117 3118 rctl = E1000_READ_REG(hw, E1000_RCTL); 3119 3120 /* set or clear MFLCN.PMCF bit depending on configuration */ 3121 if (fc_conf->mac_ctrl_frame_fwd != 0) 3122 rctl |= E1000_RCTL_PMCF; 3123 else 3124 rctl &= ~E1000_RCTL_PMCF; 3125 3126 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 3127 E1000_WRITE_FLUSH(hw); 3128 3129 return 0; 3130 } 3131 3132 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); 3133 return -EIO; 3134 } 3135 3136 #define E1000_RAH_POOLSEL_SHIFT (18) 3137 static int 3138 eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 3139 uint32_t index, uint32_t pool) 3140 { 3141 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3142 uint32_t rah; 3143 3144 e1000_rar_set(hw, mac_addr->addr_bytes, index); 3145 rah = E1000_READ_REG(hw, E1000_RAH(index)); 3146 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool)); 3147 E1000_WRITE_REG(hw, E1000_RAH(index), rah); 3148 return 0; 3149 } 3150 3151 static void 3152 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) 3153 { 3154 uint8_t addr[RTE_ETHER_ADDR_LEN]; 3155 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3156 3157 memset(addr, 0, sizeof(addr)); 3158 3159 e1000_rar_set(hw, addr, index); 3160 } 3161 3162 static int 3163 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, 3164 struct rte_ether_addr *addr) 3165 { 3166 eth_igb_rar_clear(dev, 0); 3167 eth_igb_rar_set(dev, (void *)addr, 0, 0); 3168 3169 return 0; 3170 } 3171 /* 3172 * Virtual Function operations 3173 */ 3174 static void 3175 igbvf_intr_disable(struct 
e1000_hw *hw) 3176 { 3177 PMD_INIT_FUNC_TRACE(); 3178 3179 /* Clear interrupt mask to stop from interrupts being generated */ 3180 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF); 3181 3182 E1000_WRITE_FLUSH(hw); 3183 } 3184 3185 static void 3186 igbvf_stop_adapter(struct rte_eth_dev *dev) 3187 { 3188 u32 reg_val; 3189 u16 i; 3190 struct rte_eth_dev_info dev_info; 3191 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3192 int ret; 3193 3194 memset(&dev_info, 0, sizeof(dev_info)); 3195 ret = eth_igbvf_infos_get(dev, &dev_info); 3196 if (ret != 0) 3197 return; 3198 3199 /* Clear interrupt mask to stop from interrupts being generated */ 3200 igbvf_intr_disable(hw); 3201 3202 /* Clear any pending interrupts, flush previous writes */ 3203 E1000_READ_REG(hw, E1000_EICR); 3204 3205 /* Disable the transmit unit. Each queue must be disabled. */ 3206 for (i = 0; i < dev_info.max_tx_queues; i++) 3207 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH); 3208 3209 /* Disable the receive unit by stopping each queue */ 3210 for (i = 0; i < dev_info.max_rx_queues; i++) { 3211 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i)); 3212 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE; 3213 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val); 3214 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE) 3215 ; 3216 } 3217 3218 /* flush all queues disables */ 3219 E1000_WRITE_FLUSH(hw); 3220 msec_delay(2); 3221 } 3222 3223 static int eth_igbvf_link_update(struct e1000_hw *hw) 3224 { 3225 struct e1000_mbx_info *mbx = &hw->mbx; 3226 struct e1000_mac_info *mac = &hw->mac; 3227 int ret_val = E1000_SUCCESS; 3228 3229 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf"); 3230 3231 /* 3232 * We only want to run this if there has been a rst asserted. 3233 * in this case that could mean a link change, device reset, 3234 * or a virtual function reset 3235 */ 3236 3237 /* If we were hit with a reset or timeout drop the link */ 3238 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout) 3239 mac->get_link_status = TRUE; 3240 3241 if (!mac->get_link_status) 3242 goto out; 3243 3244 /* if link status is down no point in checking to see if pf is up */ 3245 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) 3246 goto out; 3247 3248 /* if we passed all the tests above then the link is up and we no 3249 * longer need to check for link */ 3250 mac->get_link_status = FALSE; 3251 3252 out: 3253 return ret_val; 3254 } 3255 3256 3257 static int 3258 igbvf_dev_configure(struct rte_eth_dev *dev) 3259 { 3260 struct rte_eth_conf* conf = &dev->data->dev_conf; 3261 3262 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 3263 dev->data->port_id); 3264 3265 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 3266 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 3267 3268 /* 3269 * VF has no ability to enable/disable HW CRC 3270 * Keep the persistent behavior the same as Host PF 3271 */ 3272 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC 3273 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { 3274 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 3275 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; 3276 } 3277 #else 3278 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { 3279 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 3280 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; 3281 } 3282 #endif 3283 3284 return 0; 3285 } 3286 3287 static int 3288 igbvf_dev_start(struct rte_eth_dev *dev) 3289 { 3290 struct e1000_hw *hw = 3291 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 
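/* The VF start sequence below broadly mirrors the PF path: reset the MAC,
 * replay the shadow VFTA, initialize the Tx/Rx rings, map Rx queues to
 * MSI-X vectors and finally enable interrupts.
 */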
3292 struct e1000_adapter *adapter = 3293 E1000_DEV_PRIVATE(dev->data->dev_private); 3294 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3295 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3296 int ret; 3297 uint32_t intr_vector = 0; 3298 3299 PMD_INIT_FUNC_TRACE(); 3300 3301 hw->mac.ops.reset_hw(hw); 3302 adapter->stopped = 0; 3303 3304 /* Set all vfta */ 3305 igbvf_set_vfta_all(dev,1); 3306 3307 eth_igbvf_tx_init(dev); 3308 3309 /* This can fail when allocating mbufs for descriptor rings */ 3310 ret = eth_igbvf_rx_init(dev); 3311 if (ret) { 3312 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 3313 igb_dev_clear_queues(dev); 3314 return ret; 3315 } 3316 3317 /* check and configure queue intr-vector mapping */ 3318 if (rte_intr_cap_multiple(intr_handle) && 3319 dev->data->dev_conf.intr_conf.rxq) { 3320 intr_vector = dev->data->nb_rx_queues; 3321 ret = rte_intr_efd_enable(intr_handle, intr_vector); 3322 if (ret) 3323 return ret; 3324 } 3325 3326 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 3327 intr_handle->intr_vec = 3328 rte_zmalloc("intr_vec", 3329 dev->data->nb_rx_queues * sizeof(int), 0); 3330 if (!intr_handle->intr_vec) { 3331 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 3332 " intr_vec", dev->data->nb_rx_queues); 3333 return -ENOMEM; 3334 } 3335 } 3336 3337 eth_igbvf_configure_msix_intr(dev); 3338 3339 /* enable uio/vfio intr/eventfd mapping */ 3340 rte_intr_enable(intr_handle); 3341 3342 /* resume enabled intr since hw reset */ 3343 igbvf_intr_enable(dev); 3344 3345 return 0; 3346 } 3347 3348 static void 3349 igbvf_dev_stop(struct rte_eth_dev *dev) 3350 { 3351 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3352 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3353 struct e1000_adapter *adapter = 3354 E1000_DEV_PRIVATE(dev->data->dev_private); 3355 3356 if (adapter->stopped) 3357 return; 3358 3359 PMD_INIT_FUNC_TRACE(); 3360 3361 igbvf_stop_adapter(dev); 3362 3363 /* 3364 * Clear what we set, but we still keep shadow_vfta to 3365 * restore after device starts 3366 */ 3367 igbvf_set_vfta_all(dev,0); 3368 3369 igb_dev_clear_queues(dev); 3370 3371 /* disable intr eventfd mapping */ 3372 rte_intr_disable(intr_handle); 3373 3374 /* Clean datapath event and queue/vec mapping */ 3375 rte_intr_efd_disable(intr_handle); 3376 if (intr_handle->intr_vec) { 3377 rte_free(intr_handle->intr_vec); 3378 intr_handle->intr_vec = NULL; 3379 } 3380 3381 adapter->stopped = true; 3382 } 3383 3384 static void 3385 igbvf_dev_close(struct rte_eth_dev *dev) 3386 { 3387 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3388 struct rte_ether_addr addr; 3389 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3390 3391 PMD_INIT_FUNC_TRACE(); 3392 3393 e1000_reset_hw(hw); 3394 3395 igbvf_dev_stop(dev); 3396 3397 igb_dev_free_queues(dev); 3398 3399 /** 3400 * reprogram the RAR with a zero mac address, 3401 * to ensure that the VF traffic goes to the PF 3402 * after stop, close and detach of the VF. 
3403 **/ 3404 3405 memset(&addr, 0, sizeof(addr)); 3406 igbvf_default_mac_addr_set(dev, &addr); 3407 3408 dev->dev_ops = NULL; 3409 dev->rx_pkt_burst = NULL; 3410 dev->tx_pkt_burst = NULL; 3411 3412 rte_intr_callback_unregister(&pci_dev->intr_handle, 3413 eth_igbvf_interrupt_handler, 3414 (void *)dev); 3415 } 3416 3417 static int 3418 igbvf_promiscuous_enable(struct rte_eth_dev *dev) 3419 { 3420 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3421 3422 /* Set both unicast and multicast promisc */ 3423 e1000_promisc_set_vf(hw, e1000_promisc_enabled); 3424 3425 return 0; 3426 } 3427 3428 static int 3429 igbvf_promiscuous_disable(struct rte_eth_dev *dev) 3430 { 3431 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3432 3433 /* If in allmulticast mode leave multicast promisc */ 3434 if (dev->data->all_multicast == 1) 3435 e1000_promisc_set_vf(hw, e1000_promisc_multicast); 3436 else 3437 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 3438 3439 return 0; 3440 } 3441 3442 static int 3443 igbvf_allmulticast_enable(struct rte_eth_dev *dev) 3444 { 3445 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3446 3447 /* In promiscuous mode multicast promisc already set */ 3448 if (dev->data->promiscuous == 0) 3449 e1000_promisc_set_vf(hw, e1000_promisc_multicast); 3450 3451 return 0; 3452 } 3453 3454 static int 3455 igbvf_allmulticast_disable(struct rte_eth_dev *dev) 3456 { 3457 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3458 3459 /* In promiscuous mode leave multicast promisc enabled */ 3460 if (dev->data->promiscuous == 0) 3461 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 3462 3463 return 0; 3464 } 3465 3466 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on) 3467 { 3468 struct e1000_mbx_info *mbx = &hw->mbx; 3469 uint32_t msgbuf[2]; 3470 s32 err; 3471 3472 /* After set vlan, vlan strip will also be enabled in igb driver*/ 3473 msgbuf[0] = E1000_VF_SET_VLAN; 3474 msgbuf[1] = vid; 3475 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ 3476 if (on) 3477 msgbuf[0] |= E1000_VF_SET_VLAN_ADD; 3478 3479 err = mbx->ops.write_posted(hw, msgbuf, 2, 0); 3480 if (err) 3481 goto mbx_err; 3482 3483 err = mbx->ops.read_posted(hw, msgbuf, 2, 0); 3484 if (err) 3485 goto mbx_err; 3486 3487 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; 3488 if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)) 3489 err = -EINVAL; 3490 3491 mbx_err: 3492 return err; 3493 } 3494 3495 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on) 3496 { 3497 struct e1000_hw *hw = 3498 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3499 struct e1000_vfta * shadow_vfta = 3500 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 3501 int i = 0, j = 0, vfta = 0, mask = 1; 3502 3503 for (i = 0; i < IGB_VFTA_SIZE; i++){ 3504 vfta = shadow_vfta->vfta[i]; 3505 if(vfta){ 3506 mask = 1; 3507 for (j = 0; j < 32; j++){ 3508 if(vfta & mask) 3509 igbvf_set_vfta(hw, 3510 (uint16_t)((i<<5)+j), on); 3511 mask<<=1; 3512 } 3513 } 3514 } 3515 3516 } 3517 3518 static int 3519 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 3520 { 3521 struct e1000_hw *hw = 3522 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3523 struct e1000_vfta * shadow_vfta = 3524 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 3525 uint32_t vid_idx = 0; 3526 uint32_t vid_bit = 0; 3527 int ret = 0; 3528 3529 PMD_INIT_FUNC_TRACE(); 3530 3531 /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/ 3532 ret = 
igbvf_set_vfta(hw, vlan_id, !!on); 3533 if(ret){ 3534 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 3535 return ret; 3536 } 3537 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 3538 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 3539 3540 /*Save what we set and retore it after device reset*/ 3541 if (on) 3542 shadow_vfta->vfta[vid_idx] |= vid_bit; 3543 else 3544 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 3545 3546 return 0; 3547 } 3548 3549 static int 3550 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 3551 { 3552 struct e1000_hw *hw = 3553 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3554 3555 /* index is not used by rar_set() */ 3556 hw->mac.ops.rar_set(hw, (void *)addr, 0); 3557 return 0; 3558 } 3559 3560 3561 static int 3562 eth_igb_rss_reta_update(struct rte_eth_dev *dev, 3563 struct rte_eth_rss_reta_entry64 *reta_conf, 3564 uint16_t reta_size) 3565 { 3566 uint8_t i, j, mask; 3567 uint32_t reta, r; 3568 uint16_t idx, shift; 3569 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3570 3571 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3572 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3573 "(%d) doesn't match the number hardware can supported " 3574 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); 3575 return -EINVAL; 3576 } 3577 3578 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { 3579 idx = i / RTE_RETA_GROUP_SIZE; 3580 shift = i % RTE_RETA_GROUP_SIZE; 3581 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3582 IGB_4_BIT_MASK); 3583 if (!mask) 3584 continue; 3585 if (mask == IGB_4_BIT_MASK) 3586 r = 0; 3587 else 3588 r = E1000_READ_REG(hw, E1000_RETA(i >> 2)); 3589 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) { 3590 if (mask & (0x1 << j)) 3591 reta |= reta_conf[idx].reta[shift + j] << 3592 (CHAR_BIT * j); 3593 else 3594 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j)); 3595 } 3596 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta); 3597 } 3598 3599 return 0; 3600 } 3601 3602 static int 3603 eth_igb_rss_reta_query(struct rte_eth_dev *dev, 3604 struct rte_eth_rss_reta_entry64 *reta_conf, 3605 uint16_t reta_size) 3606 { 3607 uint8_t i, j, mask; 3608 uint32_t reta; 3609 uint16_t idx, shift; 3610 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3611 3612 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3613 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3614 "(%d) doesn't match the number hardware can supported " 3615 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); 3616 return -EINVAL; 3617 } 3618 3619 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { 3620 idx = i / RTE_RETA_GROUP_SIZE; 3621 shift = i % RTE_RETA_GROUP_SIZE; 3622 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3623 IGB_4_BIT_MASK); 3624 if (!mask) 3625 continue; 3626 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2)); 3627 for (j = 0; j < IGB_4_BIT_WIDTH; j++) { 3628 if (mask & (0x1 << j)) 3629 reta_conf[idx].reta[shift + j] = 3630 ((reta >> (CHAR_BIT * j)) & 3631 IGB_8_BIT_MASK); 3632 } 3633 } 3634 3635 return 0; 3636 } 3637 3638 int 3639 eth_igb_syn_filter_set(struct rte_eth_dev *dev, 3640 struct rte_eth_syn_filter *filter, 3641 bool add) 3642 { 3643 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3644 struct e1000_filter_info *filter_info = 3645 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3646 uint32_t synqf, rfctl; 3647 3648 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) 3649 return -EINVAL; 3650 3651 synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); 3652 3653 if (add) { 3654 if (synqf & E1000_SYN_FILTER_ENABLE) 3655 
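			/* there is a single SYN filter (SYNQF(0)); it must be
			 * deleted before it can be re-added for a different queue
			 */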
return -EINVAL; 3656 3657 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) & 3658 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE); 3659 3660 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 3661 if (filter->hig_pri) 3662 rfctl |= E1000_RFCTL_SYNQFP; 3663 else 3664 rfctl &= ~E1000_RFCTL_SYNQFP; 3665 3666 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); 3667 } else { 3668 if (!(synqf & E1000_SYN_FILTER_ENABLE)) 3669 return -ENOENT; 3670 synqf = 0; 3671 } 3672 3673 filter_info->syn_info = synqf; 3674 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); 3675 E1000_WRITE_FLUSH(hw); 3676 return 0; 3677 } 3678 3679 static int 3680 eth_igb_syn_filter_get(struct rte_eth_dev *dev, 3681 struct rte_eth_syn_filter *filter) 3682 { 3683 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3684 uint32_t synqf, rfctl; 3685 3686 synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); 3687 if (synqf & E1000_SYN_FILTER_ENABLE) { 3688 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 3689 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0; 3690 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >> 3691 E1000_SYN_FILTER_QUEUE_SHIFT); 3692 return 0; 3693 } 3694 3695 return -ENOENT; 3696 } 3697 3698 static int 3699 eth_igb_syn_filter_handle(struct rte_eth_dev *dev, 3700 enum rte_filter_op filter_op, 3701 void *arg) 3702 { 3703 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3704 int ret; 3705 3706 MAC_TYPE_FILTER_SUP(hw->mac.type); 3707 3708 if (filter_op == RTE_ETH_FILTER_NOP) 3709 return 0; 3710 3711 if (arg == NULL) { 3712 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 3713 filter_op); 3714 return -EINVAL; 3715 } 3716 3717 switch (filter_op) { 3718 case RTE_ETH_FILTER_ADD: 3719 ret = eth_igb_syn_filter_set(dev, 3720 (struct rte_eth_syn_filter *)arg, 3721 TRUE); 3722 break; 3723 case RTE_ETH_FILTER_DELETE: 3724 ret = eth_igb_syn_filter_set(dev, 3725 (struct rte_eth_syn_filter *)arg, 3726 FALSE); 3727 break; 3728 case RTE_ETH_FILTER_GET: 3729 ret = eth_igb_syn_filter_get(dev, 3730 (struct rte_eth_syn_filter *)arg); 3731 break; 3732 default: 3733 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); 3734 ret = -EINVAL; 3735 break; 3736 } 3737 3738 return ret; 3739 } 3740 3741 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ 3742 static inline int 3743 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, 3744 struct e1000_2tuple_filter_info *filter_info) 3745 { 3746 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) 3747 return -EINVAL; 3748 if (filter->priority > E1000_2TUPLE_MAX_PRI) 3749 return -EINVAL; /* filter index is out of range. */ 3750 if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK) 3751 return -EINVAL; /* flags is invalid. 
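	 * Note on the mask convention used below: an API mask of
	 * UINT16_MAX/UINT8_MAX means "match this field" and is stored as 0 in
	 * filter_info, while an API mask of 0 means "ignore this field" and is
	 * stored as 1 (hardware bypass).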
*/ 3752 3753 switch (filter->dst_port_mask) { 3754 case UINT16_MAX: 3755 filter_info->dst_port_mask = 0; 3756 filter_info->dst_port = filter->dst_port; 3757 break; 3758 case 0: 3759 filter_info->dst_port_mask = 1; 3760 break; 3761 default: 3762 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 3763 return -EINVAL; 3764 } 3765 3766 switch (filter->proto_mask) { 3767 case UINT8_MAX: 3768 filter_info->proto_mask = 0; 3769 filter_info->proto = filter->proto; 3770 break; 3771 case 0: 3772 filter_info->proto_mask = 1; 3773 break; 3774 default: 3775 PMD_DRV_LOG(ERR, "invalid protocol mask."); 3776 return -EINVAL; 3777 } 3778 3779 filter_info->priority = (uint8_t)filter->priority; 3780 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) 3781 filter_info->tcp_flags = filter->tcp_flags; 3782 else 3783 filter_info->tcp_flags = 0; 3784 3785 return 0; 3786 } 3787 3788 static inline struct e1000_2tuple_filter * 3789 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list, 3790 struct e1000_2tuple_filter_info *key) 3791 { 3792 struct e1000_2tuple_filter *it; 3793 3794 TAILQ_FOREACH(it, filter_list, entries) { 3795 if (memcmp(key, &it->filter_info, 3796 sizeof(struct e1000_2tuple_filter_info)) == 0) { 3797 return it; 3798 } 3799 } 3800 return NULL; 3801 } 3802 3803 /* inject a igb 2tuple filter to HW */ 3804 static inline void 3805 igb_inject_2uple_filter(struct rte_eth_dev *dev, 3806 struct e1000_2tuple_filter *filter) 3807 { 3808 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3809 uint32_t ttqf = E1000_TTQF_DISABLE_MASK; 3810 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP; 3811 int i; 3812 3813 i = filter->index; 3814 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); 3815 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ 3816 imir |= E1000_IMIR_PORT_BP; 3817 else 3818 imir &= ~E1000_IMIR_PORT_BP; 3819 3820 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; 3821 3822 ttqf |= E1000_TTQF_QUEUE_ENABLE; 3823 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT); 3824 ttqf |= (uint32_t)(filter->filter_info.proto & 3825 E1000_TTQF_PROTOCOL_MASK); 3826 if (filter->filter_info.proto_mask == 0) 3827 ttqf &= ~E1000_TTQF_MASK_ENABLE; 3828 3829 /* tcp flags bits setting. */ 3830 if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { 3831 if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG) 3832 imir_ext |= E1000_IMIREXT_CTRL_URG; 3833 if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG) 3834 imir_ext |= E1000_IMIREXT_CTRL_ACK; 3835 if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG) 3836 imir_ext |= E1000_IMIREXT_CTRL_PSH; 3837 if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG) 3838 imir_ext |= E1000_IMIREXT_CTRL_RST; 3839 if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG) 3840 imir_ext |= E1000_IMIREXT_CTRL_SYN; 3841 if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG) 3842 imir_ext |= E1000_IMIREXT_CTRL_FIN; 3843 } else { 3844 imir_ext |= E1000_IMIREXT_CTRL_BP; 3845 } 3846 E1000_WRITE_REG(hw, E1000_IMIR(i), imir); 3847 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf); 3848 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); 3849 } 3850 3851 /* 3852 * igb_add_2tuple_filter - add a 2tuple filter 3853 * 3854 * @param 3855 * dev: Pointer to struct rte_eth_dev. 3856 * ntuple_filter: ponter to the filter that will be added. 3857 * 3858 * @return 3859 * - On success, zero. 3860 * - On failure, a negative value. 
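 *
 * Note: a 2-tuple filter occupies one IMIR/IMIREXT/TTQF register triplet.
 * igb_inject_2uple_filter() above programs the destination port and priority
 * into IMIR, the TCP flag match into IMIREXT, and the protocol plus target
 * queue into TTQF.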
3861 */ 3862 static int 3863 igb_add_2tuple_filter(struct rte_eth_dev *dev, 3864 struct rte_eth_ntuple_filter *ntuple_filter) 3865 { 3866 struct e1000_filter_info *filter_info = 3867 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3868 struct e1000_2tuple_filter *filter; 3869 int i, ret; 3870 3871 filter = rte_zmalloc("e1000_2tuple_filter", 3872 sizeof(struct e1000_2tuple_filter), 0); 3873 if (filter == NULL) 3874 return -ENOMEM; 3875 3876 ret = ntuple_filter_to_2tuple(ntuple_filter, 3877 &filter->filter_info); 3878 if (ret < 0) { 3879 rte_free(filter); 3880 return ret; 3881 } 3882 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list, 3883 &filter->filter_info) != NULL) { 3884 PMD_DRV_LOG(ERR, "filter exists."); 3885 rte_free(filter); 3886 return -EEXIST; 3887 } 3888 filter->queue = ntuple_filter->queue; 3889 3890 /* 3891 * look for an unused 2tuple filter index, 3892 * and insert the filter to list. 3893 */ 3894 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) { 3895 if (!(filter_info->twotuple_mask & (1 << i))) { 3896 filter_info->twotuple_mask |= 1 << i; 3897 filter->index = i; 3898 TAILQ_INSERT_TAIL(&filter_info->twotuple_list, 3899 filter, 3900 entries); 3901 break; 3902 } 3903 } 3904 if (i >= E1000_MAX_TTQF_FILTERS) { 3905 PMD_DRV_LOG(ERR, "2tuple filters are full."); 3906 rte_free(filter); 3907 return -ENOSYS; 3908 } 3909 3910 igb_inject_2uple_filter(dev, filter); 3911 return 0; 3912 } 3913 3914 int 3915 igb_delete_2tuple_filter(struct rte_eth_dev *dev, 3916 struct e1000_2tuple_filter *filter) 3917 { 3918 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3919 struct e1000_filter_info *filter_info = 3920 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3921 3922 filter_info->twotuple_mask &= ~(1 << filter->index); 3923 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); 3924 rte_free(filter); 3925 3926 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); 3927 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); 3928 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); 3929 return 0; 3930 } 3931 3932 /* 3933 * igb_remove_2tuple_filter - remove a 2tuple filter 3934 * 3935 * @param 3936 * dev: Pointer to struct rte_eth_dev. 3937 * ntuple_filter: ponter to the filter that will be removed. 3938 * 3939 * @return 3940 * - On success, zero. 3941 * - On failure, a negative value. 
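 *
 * The filter is looked up by its translated 2-tuple key; on a match,
 * igb_delete_2tuple_filter() releases the index in twotuple_mask and
 * disables the corresponding TTQF/IMIR/IMIREXT registers.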
3942 */ 3943 static int 3944 igb_remove_2tuple_filter(struct rte_eth_dev *dev, 3945 struct rte_eth_ntuple_filter *ntuple_filter) 3946 { 3947 struct e1000_filter_info *filter_info = 3948 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3949 struct e1000_2tuple_filter_info filter_2tuple; 3950 struct e1000_2tuple_filter *filter; 3951 int ret; 3952 3953 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info)); 3954 ret = ntuple_filter_to_2tuple(ntuple_filter, 3955 &filter_2tuple); 3956 if (ret < 0) 3957 return ret; 3958 3959 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list, 3960 &filter_2tuple); 3961 if (filter == NULL) { 3962 PMD_DRV_LOG(ERR, "filter doesn't exist."); 3963 return -ENOENT; 3964 } 3965 3966 igb_delete_2tuple_filter(dev, filter); 3967 3968 return 0; 3969 } 3970 3971 /* inject a igb flex filter to HW */ 3972 static inline void 3973 igb_inject_flex_filter(struct rte_eth_dev *dev, 3974 struct e1000_flex_filter *filter) 3975 { 3976 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3977 uint32_t wufc, queueing; 3978 uint32_t reg_off; 3979 uint8_t i, j = 0; 3980 3981 wufc = E1000_READ_REG(hw, E1000_WUFC); 3982 if (filter->index < E1000_MAX_FHFT) 3983 reg_off = E1000_FHFT(filter->index); 3984 else 3985 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); 3986 3987 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ | 3988 (E1000_WUFC_FLX0 << filter->index)); 3989 queueing = filter->filter_info.len | 3990 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) | 3991 (filter->filter_info.priority << 3992 E1000_FHFT_QUEUEING_PRIO_SHIFT); 3993 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, 3994 queueing); 3995 3996 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) { 3997 E1000_WRITE_REG(hw, reg_off, 3998 filter->filter_info.dwords[j]); 3999 reg_off += sizeof(uint32_t); 4000 E1000_WRITE_REG(hw, reg_off, 4001 filter->filter_info.dwords[++j]); 4002 reg_off += sizeof(uint32_t); 4003 E1000_WRITE_REG(hw, reg_off, 4004 (uint32_t)filter->filter_info.mask[i]); 4005 reg_off += sizeof(uint32_t) * 2; 4006 ++j; 4007 } 4008 } 4009 4010 static inline struct e1000_flex_filter * 4011 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list, 4012 struct e1000_flex_filter_info *key) 4013 { 4014 struct e1000_flex_filter *it; 4015 4016 TAILQ_FOREACH(it, filter_list, entries) { 4017 if (memcmp(key, &it->filter_info, 4018 sizeof(struct e1000_flex_filter_info)) == 0) 4019 return it; 4020 } 4021 4022 return NULL; 4023 } 4024 4025 /* remove a flex byte filter 4026 * @param 4027 * dev: Pointer to struct rte_eth_dev. 4028 * filter: the pointer of the filter will be removed. 
4029 */ 4030 void 4031 igb_remove_flex_filter(struct rte_eth_dev *dev, 4032 struct e1000_flex_filter *filter) 4033 { 4034 struct e1000_filter_info *filter_info = 4035 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4036 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4037 uint32_t wufc, i; 4038 uint32_t reg_off; 4039 4040 wufc = E1000_READ_REG(hw, E1000_WUFC); 4041 if (filter->index < E1000_MAX_FHFT) 4042 reg_off = E1000_FHFT(filter->index); 4043 else 4044 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); 4045 4046 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++) 4047 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0); 4048 4049 E1000_WRITE_REG(hw, E1000_WUFC, wufc & 4050 (~(E1000_WUFC_FLX0 << filter->index))); 4051 4052 filter_info->flex_mask &= ~(1 << filter->index); 4053 TAILQ_REMOVE(&filter_info->flex_list, filter, entries); 4054 rte_free(filter); 4055 } 4056 4057 int 4058 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, 4059 struct rte_eth_flex_filter *filter, 4060 bool add) 4061 { 4062 struct e1000_filter_info *filter_info = 4063 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4064 struct e1000_flex_filter *flex_filter, *it; 4065 uint32_t mask; 4066 uint8_t shift, i; 4067 4068 flex_filter = rte_zmalloc("e1000_flex_filter", 4069 sizeof(struct e1000_flex_filter), 0); 4070 if (flex_filter == NULL) 4071 return -ENOMEM; 4072 4073 flex_filter->filter_info.len = filter->len; 4074 flex_filter->filter_info.priority = filter->priority; 4075 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len); 4076 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) { 4077 mask = 0; 4078 /* reverse bits in flex filter's mask*/ 4079 for (shift = 0; shift < CHAR_BIT; shift++) { 4080 if (filter->mask[i] & (0x01 << shift)) 4081 mask |= (0x80 >> shift); 4082 } 4083 flex_filter->filter_info.mask[i] = mask; 4084 } 4085 4086 it = eth_igb_flex_filter_lookup(&filter_info->flex_list, 4087 &flex_filter->filter_info); 4088 if (it == NULL && !add) { 4089 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4090 rte_free(flex_filter); 4091 return -ENOENT; 4092 } 4093 if (it != NULL && add) { 4094 PMD_DRV_LOG(ERR, "filter exists."); 4095 rte_free(flex_filter); 4096 return -EEXIST; 4097 } 4098 4099 if (add) { 4100 flex_filter->queue = filter->queue; 4101 /* 4102 * look for an unused flex filter index 4103 * and insert the filter into the list. 
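		 * The first clear bit in filter_info->flex_mask is claimed; its
		 * position doubles as the FHFT slot / WUFC_FLX bit used by
		 * igb_inject_flex_filter().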
4104 */ 4105 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) { 4106 if (!(filter_info->flex_mask & (1 << i))) { 4107 filter_info->flex_mask |= 1 << i; 4108 flex_filter->index = i; 4109 TAILQ_INSERT_TAIL(&filter_info->flex_list, 4110 flex_filter, 4111 entries); 4112 break; 4113 } 4114 } 4115 if (i >= E1000_MAX_FLEX_FILTERS) { 4116 PMD_DRV_LOG(ERR, "flex filters are full."); 4117 rte_free(flex_filter); 4118 return -ENOSYS; 4119 } 4120 4121 igb_inject_flex_filter(dev, flex_filter); 4122 4123 } else { 4124 igb_remove_flex_filter(dev, it); 4125 rte_free(flex_filter); 4126 } 4127 4128 return 0; 4129 } 4130 4131 static int 4132 eth_igb_get_flex_filter(struct rte_eth_dev *dev, 4133 struct rte_eth_flex_filter *filter) 4134 { 4135 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4136 struct e1000_filter_info *filter_info = 4137 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4138 struct e1000_flex_filter flex_filter, *it; 4139 uint32_t wufc, queueing, wufc_en = 0; 4140 4141 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter)); 4142 flex_filter.filter_info.len = filter->len; 4143 flex_filter.filter_info.priority = filter->priority; 4144 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len); 4145 memcpy(flex_filter.filter_info.mask, filter->mask, 4146 RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT); 4147 4148 it = eth_igb_flex_filter_lookup(&filter_info->flex_list, 4149 &flex_filter.filter_info); 4150 if (it == NULL) { 4151 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4152 return -ENOENT; 4153 } 4154 4155 wufc = E1000_READ_REG(hw, E1000_WUFC); 4156 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index); 4157 4158 if ((wufc & wufc_en) == wufc_en) { 4159 uint32_t reg_off = 0; 4160 if (it->index < E1000_MAX_FHFT) 4161 reg_off = E1000_FHFT(it->index); 4162 else 4163 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT); 4164 4165 queueing = E1000_READ_REG(hw, 4166 reg_off + E1000_FHFT_QUEUEING_OFFSET); 4167 filter->len = queueing & E1000_FHFT_QUEUEING_LEN; 4168 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >> 4169 E1000_FHFT_QUEUEING_PRIO_SHIFT; 4170 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >> 4171 E1000_FHFT_QUEUEING_QUEUE_SHIFT; 4172 return 0; 4173 } 4174 return -ENOENT; 4175 } 4176 4177 static int 4178 eth_igb_flex_filter_handle(struct rte_eth_dev *dev, 4179 enum rte_filter_op filter_op, 4180 void *arg) 4181 { 4182 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4183 struct rte_eth_flex_filter *filter; 4184 int ret = 0; 4185 4186 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); 4187 4188 if (filter_op == RTE_ETH_FILTER_NOP) 4189 return ret; 4190 4191 if (arg == NULL) { 4192 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 4193 filter_op); 4194 return -EINVAL; 4195 } 4196 4197 filter = (struct rte_eth_flex_filter *)arg; 4198 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN 4199 || filter->len % sizeof(uint64_t) != 0) { 4200 PMD_DRV_LOG(ERR, "filter's length is out of range"); 4201 return -EINVAL; 4202 } 4203 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) { 4204 PMD_DRV_LOG(ERR, "filter's priority is out of range"); 4205 return -EINVAL; 4206 } 4207 4208 switch (filter_op) { 4209 case RTE_ETH_FILTER_ADD: 4210 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE); 4211 break; 4212 case RTE_ETH_FILTER_DELETE: 4213 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE); 4214 break; 4215 case RTE_ETH_FILTER_GET: 4216 ret = eth_igb_get_flex_filter(dev, filter); 4217 break; 4218 default: 4219 
PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); 4220 ret = -EINVAL; 4221 break; 4222 } 4223 4224 return ret; 4225 } 4226 4227 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/ 4228 static inline int 4229 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter, 4230 struct e1000_5tuple_filter_info *filter_info) 4231 { 4232 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) 4233 return -EINVAL; 4234 if (filter->priority > E1000_2TUPLE_MAX_PRI) 4235 return -EINVAL; /* filter index is out of range. */ 4236 if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK) 4237 return -EINVAL; /* flags is invalid. */ 4238 4239 switch (filter->dst_ip_mask) { 4240 case UINT32_MAX: 4241 filter_info->dst_ip_mask = 0; 4242 filter_info->dst_ip = filter->dst_ip; 4243 break; 4244 case 0: 4245 filter_info->dst_ip_mask = 1; 4246 break; 4247 default: 4248 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 4249 return -EINVAL; 4250 } 4251 4252 switch (filter->src_ip_mask) { 4253 case UINT32_MAX: 4254 filter_info->src_ip_mask = 0; 4255 filter_info->src_ip = filter->src_ip; 4256 break; 4257 case 0: 4258 filter_info->src_ip_mask = 1; 4259 break; 4260 default: 4261 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 4262 return -EINVAL; 4263 } 4264 4265 switch (filter->dst_port_mask) { 4266 case UINT16_MAX: 4267 filter_info->dst_port_mask = 0; 4268 filter_info->dst_port = filter->dst_port; 4269 break; 4270 case 0: 4271 filter_info->dst_port_mask = 1; 4272 break; 4273 default: 4274 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 4275 return -EINVAL; 4276 } 4277 4278 switch (filter->src_port_mask) { 4279 case UINT16_MAX: 4280 filter_info->src_port_mask = 0; 4281 filter_info->src_port = filter->src_port; 4282 break; 4283 case 0: 4284 filter_info->src_port_mask = 1; 4285 break; 4286 default: 4287 PMD_DRV_LOG(ERR, "invalid src_port mask."); 4288 return -EINVAL; 4289 } 4290 4291 switch (filter->proto_mask) { 4292 case UINT8_MAX: 4293 filter_info->proto_mask = 0; 4294 filter_info->proto = filter->proto; 4295 break; 4296 case 0: 4297 filter_info->proto_mask = 1; 4298 break; 4299 default: 4300 PMD_DRV_LOG(ERR, "invalid protocol mask."); 4301 return -EINVAL; 4302 } 4303 4304 filter_info->priority = (uint8_t)filter->priority; 4305 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) 4306 filter_info->tcp_flags = filter->tcp_flags; 4307 else 4308 filter_info->tcp_flags = 0; 4309 4310 return 0; 4311 } 4312 4313 static inline struct e1000_5tuple_filter * 4314 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list, 4315 struct e1000_5tuple_filter_info *key) 4316 { 4317 struct e1000_5tuple_filter *it; 4318 4319 TAILQ_FOREACH(it, filter_list, entries) { 4320 if (memcmp(key, &it->filter_info, 4321 sizeof(struct e1000_5tuple_filter_info)) == 0) { 4322 return it; 4323 } 4324 } 4325 return NULL; 4326 } 4327 4328 /* inject a igb 5-tuple filter to HW */ 4329 static inline void 4330 igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev, 4331 struct e1000_5tuple_filter *filter) 4332 { 4333 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4334 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK; 4335 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP; 4336 uint8_t i; 4337 4338 i = filter->index; 4339 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK; 4340 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. 
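	 * The FTQF mask bits are bypass flags: clearing a *_BP bit below makes
	 * the hardware compare that field, leaving it set ignores the field.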
*/ 4341 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP; 4342 if (filter->filter_info.dst_ip_mask == 0) 4343 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP; 4344 if (filter->filter_info.src_port_mask == 0) 4345 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; 4346 if (filter->filter_info.proto_mask == 0) 4347 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; 4348 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) & 4349 E1000_FTQF_QUEUE_MASK; 4350 ftqf |= E1000_FTQF_QUEUE_ENABLE; 4351 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf); 4352 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip); 4353 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip); 4354 4355 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT; 4356 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf); 4357 4358 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); 4359 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ 4360 imir |= E1000_IMIR_PORT_BP; 4361 else 4362 imir &= ~E1000_IMIR_PORT_BP; 4363 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; 4364 4365 /* tcp flags bits setting. */ 4366 if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) { 4367 if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG) 4368 imir_ext |= E1000_IMIREXT_CTRL_URG; 4369 if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG) 4370 imir_ext |= E1000_IMIREXT_CTRL_ACK; 4371 if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG) 4372 imir_ext |= E1000_IMIREXT_CTRL_PSH; 4373 if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG) 4374 imir_ext |= E1000_IMIREXT_CTRL_RST; 4375 if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG) 4376 imir_ext |= E1000_IMIREXT_CTRL_SYN; 4377 if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG) 4378 imir_ext |= E1000_IMIREXT_CTRL_FIN; 4379 } else { 4380 imir_ext |= E1000_IMIREXT_CTRL_BP; 4381 } 4382 E1000_WRITE_REG(hw, E1000_IMIR(i), imir); 4383 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); 4384 } 4385 4386 /* 4387 * igb_add_5tuple_filter_82576 - add a 5tuple filter 4388 * 4389 * @param 4390 * dev: Pointer to struct rte_eth_dev. 4391 * ntuple_filter: ponter to the filter that will be added. 4392 * 4393 * @return 4394 * - On success, zero. 4395 * - On failure, a negative value. 4396 */ 4397 static int 4398 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, 4399 struct rte_eth_ntuple_filter *ntuple_filter) 4400 { 4401 struct e1000_filter_info *filter_info = 4402 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4403 struct e1000_5tuple_filter *filter; 4404 uint8_t i; 4405 int ret; 4406 4407 filter = rte_zmalloc("e1000_5tuple_filter", 4408 sizeof(struct e1000_5tuple_filter), 0); 4409 if (filter == NULL) 4410 return -ENOMEM; 4411 4412 ret = ntuple_filter_to_5tuple_82576(ntuple_filter, 4413 &filter->filter_info); 4414 if (ret < 0) { 4415 rte_free(filter); 4416 return ret; 4417 } 4418 4419 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, 4420 &filter->filter_info) != NULL) { 4421 PMD_DRV_LOG(ERR, "filter exists."); 4422 rte_free(filter); 4423 return -EEXIST; 4424 } 4425 filter->queue = ntuple_filter->queue; 4426 4427 /* 4428 * look for an unused 5tuple filter index, 4429 * and insert the filter to list. 
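	 * The first clear bit in fivetuple_mask becomes the hardware index
	 * used for the FTQF/DAQF/SAQF/SPQF/IMIR registers programmed by
	 * igb_inject_5tuple_filter_82576().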
4430 */ 4431 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) { 4432 if (!(filter_info->fivetuple_mask & (1 << i))) { 4433 filter_info->fivetuple_mask |= 1 << i; 4434 filter->index = i; 4435 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 4436 filter, 4437 entries); 4438 break; 4439 } 4440 } 4441 if (i >= E1000_MAX_FTQF_FILTERS) { 4442 PMD_DRV_LOG(ERR, "5tuple filters are full."); 4443 rte_free(filter); 4444 return -ENOSYS; 4445 } 4446 4447 igb_inject_5tuple_filter_82576(dev, filter); 4448 return 0; 4449 } 4450 4451 int 4452 igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, 4453 struct e1000_5tuple_filter *filter) 4454 { 4455 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4456 struct e1000_filter_info *filter_info = 4457 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4458 4459 filter_info->fivetuple_mask &= ~(1 << filter->index); 4460 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 4461 rte_free(filter); 4462 4463 E1000_WRITE_REG(hw, E1000_FTQF(filter->index), 4464 E1000_FTQF_VF_BP | E1000_FTQF_MASK); 4465 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0); 4466 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0); 4467 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0); 4468 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); 4469 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); 4470 return 0; 4471 } 4472 4473 /* 4474 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter 4475 * 4476 * @param 4477 * dev: Pointer to struct rte_eth_dev. 4478 * ntuple_filter: ponter to the filter that will be removed. 4479 * 4480 * @return 4481 * - On success, zero. 4482 * - On failure, a negative value. 4483 */ 4484 static int 4485 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, 4486 struct rte_eth_ntuple_filter *ntuple_filter) 4487 { 4488 struct e1000_filter_info *filter_info = 4489 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4490 struct e1000_5tuple_filter_info filter_5tuple; 4491 struct e1000_5tuple_filter *filter; 4492 int ret; 4493 4494 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info)); 4495 ret = ntuple_filter_to_5tuple_82576(ntuple_filter, 4496 &filter_5tuple); 4497 if (ret < 0) 4498 return ret; 4499 4500 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, 4501 &filter_5tuple); 4502 if (filter == NULL) { 4503 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4504 return -ENOENT; 4505 } 4506 4507 igb_delete_5tuple_filter_82576(dev, filter); 4508 4509 return 0; 4510 } 4511 4512 static int 4513 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 4514 { 4515 uint32_t rctl; 4516 struct e1000_hw *hw; 4517 struct rte_eth_dev_info dev_info; 4518 uint32_t frame_size = mtu + E1000_ETH_OVERHEAD; 4519 int ret; 4520 4521 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4522 4523 #ifdef RTE_LIBRTE_82571_SUPPORT 4524 /* XXX: not bigger than max_rx_pktlen */ 4525 if (hw->mac.type == e1000_82571) 4526 return -ENOTSUP; 4527 #endif 4528 ret = eth_igb_infos_get(dev, &dev_info); 4529 if (ret != 0) 4530 return ret; 4531 4532 /* check that mtu is within the allowed range */ 4533 if (mtu < RTE_ETHER_MIN_MTU || 4534 frame_size > dev_info.max_rx_pktlen) 4535 return -EINVAL; 4536 4537 /* refuse mtu that requires the support of scattered packets when this 4538 * feature has not been enabled before. 
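	 * In other words, the new frame size must still fit into a single mbuf
	 * data room (min_rx_buf_size - RTE_PKTMBUF_HEADROOM) unless scattered
	 * RX was already enabled at configure time.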
*/ 4539 if (!dev->data->scattered_rx && 4540 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) 4541 return -EINVAL; 4542 4543 rctl = E1000_READ_REG(hw, E1000_RCTL); 4544 4545 /* switch to jumbo mode if needed */ 4546 if (frame_size > RTE_ETHER_MAX_LEN) { 4547 dev->data->dev_conf.rxmode.offloads |= 4548 DEV_RX_OFFLOAD_JUMBO_FRAME; 4549 rctl |= E1000_RCTL_LPE; 4550 } else { 4551 dev->data->dev_conf.rxmode.offloads &= 4552 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 4553 rctl &= ~E1000_RCTL_LPE; 4554 } 4555 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 4556 4557 /* update max frame size */ 4558 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 4559 4560 E1000_WRITE_REG(hw, E1000_RLPML, 4561 dev->data->dev_conf.rxmode.max_rx_pkt_len); 4562 4563 return 0; 4564 } 4565 4566 /* 4567 * igb_add_del_ntuple_filter - add or delete a ntuple filter 4568 * 4569 * @param 4570 * dev: Pointer to struct rte_eth_dev. 4571 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 4572 * add: if true, add filter, if false, remove filter 4573 * 4574 * @return 4575 * - On success, zero. 4576 * - On failure, a negative value. 4577 */ 4578 int 4579 igb_add_del_ntuple_filter(struct rte_eth_dev *dev, 4580 struct rte_eth_ntuple_filter *ntuple_filter, 4581 bool add) 4582 { 4583 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4584 int ret; 4585 4586 switch (ntuple_filter->flags) { 4587 case RTE_5TUPLE_FLAGS: 4588 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4589 if (hw->mac.type != e1000_82576) 4590 return -ENOTSUP; 4591 if (add) 4592 ret = igb_add_5tuple_filter_82576(dev, 4593 ntuple_filter); 4594 else 4595 ret = igb_remove_5tuple_filter_82576(dev, 4596 ntuple_filter); 4597 break; 4598 case RTE_2TUPLE_FLAGS: 4599 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4600 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 && 4601 hw->mac.type != e1000_i210 && 4602 hw->mac.type != e1000_i211) 4603 return -ENOTSUP; 4604 if (add) 4605 ret = igb_add_2tuple_filter(dev, ntuple_filter); 4606 else 4607 ret = igb_remove_2tuple_filter(dev, ntuple_filter); 4608 break; 4609 default: 4610 ret = -EINVAL; 4611 break; 4612 } 4613 4614 return ret; 4615 } 4616 4617 /* 4618 * igb_get_ntuple_filter - get a ntuple filter 4619 * 4620 * @param 4621 * dev: Pointer to struct rte_eth_dev. 4622 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 4623 * 4624 * @return 4625 * - On success, zero. 4626 * - On failure, a negative value. 
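 *
 * Only the queue field of *ntuple_filter is filled in from the stored
 * filter; the remaining fields form the lookup key supplied by the caller.
 * 5-tuple queries are limited to 82576, 2-tuple queries to 82580/i350.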
4627 */ 4628 static int 4629 igb_get_ntuple_filter(struct rte_eth_dev *dev, 4630 struct rte_eth_ntuple_filter *ntuple_filter) 4631 { 4632 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4633 struct e1000_filter_info *filter_info = 4634 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4635 struct e1000_5tuple_filter_info filter_5tuple; 4636 struct e1000_2tuple_filter_info filter_2tuple; 4637 struct e1000_5tuple_filter *p_5tuple_filter; 4638 struct e1000_2tuple_filter *p_2tuple_filter; 4639 int ret; 4640 4641 switch (ntuple_filter->flags) { 4642 case RTE_5TUPLE_FLAGS: 4643 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4644 if (hw->mac.type != e1000_82576) 4645 return -ENOTSUP; 4646 memset(&filter_5tuple, 4647 0, 4648 sizeof(struct e1000_5tuple_filter_info)); 4649 ret = ntuple_filter_to_5tuple_82576(ntuple_filter, 4650 &filter_5tuple); 4651 if (ret < 0) 4652 return ret; 4653 p_5tuple_filter = igb_5tuple_filter_lookup_82576( 4654 &filter_info->fivetuple_list, 4655 &filter_5tuple); 4656 if (p_5tuple_filter == NULL) { 4657 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4658 return -ENOENT; 4659 } 4660 ntuple_filter->queue = p_5tuple_filter->queue; 4661 break; 4662 case RTE_2TUPLE_FLAGS: 4663 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4664 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350) 4665 return -ENOTSUP; 4666 memset(&filter_2tuple, 4667 0, 4668 sizeof(struct e1000_2tuple_filter_info)); 4669 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple); 4670 if (ret < 0) 4671 return ret; 4672 p_2tuple_filter = igb_2tuple_filter_lookup( 4673 &filter_info->twotuple_list, 4674 &filter_2tuple); 4675 if (p_2tuple_filter == NULL) { 4676 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4677 return -ENOENT; 4678 } 4679 ntuple_filter->queue = p_2tuple_filter->queue; 4680 break; 4681 default: 4682 ret = -EINVAL; 4683 break; 4684 } 4685 4686 return 0; 4687 } 4688 4689 /* 4690 * igb_ntuple_filter_handle - Handle operations for ntuple filter. 4691 * @dev: pointer to rte_eth_dev structure 4692 * @filter_op:operation will be taken. 
4693 * @arg: a pointer to specific structure corresponding to the filter_op 4694 */ 4695 static int 4696 igb_ntuple_filter_handle(struct rte_eth_dev *dev, 4697 enum rte_filter_op filter_op, 4698 void *arg) 4699 { 4700 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4701 int ret; 4702 4703 MAC_TYPE_FILTER_SUP(hw->mac.type); 4704 4705 if (filter_op == RTE_ETH_FILTER_NOP) 4706 return 0; 4707 4708 if (arg == NULL) { 4709 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 4710 filter_op); 4711 return -EINVAL; 4712 } 4713 4714 switch (filter_op) { 4715 case RTE_ETH_FILTER_ADD: 4716 ret = igb_add_del_ntuple_filter(dev, 4717 (struct rte_eth_ntuple_filter *)arg, 4718 TRUE); 4719 break; 4720 case RTE_ETH_FILTER_DELETE: 4721 ret = igb_add_del_ntuple_filter(dev, 4722 (struct rte_eth_ntuple_filter *)arg, 4723 FALSE); 4724 break; 4725 case RTE_ETH_FILTER_GET: 4726 ret = igb_get_ntuple_filter(dev, 4727 (struct rte_eth_ntuple_filter *)arg); 4728 break; 4729 default: 4730 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 4731 ret = -EINVAL; 4732 break; 4733 } 4734 return ret; 4735 } 4736 4737 static inline int 4738 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info, 4739 uint16_t ethertype) 4740 { 4741 int i; 4742 4743 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 4744 if (filter_info->ethertype_filters[i].ethertype == ethertype && 4745 (filter_info->ethertype_mask & (1 << i))) 4746 return i; 4747 } 4748 return -1; 4749 } 4750 4751 static inline int 4752 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info, 4753 uint16_t ethertype, uint32_t etqf) 4754 { 4755 int i; 4756 4757 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 4758 if (!(filter_info->ethertype_mask & (1 << i))) { 4759 filter_info->ethertype_mask |= 1 << i; 4760 filter_info->ethertype_filters[i].ethertype = ethertype; 4761 filter_info->ethertype_filters[i].etqf = etqf; 4762 return i; 4763 } 4764 } 4765 return -1; 4766 } 4767 4768 int 4769 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, 4770 uint8_t idx) 4771 { 4772 if (idx >= E1000_MAX_ETQF_FILTERS) 4773 return -1; 4774 filter_info->ethertype_mask &= ~(1 << idx); 4775 filter_info->ethertype_filters[idx].ethertype = 0; 4776 filter_info->ethertype_filters[idx].etqf = 0; 4777 return idx; 4778 } 4779 4780 4781 int 4782 igb_add_del_ethertype_filter(struct rte_eth_dev *dev, 4783 struct rte_eth_ethertype_filter *filter, 4784 bool add) 4785 { 4786 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4787 struct e1000_filter_info *filter_info = 4788 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4789 uint32_t etqf = 0; 4790 int ret; 4791 4792 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 4793 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 4794 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 4795 " ethertype filter.", filter->ether_type); 4796 return -EINVAL; 4797 } 4798 4799 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 4800 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 4801 return -EINVAL; 4802 } 4803 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 4804 PMD_DRV_LOG(ERR, "drop option is unsupported."); 4805 return -EINVAL; 4806 } 4807 4808 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); 4809 if (ret >= 0 && add) { 4810 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 4811 filter->ether_type); 4812 return -EEXIST; 4813 } 4814 if (ret < 0 && !add) { 4815 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4816 filter->ether_type); 4817 return -ENOENT; 
4818 } 4819 4820 if (add) { 4821 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE; 4822 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE); 4823 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT; 4824 ret = igb_ethertype_filter_insert(filter_info, 4825 filter->ether_type, etqf); 4826 if (ret < 0) { 4827 PMD_DRV_LOG(ERR, "ethertype filters are full."); 4828 return -ENOSYS; 4829 } 4830 } else { 4831 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret); 4832 if (ret < 0) 4833 return -ENOSYS; 4834 } 4835 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf); 4836 E1000_WRITE_FLUSH(hw); 4837 4838 return 0; 4839 } 4840 4841 static int 4842 igb_get_ethertype_filter(struct rte_eth_dev *dev, 4843 struct rte_eth_ethertype_filter *filter) 4844 { 4845 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4846 struct e1000_filter_info *filter_info = 4847 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4848 uint32_t etqf; 4849 int ret; 4850 4851 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); 4852 if (ret < 0) { 4853 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4854 filter->ether_type); 4855 return -ENOENT; 4856 } 4857 4858 etqf = E1000_READ_REG(hw, E1000_ETQF(ret)); 4859 if (etqf & E1000_ETQF_FILTER_ENABLE) { 4860 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE; 4861 filter->flags = 0; 4862 filter->queue = (etqf & E1000_ETQF_QUEUE) >> 4863 E1000_ETQF_QUEUE_SHIFT; 4864 return 0; 4865 } 4866 4867 return -ENOENT; 4868 } 4869 4870 /* 4871 * igb_ethertype_filter_handle - Handle operations for ethertype filter. 4872 * @dev: pointer to rte_eth_dev structure 4873 * @filter_op:operation will be taken. 4874 * @arg: a pointer to specific structure corresponding to the filter_op 4875 */ 4876 static int 4877 igb_ethertype_filter_handle(struct rte_eth_dev *dev, 4878 enum rte_filter_op filter_op, 4879 void *arg) 4880 { 4881 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4882 int ret; 4883 4884 MAC_TYPE_FILTER_SUP(hw->mac.type); 4885 4886 if (filter_op == RTE_ETH_FILTER_NOP) 4887 return 0; 4888 4889 if (arg == NULL) { 4890 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 4891 filter_op); 4892 return -EINVAL; 4893 } 4894 4895 switch (filter_op) { 4896 case RTE_ETH_FILTER_ADD: 4897 ret = igb_add_del_ethertype_filter(dev, 4898 (struct rte_eth_ethertype_filter *)arg, 4899 TRUE); 4900 break; 4901 case RTE_ETH_FILTER_DELETE: 4902 ret = igb_add_del_ethertype_filter(dev, 4903 (struct rte_eth_ethertype_filter *)arg, 4904 FALSE); 4905 break; 4906 case RTE_ETH_FILTER_GET: 4907 ret = igb_get_ethertype_filter(dev, 4908 (struct rte_eth_ethertype_filter *)arg); 4909 break; 4910 default: 4911 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 4912 ret = -EINVAL; 4913 break; 4914 } 4915 return ret; 4916 } 4917 4918 static int 4919 eth_igb_filter_ctrl(struct rte_eth_dev *dev, 4920 enum rte_filter_type filter_type, 4921 enum rte_filter_op filter_op, 4922 void *arg) 4923 { 4924 int ret = 0; 4925 4926 switch (filter_type) { 4927 case RTE_ETH_FILTER_NTUPLE: 4928 ret = igb_ntuple_filter_handle(dev, filter_op, arg); 4929 break; 4930 case RTE_ETH_FILTER_ETHERTYPE: 4931 ret = igb_ethertype_filter_handle(dev, filter_op, arg); 4932 break; 4933 case RTE_ETH_FILTER_SYN: 4934 ret = eth_igb_syn_filter_handle(dev, filter_op, arg); 4935 break; 4936 case RTE_ETH_FILTER_FLEXIBLE: 4937 ret = eth_igb_flex_filter_handle(dev, filter_op, arg); 4938 break; 4939 case RTE_ETH_FILTER_GENERIC: 4940 if (filter_op != RTE_ETH_FILTER_GET) 4941 
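			/* the generic flow API only supports querying the
			 * igb_flow_ops pointer
			 */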
return -EINVAL; 4942 *(const void **)arg = &igb_flow_ops; 4943 break; 4944 default: 4945 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", 4946 filter_type); 4947 break; 4948 } 4949 4950 return ret; 4951 } 4952 4953 static int 4954 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev, 4955 struct rte_ether_addr *mc_addr_set, 4956 uint32_t nb_mc_addr) 4957 { 4958 struct e1000_hw *hw; 4959 4960 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4961 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); 4962 return 0; 4963 } 4964 4965 static uint64_t 4966 igb_read_systime_cyclecounter(struct rte_eth_dev *dev) 4967 { 4968 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4969 uint64_t systime_cycles; 4970 4971 switch (hw->mac.type) { 4972 case e1000_i210: 4973 case e1000_i211: 4974 /* 4975 * Need to read System Time Residue Register to be able 4976 * to read the other two registers. 4977 */ 4978 E1000_READ_REG(hw, E1000_SYSTIMR); 4979 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 4980 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4981 systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) 4982 * NSEC_PER_SEC; 4983 break; 4984 case e1000_82580: 4985 case e1000_i350: 4986 case e1000_i354: 4987 /* 4988 * Need to read System Time Residue Register to be able 4989 * to read the other two registers. 4990 */ 4991 E1000_READ_REG(hw, E1000_SYSTIMR); 4992 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4993 /* Only the 8 LSB are valid. */ 4994 systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH) 4995 & 0xff) << 32; 4996 break; 4997 default: 4998 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4999 systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) 5000 << 32; 5001 break; 5002 } 5003 5004 return systime_cycles; 5005 } 5006 5007 static uint64_t 5008 igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 5009 { 5010 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5011 uint64_t rx_tstamp_cycles; 5012 5013 switch (hw->mac.type) { 5014 case e1000_i210: 5015 case e1000_i211: 5016 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 5017 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 5018 rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) 5019 * NSEC_PER_SEC; 5020 break; 5021 case e1000_82580: 5022 case e1000_i350: 5023 case e1000_i354: 5024 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 5025 /* Only the 8 LSB are valid. */ 5026 rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH) 5027 & 0xff) << 32; 5028 break; 5029 default: 5030 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 5031 rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) 5032 << 32; 5033 break; 5034 } 5035 5036 return rx_tstamp_cycles; 5037 } 5038 5039 static uint64_t 5040 igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 5041 { 5042 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5043 uint64_t tx_tstamp_cycles; 5044 5045 switch (hw->mac.type) { 5046 case e1000_i210: 5047 case e1000_i211: 5048 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 5049 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 5050 tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) 5051 * NSEC_PER_SEC; 5052 break; 5053 case e1000_82580: 5054 case e1000_i350: 5055 case e1000_i354: 5056 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 5057 /* Only the 8 LSB are valid. 
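	 * TXSTMPL/TXSTMPH form a 40-bit timestamp on these MACs: 32 bits from
	 * the low register plus the 8 valid bits of the high register
	 * (matching the 40-bit cc_mask set up in igb_start_timecounters()).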
*/ 5058 tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH) 5059 & 0xff) << 32; 5060 break; 5061 default: 5062 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 5063 tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) 5064 << 32; 5065 break; 5066 } 5067 5068 return tx_tstamp_cycles; 5069 } 5070 5071 static void 5072 igb_start_timecounters(struct rte_eth_dev *dev) 5073 { 5074 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5075 struct e1000_adapter *adapter = dev->data->dev_private; 5076 uint32_t incval = 1; 5077 uint32_t shift = 0; 5078 uint64_t mask = E1000_CYCLECOUNTER_MASK; 5079 5080 switch (hw->mac.type) { 5081 case e1000_82580: 5082 case e1000_i350: 5083 case e1000_i354: 5084 /* 32 LSB bits + 8 MSB bits = 40 bits */ 5085 mask = (1ULL << 40) - 1; 5086 /* fall-through */ 5087 case e1000_i210: 5088 case e1000_i211: 5089 /* 5090 * Start incrementing the register 5091 * used to timestamp PTP packets. 5092 */ 5093 E1000_WRITE_REG(hw, E1000_TIMINCA, incval); 5094 break; 5095 case e1000_82576: 5096 incval = E1000_INCVALUE_82576; 5097 shift = IGB_82576_TSYNC_SHIFT; 5098 E1000_WRITE_REG(hw, E1000_TIMINCA, 5099 E1000_INCPERIOD_82576 | incval); 5100 break; 5101 default: 5102 /* Not supported */ 5103 return; 5104 } 5105 5106 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 5107 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 5108 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 5109 5110 adapter->systime_tc.cc_mask = mask; 5111 adapter->systime_tc.cc_shift = shift; 5112 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 5113 5114 adapter->rx_tstamp_tc.cc_mask = mask; 5115 adapter->rx_tstamp_tc.cc_shift = shift; 5116 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 5117 5118 adapter->tx_tstamp_tc.cc_mask = mask; 5119 adapter->tx_tstamp_tc.cc_shift = shift; 5120 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 5121 } 5122 5123 static int 5124 igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 5125 { 5126 struct e1000_adapter *adapter = dev->data->dev_private; 5127 5128 adapter->systime_tc.nsec += delta; 5129 adapter->rx_tstamp_tc.nsec += delta; 5130 adapter->tx_tstamp_tc.nsec += delta; 5131 5132 return 0; 5133 } 5134 5135 static int 5136 igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 5137 { 5138 uint64_t ns; 5139 struct e1000_adapter *adapter = dev->data->dev_private; 5140 5141 ns = rte_timespec_to_ns(ts); 5142 5143 /* Set the timecounters to a new value. */ 5144 adapter->systime_tc.nsec = ns; 5145 adapter->rx_tstamp_tc.nsec = ns; 5146 adapter->tx_tstamp_tc.nsec = ns; 5147 5148 return 0; 5149 } 5150 5151 static int 5152 igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 5153 { 5154 uint64_t ns, systime_cycles; 5155 struct e1000_adapter *adapter = dev->data->dev_private; 5156 5157 systime_cycles = igb_read_systime_cyclecounter(dev); 5158 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 5159 *ts = rte_ns_to_timespec(ns); 5160 5161 return 0; 5162 } 5163 5164 static int 5165 igb_timesync_enable(struct rte_eth_dev *dev) 5166 { 5167 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5168 uint32_t tsync_ctl; 5169 uint32_t tsauxc; 5170 5171 /* Stop the timesync system time. */ 5172 E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0); 5173 /* Reset the timesync system time value. 
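	 * On 82580 and newer the residue register SYSTIMR is cleared as well
	 * (the case falls through to SYSTIML/SYSTIMH); 82576 clears only
	 * SYSTIML/SYSTIMH.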
*/ 5174 switch (hw->mac.type) { 5175 case e1000_82580: 5176 case e1000_i350: 5177 case e1000_i354: 5178 case e1000_i210: 5179 case e1000_i211: 5180 E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0); 5181 /* fall-through */ 5182 case e1000_82576: 5183 E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0); 5184 E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0); 5185 break; 5186 default: 5187 /* Not supported. */ 5188 return -ENOTSUP; 5189 } 5190 5191 /* Enable system time for it isn't on by default. */ 5192 tsauxc = E1000_READ_REG(hw, E1000_TSAUXC); 5193 tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME; 5194 E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc); 5195 5196 igb_start_timecounters(dev); 5197 5198 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 5199 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 5200 (RTE_ETHER_TYPE_1588 | 5201 E1000_ETQF_FILTER_ENABLE | 5202 E1000_ETQF_1588)); 5203 5204 /* Enable timestamping of received PTP packets. */ 5205 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); 5206 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED; 5207 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl); 5208 5209 /* Enable Timestamping of transmitted PTP packets. */ 5210 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); 5211 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED; 5212 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl); 5213 5214 return 0; 5215 } 5216 5217 static int 5218 igb_timesync_disable(struct rte_eth_dev *dev) 5219 { 5220 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5221 uint32_t tsync_ctl; 5222 5223 /* Disable timestamping of transmitted PTP packets. */ 5224 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); 5225 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED; 5226 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl); 5227 5228 /* Disable timestamping of received PTP packets. */ 5229 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); 5230 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED; 5231 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl); 5232 5233 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 5234 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0); 5235 5236 /* Stop incrementating the System Time registers. 
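	 * Writing 0 to TIMINCA freezes SYSTIM, mirroring the stop performed at
	 * the start of igb_timesync_enable().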
*/ 5237 E1000_WRITE_REG(hw, E1000_TIMINCA, 0); 5238 5239 return 0; 5240 } 5241 5242 static int 5243 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 5244 struct timespec *timestamp, 5245 uint32_t flags __rte_unused) 5246 { 5247 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5248 struct e1000_adapter *adapter = dev->data->dev_private; 5249 uint32_t tsync_rxctl; 5250 uint64_t rx_tstamp_cycles; 5251 uint64_t ns; 5252 5253 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); 5254 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0) 5255 return -EINVAL; 5256 5257 rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev); 5258 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 5259 *timestamp = rte_ns_to_timespec(ns); 5260 5261 return 0; 5262 } 5263 5264 static int 5265 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 5266 struct timespec *timestamp) 5267 { 5268 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5269 struct e1000_adapter *adapter = dev->data->dev_private; 5270 uint32_t tsync_txctl; 5271 uint64_t tx_tstamp_cycles; 5272 uint64_t ns; 5273 5274 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); 5275 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0) 5276 return -EINVAL; 5277 5278 tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev); 5279 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 5280 *timestamp = rte_ns_to_timespec(ns); 5281 5282 return 0; 5283 } 5284 5285 static int 5286 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused) 5287 { 5288 int count = 0; 5289 int g_ind = 0; 5290 const struct reg_info *reg_group; 5291 5292 while ((reg_group = igb_regs[g_ind++])) 5293 count += igb_reg_group_count(reg_group); 5294 5295 return count; 5296 } 5297 5298 static int 5299 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 5300 { 5301 int count = 0; 5302 int g_ind = 0; 5303 const struct reg_info *reg_group; 5304 5305 while ((reg_group = igbvf_regs[g_ind++])) 5306 count += igb_reg_group_count(reg_group); 5307 5308 return count; 5309 } 5310 5311 static int 5312 eth_igb_get_regs(struct rte_eth_dev *dev, 5313 struct rte_dev_reg_info *regs) 5314 { 5315 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5316 uint32_t *data = regs->data; 5317 int g_ind = 0; 5318 int count = 0; 5319 const struct reg_info *reg_group; 5320 5321 if (data == NULL) { 5322 regs->length = eth_igb_get_reg_length(dev); 5323 regs->width = sizeof(uint32_t); 5324 return 0; 5325 } 5326 5327 /* Support only full register dump */ 5328 if ((regs->length == 0) || 5329 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) { 5330 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 5331 hw->device_id; 5332 while ((reg_group = igb_regs[g_ind++])) 5333 count += igb_read_regs_group(dev, &data[count], 5334 reg_group); 5335 return 0; 5336 } 5337 5338 return -ENOTSUP; 5339 } 5340 5341 static int 5342 igbvf_get_regs(struct rte_eth_dev *dev, 5343 struct rte_dev_reg_info *regs) 5344 { 5345 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5346 uint32_t *data = regs->data; 5347 int g_ind = 0; 5348 int count = 0; 5349 const struct reg_info *reg_group; 5350 5351 if (data == NULL) { 5352 regs->length = igbvf_get_reg_length(dev); 5353 regs->width = sizeof(uint32_t); 5354 return 0; 5355 } 5356 5357 /* Support only full register dump */ 5358 if ((regs->length == 0) || 5359 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) { 5360 regs->version = hw->mac.type << 24 | hw->revision_id << 
16 | 5361 hw->device_id; 5362 while ((reg_group = igbvf_regs[g_ind++])) 5363 count += igb_read_regs_group(dev, &data[count], 5364 reg_group); 5365 return 0; 5366 } 5367 5368 return -ENOTSUP; 5369 } 5370 5371 static int 5372 eth_igb_get_eeprom_length(struct rte_eth_dev *dev) 5373 { 5374 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5375 5376 /* Return unit is byte count */ 5377 return hw->nvm.word_size * 2; 5378 } 5379 5380 static int 5381 eth_igb_get_eeprom(struct rte_eth_dev *dev, 5382 struct rte_dev_eeprom_info *in_eeprom) 5383 { 5384 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5385 struct e1000_nvm_info *nvm = &hw->nvm; 5386 uint16_t *data = in_eeprom->data; 5387 int first, length; 5388 5389 first = in_eeprom->offset >> 1; 5390 length = in_eeprom->length >> 1; 5391 if ((first >= hw->nvm.word_size) || 5392 ((first + length) >= hw->nvm.word_size)) 5393 return -EINVAL; 5394 5395 in_eeprom->magic = hw->vendor_id | 5396 ((uint32_t)hw->device_id << 16); 5397 5398 if ((nvm->ops.read) == NULL) 5399 return -ENOTSUP; 5400 5401 return nvm->ops.read(hw, first, length, data); 5402 } 5403 5404 static int 5405 eth_igb_set_eeprom(struct rte_eth_dev *dev, 5406 struct rte_dev_eeprom_info *in_eeprom) 5407 { 5408 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5409 struct e1000_nvm_info *nvm = &hw->nvm; 5410 uint16_t *data = in_eeprom->data; 5411 int first, length; 5412 5413 first = in_eeprom->offset >> 1; 5414 length = in_eeprom->length >> 1; 5415 if ((first >= hw->nvm.word_size) || 5416 ((first + length) >= hw->nvm.word_size)) 5417 return -EINVAL; 5418 5419 in_eeprom->magic = (uint32_t)hw->vendor_id | 5420 ((uint32_t)hw->device_id << 16); 5421 5422 if ((nvm->ops.write) == NULL) 5423 return -ENOTSUP; 5424 return nvm->ops.write(hw, first, length, data); 5425 } 5426 5427 static int 5428 eth_igb_get_module_info(struct rte_eth_dev *dev, 5429 struct rte_eth_dev_module_info *modinfo) 5430 { 5431 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5432 5433 uint32_t status = 0; 5434 uint16_t sff8472_rev, addr_mode; 5435 bool page_swap = false; 5436 5437 if (hw->phy.media_type == e1000_media_type_copper || 5438 hw->phy.media_type == e1000_media_type_unknown) 5439 return -EOPNOTSUPP; 5440 5441 /* Check whether we support SFF-8472 or not */ 5442 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); 5443 if (status) 5444 return -EIO; 5445 5446 /* addressing mode is not supported */ 5447 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); 5448 if (status) 5449 return -EIO; 5450 5451 /* addressing mode is not supported */ 5452 if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { 5453 PMD_DRV_LOG(ERR, 5454 "Address change required to access page 0xA2, " 5455 "but not supported. 
static int
eth_igb_get_module_info(struct rte_eth_dev *dev,
			struct rte_eth_dev_module_info *modinfo)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	uint32_t status = 0;
	uint16_t sff8472_rev, addr_mode;
	bool page_swap = false;

	if (hw->phy.media_type == e1000_media_type_copper ||
	    hw->phy.media_type == e1000_media_type_unknown)
		return -EOPNOTSUPP;

	/* Check whether we support SFF-8472 or not */
	status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
	if (status)
		return -EIO;

	/* Read the SFF-8472 addressing mode */
	status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
		PMD_DRV_LOG(ERR,
			    "Address change required to access page 0xA2, "
			    "but not supported. Please report the module "
			    "type to the driver maintainers.\n");
		page_swap = true;
	}

	if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
		/* We have an SFP, but it does not support SFF-8472 */
		modinfo->type = RTE_ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have an SFP which supports a revision of SFF-8472 */
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}

static int
eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	uint32_t status = 0;
	uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
	u16 first_word, last_word;
	int i = 0;

	if (info->length == 0)
		return -EINVAL;

	first_word = info->offset >> 1;
	last_word = (info->offset + info->length - 1) >> 1;

	/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
	for (i = 0; i < last_word - first_word + 1; i++) {
		status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
						&dataword[i]);
		if (status) {
			/* Error occurred while reading module */
			return -EIO;
		}

		dataword[i] = rte_be_to_cpu_16(dataword[i]);
	}

	memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);

	return 0;
}

static int
eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = E1000_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = E1000_RX_VEC_START;

	uint32_t mask = 1 << (queue_id + vec);

	E1000_WRITE_REG(hw, E1000_EIMC, mask);
	E1000_WRITE_FLUSH(hw);

	return 0;
}

static int
eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = E1000_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = E1000_RX_VEC_START;

	uint32_t mask = 1 << (queue_id + vec);
	uint32_t regval;

	regval = E1000_READ_REG(hw, E1000_EIMS);
	E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
	E1000_WRITE_FLUSH(hw);

	rte_intr_ack(intr_handle);

	return 0;
}
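
/*
 * Illustrative sketch (not part of the driver): the per-queue interrupt
 * enable/disable callbacks above are normally driven from an application
 * Rx-interrupt loop, similar to the l3fwd-power example.  The helper name
 * and the 10 ms timeout are assumptions made for this example only; error
 * handling is omitted for brevity.
 */
static void __rte_unused
igb_example_rx_intr_wait(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;

	/* Register the queue event fd with the per-thread epoll instance. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);

	/* Arm the queue interrupt (sets the EIMS bit and acks the vector). */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);

	/* Sleep until the queue raises an interrupt or 10 ms elapse. */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, 10);

	/* Mask the vector again (EIMC) before going back to polling. */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}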
static void
eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
		   uint8_t index, uint8_t offset)
{
	uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (msix_vector | E1000_IVAR_VALID) << offset;

	E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
}

static void
eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
			   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	/* direction 0 maps an Rx queue, direction 1 a Tx queue */
	if (hw->mac.type == e1000_82575) {
		if (direction == 0)
			tmp = E1000_EICR_RX_QUEUE0 << queue;
		else if (direction == 1)
			tmp = E1000_EICR_TX_QUEUE0 << queue;
		E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
	} else if (hw->mac.type == e1000_82576) {
		if ((direction == 0) || (direction == 1))
			eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
					   ((queue & 0x8) << 1) +
					   8 * direction);
	} else if ((hw->mac.type == e1000_82580) ||
		   (hw->mac.type == e1000_i350) ||
		   (hw->mac.type == e1000_i354) ||
		   (hw->mac.type == e1000_i210) ||
		   (hw->mac.type == e1000_i211)) {
		if ((direction == 0) || (direction == 1))
			eth_igb_write_ivar(hw, msix_vector,
					   queue >> 1,
					   ((queue & 0x1) << 4) +
					   8 * direction);
	}
}

/* Sets up the hardware to generate MSI-X interrupts properly
 * @dev
 *  board private structure
 */
static void
eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
{
	int queue_id;
	uint32_t tmpval, regval, intr_mask;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vec = E1000_MISC_VEC_ID;
	uint32_t base = E1000_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* Won't configure the MSI-X registers if no mapping is done
	 * between interrupt vectors and event fds.
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		vec = base = E1000_RX_VEC_START;
		misc_shift = 1;
	}

	/* set interrupt vector for other causes */
	if (hw->mac.type == e1000_82575) {
		tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmpval |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read */
		tmpval |= E1000_CTRL_EXT_EIAME;
		tmpval |= E1000_CTRL_EXT_IRCA;

		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);

		/* enable msix_other interrupt */
		E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAM);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
	} else if ((hw->mac.type == e1000_82576) ||
		   (hw->mac.type == e1000_82580) ||
		   (hw->mac.type == e1000_i350) ||
		   (hw->mac.type == e1000_i354) ||
		   (hw->mac.type == e1000_i210) ||
		   (hw->mac.type == e1000_i211)) {
		/* turn on MSI-X capability first */
		E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
			E1000_GPIE_PBA | E1000_GPIE_EIAME |
			E1000_GPIE_NSICR);
		intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
			misc_shift;

		if (dev->data->dev_conf.intr_conf.lsc != 0)
			intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);

		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);

		/* enable msix_other interrupt */
		regval = E1000_READ_REG(hw, E1000_EIMS);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
		tmpval = (IGB_MSIX_OTHER_INTR_VEC | E1000_IVAR_VALID) << 8;
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
	}

	/* use EIAM to auto-mask when MSI-X interrupt
	 * is asserted, this saves a register write for every interrupt
	 */
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
		misc_shift;

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);

	regval = E1000_READ_REG(hw, E1000_EIAM);
	E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);

	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
		eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	E1000_WRITE_FLUSH(hw);
}
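
/*
 * Worked example for the mapping set up above (numbers are an assumption
 * for illustration only): with MSI-X and four Rx queues,
 * rte_intr_allow_others() is true, so the "other" causes (e.g. link state
 * change) use vector E1000_MISC_VEC_ID and the Rx queues start at
 * E1000_RX_VEC_START (one above it).  With nb_efd == 4, queues 0..3 are
 * assigned vectors 1..4; if fewer event fds are available, the remaining
 * queues share the last vector, because the loop stops incrementing vec at
 * base + nb_efd - 1.
 */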
/* restore n-tuple filter */
static inline void
igb_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
		igb_inject_5tuple_filter_82576(dev, p_5tuple);
	}

	TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
		igb_inject_2uple_filter(dev, p_2tuple);
	}
}

/* restore SYN filter */
static inline void
igb_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t synqf;

	synqf = filter_info->syn_info;

	if (synqf & E1000_SYN_FILTER_ENABLE) {
		E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
		E1000_WRITE_FLUSH(hw);
	}
}

/* restore ethernet type filter */
static inline void
igb_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			E1000_WRITE_REG(hw, E1000_ETQF(i),
				filter_info->ethertype_filters[i].etqf);
			E1000_WRITE_FLUSH(hw);
		}
	}
}

/* restore flex byte filter */
static inline void
igb_flex_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter;

	TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
		igb_inject_flex_filter(dev, flex_filter);
	}
}

/* restore rss filter */
static inline void
igb_rss_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->rss_info.conf.queue_num)
		igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
}

/* restore all types filter */
static int
igb_filter_restore(struct rte_eth_dev *dev)
{
	igb_ntuple_filter_restore(dev);
	igb_ethertype_filter_restore(dev);
	igb_syn_filter_restore(dev);
	igb_flex_filter_restore(dev);
	igb_rss_filter_restore(dev);

	return 0;
}

RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
/* see e1000_logs.c */
RTE_INIT(e1000_init_log)
{
	e1000_igb_init_log();
}