/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#include "igb_regs.h"

/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH	32

#define IGB_DEFAULT_RX_PTHRESH	((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_DEFAULT_RX_HTHRESH	8
#define IGB_DEFAULT_RX_WTHRESH	((hw->mac.type == e1000_82576) ? 1 : 4)

#define IGB_DEFAULT_TX_PTHRESH	((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_DEFAULT_TX_HTHRESH	1
#define IGB_DEFAULT_TX_WTHRESH	((hw->mac.type == e1000_82576) ? 1 : 16)

/* Bit shift and mask */
#define IGB_4_BIT_WIDTH		(CHAR_BIT / 2)
#define IGB_4_BIT_MASK		RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH		CHAR_BIT
#define IGB_8_BIT_MASK		UINT8_MAX

/* Additional timesync values. */
#define E1000_CYCLECOUNTER_MASK		0xffffffffffffffffULL
#define E1000_ETQF_FILTER_1588		3
#define IGB_82576_TSYNC_SHIFT		16
#define E1000_INCPERIOD_82576		(1 << E1000_TIMINCA_16NS_SHIFT)
#define E1000_INCVALUE_82576		(16 << IGB_82576_TSYNC_SHIFT)
#define E1000_TSAUXC_DISABLE_SYSTIME	0x80000000

#define E1000_VTIVAR_MISC		0x01740
#define E1000_VTIVAR_MISC_MASK		0xFF
#define E1000_VTIVAR_VALID		0x80
#define E1000_VTIVAR_MISC_MAILBOX	0
#define E1000_VTIVAR_MISC_INTR_MASK	0x3

/* External VLAN Enable bit mask */
#define E1000_CTRL_EXT_EXT_VLAN		(1 << 26)

/* External VLAN Ether Type bit mask and shift */
#define E1000_VET_VET_EXT		0xFFFF0000
#define E1000_VET_VET_EXT_SHIFT		16

static int  eth_igb_configure(struct rte_eth_dev *dev);
static int  eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static int  eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
static int  eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static int  eth_igb_reset(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int  eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *xstats, unsigned n);
static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);
static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
		unsigned int limit);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
				char *fw_version, size_t fw_size);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void eth_igb_interrupt_handler(void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int  eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
				 enum rte_vlan_type vlan_type,
				 uint16_t tpid_id);
static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_igb_rar_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr,
			   uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
					 struct ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static void igbvf_promiscuous_enable(struct rte_eth_dev *dev);
static void igbvf_promiscuous_disable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_enable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned limit);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
		struct ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);

static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_get_module_info(struct rte_eth_dev *dev,
				   struct rte_eth_dev_module_info *modinfo);
static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
				     struct rte_dev_eeprom_info *info);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp,
					  uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int igb_timesync_read_time(struct rte_eth_dev *dev,
				  struct timespec *timestamp);
static int igb_timesync_write_time(struct rte_eth_dev *dev,
				   const struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
				       uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
			       uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
static int igb_filter_restore(struct rte_eth_dev *dev);

/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += (latest - last) & UINT_MAX;        \
	last = latest;                            \
}
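/*
 * Illustration (not part of the original driver logic): the VF statistics
 * registers are 32-bit free-running counters that are not cleared on read,
 * so the macro above only accumulates the delta since the previous read.
 * Truncating the difference with "& UINT_MAX" makes a single 32-bit
 * wrap-around between two reads harmless, e.g.:
 *
 *   last   = 0xFFFFFFF0
 *   latest = 0x00000010
 *   (latest - last) & UINT_MAX = 0x20   ->   cur += 32
 *
 * This assumes the counter wraps at most once per polling interval.
 */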
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
	.nb_seg_max = IGB_TX_MAX_SEG,
	.nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
};
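/*
 * Note: the descriptor limits above are what the dev_infos_get callbacks
 * (eth_igb_infos_get/eth_igbvf_infos_get) are expected to report as
 * rx_desc_lim/tx_desc_lim, so ring sizes passed to
 * rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() are presumably checked
 * against E1000_MIN_RING_DESC/E1000_MAX_RING_DESC and the IGB_RXD_ALIGN
 * alignment.
 */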
static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure        = eth_igb_configure,
	.dev_start            = eth_igb_start,
	.dev_stop             = eth_igb_stop,
	.dev_set_link_up      = eth_igb_dev_set_link_up,
	.dev_set_link_down    = eth_igb_dev_set_link_down,
	.dev_close            = eth_igb_close,
	.dev_reset            = eth_igb_reset,
	.promiscuous_enable   = eth_igb_promiscuous_enable,
	.promiscuous_disable  = eth_igb_promiscuous_disable,
	.allmulticast_enable  = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igb_stats_get,
	.xstats_get           = eth_igb_xstats_get,
	.xstats_get_by_id     = eth_igb_xstats_get_by_id,
	.xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
	.xstats_get_names     = eth_igb_xstats_get_names,
	.stats_reset          = eth_igb_stats_reset,
	.xstats_reset         = eth_igb_xstats_reset,
	.fw_version_get       = eth_igb_fw_version_get,
	.dev_infos_get        = eth_igb_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.mtu_set              = eth_igb_mtu_set,
	.vlan_filter_set      = eth_igb_vlan_filter_set,
	.vlan_tpid_set        = eth_igb_vlan_tpid_set,
	.vlan_offload_set     = eth_igb_vlan_offload_set,
	.rx_queue_setup       = eth_igb_rx_queue_setup,
	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
	.rx_queue_release     = eth_igb_rx_queue_release,
	.rx_queue_count       = eth_igb_rx_queue_count,
	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
	.rx_descriptor_status = eth_igb_rx_descriptor_status,
	.tx_descriptor_status = eth_igb_tx_descriptor_status,
	.tx_queue_setup       = eth_igb_tx_queue_setup,
	.tx_queue_release     = eth_igb_tx_queue_release,
	.tx_done_cleanup      = eth_igb_tx_done_cleanup,
	.dev_led_on           = eth_igb_led_on,
	.dev_led_off          = eth_igb_led_off,
	.flow_ctrl_get        = eth_igb_flow_ctrl_get,
	.flow_ctrl_set        = eth_igb_flow_ctrl_set,
	.mac_addr_add         = eth_igb_rar_set,
	.mac_addr_remove      = eth_igb_rar_clear,
	.mac_addr_set         = eth_igb_default_mac_addr_set,
	.reta_update          = eth_igb_rss_reta_update,
	.reta_query           = eth_igb_rss_reta_query,
	.rss_hash_update      = eth_igb_rss_hash_update,
	.rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
	.filter_ctrl          = eth_igb_filter_ctrl,
	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
	.rxq_info_get         = igb_rxq_info_get,
	.txq_info_get         = igb_txq_info_get,
	.timesync_enable      = igb_timesync_enable,
	.timesync_disable     = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
	.get_reg              = eth_igb_get_regs,
	.get_eeprom_length    = eth_igb_get_eeprom_length,
	.get_eeprom           = eth_igb_get_eeprom,
	.set_eeprom           = eth_igb_set_eeprom,
	.get_module_info      = eth_igb_get_module_info,
	.get_module_eeprom    = eth_igb_get_module_eeprom,
	.timesync_adjust_time = igb_timesync_adjust_time,
	.timesync_read_time   = igb_timesync_read_time,
	.timesync_write_time  = igb_timesync_write_time,
};

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure        = igbvf_dev_configure,
	.dev_start            = igbvf_dev_start,
	.dev_stop             = igbvf_dev_stop,
	.dev_close            = igbvf_dev_close,
	.promiscuous_enable   = igbvf_promiscuous_enable,
	.promiscuous_disable  = igbvf_promiscuous_disable,
	.allmulticast_enable  = igbvf_allmulticast_enable,
	.allmulticast_disable = igbvf_allmulticast_disable,
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igbvf_stats_get,
	.xstats_get           = eth_igbvf_xstats_get,
	.xstats_get_names     = eth_igbvf_xstats_get_names,
	.stats_reset          = eth_igbvf_stats_reset,
	.xstats_reset         = eth_igbvf_stats_reset,
	.vlan_filter_set      = igbvf_vlan_filter_set,
	.dev_infos_get        = eth_igbvf_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.rx_queue_setup       = eth_igb_rx_queue_setup,
	.rx_queue_release     = eth_igb_rx_queue_release,
	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
	.rx_descriptor_status = eth_igb_rx_descriptor_status,
	.tx_descriptor_status = eth_igb_tx_descriptor_status,
	.tx_queue_setup       = eth_igb_tx_queue_setup,
	.tx_queue_release     = eth_igb_tx_queue_release,
	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
	.rxq_info_get         = igb_rxq_info_get,
	.txq_info_get         = igb_txq_info_get,
	.mac_addr_set         = igbvf_default_mac_addr_set,
	.get_reg              = igbvf_get_regs,
};

/* store statistics names and their offsets in the stats structure */
struct rte_igb_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
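/*
 * Each entry in the tables below pairs an xstat name with the offset of the
 * corresponding counter inside struct e1000_hw_stats (or e1000_vf_stats).
 * The xstats callbacks can then fetch any counter generically, roughly
 * (for illustration):
 *
 *   value = *(uint64_t *)(((char *)&hw_stats) +
 *                         rte_igb_stats_strings[i].offset);
 *
 * which is presumably how eth_igb_xstats_get() fills its output array.
 */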

static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
	{"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
	{"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
	{"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
	{"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		ptc1023)},
	{"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
	{"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
	{"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
	{"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},

	{"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
};

#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
		sizeof(rte_igb_stats_strings[0]))

static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
	{"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
	{"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
	{"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
	{"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
};

#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
		sizeof(rte_igbvf_stats_strings[0]))


static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}

static inline void
igbvf_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* only for mailbox */
	E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_FLUSH(hw);
}

/* only for mailbox now. If RX/TX needed, should extend this function. */
static void
igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	/* mailbox */
	tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
	tmp |= E1000_VTIVAR_VALID;
	E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
}

static void
eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure VF other cause ivar */
	igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
}

static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}

static void
igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}

static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is
		 * the case, it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}

/* Remove all ntuple filters of the device */
static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
		TAILQ_REMOVE(&filter_info->twotuple_list,
			     p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;

	return 0;
}

/* Remove all flex filters of the device */
static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_flex_filter *p_flex;

	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	return 0;
}

static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	uint32_t ctrl_ext;

	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev, pci_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			     "SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
				   eth_igb_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igb_intr_enable(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct e1000_filter_info));

	TAILQ_INIT(&filter_info->flex_list);
	TAILQ_INIT(&filter_info->twotuple_list);
	TAILQ_INIT(&filter_info->fivetuple_list);

	TAILQ_INIT(&igb_filter_ntuple_list);
	TAILQ_INIT(&igb_filter_ethertype_list);
	TAILQ_INIT(&igb_filter_syn_list);
	TAILQ_INIT(&igb_filter_flex_list);
	TAILQ_INIT(&igb_filter_rss_list);
	TAILQ_INIT(&igb_flow_list);

	return 0;

err_late:
	igb_hw_control_release(hw);

	return error;
}

static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct e1000_hw *hw;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	intr_handle = &pci_dev->intr_handle;

	if (adapter->stopped == 0)
		eth_igb_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Reset any pending lock */
	igb_reset_swfw_lock(hw);

	/* uninitialize PF if max_vfs not zero */
	igb_pf_host_uninit(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     eth_igb_interrupt_handler, eth_dev);

	/* clear the SYN filter info */
	filter_info->syn_info = 0;

	/* clear the ethertype filters info */
	filter_info->ethertype_mask = 0;
	memset(filter_info->ethertype_filters, 0,
	       E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));

	/* clear the rss filter info */
	memset(&filter_info->rss_info, 0,
	       sizeof(struct igb_rte_flow_rss_conf));

	/* remove all ntuple filters of the device */
	igb_ntuple_filter_uninit(eth_dev);

	/* remove all flex filters of the device */
	igb_flex_filter_uninit(eth_dev);

	/* clear all the filters list */
	igb_filterlist_flush(eth_dev);

	return 0;
}

/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;
	struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			     diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (is_zero_ether_addr(perm_addr)) {
		eth_random_addr(perm_addr->addr_bytes);
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);
	}

	diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
	if (diag) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return diag;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		     "mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");

	intr_handle = &pci_dev->intr_handle;
	rte_intr_callback_register(intr_handle,
				   eth_igbvf_interrupt_handler, eth_dev);

	return 0;
}

static int
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (adapter->stopped == 0)
		igbvf_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     eth_igbvf_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}
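/*
 * PCI bus glue: rte_eth_dev_pci_generic_probe() allocates an ethdev whose
 * private data is a struct e1000_adapter (note the sizeof() passed below)
 * and then calls the eth_igb_dev_init/eth_igbvf_dev_init routines above;
 * rte_eth_dev_pci_generic_remove() undoes this through the matching uninit
 * callbacks.
 */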
static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igb_dev_init);
}

static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
}

static struct rte_pci_driver rte_igb_pmd = {
	.id_table = pci_id_igb_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_igb_pci_probe,
	.remove = eth_igb_pci_remove,
};


static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igbvf_dev_init);
}

static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
}

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_igbvf_pmd = {
	.id_table = pci_id_igbvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_igbvf_pci_probe,
	.remove = eth_igbvf_pci_remove,
};

static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always use VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);

	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
	    tx_mq_mode == ETH_MQ_TX_DCB ||
	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}
	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* Check multi-queue mode.
		 * To not break existing software, we accept ETH_MQ_RX_NONE as
		 * this might be used to turn off VLAN filter.
		 */

		if (rx_mq_mode == ETH_MQ_RX_NONE ||
		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
		} else {
			/* Only support one queue on VFs.
			 * RSS together with SRIOV is not supported.
			 */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					rx_mq_mode);
			return -EINVAL;
		}
		/* TX mode is not used here, so mode might be ignored.*/
		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(WARNING, "SRIOV is active,"
					" TX mode %d is not supported. "
					" Driver will behave as %d mode.",
					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
		}

		/* check valid queue number */
		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" only support one queue on VFs.");
			return -EINVAL;
		}
	} else {
		/* To not break software that sets an invalid mode, only
		 * display a warning if an invalid mode is used.
		 */
		if (rx_mq_mode != ETH_MQ_RX_NONE &&
		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
		    rx_mq_mode != ETH_MQ_RX_RSS) {
			/* RSS together with VMDq not supported*/
			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				     rx_mq_mode);
			return -EINVAL;
		}

		if (tx_mq_mode != ETH_MQ_TX_NONE &&
		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
					" Since txmode is meaningless in this"
					" driver, it is just ignored.",
					tx_mq_mode);
		}
	}
	return 0;
}

static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* multiple queue mode checking */
	ret = igb_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
eth_igb_rxtx_control(struct rte_eth_dev *dev,
		     bool enable)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tctl, rctl;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	rctl = E1000_READ_REG(hw, E1000_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= E1000_TCTL_EN;
		rctl |= E1000_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~E1000_TCTL_EN;
		rctl &= ~E1000_RCTL_EN;
	}
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);
}

static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t ctrl_ext;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igb_dev_set_link_up(dev);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for rx interrupt */
	eth_igb_configure_msix_intr(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
	       ETH_VLAN_EXTEND_MASK;
	ret = eth_igb_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
		igb_dev_clear_queues(dev);
		return ret;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always use VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
	    (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211)) {
		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		hw->mac.autoneg = 1;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			eth_igb_lsc_interrupt_setup(dev, TRUE);
		else
			eth_igb_lsc_interrupt_setup(dev, FALSE);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     eth_igb_interrupt_handler,
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		eth_igb_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	/* restore all types filter */
	igb_filter_restore(dev);

	eth_igb_rxtx_control(dev, true);
	eth_igb_link_update(dev, 0);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return -EINVAL;
}
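/*
 * Example of the link_speeds handling above (illustrative values): with
 * dev_conf.link_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_FIXED the loop
 * advertises only ADVERTISE_100_FULL (num_speeds == 1), autonegotiation is
 * turned off and hw->mac.forced_speed_duplex forces 100 Mb/s full duplex.
 * A fixed request with more than one speed bit set, or with no valid speed
 * bit at all, lands in error_invalid_config and the queues are torn down
 * again.
 */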
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	eth_igb_rxtx_control(dev, false);

	igb_intr_disable(hw);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	eth_igb_dev_set_link_down(dev);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_igb_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

static int
eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_up_phy(hw);
	else
		e1000_power_up_fiber_serdes_link(hw);

	return 0;
}

static int
eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	return 0;
}

static void
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_eth_link link;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	eth_igb_stop(dev);
	adapter->stopped = 1;

	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_free_queues(dev);

	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

/*
 * Reset PF device.
 */
static int
eth_igb_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * its VFs to make them align with it. The detailed notification
	 * mechanism is PMD specific and is currently not implemented.
	 * To avoid unexpected behavior in VFs, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
1605 */ 1606 if (dev->data->sriov.active) 1607 return -ENOTSUP; 1608 1609 ret = eth_igb_dev_uninit(dev); 1610 if (ret) 1611 return ret; 1612 1613 ret = eth_igb_dev_init(dev); 1614 1615 return ret; 1616 } 1617 1618 1619 static int 1620 igb_get_rx_buffer_size(struct e1000_hw *hw) 1621 { 1622 uint32_t rx_buf_size; 1623 if (hw->mac.type == e1000_82576) { 1624 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10; 1625 } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) { 1626 /* PBS needs to be translated according to a lookup table */ 1627 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf); 1628 rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size); 1629 rx_buf_size = (rx_buf_size << 10); 1630 } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { 1631 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10; 1632 } else { 1633 rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10; 1634 } 1635 1636 return rx_buf_size; 1637 } 1638 1639 /********************************************************************* 1640 * 1641 * Initialize the hardware 1642 * 1643 **********************************************************************/ 1644 static int 1645 igb_hardware_init(struct e1000_hw *hw) 1646 { 1647 uint32_t rx_buf_size; 1648 int diag; 1649 1650 /* Let the firmware know the OS is in control */ 1651 igb_hw_control_acquire(hw); 1652 1653 /* 1654 * These parameters control the automatic generation (Tx) and 1655 * response (Rx) to Ethernet PAUSE frames. 1656 * - High water mark should allow for at least two standard size (1518) 1657 * frames to be received after sending an XOFF. 1658 * - Low water mark works best when it is very near the high water mark. 1659 * This allows the receiver to restart by sending XON when it has 1660 * drained a bit. Here we use an arbitrary value of 1500 which will 1661 * restart after one full frame is pulled from the buffer. There 1662 * could be several smaller frames in the buffer and if so they will 1663 * not trigger the XON until their total number reduces the buffer 1664 * by 1500. 1665 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
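 * As a purely illustrative example (the 64 KB figure is an assumption,
 * not a statement about any particular adapter's defaults): if
 * igb_get_rx_buffer_size() returned 65536 bytes, the assignments below
 * would give high_water = 65536 - 2 * 1518 = 62500 and
 * low_water = 62500 - 1500 = 61000, i.e. XOFF is requested once less
 * than two full-size frames of buffer space remain and XON is sent
 * again after roughly one frame has been drained.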
1666 */ 1667 rx_buf_size = igb_get_rx_buffer_size(hw); 1668 1669 hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2); 1670 hw->fc.low_water = hw->fc.high_water - 1500; 1671 hw->fc.pause_time = IGB_FC_PAUSE_TIME; 1672 hw->fc.send_xon = 1; 1673 1674 /* Set Flow control, use the tunable location if sane */ 1675 if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4)) 1676 hw->fc.requested_mode = igb_fc_setting; 1677 else 1678 hw->fc.requested_mode = e1000_fc_none; 1679 1680 /* Issue a global reset */ 1681 igb_pf_reset_hw(hw); 1682 E1000_WRITE_REG(hw, E1000_WUC, 0); 1683 1684 diag = e1000_init_hw(hw); 1685 if (diag < 0) 1686 return diag; 1687 1688 E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN); 1689 e1000_get_phy_info(hw); 1690 e1000_check_for_link(hw); 1691 1692 return 0; 1693 } 1694 1695 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */ 1696 static void 1697 igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats) 1698 { 1699 int pause_frames; 1700 1701 uint64_t old_gprc = stats->gprc; 1702 uint64_t old_gptc = stats->gptc; 1703 uint64_t old_tpr = stats->tpr; 1704 uint64_t old_tpt = stats->tpt; 1705 uint64_t old_rpthc = stats->rpthc; 1706 uint64_t old_hgptc = stats->hgptc; 1707 1708 if(hw->phy.media_type == e1000_media_type_copper || 1709 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 1710 stats->symerrs += 1711 E1000_READ_REG(hw,E1000_SYMERRS); 1712 stats->sec += E1000_READ_REG(hw, E1000_SEC); 1713 } 1714 1715 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 1716 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 1717 stats->scc += E1000_READ_REG(hw, E1000_SCC); 1718 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 1719 1720 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 1721 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 1722 stats->colc += E1000_READ_REG(hw, E1000_COLC); 1723 stats->dc += E1000_READ_REG(hw, E1000_DC); 1724 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 1725 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 1726 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 1727 /* 1728 ** For watchdog management we need to know if we have been 1729 ** paused during the last interval, so capture that here. 1730 */ 1731 pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 1732 stats->xoffrxc += pause_frames; 1733 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 1734 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 1735 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 1736 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 1737 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 1738 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 1739 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 1740 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 1741 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 1742 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 1743 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 1744 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 1745 1746 /* For the 64-bit byte counters the low dword must be read first. 
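 * (GORCL before GORCH, GOTCL before GOTCH, TORL before TORH and TOTL
 * before TOTH in the sequence below).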
*/ 1747 /* Both registers clear on the read of the high dword */ 1748 1749 /* Workaround CRC bytes included in size, take away 4 bytes/packet */ 1750 stats->gorc += E1000_READ_REG(hw, E1000_GORCL); 1751 stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 1752 stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN; 1753 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); 1754 stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 1755 stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN; 1756 1757 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 1758 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 1759 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 1760 stats->roc += E1000_READ_REG(hw, E1000_ROC); 1761 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 1762 1763 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 1764 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 1765 1766 stats->tor += E1000_READ_REG(hw, E1000_TORL); 1767 stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32); 1768 stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN; 1769 stats->tot += E1000_READ_REG(hw, E1000_TOTL); 1770 stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32); 1771 stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN; 1772 1773 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 1774 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 1775 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 1776 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 1777 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 1778 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 1779 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 1780 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 1781 1782 /* Interrupt Counts */ 1783 1784 stats->iac += E1000_READ_REG(hw, E1000_IAC); 1785 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 1786 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 1787 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 1788 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 1789 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 1790 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 1791 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 1792 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 1793 1794 /* Host to Card Statistics */ 1795 1796 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 1797 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 1798 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 1799 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 1800 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 1801 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 1802 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 1803 stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL); 1804 stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32); 1805 stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN; 1806 stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL); 1807 stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32); 1808 stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN; 1809 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 1810 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 1811 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 1812 1813 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 1814 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 1815 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 1816 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 1817 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 1818 stats->tsctfc += 
E1000_READ_REG(hw, E1000_TSCTFC); 1819 } 1820 1821 static int 1822 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 1823 { 1824 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1825 struct e1000_hw_stats *stats = 1826 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1827 1828 igb_read_stats_registers(hw, stats); 1829 1830 if (rte_stats == NULL) 1831 return -EINVAL; 1832 1833 /* Rx Errors */ 1834 rte_stats->imissed = stats->mpc; 1835 rte_stats->ierrors = stats->crcerrs + 1836 stats->rlec + stats->ruc + stats->roc + 1837 stats->rxerrc + stats->algnerrc + stats->cexterr; 1838 1839 /* Tx Errors */ 1840 rte_stats->oerrors = stats->ecol + stats->latecol; 1841 1842 rte_stats->ipackets = stats->gprc; 1843 rte_stats->opackets = stats->gptc; 1844 rte_stats->ibytes = stats->gorc; 1845 rte_stats->obytes = stats->gotc; 1846 return 0; 1847 } 1848 1849 static void 1850 eth_igb_stats_reset(struct rte_eth_dev *dev) 1851 { 1852 struct e1000_hw_stats *hw_stats = 1853 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1854 1855 /* HW registers are cleared on read */ 1856 eth_igb_stats_get(dev, NULL); 1857 1858 /* Reset software totals */ 1859 memset(hw_stats, 0, sizeof(*hw_stats)); 1860 } 1861 1862 static void 1863 eth_igb_xstats_reset(struct rte_eth_dev *dev) 1864 { 1865 struct e1000_hw_stats *stats = 1866 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1867 1868 /* HW registers are cleared on read */ 1869 eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS); 1870 1871 /* Reset software totals */ 1872 memset(stats, 0, sizeof(*stats)); 1873 } 1874 1875 static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1876 struct rte_eth_xstat_name *xstats_names, 1877 __rte_unused unsigned int size) 1878 { 1879 unsigned i; 1880 1881 if (xstats_names == NULL) 1882 return IGB_NB_XSTATS; 1883 1884 /* Note: limit checked in rte_eth_xstats_names() */ 1885 1886 for (i = 0; i < IGB_NB_XSTATS; i++) { 1887 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name), 1888 "%s", rte_igb_stats_strings[i].name); 1889 } 1890 1891 return IGB_NB_XSTATS; 1892 } 1893 1894 static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev, 1895 struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, 1896 unsigned int limit) 1897 { 1898 unsigned int i; 1899 1900 if (!ids) { 1901 if (xstats_names == NULL) 1902 return IGB_NB_XSTATS; 1903 1904 for (i = 0; i < IGB_NB_XSTATS; i++) 1905 snprintf(xstats_names[i].name, 1906 sizeof(xstats_names[i].name), 1907 "%s", rte_igb_stats_strings[i].name); 1908 1909 return IGB_NB_XSTATS; 1910 1911 } else { 1912 struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS]; 1913 1914 eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL, 1915 IGB_NB_XSTATS); 1916 1917 for (i = 0; i < limit; i++) { 1918 if (ids[i] >= IGB_NB_XSTATS) { 1919 PMD_INIT_LOG(ERR, "id value isn't valid"); 1920 return -1; 1921 } 1922 strcpy(xstats_names[i].name, 1923 xstats_names_copy[ids[i]].name); 1924 } 1925 return limit; 1926 } 1927 } 1928 1929 static int 1930 eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1931 unsigned n) 1932 { 1933 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1934 struct e1000_hw_stats *hw_stats = 1935 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1936 unsigned i; 1937 1938 if (n < IGB_NB_XSTATS) 1939 return IGB_NB_XSTATS; 1940 1941 igb_read_stats_registers(hw, hw_stats); 1942 1943 /* If this is a reset xstats is NULL, and we have cleared the 1944 * registers by reading them. 
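 * eth_igb_xstats_reset() relies on this behaviour by calling this
 * function with a NULL xstats array.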
1945 */ 1946 if (!xstats) 1947 return 0; 1948 1949 /* Extended stats */ 1950 for (i = 0; i < IGB_NB_XSTATS; i++) { 1951 xstats[i].id = i; 1952 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 1953 rte_igb_stats_strings[i].offset); 1954 } 1955 1956 return IGB_NB_XSTATS; 1957 } 1958 1959 static int 1960 eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1961 uint64_t *values, unsigned int n) 1962 { 1963 unsigned int i; 1964 1965 if (!ids) { 1966 struct e1000_hw *hw = 1967 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1968 struct e1000_hw_stats *hw_stats = 1969 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1970 1971 if (n < IGB_NB_XSTATS) 1972 return IGB_NB_XSTATS; 1973 1974 igb_read_stats_registers(hw, hw_stats); 1975 1976 /* If this is a reset xstats is NULL, and we have cleared the 1977 * registers by reading them. 1978 */ 1979 if (!values) 1980 return 0; 1981 1982 /* Extended stats */ 1983 for (i = 0; i < IGB_NB_XSTATS; i++) 1984 values[i] = *(uint64_t *)(((char *)hw_stats) + 1985 rte_igb_stats_strings[i].offset); 1986 1987 return IGB_NB_XSTATS; 1988 1989 } else { 1990 uint64_t values_copy[IGB_NB_XSTATS]; 1991 1992 eth_igb_xstats_get_by_id(dev, NULL, values_copy, 1993 IGB_NB_XSTATS); 1994 1995 for (i = 0; i < n; i++) { 1996 if (ids[i] >= IGB_NB_XSTATS) { 1997 PMD_INIT_LOG(ERR, "id value isn't valid"); 1998 return -1; 1999 } 2000 values[i] = values_copy[ids[i]]; 2001 } 2002 return n; 2003 } 2004 } 2005 2006 static void 2007 igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats) 2008 { 2009 /* Good Rx packets, include VF loopback */ 2010 UPDATE_VF_STAT(E1000_VFGPRC, 2011 hw_stats->last_gprc, hw_stats->gprc); 2012 2013 /* Good Rx octets, include VF loopback */ 2014 UPDATE_VF_STAT(E1000_VFGORC, 2015 hw_stats->last_gorc, hw_stats->gorc); 2016 2017 /* Good Tx packets, include VF loopback */ 2018 UPDATE_VF_STAT(E1000_VFGPTC, 2019 hw_stats->last_gptc, hw_stats->gptc); 2020 2021 /* Good Tx octets, include VF loopback */ 2022 UPDATE_VF_STAT(E1000_VFGOTC, 2023 hw_stats->last_gotc, hw_stats->gotc); 2024 2025 /* Rx Multicst packets */ 2026 UPDATE_VF_STAT(E1000_VFMPRC, 2027 hw_stats->last_mprc, hw_stats->mprc); 2028 2029 /* Good Rx loopback packets */ 2030 UPDATE_VF_STAT(E1000_VFGPRLBC, 2031 hw_stats->last_gprlbc, hw_stats->gprlbc); 2032 2033 /* Good Rx loopback octets */ 2034 UPDATE_VF_STAT(E1000_VFGORLBC, 2035 hw_stats->last_gorlbc, hw_stats->gorlbc); 2036 2037 /* Good Tx loopback packets */ 2038 UPDATE_VF_STAT(E1000_VFGPTLBC, 2039 hw_stats->last_gptlbc, hw_stats->gptlbc); 2040 2041 /* Good Tx loopback octets */ 2042 UPDATE_VF_STAT(E1000_VFGOTLBC, 2043 hw_stats->last_gotlbc, hw_stats->gotlbc); 2044 } 2045 2046 static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 2047 struct rte_eth_xstat_name *xstats_names, 2048 __rte_unused unsigned limit) 2049 { 2050 unsigned i; 2051 2052 if (xstats_names != NULL) 2053 for (i = 0; i < IGBVF_NB_XSTATS; i++) { 2054 snprintf(xstats_names[i].name, 2055 sizeof(xstats_names[i].name), "%s", 2056 rte_igbvf_stats_strings[i].name); 2057 } 2058 return IGBVF_NB_XSTATS; 2059 } 2060 2061 static int 2062 eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 2063 unsigned n) 2064 { 2065 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2066 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) 2067 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2068 unsigned i; 2069 2070 if (n < IGBVF_NB_XSTATS) 2071 return IGBVF_NB_XSTATS; 2072 2073 
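	/* The VF statistics registers are only 32 bits wide; the
	 * UPDATE_VF_STAT() macro used by igbvf_read_stats_registers() is
	 * assumed to fold the delta since the previous read into the
	 * 64-bit software counters so that wrap-around between polls is
	 * not lost.
	 */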
igbvf_read_stats_registers(hw, hw_stats); 2074 2075 if (!xstats) 2076 return 0; 2077 2078 for (i = 0; i < IGBVF_NB_XSTATS; i++) { 2079 xstats[i].id = i; 2080 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 2081 rte_igbvf_stats_strings[i].offset); 2082 } 2083 2084 return IGBVF_NB_XSTATS; 2085 } 2086 2087 static int 2088 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 2089 { 2090 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2091 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) 2092 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2093 2094 igbvf_read_stats_registers(hw, hw_stats); 2095 2096 if (rte_stats == NULL) 2097 return -EINVAL; 2098 2099 rte_stats->ipackets = hw_stats->gprc; 2100 rte_stats->ibytes = hw_stats->gorc; 2101 rte_stats->opackets = hw_stats->gptc; 2102 rte_stats->obytes = hw_stats->gotc; 2103 return 0; 2104 } 2105 2106 static void 2107 eth_igbvf_stats_reset(struct rte_eth_dev *dev) 2108 { 2109 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*) 2110 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2111 2112 /* Sync HW register to the last stats */ 2113 eth_igbvf_stats_get(dev, NULL); 2114 2115 /* reset HW current stats*/ 2116 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) - 2117 offsetof(struct e1000_vf_stats, gprc)); 2118 } 2119 2120 static int 2121 eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 2122 size_t fw_size) 2123 { 2124 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2125 struct e1000_fw_version fw; 2126 int ret; 2127 2128 e1000_get_fw_version(hw, &fw); 2129 2130 switch (hw->mac.type) { 2131 case e1000_i210: 2132 case e1000_i211: 2133 if (!(e1000_get_flash_presence_i210(hw))) { 2134 ret = snprintf(fw_version, fw_size, 2135 "%2d.%2d-%d", 2136 fw.invm_major, fw.invm_minor, 2137 fw.invm_img_type); 2138 break; 2139 } 2140 /* fall through */ 2141 default: 2142 /* if option rom is valid, display its version too */ 2143 if (fw.or_valid) { 2144 ret = snprintf(fw_version, fw_size, 2145 "%d.%d, 0x%08x, %d.%d.%d", 2146 fw.eep_major, fw.eep_minor, fw.etrack_id, 2147 fw.or_major, fw.or_build, fw.or_patch); 2148 /* no option rom */ 2149 } else { 2150 if (fw.etrack_id != 0X0000) { 2151 ret = snprintf(fw_version, fw_size, 2152 "%d.%d, 0x%08x", 2153 fw.eep_major, fw.eep_minor, 2154 fw.etrack_id); 2155 } else { 2156 ret = snprintf(fw_version, fw_size, 2157 "%d.%d.%d", 2158 fw.eep_major, fw.eep_minor, 2159 fw.eep_build); 2160 } 2161 } 2162 break; 2163 } 2164 2165 ret += 1; /* add the size of '\0' */ 2166 if (fw_size < (u32)ret) 2167 return ret; 2168 else 2169 return 0; 2170 } 2171 2172 static void 2173 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 2174 { 2175 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2176 2177 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 2178 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. 
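(0x3FFF is 16383 bytes)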
*/ 2179 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 2180 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); 2181 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | 2182 dev_info->rx_queue_offload_capa; 2183 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); 2184 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | 2185 dev_info->tx_queue_offload_capa; 2186 2187 switch (hw->mac.type) { 2188 case e1000_82575: 2189 dev_info->max_rx_queues = 4; 2190 dev_info->max_tx_queues = 4; 2191 dev_info->max_vmdq_pools = 0; 2192 break; 2193 2194 case e1000_82576: 2195 dev_info->max_rx_queues = 16; 2196 dev_info->max_tx_queues = 16; 2197 dev_info->max_vmdq_pools = ETH_8_POOLS; 2198 dev_info->vmdq_queue_num = 16; 2199 break; 2200 2201 case e1000_82580: 2202 dev_info->max_rx_queues = 8; 2203 dev_info->max_tx_queues = 8; 2204 dev_info->max_vmdq_pools = ETH_8_POOLS; 2205 dev_info->vmdq_queue_num = 8; 2206 break; 2207 2208 case e1000_i350: 2209 dev_info->max_rx_queues = 8; 2210 dev_info->max_tx_queues = 8; 2211 dev_info->max_vmdq_pools = ETH_8_POOLS; 2212 dev_info->vmdq_queue_num = 8; 2213 break; 2214 2215 case e1000_i354: 2216 dev_info->max_rx_queues = 8; 2217 dev_info->max_tx_queues = 8; 2218 break; 2219 2220 case e1000_i210: 2221 dev_info->max_rx_queues = 4; 2222 dev_info->max_tx_queues = 4; 2223 dev_info->max_vmdq_pools = 0; 2224 break; 2225 2226 case e1000_i211: 2227 dev_info->max_rx_queues = 2; 2228 dev_info->max_tx_queues = 2; 2229 dev_info->max_vmdq_pools = 0; 2230 break; 2231 2232 default: 2233 /* Should not happen */ 2234 break; 2235 } 2236 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t); 2237 dev_info->reta_size = ETH_RSS_RETA_SIZE_128; 2238 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL; 2239 2240 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2241 .rx_thresh = { 2242 .pthresh = IGB_DEFAULT_RX_PTHRESH, 2243 .hthresh = IGB_DEFAULT_RX_HTHRESH, 2244 .wthresh = IGB_DEFAULT_RX_WTHRESH, 2245 }, 2246 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, 2247 .rx_drop_en = 0, 2248 .offloads = 0, 2249 }; 2250 2251 dev_info->default_txconf = (struct rte_eth_txconf) { 2252 .tx_thresh = { 2253 .pthresh = IGB_DEFAULT_TX_PTHRESH, 2254 .hthresh = IGB_DEFAULT_TX_HTHRESH, 2255 .wthresh = IGB_DEFAULT_TX_WTHRESH, 2256 }, 2257 .offloads = 0, 2258 }; 2259 2260 dev_info->rx_desc_lim = rx_desc_lim; 2261 dev_info->tx_desc_lim = tx_desc_lim; 2262 2263 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | 2264 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | 2265 ETH_LINK_SPEED_1G; 2266 } 2267 2268 static const uint32_t * 2269 eth_igb_supported_ptypes_get(struct rte_eth_dev *dev) 2270 { 2271 static const uint32_t ptypes[] = { 2272 /* refers to igb_rxd_pkt_info_to_pkt_type() */ 2273 RTE_PTYPE_L2_ETHER, 2274 RTE_PTYPE_L3_IPV4, 2275 RTE_PTYPE_L3_IPV4_EXT, 2276 RTE_PTYPE_L3_IPV6, 2277 RTE_PTYPE_L3_IPV6_EXT, 2278 RTE_PTYPE_L4_TCP, 2279 RTE_PTYPE_L4_UDP, 2280 RTE_PTYPE_L4_SCTP, 2281 RTE_PTYPE_TUNNEL_IP, 2282 RTE_PTYPE_INNER_L3_IPV6, 2283 RTE_PTYPE_INNER_L3_IPV6_EXT, 2284 RTE_PTYPE_INNER_L4_TCP, 2285 RTE_PTYPE_INNER_L4_UDP, 2286 RTE_PTYPE_UNKNOWN 2287 }; 2288 2289 if (dev->rx_pkt_burst == eth_igb_recv_pkts || 2290 dev->rx_pkt_burst == eth_igb_recv_scattered_pkts) 2291 return ptypes; 2292 return NULL; 2293 } 2294 2295 static void 2296 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 2297 { 2298 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2299 2300 
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 2301 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ 2302 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 2303 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | 2304 DEV_TX_OFFLOAD_IPV4_CKSUM | 2305 DEV_TX_OFFLOAD_UDP_CKSUM | 2306 DEV_TX_OFFLOAD_TCP_CKSUM | 2307 DEV_TX_OFFLOAD_SCTP_CKSUM | 2308 DEV_TX_OFFLOAD_TCP_TSO; 2309 switch (hw->mac.type) { 2310 case e1000_vfadapt: 2311 dev_info->max_rx_queues = 2; 2312 dev_info->max_tx_queues = 2; 2313 break; 2314 case e1000_vfadapt_i350: 2315 dev_info->max_rx_queues = 1; 2316 dev_info->max_tx_queues = 1; 2317 break; 2318 default: 2319 /* Should not happen */ 2320 break; 2321 } 2322 2323 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); 2324 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | 2325 dev_info->rx_queue_offload_capa; 2326 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); 2327 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | 2328 dev_info->tx_queue_offload_capa; 2329 2330 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2331 .rx_thresh = { 2332 .pthresh = IGB_DEFAULT_RX_PTHRESH, 2333 .hthresh = IGB_DEFAULT_RX_HTHRESH, 2334 .wthresh = IGB_DEFAULT_RX_WTHRESH, 2335 }, 2336 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, 2337 .rx_drop_en = 0, 2338 .offloads = 0, 2339 }; 2340 2341 dev_info->default_txconf = (struct rte_eth_txconf) { 2342 .tx_thresh = { 2343 .pthresh = IGB_DEFAULT_TX_PTHRESH, 2344 .hthresh = IGB_DEFAULT_TX_HTHRESH, 2345 .wthresh = IGB_DEFAULT_TX_WTHRESH, 2346 }, 2347 .offloads = 0, 2348 }; 2349 2350 dev_info->rx_desc_lim = rx_desc_lim; 2351 dev_info->tx_desc_lim = tx_desc_lim; 2352 } 2353 2354 /* return 0 means link status changed, -1 means not changed */ 2355 static int 2356 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) 2357 { 2358 struct e1000_hw *hw = 2359 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2360 struct rte_eth_link link; 2361 int link_check, count; 2362 2363 link_check = 0; 2364 hw->mac.get_link_status = 1; 2365 2366 /* possible wait-to-complete in up to 9 seconds */ 2367 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) { 2368 /* Read the real link status */ 2369 switch (hw->phy.media_type) { 2370 case e1000_media_type_copper: 2371 /* Do the work to read phy */ 2372 e1000_check_for_link(hw); 2373 link_check = !hw->mac.get_link_status; 2374 break; 2375 2376 case e1000_media_type_fiber: 2377 e1000_check_for_link(hw); 2378 link_check = (E1000_READ_REG(hw, E1000_STATUS) & 2379 E1000_STATUS_LU); 2380 break; 2381 2382 case e1000_media_type_internal_serdes: 2383 e1000_check_for_link(hw); 2384 link_check = hw->mac.serdes_has_link; 2385 break; 2386 2387 /* VF device is type_unknown */ 2388 case e1000_media_type_unknown: 2389 eth_igbvf_link_update(hw); 2390 link_check = !hw->mac.get_link_status; 2391 break; 2392 2393 default: 2394 break; 2395 } 2396 if (link_check || wait_to_complete == 0) 2397 break; 2398 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL); 2399 } 2400 memset(&link, 0, sizeof(link)); 2401 2402 /* Now we check if a transition has happened */ 2403 if (link_check) { 2404 uint16_t duplex, speed; 2405 hw->mac.ops.get_link_up_info(hw, &speed, &duplex); 2406 link.link_duplex = (duplex == FULL_DUPLEX) ? 
2407 ETH_LINK_FULL_DUPLEX : 2408 ETH_LINK_HALF_DUPLEX; 2409 link.link_speed = speed; 2410 link.link_status = ETH_LINK_UP; 2411 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 2412 ETH_LINK_SPEED_FIXED); 2413 } else if (!link_check) { 2414 link.link_speed = 0; 2415 link.link_duplex = ETH_LINK_HALF_DUPLEX; 2416 link.link_status = ETH_LINK_DOWN; 2417 link.link_autoneg = ETH_LINK_FIXED; 2418 } 2419 2420 return rte_eth_linkstatus_set(dev, &link); 2421 } 2422 2423 /* 2424 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. 2425 * For ASF and Pass Through versions of f/w this means 2426 * that the driver is loaded. 2427 */ 2428 static void 2429 igb_hw_control_acquire(struct e1000_hw *hw) 2430 { 2431 uint32_t ctrl_ext; 2432 2433 /* Let firmware know the driver has taken over */ 2434 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2435 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2436 } 2437 2438 /* 2439 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit. 2440 * For ASF and Pass Through versions of f/w this means that the 2441 * driver is no longer loaded. 2442 */ 2443 static void 2444 igb_hw_control_release(struct e1000_hw *hw) 2445 { 2446 uint32_t ctrl_ext; 2447 2448 /* Let firmware taken over control of h/w */ 2449 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2450 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 2451 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2452 } 2453 2454 /* 2455 * Bit of a misnomer, what this really means is 2456 * to enable OS management of the system... aka 2457 * to disable special hardware management features. 2458 */ 2459 static void 2460 igb_init_manageability(struct e1000_hw *hw) 2461 { 2462 if (e1000_enable_mng_pass_thru(hw)) { 2463 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); 2464 uint32_t manc = E1000_READ_REG(hw, E1000_MANC); 2465 2466 /* disable hardware interception of ARP */ 2467 manc &= ~(E1000_MANC_ARP_EN); 2468 2469 /* enable receiving management packets to the host */ 2470 manc |= E1000_MANC_EN_MNG2HOST; 2471 manc2h |= 1 << 5; /* Mng Port 623 */ 2472 manc2h |= 1 << 6; /* Mng Port 664 */ 2473 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); 2474 E1000_WRITE_REG(hw, E1000_MANC, manc); 2475 } 2476 } 2477 2478 static void 2479 igb_release_manageability(struct e1000_hw *hw) 2480 { 2481 if (e1000_enable_mng_pass_thru(hw)) { 2482 uint32_t manc = E1000_READ_REG(hw, E1000_MANC); 2483 2484 manc |= E1000_MANC_ARP_EN; 2485 manc &= ~E1000_MANC_EN_MNG2HOST; 2486 2487 E1000_WRITE_REG(hw, E1000_MANC, manc); 2488 } 2489 } 2490 2491 static void 2492 eth_igb_promiscuous_enable(struct rte_eth_dev *dev) 2493 { 2494 struct e1000_hw *hw = 2495 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2496 uint32_t rctl; 2497 2498 rctl = E1000_READ_REG(hw, E1000_RCTL); 2499 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2500 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2501 } 2502 2503 static void 2504 eth_igb_promiscuous_disable(struct rte_eth_dev *dev) 2505 { 2506 struct e1000_hw *hw = 2507 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2508 uint32_t rctl; 2509 2510 rctl = E1000_READ_REG(hw, E1000_RCTL); 2511 rctl &= (~E1000_RCTL_UPE); 2512 if (dev->data->all_multicast == 1) 2513 rctl |= E1000_RCTL_MPE; 2514 else 2515 rctl &= (~E1000_RCTL_MPE); 2516 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2517 } 2518 2519 static void 2520 eth_igb_allmulticast_enable(struct rte_eth_dev *dev) 2521 { 2522 struct e1000_hw *hw = 2523 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2524 uint32_t rctl; 2525 2526 rctl = E1000_READ_REG(hw, E1000_RCTL); 2527 rctl |= E1000_RCTL_MPE; 2528 
E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2529 } 2530 2531 static void 2532 eth_igb_allmulticast_disable(struct rte_eth_dev *dev) 2533 { 2534 struct e1000_hw *hw = 2535 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2536 uint32_t rctl; 2537 2538 if (dev->data->promiscuous == 1) 2539 return; /* must remain in all_multicast mode */ 2540 rctl = E1000_READ_REG(hw, E1000_RCTL); 2541 rctl &= (~E1000_RCTL_MPE); 2542 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2543 } 2544 2545 static int 2546 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 2547 { 2548 struct e1000_hw *hw = 2549 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2550 struct e1000_vfta * shadow_vfta = 2551 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 2552 uint32_t vfta; 2553 uint32_t vid_idx; 2554 uint32_t vid_bit; 2555 2556 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & 2557 E1000_VFTA_ENTRY_MASK); 2558 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); 2559 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); 2560 if (on) 2561 vfta |= vid_bit; 2562 else 2563 vfta &= ~vid_bit; 2564 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); 2565 2566 /* update local VFTA copy */ 2567 shadow_vfta->vfta[vid_idx] = vfta; 2568 2569 return 0; 2570 } 2571 2572 static int 2573 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, 2574 enum rte_vlan_type vlan_type, 2575 uint16_t tpid) 2576 { 2577 struct e1000_hw *hw = 2578 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2579 uint32_t reg, qinq; 2580 2581 qinq = E1000_READ_REG(hw, E1000_CTRL_EXT); 2582 qinq &= E1000_CTRL_EXT_EXT_VLAN; 2583 2584 /* only outer TPID of double VLAN can be configured*/ 2585 if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) { 2586 reg = E1000_READ_REG(hw, E1000_VET); 2587 reg = (reg & (~E1000_VET_VET_EXT)) | 2588 ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT); 2589 E1000_WRITE_REG(hw, E1000_VET, reg); 2590 2591 return 0; 2592 } 2593 2594 /* all other TPID values are read-only*/ 2595 PMD_DRV_LOG(ERR, "Not supported"); 2596 2597 return -ENOTSUP; 2598 } 2599 2600 static void 2601 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev) 2602 { 2603 struct e1000_hw *hw = 2604 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2605 uint32_t reg; 2606 2607 /* Filter Table Disable */ 2608 reg = E1000_READ_REG(hw, E1000_RCTL); 2609 reg &= ~E1000_RCTL_CFIEN; 2610 reg &= ~E1000_RCTL_VFE; 2611 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2612 } 2613 2614 static void 2615 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2616 { 2617 struct e1000_hw *hw = 2618 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2619 struct e1000_vfta * shadow_vfta = 2620 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 2621 uint32_t reg; 2622 int i; 2623 2624 /* Filter Table Enable, CFI not used for packet acceptance */ 2625 reg = E1000_READ_REG(hw, E1000_RCTL); 2626 reg &= ~E1000_RCTL_CFIEN; 2627 reg |= E1000_RCTL_VFE; 2628 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2629 2630 /* restore VFTA table */ 2631 for (i = 0; i < IGB_VFTA_SIZE; i++) 2632 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); 2633 } 2634 2635 static void 2636 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev) 2637 { 2638 struct e1000_hw *hw = 2639 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2640 uint32_t reg; 2641 2642 /* VLAN Mode Disable */ 2643 reg = E1000_READ_REG(hw, E1000_CTRL); 2644 reg &= ~E1000_CTRL_VME; 2645 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2646 } 2647 2648 static void 2649 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev) 2650 { 2651 struct e1000_hw *hw = 2652 
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2653 uint32_t reg; 2654 2655 /* VLAN Mode Enable */ 2656 reg = E1000_READ_REG(hw, E1000_CTRL); 2657 reg |= E1000_CTRL_VME; 2658 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2659 } 2660 2661 static void 2662 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2663 { 2664 struct e1000_hw *hw = 2665 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2666 uint32_t reg; 2667 2668 /* CTRL_EXT: Extended VLAN */ 2669 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2670 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN; 2671 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2672 2673 /* Update maximum packet length */ 2674 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 2675 E1000_WRITE_REG(hw, E1000_RLPML, 2676 dev->data->dev_conf.rxmode.max_rx_pkt_len + 2677 VLAN_TAG_SIZE); 2678 } 2679 2680 static void 2681 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2682 { 2683 struct e1000_hw *hw = 2684 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2685 uint32_t reg; 2686 2687 /* CTRL_EXT: Extended VLAN */ 2688 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2689 reg |= E1000_CTRL_EXT_EXTEND_VLAN; 2690 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2691 2692 /* Update maximum packet length */ 2693 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 2694 E1000_WRITE_REG(hw, E1000_RLPML, 2695 dev->data->dev_conf.rxmode.max_rx_pkt_len + 2696 2 * VLAN_TAG_SIZE); 2697 } 2698 2699 static int 2700 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2701 { 2702 struct rte_eth_rxmode *rxmode; 2703 2704 rxmode = &dev->data->dev_conf.rxmode; 2705 if(mask & ETH_VLAN_STRIP_MASK){ 2706 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2707 igb_vlan_hw_strip_enable(dev); 2708 else 2709 igb_vlan_hw_strip_disable(dev); 2710 } 2711 2712 if(mask & ETH_VLAN_FILTER_MASK){ 2713 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 2714 igb_vlan_hw_filter_enable(dev); 2715 else 2716 igb_vlan_hw_filter_disable(dev); 2717 } 2718 2719 if(mask & ETH_VLAN_EXTEND_MASK){ 2720 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2721 igb_vlan_hw_extend_enable(dev); 2722 else 2723 igb_vlan_hw_extend_disable(dev); 2724 } 2725 2726 return 0; 2727 } 2728 2729 2730 /** 2731 * It enables the interrupt mask and then enable the interrupt. 2732 * 2733 * @param dev 2734 * Pointer to struct rte_eth_dev. 2735 * @param on 2736 * Enable or Disable 2737 * 2738 * @return 2739 * - On success, zero. 2740 * - On failure, a negative value. 2741 */ 2742 static int 2743 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 2744 { 2745 struct e1000_interrupt *intr = 2746 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2747 2748 if (on) 2749 intr->mask |= E1000_ICR_LSC; 2750 else 2751 intr->mask &= ~E1000_ICR_LSC; 2752 2753 return 0; 2754 } 2755 2756 /* It clears the interrupt causes and enables the interrupt. 2757 * It will be called once only during nic initialized. 2758 * 2759 * @param dev 2760 * Pointer to struct rte_eth_dev. 2761 * 2762 * @return 2763 * - On success, zero. 2764 * - On failure, a negative value. 
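 *
 * The implementation below builds the EIMS mask from the Rx queue
 * count reported by eth_igb_infos_get(),
 * mask = 0xFFFFFFFF >> (32 - max_rx_queues), and ORs it into EIMS so
 * that the interrupt of every exposed Rx queue is unmasked.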
2765 */ 2766 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev) 2767 { 2768 uint32_t mask, regval; 2769 struct e1000_hw *hw = 2770 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2771 struct rte_eth_dev_info dev_info; 2772 2773 memset(&dev_info, 0, sizeof(dev_info)); 2774 eth_igb_infos_get(dev, &dev_info); 2775 2776 mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues); 2777 regval = E1000_READ_REG(hw, E1000_EIMS); 2778 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); 2779 2780 return 0; 2781 } 2782 2783 /* 2784 * It reads ICR and gets interrupt causes, check it and set a bit flag 2785 * to update link status. 2786 * 2787 * @param dev 2788 * Pointer to struct rte_eth_dev. 2789 * 2790 * @return 2791 * - On success, zero. 2792 * - On failure, a negative value. 2793 */ 2794 static int 2795 eth_igb_interrupt_get_status(struct rte_eth_dev *dev) 2796 { 2797 uint32_t icr; 2798 struct e1000_hw *hw = 2799 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2800 struct e1000_interrupt *intr = 2801 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2802 2803 igb_intr_disable(hw); 2804 2805 /* read-on-clear nic registers here */ 2806 icr = E1000_READ_REG(hw, E1000_ICR); 2807 2808 intr->flags = 0; 2809 if (icr & E1000_ICR_LSC) { 2810 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; 2811 } 2812 2813 if (icr & E1000_ICR_VMMB) 2814 intr->flags |= E1000_FLAG_MAILBOX; 2815 2816 return 0; 2817 } 2818 2819 /* 2820 * It executes link_update after knowing an interrupt is prsent. 2821 * 2822 * @param dev 2823 * Pointer to struct rte_eth_dev. 2824 * 2825 * @return 2826 * - On success, zero. 2827 * - On failure, a negative value. 2828 */ 2829 static int 2830 eth_igb_interrupt_action(struct rte_eth_dev *dev, 2831 struct rte_intr_handle *intr_handle) 2832 { 2833 struct e1000_hw *hw = 2834 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2835 struct e1000_interrupt *intr = 2836 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2837 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2838 struct rte_eth_link link; 2839 int ret; 2840 2841 if (intr->flags & E1000_FLAG_MAILBOX) { 2842 igb_pf_mbx_process(dev); 2843 intr->flags &= ~E1000_FLAG_MAILBOX; 2844 } 2845 2846 igb_intr_enable(dev); 2847 rte_intr_enable(intr_handle); 2848 2849 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) { 2850 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; 2851 2852 /* set get_link_status to check register later */ 2853 hw->mac.get_link_status = 1; 2854 ret = eth_igb_link_update(dev, 0); 2855 2856 /* check if link has changed */ 2857 if (ret < 0) 2858 return 0; 2859 2860 rte_eth_linkstatus_get(dev, &link); 2861 if (link.link_status) { 2862 PMD_INIT_LOG(INFO, 2863 " Port %d: Link Up - speed %u Mbps - %s", 2864 dev->data->port_id, 2865 (unsigned)link.link_speed, 2866 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 2867 "full-duplex" : "half-duplex"); 2868 } else { 2869 PMD_INIT_LOG(INFO, " Port %d: Link Down", 2870 dev->data->port_id); 2871 } 2872 2873 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", 2874 pci_dev->addr.domain, 2875 pci_dev->addr.bus, 2876 pci_dev->addr.devid, 2877 pci_dev->addr.function); 2878 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 2879 NULL); 2880 } 2881 2882 return 0; 2883 } 2884 2885 /** 2886 * Interrupt handler which shall be registered at first. 2887 * 2888 * @param handle 2889 * Pointer to interrupt handle. 2890 * @param param 2891 * The address of parameter (struct rte_eth_dev *) regsitered before. 
2892 * 2893 * @return 2894 * void 2895 */ 2896 static void 2897 eth_igb_interrupt_handler(void *param) 2898 { 2899 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2900 2901 eth_igb_interrupt_get_status(dev); 2902 eth_igb_interrupt_action(dev, dev->intr_handle); 2903 } 2904 2905 static int 2906 eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev) 2907 { 2908 uint32_t eicr; 2909 struct e1000_hw *hw = 2910 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2911 struct e1000_interrupt *intr = 2912 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2913 2914 igbvf_intr_disable(hw); 2915 2916 /* read-on-clear nic registers here */ 2917 eicr = E1000_READ_REG(hw, E1000_EICR); 2918 intr->flags = 0; 2919 2920 if (eicr == E1000_VTIVAR_MISC_MAILBOX) 2921 intr->flags |= E1000_FLAG_MAILBOX; 2922 2923 return 0; 2924 } 2925 2926 void igbvf_mbx_process(struct rte_eth_dev *dev) 2927 { 2928 struct e1000_hw *hw = 2929 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2930 struct e1000_mbx_info *mbx = &hw->mbx; 2931 u32 in_msg = 0; 2932 2933 /* peek the message first */ 2934 in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0)); 2935 2936 /* PF reset VF event */ 2937 if (in_msg == E1000_PF_CONTROL_MSG) { 2938 /* dummy mbx read to ack pf */ 2939 if (mbx->ops.read(hw, &in_msg, 1, 0)) 2940 return; 2941 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 2942 NULL); 2943 } 2944 } 2945 2946 static int 2947 eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle) 2948 { 2949 struct e1000_interrupt *intr = 2950 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2951 2952 if (intr->flags & E1000_FLAG_MAILBOX) { 2953 igbvf_mbx_process(dev); 2954 intr->flags &= ~E1000_FLAG_MAILBOX; 2955 } 2956 2957 igbvf_intr_enable(dev); 2958 rte_intr_enable(intr_handle); 2959 2960 return 0; 2961 } 2962 2963 static void 2964 eth_igbvf_interrupt_handler(void *param) 2965 { 2966 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2967 2968 eth_igbvf_interrupt_get_status(dev); 2969 eth_igbvf_interrupt_action(dev, dev->intr_handle); 2970 } 2971 2972 static int 2973 eth_igb_led_on(struct rte_eth_dev *dev) 2974 { 2975 struct e1000_hw *hw; 2976 2977 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2978 return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; 2979 } 2980 2981 static int 2982 eth_igb_led_off(struct rte_eth_dev *dev) 2983 { 2984 struct e1000_hw *hw; 2985 2986 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2987 return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; 2988 } 2989 2990 static int 2991 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2992 { 2993 struct e1000_hw *hw; 2994 uint32_t ctrl; 2995 int tx_pause; 2996 int rx_pause; 2997 2998 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2999 fc_conf->pause_time = hw->fc.pause_time; 3000 fc_conf->high_water = hw->fc.high_water; 3001 fc_conf->low_water = hw->fc.low_water; 3002 fc_conf->send_xon = hw->fc.send_xon; 3003 fc_conf->autoneg = hw->mac.autoneg; 3004 3005 /* 3006 * Return rx_pause and tx_pause status according to actual setting of 3007 * the TFCE and RFCE bits in the CTRL register. 
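 * CTRL.TFCE set means the MAC generates PAUSE frames, CTRL.RFCE set
 * means it honours received PAUSE frames; both set is reported as
 * RTE_FC_FULL and neither as RTE_FC_NONE, matching the mapping below.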
3008 */ 3009 ctrl = E1000_READ_REG(hw, E1000_CTRL); 3010 if (ctrl & E1000_CTRL_TFCE) 3011 tx_pause = 1; 3012 else 3013 tx_pause = 0; 3014 3015 if (ctrl & E1000_CTRL_RFCE) 3016 rx_pause = 1; 3017 else 3018 rx_pause = 0; 3019 3020 if (rx_pause && tx_pause) 3021 fc_conf->mode = RTE_FC_FULL; 3022 else if (rx_pause) 3023 fc_conf->mode = RTE_FC_RX_PAUSE; 3024 else if (tx_pause) 3025 fc_conf->mode = RTE_FC_TX_PAUSE; 3026 else 3027 fc_conf->mode = RTE_FC_NONE; 3028 3029 return 0; 3030 } 3031 3032 static int 3033 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3034 { 3035 struct e1000_hw *hw; 3036 int err; 3037 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { 3038 e1000_fc_none, 3039 e1000_fc_rx_pause, 3040 e1000_fc_tx_pause, 3041 e1000_fc_full 3042 }; 3043 uint32_t rx_buf_size; 3044 uint32_t max_high_water; 3045 uint32_t rctl; 3046 3047 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3048 if (fc_conf->autoneg != hw->mac.autoneg) 3049 return -ENOTSUP; 3050 rx_buf_size = igb_get_rx_buffer_size(hw); 3051 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 3052 3053 /* At least reserve one Ethernet frame for watermark */ 3054 max_high_water = rx_buf_size - ETHER_MAX_LEN; 3055 if ((fc_conf->high_water > max_high_water) || 3056 (fc_conf->high_water < fc_conf->low_water)) { 3057 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); 3058 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); 3059 return -EINVAL; 3060 } 3061 3062 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; 3063 hw->fc.pause_time = fc_conf->pause_time; 3064 hw->fc.high_water = fc_conf->high_water; 3065 hw->fc.low_water = fc_conf->low_water; 3066 hw->fc.send_xon = fc_conf->send_xon; 3067 3068 err = e1000_setup_link_generic(hw); 3069 if (err == E1000_SUCCESS) { 3070 3071 /* check if we want to forward MAC frames - driver doesn't have native 3072 * capability to do that, so we'll write the registers ourselves */ 3073 3074 rctl = E1000_READ_REG(hw, E1000_RCTL); 3075 3076 /* set or clear MFLCN.PMCF bit depending on configuration */ 3077 if (fc_conf->mac_ctrl_frame_fwd != 0) 3078 rctl |= E1000_RCTL_PMCF; 3079 else 3080 rctl &= ~E1000_RCTL_PMCF; 3081 3082 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 3083 E1000_WRITE_FLUSH(hw); 3084 3085 return 0; 3086 } 3087 3088 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); 3089 return -EIO; 3090 } 3091 3092 #define E1000_RAH_POOLSEL_SHIFT (18) 3093 static int 3094 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 3095 uint32_t index, uint32_t pool) 3096 { 3097 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3098 uint32_t rah; 3099 3100 e1000_rar_set(hw, mac_addr->addr_bytes, index); 3101 rah = E1000_READ_REG(hw, E1000_RAH(index)); 3102 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool)); 3103 E1000_WRITE_REG(hw, E1000_RAH(index), rah); 3104 return 0; 3105 } 3106 3107 static void 3108 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) 3109 { 3110 uint8_t addr[ETHER_ADDR_LEN]; 3111 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3112 3113 memset(addr, 0, sizeof(addr)); 3114 3115 e1000_rar_set(hw, addr, index); 3116 } 3117 3118 static int 3119 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, 3120 struct ether_addr *addr) 3121 { 3122 eth_igb_rar_clear(dev, 0); 3123 eth_igb_rar_set(dev, (void *)addr, 0, 0); 3124 3125 return 0; 3126 } 3127 /* 3128 * Virtual Function operations 3129 */ 3130 static void 3131 igbvf_intr_disable(struct e1000_hw *hw) 3132 
{ 3133 PMD_INIT_FUNC_TRACE(); 3134 3135 /* Clear interrupt mask to stop from interrupts being generated */ 3136 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF); 3137 3138 E1000_WRITE_FLUSH(hw); 3139 } 3140 3141 static void 3142 igbvf_stop_adapter(struct rte_eth_dev *dev) 3143 { 3144 u32 reg_val; 3145 u16 i; 3146 struct rte_eth_dev_info dev_info; 3147 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3148 3149 memset(&dev_info, 0, sizeof(dev_info)); 3150 eth_igbvf_infos_get(dev, &dev_info); 3151 3152 /* Clear interrupt mask to stop from interrupts being generated */ 3153 igbvf_intr_disable(hw); 3154 3155 /* Clear any pending interrupts, flush previous writes */ 3156 E1000_READ_REG(hw, E1000_EICR); 3157 3158 /* Disable the transmit unit. Each queue must be disabled. */ 3159 for (i = 0; i < dev_info.max_tx_queues; i++) 3160 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH); 3161 3162 /* Disable the receive unit by stopping each queue */ 3163 for (i = 0; i < dev_info.max_rx_queues; i++) { 3164 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i)); 3165 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE; 3166 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val); 3167 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE) 3168 ; 3169 } 3170 3171 /* flush all queues disables */ 3172 E1000_WRITE_FLUSH(hw); 3173 msec_delay(2); 3174 } 3175 3176 static int eth_igbvf_link_update(struct e1000_hw *hw) 3177 { 3178 struct e1000_mbx_info *mbx = &hw->mbx; 3179 struct e1000_mac_info *mac = &hw->mac; 3180 int ret_val = E1000_SUCCESS; 3181 3182 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf"); 3183 3184 /* 3185 * We only want to run this if there has been a rst asserted. 3186 * in this case that could mean a link change, device reset, 3187 * or a virtual function reset 3188 */ 3189 3190 /* If we were hit with a reset or timeout drop the link */ 3191 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout) 3192 mac->get_link_status = TRUE; 3193 3194 if (!mac->get_link_status) 3195 goto out; 3196 3197 /* if link status is down no point in checking to see if pf is up */ 3198 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) 3199 goto out; 3200 3201 /* if we passed all the tests above then the link is up and we no 3202 * longer need to check for link */ 3203 mac->get_link_status = FALSE; 3204 3205 out: 3206 return ret_val; 3207 } 3208 3209 3210 static int 3211 igbvf_dev_configure(struct rte_eth_dev *dev) 3212 { 3213 struct rte_eth_conf* conf = &dev->data->dev_conf; 3214 3215 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 3216 dev->data->port_id); 3217 3218 /* 3219 * VF has no ability to enable/disable HW CRC 3220 * Keep the persistent behavior the same as Host PF 3221 */ 3222 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC 3223 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { 3224 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 3225 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; 3226 } 3227 #else 3228 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { 3229 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 3230 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; 3231 } 3232 #endif 3233 3234 return 0; 3235 } 3236 3237 static int 3238 igbvf_dev_start(struct rte_eth_dev *dev) 3239 { 3240 struct e1000_hw *hw = 3241 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3242 struct e1000_adapter *adapter = 3243 E1000_DEV_PRIVATE(dev->data->dev_private); 3244 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3245 struct rte_intr_handle *intr_handle = 
&pci_dev->intr_handle; 3246 int ret; 3247 uint32_t intr_vector = 0; 3248 3249 PMD_INIT_FUNC_TRACE(); 3250 3251 hw->mac.ops.reset_hw(hw); 3252 adapter->stopped = 0; 3253 3254 /* Set all vfta */ 3255 igbvf_set_vfta_all(dev,1); 3256 3257 eth_igbvf_tx_init(dev); 3258 3259 /* This can fail when allocating mbufs for descriptor rings */ 3260 ret = eth_igbvf_rx_init(dev); 3261 if (ret) { 3262 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 3263 igb_dev_clear_queues(dev); 3264 return ret; 3265 } 3266 3267 /* check and configure queue intr-vector mapping */ 3268 if (rte_intr_cap_multiple(intr_handle) && 3269 dev->data->dev_conf.intr_conf.rxq) { 3270 intr_vector = dev->data->nb_rx_queues; 3271 ret = rte_intr_efd_enable(intr_handle, intr_vector); 3272 if (ret) 3273 return ret; 3274 } 3275 3276 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 3277 intr_handle->intr_vec = 3278 rte_zmalloc("intr_vec", 3279 dev->data->nb_rx_queues * sizeof(int), 0); 3280 if (!intr_handle->intr_vec) { 3281 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 3282 " intr_vec", dev->data->nb_rx_queues); 3283 return -ENOMEM; 3284 } 3285 } 3286 3287 eth_igbvf_configure_msix_intr(dev); 3288 3289 /* enable uio/vfio intr/eventfd mapping */ 3290 rte_intr_enable(intr_handle); 3291 3292 /* resume enabled intr since hw reset */ 3293 igbvf_intr_enable(dev); 3294 3295 return 0; 3296 } 3297 3298 static void 3299 igbvf_dev_stop(struct rte_eth_dev *dev) 3300 { 3301 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3302 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3303 3304 PMD_INIT_FUNC_TRACE(); 3305 3306 igbvf_stop_adapter(dev); 3307 3308 /* 3309 * Clear what we set, but we still keep shadow_vfta to 3310 * restore after device starts 3311 */ 3312 igbvf_set_vfta_all(dev,0); 3313 3314 igb_dev_clear_queues(dev); 3315 3316 /* disable intr eventfd mapping */ 3317 rte_intr_disable(intr_handle); 3318 3319 /* Clean datapath event and queue/vec mapping */ 3320 rte_intr_efd_disable(intr_handle); 3321 if (intr_handle->intr_vec) { 3322 rte_free(intr_handle->intr_vec); 3323 intr_handle->intr_vec = NULL; 3324 } 3325 } 3326 3327 static void 3328 igbvf_dev_close(struct rte_eth_dev *dev) 3329 { 3330 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3331 struct e1000_adapter *adapter = 3332 E1000_DEV_PRIVATE(dev->data->dev_private); 3333 struct ether_addr addr; 3334 3335 PMD_INIT_FUNC_TRACE(); 3336 3337 e1000_reset_hw(hw); 3338 3339 igbvf_dev_stop(dev); 3340 adapter->stopped = 1; 3341 igb_dev_free_queues(dev); 3342 3343 /** 3344 * reprogram the RAR with a zero mac address, 3345 * to ensure that the VF traffic goes to the PF 3346 * after stop, close and detach of the VF. 
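 * igbvf_default_mac_addr_set() below passes the all-zero address to
 * hw->mac.ops.rar_set(); for the VF this is expected to request the
 * address change from the PF over the mailbox.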
3347 **/ 3348 3349 memset(&addr, 0, sizeof(addr)); 3350 igbvf_default_mac_addr_set(dev, &addr); 3351 } 3352 3353 static void 3354 igbvf_promiscuous_enable(struct rte_eth_dev *dev) 3355 { 3356 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3357 3358 /* Set both unicast and multicast promisc */ 3359 e1000_promisc_set_vf(hw, e1000_promisc_enabled); 3360 } 3361 3362 static void 3363 igbvf_promiscuous_disable(struct rte_eth_dev *dev) 3364 { 3365 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3366 3367 /* If in allmulticast mode leave multicast promisc */ 3368 if (dev->data->all_multicast == 1) 3369 e1000_promisc_set_vf(hw, e1000_promisc_multicast); 3370 else 3371 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 3372 } 3373 3374 static void 3375 igbvf_allmulticast_enable(struct rte_eth_dev *dev) 3376 { 3377 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3378 3379 /* In promiscuous mode multicast promisc already set */ 3380 if (dev->data->promiscuous == 0) 3381 e1000_promisc_set_vf(hw, e1000_promisc_multicast); 3382 } 3383 3384 static void 3385 igbvf_allmulticast_disable(struct rte_eth_dev *dev) 3386 { 3387 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3388 3389 /* In promiscuous mode leave multicast promisc enabled */ 3390 if (dev->data->promiscuous == 0) 3391 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 3392 } 3393 3394 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on) 3395 { 3396 struct e1000_mbx_info *mbx = &hw->mbx; 3397 uint32_t msgbuf[2]; 3398 s32 err; 3399 3400 /* After set vlan, vlan strip will also be enabled in igb driver*/ 3401 msgbuf[0] = E1000_VF_SET_VLAN; 3402 msgbuf[1] = vid; 3403 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ 3404 if (on) 3405 msgbuf[0] |= E1000_VF_SET_VLAN_ADD; 3406 3407 err = mbx->ops.write_posted(hw, msgbuf, 2, 0); 3408 if (err) 3409 goto mbx_err; 3410 3411 err = mbx->ops.read_posted(hw, msgbuf, 2, 0); 3412 if (err) 3413 goto mbx_err; 3414 3415 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; 3416 if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)) 3417 err = -EINVAL; 3418 3419 mbx_err: 3420 return err; 3421 } 3422 3423 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on) 3424 { 3425 struct e1000_hw *hw = 3426 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3427 struct e1000_vfta * shadow_vfta = 3428 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 3429 int i = 0, j = 0, vfta = 0, mask = 1; 3430 3431 for (i = 0; i < IGB_VFTA_SIZE; i++){ 3432 vfta = shadow_vfta->vfta[i]; 3433 if(vfta){ 3434 mask = 1; 3435 for (j = 0; j < 32; j++){ 3436 if(vfta & mask) 3437 igbvf_set_vfta(hw, 3438 (uint16_t)((i<<5)+j), on); 3439 mask<<=1; 3440 } 3441 } 3442 } 3443 3444 } 3445 3446 static int 3447 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 3448 { 3449 struct e1000_hw *hw = 3450 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3451 struct e1000_vfta * shadow_vfta = 3452 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 3453 uint32_t vid_idx = 0; 3454 uint32_t vid_bit = 0; 3455 int ret = 0; 3456 3457 PMD_INIT_FUNC_TRACE(); 3458 3459 /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/ 3460 ret = igbvf_set_vfta(hw, vlan_id, !!on); 3461 if(ret){ 3462 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 3463 return ret; 3464 } 3465 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 3466 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 3467 3468 /*Save what we set and retore it after device reset*/ 3469 if 
(on) 3470 shadow_vfta->vfta[vid_idx] |= vid_bit; 3471 else 3472 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 3473 3474 return 0; 3475 } 3476 3477 static int 3478 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) 3479 { 3480 struct e1000_hw *hw = 3481 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3482 3483 /* index is not used by rar_set() */ 3484 hw->mac.ops.rar_set(hw, (void *)addr, 0); 3485 return 0; 3486 } 3487 3488 3489 static int 3490 eth_igb_rss_reta_update(struct rte_eth_dev *dev, 3491 struct rte_eth_rss_reta_entry64 *reta_conf, 3492 uint16_t reta_size) 3493 { 3494 uint8_t i, j, mask; 3495 uint32_t reta, r; 3496 uint16_t idx, shift; 3497 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3498 3499 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3500 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3501 "(%d) doesn't match the number hardware can supported " 3502 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); 3503 return -EINVAL; 3504 } 3505 3506 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { 3507 idx = i / RTE_RETA_GROUP_SIZE; 3508 shift = i % RTE_RETA_GROUP_SIZE; 3509 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3510 IGB_4_BIT_MASK); 3511 if (!mask) 3512 continue; 3513 if (mask == IGB_4_BIT_MASK) 3514 r = 0; 3515 else 3516 r = E1000_READ_REG(hw, E1000_RETA(i >> 2)); 3517 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) { 3518 if (mask & (0x1 << j)) 3519 reta |= reta_conf[idx].reta[shift + j] << 3520 (CHAR_BIT * j); 3521 else 3522 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j)); 3523 } 3524 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta); 3525 } 3526 3527 return 0; 3528 } 3529 3530 static int 3531 eth_igb_rss_reta_query(struct rte_eth_dev *dev, 3532 struct rte_eth_rss_reta_entry64 *reta_conf, 3533 uint16_t reta_size) 3534 { 3535 uint8_t i, j, mask; 3536 uint32_t reta; 3537 uint16_t idx, shift; 3538 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3539 3540 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3541 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3542 "(%d) doesn't match the number hardware can supported " 3543 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); 3544 return -EINVAL; 3545 } 3546 3547 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { 3548 idx = i / RTE_RETA_GROUP_SIZE; 3549 shift = i % RTE_RETA_GROUP_SIZE; 3550 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3551 IGB_4_BIT_MASK); 3552 if (!mask) 3553 continue; 3554 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2)); 3555 for (j = 0; j < IGB_4_BIT_WIDTH; j++) { 3556 if (mask & (0x1 << j)) 3557 reta_conf[idx].reta[shift + j] = 3558 ((reta >> (CHAR_BIT * j)) & 3559 IGB_8_BIT_MASK); 3560 } 3561 } 3562 3563 return 0; 3564 } 3565 3566 int 3567 eth_igb_syn_filter_set(struct rte_eth_dev *dev, 3568 struct rte_eth_syn_filter *filter, 3569 bool add) 3570 { 3571 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3572 struct e1000_filter_info *filter_info = 3573 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3574 uint32_t synqf, rfctl; 3575 3576 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) 3577 return -EINVAL; 3578 3579 synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); 3580 3581 if (add) { 3582 if (synqf & E1000_SYN_FILTER_ENABLE) 3583 return -EINVAL; 3584 3585 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) & 3586 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE); 3587 3588 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 3589 if (filter->hig_pri) 3590 rfctl |= E1000_RFCTL_SYNQFP; 3591 else 3592 rfctl &= 
~E1000_RFCTL_SYNQFP; 3593 3594 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); 3595 } else { 3596 if (!(synqf & E1000_SYN_FILTER_ENABLE)) 3597 return -ENOENT; 3598 synqf = 0; 3599 } 3600 3601 filter_info->syn_info = synqf; 3602 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); 3603 E1000_WRITE_FLUSH(hw); 3604 return 0; 3605 } 3606 3607 static int 3608 eth_igb_syn_filter_get(struct rte_eth_dev *dev, 3609 struct rte_eth_syn_filter *filter) 3610 { 3611 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3612 uint32_t synqf, rfctl; 3613 3614 synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); 3615 if (synqf & E1000_SYN_FILTER_ENABLE) { 3616 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 3617 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0; 3618 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >> 3619 E1000_SYN_FILTER_QUEUE_SHIFT); 3620 return 0; 3621 } 3622 3623 return -ENOENT; 3624 } 3625 3626 static int 3627 eth_igb_syn_filter_handle(struct rte_eth_dev *dev, 3628 enum rte_filter_op filter_op, 3629 void *arg) 3630 { 3631 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3632 int ret; 3633 3634 MAC_TYPE_FILTER_SUP(hw->mac.type); 3635 3636 if (filter_op == RTE_ETH_FILTER_NOP) 3637 return 0; 3638 3639 if (arg == NULL) { 3640 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 3641 filter_op); 3642 return -EINVAL; 3643 } 3644 3645 switch (filter_op) { 3646 case RTE_ETH_FILTER_ADD: 3647 ret = eth_igb_syn_filter_set(dev, 3648 (struct rte_eth_syn_filter *)arg, 3649 TRUE); 3650 break; 3651 case RTE_ETH_FILTER_DELETE: 3652 ret = eth_igb_syn_filter_set(dev, 3653 (struct rte_eth_syn_filter *)arg, 3654 FALSE); 3655 break; 3656 case RTE_ETH_FILTER_GET: 3657 ret = eth_igb_syn_filter_get(dev, 3658 (struct rte_eth_syn_filter *)arg); 3659 break; 3660 default: 3661 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); 3662 ret = -EINVAL; 3663 break; 3664 } 3665 3666 return ret; 3667 } 3668 3669 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ 3670 static inline int 3671 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, 3672 struct e1000_2tuple_filter_info *filter_info) 3673 { 3674 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) 3675 return -EINVAL; 3676 if (filter->priority > E1000_2TUPLE_MAX_PRI) 3677 return -EINVAL; /* filter index is out of range. */ 3678 if (filter->tcp_flags > TCP_FLAG_ALL) 3679 return -EINVAL; /* flags is invalid. 
*/ 3680 3681 switch (filter->dst_port_mask) { 3682 case UINT16_MAX: 3683 filter_info->dst_port_mask = 0; 3684 filter_info->dst_port = filter->dst_port; 3685 break; 3686 case 0: 3687 filter_info->dst_port_mask = 1; 3688 break; 3689 default: 3690 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 3691 return -EINVAL; 3692 } 3693 3694 switch (filter->proto_mask) { 3695 case UINT8_MAX: 3696 filter_info->proto_mask = 0; 3697 filter_info->proto = filter->proto; 3698 break; 3699 case 0: 3700 filter_info->proto_mask = 1; 3701 break; 3702 default: 3703 PMD_DRV_LOG(ERR, "invalid protocol mask."); 3704 return -EINVAL; 3705 } 3706 3707 filter_info->priority = (uint8_t)filter->priority; 3708 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) 3709 filter_info->tcp_flags = filter->tcp_flags; 3710 else 3711 filter_info->tcp_flags = 0; 3712 3713 return 0; 3714 } 3715 3716 static inline struct e1000_2tuple_filter * 3717 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list, 3718 struct e1000_2tuple_filter_info *key) 3719 { 3720 struct e1000_2tuple_filter *it; 3721 3722 TAILQ_FOREACH(it, filter_list, entries) { 3723 if (memcmp(key, &it->filter_info, 3724 sizeof(struct e1000_2tuple_filter_info)) == 0) { 3725 return it; 3726 } 3727 } 3728 return NULL; 3729 } 3730 3731 /* inject a igb 2tuple filter to HW */ 3732 static inline void 3733 igb_inject_2uple_filter(struct rte_eth_dev *dev, 3734 struct e1000_2tuple_filter *filter) 3735 { 3736 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3737 uint32_t ttqf = E1000_TTQF_DISABLE_MASK; 3738 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP; 3739 int i; 3740 3741 i = filter->index; 3742 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); 3743 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ 3744 imir |= E1000_IMIR_PORT_BP; 3745 else 3746 imir &= ~E1000_IMIR_PORT_BP; 3747 3748 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; 3749 3750 ttqf |= E1000_TTQF_QUEUE_ENABLE; 3751 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT); 3752 ttqf |= (uint32_t)(filter->filter_info.proto & 3753 E1000_TTQF_PROTOCOL_MASK); 3754 if (filter->filter_info.proto_mask == 0) 3755 ttqf &= ~E1000_TTQF_MASK_ENABLE; 3756 3757 /* tcp flags bits setting. */ 3758 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { 3759 if (filter->filter_info.tcp_flags & TCP_URG_FLAG) 3760 imir_ext |= E1000_IMIREXT_CTRL_URG; 3761 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) 3762 imir_ext |= E1000_IMIREXT_CTRL_ACK; 3763 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) 3764 imir_ext |= E1000_IMIREXT_CTRL_PSH; 3765 if (filter->filter_info.tcp_flags & TCP_RST_FLAG) 3766 imir_ext |= E1000_IMIREXT_CTRL_RST; 3767 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) 3768 imir_ext |= E1000_IMIREXT_CTRL_SYN; 3769 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) 3770 imir_ext |= E1000_IMIREXT_CTRL_FIN; 3771 } else { 3772 imir_ext |= E1000_IMIREXT_CTRL_BP; 3773 } 3774 E1000_WRITE_REG(hw, E1000_IMIR(i), imir); 3775 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf); 3776 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); 3777 } 3778 3779 /* 3780 * igb_add_2tuple_filter - add a 2tuple filter 3781 * 3782 * @param 3783 * dev: Pointer to struct rte_eth_dev. 3784 * ntuple_filter: ponter to the filter that will be added. 3785 * 3786 * @return 3787 * - On success, zero. 3788 * - On failure, a negative value. 
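 *
 * The generic ntuple request is first converted into a 2-tuple key; duplicate
 * keys are rejected, a free slot is claimed from twotuple_mask, and the filter
 * is then programmed into the IMIR/TTQF/IMIREXT registers of that slot by
 * igb_inject_2uple_filter().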
3789 */
3790 static int
3791 igb_add_2tuple_filter(struct rte_eth_dev *dev,
3792 struct rte_eth_ntuple_filter *ntuple_filter)
3793 {
3794 struct e1000_filter_info *filter_info =
3795 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3796 struct e1000_2tuple_filter *filter;
3797 int i, ret;
3798
3799 filter = rte_zmalloc("e1000_2tuple_filter",
3800 sizeof(struct e1000_2tuple_filter), 0);
3801 if (filter == NULL)
3802 return -ENOMEM;
3803
3804 ret = ntuple_filter_to_2tuple(ntuple_filter,
3805 &filter->filter_info);
3806 if (ret < 0) {
3807 rte_free(filter);
3808 return ret;
3809 }
3810 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3811 &filter->filter_info) != NULL) {
3812 PMD_DRV_LOG(ERR, "filter exists.");
3813 rte_free(filter);
3814 return -EEXIST;
3815 }
3816 filter->queue = ntuple_filter->queue;
3817
3818 /*
3819 * look for an unused 2tuple filter index,
3820 * and insert the filter into the list.
3821 */
3822 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
3823 if (!(filter_info->twotuple_mask & (1 << i))) {
3824 filter_info->twotuple_mask |= 1 << i;
3825 filter->index = i;
3826 TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
3827 filter,
3828 entries);
3829 break;
3830 }
3831 }
3832 if (i >= E1000_MAX_TTQF_FILTERS) {
3833 PMD_DRV_LOG(ERR, "2tuple filters are full.");
3834 rte_free(filter);
3835 return -ENOSYS;
3836 }
3837
3838 igb_inject_2uple_filter(dev, filter);
3839 return 0;
3840 }
3841
3842 int
3843 igb_delete_2tuple_filter(struct rte_eth_dev *dev,
3844 struct e1000_2tuple_filter *filter)
3845 {
3846 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3847 struct e1000_filter_info *filter_info =
3848 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3849
3850 filter_info->twotuple_mask &= ~(1 << filter->index);
3851 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
3852
3853 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
3854 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3855 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3856 rte_free(filter);
3857 return 0;
3858 }
3859
3860 /*
3861 * igb_remove_2tuple_filter - remove a 2tuple filter
3862 *
3863 * @param
3864 * dev: Pointer to struct rte_eth_dev.
3865 * ntuple_filter: pointer to the filter that will be removed.
3866 *
3867 * @return
3868 * - On success, zero.
3869 * - On failure, a negative value.
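 *
 * The request is converted to the same 2-tuple key that was used at add time
 * and looked up in twotuple_list; the actual teardown (mask bit, list removal
 * and TTQF/IMIR/IMIREXT reset) is done by igb_delete_2tuple_filter().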
3870 */ 3871 static int 3872 igb_remove_2tuple_filter(struct rte_eth_dev *dev, 3873 struct rte_eth_ntuple_filter *ntuple_filter) 3874 { 3875 struct e1000_filter_info *filter_info = 3876 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3877 struct e1000_2tuple_filter_info filter_2tuple; 3878 struct e1000_2tuple_filter *filter; 3879 int ret; 3880 3881 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info)); 3882 ret = ntuple_filter_to_2tuple(ntuple_filter, 3883 &filter_2tuple); 3884 if (ret < 0) 3885 return ret; 3886 3887 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list, 3888 &filter_2tuple); 3889 if (filter == NULL) { 3890 PMD_DRV_LOG(ERR, "filter doesn't exist."); 3891 return -ENOENT; 3892 } 3893 3894 igb_delete_2tuple_filter(dev, filter); 3895 3896 return 0; 3897 } 3898 3899 /* inject a igb flex filter to HW */ 3900 static inline void 3901 igb_inject_flex_filter(struct rte_eth_dev *dev, 3902 struct e1000_flex_filter *filter) 3903 { 3904 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3905 uint32_t wufc, queueing; 3906 uint32_t reg_off; 3907 uint8_t i, j = 0; 3908 3909 wufc = E1000_READ_REG(hw, E1000_WUFC); 3910 if (filter->index < E1000_MAX_FHFT) 3911 reg_off = E1000_FHFT(filter->index); 3912 else 3913 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); 3914 3915 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ | 3916 (E1000_WUFC_FLX0 << filter->index)); 3917 queueing = filter->filter_info.len | 3918 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) | 3919 (filter->filter_info.priority << 3920 E1000_FHFT_QUEUEING_PRIO_SHIFT); 3921 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET, 3922 queueing); 3923 3924 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) { 3925 E1000_WRITE_REG(hw, reg_off, 3926 filter->filter_info.dwords[j]); 3927 reg_off += sizeof(uint32_t); 3928 E1000_WRITE_REG(hw, reg_off, 3929 filter->filter_info.dwords[++j]); 3930 reg_off += sizeof(uint32_t); 3931 E1000_WRITE_REG(hw, reg_off, 3932 (uint32_t)filter->filter_info.mask[i]); 3933 reg_off += sizeof(uint32_t) * 2; 3934 ++j; 3935 } 3936 } 3937 3938 static inline struct e1000_flex_filter * 3939 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list, 3940 struct e1000_flex_filter_info *key) 3941 { 3942 struct e1000_flex_filter *it; 3943 3944 TAILQ_FOREACH(it, filter_list, entries) { 3945 if (memcmp(key, &it->filter_info, 3946 sizeof(struct e1000_flex_filter_info)) == 0) 3947 return it; 3948 } 3949 3950 return NULL; 3951 } 3952 3953 /* remove a flex byte filter 3954 * @param 3955 * dev: Pointer to struct rte_eth_dev. 3956 * filter: the pointer of the filter will be removed. 
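 *
 * The FHFT (or FHFT_EXT) register block for the filter's index is cleared,
 * the corresponding WUFC flex-enable bit is turned off, and the entry is
 * unlinked from flex_list and freed.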
3957 */ 3958 void 3959 igb_remove_flex_filter(struct rte_eth_dev *dev, 3960 struct e1000_flex_filter *filter) 3961 { 3962 struct e1000_filter_info *filter_info = 3963 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3964 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3965 uint32_t wufc, i; 3966 uint32_t reg_off; 3967 3968 wufc = E1000_READ_REG(hw, E1000_WUFC); 3969 if (filter->index < E1000_MAX_FHFT) 3970 reg_off = E1000_FHFT(filter->index); 3971 else 3972 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); 3973 3974 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++) 3975 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0); 3976 3977 E1000_WRITE_REG(hw, E1000_WUFC, wufc & 3978 (~(E1000_WUFC_FLX0 << filter->index))); 3979 3980 filter_info->flex_mask &= ~(1 << filter->index); 3981 TAILQ_REMOVE(&filter_info->flex_list, filter, entries); 3982 rte_free(filter); 3983 } 3984 3985 int 3986 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, 3987 struct rte_eth_flex_filter *filter, 3988 bool add) 3989 { 3990 struct e1000_filter_info *filter_info = 3991 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3992 struct e1000_flex_filter *flex_filter, *it; 3993 uint32_t mask; 3994 uint8_t shift, i; 3995 3996 flex_filter = rte_zmalloc("e1000_flex_filter", 3997 sizeof(struct e1000_flex_filter), 0); 3998 if (flex_filter == NULL) 3999 return -ENOMEM; 4000 4001 flex_filter->filter_info.len = filter->len; 4002 flex_filter->filter_info.priority = filter->priority; 4003 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len); 4004 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) { 4005 mask = 0; 4006 /* reverse bits in flex filter's mask*/ 4007 for (shift = 0; shift < CHAR_BIT; shift++) { 4008 if (filter->mask[i] & (0x01 << shift)) 4009 mask |= (0x80 >> shift); 4010 } 4011 flex_filter->filter_info.mask[i] = mask; 4012 } 4013 4014 it = eth_igb_flex_filter_lookup(&filter_info->flex_list, 4015 &flex_filter->filter_info); 4016 if (it == NULL && !add) { 4017 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4018 rte_free(flex_filter); 4019 return -ENOENT; 4020 } 4021 if (it != NULL && add) { 4022 PMD_DRV_LOG(ERR, "filter exists."); 4023 rte_free(flex_filter); 4024 return -EEXIST; 4025 } 4026 4027 if (add) { 4028 flex_filter->queue = filter->queue; 4029 /* 4030 * look for an unused flex filter index 4031 * and insert the filter into the list. 
4032 */ 4033 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) { 4034 if (!(filter_info->flex_mask & (1 << i))) { 4035 filter_info->flex_mask |= 1 << i; 4036 flex_filter->index = i; 4037 TAILQ_INSERT_TAIL(&filter_info->flex_list, 4038 flex_filter, 4039 entries); 4040 break; 4041 } 4042 } 4043 if (i >= E1000_MAX_FLEX_FILTERS) { 4044 PMD_DRV_LOG(ERR, "flex filters are full."); 4045 rte_free(flex_filter); 4046 return -ENOSYS; 4047 } 4048 4049 igb_inject_flex_filter(dev, flex_filter); 4050 4051 } else { 4052 igb_remove_flex_filter(dev, it); 4053 rte_free(flex_filter); 4054 } 4055 4056 return 0; 4057 } 4058 4059 static int 4060 eth_igb_get_flex_filter(struct rte_eth_dev *dev, 4061 struct rte_eth_flex_filter *filter) 4062 { 4063 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4064 struct e1000_filter_info *filter_info = 4065 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4066 struct e1000_flex_filter flex_filter, *it; 4067 uint32_t wufc, queueing, wufc_en = 0; 4068 4069 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter)); 4070 flex_filter.filter_info.len = filter->len; 4071 flex_filter.filter_info.priority = filter->priority; 4072 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len); 4073 memcpy(flex_filter.filter_info.mask, filter->mask, 4074 RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT); 4075 4076 it = eth_igb_flex_filter_lookup(&filter_info->flex_list, 4077 &flex_filter.filter_info); 4078 if (it == NULL) { 4079 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4080 return -ENOENT; 4081 } 4082 4083 wufc = E1000_READ_REG(hw, E1000_WUFC); 4084 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index); 4085 4086 if ((wufc & wufc_en) == wufc_en) { 4087 uint32_t reg_off = 0; 4088 if (it->index < E1000_MAX_FHFT) 4089 reg_off = E1000_FHFT(it->index); 4090 else 4091 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT); 4092 4093 queueing = E1000_READ_REG(hw, 4094 reg_off + E1000_FHFT_QUEUEING_OFFSET); 4095 filter->len = queueing & E1000_FHFT_QUEUEING_LEN; 4096 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >> 4097 E1000_FHFT_QUEUEING_PRIO_SHIFT; 4098 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >> 4099 E1000_FHFT_QUEUEING_QUEUE_SHIFT; 4100 return 0; 4101 } 4102 return -ENOENT; 4103 } 4104 4105 static int 4106 eth_igb_flex_filter_handle(struct rte_eth_dev *dev, 4107 enum rte_filter_op filter_op, 4108 void *arg) 4109 { 4110 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4111 struct rte_eth_flex_filter *filter; 4112 int ret = 0; 4113 4114 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); 4115 4116 if (filter_op == RTE_ETH_FILTER_NOP) 4117 return ret; 4118 4119 if (arg == NULL) { 4120 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 4121 filter_op); 4122 return -EINVAL; 4123 } 4124 4125 filter = (struct rte_eth_flex_filter *)arg; 4126 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN 4127 || filter->len % sizeof(uint64_t) != 0) { 4128 PMD_DRV_LOG(ERR, "filter's length is out of range"); 4129 return -EINVAL; 4130 } 4131 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) { 4132 PMD_DRV_LOG(ERR, "filter's priority is out of range"); 4133 return -EINVAL; 4134 } 4135 4136 switch (filter_op) { 4137 case RTE_ETH_FILTER_ADD: 4138 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE); 4139 break; 4140 case RTE_ETH_FILTER_DELETE: 4141 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE); 4142 break; 4143 case RTE_ETH_FILTER_GET: 4144 ret = eth_igb_get_flex_filter(dev, filter); 4145 break; 4146 default: 4147 
PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); 4148 ret = -EINVAL; 4149 break; 4150 } 4151 4152 return ret; 4153 } 4154 4155 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/ 4156 static inline int 4157 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter, 4158 struct e1000_5tuple_filter_info *filter_info) 4159 { 4160 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) 4161 return -EINVAL; 4162 if (filter->priority > E1000_2TUPLE_MAX_PRI) 4163 return -EINVAL; /* filter index is out of range. */ 4164 if (filter->tcp_flags > TCP_FLAG_ALL) 4165 return -EINVAL; /* flags is invalid. */ 4166 4167 switch (filter->dst_ip_mask) { 4168 case UINT32_MAX: 4169 filter_info->dst_ip_mask = 0; 4170 filter_info->dst_ip = filter->dst_ip; 4171 break; 4172 case 0: 4173 filter_info->dst_ip_mask = 1; 4174 break; 4175 default: 4176 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 4177 return -EINVAL; 4178 } 4179 4180 switch (filter->src_ip_mask) { 4181 case UINT32_MAX: 4182 filter_info->src_ip_mask = 0; 4183 filter_info->src_ip = filter->src_ip; 4184 break; 4185 case 0: 4186 filter_info->src_ip_mask = 1; 4187 break; 4188 default: 4189 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 4190 return -EINVAL; 4191 } 4192 4193 switch (filter->dst_port_mask) { 4194 case UINT16_MAX: 4195 filter_info->dst_port_mask = 0; 4196 filter_info->dst_port = filter->dst_port; 4197 break; 4198 case 0: 4199 filter_info->dst_port_mask = 1; 4200 break; 4201 default: 4202 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 4203 return -EINVAL; 4204 } 4205 4206 switch (filter->src_port_mask) { 4207 case UINT16_MAX: 4208 filter_info->src_port_mask = 0; 4209 filter_info->src_port = filter->src_port; 4210 break; 4211 case 0: 4212 filter_info->src_port_mask = 1; 4213 break; 4214 default: 4215 PMD_DRV_LOG(ERR, "invalid src_port mask."); 4216 return -EINVAL; 4217 } 4218 4219 switch (filter->proto_mask) { 4220 case UINT8_MAX: 4221 filter_info->proto_mask = 0; 4222 filter_info->proto = filter->proto; 4223 break; 4224 case 0: 4225 filter_info->proto_mask = 1; 4226 break; 4227 default: 4228 PMD_DRV_LOG(ERR, "invalid protocol mask."); 4229 return -EINVAL; 4230 } 4231 4232 filter_info->priority = (uint8_t)filter->priority; 4233 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) 4234 filter_info->tcp_flags = filter->tcp_flags; 4235 else 4236 filter_info->tcp_flags = 0; 4237 4238 return 0; 4239 } 4240 4241 static inline struct e1000_5tuple_filter * 4242 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list, 4243 struct e1000_5tuple_filter_info *key) 4244 { 4245 struct e1000_5tuple_filter *it; 4246 4247 TAILQ_FOREACH(it, filter_list, entries) { 4248 if (memcmp(key, &it->filter_info, 4249 sizeof(struct e1000_5tuple_filter_info)) == 0) { 4250 return it; 4251 } 4252 } 4253 return NULL; 4254 } 4255 4256 /* inject a igb 5-tuple filter to HW */ 4257 static inline void 4258 igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev, 4259 struct e1000_5tuple_filter *filter) 4260 { 4261 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4262 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK; 4263 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP; 4264 uint8_t i; 4265 4266 i = filter->index; 4267 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK; 4268 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. 
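 * Clearing the matching bypass (_BP) bit in FTQF below makes the hardware
 * actually compare that field instead of ignoring it.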
*/ 4269 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP; 4270 if (filter->filter_info.dst_ip_mask == 0) 4271 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP; 4272 if (filter->filter_info.src_port_mask == 0) 4273 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; 4274 if (filter->filter_info.proto_mask == 0) 4275 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; 4276 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) & 4277 E1000_FTQF_QUEUE_MASK; 4278 ftqf |= E1000_FTQF_QUEUE_ENABLE; 4279 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf); 4280 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip); 4281 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip); 4282 4283 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT; 4284 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf); 4285 4286 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); 4287 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ 4288 imir |= E1000_IMIR_PORT_BP; 4289 else 4290 imir &= ~E1000_IMIR_PORT_BP; 4291 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; 4292 4293 /* tcp flags bits setting. */ 4294 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { 4295 if (filter->filter_info.tcp_flags & TCP_URG_FLAG) 4296 imir_ext |= E1000_IMIREXT_CTRL_URG; 4297 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) 4298 imir_ext |= E1000_IMIREXT_CTRL_ACK; 4299 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) 4300 imir_ext |= E1000_IMIREXT_CTRL_PSH; 4301 if (filter->filter_info.tcp_flags & TCP_RST_FLAG) 4302 imir_ext |= E1000_IMIREXT_CTRL_RST; 4303 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) 4304 imir_ext |= E1000_IMIREXT_CTRL_SYN; 4305 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) 4306 imir_ext |= E1000_IMIREXT_CTRL_FIN; 4307 } else { 4308 imir_ext |= E1000_IMIREXT_CTRL_BP; 4309 } 4310 E1000_WRITE_REG(hw, E1000_IMIR(i), imir); 4311 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); 4312 } 4313 4314 /* 4315 * igb_add_5tuple_filter_82576 - add a 5tuple filter 4316 * 4317 * @param 4318 * dev: Pointer to struct rte_eth_dev. 4319 * ntuple_filter: ponter to the filter that will be added. 4320 * 4321 * @return 4322 * - On success, zero. 4323 * - On failure, a negative value. 4324 */ 4325 static int 4326 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, 4327 struct rte_eth_ntuple_filter *ntuple_filter) 4328 { 4329 struct e1000_filter_info *filter_info = 4330 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4331 struct e1000_5tuple_filter *filter; 4332 uint8_t i; 4333 int ret; 4334 4335 filter = rte_zmalloc("e1000_5tuple_filter", 4336 sizeof(struct e1000_5tuple_filter), 0); 4337 if (filter == NULL) 4338 return -ENOMEM; 4339 4340 ret = ntuple_filter_to_5tuple_82576(ntuple_filter, 4341 &filter->filter_info); 4342 if (ret < 0) { 4343 rte_free(filter); 4344 return ret; 4345 } 4346 4347 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, 4348 &filter->filter_info) != NULL) { 4349 PMD_DRV_LOG(ERR, "filter exists."); 4350 rte_free(filter); 4351 return -EEXIST; 4352 } 4353 filter->queue = ntuple_filter->queue; 4354 4355 /* 4356 * look for an unused 5tuple filter index, 4357 * and insert the filter to list. 
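 * The claimed slot is recorded in fivetuple_mask so that
 * igb_delete_5tuple_filter_82576() can release it and reset the per-index
 * registers later.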
4358 */
4359 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
4360 if (!(filter_info->fivetuple_mask & (1 << i))) {
4361 filter_info->fivetuple_mask |= 1 << i;
4362 filter->index = i;
4363 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
4364 filter,
4365 entries);
4366 break;
4367 }
4368 }
4369 if (i >= E1000_MAX_FTQF_FILTERS) {
4370 PMD_DRV_LOG(ERR, "5tuple filters are full.");
4371 rte_free(filter);
4372 return -ENOSYS;
4373 }
4374
4375 igb_inject_5tuple_filter_82576(dev, filter);
4376 return 0;
4377 }
4378
4379 int
4380 igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
4381 struct e1000_5tuple_filter *filter)
4382 {
4383 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4384 struct e1000_filter_info *filter_info =
4385 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4386
4387 filter_info->fivetuple_mask &= ~(1 << filter->index);
4388 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
4389
4390 E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
4391 E1000_FTQF_VF_BP | E1000_FTQF_MASK);
4392 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
4393 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
4394 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
4395 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
4396 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
4397 rte_free(filter);
4398 return 0;
4399 }
4400
4401 /*
4402 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
4403 *
4404 * @param
4405 * dev: Pointer to struct rte_eth_dev.
4406 * ntuple_filter: pointer to the filter that will be removed.
4407 *
4408 * @return
4409 * - On success, zero.
4410 * - On failure, a negative value.
4411 */
4412 static int
4413 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
4414 struct rte_eth_ntuple_filter *ntuple_filter)
4415 {
4416 struct e1000_filter_info *filter_info =
4417 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4418 struct e1000_5tuple_filter_info filter_5tuple;
4419 struct e1000_5tuple_filter *filter;
4420 int ret;
4421
4422 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
4423 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4424 &filter_5tuple);
4425 if (ret < 0)
4426 return ret;
4427
4428 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4429 &filter_5tuple);
4430 if (filter == NULL) {
4431 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4432 return -ENOENT;
4433 }
4434
4435 igb_delete_5tuple_filter_82576(dev, filter);
4436
4437 return 0;
4438 }
4439
4440 static int
4441 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4442 {
4443 uint32_t rctl;
4444 struct e1000_hw *hw;
4445 struct rte_eth_dev_info dev_info;
4446 uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
4447 VLAN_TAG_SIZE);
4448
4449 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4450
4451 #ifdef RTE_LIBRTE_82571_SUPPORT
4452 /* XXX: not bigger than max_rx_pktlen */
4453 if (hw->mac.type == e1000_82571)
4454 return -ENOTSUP;
4455 #endif
4456 eth_igb_infos_get(dev, &dev_info);
4457
4458 /* check that mtu is within the allowed range */
4459 if ((mtu < ETHER_MIN_MTU) ||
4460 (frame_size > dev_info.max_rx_pktlen))
4461 return -EINVAL;
4462
4463 /* refuse mtu that requires the support of scattered packets when this
4464 * feature has not been enabled before.
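 * In that case the frame would no longer fit into a single mbuf data room
 * (min_rx_buf_size minus RTE_PKTMBUF_HEADROOM).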
*/ 4465 if (!dev->data->scattered_rx && 4466 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) 4467 return -EINVAL; 4468 4469 rctl = E1000_READ_REG(hw, E1000_RCTL); 4470 4471 /* switch to jumbo mode if needed */ 4472 if (frame_size > ETHER_MAX_LEN) { 4473 dev->data->dev_conf.rxmode.offloads |= 4474 DEV_RX_OFFLOAD_JUMBO_FRAME; 4475 rctl |= E1000_RCTL_LPE; 4476 } else { 4477 dev->data->dev_conf.rxmode.offloads &= 4478 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 4479 rctl &= ~E1000_RCTL_LPE; 4480 } 4481 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 4482 4483 /* update max frame size */ 4484 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 4485 4486 E1000_WRITE_REG(hw, E1000_RLPML, 4487 dev->data->dev_conf.rxmode.max_rx_pkt_len); 4488 4489 return 0; 4490 } 4491 4492 /* 4493 * igb_add_del_ntuple_filter - add or delete a ntuple filter 4494 * 4495 * @param 4496 * dev: Pointer to struct rte_eth_dev. 4497 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 4498 * add: if true, add filter, if false, remove filter 4499 * 4500 * @return 4501 * - On success, zero. 4502 * - On failure, a negative value. 4503 */ 4504 int 4505 igb_add_del_ntuple_filter(struct rte_eth_dev *dev, 4506 struct rte_eth_ntuple_filter *ntuple_filter, 4507 bool add) 4508 { 4509 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4510 int ret; 4511 4512 switch (ntuple_filter->flags) { 4513 case RTE_5TUPLE_FLAGS: 4514 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4515 if (hw->mac.type != e1000_82576) 4516 return -ENOTSUP; 4517 if (add) 4518 ret = igb_add_5tuple_filter_82576(dev, 4519 ntuple_filter); 4520 else 4521 ret = igb_remove_5tuple_filter_82576(dev, 4522 ntuple_filter); 4523 break; 4524 case RTE_2TUPLE_FLAGS: 4525 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4526 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 && 4527 hw->mac.type != e1000_i210 && 4528 hw->mac.type != e1000_i211) 4529 return -ENOTSUP; 4530 if (add) 4531 ret = igb_add_2tuple_filter(dev, ntuple_filter); 4532 else 4533 ret = igb_remove_2tuple_filter(dev, ntuple_filter); 4534 break; 4535 default: 4536 ret = -EINVAL; 4537 break; 4538 } 4539 4540 return ret; 4541 } 4542 4543 /* 4544 * igb_get_ntuple_filter - get a ntuple filter 4545 * 4546 * @param 4547 * dev: Pointer to struct rte_eth_dev. 4548 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 4549 * 4550 * @return 4551 * - On success, zero. 4552 * - On failure, a negative value. 
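 *
 * Only the queue assignment of a previously added filter is reported back;
 * the caller supplies the full match key in ntuple_filter.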
4553 */ 4554 static int 4555 igb_get_ntuple_filter(struct rte_eth_dev *dev, 4556 struct rte_eth_ntuple_filter *ntuple_filter) 4557 { 4558 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4559 struct e1000_filter_info *filter_info = 4560 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4561 struct e1000_5tuple_filter_info filter_5tuple; 4562 struct e1000_2tuple_filter_info filter_2tuple; 4563 struct e1000_5tuple_filter *p_5tuple_filter; 4564 struct e1000_2tuple_filter *p_2tuple_filter; 4565 int ret; 4566 4567 switch (ntuple_filter->flags) { 4568 case RTE_5TUPLE_FLAGS: 4569 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4570 if (hw->mac.type != e1000_82576) 4571 return -ENOTSUP; 4572 memset(&filter_5tuple, 4573 0, 4574 sizeof(struct e1000_5tuple_filter_info)); 4575 ret = ntuple_filter_to_5tuple_82576(ntuple_filter, 4576 &filter_5tuple); 4577 if (ret < 0) 4578 return ret; 4579 p_5tuple_filter = igb_5tuple_filter_lookup_82576( 4580 &filter_info->fivetuple_list, 4581 &filter_5tuple); 4582 if (p_5tuple_filter == NULL) { 4583 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4584 return -ENOENT; 4585 } 4586 ntuple_filter->queue = p_5tuple_filter->queue; 4587 break; 4588 case RTE_2TUPLE_FLAGS: 4589 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4590 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350) 4591 return -ENOTSUP; 4592 memset(&filter_2tuple, 4593 0, 4594 sizeof(struct e1000_2tuple_filter_info)); 4595 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple); 4596 if (ret < 0) 4597 return ret; 4598 p_2tuple_filter = igb_2tuple_filter_lookup( 4599 &filter_info->twotuple_list, 4600 &filter_2tuple); 4601 if (p_2tuple_filter == NULL) { 4602 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4603 return -ENOENT; 4604 } 4605 ntuple_filter->queue = p_2tuple_filter->queue; 4606 break; 4607 default: 4608 ret = -EINVAL; 4609 break; 4610 } 4611 4612 return 0; 4613 } 4614 4615 /* 4616 * igb_ntuple_filter_handle - Handle operations for ntuple filter. 4617 * @dev: pointer to rte_eth_dev structure 4618 * @filter_op:operation will be taken. 
4619 * @arg: a pointer to specific structure corresponding to the filter_op 4620 */ 4621 static int 4622 igb_ntuple_filter_handle(struct rte_eth_dev *dev, 4623 enum rte_filter_op filter_op, 4624 void *arg) 4625 { 4626 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4627 int ret; 4628 4629 MAC_TYPE_FILTER_SUP(hw->mac.type); 4630 4631 if (filter_op == RTE_ETH_FILTER_NOP) 4632 return 0; 4633 4634 if (arg == NULL) { 4635 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 4636 filter_op); 4637 return -EINVAL; 4638 } 4639 4640 switch (filter_op) { 4641 case RTE_ETH_FILTER_ADD: 4642 ret = igb_add_del_ntuple_filter(dev, 4643 (struct rte_eth_ntuple_filter *)arg, 4644 TRUE); 4645 break; 4646 case RTE_ETH_FILTER_DELETE: 4647 ret = igb_add_del_ntuple_filter(dev, 4648 (struct rte_eth_ntuple_filter *)arg, 4649 FALSE); 4650 break; 4651 case RTE_ETH_FILTER_GET: 4652 ret = igb_get_ntuple_filter(dev, 4653 (struct rte_eth_ntuple_filter *)arg); 4654 break; 4655 default: 4656 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 4657 ret = -EINVAL; 4658 break; 4659 } 4660 return ret; 4661 } 4662 4663 static inline int 4664 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info, 4665 uint16_t ethertype) 4666 { 4667 int i; 4668 4669 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 4670 if (filter_info->ethertype_filters[i].ethertype == ethertype && 4671 (filter_info->ethertype_mask & (1 << i))) 4672 return i; 4673 } 4674 return -1; 4675 } 4676 4677 static inline int 4678 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info, 4679 uint16_t ethertype, uint32_t etqf) 4680 { 4681 int i; 4682 4683 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 4684 if (!(filter_info->ethertype_mask & (1 << i))) { 4685 filter_info->ethertype_mask |= 1 << i; 4686 filter_info->ethertype_filters[i].ethertype = ethertype; 4687 filter_info->ethertype_filters[i].etqf = etqf; 4688 return i; 4689 } 4690 } 4691 return -1; 4692 } 4693 4694 int 4695 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, 4696 uint8_t idx) 4697 { 4698 if (idx >= E1000_MAX_ETQF_FILTERS) 4699 return -1; 4700 filter_info->ethertype_mask &= ~(1 << idx); 4701 filter_info->ethertype_filters[idx].ethertype = 0; 4702 filter_info->ethertype_filters[idx].etqf = 0; 4703 return idx; 4704 } 4705 4706 4707 int 4708 igb_add_del_ethertype_filter(struct rte_eth_dev *dev, 4709 struct rte_eth_ethertype_filter *filter, 4710 bool add) 4711 { 4712 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4713 struct e1000_filter_info *filter_info = 4714 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4715 uint32_t etqf = 0; 4716 int ret; 4717 4718 if (filter->ether_type == ETHER_TYPE_IPv4 || 4719 filter->ether_type == ETHER_TYPE_IPv6) { 4720 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 4721 " ethertype filter.", filter->ether_type); 4722 return -EINVAL; 4723 } 4724 4725 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 4726 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 4727 return -EINVAL; 4728 } 4729 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 4730 PMD_DRV_LOG(ERR, "drop option is unsupported."); 4731 return -EINVAL; 4732 } 4733 4734 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); 4735 if (ret >= 0 && add) { 4736 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 4737 filter->ether_type); 4738 return -EEXIST; 4739 } 4740 if (ret < 0 && !add) { 4741 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4742 filter->ether_type); 4743 return -ENOENT; 4744 } 
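	/* On add, build the ETQF entry (filter and queue enable bits, the
	 * ethertype and the target queue) and record it in a free software
	 * slot; on delete, release the slot so that a zeroed ETQF value is
	 * written back below.
	 */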
4745 4746 if (add) { 4747 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE; 4748 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE); 4749 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT; 4750 ret = igb_ethertype_filter_insert(filter_info, 4751 filter->ether_type, etqf); 4752 if (ret < 0) { 4753 PMD_DRV_LOG(ERR, "ethertype filters are full."); 4754 return -ENOSYS; 4755 } 4756 } else { 4757 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret); 4758 if (ret < 0) 4759 return -ENOSYS; 4760 } 4761 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf); 4762 E1000_WRITE_FLUSH(hw); 4763 4764 return 0; 4765 } 4766 4767 static int 4768 igb_get_ethertype_filter(struct rte_eth_dev *dev, 4769 struct rte_eth_ethertype_filter *filter) 4770 { 4771 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4772 struct e1000_filter_info *filter_info = 4773 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4774 uint32_t etqf; 4775 int ret; 4776 4777 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); 4778 if (ret < 0) { 4779 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4780 filter->ether_type); 4781 return -ENOENT; 4782 } 4783 4784 etqf = E1000_READ_REG(hw, E1000_ETQF(ret)); 4785 if (etqf & E1000_ETQF_FILTER_ENABLE) { 4786 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE; 4787 filter->flags = 0; 4788 filter->queue = (etqf & E1000_ETQF_QUEUE) >> 4789 E1000_ETQF_QUEUE_SHIFT; 4790 return 0; 4791 } 4792 4793 return -ENOENT; 4794 } 4795 4796 /* 4797 * igb_ethertype_filter_handle - Handle operations for ethertype filter. 4798 * @dev: pointer to rte_eth_dev structure 4799 * @filter_op:operation will be taken. 4800 * @arg: a pointer to specific structure corresponding to the filter_op 4801 */ 4802 static int 4803 igb_ethertype_filter_handle(struct rte_eth_dev *dev, 4804 enum rte_filter_op filter_op, 4805 void *arg) 4806 { 4807 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4808 int ret; 4809 4810 MAC_TYPE_FILTER_SUP(hw->mac.type); 4811 4812 if (filter_op == RTE_ETH_FILTER_NOP) 4813 return 0; 4814 4815 if (arg == NULL) { 4816 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 4817 filter_op); 4818 return -EINVAL; 4819 } 4820 4821 switch (filter_op) { 4822 case RTE_ETH_FILTER_ADD: 4823 ret = igb_add_del_ethertype_filter(dev, 4824 (struct rte_eth_ethertype_filter *)arg, 4825 TRUE); 4826 break; 4827 case RTE_ETH_FILTER_DELETE: 4828 ret = igb_add_del_ethertype_filter(dev, 4829 (struct rte_eth_ethertype_filter *)arg, 4830 FALSE); 4831 break; 4832 case RTE_ETH_FILTER_GET: 4833 ret = igb_get_ethertype_filter(dev, 4834 (struct rte_eth_ethertype_filter *)arg); 4835 break; 4836 default: 4837 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 4838 ret = -EINVAL; 4839 break; 4840 } 4841 return ret; 4842 } 4843 4844 static int 4845 eth_igb_filter_ctrl(struct rte_eth_dev *dev, 4846 enum rte_filter_type filter_type, 4847 enum rte_filter_op filter_op, 4848 void *arg) 4849 { 4850 int ret = 0; 4851 4852 switch (filter_type) { 4853 case RTE_ETH_FILTER_NTUPLE: 4854 ret = igb_ntuple_filter_handle(dev, filter_op, arg); 4855 break; 4856 case RTE_ETH_FILTER_ETHERTYPE: 4857 ret = igb_ethertype_filter_handle(dev, filter_op, arg); 4858 break; 4859 case RTE_ETH_FILTER_SYN: 4860 ret = eth_igb_syn_filter_handle(dev, filter_op, arg); 4861 break; 4862 case RTE_ETH_FILTER_FLEXIBLE: 4863 ret = eth_igb_flex_filter_handle(dev, filter_op, arg); 4864 break; 4865 case RTE_ETH_FILTER_GENERIC: 4866 if (filter_op != RTE_ETH_FILTER_GET) 4867 return 
-EINVAL;
4868 *(const void **)arg = &igb_flow_ops;
4869 break;
4870 default:
4871 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4872 filter_type);
4873 break;
4874 }
4875
4876 return ret;
4877 }
4878
4879 static int
4880 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
4881 struct ether_addr *mc_addr_set,
4882 uint32_t nb_mc_addr)
4883 {
4884 struct e1000_hw *hw;
4885
4886 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4887 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
4888 return 0;
4889 }
4890
4891 static uint64_t
4892 igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
4893 {
4894 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4895 uint64_t systime_cycles;
4896
4897 switch (hw->mac.type) {
4898 case e1000_i210:
4899 case e1000_i211:
4900 /*
4901 * Need to read System Time Residue Register to be able
4902 * to read the other two registers.
4903 */
4904 E1000_READ_REG(hw, E1000_SYSTIMR);
4905 /* SYSTIML stores ns and SYSTIMH stores seconds. */
4906 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4907 systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4908 * NSEC_PER_SEC;
4909 break;
4910 case e1000_82580:
4911 case e1000_i350:
4912 case e1000_i354:
4913 /*
4914 * Need to read System Time Residue Register to be able
4915 * to read the other two registers.
4916 */
4917 E1000_READ_REG(hw, E1000_SYSTIMR);
4918 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4919 /* Only the 8 LSB are valid. */
4920 systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
4921 & 0xff) << 32;
4922 break;
4923 default:
4924 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4925 systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4926 << 32;
4927 break;
4928 }
4929
4930 return systime_cycles;
4931 }
4932
4933 static uint64_t
4934 igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4935 {
4936 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4937 uint64_t rx_tstamp_cycles;
4938
4939 switch (hw->mac.type) {
4940 case e1000_i210:
4941 case e1000_i211:
4942 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
4943 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4944 rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4945 * NSEC_PER_SEC;
4946 break;
4947 case e1000_82580:
4948 case e1000_i350:
4949 case e1000_i354:
4950 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4951 /* Only the 8 LSB are valid. */
4952 rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH)
4953 & 0xff) << 32;
4954 break;
4955 default:
4956 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4957 rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4958 << 32;
4959 break;
4960 }
4961
4962 return rx_tstamp_cycles;
4963 }
4964
4965 static uint64_t
4966 igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4967 {
4968 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4969 uint64_t tx_tstamp_cycles;
4970
4971 switch (hw->mac.type) {
4972 case e1000_i210:
4973 case e1000_i211:
4974 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
4975 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4976 tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4977 * NSEC_PER_SEC;
4978 break;
4979 case e1000_82580:
4980 case e1000_i350:
4981 case e1000_i354:
4982 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4983 /* Only the 8 LSB are valid.
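 * Together with TXSTMPL this gives the same 40-bit counter layout that is
 * used for SYSTIM on the 82580/i350/i354 MACs.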
*/ 4984 tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH) 4985 & 0xff) << 32; 4986 break; 4987 default: 4988 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 4989 tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) 4990 << 32; 4991 break; 4992 } 4993 4994 return tx_tstamp_cycles; 4995 } 4996 4997 static void 4998 igb_start_timecounters(struct rte_eth_dev *dev) 4999 { 5000 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5001 struct e1000_adapter *adapter = 5002 (struct e1000_adapter *)dev->data->dev_private; 5003 uint32_t incval = 1; 5004 uint32_t shift = 0; 5005 uint64_t mask = E1000_CYCLECOUNTER_MASK; 5006 5007 switch (hw->mac.type) { 5008 case e1000_82580: 5009 case e1000_i350: 5010 case e1000_i354: 5011 /* 32 LSB bits + 8 MSB bits = 40 bits */ 5012 mask = (1ULL << 40) - 1; 5013 /* fall-through */ 5014 case e1000_i210: 5015 case e1000_i211: 5016 /* 5017 * Start incrementing the register 5018 * used to timestamp PTP packets. 5019 */ 5020 E1000_WRITE_REG(hw, E1000_TIMINCA, incval); 5021 break; 5022 case e1000_82576: 5023 incval = E1000_INCVALUE_82576; 5024 shift = IGB_82576_TSYNC_SHIFT; 5025 E1000_WRITE_REG(hw, E1000_TIMINCA, 5026 E1000_INCPERIOD_82576 | incval); 5027 break; 5028 default: 5029 /* Not supported */ 5030 return; 5031 } 5032 5033 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 5034 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 5035 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 5036 5037 adapter->systime_tc.cc_mask = mask; 5038 adapter->systime_tc.cc_shift = shift; 5039 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 5040 5041 adapter->rx_tstamp_tc.cc_mask = mask; 5042 adapter->rx_tstamp_tc.cc_shift = shift; 5043 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 5044 5045 adapter->tx_tstamp_tc.cc_mask = mask; 5046 adapter->tx_tstamp_tc.cc_shift = shift; 5047 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 5048 } 5049 5050 static int 5051 igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 5052 { 5053 struct e1000_adapter *adapter = 5054 (struct e1000_adapter *)dev->data->dev_private; 5055 5056 adapter->systime_tc.nsec += delta; 5057 adapter->rx_tstamp_tc.nsec += delta; 5058 adapter->tx_tstamp_tc.nsec += delta; 5059 5060 return 0; 5061 } 5062 5063 static int 5064 igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 5065 { 5066 uint64_t ns; 5067 struct e1000_adapter *adapter = 5068 (struct e1000_adapter *)dev->data->dev_private; 5069 5070 ns = rte_timespec_to_ns(ts); 5071 5072 /* Set the timecounters to a new value. */ 5073 adapter->systime_tc.nsec = ns; 5074 adapter->rx_tstamp_tc.nsec = ns; 5075 adapter->tx_tstamp_tc.nsec = ns; 5076 5077 return 0; 5078 } 5079 5080 static int 5081 igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 5082 { 5083 uint64_t ns, systime_cycles; 5084 struct e1000_adapter *adapter = 5085 (struct e1000_adapter *)dev->data->dev_private; 5086 5087 systime_cycles = igb_read_systime_cyclecounter(dev); 5088 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 5089 *ts = rte_ns_to_timespec(ns); 5090 5091 return 0; 5092 } 5093 5094 static int 5095 igb_timesync_enable(struct rte_eth_dev *dev) 5096 { 5097 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5098 uint32_t tsync_ctl; 5099 uint32_t tsauxc; 5100 5101 /* Stop the timesync system time. */ 5102 E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0); 5103 /* Reset the timesync system time value. 
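 * The newer MACs (82580/i350/i354/i210/i211) clear the residue register
 * first and then fall through to the 82576 case, which clears
 * SYSTIML/SYSTIMH.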
*/
5104 switch (hw->mac.type) {
5105 case e1000_82580:
5106 case e1000_i350:
5107 case e1000_i354:
5108 case e1000_i210:
5109 case e1000_i211:
5110 E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
5111 /* fall-through */
5112 case e1000_82576:
5113 E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
5114 E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
5115 break;
5116 default:
5117 /* Not supported. */
5118 return -ENOTSUP;
5119 }
5120
5121 /* Enable system time because it isn't on by default. */
5122 tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
5123 tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
5124 E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
5125
5126 igb_start_timecounters(dev);
5127
5128 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5129 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
5130 (ETHER_TYPE_1588 |
5131 E1000_ETQF_FILTER_ENABLE |
5132 E1000_ETQF_1588));
5133
5134 /* Enable timestamping of received PTP packets. */
5135 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
5136 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
5137 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
5138
5139 /* Enable timestamping of transmitted PTP packets. */
5140 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
5141 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
5142 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
5143
5144 return 0;
5145 }
5146
5147 static int
5148 igb_timesync_disable(struct rte_eth_dev *dev)
5149 {
5150 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5151 uint32_t tsync_ctl;
5152
5153 /* Disable timestamping of transmitted PTP packets. */
5154 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
5155 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
5156 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
5157
5158 /* Disable timestamping of received PTP packets. */
5159 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
5160 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
5161 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
5162
5163 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5164 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
5165
5166 /* Stop incrementing the System Time registers.
*/ 5167 E1000_WRITE_REG(hw, E1000_TIMINCA, 0); 5168 5169 return 0; 5170 } 5171 5172 static int 5173 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 5174 struct timespec *timestamp, 5175 uint32_t flags __rte_unused) 5176 { 5177 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5178 struct e1000_adapter *adapter = 5179 (struct e1000_adapter *)dev->data->dev_private; 5180 uint32_t tsync_rxctl; 5181 uint64_t rx_tstamp_cycles; 5182 uint64_t ns; 5183 5184 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); 5185 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0) 5186 return -EINVAL; 5187 5188 rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev); 5189 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 5190 *timestamp = rte_ns_to_timespec(ns); 5191 5192 return 0; 5193 } 5194 5195 static int 5196 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 5197 struct timespec *timestamp) 5198 { 5199 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5200 struct e1000_adapter *adapter = 5201 (struct e1000_adapter *)dev->data->dev_private; 5202 uint32_t tsync_txctl; 5203 uint64_t tx_tstamp_cycles; 5204 uint64_t ns; 5205 5206 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); 5207 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0) 5208 return -EINVAL; 5209 5210 tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev); 5211 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 5212 *timestamp = rte_ns_to_timespec(ns); 5213 5214 return 0; 5215 } 5216 5217 static int 5218 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused) 5219 { 5220 int count = 0; 5221 int g_ind = 0; 5222 const struct reg_info *reg_group; 5223 5224 while ((reg_group = igb_regs[g_ind++])) 5225 count += igb_reg_group_count(reg_group); 5226 5227 return count; 5228 } 5229 5230 static int 5231 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 5232 { 5233 int count = 0; 5234 int g_ind = 0; 5235 const struct reg_info *reg_group; 5236 5237 while ((reg_group = igbvf_regs[g_ind++])) 5238 count += igb_reg_group_count(reg_group); 5239 5240 return count; 5241 } 5242 5243 static int 5244 eth_igb_get_regs(struct rte_eth_dev *dev, 5245 struct rte_dev_reg_info *regs) 5246 { 5247 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5248 uint32_t *data = regs->data; 5249 int g_ind = 0; 5250 int count = 0; 5251 const struct reg_info *reg_group; 5252 5253 if (data == NULL) { 5254 regs->length = eth_igb_get_reg_length(dev); 5255 regs->width = sizeof(uint32_t); 5256 return 0; 5257 } 5258 5259 /* Support only full register dump */ 5260 if ((regs->length == 0) || 5261 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) { 5262 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 5263 hw->device_id; 5264 while ((reg_group = igb_regs[g_ind++])) 5265 count += igb_read_regs_group(dev, &data[count], 5266 reg_group); 5267 return 0; 5268 } 5269 5270 return -ENOTSUP; 5271 } 5272 5273 static int 5274 igbvf_get_regs(struct rte_eth_dev *dev, 5275 struct rte_dev_reg_info *regs) 5276 { 5277 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5278 uint32_t *data = regs->data; 5279 int g_ind = 0; 5280 int count = 0; 5281 const struct reg_info *reg_group; 5282 5283 if (data == NULL) { 5284 regs->length = igbvf_get_reg_length(dev); 5285 regs->width = sizeof(uint32_t); 5286 return 0; 5287 } 5288 5289 /* Support only full register dump */ 5290 if ((regs->length == 0) || 5291 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) { 
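		/* Encode MAC type, revision and device id into the version
		 * field, then copy every VF register group into the caller's
		 * buffer. */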
5292 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 5293 hw->device_id; 5294 while ((reg_group = igbvf_regs[g_ind++])) 5295 count += igb_read_regs_group(dev, &data[count], 5296 reg_group); 5297 return 0; 5298 } 5299 5300 return -ENOTSUP; 5301 } 5302 5303 static int 5304 eth_igb_get_eeprom_length(struct rte_eth_dev *dev) 5305 { 5306 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5307 5308 /* Return unit is byte count */ 5309 return hw->nvm.word_size * 2; 5310 } 5311 5312 static int 5313 eth_igb_get_eeprom(struct rte_eth_dev *dev, 5314 struct rte_dev_eeprom_info *in_eeprom) 5315 { 5316 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5317 struct e1000_nvm_info *nvm = &hw->nvm; 5318 uint16_t *data = in_eeprom->data; 5319 int first, length; 5320 5321 first = in_eeprom->offset >> 1; 5322 length = in_eeprom->length >> 1; 5323 if ((first >= hw->nvm.word_size) || 5324 ((first + length) >= hw->nvm.word_size)) 5325 return -EINVAL; 5326 5327 in_eeprom->magic = hw->vendor_id | 5328 ((uint32_t)hw->device_id << 16); 5329 5330 if ((nvm->ops.read) == NULL) 5331 return -ENOTSUP; 5332 5333 return nvm->ops.read(hw, first, length, data); 5334 } 5335 5336 static int 5337 eth_igb_set_eeprom(struct rte_eth_dev *dev, 5338 struct rte_dev_eeprom_info *in_eeprom) 5339 { 5340 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5341 struct e1000_nvm_info *nvm = &hw->nvm; 5342 uint16_t *data = in_eeprom->data; 5343 int first, length; 5344 5345 first = in_eeprom->offset >> 1; 5346 length = in_eeprom->length >> 1; 5347 if ((first >= hw->nvm.word_size) || 5348 ((first + length) >= hw->nvm.word_size)) 5349 return -EINVAL; 5350 5351 in_eeprom->magic = (uint32_t)hw->vendor_id | 5352 ((uint32_t)hw->device_id << 16); 5353 5354 if ((nvm->ops.write) == NULL) 5355 return -ENOTSUP; 5356 return nvm->ops.write(hw, first, length, data); 5357 } 5358 5359 static int 5360 eth_igb_get_module_info(struct rte_eth_dev *dev, 5361 struct rte_eth_dev_module_info *modinfo) 5362 { 5363 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5364 5365 uint32_t status = 0; 5366 uint16_t sff8472_rev, addr_mode; 5367 bool page_swap = false; 5368 5369 if (hw->phy.media_type == e1000_media_type_copper || 5370 hw->phy.media_type == e1000_media_type_unknown) 5371 return -EOPNOTSUPP; 5372 5373 /* Check whether we support SFF-8472 or not */ 5374 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); 5375 if (status) 5376 return -EIO; 5377 5378 /* addressing mode is not supported */ 5379 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); 5380 if (status) 5381 return -EIO; 5382 5383 /* addressing mode is not supported */ 5384 if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { 5385 PMD_DRV_LOG(ERR, 5386 "Address change required to access page 0xA2, " 5387 "but not supported. 
Please report the module " 5388 "type to the driver maintainers.\n"); 5389 page_swap = true; 5390 } 5391 5392 if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { 5393 /* We have an SFP, but it does not support SFF-8472 */ 5394 modinfo->type = RTE_ETH_MODULE_SFF_8079; 5395 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 5396 } else { 5397 /* We have an SFP which supports a revision of SFF-8472 */ 5398 modinfo->type = RTE_ETH_MODULE_SFF_8472; 5399 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 5400 } 5401 5402 return 0; 5403 } 5404 5405 static int 5406 eth_igb_get_module_eeprom(struct rte_eth_dev *dev, 5407 struct rte_dev_eeprom_info *info) 5408 { 5409 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5410 5411 uint32_t status = 0; 5412 uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1]; 5413 u16 first_word, last_word; 5414 int i = 0; 5415 5416 if (info->length == 0) 5417 return -EINVAL; 5418 5419 first_word = info->offset >> 1; 5420 last_word = (info->offset + info->length - 1) >> 1; 5421 5422 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ 5423 for (i = 0; i < last_word - first_word + 1; i++) { 5424 status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2, 5425 &dataword[i]); 5426 if (status) { 5427 /* Error occurred while reading module */ 5428 return -EIO; 5429 } 5430 5431 dataword[i] = rte_be_to_cpu_16(dataword[i]); 5432 } 5433 5434 memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length); 5435 5436 return 0; 5437 } 5438 5439 static int 5440 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5441 { 5442 struct e1000_hw *hw = 5443 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5444 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5445 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5446 uint32_t vec = E1000_MISC_VEC_ID; 5447 5448 if (rte_intr_allow_others(intr_handle)) 5449 vec = E1000_RX_VEC_START; 5450 5451 uint32_t mask = 1 << (queue_id + vec); 5452 5453 E1000_WRITE_REG(hw, E1000_EIMC, mask); 5454 E1000_WRITE_FLUSH(hw); 5455 5456 return 0; 5457 } 5458 5459 static int 5460 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5461 { 5462 struct e1000_hw *hw = 5463 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5464 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5465 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5466 uint32_t vec = E1000_MISC_VEC_ID; 5467 5468 if (rte_intr_allow_others(intr_handle)) 5469 vec = E1000_RX_VEC_START; 5470 5471 uint32_t mask = 1 << (queue_id + vec); 5472 uint32_t regval; 5473 5474 regval = E1000_READ_REG(hw, E1000_EIMS); 5475 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); 5476 E1000_WRITE_FLUSH(hw); 5477 5478 rte_intr_enable(intr_handle); 5479 5480 return 0; 5481 } 5482 5483 static void 5484 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, 5485 uint8_t index, uint8_t offset) 5486 { 5487 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 5488 5489 /* clear bits */ 5490 val &= ~((uint32_t)0xFF << offset); 5491 5492 /* write vector and valid bit */ 5493 val |= (msix_vector | E1000_IVAR_VALID) << offset; 5494 5495 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val); 5496 } 5497 5498 static void 5499 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction, 5500 uint8_t queue, uint8_t msix_vector) 5501 { 5502 uint32_t tmp = 0; 5503 5504 if (hw->mac.type == e1000_82575) { 5505 if (direction == 0) 5506 tmp = E1000_EICR_RX_QUEUE0 << queue; 5507 else if (direction == 1) 5508 tmp 
static void
eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
		   uint8_t index, uint8_t offset)
{
	uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (msix_vector | E1000_IVAR_VALID) << offset;

	E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
}

static void
eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
			   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	if (hw->mac.type == e1000_82575) {
		if (direction == 0)
			tmp = E1000_EICR_RX_QUEUE0 << queue;
		else if (direction == 1)
			tmp = E1000_EICR_TX_QUEUE0 << queue;
		E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
	} else if (hw->mac.type == e1000_82576) {
		if ((direction == 0) || (direction == 1))
			eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
					   ((queue & 0x8) << 1) +
					   8 * direction);
	} else if ((hw->mac.type == e1000_82580) ||
		   (hw->mac.type == e1000_i350) ||
		   (hw->mac.type == e1000_i354) ||
		   (hw->mac.type == e1000_i210) ||
		   (hw->mac.type == e1000_i211)) {
		if ((direction == 0) || (direction == 1))
			eth_igb_write_ivar(hw, msix_vector,
					   queue >> 1,
					   ((queue & 0x1) << 4) +
					   8 * direction);
	}
}

/* Sets up the hardware to generate MSI-X interrupts properly
 * @dev
 *  pointer to the ethdev structure (board private data)
 */
static void
eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
{
	int queue_id;
	uint32_t tmpval, regval, intr_mask;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vec = E1000_MISC_VEC_ID;
	uint32_t base = E1000_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* Don't configure the MSI-X registers if no mapping has been done
	 * between interrupt vectors and event fds.
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		vec = base = E1000_RX_VEC_START;
		misc_shift = 1;
	}

	/* set interrupt vector for other causes */
	if (hw->mac.type == e1000_82575) {
		tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmpval |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read */
		tmpval |= E1000_CTRL_EXT_EIAME;
		tmpval |= E1000_CTRL_EXT_IRCA;

		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);

		/* enable msix_other interrupt */
		E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAM);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
	} else if ((hw->mac.type == e1000_82576) ||
		   (hw->mac.type == e1000_82580) ||
		   (hw->mac.type == e1000_i350) ||
		   (hw->mac.type == e1000_i354) ||
		   (hw->mac.type == e1000_i210) ||
		   (hw->mac.type == e1000_i211)) {
		/* turn on MSI-X capability first */
		E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
				E1000_GPIE_PBA | E1000_GPIE_EIAME |
				E1000_GPIE_NSICR);
		intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
			misc_shift;
		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);

		/* enable msix_other interrupt */
		regval = E1000_READ_REG(hw, E1000_EIMS);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
		tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
	}

	/* use EIAM to auto-mask when MSI-X interrupt is asserted;
	 * this saves a register write for every interrupt
	 */
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
		misc_shift;
	regval = E1000_READ_REG(hw, E1000_EIAM);
	E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);

	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
		eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	E1000_WRITE_FLUSH(hw);
}
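/*
 * The helpers below re-program filters that the application configured
 * through the flow/filter APIs after the hardware has been reset.  As an
 * illustration of the call chain (based on how this PMD is structured, not
 * an API guarantee), the restore path is expected to run from the device
 * start sequence, roughly:
 *
 *	eth_igb_start(dev)
 *	    -> igb_filter_restore(dev)
 *	        -> igb_ntuple_filter_restore(dev)
 *	        -> igb_ethertype_filter_restore(dev)
 *	        -> igb_syn_filter_restore(dev)
 *	        -> igb_flex_filter_restore(dev)
 *	        -> igb_rss_filter_restore(dev)
 *
 * Each helper simply replays the software copy kept in
 * struct e1000_filter_info back into the matching hardware registers.
 */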
/* restore n-tuple filter */
static inline void
igb_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
		igb_inject_5tuple_filter_82576(dev, p_5tuple);
	}

	TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
		igb_inject_2uple_filter(dev, p_2tuple);
	}
}

/* restore SYN filter */
static inline void
igb_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t synqf;

	synqf = filter_info->syn_info;

	if (synqf & E1000_SYN_FILTER_ENABLE) {
		E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
		E1000_WRITE_FLUSH(hw);
	}
}

/* restore ethernet type filter */
static inline void
igb_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			E1000_WRITE_REG(hw, E1000_ETQF(i),
				filter_info->ethertype_filters[i].etqf);
			E1000_WRITE_FLUSH(hw);
		}
	}
}

/* restore flex byte filter */
static inline void
igb_flex_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter;

	TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
		igb_inject_flex_filter(dev, flex_filter);
	}
}

/* restore rss filter */
static inline void
igb_rss_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->rss_info.conf.queue_num)
		igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
}

/* restore all types filter */
static int
igb_filter_restore(struct rte_eth_dev *dev)
{
	igb_ntuple_filter_restore(dev);
	igb_ethertype_filter_restore(dev);
	igb_syn_filter_restore(dev);
	igb_flex_filter_restore(dev);
	igb_rss_filter_restore(dev);

	return 0;
}

RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");

/* see e1000_logs.c */
RTE_INIT(e1000_init_log)
{
	e1000_igb_init_log();
}
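/*
 * Deployment note (illustrative, not enforced by this file): the
 * RTE_PMD_REGISTER_KMOD_DEP() strings above mean the device must be bound to
 * one of the listed kernel modules before EAL can attach this PMD.  A typical
 * (assumed) invocation of the DPDK bind tool, using a made-up PCI address:
 *
 *	usertools/dpdk-devbind.py --bind=vfio-pci 0000:01:00.0
 *
 * Any of igb_uio, uio_pci_generic or vfio-pci satisfies the PF registration;
 * the VF registration above lists only igb_uio and vfio-pci.
 */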