1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2016 Intel Corporation 3 */ 4 5 #include <sys/queue.h> 6 #include <stdio.h> 7 #include <errno.h> 8 #include <stdint.h> 9 #include <stdarg.h> 10 11 #include <rte_common.h> 12 #include <rte_interrupts.h> 13 #include <rte_byteorder.h> 14 #include <rte_log.h> 15 #include <rte_debug.h> 16 #include <rte_pci.h> 17 #include <rte_bus_pci.h> 18 #include <rte_ether.h> 19 #include <rte_ethdev_driver.h> 20 #include <rte_ethdev_pci.h> 21 #include <rte_memory.h> 22 #include <rte_eal.h> 23 #include <rte_malloc.h> 24 #include <rte_dev.h> 25 26 #include "e1000_logs.h" 27 #include "base/e1000_api.h" 28 #include "e1000_ethdev.h" 29 #include "igb_regs.h" 30 31 /* 32 * Default values for port configuration 33 */ 34 #define IGB_DEFAULT_RX_FREE_THRESH 32 35 36 #define IGB_DEFAULT_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8) 37 #define IGB_DEFAULT_RX_HTHRESH 8 38 #define IGB_DEFAULT_RX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 4) 39 40 #define IGB_DEFAULT_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) 41 #define IGB_DEFAULT_TX_HTHRESH 1 42 #define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16) 43 44 /* Bit shift and mask */ 45 #define IGB_4_BIT_WIDTH (CHAR_BIT / 2) 46 #define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t) 47 #define IGB_8_BIT_WIDTH CHAR_BIT 48 #define IGB_8_BIT_MASK UINT8_MAX 49 50 /* Additional timesync values. */ 51 #define E1000_CYCLECOUNTER_MASK 0xffffffffffffffffULL 52 #define E1000_ETQF_FILTER_1588 3 53 #define IGB_82576_TSYNC_SHIFT 16 54 #define E1000_INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) 55 #define E1000_INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) 56 #define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000 57 58 #define E1000_VTIVAR_MISC 0x01740 59 #define E1000_VTIVAR_MISC_MASK 0xFF 60 #define E1000_VTIVAR_VALID 0x80 61 #define E1000_VTIVAR_MISC_MAILBOX 0 62 #define E1000_VTIVAR_MISC_INTR_MASK 0x3 63 64 /* External VLAN Enable bit mask */ 65 #define E1000_CTRL_EXT_EXT_VLAN (1 << 26) 66 67 /* External VLAN Ether Type bit mask and shift */ 68 #define E1000_VET_VET_EXT 0xFFFF0000 69 #define E1000_VET_VET_EXT_SHIFT 16 70 71 /* MSI-X other interrupt vector */ 72 #define IGB_MSIX_OTHER_INTR_VEC 0 73 74 static int eth_igb_configure(struct rte_eth_dev *dev); 75 static int eth_igb_start(struct rte_eth_dev *dev); 76 static void eth_igb_stop(struct rte_eth_dev *dev); 77 static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev); 78 static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev); 79 static void eth_igb_close(struct rte_eth_dev *dev); 80 static int eth_igb_reset(struct rte_eth_dev *dev); 81 static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev); 82 static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev); 83 static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev); 84 static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev); 85 static int eth_igb_link_update(struct rte_eth_dev *dev, 86 int wait_to_complete); 87 static int eth_igb_stats_get(struct rte_eth_dev *dev, 88 struct rte_eth_stats *rte_stats); 89 static int eth_igb_xstats_get(struct rte_eth_dev *dev, 90 struct rte_eth_xstat *xstats, unsigned n); 91 static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, 92 const uint64_t *ids, 93 uint64_t *values, unsigned int n); 94 static int eth_igb_xstats_get_names(struct rte_eth_dev *dev, 95 struct rte_eth_xstat_name *xstats_names, 96 unsigned int size); 97 static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev, 98 struct 
rte_eth_xstat_name *xstats_names, const uint64_t *ids, 99 unsigned int limit); 100 static void eth_igb_stats_reset(struct rte_eth_dev *dev); 101 static void eth_igb_xstats_reset(struct rte_eth_dev *dev); 102 static int eth_igb_fw_version_get(struct rte_eth_dev *dev, 103 char *fw_version, size_t fw_size); 104 static void eth_igb_infos_get(struct rte_eth_dev *dev, 105 struct rte_eth_dev_info *dev_info); 106 static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev); 107 static void eth_igbvf_infos_get(struct rte_eth_dev *dev, 108 struct rte_eth_dev_info *dev_info); 109 static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, 110 struct rte_eth_fc_conf *fc_conf); 111 static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, 112 struct rte_eth_fc_conf *fc_conf); 113 static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); 114 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev); 115 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev); 116 static int eth_igb_interrupt_action(struct rte_eth_dev *dev, 117 struct rte_intr_handle *handle); 118 static void eth_igb_interrupt_handler(void *param); 119 static int igb_hardware_init(struct e1000_hw *hw); 120 static void igb_hw_control_acquire(struct e1000_hw *hw); 121 static void igb_hw_control_release(struct e1000_hw *hw); 122 static void igb_init_manageability(struct e1000_hw *hw); 123 static void igb_release_manageability(struct e1000_hw *hw); 124 125 static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 126 127 static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev, 128 uint16_t vlan_id, int on); 129 static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, 130 enum rte_vlan_type vlan_type, 131 uint16_t tpid_id); 132 static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask); 133 134 static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev); 135 static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev); 136 static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev); 137 static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev); 138 static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev); 139 static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev); 140 141 static int eth_igb_led_on(struct rte_eth_dev *dev); 142 static int eth_igb_led_off(struct rte_eth_dev *dev); 143 144 static void igb_intr_disable(struct rte_eth_dev *dev); 145 static int igb_get_rx_buffer_size(struct e1000_hw *hw); 146 static int eth_igb_rar_set(struct rte_eth_dev *dev, 147 struct ether_addr *mac_addr, 148 uint32_t index, uint32_t pool); 149 static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index); 150 static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, 151 struct ether_addr *addr); 152 153 static void igbvf_intr_disable(struct e1000_hw *hw); 154 static int igbvf_dev_configure(struct rte_eth_dev *dev); 155 static int igbvf_dev_start(struct rte_eth_dev *dev); 156 static void igbvf_dev_stop(struct rte_eth_dev *dev); 157 static void igbvf_dev_close(struct rte_eth_dev *dev); 158 static void igbvf_promiscuous_enable(struct rte_eth_dev *dev); 159 static void igbvf_promiscuous_disable(struct rte_eth_dev *dev); 160 static void igbvf_allmulticast_enable(struct rte_eth_dev *dev); 161 static void igbvf_allmulticast_disable(struct rte_eth_dev *dev); 162 static int eth_igbvf_link_update(struct e1000_hw *hw); 163 static int eth_igbvf_stats_get(struct rte_eth_dev *dev, 164 struct rte_eth_stats *rte_stats); 165 static int 
eth_igbvf_xstats_get(struct rte_eth_dev *dev, 166 struct rte_eth_xstat *xstats, unsigned n); 167 static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev, 168 struct rte_eth_xstat_name *xstats_names, 169 unsigned limit); 170 static void eth_igbvf_stats_reset(struct rte_eth_dev *dev); 171 static int igbvf_vlan_filter_set(struct rte_eth_dev *dev, 172 uint16_t vlan_id, int on); 173 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on); 174 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on); 175 static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev, 176 struct ether_addr *addr); 177 static int igbvf_get_reg_length(struct rte_eth_dev *dev); 178 static int igbvf_get_regs(struct rte_eth_dev *dev, 179 struct rte_dev_reg_info *regs); 180 181 static int eth_igb_rss_reta_update(struct rte_eth_dev *dev, 182 struct rte_eth_rss_reta_entry64 *reta_conf, 183 uint16_t reta_size); 184 static int eth_igb_rss_reta_query(struct rte_eth_dev *dev, 185 struct rte_eth_rss_reta_entry64 *reta_conf, 186 uint16_t reta_size); 187 188 static int eth_igb_syn_filter_get(struct rte_eth_dev *dev, 189 struct rte_eth_syn_filter *filter); 190 static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev, 191 enum rte_filter_op filter_op, 192 void *arg); 193 static int igb_add_2tuple_filter(struct rte_eth_dev *dev, 194 struct rte_eth_ntuple_filter *ntuple_filter); 195 static int igb_remove_2tuple_filter(struct rte_eth_dev *dev, 196 struct rte_eth_ntuple_filter *ntuple_filter); 197 static int eth_igb_get_flex_filter(struct rte_eth_dev *dev, 198 struct rte_eth_flex_filter *filter); 199 static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev, 200 enum rte_filter_op filter_op, 201 void *arg); 202 static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, 203 struct rte_eth_ntuple_filter *ntuple_filter); 204 static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, 205 struct rte_eth_ntuple_filter *ntuple_filter); 206 static int igb_get_ntuple_filter(struct rte_eth_dev *dev, 207 struct rte_eth_ntuple_filter *filter); 208 static int igb_ntuple_filter_handle(struct rte_eth_dev *dev, 209 enum rte_filter_op filter_op, 210 void *arg); 211 static int igb_ethertype_filter_handle(struct rte_eth_dev *dev, 212 enum rte_filter_op filter_op, 213 void *arg); 214 static int igb_get_ethertype_filter(struct rte_eth_dev *dev, 215 struct rte_eth_ethertype_filter *filter); 216 static int eth_igb_filter_ctrl(struct rte_eth_dev *dev, 217 enum rte_filter_type filter_type, 218 enum rte_filter_op filter_op, 219 void *arg); 220 static int eth_igb_get_reg_length(struct rte_eth_dev *dev); 221 static int eth_igb_get_regs(struct rte_eth_dev *dev, 222 struct rte_dev_reg_info *regs); 223 static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev); 224 static int eth_igb_get_eeprom(struct rte_eth_dev *dev, 225 struct rte_dev_eeprom_info *eeprom); 226 static int eth_igb_set_eeprom(struct rte_eth_dev *dev, 227 struct rte_dev_eeprom_info *eeprom); 228 static int eth_igb_get_module_info(struct rte_eth_dev *dev, 229 struct rte_eth_dev_module_info *modinfo); 230 static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev, 231 struct rte_dev_eeprom_info *info); 232 static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev, 233 struct ether_addr *mc_addr_set, 234 uint32_t nb_mc_addr); 235 static int igb_timesync_enable(struct rte_eth_dev *dev); 236 static int igb_timesync_disable(struct rte_eth_dev *dev); 237 static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 238 struct timespec *timestamp, 
239 uint32_t flags); 240 static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 241 struct timespec *timestamp); 242 static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 243 static int igb_timesync_read_time(struct rte_eth_dev *dev, 244 struct timespec *timestamp); 245 static int igb_timesync_write_time(struct rte_eth_dev *dev, 246 const struct timespec *timestamp); 247 static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, 248 uint16_t queue_id); 249 static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, 250 uint16_t queue_id); 251 static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction, 252 uint8_t queue, uint8_t msix_vector); 253 static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, 254 uint8_t index, uint8_t offset); 255 static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev); 256 static void eth_igbvf_interrupt_handler(void *param); 257 static void igbvf_mbx_process(struct rte_eth_dev *dev); 258 static int igb_filter_restore(struct rte_eth_dev *dev); 259 260 /* 261 * Define VF Stats MACRO for Non "cleared on read" register 262 */ 263 #define UPDATE_VF_STAT(reg, last, cur) \ 264 { \ 265 u32 latest = E1000_READ_REG(hw, reg); \ 266 cur += (latest - last) & UINT_MAX; \ 267 last = latest; \ 268 } 269 270 #define IGB_FC_PAUSE_TIME 0x0680 271 #define IGB_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ 272 #define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ 273 274 #define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */ 275 276 static enum e1000_fc_mode igb_fc_setting = e1000_fc_full; 277 278 /* 279 * The set of PCI devices this driver supports 280 */ 281 static const struct rte_pci_id pci_id_igb_map[] = { 282 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) }, 283 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) }, 284 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) }, 285 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) }, 286 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) }, 287 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) }, 288 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) }, 289 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) }, 290 291 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) }, 292 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) }, 293 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) }, 294 295 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) }, 296 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) }, 297 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) }, 298 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) }, 299 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) }, 300 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) }, 301 302 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) }, 303 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) }, 304 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) }, 305 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) }, 306 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) }, 307 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) }, 308 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) }, 309 { 
RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) }, 310 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) }, 311 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) }, 312 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) }, 313 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) }, 314 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) }, 315 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) }, 316 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, 317 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) }, 318 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, 319 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) }, 320 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) }, 321 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) }, 322 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) }, 323 { .vendor_id = 0, /* sentinel */ }, 324 }; 325 326 /* 327 * The set of PCI devices this driver supports (for 82576&I350 VF) 328 */ 329 static const struct rte_pci_id pci_id_igbvf_map[] = { 330 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) }, 331 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) }, 332 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) }, 333 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) }, 334 { .vendor_id = 0, /* sentinel */ }, 335 }; 336 337 static const struct rte_eth_desc_lim rx_desc_lim = { 338 .nb_max = E1000_MAX_RING_DESC, 339 .nb_min = E1000_MIN_RING_DESC, 340 .nb_align = IGB_RXD_ALIGN, 341 }; 342 343 static const struct rte_eth_desc_lim tx_desc_lim = { 344 .nb_max = E1000_MAX_RING_DESC, 345 .nb_min = E1000_MIN_RING_DESC, 346 .nb_align = IGB_RXD_ALIGN, 347 .nb_seg_max = IGB_TX_MAX_SEG, 348 .nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG, 349 }; 350 351 static const struct eth_dev_ops eth_igb_ops = { 352 .dev_configure = eth_igb_configure, 353 .dev_start = eth_igb_start, 354 .dev_stop = eth_igb_stop, 355 .dev_set_link_up = eth_igb_dev_set_link_up, 356 .dev_set_link_down = eth_igb_dev_set_link_down, 357 .dev_close = eth_igb_close, 358 .dev_reset = eth_igb_reset, 359 .promiscuous_enable = eth_igb_promiscuous_enable, 360 .promiscuous_disable = eth_igb_promiscuous_disable, 361 .allmulticast_enable = eth_igb_allmulticast_enable, 362 .allmulticast_disable = eth_igb_allmulticast_disable, 363 .link_update = eth_igb_link_update, 364 .stats_get = eth_igb_stats_get, 365 .xstats_get = eth_igb_xstats_get, 366 .xstats_get_by_id = eth_igb_xstats_get_by_id, 367 .xstats_get_names_by_id = eth_igb_xstats_get_names_by_id, 368 .xstats_get_names = eth_igb_xstats_get_names, 369 .stats_reset = eth_igb_stats_reset, 370 .xstats_reset = eth_igb_xstats_reset, 371 .fw_version_get = eth_igb_fw_version_get, 372 .dev_infos_get = eth_igb_infos_get, 373 .dev_supported_ptypes_get = eth_igb_supported_ptypes_get, 374 .mtu_set = eth_igb_mtu_set, 375 .vlan_filter_set = eth_igb_vlan_filter_set, 376 .vlan_tpid_set = eth_igb_vlan_tpid_set, 377 .vlan_offload_set = eth_igb_vlan_offload_set, 378 .rx_queue_setup = eth_igb_rx_queue_setup, 379 .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable, 380 .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable, 381 .rx_queue_release = eth_igb_rx_queue_release, 382 .rx_queue_count = eth_igb_rx_queue_count, 383 .rx_descriptor_done = eth_igb_rx_descriptor_done, 384 
.rx_descriptor_status = eth_igb_rx_descriptor_status, 385 .tx_descriptor_status = eth_igb_tx_descriptor_status, 386 .tx_queue_setup = eth_igb_tx_queue_setup, 387 .tx_queue_release = eth_igb_tx_queue_release, 388 .tx_done_cleanup = eth_igb_tx_done_cleanup, 389 .dev_led_on = eth_igb_led_on, 390 .dev_led_off = eth_igb_led_off, 391 .flow_ctrl_get = eth_igb_flow_ctrl_get, 392 .flow_ctrl_set = eth_igb_flow_ctrl_set, 393 .mac_addr_add = eth_igb_rar_set, 394 .mac_addr_remove = eth_igb_rar_clear, 395 .mac_addr_set = eth_igb_default_mac_addr_set, 396 .reta_update = eth_igb_rss_reta_update, 397 .reta_query = eth_igb_rss_reta_query, 398 .rss_hash_update = eth_igb_rss_hash_update, 399 .rss_hash_conf_get = eth_igb_rss_hash_conf_get, 400 .filter_ctrl = eth_igb_filter_ctrl, 401 .set_mc_addr_list = eth_igb_set_mc_addr_list, 402 .rxq_info_get = igb_rxq_info_get, 403 .txq_info_get = igb_txq_info_get, 404 .timesync_enable = igb_timesync_enable, 405 .timesync_disable = igb_timesync_disable, 406 .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp, 407 .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp, 408 .get_reg = eth_igb_get_regs, 409 .get_eeprom_length = eth_igb_get_eeprom_length, 410 .get_eeprom = eth_igb_get_eeprom, 411 .set_eeprom = eth_igb_set_eeprom, 412 .get_module_info = eth_igb_get_module_info, 413 .get_module_eeprom = eth_igb_get_module_eeprom, 414 .timesync_adjust_time = igb_timesync_adjust_time, 415 .timesync_read_time = igb_timesync_read_time, 416 .timesync_write_time = igb_timesync_write_time, 417 }; 418 419 /* 420 * dev_ops for virtual function, bare necessities for basic vf 421 * operation have been implemented 422 */ 423 static const struct eth_dev_ops igbvf_eth_dev_ops = { 424 .dev_configure = igbvf_dev_configure, 425 .dev_start = igbvf_dev_start, 426 .dev_stop = igbvf_dev_stop, 427 .dev_close = igbvf_dev_close, 428 .promiscuous_enable = igbvf_promiscuous_enable, 429 .promiscuous_disable = igbvf_promiscuous_disable, 430 .allmulticast_enable = igbvf_allmulticast_enable, 431 .allmulticast_disable = igbvf_allmulticast_disable, 432 .link_update = eth_igb_link_update, 433 .stats_get = eth_igbvf_stats_get, 434 .xstats_get = eth_igbvf_xstats_get, 435 .xstats_get_names = eth_igbvf_xstats_get_names, 436 .stats_reset = eth_igbvf_stats_reset, 437 .xstats_reset = eth_igbvf_stats_reset, 438 .vlan_filter_set = igbvf_vlan_filter_set, 439 .dev_infos_get = eth_igbvf_infos_get, 440 .dev_supported_ptypes_get = eth_igb_supported_ptypes_get, 441 .rx_queue_setup = eth_igb_rx_queue_setup, 442 .rx_queue_release = eth_igb_rx_queue_release, 443 .rx_descriptor_done = eth_igb_rx_descriptor_done, 444 .rx_descriptor_status = eth_igb_rx_descriptor_status, 445 .tx_descriptor_status = eth_igb_tx_descriptor_status, 446 .tx_queue_setup = eth_igb_tx_queue_setup, 447 .tx_queue_release = eth_igb_tx_queue_release, 448 .set_mc_addr_list = eth_igb_set_mc_addr_list, 449 .rxq_info_get = igb_rxq_info_get, 450 .txq_info_get = igb_txq_info_get, 451 .mac_addr_set = igbvf_default_mac_addr_set, 452 .get_reg = igbvf_get_regs, 453 }; 454 455 /* store statistics names and its offset in stats structure */ 456 struct rte_igb_xstats_name_off { 457 char name[RTE_ETH_XSTATS_NAME_SIZE]; 458 unsigned offset; 459 }; 460 461 static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = { 462 {"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)}, 463 {"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)}, 464 {"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)}, 465 {"rx_missed_packets", 
offsetof(struct e1000_hw_stats, mpc)}, 466 {"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)}, 467 {"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)}, 468 {"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats, 469 ecol)}, 470 {"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)}, 471 {"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)}, 472 {"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)}, 473 {"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)}, 474 {"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)}, 475 {"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)}, 476 {"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)}, 477 {"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)}, 478 {"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)}, 479 {"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)}, 480 {"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats, 481 fcruc)}, 482 {"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)}, 483 {"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)}, 484 {"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)}, 485 {"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)}, 486 {"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats, 487 prc1023)}, 488 {"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats, 489 prc1522)}, 490 {"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)}, 491 {"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)}, 492 {"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)}, 493 {"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)}, 494 {"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)}, 495 {"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)}, 496 {"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)}, 497 {"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)}, 498 {"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)}, 499 {"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)}, 500 {"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)}, 501 {"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)}, 502 {"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)}, 503 {"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)}, 504 {"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)}, 505 {"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)}, 506 {"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)}, 507 {"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats, 508 ptc1023)}, 509 {"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats, 510 ptc1522)}, 511 {"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)}, 512 {"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)}, 513 {"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)}, 514 {"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)}, 515 {"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)}, 516 {"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)}, 517 {"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)}, 518 519 {"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)}, 520 }; 521 522 #define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \ 523 sizeof(rte_igb_stats_strings[0])) 524 525 static const struct 
rte_igb_xstats_name_off rte_igbvf_stats_strings[] = { 526 {"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)}, 527 {"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)}, 528 {"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)}, 529 {"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)}, 530 {"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)}, 531 }; 532 533 #define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \ 534 sizeof(rte_igbvf_stats_strings[0])) 535 536 537 static inline void 538 igb_intr_enable(struct rte_eth_dev *dev) 539 { 540 struct e1000_interrupt *intr = 541 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 542 struct e1000_hw *hw = 543 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 544 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 545 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 546 547 if (rte_intr_allow_others(intr_handle) && 548 dev->data->dev_conf.intr_conf.lsc != 0) { 549 E1000_WRITE_REG(hw, E1000_EIMS, 1 << IGB_MSIX_OTHER_INTR_VEC); 550 } 551 552 E1000_WRITE_REG(hw, E1000_IMS, intr->mask); 553 E1000_WRITE_FLUSH(hw); 554 } 555 556 static void 557 igb_intr_disable(struct rte_eth_dev *dev) 558 { 559 struct e1000_hw *hw = 560 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 561 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 562 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 563 564 if (rte_intr_allow_others(intr_handle) && 565 dev->data->dev_conf.intr_conf.lsc != 0) { 566 E1000_WRITE_REG(hw, E1000_EIMC, 1 << IGB_MSIX_OTHER_INTR_VEC); 567 } 568 569 E1000_WRITE_REG(hw, E1000_IMC, ~0); 570 E1000_WRITE_FLUSH(hw); 571 } 572 573 static inline void 574 igbvf_intr_enable(struct rte_eth_dev *dev) 575 { 576 struct e1000_hw *hw = 577 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 578 579 /* only for mailbox */ 580 E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX); 581 E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX); 582 E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX); 583 E1000_WRITE_FLUSH(hw); 584 } 585 586 /* only for mailbox now. If RX/TX needed, should extend this function. 
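The mailbox ("other cause") interrupt is steered to an MSI-X vector through the low bits of VTIVAR_MISC, and E1000_VTIVAR_VALID marks the mapping as active.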
*/ 587 static void 588 igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector) 589 { 590 uint32_t tmp = 0; 591 592 /* mailbox */ 593 tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK); 594 tmp |= E1000_VTIVAR_VALID; 595 E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp); 596 } 597 598 static void 599 eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev) 600 { 601 struct e1000_hw *hw = 602 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 603 604 /* Configure VF other cause ivar */ 605 igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX); 606 } 607 608 static inline int32_t 609 igb_pf_reset_hw(struct e1000_hw *hw) 610 { 611 uint32_t ctrl_ext; 612 int32_t status; 613 614 status = e1000_reset_hw(hw); 615 616 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 617 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 618 ctrl_ext |= E1000_CTRL_EXT_PFRSTD; 619 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 620 E1000_WRITE_FLUSH(hw); 621 622 return status; 623 } 624 625 static void 626 igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev) 627 { 628 struct e1000_hw *hw = 629 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 630 631 632 hw->vendor_id = pci_dev->id.vendor_id; 633 hw->device_id = pci_dev->id.device_id; 634 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; 635 hw->subsystem_device_id = pci_dev->id.subsystem_device_id; 636 637 e1000_set_mac_type(hw); 638 639 /* need to check if it is a vf device below */ 640 } 641 642 static int 643 igb_reset_swfw_lock(struct e1000_hw *hw) 644 { 645 int ret_val; 646 647 /* 648 * Do mac ops initialization manually here, since we will need 649 * some function pointers set by this call. 650 */ 651 ret_val = e1000_init_mac_params(hw); 652 if (ret_val) 653 return ret_val; 654 655 /* 656 * SMBI lock should not fail in this early stage. If this is the case, 657 * it is due to an improper exit of the application. 658 * So force the release of the faulty lock. 659 */ 660 if (e1000_get_hw_semaphore_generic(hw) < 0) { 661 PMD_DRV_LOG(DEBUG, "SMBI lock released"); 662 } 663 e1000_put_hw_semaphore_generic(hw); 664 665 if (hw->mac.ops.acquire_swfw_sync != NULL) { 666 uint16_t mask; 667 668 /* 669 * Phy lock should not fail in this early stage. If this is the case, 670 * it is due to an improper exit of the application. 671 * So force the release of the faulty lock. 672 */ 673 mask = E1000_SWFW_PHY0_SM << hw->bus.func; 674 if (hw->bus.func > E1000_FUNC_1) 675 mask <<= 2; 676 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { 677 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", 678 hw->bus.func); 679 } 680 hw->mac.ops.release_swfw_sync(hw, mask); 681 682 /* 683 * This one is more tricky since it is common to all ports; but 684 * swfw_sync retries last long enough (1s) to be almost sure that if 685 * lock can not be taken it is due to an improper lock of the 686 * semaphore. 
687 */ 688 mask = E1000_SWFW_EEP_SM; 689 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { 690 PMD_DRV_LOG(DEBUG, "SWFW common locks released"); 691 } 692 hw->mac.ops.release_swfw_sync(hw, mask); 693 } 694 695 return E1000_SUCCESS; 696 } 697 698 /* Remove all ntuple filters of the device */ 699 static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) 700 { 701 struct e1000_filter_info *filter_info = 702 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 703 struct e1000_5tuple_filter *p_5tuple; 704 struct e1000_2tuple_filter *p_2tuple; 705 706 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { 707 TAILQ_REMOVE(&filter_info->fivetuple_list, 708 p_5tuple, entries); 709 rte_free(p_5tuple); 710 } 711 filter_info->fivetuple_mask = 0; 712 while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) { 713 TAILQ_REMOVE(&filter_info->twotuple_list, 714 p_2tuple, entries); 715 rte_free(p_2tuple); 716 } 717 filter_info->twotuple_mask = 0; 718 719 return 0; 720 } 721 722 /* Remove all flex filters of the device */ 723 static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev) 724 { 725 struct e1000_filter_info *filter_info = 726 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 727 struct e1000_flex_filter *p_flex; 728 729 while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) { 730 TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries); 731 rte_free(p_flex); 732 } 733 filter_info->flex_mask = 0; 734 735 return 0; 736 } 737 738 static int 739 eth_igb_dev_init(struct rte_eth_dev *eth_dev) 740 { 741 int error = 0; 742 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 743 struct e1000_hw *hw = 744 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 745 struct e1000_vfta * shadow_vfta = 746 E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 747 struct e1000_filter_info *filter_info = 748 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 749 struct e1000_adapter *adapter = 750 E1000_DEV_PRIVATE(eth_dev->data->dev_private); 751 752 uint32_t ctrl_ext; 753 754 eth_dev->dev_ops = &eth_igb_ops; 755 eth_dev->rx_pkt_burst = &eth_igb_recv_pkts; 756 eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts; 757 eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts; 758 759 /* for secondary processes, we don't initialise any further as primary 760 * has already done this work.
Only check we don't need a different RX function */ 762 if (rte_eal_process_type() != RTE_PROC_PRIMARY){ 763 if (eth_dev->data->scattered_rx) 764 eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts; 765 return 0; 766 } 767 768 rte_eth_copy_pci_info(eth_dev, pci_dev); 769 770 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 771 772 igb_identify_hardware(eth_dev, pci_dev); 773 if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) { 774 error = -EIO; 775 goto err_late; 776 } 777 778 e1000_get_bus_info(hw); 779 780 /* Reset any pending lock */ 781 if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) { 782 error = -EIO; 783 goto err_late; 784 } 785 786 /* Finish initialization */ 787 if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) { 788 error = -EIO; 789 goto err_late; 790 } 791 792 hw->mac.autoneg = 1; 793 hw->phy.autoneg_wait_to_complete = 0; 794 hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; 795 796 /* Copper options */ 797 if (hw->phy.media_type == e1000_media_type_copper) { 798 hw->phy.mdix = 0; /* AUTO_ALL_MODES */ 799 hw->phy.disable_polarity_correction = 0; 800 hw->phy.ms_type = e1000_ms_hw_default; 801 } 802 803 /* 804 * Start from a known state, this is important in reading the nvm 805 * and mac from that. 806 */ 807 igb_pf_reset_hw(hw); 808 809 /* Make sure we have a good EEPROM before we read from it */ 810 if (e1000_validate_nvm_checksum(hw) < 0) { 811 /* 812 * Some PCI-E parts fail the first check due to 813 * the link being in sleep state, call it again, 814 * if it fails a second time it's a real issue. 815 */ 816 if (e1000_validate_nvm_checksum(hw) < 0) { 817 PMD_INIT_LOG(ERR, "EEPROM checksum invalid"); 818 error = -EIO; 819 goto err_late; 820 } 821 } 822 823 /* Read the permanent MAC address out of the EEPROM */ 824 if (e1000_read_mac_addr(hw) != 0) { 825 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address"); 826 error = -EIO; 827 goto err_late; 828 } 829 830 /* Allocate memory for storing MAC addresses */ 831 eth_dev->data->mac_addrs = rte_zmalloc("e1000", 832 ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0); 833 if (eth_dev->data->mac_addrs == NULL) { 834 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to " 835 "store MAC addresses", 836 ETHER_ADDR_LEN * hw->mac.rar_entry_count); 837 error = -ENOMEM; 838 goto err_late; 839 } 840 841 /* Copy the permanent MAC address */ 842 ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]); 843 844 /* initialize the vfta */ 845 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 846 847 /* Now initialize the hardware */ 848 if (igb_hardware_init(hw) != 0) { 849 PMD_INIT_LOG(ERR, "Hardware initialization failed"); 850 rte_free(eth_dev->data->mac_addrs); 851 eth_dev->data->mac_addrs = NULL; 852 error = -ENODEV; 853 goto err_late; 854 } 855 hw->mac.get_link_status = 1; 856 adapter->stopped = 0; 857 858 /* Indicate SOL/IDER usage */ 859 if (e1000_check_reset_block(hw) < 0) { 860 PMD_INIT_LOG(ERR, "PHY reset is blocked due to" 861 " SOL/IDER session"); 862 } 863 864 /* initialize PF if max_vfs not zero */ 865 igb_pf_host_init(eth_dev); 866 867 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 868 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 869 ctrl_ext |= E1000_CTRL_EXT_PFRSTD; 870 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 871 E1000_WRITE_FLUSH(hw); 872 873 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x", 874 eth_dev->data->port_id, pci_dev->id.vendor_id, 875 pci_dev->id.device_id); 876 877 rte_intr_callback_register(&pci_dev->intr_handle, 878 eth_igb_interrupt_handler, 879
(void *)eth_dev); 880 881 /* enable uio/vfio intr/eventfd mapping */ 882 rte_intr_enable(&pci_dev->intr_handle); 883 884 /* enable support intr */ 885 igb_intr_enable(eth_dev); 886 887 /* initialize filter info */ 888 memset(filter_info, 0, 889 sizeof(struct e1000_filter_info)); 890 891 TAILQ_INIT(&filter_info->flex_list); 892 TAILQ_INIT(&filter_info->twotuple_list); 893 TAILQ_INIT(&filter_info->fivetuple_list); 894 895 TAILQ_INIT(&igb_filter_ntuple_list); 896 TAILQ_INIT(&igb_filter_ethertype_list); 897 TAILQ_INIT(&igb_filter_syn_list); 898 TAILQ_INIT(&igb_filter_flex_list); 899 TAILQ_INIT(&igb_filter_rss_list); 900 TAILQ_INIT(&igb_flow_list); 901 902 return 0; 903 904 err_late: 905 igb_hw_control_release(hw); 906 907 return error; 908 } 909 910 static int 911 eth_igb_dev_uninit(struct rte_eth_dev *eth_dev) 912 { 913 struct rte_pci_device *pci_dev; 914 struct rte_intr_handle *intr_handle; 915 struct e1000_hw *hw; 916 struct e1000_adapter *adapter = 917 E1000_DEV_PRIVATE(eth_dev->data->dev_private); 918 struct e1000_filter_info *filter_info = 919 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 920 921 PMD_INIT_FUNC_TRACE(); 922 923 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 924 return -EPERM; 925 926 hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 927 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 928 intr_handle = &pci_dev->intr_handle; 929 930 if (adapter->stopped == 0) 931 eth_igb_close(eth_dev); 932 933 eth_dev->dev_ops = NULL; 934 eth_dev->rx_pkt_burst = NULL; 935 eth_dev->tx_pkt_burst = NULL; 936 937 /* Reset any pending lock */ 938 igb_reset_swfw_lock(hw); 939 940 /* uninitialize PF if max_vfs not zero */ 941 igb_pf_host_uninit(eth_dev); 942 943 /* disable uio intr before callback unregister */ 944 rte_intr_disable(intr_handle); 945 rte_intr_callback_unregister(intr_handle, 946 eth_igb_interrupt_handler, eth_dev); 947 948 /* clear the SYN filter info */ 949 filter_info->syn_info = 0; 950 951 /* clear the ethertype filters info */ 952 filter_info->ethertype_mask = 0; 953 memset(filter_info->ethertype_filters, 0, 954 E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter)); 955 956 /* clear the rss filter info */ 957 memset(&filter_info->rss_info, 0, 958 sizeof(struct igb_rte_flow_rss_conf)); 959 960 /* remove all ntuple filters of the device */ 961 igb_ntuple_filter_uninit(eth_dev); 962 963 /* remove all flex filters of the device */ 964 igb_flex_filter_uninit(eth_dev); 965 966 /* clear all the filters list */ 967 igb_filterlist_flush(eth_dev); 968 969 return 0; 970 } 971 972 /* 973 * Virtual Function device init 974 */ 975 static int 976 eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) 977 { 978 struct rte_pci_device *pci_dev; 979 struct rte_intr_handle *intr_handle; 980 struct e1000_adapter *adapter = 981 E1000_DEV_PRIVATE(eth_dev->data->dev_private); 982 struct e1000_hw *hw = 983 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 984 int diag; 985 struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr; 986 987 PMD_INIT_FUNC_TRACE(); 988 989 eth_dev->dev_ops = &igbvf_eth_dev_ops; 990 eth_dev->rx_pkt_burst = &eth_igb_recv_pkts; 991 eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts; 992 eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts; 993 994 /* for secondary processes, we don't initialise any further as primary 995 * has already done this work.
Only check we don't need a different RX function */ 997 if (rte_eal_process_type() != RTE_PROC_PRIMARY){ 998 if (eth_dev->data->scattered_rx) 999 eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts; 1000 return 0; 1001 } 1002 1003 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1004 rte_eth_copy_pci_info(eth_dev, pci_dev); 1005 1006 hw->device_id = pci_dev->id.device_id; 1007 hw->vendor_id = pci_dev->id.vendor_id; 1008 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1009 adapter->stopped = 0; 1010 1011 /* Initialize the shared code (base driver) */ 1012 diag = e1000_setup_init_funcs(hw, TRUE); 1013 if (diag != 0) { 1014 PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d", 1015 diag); 1016 return -EIO; 1017 } 1018 1019 /* init_mailbox_params */ 1020 hw->mbx.ops.init_params(hw); 1021 1022 /* Disable the interrupts for VF */ 1023 igbvf_intr_disable(hw); 1024 1025 diag = hw->mac.ops.reset_hw(hw); 1026 1027 /* Allocate memory for storing MAC addresses */ 1028 eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN * 1029 hw->mac.rar_entry_count, 0); 1030 if (eth_dev->data->mac_addrs == NULL) { 1031 PMD_INIT_LOG(ERR, 1032 "Failed to allocate %d bytes needed to store MAC " 1033 "addresses", 1034 ETHER_ADDR_LEN * hw->mac.rar_entry_count); 1035 return -ENOMEM; 1036 } 1037 1038 /* Generate a random MAC address, if none was assigned by PF. */ 1039 if (is_zero_ether_addr(perm_addr)) { 1040 eth_random_addr(perm_addr->addr_bytes); 1041 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 1042 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 1043 "%02x:%02x:%02x:%02x:%02x:%02x", 1044 perm_addr->addr_bytes[0], 1045 perm_addr->addr_bytes[1], 1046 perm_addr->addr_bytes[2], 1047 perm_addr->addr_bytes[3], 1048 perm_addr->addr_bytes[4], 1049 perm_addr->addr_bytes[5]); 1050 } 1051 1052 diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0); 1053 if (diag) { 1054 rte_free(eth_dev->data->mac_addrs); 1055 eth_dev->data->mac_addrs = NULL; 1056 return diag; 1057 } 1058 /* Copy the permanent MAC address */ 1059 ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, 1060 &eth_dev->data->mac_addrs[0]); 1061 1062 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x " 1063 "mac.type=%s", 1064 eth_dev->data->port_id, pci_dev->id.vendor_id, 1065 pci_dev->id.device_id, "igb_mac_82576_vf"); 1066 1067 intr_handle = &pci_dev->intr_handle; 1068 rte_intr_callback_register(intr_handle, 1069 eth_igbvf_interrupt_handler, eth_dev); 1070 1071 return 0; 1072 } 1073 1074 static int 1075 eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev) 1076 { 1077 struct e1000_adapter *adapter = 1078 E1000_DEV_PRIVATE(eth_dev->data->dev_private); 1079 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1080 1081 PMD_INIT_FUNC_TRACE(); 1082 1083 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1084 return -EPERM; 1085 1086 if (adapter->stopped == 0) 1087 igbvf_dev_close(eth_dev); 1088 1089 eth_dev->dev_ops = NULL; 1090 eth_dev->rx_pkt_burst = NULL; 1091 eth_dev->tx_pkt_burst = NULL; 1092 1093 /* disable uio intr before callback unregister */ 1094 rte_intr_disable(&pci_dev->intr_handle); 1095 rte_intr_callback_unregister(&pci_dev->intr_handle, 1096 eth_igbvf_interrupt_handler, 1097 (void *)eth_dev); 1098 1099 return 0; 1100 } 1101 1102 static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1103 struct rte_pci_device *pci_dev) 1104 { 1105 return rte_eth_dev_pci_generic_probe(pci_dev, 1106 sizeof(struct e1000_adapter), eth_igb_dev_init); 1107 } 1108 1109 static int eth_igb_pci_remove(struct
rte_pci_device *pci_dev) 1110 { 1111 return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit); 1112 } 1113 1114 static struct rte_pci_driver rte_igb_pmd = { 1115 .id_table = pci_id_igb_map, 1116 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 1117 RTE_PCI_DRV_IOVA_AS_VA, 1118 .probe = eth_igb_pci_probe, 1119 .remove = eth_igb_pci_remove, 1120 }; 1121 1122 1123 static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1124 struct rte_pci_device *pci_dev) 1125 { 1126 return rte_eth_dev_pci_generic_probe(pci_dev, 1127 sizeof(struct e1000_adapter), eth_igbvf_dev_init); 1128 } 1129 1130 static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev) 1131 { 1132 return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit); 1133 } 1134 1135 /* 1136 * virtual function driver struct 1137 */ 1138 static struct rte_pci_driver rte_igbvf_pmd = { 1139 .id_table = pci_id_igbvf_map, 1140 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA, 1141 .probe = eth_igbvf_pci_probe, 1142 .remove = eth_igbvf_pci_remove, 1143 }; 1144 1145 static void 1146 igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1147 { 1148 struct e1000_hw *hw = 1149 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1150 /* RCTL: enable VLAN filter since VMDq always uses the VLAN filter */ 1151 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL); 1152 rctl |= E1000_RCTL_VFE; 1153 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 1154 } 1155 1156 static int 1157 igb_check_mq_mode(struct rte_eth_dev *dev) 1158 { 1159 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 1160 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 1161 uint16_t nb_rx_q = dev->data->nb_rx_queues; 1162 uint16_t nb_tx_q = dev->data->nb_tx_queues; 1163 1164 if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) || 1165 tx_mq_mode == ETH_MQ_TX_DCB || 1166 tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) { 1167 PMD_INIT_LOG(ERR, "DCB mode is not supported."); 1168 return -EINVAL; 1169 } 1170 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 1171 /* Check multi-queue mode. 1172 * To not break software we accept ETH_MQ_RX_NONE as this might 1173 * be used to turn off VLAN filter. 1174 */ 1175 1176 if (rx_mq_mode == ETH_MQ_RX_NONE || 1177 rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) { 1178 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; 1179 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; 1180 } else { 1181 /* Only support one queue on VFs. 1182 * RSS together with SRIOV is not supported. 1183 */ 1184 PMD_INIT_LOG(ERR, "SRIOV is active," 1185 " wrong mq_mode rx %d.", 1186 rx_mq_mode); 1187 return -EINVAL; 1188 } 1189 /* TX mode is not used here, so mode might be ignored.*/ 1190 if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) { 1191 /* SRIOV only works in VMDq enable mode */ 1192 PMD_INIT_LOG(WARNING, "SRIOV is active," 1193 " TX mode %d is not supported. " 1194 " Driver will behave as %d mode.", 1195 tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY); 1196 } 1197 1198 /* check valid queue number */ 1199 if ((nb_rx_q > 1) || (nb_tx_q > 1)) { 1200 PMD_INIT_LOG(ERR, "SRIOV is active," 1201 " only support one queue on VFs."); 1202 return -EINVAL; 1203 } 1204 } else { 1205 /* To not break software that sets an invalid mode, only display 1206 * a warning if an invalid mode is used.
1207 */ 1208 if (rx_mq_mode != ETH_MQ_RX_NONE && 1209 rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY && 1210 rx_mq_mode != ETH_MQ_RX_RSS) { 1211 /* RSS together with VMDq not supported*/ 1212 PMD_INIT_LOG(ERR, "RX mode %d is not supported.", 1213 rx_mq_mode); 1214 return -EINVAL; 1215 } 1216 1217 if (tx_mq_mode != ETH_MQ_TX_NONE && 1218 tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) { 1219 PMD_INIT_LOG(WARNING, "TX mode %d is not supported." 1220 " Since txmode is meaningless in this" 1221 " driver, it is ignored.", 1222 tx_mq_mode); 1223 } 1224 } 1225 return 0; 1226 } 1227 1228 static int 1229 eth_igb_configure(struct rte_eth_dev *dev) 1230 { 1231 struct e1000_interrupt *intr = 1232 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 1233 int ret; 1234 1235 PMD_INIT_FUNC_TRACE(); 1236 1237 /* multiple queue mode checking */ 1238 ret = igb_check_mq_mode(dev); 1239 if (ret != 0) { 1240 PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.", 1241 ret); 1242 return ret; 1243 } 1244 1245 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; 1246 PMD_INIT_FUNC_TRACE(); 1247 1248 return 0; 1249 } 1250 1251 static void 1252 eth_igb_rxtx_control(struct rte_eth_dev *dev, 1253 bool enable) 1254 { 1255 struct e1000_hw *hw = 1256 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1257 uint32_t tctl, rctl; 1258 1259 tctl = E1000_READ_REG(hw, E1000_TCTL); 1260 rctl = E1000_READ_REG(hw, E1000_RCTL); 1261 1262 if (enable) { 1263 /* enable Tx/Rx */ 1264 tctl |= E1000_TCTL_EN; 1265 rctl |= E1000_RCTL_EN; 1266 } else { 1267 /* disable Tx/Rx */ 1268 tctl &= ~E1000_TCTL_EN; 1269 rctl &= ~E1000_RCTL_EN; 1270 } 1271 E1000_WRITE_REG(hw, E1000_TCTL, tctl); 1272 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 1273 E1000_WRITE_FLUSH(hw); 1274 } 1275 1276 static int 1277 eth_igb_start(struct rte_eth_dev *dev) 1278 { 1279 struct e1000_hw *hw = 1280 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1281 struct e1000_adapter *adapter = 1282 E1000_DEV_PRIVATE(dev->data->dev_private); 1283 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1284 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1285 int ret, mask; 1286 uint32_t intr_vector = 0; 1287 uint32_t ctrl_ext; 1288 uint32_t *speeds; 1289 int num_speeds; 1290 bool autoneg; 1291 1292 PMD_INIT_FUNC_TRACE(); 1293 1294 /* disable uio/vfio intr/eventfd mapping */ 1295 rte_intr_disable(intr_handle); 1296 1297 /* Power up the phy. Needed to make the link go Up */ 1298 eth_igb_dev_set_link_up(dev); 1299 1300 /* 1301 * Packet Buffer Allocation (PBA) 1302 * Writing PBA sets the receive portion of the buffer, 1303 * the remainder is used for the transmit buffer.
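* (For the 82575 handled below, the driver programs E1000_PBA_32K, i.e. 32K for the receive buffer and 16K for transmit.)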
1304 */ 1305 if (hw->mac.type == e1000_82575) { 1306 uint32_t pba; 1307 1308 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 1309 E1000_WRITE_REG(hw, E1000_PBA, pba); 1310 } 1311 1312 /* Put the address into the Receive Address Array */ 1313 e1000_rar_set(hw, hw->mac.addr, 0); 1314 1315 /* Initialize the hardware */ 1316 if (igb_hardware_init(hw)) { 1317 PMD_INIT_LOG(ERR, "Unable to initialize the hardware"); 1318 return -EIO; 1319 } 1320 adapter->stopped = 0; 1321 1322 E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN); 1323 1324 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 1325 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 1326 ctrl_ext |= E1000_CTRL_EXT_PFRSTD; 1327 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 1328 E1000_WRITE_FLUSH(hw); 1329 1330 /* configure PF module if SRIOV enabled */ 1331 igb_pf_host_configure(dev); 1332 1333 /* check and configure queue intr-vector mapping */ 1334 if ((rte_intr_cap_multiple(intr_handle) || 1335 !RTE_ETH_DEV_SRIOV(dev).active) && 1336 dev->data->dev_conf.intr_conf.rxq != 0) { 1337 intr_vector = dev->data->nb_rx_queues; 1338 if (rte_intr_efd_enable(intr_handle, intr_vector)) 1339 return -1; 1340 } 1341 1342 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 1343 intr_handle->intr_vec = 1344 rte_zmalloc("intr_vec", 1345 dev->data->nb_rx_queues * sizeof(int), 0); 1346 if (intr_handle->intr_vec == NULL) { 1347 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 1348 " intr_vec", dev->data->nb_rx_queues); 1349 return -ENOMEM; 1350 } 1351 } 1352 1353 /* configure msix for rx interrupt */ 1354 eth_igb_configure_msix_intr(dev); 1355 1356 /* Configure for OS presence */ 1357 igb_init_manageability(hw); 1358 1359 eth_igb_tx_init(dev); 1360 1361 /* This can fail when allocating mbufs for descriptor rings */ 1362 ret = eth_igb_rx_init(dev); 1363 if (ret) { 1364 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 1365 igb_dev_clear_queues(dev); 1366 return ret; 1367 } 1368 1369 e1000_clear_hw_cntrs_base_generic(hw); 1370 1371 /* 1372 * VLAN Offload Settings 1373 */ 1374 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ 1375 ETH_VLAN_EXTEND_MASK; 1376 ret = eth_igb_vlan_offload_set(dev, mask); 1377 if (ret) { 1378 PMD_INIT_LOG(ERR, "Unable to set vlan offload"); 1379 igb_dev_clear_queues(dev); 1380 return ret; 1381 } 1382 1383 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { 1384 /* Enable VLAN filter since VMDq always uses the VLAN filter */ 1385 igb_vmdq_vlan_hw_filter_enable(dev); 1386 } 1387 1388 if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) || 1389 (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) || 1390 (hw->mac.type == e1000_i211)) { 1391 /* Configure EITR with the maximum possible value (0xFFFF) */ 1392 E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF); 1393 } 1394 1395 /* Setup link speed and duplex */ 1396 speeds = &dev->data->dev_conf.link_speeds; 1397 if (*speeds == ETH_LINK_SPEED_AUTONEG) { 1398 hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; 1399 hw->mac.autoneg = 1; 1400 } else { 1401 num_speeds = 0; 1402 autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0; 1403 1404 /* Reset */ 1405 hw->phy.autoneg_advertised = 0; 1406 1407 if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | 1408 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | 1409 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) { 1410 num_speeds = -1; 1411 goto error_invalid_config; 1412 } 1413 if (*speeds & ETH_LINK_SPEED_10M_HD) { 1414 hw->phy.autoneg_advertised |= ADVERTISE_10_HALF; 1415
num_speeds++; 1416 } 1417 if (*speeds & ETH_LINK_SPEED_10M) { 1418 hw->phy.autoneg_advertised |= ADVERTISE_10_FULL; 1419 num_speeds++; 1420 } 1421 if (*speeds & ETH_LINK_SPEED_100M_HD) { 1422 hw->phy.autoneg_advertised |= ADVERTISE_100_HALF; 1423 num_speeds++; 1424 } 1425 if (*speeds & ETH_LINK_SPEED_100M) { 1426 hw->phy.autoneg_advertised |= ADVERTISE_100_FULL; 1427 num_speeds++; 1428 } 1429 if (*speeds & ETH_LINK_SPEED_1G) { 1430 hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL; 1431 num_speeds++; 1432 } 1433 if (num_speeds == 0 || (!autoneg && (num_speeds > 1))) 1434 goto error_invalid_config; 1435 1436 /* Set/reset the mac.autoneg based on the link speed, 1437 * fixed or not 1438 */ 1439 if (!autoneg) { 1440 hw->mac.autoneg = 0; 1441 hw->mac.forced_speed_duplex = 1442 hw->phy.autoneg_advertised; 1443 } else { 1444 hw->mac.autoneg = 1; 1445 } 1446 } 1447 1448 e1000_setup_link(hw); 1449 1450 if (rte_intr_allow_others(intr_handle)) { 1451 /* check if lsc interrupt is enabled */ 1452 if (dev->data->dev_conf.intr_conf.lsc != 0) 1453 eth_igb_lsc_interrupt_setup(dev, TRUE); 1454 else 1455 eth_igb_lsc_interrupt_setup(dev, FALSE); 1456 } else { 1457 rte_intr_callback_unregister(intr_handle, 1458 eth_igb_interrupt_handler, 1459 (void *)dev); 1460 if (dev->data->dev_conf.intr_conf.lsc != 0) 1461 PMD_INIT_LOG(INFO, "lsc won't enable because of" 1462 " no intr multiplex"); 1463 } 1464 1465 /* check if rxq interrupt is enabled */ 1466 if (dev->data->dev_conf.intr_conf.rxq != 0 && 1467 rte_intr_dp_is_en(intr_handle)) 1468 eth_igb_rxq_interrupt_setup(dev); 1469 1470 /* enable uio/vfio intr/eventfd mapping */ 1471 rte_intr_enable(intr_handle); 1472 1473 /* resume enabled intr since hw reset */ 1474 igb_intr_enable(dev); 1475 1476 /* restore all types filter */ 1477 igb_filter_restore(dev); 1478 1479 eth_igb_rxtx_control(dev, true); 1480 eth_igb_link_update(dev, 0); 1481 1482 PMD_INIT_LOG(DEBUG, "<<"); 1483 1484 return 0; 1485 1486 error_invalid_config: 1487 PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u", 1488 dev->data->dev_conf.link_speeds, dev->data->port_id); 1489 igb_dev_clear_queues(dev); 1490 return -EINVAL; 1491 } 1492 1493 /********************************************************************* 1494 * 1495 * This routine disables all traffic on the adapter by issuing a 1496 * global reset on the MAC. 1497 * 1498 **********************************************************************/ 1499 static void 1500 eth_igb_stop(struct rte_eth_dev *dev) 1501 { 1502 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1503 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1504 struct rte_eth_link link; 1505 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1506 1507 eth_igb_rxtx_control(dev, false); 1508 1509 igb_intr_disable(dev); 1510 1511 /* disable intr eventfd mapping */ 1512 rte_intr_disable(intr_handle); 1513 1514 igb_pf_reset_hw(hw); 1515 E1000_WRITE_REG(hw, E1000_WUC, 0); 1516 1517 /* Set bit for Go Link disconnect */ 1518 if (hw->mac.type >= e1000_82580) { 1519 uint32_t phpm_reg; 1520 1521 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); 1522 phpm_reg |= E1000_82580_PM_GO_LINKD; 1523 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); 1524 } 1525 1526 /* Power down the phy. 
Needed to make the link go Down */ 1527 eth_igb_dev_set_link_down(dev); 1528 1529 igb_dev_clear_queues(dev); 1530 1531 /* clear the recorded link status */ 1532 memset(&link, 0, sizeof(link)); 1533 rte_eth_linkstatus_set(dev, &link); 1534 1535 if (!rte_intr_allow_others(intr_handle)) 1536 /* resume to the default handler */ 1537 rte_intr_callback_register(intr_handle, 1538 eth_igb_interrupt_handler, 1539 (void *)dev); 1540 1541 /* Clean datapath event and queue/vec mapping */ 1542 rte_intr_efd_disable(intr_handle); 1543 if (intr_handle->intr_vec != NULL) { 1544 rte_free(intr_handle->intr_vec); 1545 intr_handle->intr_vec = NULL; 1546 } 1547 } 1548 1549 static int 1550 eth_igb_dev_set_link_up(struct rte_eth_dev *dev) 1551 { 1552 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1553 1554 if (hw->phy.media_type == e1000_media_type_copper) 1555 e1000_power_up_phy(hw); 1556 else 1557 e1000_power_up_fiber_serdes_link(hw); 1558 1559 return 0; 1560 } 1561 1562 static int 1563 eth_igb_dev_set_link_down(struct rte_eth_dev *dev) 1564 { 1565 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1566 1567 if (hw->phy.media_type == e1000_media_type_copper) 1568 e1000_power_down_phy(hw); 1569 else 1570 e1000_shutdown_fiber_serdes_link(hw); 1571 1572 return 0; 1573 } 1574 1575 static void 1576 eth_igb_close(struct rte_eth_dev *dev) 1577 { 1578 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1579 struct e1000_adapter *adapter = 1580 E1000_DEV_PRIVATE(dev->data->dev_private); 1581 struct rte_eth_link link; 1582 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1583 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1584 1585 eth_igb_stop(dev); 1586 adapter->stopped = 1; 1587 1588 e1000_phy_hw_reset(hw); 1589 igb_release_manageability(hw); 1590 igb_hw_control_release(hw); 1591 1592 /* Clear bit for Go Link disconnect */ 1593 if (hw->mac.type >= e1000_82580) { 1594 uint32_t phpm_reg; 1595 1596 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); 1597 phpm_reg &= ~E1000_82580_PM_GO_LINKD; 1598 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); 1599 } 1600 1601 igb_dev_free_queues(dev); 1602 1603 if (intr_handle->intr_vec) { 1604 rte_free(intr_handle->intr_vec); 1605 intr_handle->intr_vec = NULL; 1606 } 1607 1608 memset(&link, 0, sizeof(link)); 1609 rte_eth_linkstatus_set(dev, &link); 1610 } 1611 1612 /* 1613 * Reset PF device. 1614 */ 1615 static int 1616 eth_igb_reset(struct rte_eth_dev *dev) 1617 { 1618 int ret; 1619 1620 /* When a DPDK PMD PF begin to reset PF port, it should notify all 1621 * its VF to make them align with it. The detailed notification 1622 * mechanism is PMD specific and is currently not implemented. 1623 * To avoid unexpected behavior in VF, currently reset of PF with 1624 * SR-IOV activation is not supported. It might be supported later. 
1625 */ 1626 if (dev->data->sriov.active) 1627 return -ENOTSUP; 1628 1629 ret = eth_igb_dev_uninit(dev); 1630 if (ret) 1631 return ret; 1632 1633 ret = eth_igb_dev_init(dev); 1634 1635 return ret; 1636 } 1637 1638 1639 static int 1640 igb_get_rx_buffer_size(struct e1000_hw *hw) 1641 { 1642 uint32_t rx_buf_size; 1643 if (hw->mac.type == e1000_82576) { 1644 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10; 1645 } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) { 1646 /* PBS needs to be translated according to a lookup table */ 1647 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf); 1648 rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size); 1649 rx_buf_size = (rx_buf_size << 10); 1650 } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { 1651 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10; 1652 } else { 1653 rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10; 1654 } 1655 1656 return rx_buf_size; 1657 } 1658 1659 /********************************************************************* 1660 * 1661 * Initialize the hardware 1662 * 1663 **********************************************************************/ 1664 static int 1665 igb_hardware_init(struct e1000_hw *hw) 1666 { 1667 uint32_t rx_buf_size; 1668 int diag; 1669 1670 /* Let the firmware know the OS is in control */ 1671 igb_hw_control_acquire(hw); 1672 1673 /* 1674 * These parameters control the automatic generation (Tx) and 1675 * response (Rx) to Ethernet PAUSE frames. 1676 * - High water mark should allow for at least two standard size (1518) 1677 * frames to be received after sending an XOFF. 1678 * - Low water mark works best when it is very near the high water mark. 1679 * This allows the receiver to restart by sending XON when it has 1680 * drained a bit. Here we use an arbitrary value of 1500 which will 1681 * restart after one full frame is pulled from the buffer. There 1682 * could be several smaller frames in the buffer and if so they will 1683 * not trigger the XON until their total number reduces the buffer 1684 * by 1500. 1685 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
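 * As a purely illustrative example, assuming igb_get_rx_buffer_size()
 * reports a 64 KB packet buffer: high_water = 65536 - 2 * 1518 = 62500
 * bytes and low_water = 62500 - 1500 = 61000 bytes.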
1686 */ 1687 rx_buf_size = igb_get_rx_buffer_size(hw); 1688 1689 hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2); 1690 hw->fc.low_water = hw->fc.high_water - 1500; 1691 hw->fc.pause_time = IGB_FC_PAUSE_TIME; 1692 hw->fc.send_xon = 1; 1693 1694 /* Set Flow control, use the tunable location if sane */ 1695 if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4)) 1696 hw->fc.requested_mode = igb_fc_setting; 1697 else 1698 hw->fc.requested_mode = e1000_fc_none; 1699 1700 /* Issue a global reset */ 1701 igb_pf_reset_hw(hw); 1702 E1000_WRITE_REG(hw, E1000_WUC, 0); 1703 1704 diag = e1000_init_hw(hw); 1705 if (diag < 0) 1706 return diag; 1707 1708 E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN); 1709 e1000_get_phy_info(hw); 1710 e1000_check_for_link(hw); 1711 1712 return 0; 1713 } 1714 1715 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */ 1716 static void 1717 igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats) 1718 { 1719 int pause_frames; 1720 1721 uint64_t old_gprc = stats->gprc; 1722 uint64_t old_gptc = stats->gptc; 1723 uint64_t old_tpr = stats->tpr; 1724 uint64_t old_tpt = stats->tpt; 1725 uint64_t old_rpthc = stats->rpthc; 1726 uint64_t old_hgptc = stats->hgptc; 1727 1728 if(hw->phy.media_type == e1000_media_type_copper || 1729 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 1730 stats->symerrs += 1731 E1000_READ_REG(hw,E1000_SYMERRS); 1732 stats->sec += E1000_READ_REG(hw, E1000_SEC); 1733 } 1734 1735 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 1736 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 1737 stats->scc += E1000_READ_REG(hw, E1000_SCC); 1738 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 1739 1740 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 1741 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 1742 stats->colc += E1000_READ_REG(hw, E1000_COLC); 1743 stats->dc += E1000_READ_REG(hw, E1000_DC); 1744 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 1745 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 1746 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 1747 /* 1748 ** For watchdog management we need to know if we have been 1749 ** paused during the last interval, so capture that here. 1750 */ 1751 pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 1752 stats->xoffrxc += pause_frames; 1753 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 1754 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 1755 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 1756 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 1757 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 1758 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 1759 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 1760 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 1761 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 1762 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 1763 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 1764 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 1765 1766 /* For the 64-bit byte counters the low dword must be read first. 
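 * (hence GORCL/GOTCL and TORL/TOTL below are read before their
 * high-dword counterparts).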
*/ 1767 /* Both registers clear on the read of the high dword */ 1768 1769 /* Workaround CRC bytes included in size, take away 4 bytes/packet */ 1770 stats->gorc += E1000_READ_REG(hw, E1000_GORCL); 1771 stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 1772 stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN; 1773 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); 1774 stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 1775 stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN; 1776 1777 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 1778 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 1779 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 1780 stats->roc += E1000_READ_REG(hw, E1000_ROC); 1781 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 1782 1783 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 1784 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 1785 1786 stats->tor += E1000_READ_REG(hw, E1000_TORL); 1787 stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32); 1788 stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN; 1789 stats->tot += E1000_READ_REG(hw, E1000_TOTL); 1790 stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32); 1791 stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN; 1792 1793 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 1794 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 1795 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 1796 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 1797 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 1798 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 1799 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 1800 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 1801 1802 /* Interrupt Counts */ 1803 1804 stats->iac += E1000_READ_REG(hw, E1000_IAC); 1805 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 1806 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 1807 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 1808 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 1809 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 1810 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 1811 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 1812 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 1813 1814 /* Host to Card Statistics */ 1815 1816 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 1817 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 1818 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 1819 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 1820 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 1821 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 1822 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 1823 stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL); 1824 stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32); 1825 stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN; 1826 stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL); 1827 stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32); 1828 stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN; 1829 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 1830 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 1831 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 1832 1833 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 1834 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 1835 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 1836 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 1837 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 1838 stats->tsctfc += 
E1000_READ_REG(hw, E1000_TSCTFC); 1839 } 1840 1841 static int 1842 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 1843 { 1844 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1845 struct e1000_hw_stats *stats = 1846 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1847 1848 igb_read_stats_registers(hw, stats); 1849 1850 if (rte_stats == NULL) 1851 return -EINVAL; 1852 1853 /* Rx Errors */ 1854 rte_stats->imissed = stats->mpc; 1855 rte_stats->ierrors = stats->crcerrs + 1856 stats->rlec + stats->ruc + stats->roc + 1857 stats->rxerrc + stats->algnerrc + stats->cexterr; 1858 1859 /* Tx Errors */ 1860 rte_stats->oerrors = stats->ecol + stats->latecol; 1861 1862 rte_stats->ipackets = stats->gprc; 1863 rte_stats->opackets = stats->gptc; 1864 rte_stats->ibytes = stats->gorc; 1865 rte_stats->obytes = stats->gotc; 1866 return 0; 1867 } 1868 1869 static void 1870 eth_igb_stats_reset(struct rte_eth_dev *dev) 1871 { 1872 struct e1000_hw_stats *hw_stats = 1873 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1874 1875 /* HW registers are cleared on read */ 1876 eth_igb_stats_get(dev, NULL); 1877 1878 /* Reset software totals */ 1879 memset(hw_stats, 0, sizeof(*hw_stats)); 1880 } 1881 1882 static void 1883 eth_igb_xstats_reset(struct rte_eth_dev *dev) 1884 { 1885 struct e1000_hw_stats *stats = 1886 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1887 1888 /* HW registers are cleared on read */ 1889 eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS); 1890 1891 /* Reset software totals */ 1892 memset(stats, 0, sizeof(*stats)); 1893 } 1894 1895 static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1896 struct rte_eth_xstat_name *xstats_names, 1897 __rte_unused unsigned int size) 1898 { 1899 unsigned i; 1900 1901 if (xstats_names == NULL) 1902 return IGB_NB_XSTATS; 1903 1904 /* Note: limit checked in rte_eth_xstats_names() */ 1905 1906 for (i = 0; i < IGB_NB_XSTATS; i++) { 1907 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name), 1908 "%s", rte_igb_stats_strings[i].name); 1909 } 1910 1911 return IGB_NB_XSTATS; 1912 } 1913 1914 static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev, 1915 struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, 1916 unsigned int limit) 1917 { 1918 unsigned int i; 1919 1920 if (!ids) { 1921 if (xstats_names == NULL) 1922 return IGB_NB_XSTATS; 1923 1924 for (i = 0; i < IGB_NB_XSTATS; i++) 1925 snprintf(xstats_names[i].name, 1926 sizeof(xstats_names[i].name), 1927 "%s", rte_igb_stats_strings[i].name); 1928 1929 return IGB_NB_XSTATS; 1930 1931 } else { 1932 struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS]; 1933 1934 eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL, 1935 IGB_NB_XSTATS); 1936 1937 for (i = 0; i < limit; i++) { 1938 if (ids[i] >= IGB_NB_XSTATS) { 1939 PMD_INIT_LOG(ERR, "id value isn't valid"); 1940 return -1; 1941 } 1942 strcpy(xstats_names[i].name, 1943 xstats_names_copy[ids[i]].name); 1944 } 1945 return limit; 1946 } 1947 } 1948 1949 static int 1950 eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1951 unsigned n) 1952 { 1953 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1954 struct e1000_hw_stats *hw_stats = 1955 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1956 unsigned i; 1957 1958 if (n < IGB_NB_XSTATS) 1959 return IGB_NB_XSTATS; 1960 1961 igb_read_stats_registers(hw, hw_stats); 1962 1963 /* If this is a reset xstats is NULL, and we have cleared the 1964 * registers by reading them. 
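 * eth_igb_xstats_reset() above relies on this behavior by passing a
 * NULL xstats array.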
1965 */ 1966 if (!xstats) 1967 return 0; 1968 1969 /* Extended stats */ 1970 for (i = 0; i < IGB_NB_XSTATS; i++) { 1971 xstats[i].id = i; 1972 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 1973 rte_igb_stats_strings[i].offset); 1974 } 1975 1976 return IGB_NB_XSTATS; 1977 } 1978 1979 static int 1980 eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1981 uint64_t *values, unsigned int n) 1982 { 1983 unsigned int i; 1984 1985 if (!ids) { 1986 struct e1000_hw *hw = 1987 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1988 struct e1000_hw_stats *hw_stats = 1989 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1990 1991 if (n < IGB_NB_XSTATS) 1992 return IGB_NB_XSTATS; 1993 1994 igb_read_stats_registers(hw, hw_stats); 1995 1996 /* If this is a reset xstats is NULL, and we have cleared the 1997 * registers by reading them. 1998 */ 1999 if (!values) 2000 return 0; 2001 2002 /* Extended stats */ 2003 for (i = 0; i < IGB_NB_XSTATS; i++) 2004 values[i] = *(uint64_t *)(((char *)hw_stats) + 2005 rte_igb_stats_strings[i].offset); 2006 2007 return IGB_NB_XSTATS; 2008 2009 } else { 2010 uint64_t values_copy[IGB_NB_XSTATS]; 2011 2012 eth_igb_xstats_get_by_id(dev, NULL, values_copy, 2013 IGB_NB_XSTATS); 2014 2015 for (i = 0; i < n; i++) { 2016 if (ids[i] >= IGB_NB_XSTATS) { 2017 PMD_INIT_LOG(ERR, "id value isn't valid"); 2018 return -1; 2019 } 2020 values[i] = values_copy[ids[i]]; 2021 } 2022 return n; 2023 } 2024 } 2025 2026 static void 2027 igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats) 2028 { 2029 /* Good Rx packets, include VF loopback */ 2030 UPDATE_VF_STAT(E1000_VFGPRC, 2031 hw_stats->last_gprc, hw_stats->gprc); 2032 2033 /* Good Rx octets, include VF loopback */ 2034 UPDATE_VF_STAT(E1000_VFGORC, 2035 hw_stats->last_gorc, hw_stats->gorc); 2036 2037 /* Good Tx packets, include VF loopback */ 2038 UPDATE_VF_STAT(E1000_VFGPTC, 2039 hw_stats->last_gptc, hw_stats->gptc); 2040 2041 /* Good Tx octets, include VF loopback */ 2042 UPDATE_VF_STAT(E1000_VFGOTC, 2043 hw_stats->last_gotc, hw_stats->gotc); 2044 2045 /* Rx Multicst packets */ 2046 UPDATE_VF_STAT(E1000_VFMPRC, 2047 hw_stats->last_mprc, hw_stats->mprc); 2048 2049 /* Good Rx loopback packets */ 2050 UPDATE_VF_STAT(E1000_VFGPRLBC, 2051 hw_stats->last_gprlbc, hw_stats->gprlbc); 2052 2053 /* Good Rx loopback octets */ 2054 UPDATE_VF_STAT(E1000_VFGORLBC, 2055 hw_stats->last_gorlbc, hw_stats->gorlbc); 2056 2057 /* Good Tx loopback packets */ 2058 UPDATE_VF_STAT(E1000_VFGPTLBC, 2059 hw_stats->last_gptlbc, hw_stats->gptlbc); 2060 2061 /* Good Tx loopback octets */ 2062 UPDATE_VF_STAT(E1000_VFGOTLBC, 2063 hw_stats->last_gotlbc, hw_stats->gotlbc); 2064 } 2065 2066 static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 2067 struct rte_eth_xstat_name *xstats_names, 2068 __rte_unused unsigned limit) 2069 { 2070 unsigned i; 2071 2072 if (xstats_names != NULL) 2073 for (i = 0; i < IGBVF_NB_XSTATS; i++) { 2074 snprintf(xstats_names[i].name, 2075 sizeof(xstats_names[i].name), "%s", 2076 rte_igbvf_stats_strings[i].name); 2077 } 2078 return IGBVF_NB_XSTATS; 2079 } 2080 2081 static int 2082 eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 2083 unsigned n) 2084 { 2085 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2086 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) 2087 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2088 unsigned i; 2089 2090 if (n < IGBVF_NB_XSTATS) 2091 return IGBVF_NB_XSTATS; 2092 2093 
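/* As in the PF xstats path above, an undersized buffer only reports the
 * required array size; the VF counters below are read only when the
 * caller provided enough room.
 */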
igbvf_read_stats_registers(hw, hw_stats); 2094 2095 if (!xstats) 2096 return 0; 2097 2098 for (i = 0; i < IGBVF_NB_XSTATS; i++) { 2099 xstats[i].id = i; 2100 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 2101 rte_igbvf_stats_strings[i].offset); 2102 } 2103 2104 return IGBVF_NB_XSTATS; 2105 } 2106 2107 static int 2108 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 2109 { 2110 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2111 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) 2112 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2113 2114 igbvf_read_stats_registers(hw, hw_stats); 2115 2116 if (rte_stats == NULL) 2117 return -EINVAL; 2118 2119 rte_stats->ipackets = hw_stats->gprc; 2120 rte_stats->ibytes = hw_stats->gorc; 2121 rte_stats->opackets = hw_stats->gptc; 2122 rte_stats->obytes = hw_stats->gotc; 2123 return 0; 2124 } 2125 2126 static void 2127 eth_igbvf_stats_reset(struct rte_eth_dev *dev) 2128 { 2129 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*) 2130 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2131 2132 /* Sync HW register to the last stats */ 2133 eth_igbvf_stats_get(dev, NULL); 2134 2135 /* reset HW current stats*/ 2136 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) - 2137 offsetof(struct e1000_vf_stats, gprc)); 2138 } 2139 2140 static int 2141 eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 2142 size_t fw_size) 2143 { 2144 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2145 struct e1000_fw_version fw; 2146 int ret; 2147 2148 e1000_get_fw_version(hw, &fw); 2149 2150 switch (hw->mac.type) { 2151 case e1000_i210: 2152 case e1000_i211: 2153 if (!(e1000_get_flash_presence_i210(hw))) { 2154 ret = snprintf(fw_version, fw_size, 2155 "%2d.%2d-%d", 2156 fw.invm_major, fw.invm_minor, 2157 fw.invm_img_type); 2158 break; 2159 } 2160 /* fall through */ 2161 default: 2162 /* if option rom is valid, display its version too */ 2163 if (fw.or_valid) { 2164 ret = snprintf(fw_version, fw_size, 2165 "%d.%d, 0x%08x, %d.%d.%d", 2166 fw.eep_major, fw.eep_minor, fw.etrack_id, 2167 fw.or_major, fw.or_build, fw.or_patch); 2168 /* no option rom */ 2169 } else { 2170 if (fw.etrack_id != 0X0000) { 2171 ret = snprintf(fw_version, fw_size, 2172 "%d.%d, 0x%08x", 2173 fw.eep_major, fw.eep_minor, 2174 fw.etrack_id); 2175 } else { 2176 ret = snprintf(fw_version, fw_size, 2177 "%d.%d.%d", 2178 fw.eep_major, fw.eep_minor, 2179 fw.eep_build); 2180 } 2181 } 2182 break; 2183 } 2184 2185 ret += 1; /* add the size of '\0' */ 2186 if (fw_size < (u32)ret) 2187 return ret; 2188 else 2189 return 0; 2190 } 2191 2192 static void 2193 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 2194 { 2195 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2196 2197 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 2198 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. 
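0x3FFF is 16383 bytes, the largest value the 14-bit RLPML length field can hold.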
*/ 2199 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 2200 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); 2201 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | 2202 dev_info->rx_queue_offload_capa; 2203 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); 2204 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | 2205 dev_info->tx_queue_offload_capa; 2206 2207 switch (hw->mac.type) { 2208 case e1000_82575: 2209 dev_info->max_rx_queues = 4; 2210 dev_info->max_tx_queues = 4; 2211 dev_info->max_vmdq_pools = 0; 2212 break; 2213 2214 case e1000_82576: 2215 dev_info->max_rx_queues = 16; 2216 dev_info->max_tx_queues = 16; 2217 dev_info->max_vmdq_pools = ETH_8_POOLS; 2218 dev_info->vmdq_queue_num = 16; 2219 break; 2220 2221 case e1000_82580: 2222 dev_info->max_rx_queues = 8; 2223 dev_info->max_tx_queues = 8; 2224 dev_info->max_vmdq_pools = ETH_8_POOLS; 2225 dev_info->vmdq_queue_num = 8; 2226 break; 2227 2228 case e1000_i350: 2229 dev_info->max_rx_queues = 8; 2230 dev_info->max_tx_queues = 8; 2231 dev_info->max_vmdq_pools = ETH_8_POOLS; 2232 dev_info->vmdq_queue_num = 8; 2233 break; 2234 2235 case e1000_i354: 2236 dev_info->max_rx_queues = 8; 2237 dev_info->max_tx_queues = 8; 2238 break; 2239 2240 case e1000_i210: 2241 dev_info->max_rx_queues = 4; 2242 dev_info->max_tx_queues = 4; 2243 dev_info->max_vmdq_pools = 0; 2244 break; 2245 2246 case e1000_i211: 2247 dev_info->max_rx_queues = 2; 2248 dev_info->max_tx_queues = 2; 2249 dev_info->max_vmdq_pools = 0; 2250 break; 2251 2252 default: 2253 /* Should not happen */ 2254 break; 2255 } 2256 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t); 2257 dev_info->reta_size = ETH_RSS_RETA_SIZE_128; 2258 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL; 2259 2260 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2261 .rx_thresh = { 2262 .pthresh = IGB_DEFAULT_RX_PTHRESH, 2263 .hthresh = IGB_DEFAULT_RX_HTHRESH, 2264 .wthresh = IGB_DEFAULT_RX_WTHRESH, 2265 }, 2266 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, 2267 .rx_drop_en = 0, 2268 .offloads = 0, 2269 }; 2270 2271 dev_info->default_txconf = (struct rte_eth_txconf) { 2272 .tx_thresh = { 2273 .pthresh = IGB_DEFAULT_TX_PTHRESH, 2274 .hthresh = IGB_DEFAULT_TX_HTHRESH, 2275 .wthresh = IGB_DEFAULT_TX_WTHRESH, 2276 }, 2277 .offloads = 0, 2278 }; 2279 2280 dev_info->rx_desc_lim = rx_desc_lim; 2281 dev_info->tx_desc_lim = tx_desc_lim; 2282 2283 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | 2284 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | 2285 ETH_LINK_SPEED_1G; 2286 } 2287 2288 static const uint32_t * 2289 eth_igb_supported_ptypes_get(struct rte_eth_dev *dev) 2290 { 2291 static const uint32_t ptypes[] = { 2292 /* refers to igb_rxd_pkt_info_to_pkt_type() */ 2293 RTE_PTYPE_L2_ETHER, 2294 RTE_PTYPE_L3_IPV4, 2295 RTE_PTYPE_L3_IPV4_EXT, 2296 RTE_PTYPE_L3_IPV6, 2297 RTE_PTYPE_L3_IPV6_EXT, 2298 RTE_PTYPE_L4_TCP, 2299 RTE_PTYPE_L4_UDP, 2300 RTE_PTYPE_L4_SCTP, 2301 RTE_PTYPE_TUNNEL_IP, 2302 RTE_PTYPE_INNER_L3_IPV6, 2303 RTE_PTYPE_INNER_L3_IPV6_EXT, 2304 RTE_PTYPE_INNER_L4_TCP, 2305 RTE_PTYPE_INNER_L4_UDP, 2306 RTE_PTYPE_UNKNOWN 2307 }; 2308 2309 if (dev->rx_pkt_burst == eth_igb_recv_pkts || 2310 dev->rx_pkt_burst == eth_igb_recv_scattered_pkts) 2311 return ptypes; 2312 return NULL; 2313 } 2314 2315 static void 2316 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 2317 { 2318 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2319 2320 
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 2321 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ 2322 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 2323 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | 2324 DEV_TX_OFFLOAD_IPV4_CKSUM | 2325 DEV_TX_OFFLOAD_UDP_CKSUM | 2326 DEV_TX_OFFLOAD_TCP_CKSUM | 2327 DEV_TX_OFFLOAD_SCTP_CKSUM | 2328 DEV_TX_OFFLOAD_TCP_TSO; 2329 switch (hw->mac.type) { 2330 case e1000_vfadapt: 2331 dev_info->max_rx_queues = 2; 2332 dev_info->max_tx_queues = 2; 2333 break; 2334 case e1000_vfadapt_i350: 2335 dev_info->max_rx_queues = 1; 2336 dev_info->max_tx_queues = 1; 2337 break; 2338 default: 2339 /* Should not happen */ 2340 break; 2341 } 2342 2343 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); 2344 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | 2345 dev_info->rx_queue_offload_capa; 2346 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); 2347 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | 2348 dev_info->tx_queue_offload_capa; 2349 2350 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2351 .rx_thresh = { 2352 .pthresh = IGB_DEFAULT_RX_PTHRESH, 2353 .hthresh = IGB_DEFAULT_RX_HTHRESH, 2354 .wthresh = IGB_DEFAULT_RX_WTHRESH, 2355 }, 2356 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, 2357 .rx_drop_en = 0, 2358 .offloads = 0, 2359 }; 2360 2361 dev_info->default_txconf = (struct rte_eth_txconf) { 2362 .tx_thresh = { 2363 .pthresh = IGB_DEFAULT_TX_PTHRESH, 2364 .hthresh = IGB_DEFAULT_TX_HTHRESH, 2365 .wthresh = IGB_DEFAULT_TX_WTHRESH, 2366 }, 2367 .offloads = 0, 2368 }; 2369 2370 dev_info->rx_desc_lim = rx_desc_lim; 2371 dev_info->tx_desc_lim = tx_desc_lim; 2372 } 2373 2374 /* return 0 means link status changed, -1 means not changed */ 2375 static int 2376 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) 2377 { 2378 struct e1000_hw *hw = 2379 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2380 struct rte_eth_link link; 2381 int link_check, count; 2382 2383 link_check = 0; 2384 hw->mac.get_link_status = 1; 2385 2386 /* possible wait-to-complete in up to 9 seconds */ 2387 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) { 2388 /* Read the real link status */ 2389 switch (hw->phy.media_type) { 2390 case e1000_media_type_copper: 2391 /* Do the work to read phy */ 2392 e1000_check_for_link(hw); 2393 link_check = !hw->mac.get_link_status; 2394 break; 2395 2396 case e1000_media_type_fiber: 2397 e1000_check_for_link(hw); 2398 link_check = (E1000_READ_REG(hw, E1000_STATUS) & 2399 E1000_STATUS_LU); 2400 break; 2401 2402 case e1000_media_type_internal_serdes: 2403 e1000_check_for_link(hw); 2404 link_check = hw->mac.serdes_has_link; 2405 break; 2406 2407 /* VF device is type_unknown */ 2408 case e1000_media_type_unknown: 2409 eth_igbvf_link_update(hw); 2410 link_check = !hw->mac.get_link_status; 2411 break; 2412 2413 default: 2414 break; 2415 } 2416 if (link_check || wait_to_complete == 0) 2417 break; 2418 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL); 2419 } 2420 memset(&link, 0, sizeof(link)); 2421 2422 /* Now we check if a transition has happened */ 2423 if (link_check) { 2424 uint16_t duplex, speed; 2425 hw->mac.ops.get_link_up_info(hw, &speed, &duplex); 2426 link.link_duplex = (duplex == FULL_DUPLEX) ? 
2427 ETH_LINK_FULL_DUPLEX : 2428 ETH_LINK_HALF_DUPLEX; 2429 link.link_speed = speed; 2430 link.link_status = ETH_LINK_UP; 2431 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 2432 ETH_LINK_SPEED_FIXED); 2433 } else if (!link_check) { 2434 link.link_speed = 0; 2435 link.link_duplex = ETH_LINK_HALF_DUPLEX; 2436 link.link_status = ETH_LINK_DOWN; 2437 link.link_autoneg = ETH_LINK_FIXED; 2438 } 2439 2440 return rte_eth_linkstatus_set(dev, &link); 2441 } 2442 2443 /* 2444 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. 2445 * For ASF and Pass Through versions of f/w this means 2446 * that the driver is loaded. 2447 */ 2448 static void 2449 igb_hw_control_acquire(struct e1000_hw *hw) 2450 { 2451 uint32_t ctrl_ext; 2452 2453 /* Let firmware know the driver has taken over */ 2454 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2455 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2456 } 2457 2458 /* 2459 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit. 2460 * For ASF and Pass Through versions of f/w this means that the 2461 * driver is no longer loaded. 2462 */ 2463 static void 2464 igb_hw_control_release(struct e1000_hw *hw) 2465 { 2466 uint32_t ctrl_ext; 2467 2468 /* Let firmware taken over control of h/w */ 2469 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2470 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 2471 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2472 } 2473 2474 /* 2475 * Bit of a misnomer, what this really means is 2476 * to enable OS management of the system... aka 2477 * to disable special hardware management features. 2478 */ 2479 static void 2480 igb_init_manageability(struct e1000_hw *hw) 2481 { 2482 if (e1000_enable_mng_pass_thru(hw)) { 2483 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); 2484 uint32_t manc = E1000_READ_REG(hw, E1000_MANC); 2485 2486 /* disable hardware interception of ARP */ 2487 manc &= ~(E1000_MANC_ARP_EN); 2488 2489 /* enable receiving management packets to the host */ 2490 manc |= E1000_MANC_EN_MNG2HOST; 2491 manc2h |= 1 << 5; /* Mng Port 623 */ 2492 manc2h |= 1 << 6; /* Mng Port 664 */ 2493 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); 2494 E1000_WRITE_REG(hw, E1000_MANC, manc); 2495 } 2496 } 2497 2498 static void 2499 igb_release_manageability(struct e1000_hw *hw) 2500 { 2501 if (e1000_enable_mng_pass_thru(hw)) { 2502 uint32_t manc = E1000_READ_REG(hw, E1000_MANC); 2503 2504 manc |= E1000_MANC_ARP_EN; 2505 manc &= ~E1000_MANC_EN_MNG2HOST; 2506 2507 E1000_WRITE_REG(hw, E1000_MANC, manc); 2508 } 2509 } 2510 2511 static void 2512 eth_igb_promiscuous_enable(struct rte_eth_dev *dev) 2513 { 2514 struct e1000_hw *hw = 2515 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2516 uint32_t rctl; 2517 2518 rctl = E1000_READ_REG(hw, E1000_RCTL); 2519 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2520 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2521 } 2522 2523 static void 2524 eth_igb_promiscuous_disable(struct rte_eth_dev *dev) 2525 { 2526 struct e1000_hw *hw = 2527 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2528 uint32_t rctl; 2529 2530 rctl = E1000_READ_REG(hw, E1000_RCTL); 2531 rctl &= (~E1000_RCTL_UPE); 2532 if (dev->data->all_multicast == 1) 2533 rctl |= E1000_RCTL_MPE; 2534 else 2535 rctl &= (~E1000_RCTL_MPE); 2536 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2537 } 2538 2539 static void 2540 eth_igb_allmulticast_enable(struct rte_eth_dev *dev) 2541 { 2542 struct e1000_hw *hw = 2543 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2544 uint32_t rctl; 2545 2546 rctl = E1000_READ_REG(hw, E1000_RCTL); 2547 rctl |= E1000_RCTL_MPE; 2548 
E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2549 } 2550 2551 static void 2552 eth_igb_allmulticast_disable(struct rte_eth_dev *dev) 2553 { 2554 struct e1000_hw *hw = 2555 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2556 uint32_t rctl; 2557 2558 if (dev->data->promiscuous == 1) 2559 return; /* must remain in all_multicast mode */ 2560 rctl = E1000_READ_REG(hw, E1000_RCTL); 2561 rctl &= (~E1000_RCTL_MPE); 2562 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2563 } 2564 2565 static int 2566 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 2567 { 2568 struct e1000_hw *hw = 2569 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2570 struct e1000_vfta * shadow_vfta = 2571 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 2572 uint32_t vfta; 2573 uint32_t vid_idx; 2574 uint32_t vid_bit; 2575 2576 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & 2577 E1000_VFTA_ENTRY_MASK); 2578 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); 2579 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); 2580 if (on) 2581 vfta |= vid_bit; 2582 else 2583 vfta &= ~vid_bit; 2584 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); 2585 2586 /* update local VFTA copy */ 2587 shadow_vfta->vfta[vid_idx] = vfta; 2588 2589 return 0; 2590 } 2591 2592 static int 2593 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, 2594 enum rte_vlan_type vlan_type, 2595 uint16_t tpid) 2596 { 2597 struct e1000_hw *hw = 2598 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2599 uint32_t reg, qinq; 2600 2601 qinq = E1000_READ_REG(hw, E1000_CTRL_EXT); 2602 qinq &= E1000_CTRL_EXT_EXT_VLAN; 2603 2604 /* only outer TPID of double VLAN can be configured*/ 2605 if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) { 2606 reg = E1000_READ_REG(hw, E1000_VET); 2607 reg = (reg & (~E1000_VET_VET_EXT)) | 2608 ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT); 2609 E1000_WRITE_REG(hw, E1000_VET, reg); 2610 2611 return 0; 2612 } 2613 2614 /* all other TPID values are read-only*/ 2615 PMD_DRV_LOG(ERR, "Not supported"); 2616 2617 return -ENOTSUP; 2618 } 2619 2620 static void 2621 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev) 2622 { 2623 struct e1000_hw *hw = 2624 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2625 uint32_t reg; 2626 2627 /* Filter Table Disable */ 2628 reg = E1000_READ_REG(hw, E1000_RCTL); 2629 reg &= ~E1000_RCTL_CFIEN; 2630 reg &= ~E1000_RCTL_VFE; 2631 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2632 } 2633 2634 static void 2635 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2636 { 2637 struct e1000_hw *hw = 2638 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2639 struct e1000_vfta * shadow_vfta = 2640 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 2641 uint32_t reg; 2642 int i; 2643 2644 /* Filter Table Enable, CFI not used for packet acceptance */ 2645 reg = E1000_READ_REG(hw, E1000_RCTL); 2646 reg &= ~E1000_RCTL_CFIEN; 2647 reg |= E1000_RCTL_VFE; 2648 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2649 2650 /* restore VFTA table */ 2651 for (i = 0; i < IGB_VFTA_SIZE; i++) 2652 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); 2653 } 2654 2655 static void 2656 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev) 2657 { 2658 struct e1000_hw *hw = 2659 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2660 uint32_t reg; 2661 2662 /* VLAN Mode Disable */ 2663 reg = E1000_READ_REG(hw, E1000_CTRL); 2664 reg &= ~E1000_CTRL_VME; 2665 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2666 } 2667 2668 static void 2669 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev) 2670 { 2671 struct e1000_hw *hw = 2672 
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2673 uint32_t reg; 2674 2675 /* VLAN Mode Enable */ 2676 reg = E1000_READ_REG(hw, E1000_CTRL); 2677 reg |= E1000_CTRL_VME; 2678 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2679 } 2680 2681 static void 2682 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2683 { 2684 struct e1000_hw *hw = 2685 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2686 uint32_t reg; 2687 2688 /* CTRL_EXT: Extended VLAN */ 2689 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2690 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN; 2691 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2692 2693 /* Update maximum packet length */ 2694 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 2695 E1000_WRITE_REG(hw, E1000_RLPML, 2696 dev->data->dev_conf.rxmode.max_rx_pkt_len + 2697 VLAN_TAG_SIZE); 2698 } 2699 2700 static void 2701 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2702 { 2703 struct e1000_hw *hw = 2704 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2705 uint32_t reg; 2706 2707 /* CTRL_EXT: Extended VLAN */ 2708 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2709 reg |= E1000_CTRL_EXT_EXTEND_VLAN; 2710 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2711 2712 /* Update maximum packet length */ 2713 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 2714 E1000_WRITE_REG(hw, E1000_RLPML, 2715 dev->data->dev_conf.rxmode.max_rx_pkt_len + 2716 2 * VLAN_TAG_SIZE); 2717 } 2718 2719 static int 2720 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2721 { 2722 struct rte_eth_rxmode *rxmode; 2723 2724 rxmode = &dev->data->dev_conf.rxmode; 2725 if(mask & ETH_VLAN_STRIP_MASK){ 2726 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2727 igb_vlan_hw_strip_enable(dev); 2728 else 2729 igb_vlan_hw_strip_disable(dev); 2730 } 2731 2732 if(mask & ETH_VLAN_FILTER_MASK){ 2733 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 2734 igb_vlan_hw_filter_enable(dev); 2735 else 2736 igb_vlan_hw_filter_disable(dev); 2737 } 2738 2739 if(mask & ETH_VLAN_EXTEND_MASK){ 2740 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2741 igb_vlan_hw_extend_enable(dev); 2742 else 2743 igb_vlan_hw_extend_disable(dev); 2744 } 2745 2746 return 0; 2747 } 2748 2749 2750 /** 2751 * It enables the interrupt mask and then enable the interrupt. 2752 * 2753 * @param dev 2754 * Pointer to struct rte_eth_dev. 2755 * @param on 2756 * Enable or Disable 2757 * 2758 * @return 2759 * - On success, zero. 2760 * - On failure, a negative value. 2761 */ 2762 static int 2763 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 2764 { 2765 struct e1000_interrupt *intr = 2766 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2767 2768 if (on) 2769 intr->mask |= E1000_ICR_LSC; 2770 else 2771 intr->mask &= ~E1000_ICR_LSC; 2772 2773 return 0; 2774 } 2775 2776 /* It clears the interrupt causes and enables the interrupt. 2777 * It will be called once only during nic initialized. 2778 * 2779 * @param dev 2780 * Pointer to struct rte_eth_dev. 2781 * 2782 * @return 2783 * - On success, zero. 2784 * - On failure, a negative value. 2785 */ 2786 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev) 2787 { 2788 uint32_t mask, regval; 2789 struct e1000_hw *hw = 2790 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2791 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2792 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2793 int misc_shift = rte_intr_allow_others(intr_handle) ? 
1 : 0; 2794 struct rte_eth_dev_info dev_info; 2795 2796 memset(&dev_info, 0, sizeof(dev_info)); 2797 eth_igb_infos_get(dev, &dev_info); 2798 2799 mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift; 2800 regval = E1000_READ_REG(hw, E1000_EIMS); 2801 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); 2802 2803 return 0; 2804 } 2805 2806 /* 2807 * It reads ICR and gets interrupt causes, checks them and sets a bit flag 2808 * to update link status. 2809 * 2810 * @param dev 2811 * Pointer to struct rte_eth_dev. 2812 * 2813 * @return 2814 * - On success, zero. 2815 * - On failure, a negative value. 2816 */ 2817 static int 2818 eth_igb_interrupt_get_status(struct rte_eth_dev *dev) 2819 { 2820 uint32_t icr; 2821 struct e1000_hw *hw = 2822 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2823 struct e1000_interrupt *intr = 2824 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2825 2826 igb_intr_disable(dev); 2827 2828 /* clear-on-read NIC registers here */ 2829 icr = E1000_READ_REG(hw, E1000_ICR); 2830 2831 intr->flags = 0; 2832 if (icr & E1000_ICR_LSC) { 2833 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; 2834 } 2835 2836 if (icr & E1000_ICR_VMMB) 2837 intr->flags |= E1000_FLAG_MAILBOX; 2838 2839 return 0; 2840 } 2841 2842 /* 2843 * It executes link_update after knowing an interrupt is present. 2844 * 2845 * @param dev 2846 * Pointer to struct rte_eth_dev. 2847 * 2848 * @return 2849 * - On success, zero. 2850 * - On failure, a negative value. 2851 */ 2852 static int 2853 eth_igb_interrupt_action(struct rte_eth_dev *dev, 2854 struct rte_intr_handle *intr_handle) 2855 { 2856 struct e1000_hw *hw = 2857 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2858 struct e1000_interrupt *intr = 2859 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2860 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2861 struct rte_eth_link link; 2862 int ret; 2863 2864 if (intr->flags & E1000_FLAG_MAILBOX) { 2865 igb_pf_mbx_process(dev); 2866 intr->flags &= ~E1000_FLAG_MAILBOX; 2867 } 2868 2869 igb_intr_enable(dev); 2870 rte_intr_enable(intr_handle); 2871 2872 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) { 2873 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; 2874 2875 /* set get_link_status to check register later */ 2876 hw->mac.get_link_status = 1; 2877 ret = eth_igb_link_update(dev, 0); 2878 2879 /* check if link has changed */ 2880 if (ret < 0) 2881 return 0; 2882 2883 rte_eth_linkstatus_get(dev, &link); 2884 if (link.link_status) { 2885 PMD_INIT_LOG(INFO, 2886 " Port %d: Link Up - speed %u Mbps - %s", 2887 dev->data->port_id, 2888 (unsigned)link.link_speed, 2889 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 2890 "full-duplex" : "half-duplex"); 2891 } else { 2892 PMD_INIT_LOG(INFO, " Port %d: Link Down", 2893 dev->data->port_id); 2894 } 2895 2896 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", 2897 pci_dev->addr.domain, 2898 pci_dev->addr.bus, 2899 pci_dev->addr.devid, 2900 pci_dev->addr.function); 2901 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 2902 NULL); 2903 } 2904 2905 return 0; 2906 } 2907 2908 /** 2909 * Interrupt handler which shall be registered first. 2910 * 2911 * @param handle 2912 * Pointer to interrupt handle. 2913 * @param param 2914 * The address of parameter (struct rte_eth_dev *) registered before.
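 * The handler latches the interrupt cause via eth_igb_interrupt_get_status()
 * and then calls eth_igb_interrupt_action() to process mailbox and
 * link status change events.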
2915 * 2916 * @return 2917 * void 2918 */ 2919 static void 2920 eth_igb_interrupt_handler(void *param) 2921 { 2922 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2923 2924 eth_igb_interrupt_get_status(dev); 2925 eth_igb_interrupt_action(dev, dev->intr_handle); 2926 } 2927 2928 static int 2929 eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev) 2930 { 2931 uint32_t eicr; 2932 struct e1000_hw *hw = 2933 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2934 struct e1000_interrupt *intr = 2935 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2936 2937 igbvf_intr_disable(hw); 2938 2939 /* read-on-clear nic registers here */ 2940 eicr = E1000_READ_REG(hw, E1000_EICR); 2941 intr->flags = 0; 2942 2943 if (eicr == E1000_VTIVAR_MISC_MAILBOX) 2944 intr->flags |= E1000_FLAG_MAILBOX; 2945 2946 return 0; 2947 } 2948 2949 void igbvf_mbx_process(struct rte_eth_dev *dev) 2950 { 2951 struct e1000_hw *hw = 2952 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2953 struct e1000_mbx_info *mbx = &hw->mbx; 2954 u32 in_msg = 0; 2955 2956 /* peek the message first */ 2957 in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0)); 2958 2959 /* PF reset VF event */ 2960 if (in_msg == E1000_PF_CONTROL_MSG) { 2961 /* dummy mbx read to ack pf */ 2962 if (mbx->ops.read(hw, &in_msg, 1, 0)) 2963 return; 2964 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 2965 NULL); 2966 } 2967 } 2968 2969 static int 2970 eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle) 2971 { 2972 struct e1000_interrupt *intr = 2973 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2974 2975 if (intr->flags & E1000_FLAG_MAILBOX) { 2976 igbvf_mbx_process(dev); 2977 intr->flags &= ~E1000_FLAG_MAILBOX; 2978 } 2979 2980 igbvf_intr_enable(dev); 2981 rte_intr_enable(intr_handle); 2982 2983 return 0; 2984 } 2985 2986 static void 2987 eth_igbvf_interrupt_handler(void *param) 2988 { 2989 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2990 2991 eth_igbvf_interrupt_get_status(dev); 2992 eth_igbvf_interrupt_action(dev, dev->intr_handle); 2993 } 2994 2995 static int 2996 eth_igb_led_on(struct rte_eth_dev *dev) 2997 { 2998 struct e1000_hw *hw; 2999 3000 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3001 return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; 3002 } 3003 3004 static int 3005 eth_igb_led_off(struct rte_eth_dev *dev) 3006 { 3007 struct e1000_hw *hw; 3008 3009 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3010 return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; 3011 } 3012 3013 static int 3014 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3015 { 3016 struct e1000_hw *hw; 3017 uint32_t ctrl; 3018 int tx_pause; 3019 int rx_pause; 3020 3021 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3022 fc_conf->pause_time = hw->fc.pause_time; 3023 fc_conf->high_water = hw->fc.high_water; 3024 fc_conf->low_water = hw->fc.low_water; 3025 fc_conf->send_xon = hw->fc.send_xon; 3026 fc_conf->autoneg = hw->mac.autoneg; 3027 3028 /* 3029 * Return rx_pause and tx_pause status according to actual setting of 3030 * the TFCE and RFCE bits in the CTRL register. 
3031 */ 3032 ctrl = E1000_READ_REG(hw, E1000_CTRL); 3033 if (ctrl & E1000_CTRL_TFCE) 3034 tx_pause = 1; 3035 else 3036 tx_pause = 0; 3037 3038 if (ctrl & E1000_CTRL_RFCE) 3039 rx_pause = 1; 3040 else 3041 rx_pause = 0; 3042 3043 if (rx_pause && tx_pause) 3044 fc_conf->mode = RTE_FC_FULL; 3045 else if (rx_pause) 3046 fc_conf->mode = RTE_FC_RX_PAUSE; 3047 else if (tx_pause) 3048 fc_conf->mode = RTE_FC_TX_PAUSE; 3049 else 3050 fc_conf->mode = RTE_FC_NONE; 3051 3052 return 0; 3053 } 3054 3055 static int 3056 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3057 { 3058 struct e1000_hw *hw; 3059 int err; 3060 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { 3061 e1000_fc_none, 3062 e1000_fc_rx_pause, 3063 e1000_fc_tx_pause, 3064 e1000_fc_full 3065 }; 3066 uint32_t rx_buf_size; 3067 uint32_t max_high_water; 3068 uint32_t rctl; 3069 3070 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3071 if (fc_conf->autoneg != hw->mac.autoneg) 3072 return -ENOTSUP; 3073 rx_buf_size = igb_get_rx_buffer_size(hw); 3074 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 3075 3076 /* At least reserve one Ethernet frame for watermark */ 3077 max_high_water = rx_buf_size - ETHER_MAX_LEN; 3078 if ((fc_conf->high_water > max_high_water) || 3079 (fc_conf->high_water < fc_conf->low_water)) { 3080 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); 3081 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); 3082 return -EINVAL; 3083 } 3084 3085 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; 3086 hw->fc.pause_time = fc_conf->pause_time; 3087 hw->fc.high_water = fc_conf->high_water; 3088 hw->fc.low_water = fc_conf->low_water; 3089 hw->fc.send_xon = fc_conf->send_xon; 3090 3091 err = e1000_setup_link_generic(hw); 3092 if (err == E1000_SUCCESS) { 3093 3094 /* check if we want to forward MAC frames - driver doesn't have native 3095 * capability to do that, so we'll write the registers ourselves */ 3096 3097 rctl = E1000_READ_REG(hw, E1000_RCTL); 3098 3099 /* set or clear MFLCN.PMCF bit depending on configuration */ 3100 if (fc_conf->mac_ctrl_frame_fwd != 0) 3101 rctl |= E1000_RCTL_PMCF; 3102 else 3103 rctl &= ~E1000_RCTL_PMCF; 3104 3105 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 3106 E1000_WRITE_FLUSH(hw); 3107 3108 return 0; 3109 } 3110 3111 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); 3112 return -EIO; 3113 } 3114 3115 #define E1000_RAH_POOLSEL_SHIFT (18) 3116 static int 3117 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 3118 uint32_t index, uint32_t pool) 3119 { 3120 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3121 uint32_t rah; 3122 3123 e1000_rar_set(hw, mac_addr->addr_bytes, index); 3124 rah = E1000_READ_REG(hw, E1000_RAH(index)); 3125 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool)); 3126 E1000_WRITE_REG(hw, E1000_RAH(index), rah); 3127 return 0; 3128 } 3129 3130 static void 3131 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) 3132 { 3133 uint8_t addr[ETHER_ADDR_LEN]; 3134 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3135 3136 memset(addr, 0, sizeof(addr)); 3137 3138 e1000_rar_set(hw, addr, index); 3139 } 3140 3141 static int 3142 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, 3143 struct ether_addr *addr) 3144 { 3145 eth_igb_rar_clear(dev, 0); 3146 eth_igb_rar_set(dev, (void *)addr, 0, 0); 3147 3148 return 0; 3149 } 3150 /* 3151 * Virtual Function operations 3152 */ 3153 static void 3154 igbvf_intr_disable(struct e1000_hw *hw) 3155 
{ 3156 PMD_INIT_FUNC_TRACE(); 3157 3158 /* Clear interrupt mask to stop from interrupts being generated */ 3159 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF); 3160 3161 E1000_WRITE_FLUSH(hw); 3162 } 3163 3164 static void 3165 igbvf_stop_adapter(struct rte_eth_dev *dev) 3166 { 3167 u32 reg_val; 3168 u16 i; 3169 struct rte_eth_dev_info dev_info; 3170 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3171 3172 memset(&dev_info, 0, sizeof(dev_info)); 3173 eth_igbvf_infos_get(dev, &dev_info); 3174 3175 /* Clear interrupt mask to stop from interrupts being generated */ 3176 igbvf_intr_disable(hw); 3177 3178 /* Clear any pending interrupts, flush previous writes */ 3179 E1000_READ_REG(hw, E1000_EICR); 3180 3181 /* Disable the transmit unit. Each queue must be disabled. */ 3182 for (i = 0; i < dev_info.max_tx_queues; i++) 3183 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH); 3184 3185 /* Disable the receive unit by stopping each queue */ 3186 for (i = 0; i < dev_info.max_rx_queues; i++) { 3187 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i)); 3188 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE; 3189 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val); 3190 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE) 3191 ; 3192 } 3193 3194 /* flush all queues disables */ 3195 E1000_WRITE_FLUSH(hw); 3196 msec_delay(2); 3197 } 3198 3199 static int eth_igbvf_link_update(struct e1000_hw *hw) 3200 { 3201 struct e1000_mbx_info *mbx = &hw->mbx; 3202 struct e1000_mac_info *mac = &hw->mac; 3203 int ret_val = E1000_SUCCESS; 3204 3205 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf"); 3206 3207 /* 3208 * We only want to run this if there has been a rst asserted. 3209 * in this case that could mean a link change, device reset, 3210 * or a virtual function reset 3211 */ 3212 3213 /* If we were hit with a reset or timeout drop the link */ 3214 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout) 3215 mac->get_link_status = TRUE; 3216 3217 if (!mac->get_link_status) 3218 goto out; 3219 3220 /* if link status is down no point in checking to see if pf is up */ 3221 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) 3222 goto out; 3223 3224 /* if we passed all the tests above then the link is up and we no 3225 * longer need to check for link */ 3226 mac->get_link_status = FALSE; 3227 3228 out: 3229 return ret_val; 3230 } 3231 3232 3233 static int 3234 igbvf_dev_configure(struct rte_eth_dev *dev) 3235 { 3236 struct rte_eth_conf* conf = &dev->data->dev_conf; 3237 3238 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 3239 dev->data->port_id); 3240 3241 /* 3242 * VF has no ability to enable/disable HW CRC 3243 * Keep the persistent behavior the same as Host PF 3244 */ 3245 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC 3246 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { 3247 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 3248 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; 3249 } 3250 #else 3251 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { 3252 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 3253 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; 3254 } 3255 #endif 3256 3257 return 0; 3258 } 3259 3260 static int 3261 igbvf_dev_start(struct rte_eth_dev *dev) 3262 { 3263 struct e1000_hw *hw = 3264 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3265 struct e1000_adapter *adapter = 3266 E1000_DEV_PRIVATE(dev->data->dev_private); 3267 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3268 struct rte_intr_handle *intr_handle = 
&pci_dev->intr_handle; 3269 int ret; 3270 uint32_t intr_vector = 0; 3271 3272 PMD_INIT_FUNC_TRACE(); 3273 3274 hw->mac.ops.reset_hw(hw); 3275 adapter->stopped = 0; 3276 3277 /* Set all vfta */ 3278 igbvf_set_vfta_all(dev,1); 3279 3280 eth_igbvf_tx_init(dev); 3281 3282 /* This can fail when allocating mbufs for descriptor rings */ 3283 ret = eth_igbvf_rx_init(dev); 3284 if (ret) { 3285 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 3286 igb_dev_clear_queues(dev); 3287 return ret; 3288 } 3289 3290 /* check and configure queue intr-vector mapping */ 3291 if (rte_intr_cap_multiple(intr_handle) && 3292 dev->data->dev_conf.intr_conf.rxq) { 3293 intr_vector = dev->data->nb_rx_queues; 3294 ret = rte_intr_efd_enable(intr_handle, intr_vector); 3295 if (ret) 3296 return ret; 3297 } 3298 3299 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 3300 intr_handle->intr_vec = 3301 rte_zmalloc("intr_vec", 3302 dev->data->nb_rx_queues * sizeof(int), 0); 3303 if (!intr_handle->intr_vec) { 3304 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 3305 " intr_vec", dev->data->nb_rx_queues); 3306 return -ENOMEM; 3307 } 3308 } 3309 3310 eth_igbvf_configure_msix_intr(dev); 3311 3312 /* enable uio/vfio intr/eventfd mapping */ 3313 rte_intr_enable(intr_handle); 3314 3315 /* resume enabled intr since hw reset */ 3316 igbvf_intr_enable(dev); 3317 3318 return 0; 3319 } 3320 3321 static void 3322 igbvf_dev_stop(struct rte_eth_dev *dev) 3323 { 3324 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3325 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3326 3327 PMD_INIT_FUNC_TRACE(); 3328 3329 igbvf_stop_adapter(dev); 3330 3331 /* 3332 * Clear what we set, but we still keep shadow_vfta to 3333 * restore after device starts 3334 */ 3335 igbvf_set_vfta_all(dev,0); 3336 3337 igb_dev_clear_queues(dev); 3338 3339 /* disable intr eventfd mapping */ 3340 rte_intr_disable(intr_handle); 3341 3342 /* Clean datapath event and queue/vec mapping */ 3343 rte_intr_efd_disable(intr_handle); 3344 if (intr_handle->intr_vec) { 3345 rte_free(intr_handle->intr_vec); 3346 intr_handle->intr_vec = NULL; 3347 } 3348 } 3349 3350 static void 3351 igbvf_dev_close(struct rte_eth_dev *dev) 3352 { 3353 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3354 struct e1000_adapter *adapter = 3355 E1000_DEV_PRIVATE(dev->data->dev_private); 3356 struct ether_addr addr; 3357 3358 PMD_INIT_FUNC_TRACE(); 3359 3360 e1000_reset_hw(hw); 3361 3362 igbvf_dev_stop(dev); 3363 adapter->stopped = 1; 3364 igb_dev_free_queues(dev); 3365 3366 /** 3367 * reprogram the RAR with a zero mac address, 3368 * to ensure that the VF traffic goes to the PF 3369 * after stop, close and detach of the VF. 
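 * The all-zero address is built with memset() below and written through
 * igbvf_default_mac_addr_set(), i.e. the MAC layer's rar_set operation.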
3370 **/ 3371 3372 memset(&addr, 0, sizeof(addr)); 3373 igbvf_default_mac_addr_set(dev, &addr); 3374 } 3375 3376 static void 3377 igbvf_promiscuous_enable(struct rte_eth_dev *dev) 3378 { 3379 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3380 3381 /* Set both unicast and multicast promisc */ 3382 e1000_promisc_set_vf(hw, e1000_promisc_enabled); 3383 } 3384 3385 static void 3386 igbvf_promiscuous_disable(struct rte_eth_dev *dev) 3387 { 3388 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3389 3390 /* If in allmulticast mode leave multicast promisc */ 3391 if (dev->data->all_multicast == 1) 3392 e1000_promisc_set_vf(hw, e1000_promisc_multicast); 3393 else 3394 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 3395 } 3396 3397 static void 3398 igbvf_allmulticast_enable(struct rte_eth_dev *dev) 3399 { 3400 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3401 3402 /* In promiscuous mode multicast promisc already set */ 3403 if (dev->data->promiscuous == 0) 3404 e1000_promisc_set_vf(hw, e1000_promisc_multicast); 3405 } 3406 3407 static void 3408 igbvf_allmulticast_disable(struct rte_eth_dev *dev) 3409 { 3410 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3411 3412 /* In promiscuous mode leave multicast promisc enabled */ 3413 if (dev->data->promiscuous == 0) 3414 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 3415 } 3416 3417 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on) 3418 { 3419 struct e1000_mbx_info *mbx = &hw->mbx; 3420 uint32_t msgbuf[2]; 3421 s32 err; 3422 3423 /* After set vlan, vlan strip will also be enabled in igb driver*/ 3424 msgbuf[0] = E1000_VF_SET_VLAN; 3425 msgbuf[1] = vid; 3426 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ 3427 if (on) 3428 msgbuf[0] |= E1000_VF_SET_VLAN_ADD; 3429 3430 err = mbx->ops.write_posted(hw, msgbuf, 2, 0); 3431 if (err) 3432 goto mbx_err; 3433 3434 err = mbx->ops.read_posted(hw, msgbuf, 2, 0); 3435 if (err) 3436 goto mbx_err; 3437 3438 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; 3439 if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)) 3440 err = -EINVAL; 3441 3442 mbx_err: 3443 return err; 3444 } 3445 3446 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on) 3447 { 3448 struct e1000_hw *hw = 3449 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3450 struct e1000_vfta * shadow_vfta = 3451 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 3452 int i = 0, j = 0, vfta = 0, mask = 1; 3453 3454 for (i = 0; i < IGB_VFTA_SIZE; i++){ 3455 vfta = shadow_vfta->vfta[i]; 3456 if(vfta){ 3457 mask = 1; 3458 for (j = 0; j < 32; j++){ 3459 if(vfta & mask) 3460 igbvf_set_vfta(hw, 3461 (uint16_t)((i<<5)+j), on); 3462 mask<<=1; 3463 } 3464 } 3465 } 3466 3467 } 3468 3469 static int 3470 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 3471 { 3472 struct e1000_hw *hw = 3473 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3474 struct e1000_vfta * shadow_vfta = 3475 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 3476 uint32_t vid_idx = 0; 3477 uint32_t vid_bit = 0; 3478 int ret = 0; 3479 3480 PMD_INIT_FUNC_TRACE(); 3481 3482 /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/ 3483 ret = igbvf_set_vfta(hw, vlan_id, !!on); 3484 if(ret){ 3485 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 3486 return ret; 3487 } 3488 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 3489 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 3490 3491 /*Save what we set and retore it after device reset*/ 3492 if 
(on) 3493 shadow_vfta->vfta[vid_idx] |= vid_bit; 3494 else 3495 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 3496 3497 return 0; 3498 } 3499 3500 static int 3501 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) 3502 { 3503 struct e1000_hw *hw = 3504 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3505 3506 /* index is not used by rar_set() */ 3507 hw->mac.ops.rar_set(hw, (void *)addr, 0); 3508 return 0; 3509 } 3510 3511 3512 static int 3513 eth_igb_rss_reta_update(struct rte_eth_dev *dev, 3514 struct rte_eth_rss_reta_entry64 *reta_conf, 3515 uint16_t reta_size) 3516 { 3517 uint8_t i, j, mask; 3518 uint32_t reta, r; 3519 uint16_t idx, shift; 3520 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3521 3522 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3523 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3524 "(%d) doesn't match the number hardware can supported " 3525 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); 3526 return -EINVAL; 3527 } 3528 3529 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { 3530 idx = i / RTE_RETA_GROUP_SIZE; 3531 shift = i % RTE_RETA_GROUP_SIZE; 3532 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3533 IGB_4_BIT_MASK); 3534 if (!mask) 3535 continue; 3536 if (mask == IGB_4_BIT_MASK) 3537 r = 0; 3538 else 3539 r = E1000_READ_REG(hw, E1000_RETA(i >> 2)); 3540 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) { 3541 if (mask & (0x1 << j)) 3542 reta |= reta_conf[idx].reta[shift + j] << 3543 (CHAR_BIT * j); 3544 else 3545 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j)); 3546 } 3547 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta); 3548 } 3549 3550 return 0; 3551 } 3552 3553 static int 3554 eth_igb_rss_reta_query(struct rte_eth_dev *dev, 3555 struct rte_eth_rss_reta_entry64 *reta_conf, 3556 uint16_t reta_size) 3557 { 3558 uint8_t i, j, mask; 3559 uint32_t reta; 3560 uint16_t idx, shift; 3561 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3562 3563 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3564 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3565 "(%d) doesn't match the number hardware can supported " 3566 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); 3567 return -EINVAL; 3568 } 3569 3570 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { 3571 idx = i / RTE_RETA_GROUP_SIZE; 3572 shift = i % RTE_RETA_GROUP_SIZE; 3573 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3574 IGB_4_BIT_MASK); 3575 if (!mask) 3576 continue; 3577 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2)); 3578 for (j = 0; j < IGB_4_BIT_WIDTH; j++) { 3579 if (mask & (0x1 << j)) 3580 reta_conf[idx].reta[shift + j] = 3581 ((reta >> (CHAR_BIT * j)) & 3582 IGB_8_BIT_MASK); 3583 } 3584 } 3585 3586 return 0; 3587 } 3588 3589 int 3590 eth_igb_syn_filter_set(struct rte_eth_dev *dev, 3591 struct rte_eth_syn_filter *filter, 3592 bool add) 3593 { 3594 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3595 struct e1000_filter_info *filter_info = 3596 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3597 uint32_t synqf, rfctl; 3598 3599 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) 3600 return -EINVAL; 3601 3602 synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); 3603 3604 if (add) { 3605 if (synqf & E1000_SYN_FILTER_ENABLE) 3606 return -EINVAL; 3607 3608 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) & 3609 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE); 3610 3611 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 3612 if (filter->hig_pri) 3613 rfctl |= E1000_RFCTL_SYNQFP; 3614 else 3615 rfctl &= 
~E1000_RFCTL_SYNQFP; 3616 3617 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); 3618 } else { 3619 if (!(synqf & E1000_SYN_FILTER_ENABLE)) 3620 return -ENOENT; 3621 synqf = 0; 3622 } 3623 3624 filter_info->syn_info = synqf; 3625 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); 3626 E1000_WRITE_FLUSH(hw); 3627 return 0; 3628 } 3629 3630 static int 3631 eth_igb_syn_filter_get(struct rte_eth_dev *dev, 3632 struct rte_eth_syn_filter *filter) 3633 { 3634 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3635 uint32_t synqf, rfctl; 3636 3637 synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); 3638 if (synqf & E1000_SYN_FILTER_ENABLE) { 3639 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 3640 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0; 3641 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >> 3642 E1000_SYN_FILTER_QUEUE_SHIFT); 3643 return 0; 3644 } 3645 3646 return -ENOENT; 3647 } 3648 3649 static int 3650 eth_igb_syn_filter_handle(struct rte_eth_dev *dev, 3651 enum rte_filter_op filter_op, 3652 void *arg) 3653 { 3654 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3655 int ret; 3656 3657 MAC_TYPE_FILTER_SUP(hw->mac.type); 3658 3659 if (filter_op == RTE_ETH_FILTER_NOP) 3660 return 0; 3661 3662 if (arg == NULL) { 3663 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 3664 filter_op); 3665 return -EINVAL; 3666 } 3667 3668 switch (filter_op) { 3669 case RTE_ETH_FILTER_ADD: 3670 ret = eth_igb_syn_filter_set(dev, 3671 (struct rte_eth_syn_filter *)arg, 3672 TRUE); 3673 break; 3674 case RTE_ETH_FILTER_DELETE: 3675 ret = eth_igb_syn_filter_set(dev, 3676 (struct rte_eth_syn_filter *)arg, 3677 FALSE); 3678 break; 3679 case RTE_ETH_FILTER_GET: 3680 ret = eth_igb_syn_filter_get(dev, 3681 (struct rte_eth_syn_filter *)arg); 3682 break; 3683 default: 3684 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); 3685 ret = -EINVAL; 3686 break; 3687 } 3688 3689 return ret; 3690 } 3691 3692 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ 3693 static inline int 3694 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, 3695 struct e1000_2tuple_filter_info *filter_info) 3696 { 3697 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) 3698 return -EINVAL; 3699 if (filter->priority > E1000_2TUPLE_MAX_PRI) 3700 return -EINVAL; /* filter index is out of range. */ 3701 if (filter->tcp_flags > TCP_FLAG_ALL) 3702 return -EINVAL; /* flags is invalid. 
*/ 3703 3704 switch (filter->dst_port_mask) { 3705 case UINT16_MAX: 3706 filter_info->dst_port_mask = 0; 3707 filter_info->dst_port = filter->dst_port; 3708 break; 3709 case 0: 3710 filter_info->dst_port_mask = 1; 3711 break; 3712 default: 3713 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 3714 return -EINVAL; 3715 } 3716 3717 switch (filter->proto_mask) { 3718 case UINT8_MAX: 3719 filter_info->proto_mask = 0; 3720 filter_info->proto = filter->proto; 3721 break; 3722 case 0: 3723 filter_info->proto_mask = 1; 3724 break; 3725 default: 3726 PMD_DRV_LOG(ERR, "invalid protocol mask."); 3727 return -EINVAL; 3728 } 3729 3730 filter_info->priority = (uint8_t)filter->priority; 3731 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) 3732 filter_info->tcp_flags = filter->tcp_flags; 3733 else 3734 filter_info->tcp_flags = 0; 3735 3736 return 0; 3737 } 3738 3739 static inline struct e1000_2tuple_filter * 3740 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list, 3741 struct e1000_2tuple_filter_info *key) 3742 { 3743 struct e1000_2tuple_filter *it; 3744 3745 TAILQ_FOREACH(it, filter_list, entries) { 3746 if (memcmp(key, &it->filter_info, 3747 sizeof(struct e1000_2tuple_filter_info)) == 0) { 3748 return it; 3749 } 3750 } 3751 return NULL; 3752 } 3753 3754 /* inject a igb 2tuple filter to HW */ 3755 static inline void 3756 igb_inject_2uple_filter(struct rte_eth_dev *dev, 3757 struct e1000_2tuple_filter *filter) 3758 { 3759 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3760 uint32_t ttqf = E1000_TTQF_DISABLE_MASK; 3761 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP; 3762 int i; 3763 3764 i = filter->index; 3765 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); 3766 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ 3767 imir |= E1000_IMIR_PORT_BP; 3768 else 3769 imir &= ~E1000_IMIR_PORT_BP; 3770 3771 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; 3772 3773 ttqf |= E1000_TTQF_QUEUE_ENABLE; 3774 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT); 3775 ttqf |= (uint32_t)(filter->filter_info.proto & 3776 E1000_TTQF_PROTOCOL_MASK); 3777 if (filter->filter_info.proto_mask == 0) 3778 ttqf &= ~E1000_TTQF_MASK_ENABLE; 3779 3780 /* tcp flags bits setting. */ 3781 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { 3782 if (filter->filter_info.tcp_flags & TCP_URG_FLAG) 3783 imir_ext |= E1000_IMIREXT_CTRL_URG; 3784 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) 3785 imir_ext |= E1000_IMIREXT_CTRL_ACK; 3786 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) 3787 imir_ext |= E1000_IMIREXT_CTRL_PSH; 3788 if (filter->filter_info.tcp_flags & TCP_RST_FLAG) 3789 imir_ext |= E1000_IMIREXT_CTRL_RST; 3790 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) 3791 imir_ext |= E1000_IMIREXT_CTRL_SYN; 3792 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) 3793 imir_ext |= E1000_IMIREXT_CTRL_FIN; 3794 } else { 3795 imir_ext |= E1000_IMIREXT_CTRL_BP; 3796 } 3797 E1000_WRITE_REG(hw, E1000_IMIR(i), imir); 3798 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf); 3799 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); 3800 } 3801 3802 /* 3803 * igb_add_2tuple_filter - add a 2tuple filter 3804 * 3805 * @param 3806 * dev: Pointer to struct rte_eth_dev. 3807 * ntuple_filter: ponter to the filter that will be added. 3808 * 3809 * @return 3810 * - On success, zero. 3811 * - On failure, a negative value. 
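 *
 * Minimal usage sketch (illustrative only: "port_id", "dst_port" and
 * "rx_queue" are hypothetical values, and applications normally reach this
 * path through rte_eth_dev_filter_ctrl() with RTE_ETH_FILTER_NTUPLE rather
 * than by calling this helper directly):
 *
 *   struct rte_eth_ntuple_filter f = {
 *       .flags = RTE_2TUPLE_FLAGS,
 *       .dst_port = dst_port,
 *       .dst_port_mask = UINT16_MAX,
 *       .proto = IPPROTO_TCP,
 *       .proto_mask = UINT8_MAX,
 *       .priority = 1,
 *       .queue = rx_queue,
 *   };
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *                           RTE_ETH_FILTER_ADD, &f);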
3812 */ 3813 static int 3814 igb_add_2tuple_filter(struct rte_eth_dev *dev, 3815 struct rte_eth_ntuple_filter *ntuple_filter) 3816 { 3817 struct e1000_filter_info *filter_info = 3818 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3819 struct e1000_2tuple_filter *filter; 3820 int i, ret; 3821 3822 filter = rte_zmalloc("e1000_2tuple_filter", 3823 sizeof(struct e1000_2tuple_filter), 0); 3824 if (filter == NULL) 3825 return -ENOMEM; 3826 3827 ret = ntuple_filter_to_2tuple(ntuple_filter, 3828 &filter->filter_info); 3829 if (ret < 0) { 3830 rte_free(filter); 3831 return ret; 3832 } 3833 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list, 3834 &filter->filter_info) != NULL) { 3835 PMD_DRV_LOG(ERR, "filter exists."); 3836 rte_free(filter); 3837 return -EEXIST; 3838 } 3839 filter->queue = ntuple_filter->queue; 3840 3841 /* 3842 * look for an unused 2tuple filter index, 3843 * and insert the filter to list. 3844 */ 3845 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) { 3846 if (!(filter_info->twotuple_mask & (1 << i))) { 3847 filter_info->twotuple_mask |= 1 << i; 3848 filter->index = i; 3849 TAILQ_INSERT_TAIL(&filter_info->twotuple_list, 3850 filter, 3851 entries); 3852 break; 3853 } 3854 } 3855 if (i >= E1000_MAX_TTQF_FILTERS) { 3856 PMD_DRV_LOG(ERR, "2tuple filters are full."); 3857 rte_free(filter); 3858 return -ENOSYS; 3859 } 3860 3861 igb_inject_2uple_filter(dev, filter); 3862 return 0; 3863 } 3864 3865 int 3866 igb_delete_2tuple_filter(struct rte_eth_dev *dev, 3867 struct e1000_2tuple_filter *filter) 3868 { 3869 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3870 struct e1000_filter_info *filter_info = 3871 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3872 3873 filter_info->twotuple_mask &= ~(1 << filter->index); 3874 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); 3875 rte_free(filter); 3876 3877 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); 3878 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); 3879 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); 3880 return 0; 3881 } 3882 3883 /* 3884 * igb_remove_2tuple_filter - remove a 2tuple filter 3885 * 3886 * @param 3887 * dev: Pointer to struct rte_eth_dev. 3888 * ntuple_filter: ponter to the filter that will be removed. 3889 * 3890 * @return 3891 * - On success, zero. 3892 * - On failure, a negative value. 
 */
static int
igb_remove_2tuple_filter(struct rte_eth_dev *dev,
            struct rte_eth_ntuple_filter *ntuple_filter)
{
    struct e1000_filter_info *filter_info =
        E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
    struct e1000_2tuple_filter_info filter_2tuple;
    struct e1000_2tuple_filter *filter;
    int ret;

    memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
    ret = ntuple_filter_to_2tuple(ntuple_filter,
                      &filter_2tuple);
    if (ret < 0)
        return ret;

    filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
                      &filter_2tuple);
    if (filter == NULL) {
        PMD_DRV_LOG(ERR, "filter doesn't exist.");
        return -ENOENT;
    }

    igb_delete_2tuple_filter(dev, filter);

    return 0;
}

/* inject an igb flex filter to HW */
static inline void
igb_inject_flex_filter(struct rte_eth_dev *dev,
            struct e1000_flex_filter *filter)
{
    struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t wufc, queueing;
    uint32_t reg_off;
    uint8_t i, j = 0;

    wufc = E1000_READ_REG(hw, E1000_WUFC);
    if (filter->index < E1000_MAX_FHFT)
        reg_off = E1000_FHFT(filter->index);
    else
        reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);

    E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
            (E1000_WUFC_FLX0 << filter->index));
    queueing = filter->filter_info.len |
           (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
           (filter->filter_info.priority <<
            E1000_FHFT_QUEUEING_PRIO_SHIFT);
    E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
            queueing);

    for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
        E1000_WRITE_REG(hw, reg_off,
                filter->filter_info.dwords[j]);
        reg_off += sizeof(uint32_t);
        E1000_WRITE_REG(hw, reg_off,
                filter->filter_info.dwords[++j]);
        reg_off += sizeof(uint32_t);
        E1000_WRITE_REG(hw, reg_off,
                (uint32_t)filter->filter_info.mask[i]);
        reg_off += sizeof(uint32_t) * 2;
        ++j;
    }
}

static inline struct e1000_flex_filter *
eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
            struct e1000_flex_filter_info *key)
{
    struct e1000_flex_filter *it;

    TAILQ_FOREACH(it, filter_list, entries) {
        if (memcmp(key, &it->filter_info,
            sizeof(struct e1000_flex_filter_info)) == 0)
            return it;
    }

    return NULL;
}

/* remove a flex byte filter
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter to be removed.
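 *
 * Clears the FHFT register block selected by filter->index, drops the
 * corresponding WUFC flex-enable bit, unlinks the node from flex_list and
 * frees it.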
3980 */ 3981 void 3982 igb_remove_flex_filter(struct rte_eth_dev *dev, 3983 struct e1000_flex_filter *filter) 3984 { 3985 struct e1000_filter_info *filter_info = 3986 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3987 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3988 uint32_t wufc, i; 3989 uint32_t reg_off; 3990 3991 wufc = E1000_READ_REG(hw, E1000_WUFC); 3992 if (filter->index < E1000_MAX_FHFT) 3993 reg_off = E1000_FHFT(filter->index); 3994 else 3995 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT); 3996 3997 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++) 3998 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0); 3999 4000 E1000_WRITE_REG(hw, E1000_WUFC, wufc & 4001 (~(E1000_WUFC_FLX0 << filter->index))); 4002 4003 filter_info->flex_mask &= ~(1 << filter->index); 4004 TAILQ_REMOVE(&filter_info->flex_list, filter, entries); 4005 rte_free(filter); 4006 } 4007 4008 int 4009 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, 4010 struct rte_eth_flex_filter *filter, 4011 bool add) 4012 { 4013 struct e1000_filter_info *filter_info = 4014 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4015 struct e1000_flex_filter *flex_filter, *it; 4016 uint32_t mask; 4017 uint8_t shift, i; 4018 4019 flex_filter = rte_zmalloc("e1000_flex_filter", 4020 sizeof(struct e1000_flex_filter), 0); 4021 if (flex_filter == NULL) 4022 return -ENOMEM; 4023 4024 flex_filter->filter_info.len = filter->len; 4025 flex_filter->filter_info.priority = filter->priority; 4026 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len); 4027 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) { 4028 mask = 0; 4029 /* reverse bits in flex filter's mask*/ 4030 for (shift = 0; shift < CHAR_BIT; shift++) { 4031 if (filter->mask[i] & (0x01 << shift)) 4032 mask |= (0x80 >> shift); 4033 } 4034 flex_filter->filter_info.mask[i] = mask; 4035 } 4036 4037 it = eth_igb_flex_filter_lookup(&filter_info->flex_list, 4038 &flex_filter->filter_info); 4039 if (it == NULL && !add) { 4040 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4041 rte_free(flex_filter); 4042 return -ENOENT; 4043 } 4044 if (it != NULL && add) { 4045 PMD_DRV_LOG(ERR, "filter exists."); 4046 rte_free(flex_filter); 4047 return -EEXIST; 4048 } 4049 4050 if (add) { 4051 flex_filter->queue = filter->queue; 4052 /* 4053 * look for an unused flex filter index 4054 * and insert the filter into the list. 
4055 */ 4056 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) { 4057 if (!(filter_info->flex_mask & (1 << i))) { 4058 filter_info->flex_mask |= 1 << i; 4059 flex_filter->index = i; 4060 TAILQ_INSERT_TAIL(&filter_info->flex_list, 4061 flex_filter, 4062 entries); 4063 break; 4064 } 4065 } 4066 if (i >= E1000_MAX_FLEX_FILTERS) { 4067 PMD_DRV_LOG(ERR, "flex filters are full."); 4068 rte_free(flex_filter); 4069 return -ENOSYS; 4070 } 4071 4072 igb_inject_flex_filter(dev, flex_filter); 4073 4074 } else { 4075 igb_remove_flex_filter(dev, it); 4076 rte_free(flex_filter); 4077 } 4078 4079 return 0; 4080 } 4081 4082 static int 4083 eth_igb_get_flex_filter(struct rte_eth_dev *dev, 4084 struct rte_eth_flex_filter *filter) 4085 { 4086 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4087 struct e1000_filter_info *filter_info = 4088 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4089 struct e1000_flex_filter flex_filter, *it; 4090 uint32_t wufc, queueing, wufc_en = 0; 4091 4092 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter)); 4093 flex_filter.filter_info.len = filter->len; 4094 flex_filter.filter_info.priority = filter->priority; 4095 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len); 4096 memcpy(flex_filter.filter_info.mask, filter->mask, 4097 RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT); 4098 4099 it = eth_igb_flex_filter_lookup(&filter_info->flex_list, 4100 &flex_filter.filter_info); 4101 if (it == NULL) { 4102 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4103 return -ENOENT; 4104 } 4105 4106 wufc = E1000_READ_REG(hw, E1000_WUFC); 4107 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index); 4108 4109 if ((wufc & wufc_en) == wufc_en) { 4110 uint32_t reg_off = 0; 4111 if (it->index < E1000_MAX_FHFT) 4112 reg_off = E1000_FHFT(it->index); 4113 else 4114 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT); 4115 4116 queueing = E1000_READ_REG(hw, 4117 reg_off + E1000_FHFT_QUEUEING_OFFSET); 4118 filter->len = queueing & E1000_FHFT_QUEUEING_LEN; 4119 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >> 4120 E1000_FHFT_QUEUEING_PRIO_SHIFT; 4121 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >> 4122 E1000_FHFT_QUEUEING_QUEUE_SHIFT; 4123 return 0; 4124 } 4125 return -ENOENT; 4126 } 4127 4128 static int 4129 eth_igb_flex_filter_handle(struct rte_eth_dev *dev, 4130 enum rte_filter_op filter_op, 4131 void *arg) 4132 { 4133 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4134 struct rte_eth_flex_filter *filter; 4135 int ret = 0; 4136 4137 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); 4138 4139 if (filter_op == RTE_ETH_FILTER_NOP) 4140 return ret; 4141 4142 if (arg == NULL) { 4143 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 4144 filter_op); 4145 return -EINVAL; 4146 } 4147 4148 filter = (struct rte_eth_flex_filter *)arg; 4149 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN 4150 || filter->len % sizeof(uint64_t) != 0) { 4151 PMD_DRV_LOG(ERR, "filter's length is out of range"); 4152 return -EINVAL; 4153 } 4154 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) { 4155 PMD_DRV_LOG(ERR, "filter's priority is out of range"); 4156 return -EINVAL; 4157 } 4158 4159 switch (filter_op) { 4160 case RTE_ETH_FILTER_ADD: 4161 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE); 4162 break; 4163 case RTE_ETH_FILTER_DELETE: 4164 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE); 4165 break; 4166 case RTE_ETH_FILTER_GET: 4167 ret = eth_igb_get_flex_filter(dev, filter); 4168 break; 4169 default: 4170 
PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); 4171 ret = -EINVAL; 4172 break; 4173 } 4174 4175 return ret; 4176 } 4177 4178 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/ 4179 static inline int 4180 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter, 4181 struct e1000_5tuple_filter_info *filter_info) 4182 { 4183 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) 4184 return -EINVAL; 4185 if (filter->priority > E1000_2TUPLE_MAX_PRI) 4186 return -EINVAL; /* filter index is out of range. */ 4187 if (filter->tcp_flags > TCP_FLAG_ALL) 4188 return -EINVAL; /* flags is invalid. */ 4189 4190 switch (filter->dst_ip_mask) { 4191 case UINT32_MAX: 4192 filter_info->dst_ip_mask = 0; 4193 filter_info->dst_ip = filter->dst_ip; 4194 break; 4195 case 0: 4196 filter_info->dst_ip_mask = 1; 4197 break; 4198 default: 4199 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 4200 return -EINVAL; 4201 } 4202 4203 switch (filter->src_ip_mask) { 4204 case UINT32_MAX: 4205 filter_info->src_ip_mask = 0; 4206 filter_info->src_ip = filter->src_ip; 4207 break; 4208 case 0: 4209 filter_info->src_ip_mask = 1; 4210 break; 4211 default: 4212 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 4213 return -EINVAL; 4214 } 4215 4216 switch (filter->dst_port_mask) { 4217 case UINT16_MAX: 4218 filter_info->dst_port_mask = 0; 4219 filter_info->dst_port = filter->dst_port; 4220 break; 4221 case 0: 4222 filter_info->dst_port_mask = 1; 4223 break; 4224 default: 4225 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 4226 return -EINVAL; 4227 } 4228 4229 switch (filter->src_port_mask) { 4230 case UINT16_MAX: 4231 filter_info->src_port_mask = 0; 4232 filter_info->src_port = filter->src_port; 4233 break; 4234 case 0: 4235 filter_info->src_port_mask = 1; 4236 break; 4237 default: 4238 PMD_DRV_LOG(ERR, "invalid src_port mask."); 4239 return -EINVAL; 4240 } 4241 4242 switch (filter->proto_mask) { 4243 case UINT8_MAX: 4244 filter_info->proto_mask = 0; 4245 filter_info->proto = filter->proto; 4246 break; 4247 case 0: 4248 filter_info->proto_mask = 1; 4249 break; 4250 default: 4251 PMD_DRV_LOG(ERR, "invalid protocol mask."); 4252 return -EINVAL; 4253 } 4254 4255 filter_info->priority = (uint8_t)filter->priority; 4256 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) 4257 filter_info->tcp_flags = filter->tcp_flags; 4258 else 4259 filter_info->tcp_flags = 0; 4260 4261 return 0; 4262 } 4263 4264 static inline struct e1000_5tuple_filter * 4265 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list, 4266 struct e1000_5tuple_filter_info *key) 4267 { 4268 struct e1000_5tuple_filter *it; 4269 4270 TAILQ_FOREACH(it, filter_list, entries) { 4271 if (memcmp(key, &it->filter_info, 4272 sizeof(struct e1000_5tuple_filter_info)) == 0) { 4273 return it; 4274 } 4275 } 4276 return NULL; 4277 } 4278 4279 /* inject a igb 5-tuple filter to HW */ 4280 static inline void 4281 igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev, 4282 struct e1000_5tuple_filter *filter) 4283 { 4284 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4285 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK; 4286 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP; 4287 uint8_t i; 4288 4289 i = filter->index; 4290 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK; 4291 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. 
*/ 4292 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP; 4293 if (filter->filter_info.dst_ip_mask == 0) 4294 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP; 4295 if (filter->filter_info.src_port_mask == 0) 4296 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; 4297 if (filter->filter_info.proto_mask == 0) 4298 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; 4299 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) & 4300 E1000_FTQF_QUEUE_MASK; 4301 ftqf |= E1000_FTQF_QUEUE_ENABLE; 4302 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf); 4303 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip); 4304 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip); 4305 4306 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT; 4307 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf); 4308 4309 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); 4310 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ 4311 imir |= E1000_IMIR_PORT_BP; 4312 else 4313 imir &= ~E1000_IMIR_PORT_BP; 4314 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; 4315 4316 /* tcp flags bits setting. */ 4317 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { 4318 if (filter->filter_info.tcp_flags & TCP_URG_FLAG) 4319 imir_ext |= E1000_IMIREXT_CTRL_URG; 4320 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) 4321 imir_ext |= E1000_IMIREXT_CTRL_ACK; 4322 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) 4323 imir_ext |= E1000_IMIREXT_CTRL_PSH; 4324 if (filter->filter_info.tcp_flags & TCP_RST_FLAG) 4325 imir_ext |= E1000_IMIREXT_CTRL_RST; 4326 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) 4327 imir_ext |= E1000_IMIREXT_CTRL_SYN; 4328 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) 4329 imir_ext |= E1000_IMIREXT_CTRL_FIN; 4330 } else { 4331 imir_ext |= E1000_IMIREXT_CTRL_BP; 4332 } 4333 E1000_WRITE_REG(hw, E1000_IMIR(i), imir); 4334 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); 4335 } 4336 4337 /* 4338 * igb_add_5tuple_filter_82576 - add a 5tuple filter 4339 * 4340 * @param 4341 * dev: Pointer to struct rte_eth_dev. 4342 * ntuple_filter: ponter to the filter that will be added. 4343 * 4344 * @return 4345 * - On success, zero. 4346 * - On failure, a negative value. 4347 */ 4348 static int 4349 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, 4350 struct rte_eth_ntuple_filter *ntuple_filter) 4351 { 4352 struct e1000_filter_info *filter_info = 4353 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4354 struct e1000_5tuple_filter *filter; 4355 uint8_t i; 4356 int ret; 4357 4358 filter = rte_zmalloc("e1000_5tuple_filter", 4359 sizeof(struct e1000_5tuple_filter), 0); 4360 if (filter == NULL) 4361 return -ENOMEM; 4362 4363 ret = ntuple_filter_to_5tuple_82576(ntuple_filter, 4364 &filter->filter_info); 4365 if (ret < 0) { 4366 rte_free(filter); 4367 return ret; 4368 } 4369 4370 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, 4371 &filter->filter_info) != NULL) { 4372 PMD_DRV_LOG(ERR, "filter exists."); 4373 rte_free(filter); 4374 return -EEXIST; 4375 } 4376 filter->queue = ntuple_filter->queue; 4377 4378 /* 4379 * look for an unused 5tuple filter index, 4380 * and insert the filter to list. 
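	 * (the index chosen here selects which FTQF/DAQF/SAQF/SPQF/IMIR
	 * register set igb_inject_5tuple_filter_82576() programs once the
	 * slot is reserved)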
4381 */ 4382 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) { 4383 if (!(filter_info->fivetuple_mask & (1 << i))) { 4384 filter_info->fivetuple_mask |= 1 << i; 4385 filter->index = i; 4386 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 4387 filter, 4388 entries); 4389 break; 4390 } 4391 } 4392 if (i >= E1000_MAX_FTQF_FILTERS) { 4393 PMD_DRV_LOG(ERR, "5tuple filters are full."); 4394 rte_free(filter); 4395 return -ENOSYS; 4396 } 4397 4398 igb_inject_5tuple_filter_82576(dev, filter); 4399 return 0; 4400 } 4401 4402 int 4403 igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, 4404 struct e1000_5tuple_filter *filter) 4405 { 4406 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4407 struct e1000_filter_info *filter_info = 4408 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4409 4410 filter_info->fivetuple_mask &= ~(1 << filter->index); 4411 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 4412 rte_free(filter); 4413 4414 E1000_WRITE_REG(hw, E1000_FTQF(filter->index), 4415 E1000_FTQF_VF_BP | E1000_FTQF_MASK); 4416 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0); 4417 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0); 4418 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0); 4419 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); 4420 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); 4421 return 0; 4422 } 4423 4424 /* 4425 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter 4426 * 4427 * @param 4428 * dev: Pointer to struct rte_eth_dev. 4429 * ntuple_filter: ponter to the filter that will be removed. 4430 * 4431 * @return 4432 * - On success, zero. 4433 * - On failure, a negative value. 4434 */ 4435 static int 4436 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev, 4437 struct rte_eth_ntuple_filter *ntuple_filter) 4438 { 4439 struct e1000_filter_info *filter_info = 4440 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4441 struct e1000_5tuple_filter_info filter_5tuple; 4442 struct e1000_5tuple_filter *filter; 4443 int ret; 4444 4445 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info)); 4446 ret = ntuple_filter_to_5tuple_82576(ntuple_filter, 4447 &filter_5tuple); 4448 if (ret < 0) 4449 return ret; 4450 4451 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, 4452 &filter_5tuple); 4453 if (filter == NULL) { 4454 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4455 return -ENOENT; 4456 } 4457 4458 igb_delete_5tuple_filter_82576(dev, filter); 4459 4460 return 0; 4461 } 4462 4463 static int 4464 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 4465 { 4466 uint32_t rctl; 4467 struct e1000_hw *hw; 4468 struct rte_eth_dev_info dev_info; 4469 uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4470 VLAN_TAG_SIZE); 4471 4472 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4473 4474 #ifdef RTE_LIBRTE_82571_SUPPORT 4475 /* XXX: not bigger than max_rx_pktlen */ 4476 if (hw->mac.type == e1000_82571) 4477 return -ENOTSUP; 4478 #endif 4479 eth_igb_infos_get(dev, &dev_info); 4480 4481 /* check that mtu is within the allowed range */ 4482 if ((mtu < ETHER_MIN_MTU) || 4483 (frame_size > dev_info.max_rx_pktlen)) 4484 return -EINVAL; 4485 4486 /* refuse mtu that requires the support of scattered packets when this 4487 * feature has not been enabled before. 
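	 * As a rough example (assuming an Rx mempool created with the default
	 * RTE_MBUF_DEFAULT_BUF_SIZE mbufs), min_rx_buf_size minus
	 * RTE_PKTMBUF_HEADROOM comes to 2048 bytes, so any frame_size larger
	 * than that would need scattered Rx.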
*/ 4488 if (!dev->data->scattered_rx && 4489 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) 4490 return -EINVAL; 4491 4492 rctl = E1000_READ_REG(hw, E1000_RCTL); 4493 4494 /* switch to jumbo mode if needed */ 4495 if (frame_size > ETHER_MAX_LEN) { 4496 dev->data->dev_conf.rxmode.offloads |= 4497 DEV_RX_OFFLOAD_JUMBO_FRAME; 4498 rctl |= E1000_RCTL_LPE; 4499 } else { 4500 dev->data->dev_conf.rxmode.offloads &= 4501 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 4502 rctl &= ~E1000_RCTL_LPE; 4503 } 4504 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 4505 4506 /* update max frame size */ 4507 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 4508 4509 E1000_WRITE_REG(hw, E1000_RLPML, 4510 dev->data->dev_conf.rxmode.max_rx_pkt_len); 4511 4512 return 0; 4513 } 4514 4515 /* 4516 * igb_add_del_ntuple_filter - add or delete a ntuple filter 4517 * 4518 * @param 4519 * dev: Pointer to struct rte_eth_dev. 4520 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 4521 * add: if true, add filter, if false, remove filter 4522 * 4523 * @return 4524 * - On success, zero. 4525 * - On failure, a negative value. 4526 */ 4527 int 4528 igb_add_del_ntuple_filter(struct rte_eth_dev *dev, 4529 struct rte_eth_ntuple_filter *ntuple_filter, 4530 bool add) 4531 { 4532 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4533 int ret; 4534 4535 switch (ntuple_filter->flags) { 4536 case RTE_5TUPLE_FLAGS: 4537 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4538 if (hw->mac.type != e1000_82576) 4539 return -ENOTSUP; 4540 if (add) 4541 ret = igb_add_5tuple_filter_82576(dev, 4542 ntuple_filter); 4543 else 4544 ret = igb_remove_5tuple_filter_82576(dev, 4545 ntuple_filter); 4546 break; 4547 case RTE_2TUPLE_FLAGS: 4548 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4549 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 && 4550 hw->mac.type != e1000_i210 && 4551 hw->mac.type != e1000_i211) 4552 return -ENOTSUP; 4553 if (add) 4554 ret = igb_add_2tuple_filter(dev, ntuple_filter); 4555 else 4556 ret = igb_remove_2tuple_filter(dev, ntuple_filter); 4557 break; 4558 default: 4559 ret = -EINVAL; 4560 break; 4561 } 4562 4563 return ret; 4564 } 4565 4566 /* 4567 * igb_get_ntuple_filter - get a ntuple filter 4568 * 4569 * @param 4570 * dev: Pointer to struct rte_eth_dev. 4571 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 4572 * 4573 * @return 4574 * - On success, zero. 4575 * - On failure, a negative value. 
 */
static int
igb_get_ntuple_filter(struct rte_eth_dev *dev,
            struct rte_eth_ntuple_filter *ntuple_filter)
{
    struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct e1000_filter_info *filter_info =
        E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
    struct e1000_5tuple_filter_info filter_5tuple;
    struct e1000_2tuple_filter_info filter_2tuple;
    struct e1000_5tuple_filter *p_5tuple_filter;
    struct e1000_2tuple_filter *p_2tuple_filter;
    int ret;

    switch (ntuple_filter->flags) {
    case RTE_5TUPLE_FLAGS:
    case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
        if (hw->mac.type != e1000_82576)
            return -ENOTSUP;
        memset(&filter_5tuple,
            0,
            sizeof(struct e1000_5tuple_filter_info));
        ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
                        &filter_5tuple);
        if (ret < 0)
            return ret;
        p_5tuple_filter = igb_5tuple_filter_lookup_82576(
                    &filter_info->fivetuple_list,
                    &filter_5tuple);
        if (p_5tuple_filter == NULL) {
            PMD_DRV_LOG(ERR, "filter doesn't exist.");
            return -ENOENT;
        }
        ntuple_filter->queue = p_5tuple_filter->queue;
        break;
    case RTE_2TUPLE_FLAGS:
    case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
        if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
            return -ENOTSUP;
        memset(&filter_2tuple,
            0,
            sizeof(struct e1000_2tuple_filter_info));
        ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
        if (ret < 0)
            return ret;
        p_2tuple_filter = igb_2tuple_filter_lookup(
                    &filter_info->twotuple_list,
                    &filter_2tuple);
        if (p_2tuple_filter == NULL) {
            PMD_DRV_LOG(ERR, "filter doesn't exist.");
            return -ENOENT;
        }
        ntuple_filter->queue = p_2tuple_filter->queue;
        break;
    default:
        ret = -EINVAL;
        break;
    }

    /* Propagate -EINVAL from the default case instead of reporting success. */
    return ret;
}

/*
 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op:operation will be taken.
4642 * @arg: a pointer to specific structure corresponding to the filter_op 4643 */ 4644 static int 4645 igb_ntuple_filter_handle(struct rte_eth_dev *dev, 4646 enum rte_filter_op filter_op, 4647 void *arg) 4648 { 4649 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4650 int ret; 4651 4652 MAC_TYPE_FILTER_SUP(hw->mac.type); 4653 4654 if (filter_op == RTE_ETH_FILTER_NOP) 4655 return 0; 4656 4657 if (arg == NULL) { 4658 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 4659 filter_op); 4660 return -EINVAL; 4661 } 4662 4663 switch (filter_op) { 4664 case RTE_ETH_FILTER_ADD: 4665 ret = igb_add_del_ntuple_filter(dev, 4666 (struct rte_eth_ntuple_filter *)arg, 4667 TRUE); 4668 break; 4669 case RTE_ETH_FILTER_DELETE: 4670 ret = igb_add_del_ntuple_filter(dev, 4671 (struct rte_eth_ntuple_filter *)arg, 4672 FALSE); 4673 break; 4674 case RTE_ETH_FILTER_GET: 4675 ret = igb_get_ntuple_filter(dev, 4676 (struct rte_eth_ntuple_filter *)arg); 4677 break; 4678 default: 4679 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 4680 ret = -EINVAL; 4681 break; 4682 } 4683 return ret; 4684 } 4685 4686 static inline int 4687 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info, 4688 uint16_t ethertype) 4689 { 4690 int i; 4691 4692 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 4693 if (filter_info->ethertype_filters[i].ethertype == ethertype && 4694 (filter_info->ethertype_mask & (1 << i))) 4695 return i; 4696 } 4697 return -1; 4698 } 4699 4700 static inline int 4701 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info, 4702 uint16_t ethertype, uint32_t etqf) 4703 { 4704 int i; 4705 4706 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 4707 if (!(filter_info->ethertype_mask & (1 << i))) { 4708 filter_info->ethertype_mask |= 1 << i; 4709 filter_info->ethertype_filters[i].ethertype = ethertype; 4710 filter_info->ethertype_filters[i].etqf = etqf; 4711 return i; 4712 } 4713 } 4714 return -1; 4715 } 4716 4717 int 4718 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, 4719 uint8_t idx) 4720 { 4721 if (idx >= E1000_MAX_ETQF_FILTERS) 4722 return -1; 4723 filter_info->ethertype_mask &= ~(1 << idx); 4724 filter_info->ethertype_filters[idx].ethertype = 0; 4725 filter_info->ethertype_filters[idx].etqf = 0; 4726 return idx; 4727 } 4728 4729 4730 int 4731 igb_add_del_ethertype_filter(struct rte_eth_dev *dev, 4732 struct rte_eth_ethertype_filter *filter, 4733 bool add) 4734 { 4735 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4736 struct e1000_filter_info *filter_info = 4737 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4738 uint32_t etqf = 0; 4739 int ret; 4740 4741 if (filter->ether_type == ETHER_TYPE_IPv4 || 4742 filter->ether_type == ETHER_TYPE_IPv6) { 4743 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 4744 " ethertype filter.", filter->ether_type); 4745 return -EINVAL; 4746 } 4747 4748 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 4749 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 4750 return -EINVAL; 4751 } 4752 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 4753 PMD_DRV_LOG(ERR, "drop option is unsupported."); 4754 return -EINVAL; 4755 } 4756 4757 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); 4758 if (ret >= 0 && add) { 4759 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 4760 filter->ether_type); 4761 return -EEXIST; 4762 } 4763 if (ret < 0 && !add) { 4764 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4765 filter->ether_type); 4766 return -ENOENT; 4767 } 
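    /*
     * At this point "ret" is the index of the existing ETQF entry on the
     * delete path; on the add path it is reassigned below by
     * igb_ethertype_filter_insert(), and E1000_ETQF(ret) is then written
     * (with 0 when deleting).
     */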
4768 4769 if (add) { 4770 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE; 4771 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE); 4772 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT; 4773 ret = igb_ethertype_filter_insert(filter_info, 4774 filter->ether_type, etqf); 4775 if (ret < 0) { 4776 PMD_DRV_LOG(ERR, "ethertype filters are full."); 4777 return -ENOSYS; 4778 } 4779 } else { 4780 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret); 4781 if (ret < 0) 4782 return -ENOSYS; 4783 } 4784 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf); 4785 E1000_WRITE_FLUSH(hw); 4786 4787 return 0; 4788 } 4789 4790 static int 4791 igb_get_ethertype_filter(struct rte_eth_dev *dev, 4792 struct rte_eth_ethertype_filter *filter) 4793 { 4794 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4795 struct e1000_filter_info *filter_info = 4796 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4797 uint32_t etqf; 4798 int ret; 4799 4800 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); 4801 if (ret < 0) { 4802 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4803 filter->ether_type); 4804 return -ENOENT; 4805 } 4806 4807 etqf = E1000_READ_REG(hw, E1000_ETQF(ret)); 4808 if (etqf & E1000_ETQF_FILTER_ENABLE) { 4809 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE; 4810 filter->flags = 0; 4811 filter->queue = (etqf & E1000_ETQF_QUEUE) >> 4812 E1000_ETQF_QUEUE_SHIFT; 4813 return 0; 4814 } 4815 4816 return -ENOENT; 4817 } 4818 4819 /* 4820 * igb_ethertype_filter_handle - Handle operations for ethertype filter. 4821 * @dev: pointer to rte_eth_dev structure 4822 * @filter_op:operation will be taken. 4823 * @arg: a pointer to specific structure corresponding to the filter_op 4824 */ 4825 static int 4826 igb_ethertype_filter_handle(struct rte_eth_dev *dev, 4827 enum rte_filter_op filter_op, 4828 void *arg) 4829 { 4830 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4831 int ret; 4832 4833 MAC_TYPE_FILTER_SUP(hw->mac.type); 4834 4835 if (filter_op == RTE_ETH_FILTER_NOP) 4836 return 0; 4837 4838 if (arg == NULL) { 4839 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 4840 filter_op); 4841 return -EINVAL; 4842 } 4843 4844 switch (filter_op) { 4845 case RTE_ETH_FILTER_ADD: 4846 ret = igb_add_del_ethertype_filter(dev, 4847 (struct rte_eth_ethertype_filter *)arg, 4848 TRUE); 4849 break; 4850 case RTE_ETH_FILTER_DELETE: 4851 ret = igb_add_del_ethertype_filter(dev, 4852 (struct rte_eth_ethertype_filter *)arg, 4853 FALSE); 4854 break; 4855 case RTE_ETH_FILTER_GET: 4856 ret = igb_get_ethertype_filter(dev, 4857 (struct rte_eth_ethertype_filter *)arg); 4858 break; 4859 default: 4860 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 4861 ret = -EINVAL; 4862 break; 4863 } 4864 return ret; 4865 } 4866 4867 static int 4868 eth_igb_filter_ctrl(struct rte_eth_dev *dev, 4869 enum rte_filter_type filter_type, 4870 enum rte_filter_op filter_op, 4871 void *arg) 4872 { 4873 int ret = 0; 4874 4875 switch (filter_type) { 4876 case RTE_ETH_FILTER_NTUPLE: 4877 ret = igb_ntuple_filter_handle(dev, filter_op, arg); 4878 break; 4879 case RTE_ETH_FILTER_ETHERTYPE: 4880 ret = igb_ethertype_filter_handle(dev, filter_op, arg); 4881 break; 4882 case RTE_ETH_FILTER_SYN: 4883 ret = eth_igb_syn_filter_handle(dev, filter_op, arg); 4884 break; 4885 case RTE_ETH_FILTER_FLEXIBLE: 4886 ret = eth_igb_flex_filter_handle(dev, filter_op, arg); 4887 break; 4888 case RTE_ETH_FILTER_GENERIC: 4889 if (filter_op != RTE_ETH_FILTER_GET) 4890 return 
-EINVAL; 4891 *(const void **)arg = &igb_flow_ops; 4892 break; 4893 default: 4894 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", 4895 filter_type); 4896 break; 4897 } 4898 4899 return ret; 4900 } 4901 4902 static int 4903 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev, 4904 struct ether_addr *mc_addr_set, 4905 uint32_t nb_mc_addr) 4906 { 4907 struct e1000_hw *hw; 4908 4909 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4910 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); 4911 return 0; 4912 } 4913 4914 static uint64_t 4915 igb_read_systime_cyclecounter(struct rte_eth_dev *dev) 4916 { 4917 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4918 uint64_t systime_cycles; 4919 4920 switch (hw->mac.type) { 4921 case e1000_i210: 4922 case e1000_i211: 4923 /* 4924 * Need to read System Time Residue Register to be able 4925 * to read the other two registers. 4926 */ 4927 E1000_READ_REG(hw, E1000_SYSTIMR); 4928 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 4929 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4930 systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) 4931 * NSEC_PER_SEC; 4932 break; 4933 case e1000_82580: 4934 case e1000_i350: 4935 case e1000_i354: 4936 /* 4937 * Need to read System Time Residue Register to be able 4938 * to read the other two registers. 4939 */ 4940 E1000_READ_REG(hw, E1000_SYSTIMR); 4941 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4942 /* Only the 8 LSB are valid. */ 4943 systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH) 4944 & 0xff) << 32; 4945 break; 4946 default: 4947 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4948 systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) 4949 << 32; 4950 break; 4951 } 4952 4953 return systime_cycles; 4954 } 4955 4956 static uint64_t 4957 igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 4958 { 4959 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4960 uint64_t rx_tstamp_cycles; 4961 4962 switch (hw->mac.type) { 4963 case e1000_i210: 4964 case e1000_i211: 4965 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 4966 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 4967 rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) 4968 * NSEC_PER_SEC; 4969 break; 4970 case e1000_82580: 4971 case e1000_i350: 4972 case e1000_i354: 4973 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 4974 /* Only the 8 LSB are valid. */ 4975 rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH) 4976 & 0xff) << 32; 4977 break; 4978 default: 4979 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 4980 rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) 4981 << 32; 4982 break; 4983 } 4984 4985 return rx_tstamp_cycles; 4986 } 4987 4988 static uint64_t 4989 igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 4990 { 4991 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4992 uint64_t tx_tstamp_cycles; 4993 4994 switch (hw->mac.type) { 4995 case e1000_i210: 4996 case e1000_i211: 4997 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 4998 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 4999 tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) 5000 * NSEC_PER_SEC; 5001 break; 5002 case e1000_82580: 5003 case e1000_i350: 5004 case e1000_i354: 5005 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 5006 /* Only the 8 LSB are valid. 
*/ 5007 tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH) 5008 & 0xff) << 32; 5009 break; 5010 default: 5011 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 5012 tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) 5013 << 32; 5014 break; 5015 } 5016 5017 return tx_tstamp_cycles; 5018 } 5019 5020 static void 5021 igb_start_timecounters(struct rte_eth_dev *dev) 5022 { 5023 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5024 struct e1000_adapter *adapter = dev->data->dev_private; 5025 uint32_t incval = 1; 5026 uint32_t shift = 0; 5027 uint64_t mask = E1000_CYCLECOUNTER_MASK; 5028 5029 switch (hw->mac.type) { 5030 case e1000_82580: 5031 case e1000_i350: 5032 case e1000_i354: 5033 /* 32 LSB bits + 8 MSB bits = 40 bits */ 5034 mask = (1ULL << 40) - 1; 5035 /* fall-through */ 5036 case e1000_i210: 5037 case e1000_i211: 5038 /* 5039 * Start incrementing the register 5040 * used to timestamp PTP packets. 5041 */ 5042 E1000_WRITE_REG(hw, E1000_TIMINCA, incval); 5043 break; 5044 case e1000_82576: 5045 incval = E1000_INCVALUE_82576; 5046 shift = IGB_82576_TSYNC_SHIFT; 5047 E1000_WRITE_REG(hw, E1000_TIMINCA, 5048 E1000_INCPERIOD_82576 | incval); 5049 break; 5050 default: 5051 /* Not supported */ 5052 return; 5053 } 5054 5055 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 5056 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 5057 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 5058 5059 adapter->systime_tc.cc_mask = mask; 5060 adapter->systime_tc.cc_shift = shift; 5061 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 5062 5063 adapter->rx_tstamp_tc.cc_mask = mask; 5064 adapter->rx_tstamp_tc.cc_shift = shift; 5065 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 5066 5067 adapter->tx_tstamp_tc.cc_mask = mask; 5068 adapter->tx_tstamp_tc.cc_shift = shift; 5069 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 5070 } 5071 5072 static int 5073 igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 5074 { 5075 struct e1000_adapter *adapter = dev->data->dev_private; 5076 5077 adapter->systime_tc.nsec += delta; 5078 adapter->rx_tstamp_tc.nsec += delta; 5079 adapter->tx_tstamp_tc.nsec += delta; 5080 5081 return 0; 5082 } 5083 5084 static int 5085 igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 5086 { 5087 uint64_t ns; 5088 struct e1000_adapter *adapter = dev->data->dev_private; 5089 5090 ns = rte_timespec_to_ns(ts); 5091 5092 /* Set the timecounters to a new value. */ 5093 adapter->systime_tc.nsec = ns; 5094 adapter->rx_tstamp_tc.nsec = ns; 5095 adapter->tx_tstamp_tc.nsec = ns; 5096 5097 return 0; 5098 } 5099 5100 static int 5101 igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 5102 { 5103 uint64_t ns, systime_cycles; 5104 struct e1000_adapter *adapter = dev->data->dev_private; 5105 5106 systime_cycles = igb_read_systime_cyclecounter(dev); 5107 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 5108 *ts = rte_ns_to_timespec(ns); 5109 5110 return 0; 5111 } 5112 5113 static int 5114 igb_timesync_enable(struct rte_eth_dev *dev) 5115 { 5116 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5117 uint32_t tsync_ctl; 5118 uint32_t tsauxc; 5119 5120 /* Stop the timesync system time. */ 5121 E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0); 5122 /* Reset the timesync system time value. 
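	 * (the 82580/i350/i354/i210/i211 path also clears SYSTIMR before
	 * falling through; the 82576 path clears only SYSTIML/SYSTIMH)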
 */
    switch (hw->mac.type) {
    case e1000_82580:
    case e1000_i350:
    case e1000_i354:
    case e1000_i210:
    case e1000_i211:
        E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
        /* fall-through */
    case e1000_82576:
        E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
        E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
        break;
    default:
        /* Not supported. */
        return -ENOTSUP;
    }

    /* Enable system time; it is not on by default. */
    tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
    tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
    E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);

    igb_start_timecounters(dev);

    /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
    E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
            (ETHER_TYPE_1588 |
             E1000_ETQF_FILTER_ENABLE |
             E1000_ETQF_1588));

    /* Enable timestamping of received PTP packets. */
    tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
    tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
    E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

    /* Enable timestamping of transmitted PTP packets. */
    tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
    tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
    E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

    return 0;
}

static int
igb_timesync_disable(struct rte_eth_dev *dev)
{
    struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t tsync_ctl;

    /* Disable timestamping of transmitted PTP packets. */
    tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
    tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
    E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

    /* Disable timestamping of received PTP packets. */
    tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
    tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
    E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

    /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
    E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);

    /* Stop incrementing the System Time registers.
*/ 5186 E1000_WRITE_REG(hw, E1000_TIMINCA, 0); 5187 5188 return 0; 5189 } 5190 5191 static int 5192 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 5193 struct timespec *timestamp, 5194 uint32_t flags __rte_unused) 5195 { 5196 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5197 struct e1000_adapter *adapter = dev->data->dev_private; 5198 uint32_t tsync_rxctl; 5199 uint64_t rx_tstamp_cycles; 5200 uint64_t ns; 5201 5202 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); 5203 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0) 5204 return -EINVAL; 5205 5206 rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev); 5207 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 5208 *timestamp = rte_ns_to_timespec(ns); 5209 5210 return 0; 5211 } 5212 5213 static int 5214 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 5215 struct timespec *timestamp) 5216 { 5217 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5218 struct e1000_adapter *adapter = dev->data->dev_private; 5219 uint32_t tsync_txctl; 5220 uint64_t tx_tstamp_cycles; 5221 uint64_t ns; 5222 5223 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); 5224 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0) 5225 return -EINVAL; 5226 5227 tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev); 5228 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 5229 *timestamp = rte_ns_to_timespec(ns); 5230 5231 return 0; 5232 } 5233 5234 static int 5235 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused) 5236 { 5237 int count = 0; 5238 int g_ind = 0; 5239 const struct reg_info *reg_group; 5240 5241 while ((reg_group = igb_regs[g_ind++])) 5242 count += igb_reg_group_count(reg_group); 5243 5244 return count; 5245 } 5246 5247 static int 5248 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 5249 { 5250 int count = 0; 5251 int g_ind = 0; 5252 const struct reg_info *reg_group; 5253 5254 while ((reg_group = igbvf_regs[g_ind++])) 5255 count += igb_reg_group_count(reg_group); 5256 5257 return count; 5258 } 5259 5260 static int 5261 eth_igb_get_regs(struct rte_eth_dev *dev, 5262 struct rte_dev_reg_info *regs) 5263 { 5264 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5265 uint32_t *data = regs->data; 5266 int g_ind = 0; 5267 int count = 0; 5268 const struct reg_info *reg_group; 5269 5270 if (data == NULL) { 5271 regs->length = eth_igb_get_reg_length(dev); 5272 regs->width = sizeof(uint32_t); 5273 return 0; 5274 } 5275 5276 /* Support only full register dump */ 5277 if ((regs->length == 0) || 5278 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) { 5279 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 5280 hw->device_id; 5281 while ((reg_group = igb_regs[g_ind++])) 5282 count += igb_read_regs_group(dev, &data[count], 5283 reg_group); 5284 return 0; 5285 } 5286 5287 return -ENOTSUP; 5288 } 5289 5290 static int 5291 igbvf_get_regs(struct rte_eth_dev *dev, 5292 struct rte_dev_reg_info *regs) 5293 { 5294 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5295 uint32_t *data = regs->data; 5296 int g_ind = 0; 5297 int count = 0; 5298 const struct reg_info *reg_group; 5299 5300 if (data == NULL) { 5301 regs->length = igbvf_get_reg_length(dev); 5302 regs->width = sizeof(uint32_t); 5303 return 0; 5304 } 5305 5306 /* Support only full register dump */ 5307 if ((regs->length == 0) || 5308 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) { 5309 regs->version = hw->mac.type << 24 | hw->revision_id << 
16 | 5310 hw->device_id; 5311 while ((reg_group = igbvf_regs[g_ind++])) 5312 count += igb_read_regs_group(dev, &data[count], 5313 reg_group); 5314 return 0; 5315 } 5316 5317 return -ENOTSUP; 5318 } 5319 5320 static int 5321 eth_igb_get_eeprom_length(struct rte_eth_dev *dev) 5322 { 5323 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5324 5325 /* Return unit is byte count */ 5326 return hw->nvm.word_size * 2; 5327 } 5328 5329 static int 5330 eth_igb_get_eeprom(struct rte_eth_dev *dev, 5331 struct rte_dev_eeprom_info *in_eeprom) 5332 { 5333 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5334 struct e1000_nvm_info *nvm = &hw->nvm; 5335 uint16_t *data = in_eeprom->data; 5336 int first, length; 5337 5338 first = in_eeprom->offset >> 1; 5339 length = in_eeprom->length >> 1; 5340 if ((first >= hw->nvm.word_size) || 5341 ((first + length) >= hw->nvm.word_size)) 5342 return -EINVAL; 5343 5344 in_eeprom->magic = hw->vendor_id | 5345 ((uint32_t)hw->device_id << 16); 5346 5347 if ((nvm->ops.read) == NULL) 5348 return -ENOTSUP; 5349 5350 return nvm->ops.read(hw, first, length, data); 5351 } 5352 5353 static int 5354 eth_igb_set_eeprom(struct rte_eth_dev *dev, 5355 struct rte_dev_eeprom_info *in_eeprom) 5356 { 5357 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5358 struct e1000_nvm_info *nvm = &hw->nvm; 5359 uint16_t *data = in_eeprom->data; 5360 int first, length; 5361 5362 first = in_eeprom->offset >> 1; 5363 length = in_eeprom->length >> 1; 5364 if ((first >= hw->nvm.word_size) || 5365 ((first + length) >= hw->nvm.word_size)) 5366 return -EINVAL; 5367 5368 in_eeprom->magic = (uint32_t)hw->vendor_id | 5369 ((uint32_t)hw->device_id << 16); 5370 5371 if ((nvm->ops.write) == NULL) 5372 return -ENOTSUP; 5373 return nvm->ops.write(hw, first, length, data); 5374 } 5375 5376 static int 5377 eth_igb_get_module_info(struct rte_eth_dev *dev, 5378 struct rte_eth_dev_module_info *modinfo) 5379 { 5380 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5381 5382 uint32_t status = 0; 5383 uint16_t sff8472_rev, addr_mode; 5384 bool page_swap = false; 5385 5386 if (hw->phy.media_type == e1000_media_type_copper || 5387 hw->phy.media_type == e1000_media_type_unknown) 5388 return -EOPNOTSUPP; 5389 5390 /* Check whether we support SFF-8472 or not */ 5391 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); 5392 if (status) 5393 return -EIO; 5394 5395 /* addressing mode is not supported */ 5396 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); 5397 if (status) 5398 return -EIO; 5399 5400 /* addressing mode is not supported */ 5401 if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { 5402 PMD_DRV_LOG(ERR, 5403 "Address change required to access page 0xA2, " 5404 "but not supported. 
Please report the module " 5405 "type to the driver maintainers.\n"); 5406 page_swap = true; 5407 } 5408 5409 if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { 5410 /* We have an SFP, but it does not support SFF-8472 */ 5411 modinfo->type = RTE_ETH_MODULE_SFF_8079; 5412 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 5413 } else { 5414 /* We have an SFP which supports a revision of SFF-8472 */ 5415 modinfo->type = RTE_ETH_MODULE_SFF_8472; 5416 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 5417 } 5418 5419 return 0; 5420 } 5421 5422 static int 5423 eth_igb_get_module_eeprom(struct rte_eth_dev *dev, 5424 struct rte_dev_eeprom_info *info) 5425 { 5426 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5427 5428 uint32_t status = 0; 5429 uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1]; 5430 u16 first_word, last_word; 5431 int i = 0; 5432 5433 if (info->length == 0) 5434 return -EINVAL; 5435 5436 first_word = info->offset >> 1; 5437 last_word = (info->offset + info->length - 1) >> 1; 5438 5439 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ 5440 for (i = 0; i < last_word - first_word + 1; i++) { 5441 status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2, 5442 &dataword[i]); 5443 if (status) { 5444 /* Error occurred while reading module */ 5445 return -EIO; 5446 } 5447 5448 dataword[i] = rte_be_to_cpu_16(dataword[i]); 5449 } 5450 5451 memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length); 5452 5453 return 0; 5454 } 5455 5456 static int 5457 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5458 { 5459 struct e1000_hw *hw = 5460 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5461 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5462 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5463 uint32_t vec = E1000_MISC_VEC_ID; 5464 5465 if (rte_intr_allow_others(intr_handle)) 5466 vec = E1000_RX_VEC_START; 5467 5468 uint32_t mask = 1 << (queue_id + vec); 5469 5470 E1000_WRITE_REG(hw, E1000_EIMC, mask); 5471 E1000_WRITE_FLUSH(hw); 5472 5473 return 0; 5474 } 5475 5476 static int 5477 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5478 { 5479 struct e1000_hw *hw = 5480 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5481 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5482 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5483 uint32_t vec = E1000_MISC_VEC_ID; 5484 5485 if (rte_intr_allow_others(intr_handle)) 5486 vec = E1000_RX_VEC_START; 5487 5488 uint32_t mask = 1 << (queue_id + vec); 5489 uint32_t regval; 5490 5491 regval = E1000_READ_REG(hw, E1000_EIMS); 5492 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); 5493 E1000_WRITE_FLUSH(hw); 5494 5495 rte_intr_enable(intr_handle); 5496 5497 return 0; 5498 } 5499 5500 static void 5501 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, 5502 uint8_t index, uint8_t offset) 5503 { 5504 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 5505 5506 /* clear bits */ 5507 val &= ~((uint32_t)0xFF << offset); 5508 5509 /* write vector and valid bit */ 5510 val |= (msix_vector | E1000_IVAR_VALID) << offset; 5511 5512 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val); 5513 } 5514 5515 static void 5516 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction, 5517 uint8_t queue, uint8_t msix_vector) 5518 { 5519 uint32_t tmp = 0; 5520 5521 if (hw->mac.type == e1000_82575) { 5522 if (direction == 0) 5523 tmp = E1000_EICR_RX_QUEUE0 << queue; 5524 else if (direction == 1) 5525 tmp 
static void
eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
			   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	if (hw->mac.type == e1000_82575) {
		if (direction == 0)
			tmp = E1000_EICR_RX_QUEUE0 << queue;
		else if (direction == 1)
			tmp = E1000_EICR_TX_QUEUE0 << queue;
		E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
	} else if (hw->mac.type == e1000_82576) {
		if ((direction == 0) || (direction == 1))
			eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
					   ((queue & 0x8) << 1) +
					   8 * direction);
	} else if ((hw->mac.type == e1000_82580) ||
		   (hw->mac.type == e1000_i350) ||
		   (hw->mac.type == e1000_i354) ||
		   (hw->mac.type == e1000_i210) ||
		   (hw->mac.type == e1000_i211)) {
		if ((direction == 0) || (direction == 1))
			eth_igb_write_ivar(hw, msix_vector,
					   queue >> 1,
					   ((queue & 0x1) << 4) +
					   8 * direction);
	}
}

/* Sets up the hardware to generate MSI-X interrupts properly
 * @dev
 *  pointer to the Ethernet device structure
 */
static void
eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
{
	int queue_id;
	uint32_t tmpval, regval, intr_mask;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vec = E1000_MISC_VEC_ID;
	uint32_t base = E1000_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		vec = base = E1000_RX_VEC_START;
		misc_shift = 1;
	}

	/* set interrupt vector for other causes */
	if (hw->mac.type == e1000_82575) {
		tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmpval |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read */
		tmpval |= E1000_CTRL_EXT_EIAME;
		tmpval |= E1000_CTRL_EXT_IRCA;

		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);

		/* enable msix_other interrupt */
		E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAM);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
	} else if ((hw->mac.type == e1000_82576) ||
		   (hw->mac.type == e1000_82580) ||
		   (hw->mac.type == e1000_i350) ||
		   (hw->mac.type == e1000_i354) ||
		   (hw->mac.type == e1000_i210) ||
		   (hw->mac.type == e1000_i211)) {
		/* turn on MSI-X capability first */
		E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
				E1000_GPIE_PBA | E1000_GPIE_EIAME |
				E1000_GPIE_NSICR);
		intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
			misc_shift;

		if (dev->data->dev_conf.intr_conf.lsc != 0)
			intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);

		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);

		/* enable msix_other interrupt */
		regval = E1000_READ_REG(hw, E1000_EIMS);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
		tmpval = (IGB_MSIX_OTHER_INTR_VEC | E1000_IVAR_VALID) << 8;
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
	}

	/* use EIAM to auto-mask when MSI-X interrupt
	 * is asserted, this saves a register write for every interrupt
	 */
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
		misc_shift;

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);

	regval = E1000_READ_REG(hw, E1000_EIAM);
	E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);

	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
		eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	E1000_WRITE_FLUSH(hw);
}
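
/* The restore helpers below re-program the hardware filter registers from
 * the software copies kept in struct e1000_filter_info. Hardware filter
 * state does not survive a device reset, so igb_filter_restore() is invoked
 * when the port is (re)started (see eth_igb_start()) to re-apply any
 * n-tuple, ethertype, SYN, flex and RSS filters configured earlier.
 */
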
/* restore n-tuple filter */
static inline void
igb_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
		igb_inject_5tuple_filter_82576(dev, p_5tuple);
	}

	TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
		igb_inject_2uple_filter(dev, p_2tuple);
	}
}

/* restore SYN filter */
static inline void
igb_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t synqf;

	synqf = filter_info->syn_info;

	if (synqf & E1000_SYN_FILTER_ENABLE) {
		E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
		E1000_WRITE_FLUSH(hw);
	}
}

/* restore ethernet type filter */
static inline void
igb_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			E1000_WRITE_REG(hw, E1000_ETQF(i),
				filter_info->ethertype_filters[i].etqf);
			E1000_WRITE_FLUSH(hw);
		}
	}
}

/* restore flex byte filter */
static inline void
igb_flex_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter;

	TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
		igb_inject_flex_filter(dev, flex_filter);
	}
}

/* restore rss filter */
static inline void
igb_rss_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->rss_info.conf.queue_num)
		igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
}

/* restore all types filter */
static int
igb_filter_restore(struct rte_eth_dev *dev)
{
	igb_ntuple_filter_restore(dev);
	igb_ethertype_filter_restore(dev);
	igb_syn_filter_restore(dev);
	igb_flex_filter_restore(dev);
	igb_rss_filter_restore(dev);

	return 0;
}

RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
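
/* The macros above register the PF (net_e1000_igb) and VF (net_e1000_igb_vf)
 * drivers with the PCI bus, embed their PCI ID tables in the PMD information
 * string so external tools can match devices to this driver, and declare the
 * kernel modules a device must be bound to: igb_uio, uio_pci_generic or
 * vfio-pci for the PF, and igb_uio or vfio-pci for the VF.
 */
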
/* see e1000_logs.c */
RTE_INIT(e1000_init_log)
{
	e1000_igb_init_log();
}
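
/* The RTE_INIT constructor above registers the driver's dynamic log types
 * when the PMD is loaded. Log verbosity can then be raised at run time with
 * the EAL --log-level option, e.g. --log-level=pmd.net.e1000.driver,8;
 * the exact log type names are the ones registered in e1000_logs.c.
 */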