/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
#include "igb_regs.h"

/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH  32

#define IGB_DEFAULT_RX_PTHRESH      ((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_DEFAULT_RX_HTHRESH      8
#define IGB_DEFAULT_RX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 4)

#define IGB_DEFAULT_TX_PTHRESH      ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_DEFAULT_TX_HTHRESH      1
#define IGB_DEFAULT_TX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 16)
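/*
 * Note: the default threshold macros above expand to expressions that read
 * hw->mac.type, so they can only be used in functions where a local
 * "struct e1000_hw *hw" is in scope.
 */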
#define IGB_HKEY_MAX_INDEX 10

/* Bit shift and mask */
#define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH  CHAR_BIT
#define IGB_8_BIT_MASK   UINT8_MAX

/* Additional timesync values. */
#define E1000_CYCLECOUNTER_MASK      0xffffffffffffffffULL
#define E1000_ETQF_FILTER_1588       3
#define IGB_82576_TSYNC_SHIFT        16
#define E1000_INCPERIOD_82576        (1 << E1000_TIMINCA_16NS_SHIFT)
#define E1000_INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000

#define E1000_VTIVAR_MISC            0x01740
#define E1000_VTIVAR_MISC_MASK       0xFF
#define E1000_VTIVAR_VALID           0x80
#define E1000_VTIVAR_MISC_MAILBOX    0
#define E1000_VTIVAR_MISC_INTR_MASK  0x3

/* External VLAN Enable bit mask */
#define E1000_CTRL_EXT_EXT_VLAN      (1 << 26)

/* External VLAN Ether Type bit mask and shift */
#define E1000_VET_VET_EXT            0xFFFF0000
#define E1000_VET_VET_EXT_SHIFT      16

static int  eth_igb_configure(struct rte_eth_dev *dev);
static int  eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static int  eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
static int  eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int  eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *xstats, unsigned n);
static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned limit);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int  eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int  eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
				      void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int  eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
				   uint16_t vlan_id, int on);
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
				 enum rte_vlan_type vlan_type,
				 uint16_t tpid_id);
static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct e1000_hw *hw);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_igb_rar_set(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
					 struct ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static void igbvf_promiscuous_enable(struct rte_eth_dev *dev);
static void igbvf_promiscuous_disable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_enable(struct rte_eth_dev *dev);
static void igbvf_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned limit);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
				 uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
				       struct ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
			  struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);

static int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add);
static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter,
			bool add);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter,
			bool add);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp,
					  uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int igb_timesync_read_time(struct rte_eth_dev *dev,
				  struct timespec *timestamp);
static int igb_timesync_write_time(struct rte_eth_dev *dev,
				   const struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
				       uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
			       uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(struct rte_intr_handle *handle,
					void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);

/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += (latest - last) & UINT_MAX;        \
	last = latest;                            \
}
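/*
 * The VF statistics registers are 32-bit and are not cleared on read, so the
 * macro accumulates only the delta since the previous sample; the unsigned
 * "& UINT_MAX" arithmetic keeps that delta correct even when the hardware
 * counter wraps past 2^32 between two reads.
 */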
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{0},
};

/*
 * The set of PCI devices this driver supports (for 82576&I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {

#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{0},
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};

static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure = eth_igb_configure,
	.dev_start = eth_igb_start,
	.dev_stop = eth_igb_stop,
	.dev_set_link_up = eth_igb_dev_set_link_up,
	.dev_set_link_down = eth_igb_dev_set_link_down,
	.dev_close = eth_igb_close,
	.promiscuous_enable = eth_igb_promiscuous_enable,
	.promiscuous_disable = eth_igb_promiscuous_disable,
	.allmulticast_enable = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igb_stats_get,
	.xstats_get = eth_igb_xstats_get,
	.xstats_get_names = eth_igb_xstats_get_names,
	.stats_reset = eth_igb_stats_reset,
	.xstats_reset = eth_igb_xstats_reset,
	.dev_infos_get = eth_igb_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.mtu_set = eth_igb_mtu_set,
	.vlan_filter_set = eth_igb_vlan_filter_set,
	.vlan_tpid_set = eth_igb_vlan_tpid_set,
	.vlan_offload_set = eth_igb_vlan_offload_set,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
	.rx_queue_release = eth_igb_rx_queue_release,
	.rx_queue_count = eth_igb_rx_queue_count,
	.rx_descriptor_done = eth_igb_rx_descriptor_done,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.dev_led_on = eth_igb_led_on,
	.dev_led_off = eth_igb_led_off,
	.flow_ctrl_get = eth_igb_flow_ctrl_get,
	.flow_ctrl_set = eth_igb_flow_ctrl_set,
	.mac_addr_add = eth_igb_rar_set,
	.mac_addr_remove = eth_igb_rar_clear,
	.mac_addr_set = eth_igb_default_mac_addr_set,
	.reta_update = eth_igb_rss_reta_update,
	.reta_query = eth_igb_rss_reta_query,
	.rss_hash_update = eth_igb_rss_hash_update,
	.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
	.filter_ctrl = eth_igb_filter_ctrl,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.timesync_enable = igb_timesync_enable,
	.timesync_disable = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
	.get_reg = eth_igb_get_regs,
	.get_eeprom_length = eth_igb_get_eeprom_length,
	.get_eeprom = eth_igb_get_eeprom,
	.set_eeprom = eth_igb_set_eeprom,
	.timesync_adjust_time = igb_timesync_adjust_time,
	.timesync_read_time = igb_timesync_read_time,
	.timesync_write_time = igb_timesync_write_time,
};

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure = igbvf_dev_configure,
	.dev_start = igbvf_dev_start,
	.dev_stop = igbvf_dev_stop,
	.dev_close = igbvf_dev_close,
	.promiscuous_enable = igbvf_promiscuous_enable,
	.promiscuous_disable = igbvf_promiscuous_disable,
	.allmulticast_enable = igbvf_allmulticast_enable,
	.allmulticast_disable = igbvf_allmulticast_disable,
	.link_update = eth_igb_link_update,
	.stats_get = eth_igbvf_stats_get,
	.xstats_get = eth_igbvf_xstats_get,
	.xstats_get_names = eth_igbvf_xstats_get_names,
	.stats_reset = eth_igbvf_stats_reset,
	.xstats_reset = eth_igbvf_stats_reset,
	.vlan_filter_set = igbvf_vlan_filter_set,
	.dev_infos_get = eth_igbvf_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.rx_queue_setup = eth_igb_rx_queue_setup,
	.rx_queue_release = eth_igb_rx_queue_release,
	.tx_queue_setup = eth_igb_tx_queue_setup,
	.tx_queue_release = eth_igb_tx_queue_release,
	.set_mc_addr_list = eth_igb_set_mc_addr_list,
	.rxq_info_get = igb_rxq_info_get,
	.txq_info_get = igb_txq_info_get,
	.mac_addr_set = igbvf_default_mac_addr_set,
	.get_reg = igbvf_get_regs,
};

/* store statistics names and their offsets in the stats structure */
struct rte_igb_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
	{"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
	{"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
	{"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
	{"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		ptc1023)},
	{"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
	{"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
	{"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
	{"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},

	{"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
};

#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
		sizeof(rte_igb_stats_strings[0]))

static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
	{"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
	{"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
	{"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
	{"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
};

#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
		sizeof(rte_igbvf_stats_strings[0]))

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer into which the link status is saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
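/*
 * Both helpers above copy a struct rte_eth_link in a single
 * rte_atomic64_cmpset(), which relies on the structure fitting in 64 bits;
 * readers and writers thus get a consistent snapshot without a lock. The
 * cmpset only fails (returning -1) when another thread updated the link
 * status between the plain read of *dst and the compare-and-set.
 */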
static inline void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}

static void
igb_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}

static inline void
igbvf_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* only for mailbox */
	E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_FLUSH(hw);
}

/* only for mailbox now. If RX/TX needed, should extend this function. */
static void
igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	/* mailbox */
	tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
	tmp |= E1000_VTIVAR_VALID;
	E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
}

static void
eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure VF other cause ivar */
	igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
}

static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}

static void
igb_identify_hardware(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = dev->pci_dev->id.vendor_id;
	hw->device_id = dev->pci_dev->id.device_id;
	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}

static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is
		 * the case, it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if the lock can not be taken it is due to an improper
		 * lock of the semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}

static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	uint32_t ctrl_ext;

	pci_dev = eth_dev->pci_dev;

	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			     "SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
				   eth_igb_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igb_intr_enable(eth_dev);

	TAILQ_INIT(&filter_info->flex_list);
	filter_info->flex_mask = 0;
	TAILQ_INIT(&filter_info->twotuple_list);
	filter_info->twotuple_mask = 0;
	TAILQ_INIT(&filter_info->fivetuple_list);
	filter_info->fivetuple_mask = 0;

	return 0;

err_late:
	igb_hw_control_release(hw);

	return error;
}

static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_hw *hw;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	pci_dev = eth_dev->pci_dev;

	if (adapter->stopped == 0)
		eth_igb_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Reset any pending lock */
	igb_reset_swfw_lock(hw);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* uninitialize PF if max_vfs not zero */
	igb_pf_host_uninit(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&(pci_dev->intr_handle));
	rte_intr_callback_unregister(&(pci_dev->intr_handle),
				     eth_igb_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;
	struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			     diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (is_zero_ether_addr(perm_addr)) {
		eth_random_addr(perm_addr->addr_bytes);
		diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
		if (diag) {
			rte_free(eth_dev->data->mac_addrs);
			eth_dev->data->mac_addrs = NULL;
			return diag;
		}
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		     "mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");

	rte_intr_callback_register(&pci_dev->intr_handle,
				   eth_igbvf_interrupt_handler,
				   (void *)eth_dev);

	return 0;
}

static int
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev = eth_dev->pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (adapter->stopped == 0)
		igbvf_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     eth_igbvf_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}
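/*
 * PMD registration glue: the two eth_driver structures below hook the PF and
 * VF init/uninit routines above into the EAL PCI probe path, and
 * dev_private_size tells the ethdev layer how much per-port private data
 * (struct e1000_adapter) to allocate before eth_dev_init is called.
 */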
static struct eth_driver rte_igb_pmd = {
	.pci_drv = {
		.name = "rte_igb_pmd",
		.id_table = pci_id_igb_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_igb_dev_init,
	.eth_dev_uninit = eth_igb_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_igbvf_pmd = {
	.pci_drv = {
		.name = "rte_igbvf_pmd",
		.id_table = pci_id_igbvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_igbvf_dev_init,
	.eth_dev_uninit = eth_igbvf_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};

static int
rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	rte_eth_driver_register(&rte_igb_pmd);
	return 0;
}

static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always use VLAN filter */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);

	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

/*
 * VF Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
 */
static int
rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	rte_eth_driver_register(&rte_igbvf_pmd);
	return 0;
}

static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
	    tx_mq_mode == ETH_MQ_TX_DCB ||
	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}
	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* Check multi-queue mode.
		 * To not break existing software, we accept ETH_MQ_RX_NONE as
		 * this might be used to turn off VLAN filter.
		 */

		if (rx_mq_mode == ETH_MQ_RX_NONE ||
		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
		} else {
			/* Only support one queue on VFs.
			 * RSS together with SRIOV is not supported.
			 */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					rx_mq_mode);
			return -EINVAL;
		}
		/* TX mode is not used here, so mode might be ignored.*/
		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(WARNING, "SRIOV is active,"
					" TX mode %d is not supported. "
					" Driver will behave as %d mode.",
					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
		}

		/* check valid queue number */
		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" only support one queue on VFs.");
			return -EINVAL;
		}
	} else {
		/* To not break software that sets an invalid mode, only
		 * display a warning if the invalid mode is used.
		 */
		if (rx_mq_mode != ETH_MQ_RX_NONE &&
		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
		    rx_mq_mode != ETH_MQ_RX_RSS) {
			/* RSS together with VMDq not supported*/
			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				     rx_mq_mode);
			return -EINVAL;
		}

		if (tx_mq_mode != ETH_MQ_TX_NONE &&
		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
					" Due to txmode is meaningless in this"
					" driver, just ignore.",
					tx_mq_mode);
		}
	}
	return 0;
}

static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* multiple queue mode checking */
	ret = igb_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t ctrl_ext;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igb_dev_set_link_up(dev);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec\n", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for rx interrupt */
	eth_igb_configure_msix_intr(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
		ETH_VLAN_EXTEND_MASK;
	eth_igb_vlan_offload_set(dev, mask);

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always use VLAN filter */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
	    (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211)) {
		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			eth_igb_lsc_interrupt_setup(dev);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     eth_igb_interrupt_handler,
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex\n");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		eth_igb_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return -EINVAL;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct rte_eth_link link;
	struct e1000_flex_filter *p_flex;
	struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
	struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

	igb_intr_disable(hw);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	eth_igb_dev_set_link_down(dev);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);

	/* Remove all flex filters of the device */
	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	/* Remove all ntuple filters of the device */
	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
	     p_2tuple != NULL; p_2tuple = p_2tuple_next) {
		p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
		TAILQ_REMOVE(&filter_info->twotuple_list,
			     p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_igb_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

static int
eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_up_phy(hw);
	else
		e1000_power_up_fiber_serdes_link(hw);

	return 0;
}

static int
eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	return 0;
}

static void
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_eth_link link;
	struct rte_pci_device *pci_dev;

	eth_igb_stop(dev);
	adapter->stopped = 1;

	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect */
	if (hw->mac.type >= e1000_82580) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_free_queues(dev);

	pci_dev = dev->pci_dev;
	if (pci_dev->intr_handle.intr_vec) {
		rte_free(pci_dev->intr_handle.intr_vec);
		pci_dev->intr_handle.intr_vec = NULL;
	}

	memset(&link, 0, sizeof(link));
	rte_igb_dev_atomic_write_link_status(dev, &link);
}

static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	if (hw->mac.type == e1000_82576) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
	} else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
		/* PBS needs to be translated according to a lookup table */
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
		rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
		rx_buf_size = (rx_buf_size << 10);
	} else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
	} else {
		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
	}

	return rx_buf_size;
}

/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igb_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = igb_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGB_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;

	/* Set Flow control, use the tunable location if sane */
	if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
		hw->fc.requested_mode = igb_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Issue a global reset */
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);

	return 0;
}

/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
static void
igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
{
	int pause_frames;

	uint64_t old_gprc = stats->gprc;
	uint64_t old_gptc = stats->gptc;
	uint64_t old_tpr = stats->tpr;
	uint64_t old_tpt = stats->tpt;
	uint64_t old_rpthc = stats->rpthc;
	uint64_t old_hgptc = stats->hgptc;

	if (hw->phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
			E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
*/ 1662 /* Both registers clear on the read of the high dword */ 1663 1664 /* Workaround CRC bytes included in size, take away 4 bytes/packet */ 1665 stats->gorc += E1000_READ_REG(hw, E1000_GORCL); 1666 stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 1667 stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN; 1668 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); 1669 stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 1670 stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN; 1671 1672 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 1673 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 1674 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 1675 stats->roc += E1000_READ_REG(hw, E1000_ROC); 1676 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 1677 1678 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 1679 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 1680 1681 stats->tor += E1000_READ_REG(hw, E1000_TORL); 1682 stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32); 1683 stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN; 1684 stats->tot += E1000_READ_REG(hw, E1000_TOTL); 1685 stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32); 1686 stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN; 1687 1688 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 1689 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 1690 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 1691 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 1692 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 1693 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 1694 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 1695 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 1696 1697 /* Interrupt Counts */ 1698 1699 stats->iac += E1000_READ_REG(hw, E1000_IAC); 1700 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 1701 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 1702 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 1703 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 1704 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 1705 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 1706 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 1707 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 1708 1709 /* Host to Card Statistics */ 1710 1711 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 1712 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 1713 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 1714 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 1715 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 1716 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 1717 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 1718 stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL); 1719 stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32); 1720 stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN; 1721 stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL); 1722 stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32); 1723 stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN; 1724 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 1725 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 1726 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 1727 1728 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 1729 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 1730 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 1731 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 1732 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 1733 stats->tsctfc += 
E1000_READ_REG(hw, E1000_TSCTFC); 1734 } 1735 1736 static void 1737 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 1738 { 1739 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1740 struct e1000_hw_stats *stats = 1741 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1742 1743 igb_read_stats_registers(hw, stats); 1744 1745 if (rte_stats == NULL) 1746 return; 1747 1748 /* Rx Errors */ 1749 rte_stats->imissed = stats->mpc; 1750 rte_stats->ierrors = stats->crcerrs + 1751 stats->rlec + stats->ruc + stats->roc + 1752 stats->rxerrc + stats->algnerrc + stats->cexterr; 1753 1754 /* Tx Errors */ 1755 rte_stats->oerrors = stats->ecol + stats->latecol; 1756 1757 rte_stats->ipackets = stats->gprc; 1758 rte_stats->opackets = stats->gptc; 1759 rte_stats->ibytes = stats->gorc; 1760 rte_stats->obytes = stats->gotc; 1761 } 1762 1763 static void 1764 eth_igb_stats_reset(struct rte_eth_dev *dev) 1765 { 1766 struct e1000_hw_stats *hw_stats = 1767 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1768 1769 /* HW registers are cleared on read */ 1770 eth_igb_stats_get(dev, NULL); 1771 1772 /* Reset software totals */ 1773 memset(hw_stats, 0, sizeof(*hw_stats)); 1774 } 1775 1776 static void 1777 eth_igb_xstats_reset(struct rte_eth_dev *dev) 1778 { 1779 struct e1000_hw_stats *stats = 1780 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1781 1782 /* HW registers are cleared on read */ 1783 eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS); 1784 1785 /* Reset software totals */ 1786 memset(stats, 0, sizeof(*stats)); 1787 } 1788 1789 static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1790 struct rte_eth_xstat_name *xstats_names, 1791 __rte_unused unsigned limit) 1792 { 1793 unsigned i; 1794 1795 if (xstats_names == NULL) 1796 return IGB_NB_XSTATS; 1797 1798 /* Note: limit checked in rte_eth_xstats_names() */ 1799 1800 for (i = 0; i < IGB_NB_XSTATS; i++) { 1801 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name), 1802 "%s", rte_igb_stats_strings[i].name); 1803 } 1804 1805 return IGB_NB_XSTATS; 1806 } 1807 1808 static int 1809 eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1810 unsigned n) 1811 { 1812 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1813 struct e1000_hw_stats *hw_stats = 1814 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1815 unsigned i; 1816 1817 if (n < IGB_NB_XSTATS) 1818 return IGB_NB_XSTATS; 1819 1820 igb_read_stats_registers(hw, hw_stats); 1821 1822 /* If this is a reset xstats is NULL, and we have cleared the 1823 * registers by reading them. 
1824 */ 1825 if (!xstats) 1826 return 0; 1827 1828 /* Extended stats */ 1829 for (i = 0; i < IGB_NB_XSTATS; i++) { 1830 xstats[i].id = i; 1831 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 1832 rte_igb_stats_strings[i].offset); 1833 } 1834 1835 return IGB_NB_XSTATS; 1836 } 1837 1838 static void 1839 igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats) 1840 { 1841 /* Good Rx packets, including VF loopback */ 1842 UPDATE_VF_STAT(E1000_VFGPRC, 1843 hw_stats->last_gprc, hw_stats->gprc); 1844 1845 /* Good Rx octets, including VF loopback */ 1846 UPDATE_VF_STAT(E1000_VFGORC, 1847 hw_stats->last_gorc, hw_stats->gorc); 1848 1849 /* Good Tx packets, including VF loopback */ 1850 UPDATE_VF_STAT(E1000_VFGPTC, 1851 hw_stats->last_gptc, hw_stats->gptc); 1852 1853 /* Good Tx octets, including VF loopback */ 1854 UPDATE_VF_STAT(E1000_VFGOTC, 1855 hw_stats->last_gotc, hw_stats->gotc); 1856 1857 /* Rx Multicast packets */ 1858 UPDATE_VF_STAT(E1000_VFMPRC, 1859 hw_stats->last_mprc, hw_stats->mprc); 1860 1861 /* Good Rx loopback packets */ 1862 UPDATE_VF_STAT(E1000_VFGPRLBC, 1863 hw_stats->last_gprlbc, hw_stats->gprlbc); 1864 1865 /* Good Rx loopback octets */ 1866 UPDATE_VF_STAT(E1000_VFGORLBC, 1867 hw_stats->last_gorlbc, hw_stats->gorlbc); 1868 1869 /* Good Tx loopback packets */ 1870 UPDATE_VF_STAT(E1000_VFGPTLBC, 1871 hw_stats->last_gptlbc, hw_stats->gptlbc); 1872 1873 /* Good Tx loopback octets */ 1874 UPDATE_VF_STAT(E1000_VFGOTLBC, 1875 hw_stats->last_gotlbc, hw_stats->gotlbc); 1876 } 1877 1878 static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1879 struct rte_eth_xstat_name *xstats_names, 1880 __rte_unused unsigned limit) 1881 { 1882 unsigned i; 1883 1884 if (xstats_names != NULL) 1885 for (i = 0; i < IGBVF_NB_XSTATS; i++) { 1886 snprintf(xstats_names[i].name, 1887 sizeof(xstats_names[i].name), "%s", 1888 rte_igbvf_stats_strings[i].name); 1889 } 1890 return IGBVF_NB_XSTATS; 1891 } 1892 1893 static int 1894 eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1895 unsigned n) 1896 { 1897 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1898 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) 1899 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1900 unsigned i; 1901 1902 if (n < IGBVF_NB_XSTATS) 1903 return IGBVF_NB_XSTATS; 1904 1905 igbvf_read_stats_registers(hw, hw_stats); 1906 1907 if (!xstats) 1908 return 0; 1909 1910 for (i = 0; i < IGBVF_NB_XSTATS; i++) { 1911 xstats[i].id = i; 1912 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 1913 rte_igbvf_stats_strings[i].offset); 1914 } 1915 1916 return IGBVF_NB_XSTATS; 1917 } 1918 1919 static void 1920 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 1921 { 1922 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1923 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *) 1924 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1925 1926 igbvf_read_stats_registers(hw, hw_stats); 1927 1928 if (rte_stats == NULL) 1929 return; 1930 1931 rte_stats->ipackets = hw_stats->gprc; 1932 rte_stats->ibytes = hw_stats->gorc; 1933 rte_stats->opackets = hw_stats->gptc; 1934 rte_stats->obytes = hw_stats->gotc; 1935 } 1936 1937 static void 1938 eth_igbvf_stats_reset(struct rte_eth_dev *dev) 1939 { 1940 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*) 1941 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 1942 1943 /* Sync HW registers to the last stats */ 1944 eth_igbvf_stats_get(dev, NULL); 1945
1946 /* reset HW current stats*/ 1947 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) - 1948 offsetof(struct e1000_vf_stats, gprc)); 1949 } 1950 1951 static void 1952 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 1953 { 1954 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1955 1956 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 1957 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ 1958 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 1959 dev_info->rx_offload_capa = 1960 DEV_RX_OFFLOAD_VLAN_STRIP | 1961 DEV_RX_OFFLOAD_IPV4_CKSUM | 1962 DEV_RX_OFFLOAD_UDP_CKSUM | 1963 DEV_RX_OFFLOAD_TCP_CKSUM; 1964 dev_info->tx_offload_capa = 1965 DEV_TX_OFFLOAD_VLAN_INSERT | 1966 DEV_TX_OFFLOAD_IPV4_CKSUM | 1967 DEV_TX_OFFLOAD_UDP_CKSUM | 1968 DEV_TX_OFFLOAD_TCP_CKSUM | 1969 DEV_TX_OFFLOAD_SCTP_CKSUM | 1970 DEV_TX_OFFLOAD_TCP_TSO; 1971 1972 switch (hw->mac.type) { 1973 case e1000_82575: 1974 dev_info->max_rx_queues = 4; 1975 dev_info->max_tx_queues = 4; 1976 dev_info->max_vmdq_pools = 0; 1977 break; 1978 1979 case e1000_82576: 1980 dev_info->max_rx_queues = 16; 1981 dev_info->max_tx_queues = 16; 1982 dev_info->max_vmdq_pools = ETH_8_POOLS; 1983 dev_info->vmdq_queue_num = 16; 1984 break; 1985 1986 case e1000_82580: 1987 dev_info->max_rx_queues = 8; 1988 dev_info->max_tx_queues = 8; 1989 dev_info->max_vmdq_pools = ETH_8_POOLS; 1990 dev_info->vmdq_queue_num = 8; 1991 break; 1992 1993 case e1000_i350: 1994 dev_info->max_rx_queues = 8; 1995 dev_info->max_tx_queues = 8; 1996 dev_info->max_vmdq_pools = ETH_8_POOLS; 1997 dev_info->vmdq_queue_num = 8; 1998 break; 1999 2000 case e1000_i354: 2001 dev_info->max_rx_queues = 8; 2002 dev_info->max_tx_queues = 8; 2003 break; 2004 2005 case e1000_i210: 2006 dev_info->max_rx_queues = 4; 2007 dev_info->max_tx_queues = 4; 2008 dev_info->max_vmdq_pools = 0; 2009 break; 2010 2011 case e1000_i211: 2012 dev_info->max_rx_queues = 2; 2013 dev_info->max_tx_queues = 2; 2014 dev_info->max_vmdq_pools = 0; 2015 break; 2016 2017 default: 2018 /* Should not happen */ 2019 break; 2020 } 2021 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t); 2022 dev_info->reta_size = ETH_RSS_RETA_SIZE_128; 2023 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL; 2024 2025 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2026 .rx_thresh = { 2027 .pthresh = IGB_DEFAULT_RX_PTHRESH, 2028 .hthresh = IGB_DEFAULT_RX_HTHRESH, 2029 .wthresh = IGB_DEFAULT_RX_WTHRESH, 2030 }, 2031 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, 2032 .rx_drop_en = 0, 2033 }; 2034 2035 dev_info->default_txconf = (struct rte_eth_txconf) { 2036 .tx_thresh = { 2037 .pthresh = IGB_DEFAULT_TX_PTHRESH, 2038 .hthresh = IGB_DEFAULT_TX_HTHRESH, 2039 .wthresh = IGB_DEFAULT_TX_WTHRESH, 2040 }, 2041 .txq_flags = 0, 2042 }; 2043 2044 dev_info->rx_desc_lim = rx_desc_lim; 2045 dev_info->tx_desc_lim = tx_desc_lim; 2046 2047 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | 2048 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | 2049 ETH_LINK_SPEED_1G; 2050 } 2051 2052 static const uint32_t * 2053 eth_igb_supported_ptypes_get(struct rte_eth_dev *dev) 2054 { 2055 static const uint32_t ptypes[] = { 2056 /* refers to igb_rxd_pkt_info_to_pkt_type() */ 2057 RTE_PTYPE_L2_ETHER, 2058 RTE_PTYPE_L3_IPV4, 2059 RTE_PTYPE_L3_IPV4_EXT, 2060 RTE_PTYPE_L3_IPV6, 2061 RTE_PTYPE_L3_IPV6_EXT, 2062 RTE_PTYPE_L4_TCP, 2063 RTE_PTYPE_L4_UDP, 2064 RTE_PTYPE_L4_SCTP, 2065 RTE_PTYPE_TUNNEL_IP, 2066 RTE_PTYPE_INNER_L3_IPV6, 2067 
RTE_PTYPE_INNER_L3_IPV6_EXT, 2068 RTE_PTYPE_INNER_L4_TCP, 2069 RTE_PTYPE_INNER_L4_UDP, 2070 RTE_PTYPE_UNKNOWN 2071 }; 2072 2073 if (dev->rx_pkt_burst == eth_igb_recv_pkts || 2074 dev->rx_pkt_burst == eth_igb_recv_scattered_pkts) 2075 return ptypes; 2076 return NULL; 2077 } 2078 2079 static void 2080 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 2081 { 2082 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2083 2084 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 2085 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ 2086 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 2087 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | 2088 DEV_RX_OFFLOAD_IPV4_CKSUM | 2089 DEV_RX_OFFLOAD_UDP_CKSUM | 2090 DEV_RX_OFFLOAD_TCP_CKSUM; 2091 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | 2092 DEV_TX_OFFLOAD_IPV4_CKSUM | 2093 DEV_TX_OFFLOAD_UDP_CKSUM | 2094 DEV_TX_OFFLOAD_TCP_CKSUM | 2095 DEV_TX_OFFLOAD_SCTP_CKSUM | 2096 DEV_TX_OFFLOAD_TCP_TSO; 2097 switch (hw->mac.type) { 2098 case e1000_vfadapt: 2099 dev_info->max_rx_queues = 2; 2100 dev_info->max_tx_queues = 2; 2101 break; 2102 case e1000_vfadapt_i350: 2103 dev_info->max_rx_queues = 1; 2104 dev_info->max_tx_queues = 1; 2105 break; 2106 default: 2107 /* Should not happen */ 2108 break; 2109 } 2110 2111 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2112 .rx_thresh = { 2113 .pthresh = IGB_DEFAULT_RX_PTHRESH, 2114 .hthresh = IGB_DEFAULT_RX_HTHRESH, 2115 .wthresh = IGB_DEFAULT_RX_WTHRESH, 2116 }, 2117 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, 2118 .rx_drop_en = 0, 2119 }; 2120 2121 dev_info->default_txconf = (struct rte_eth_txconf) { 2122 .tx_thresh = { 2123 .pthresh = IGB_DEFAULT_TX_PTHRESH, 2124 .hthresh = IGB_DEFAULT_TX_HTHRESH, 2125 .wthresh = IGB_DEFAULT_TX_WTHRESH, 2126 }, 2127 .txq_flags = 0, 2128 }; 2129 2130 dev_info->rx_desc_lim = rx_desc_lim; 2131 dev_info->tx_desc_lim = tx_desc_lim; 2132 } 2133 2134 /* return 0 means link status changed, -1 means not changed */ 2135 static int 2136 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) 2137 { 2138 struct e1000_hw *hw = 2139 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2140 struct rte_eth_link link, old; 2141 int link_check, count; 2142 2143 link_check = 0; 2144 hw->mac.get_link_status = 1; 2145 2146 /* possible wait-to-complete in up to 9 seconds */ 2147 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) { 2148 /* Read the real link status */ 2149 switch (hw->phy.media_type) { 2150 case e1000_media_type_copper: 2151 /* Do the work to read phy */ 2152 e1000_check_for_link(hw); 2153 link_check = !hw->mac.get_link_status; 2154 break; 2155 2156 case e1000_media_type_fiber: 2157 e1000_check_for_link(hw); 2158 link_check = (E1000_READ_REG(hw, E1000_STATUS) & 2159 E1000_STATUS_LU); 2160 break; 2161 2162 case e1000_media_type_internal_serdes: 2163 e1000_check_for_link(hw); 2164 link_check = hw->mac.serdes_has_link; 2165 break; 2166 2167 /* VF device is type_unknown */ 2168 case e1000_media_type_unknown: 2169 eth_igbvf_link_update(hw); 2170 link_check = !hw->mac.get_link_status; 2171 break; 2172 2173 default: 2174 break; 2175 } 2176 if (link_check || wait_to_complete == 0) 2177 break; 2178 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL); 2179 } 2180 memset(&link, 0, sizeof(link)); 2181 rte_igb_dev_atomic_read_link_status(dev, &link); 2182 old = link; 2183 2184 /* Now we check if a transition has happened */ 2185 if (link_check) { 2186 uint16_t duplex, speed; 2187 
hw->mac.ops.get_link_up_info(hw, &speed, &duplex); 2188 link.link_duplex = (duplex == FULL_DUPLEX) ? 2189 ETH_LINK_FULL_DUPLEX : 2190 ETH_LINK_HALF_DUPLEX; 2191 link.link_speed = speed; 2192 link.link_status = ETH_LINK_UP; 2193 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 2194 ETH_LINK_SPEED_FIXED); 2195 } else if (!link_check) { 2196 link.link_speed = 0; 2197 link.link_duplex = ETH_LINK_HALF_DUPLEX; 2198 link.link_status = ETH_LINK_DOWN; 2199 link.link_autoneg = ETH_LINK_SPEED_FIXED; 2200 } 2201 rte_igb_dev_atomic_write_link_status(dev, &link); 2202 2203 /* not changed */ 2204 if (old.link_status == link.link_status) 2205 return -1; 2206 2207 /* changed */ 2208 return 0; 2209 } 2210 2211 /* 2212 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. 2213 * For ASF and Pass Through versions of f/w this means 2214 * that the driver is loaded. 2215 */ 2216 static void 2217 igb_hw_control_acquire(struct e1000_hw *hw) 2218 { 2219 uint32_t ctrl_ext; 2220 2221 /* Let firmware know the driver has taken over */ 2222 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2223 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2224 } 2225 2226 /* 2227 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit. 2228 * For ASF and Pass Through versions of f/w this means that the 2229 * driver is no longer loaded. 2230 */ 2231 static void 2232 igb_hw_control_release(struct e1000_hw *hw) 2233 { 2234 uint32_t ctrl_ext; 2235 2236 /* Let firmware take over control of h/w */ 2237 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 2238 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 2239 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2240 } 2241 2242 /* 2243 * Bit of a misnomer, what this really means is 2244 * to enable OS management of the system... aka 2245 * to disable special hardware management features.
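* In practice (see igb_init_manageability() below) this turns off firmware interception of ARP and forwards management packets on ports 623/664 to the host via the MANC/MANC2H registers.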
2246 */ 2247 static void 2248 igb_init_manageability(struct e1000_hw *hw) 2249 { 2250 if (e1000_enable_mng_pass_thru(hw)) { 2251 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); 2252 uint32_t manc = E1000_READ_REG(hw, E1000_MANC); 2253 2254 /* disable hardware interception of ARP */ 2255 manc &= ~(E1000_MANC_ARP_EN); 2256 2257 /* enable receiving management packets to the host */ 2258 manc |= E1000_MANC_EN_MNG2HOST; 2259 manc2h |= 1 << 5; /* Mng Port 623 */ 2260 manc2h |= 1 << 6; /* Mng Port 664 */ 2261 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); 2262 E1000_WRITE_REG(hw, E1000_MANC, manc); 2263 } 2264 } 2265 2266 static void 2267 igb_release_manageability(struct e1000_hw *hw) 2268 { 2269 if (e1000_enable_mng_pass_thru(hw)) { 2270 uint32_t manc = E1000_READ_REG(hw, E1000_MANC); 2271 2272 manc |= E1000_MANC_ARP_EN; 2273 manc &= ~E1000_MANC_EN_MNG2HOST; 2274 2275 E1000_WRITE_REG(hw, E1000_MANC, manc); 2276 } 2277 } 2278 2279 static void 2280 eth_igb_promiscuous_enable(struct rte_eth_dev *dev) 2281 { 2282 struct e1000_hw *hw = 2283 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2284 uint32_t rctl; 2285 2286 rctl = E1000_READ_REG(hw, E1000_RCTL); 2287 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2288 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2289 } 2290 2291 static void 2292 eth_igb_promiscuous_disable(struct rte_eth_dev *dev) 2293 { 2294 struct e1000_hw *hw = 2295 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2296 uint32_t rctl; 2297 2298 rctl = E1000_READ_REG(hw, E1000_RCTL); 2299 rctl &= (~E1000_RCTL_UPE); 2300 if (dev->data->all_multicast == 1) 2301 rctl |= E1000_RCTL_MPE; 2302 else 2303 rctl &= (~E1000_RCTL_MPE); 2304 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2305 } 2306 2307 static void 2308 eth_igb_allmulticast_enable(struct rte_eth_dev *dev) 2309 { 2310 struct e1000_hw *hw = 2311 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2312 uint32_t rctl; 2313 2314 rctl = E1000_READ_REG(hw, E1000_RCTL); 2315 rctl |= E1000_RCTL_MPE; 2316 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2317 } 2318 2319 static void 2320 eth_igb_allmulticast_disable(struct rte_eth_dev *dev) 2321 { 2322 struct e1000_hw *hw = 2323 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2324 uint32_t rctl; 2325 2326 if (dev->data->promiscuous == 1) 2327 return; /* must remain in all_multicast mode */ 2328 rctl = E1000_READ_REG(hw, E1000_RCTL); 2329 rctl &= (~E1000_RCTL_MPE); 2330 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2331 } 2332 2333 static int 2334 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 2335 { 2336 struct e1000_hw *hw = 2337 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2338 struct e1000_vfta * shadow_vfta = 2339 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 2340 uint32_t vfta; 2341 uint32_t vid_idx; 2342 uint32_t vid_bit; 2343 2344 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & 2345 E1000_VFTA_ENTRY_MASK); 2346 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); 2347 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); 2348 if (on) 2349 vfta |= vid_bit; 2350 else 2351 vfta &= ~vid_bit; 2352 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); 2353 2354 /* update local VFTA copy */ 2355 shadow_vfta->vfta[vid_idx] = vfta; 2356 2357 return 0; 2358 } 2359 2360 static int 2361 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, 2362 enum rte_vlan_type vlan_type, 2363 uint16_t tpid) 2364 { 2365 struct e1000_hw *hw = 2366 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2367 uint32_t reg, qinq; 2368 2369 qinq = E1000_READ_REG(hw, E1000_CTRL_EXT); 2370 qinq &= 
E1000_CTRL_EXT_EXT_VLAN; 2371 2372 /* only outer TPID of double VLAN can be configured*/ 2373 if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) { 2374 reg = E1000_READ_REG(hw, E1000_VET); 2375 reg = (reg & (~E1000_VET_VET_EXT)) | 2376 ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT); 2377 E1000_WRITE_REG(hw, E1000_VET, reg); 2378 2379 return 0; 2380 } 2381 2382 /* all other TPID values are read-only*/ 2383 PMD_DRV_LOG(ERR, "Not supported"); 2384 2385 return -ENOTSUP; 2386 } 2387 2388 static void 2389 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev) 2390 { 2391 struct e1000_hw *hw = 2392 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2393 uint32_t reg; 2394 2395 /* Filter Table Disable */ 2396 reg = E1000_READ_REG(hw, E1000_RCTL); 2397 reg &= ~E1000_RCTL_CFIEN; 2398 reg &= ~E1000_RCTL_VFE; 2399 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2400 } 2401 2402 static void 2403 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2404 { 2405 struct e1000_hw *hw = 2406 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2407 struct e1000_vfta * shadow_vfta = 2408 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 2409 uint32_t reg; 2410 int i; 2411 2412 /* Filter Table Enable, CFI not used for packet acceptance */ 2413 reg = E1000_READ_REG(hw, E1000_RCTL); 2414 reg &= ~E1000_RCTL_CFIEN; 2415 reg |= E1000_RCTL_VFE; 2416 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2417 2418 /* restore VFTA table */ 2419 for (i = 0; i < IGB_VFTA_SIZE; i++) 2420 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); 2421 } 2422 2423 static void 2424 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev) 2425 { 2426 struct e1000_hw *hw = 2427 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2428 uint32_t reg; 2429 2430 /* VLAN Mode Disable */ 2431 reg = E1000_READ_REG(hw, E1000_CTRL); 2432 reg &= ~E1000_CTRL_VME; 2433 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2434 } 2435 2436 static void 2437 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev) 2438 { 2439 struct e1000_hw *hw = 2440 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2441 uint32_t reg; 2442 2443 /* VLAN Mode Enable */ 2444 reg = E1000_READ_REG(hw, E1000_CTRL); 2445 reg |= E1000_CTRL_VME; 2446 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2447 } 2448 2449 static void 2450 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2451 { 2452 struct e1000_hw *hw = 2453 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2454 uint32_t reg; 2455 2456 /* CTRL_EXT: Extended VLAN */ 2457 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2458 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN; 2459 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2460 2461 /* Update maximum packet length */ 2462 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) 2463 E1000_WRITE_REG(hw, E1000_RLPML, 2464 dev->data->dev_conf.rxmode.max_rx_pkt_len + 2465 VLAN_TAG_SIZE); 2466 } 2467 2468 static void 2469 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2470 { 2471 struct e1000_hw *hw = 2472 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2473 uint32_t reg; 2474 2475 /* CTRL_EXT: Extended VLAN */ 2476 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 2477 reg |= E1000_CTRL_EXT_EXTEND_VLAN; 2478 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 2479 2480 /* Update maximum packet length */ 2481 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) 2482 E1000_WRITE_REG(hw, E1000_RLPML, 2483 dev->data->dev_conf.rxmode.max_rx_pkt_len + 2484 2 * VLAN_TAG_SIZE); 2485 } 2486 2487 static void 2488 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2489 { 2490 if(mask & ETH_VLAN_STRIP_MASK){ 2491 if (dev->data->dev_conf.rxmode.hw_vlan_strip) 2492 
igb_vlan_hw_strip_enable(dev); 2493 else 2494 igb_vlan_hw_strip_disable(dev); 2495 } 2496 2497 if(mask & ETH_VLAN_FILTER_MASK){ 2498 if (dev->data->dev_conf.rxmode.hw_vlan_filter) 2499 igb_vlan_hw_filter_enable(dev); 2500 else 2501 igb_vlan_hw_filter_disable(dev); 2502 } 2503 2504 if(mask & ETH_VLAN_EXTEND_MASK){ 2505 if (dev->data->dev_conf.rxmode.hw_vlan_extend) 2506 igb_vlan_hw_extend_enable(dev); 2507 else 2508 igb_vlan_hw_extend_disable(dev); 2509 } 2510 } 2511 2512 2513 /** 2514 * It enables the interrupt mask and then enables the interrupt. 2515 * 2516 * @param dev 2517 * Pointer to struct rte_eth_dev. 2518 * 2519 * @return 2520 * - On success, zero. 2521 * - On failure, a negative value. 2522 */ 2523 static int 2524 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev) 2525 { 2526 struct e1000_interrupt *intr = 2527 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2528 2529 intr->mask |= E1000_ICR_LSC; 2530 2531 return 0; 2532 } 2533 2534 /* It clears the interrupt causes and enables the interrupt. 2535 * It will be called only once during NIC initialization. 2536 * 2537 * @param dev 2538 * Pointer to struct rte_eth_dev. 2539 * 2540 * @return 2541 * - On success, zero. 2542 * - On failure, a negative value. 2543 */ 2544 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev) 2545 { 2546 uint32_t mask, regval; 2547 struct e1000_hw *hw = 2548 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2549 struct rte_eth_dev_info dev_info; 2550 2551 memset(&dev_info, 0, sizeof(dev_info)); 2552 eth_igb_infos_get(dev, &dev_info); 2553 2554 mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues); 2555 regval = E1000_READ_REG(hw, E1000_EIMS); 2556 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); 2557 2558 return 0; 2559 } 2560 2561 /* 2562 * It reads ICR to get the interrupt causes, checks them and sets a bit flag 2563 * to request a link status update. 2564 * 2565 * @param dev 2566 * Pointer to struct rte_eth_dev. 2567 * 2568 * @return 2569 * - On success, zero. 2570 * - On failure, a negative value. 2571 */ 2572 static int 2573 eth_igb_interrupt_get_status(struct rte_eth_dev *dev) 2574 { 2575 uint32_t icr; 2576 struct e1000_hw *hw = 2577 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2578 struct e1000_interrupt *intr = 2579 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2580 2581 igb_intr_disable(hw); 2582 2583 /* read-on-clear nic registers here */ 2584 icr = E1000_READ_REG(hw, E1000_ICR); 2585 2586 intr->flags = 0; 2587 if (icr & E1000_ICR_LSC) { 2588 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; 2589 } 2590 2591 if (icr & E1000_ICR_VMMB) 2592 intr->flags |= E1000_FLAG_MAILBOX; 2593 2594 return 0; 2595 } 2596 2597 /* 2598 * It executes link_update after knowing an interrupt is present. 2599 * 2600 * @param dev 2601 * Pointer to struct rte_eth_dev. 2602 * 2603 * @return 2604 * - On success, zero. 2605 * - On failure, a negative value.
2606 */ 2607 static int 2608 eth_igb_interrupt_action(struct rte_eth_dev *dev) 2609 { 2610 struct e1000_hw *hw = 2611 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2612 struct e1000_interrupt *intr = 2613 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2614 uint32_t tctl, rctl; 2615 struct rte_eth_link link; 2616 int ret; 2617 2618 if (intr->flags & E1000_FLAG_MAILBOX) { 2619 igb_pf_mbx_process(dev); 2620 intr->flags &= ~E1000_FLAG_MAILBOX; 2621 } 2622 2623 igb_intr_enable(dev); 2624 rte_intr_enable(&(dev->pci_dev->intr_handle)); 2625 2626 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) { 2627 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; 2628 2629 /* set get_link_status to check register later */ 2630 hw->mac.get_link_status = 1; 2631 ret = eth_igb_link_update(dev, 0); 2632 2633 /* check if link has changed */ 2634 if (ret < 0) 2635 return 0; 2636 2637 memset(&link, 0, sizeof(link)); 2638 rte_igb_dev_atomic_read_link_status(dev, &link); 2639 if (link.link_status) { 2640 PMD_INIT_LOG(INFO, 2641 " Port %d: Link Up - speed %u Mbps - %s", 2642 dev->data->port_id, 2643 (unsigned)link.link_speed, 2644 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 2645 "full-duplex" : "half-duplex"); 2646 } else { 2647 PMD_INIT_LOG(INFO, " Port %d: Link Down", 2648 dev->data->port_id); 2649 } 2650 2651 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", 2652 dev->pci_dev->addr.domain, 2653 dev->pci_dev->addr.bus, 2654 dev->pci_dev->addr.devid, 2655 dev->pci_dev->addr.function); 2656 tctl = E1000_READ_REG(hw, E1000_TCTL); 2657 rctl = E1000_READ_REG(hw, E1000_RCTL); 2658 if (link.link_status) { 2659 /* enable Tx/Rx */ 2660 tctl |= E1000_TCTL_EN; 2661 rctl |= E1000_RCTL_EN; 2662 } else { 2663 /* disable Tx/Rx */ 2664 tctl &= ~E1000_TCTL_EN; 2665 rctl &= ~E1000_RCTL_EN; 2666 } 2667 E1000_WRITE_REG(hw, E1000_TCTL, tctl); 2668 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2669 E1000_WRITE_FLUSH(hw); 2670 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); 2671 } 2672 2673 return 0; 2674 } 2675 2676 /** 2677 * Interrupt handler which shall be registered first. 2678 * 2679 * @param handle 2680 * Pointer to interrupt handle. 2681 * @param param 2682 * The address of parameter (struct rte_eth_dev *) registered before.
2683 * 2684 * @return 2685 * void 2686 */ 2687 static void 2688 eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle, 2689 void *param) 2690 { 2691 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2692 2693 eth_igb_interrupt_get_status(dev); 2694 eth_igb_interrupt_action(dev); 2695 } 2696 2697 static int 2698 eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev) 2699 { 2700 uint32_t eicr; 2701 struct e1000_hw *hw = 2702 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2703 struct e1000_interrupt *intr = 2704 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2705 2706 igbvf_intr_disable(hw); 2707 2708 /* read-on-clear nic registers here */ 2709 eicr = E1000_READ_REG(hw, E1000_EICR); 2710 intr->flags = 0; 2711 2712 if (eicr == E1000_VTIVAR_MISC_MAILBOX) 2713 intr->flags |= E1000_FLAG_MAILBOX; 2714 2715 return 0; 2716 } 2717 2718 void igbvf_mbx_process(struct rte_eth_dev *dev) 2719 { 2720 struct e1000_hw *hw = 2721 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2722 struct e1000_mbx_info *mbx = &hw->mbx; 2723 u32 in_msg = 0; 2724 2725 if (mbx->ops.read(hw, &in_msg, 1, 0)) 2726 return; 2727 2728 /* PF reset VF event */ 2729 if (in_msg == E1000_PF_CONTROL_MSG) 2730 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET); 2731 } 2732 2733 static int 2734 eth_igbvf_interrupt_action(struct rte_eth_dev *dev) 2735 { 2736 struct e1000_interrupt *intr = 2737 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2738 2739 if (intr->flags & E1000_FLAG_MAILBOX) { 2740 igbvf_mbx_process(dev); 2741 intr->flags &= ~E1000_FLAG_MAILBOX; 2742 } 2743 2744 igbvf_intr_enable(dev); 2745 rte_intr_enable(&dev->pci_dev->intr_handle); 2746 2747 return 0; 2748 } 2749 2750 static void 2751 eth_igbvf_interrupt_handler(__rte_unused struct rte_intr_handle *handle, 2752 void *param) 2753 { 2754 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2755 2756 eth_igbvf_interrupt_get_status(dev); 2757 eth_igbvf_interrupt_action(dev); 2758 } 2759 2760 static int 2761 eth_igb_led_on(struct rte_eth_dev *dev) 2762 { 2763 struct e1000_hw *hw; 2764 2765 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2766 return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; 2767 } 2768 2769 static int 2770 eth_igb_led_off(struct rte_eth_dev *dev) 2771 { 2772 struct e1000_hw *hw; 2773 2774 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2775 return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP; 2776 } 2777 2778 static int 2779 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2780 { 2781 struct e1000_hw *hw; 2782 uint32_t ctrl; 2783 int tx_pause; 2784 int rx_pause; 2785 2786 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2787 fc_conf->pause_time = hw->fc.pause_time; 2788 fc_conf->high_water = hw->fc.high_water; 2789 fc_conf->low_water = hw->fc.low_water; 2790 fc_conf->send_xon = hw->fc.send_xon; 2791 fc_conf->autoneg = hw->mac.autoneg; 2792 2793 /* 2794 * Return rx_pause and tx_pause status according to actual setting of 2795 * the TFCE and RFCE bits in the CTRL register. 
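* (RFCE and TFCE both set map to RTE_FC_FULL, RFCE alone to RTE_FC_RX_PAUSE, TFCE alone to RTE_FC_TX_PAUSE, and neither to RTE_FC_NONE.)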
2796 */ 2797 ctrl = E1000_READ_REG(hw, E1000_CTRL); 2798 if (ctrl & E1000_CTRL_TFCE) 2799 tx_pause = 1; 2800 else 2801 tx_pause = 0; 2802 2803 if (ctrl & E1000_CTRL_RFCE) 2804 rx_pause = 1; 2805 else 2806 rx_pause = 0; 2807 2808 if (rx_pause && tx_pause) 2809 fc_conf->mode = RTE_FC_FULL; 2810 else if (rx_pause) 2811 fc_conf->mode = RTE_FC_RX_PAUSE; 2812 else if (tx_pause) 2813 fc_conf->mode = RTE_FC_TX_PAUSE; 2814 else 2815 fc_conf->mode = RTE_FC_NONE; 2816 2817 return 0; 2818 } 2819 2820 static int 2821 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2822 { 2823 struct e1000_hw *hw; 2824 int err; 2825 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { 2826 e1000_fc_none, 2827 e1000_fc_rx_pause, 2828 e1000_fc_tx_pause, 2829 e1000_fc_full 2830 }; 2831 uint32_t rx_buf_size; 2832 uint32_t max_high_water; 2833 uint32_t rctl; 2834 2835 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2836 if (fc_conf->autoneg != hw->mac.autoneg) 2837 return -ENOTSUP; 2838 rx_buf_size = igb_get_rx_buffer_size(hw); 2839 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 2840 2841 /* At least reserve one Ethernet frame for watermark */ 2842 max_high_water = rx_buf_size - ETHER_MAX_LEN; 2843 if ((fc_conf->high_water > max_high_water) || 2844 (fc_conf->high_water < fc_conf->low_water)) { 2845 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value"); 2846 PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water); 2847 return -EINVAL; 2848 } 2849 2850 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; 2851 hw->fc.pause_time = fc_conf->pause_time; 2852 hw->fc.high_water = fc_conf->high_water; 2853 hw->fc.low_water = fc_conf->low_water; 2854 hw->fc.send_xon = fc_conf->send_xon; 2855 2856 err = e1000_setup_link_generic(hw); 2857 if (err == E1000_SUCCESS) { 2858 2859 /* check if we want to forward MAC frames - driver doesn't have native 2860 * capability to do that, so we'll write the registers ourselves */ 2861 2862 rctl = E1000_READ_REG(hw, E1000_RCTL); 2863 2864 /* set or clear MFLCN.PMCF bit depending on configuration */ 2865 if (fc_conf->mac_ctrl_frame_fwd != 0) 2866 rctl |= E1000_RCTL_PMCF; 2867 else 2868 rctl &= ~E1000_RCTL_PMCF; 2869 2870 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2871 E1000_WRITE_FLUSH(hw); 2872 2873 return 0; 2874 } 2875 2876 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err); 2877 return -EIO; 2878 } 2879 2880 #define E1000_RAH_POOLSEL_SHIFT (18) 2881 static void 2882 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 2883 uint32_t index, __rte_unused uint32_t pool) 2884 { 2885 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2886 uint32_t rah; 2887 2888 e1000_rar_set(hw, mac_addr->addr_bytes, index); 2889 rah = E1000_READ_REG(hw, E1000_RAH(index)); 2890 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool)); 2891 E1000_WRITE_REG(hw, E1000_RAH(index), rah); 2892 } 2893 2894 static void 2895 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) 2896 { 2897 uint8_t addr[ETHER_ADDR_LEN]; 2898 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2899 2900 memset(addr, 0, sizeof(addr)); 2901 2902 e1000_rar_set(hw, addr, index); 2903 } 2904 2905 static void 2906 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, 2907 struct ether_addr *addr) 2908 { 2909 eth_igb_rar_clear(dev, 0); 2910 2911 eth_igb_rar_set(dev, (void *)addr, 0, 0); 2912 } 2913 /* 2914 * Virtual Function operations 2915 */ 2916 static void 2917 igbvf_intr_disable(struct e1000_hw *hw) 2918 { 2919 
PMD_INIT_FUNC_TRACE(); 2920 2921 /* Clear interrupt mask to stop from interrupts being generated */ 2922 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF); 2923 2924 E1000_WRITE_FLUSH(hw); 2925 } 2926 2927 static void 2928 igbvf_stop_adapter(struct rte_eth_dev *dev) 2929 { 2930 u32 reg_val; 2931 u16 i; 2932 struct rte_eth_dev_info dev_info; 2933 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2934 2935 memset(&dev_info, 0, sizeof(dev_info)); 2936 eth_igbvf_infos_get(dev, &dev_info); 2937 2938 /* Clear interrupt mask to stop from interrupts being generated */ 2939 igbvf_intr_disable(hw); 2940 2941 /* Clear any pending interrupts, flush previous writes */ 2942 E1000_READ_REG(hw, E1000_EICR); 2943 2944 /* Disable the transmit unit. Each queue must be disabled. */ 2945 for (i = 0; i < dev_info.max_tx_queues; i++) 2946 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH); 2947 2948 /* Disable the receive unit by stopping each queue */ 2949 for (i = 0; i < dev_info.max_rx_queues; i++) { 2950 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i)); 2951 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE; 2952 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val); 2953 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE) 2954 ; 2955 } 2956 2957 /* flush all queues disables */ 2958 E1000_WRITE_FLUSH(hw); 2959 msec_delay(2); 2960 } 2961 2962 static int eth_igbvf_link_update(struct e1000_hw *hw) 2963 { 2964 struct e1000_mbx_info *mbx = &hw->mbx; 2965 struct e1000_mac_info *mac = &hw->mac; 2966 int ret_val = E1000_SUCCESS; 2967 2968 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf"); 2969 2970 /* 2971 * We only want to run this if there has been a rst asserted. 2972 * in this case that could mean a link change, device reset, 2973 * or a virtual function reset 2974 */ 2975 2976 /* If we were hit with a reset or timeout drop the link */ 2977 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout) 2978 mac->get_link_status = TRUE; 2979 2980 if (!mac->get_link_status) 2981 goto out; 2982 2983 /* if link status is down no point in checking to see if pf is up */ 2984 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) 2985 goto out; 2986 2987 /* if we passed all the tests above then the link is up and we no 2988 * longer need to check for link */ 2989 mac->get_link_status = FALSE; 2990 2991 out: 2992 return ret_val; 2993 } 2994 2995 2996 static int 2997 igbvf_dev_configure(struct rte_eth_dev *dev) 2998 { 2999 struct rte_eth_conf* conf = &dev->data->dev_conf; 3000 3001 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 3002 dev->data->port_id); 3003 3004 /* 3005 * VF has no ability to enable/disable HW CRC 3006 * Keep the persistent behavior the same as Host PF 3007 */ 3008 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC 3009 if (!conf->rxmode.hw_strip_crc) { 3010 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 3011 conf->rxmode.hw_strip_crc = 1; 3012 } 3013 #else 3014 if (conf->rxmode.hw_strip_crc) { 3015 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 3016 conf->rxmode.hw_strip_crc = 0; 3017 } 3018 #endif 3019 3020 return 0; 3021 } 3022 3023 static int 3024 igbvf_dev_start(struct rte_eth_dev *dev) 3025 { 3026 struct e1000_hw *hw = 3027 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3028 struct e1000_adapter *adapter = 3029 E1000_DEV_PRIVATE(dev->data->dev_private); 3030 int ret; 3031 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 3032 uint32_t intr_vector = 0; 3033 3034 PMD_INIT_FUNC_TRACE(); 3035 3036 hw->mac.ops.reset_hw(hw); 3037 adapter->stopped = 0; 3038 
3039 /* Set all vfta */ 3040 igbvf_set_vfta_all(dev,1); 3041 3042 eth_igbvf_tx_init(dev); 3043 3044 /* This can fail when allocating mbufs for descriptor rings */ 3045 ret = eth_igbvf_rx_init(dev); 3046 if (ret) { 3047 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 3048 igb_dev_clear_queues(dev); 3049 return ret; 3050 } 3051 3052 /* check and configure queue intr-vector mapping */ 3053 if (dev->data->dev_conf.intr_conf.rxq != 0) { 3054 intr_vector = dev->data->nb_rx_queues; 3055 ret = rte_intr_efd_enable(intr_handle, intr_vector); 3056 if (ret) 3057 return ret; 3058 } 3059 3060 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 3061 intr_handle->intr_vec = 3062 rte_zmalloc("intr_vec", 3063 dev->data->nb_rx_queues * sizeof(int), 0); 3064 if (!intr_handle->intr_vec) { 3065 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 3066 " intr_vec\n", dev->data->nb_rx_queues); 3067 return -ENOMEM; 3068 } 3069 } 3070 3071 eth_igbvf_configure_msix_intr(dev); 3072 3073 /* enable uio/vfio intr/eventfd mapping */ 3074 rte_intr_enable(intr_handle); 3075 3076 /* resume enabled intr since hw reset */ 3077 igbvf_intr_enable(dev); 3078 3079 return 0; 3080 } 3081 3082 static void 3083 igbvf_dev_stop(struct rte_eth_dev *dev) 3084 { 3085 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 3086 3087 PMD_INIT_FUNC_TRACE(); 3088 3089 igbvf_stop_adapter(dev); 3090 3091 /* 3092 * Clear what we set, but we still keep shadow_vfta to 3093 * restore after device starts 3094 */ 3095 igbvf_set_vfta_all(dev,0); 3096 3097 igb_dev_clear_queues(dev); 3098 3099 /* disable intr eventfd mapping */ 3100 rte_intr_disable(intr_handle); 3101 3102 /* Clean datapath event and queue/vec mapping */ 3103 rte_intr_efd_disable(intr_handle); 3104 if (intr_handle->intr_vec) { 3105 rte_free(intr_handle->intr_vec); 3106 intr_handle->intr_vec = NULL; 3107 } 3108 } 3109 3110 static void 3111 igbvf_dev_close(struct rte_eth_dev *dev) 3112 { 3113 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3114 struct e1000_adapter *adapter = 3115 E1000_DEV_PRIVATE(dev->data->dev_private); 3116 struct ether_addr addr; 3117 3118 PMD_INIT_FUNC_TRACE(); 3119 3120 e1000_reset_hw(hw); 3121 3122 igbvf_dev_stop(dev); 3123 adapter->stopped = 1; 3124 igb_dev_free_queues(dev); 3125 3126 /** 3127 * reprogram the RAR with a zero mac address, 3128 * to ensure that the VF traffic goes to the PF 3129 * after stop, close and detach of the VF. 
3130 **/ 3131 3132 memset(&addr, 0, sizeof(addr)); 3133 igbvf_default_mac_addr_set(dev, &addr); 3134 } 3135 3136 static void 3137 igbvf_promiscuous_enable(struct rte_eth_dev *dev) 3138 { 3139 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3140 3141 /* Set both unicast and multicast promisc */ 3142 e1000_promisc_set_vf(hw, e1000_promisc_enabled); 3143 } 3144 3145 static void 3146 igbvf_promiscuous_disable(struct rte_eth_dev *dev) 3147 { 3148 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3149 3150 /* If in allmulticast mode leave multicast promisc */ 3151 if (dev->data->all_multicast == 1) 3152 e1000_promisc_set_vf(hw, e1000_promisc_multicast); 3153 else 3154 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 3155 } 3156 3157 static void 3158 igbvf_allmulticast_enable(struct rte_eth_dev *dev) 3159 { 3160 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3161 3162 /* In promiscuous mode multicast promisc already set */ 3163 if (dev->data->promiscuous == 0) 3164 e1000_promisc_set_vf(hw, e1000_promisc_multicast); 3165 } 3166 3167 static void 3168 igbvf_allmulticast_disable(struct rte_eth_dev *dev) 3169 { 3170 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3171 3172 /* In promiscuous mode leave multicast promisc enabled */ 3173 if (dev->data->promiscuous == 0) 3174 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 3175 } 3176 3177 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on) 3178 { 3179 struct e1000_mbx_info *mbx = &hw->mbx; 3180 uint32_t msgbuf[2]; 3181 s32 err; 3182 3183 /* After setting a VLAN, VLAN stripping will also be enabled in the igb driver */ 3184 msgbuf[0] = E1000_VF_SET_VLAN; 3185 msgbuf[1] = vid; 3186 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ 3187 if (on) 3188 msgbuf[0] |= E1000_VF_SET_VLAN_ADD; 3189 3190 err = mbx->ops.write_posted(hw, msgbuf, 2, 0); 3191 if (err) 3192 goto mbx_err; 3193 3194 err = mbx->ops.read_posted(hw, msgbuf, 2, 0); 3195 if (err) 3196 goto mbx_err; 3197 3198 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; 3199 if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)) 3200 err = -EINVAL; 3201 3202 mbx_err: 3203 return err; 3204 } 3205 3206 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on) 3207 { 3208 struct e1000_hw *hw = 3209 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3210 struct e1000_vfta * shadow_vfta = 3211 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 3212 int i = 0, j = 0, vfta = 0, mask = 1; 3213 3214 for (i = 0; i < IGB_VFTA_SIZE; i++){ 3215 vfta = shadow_vfta->vfta[i]; 3216 if(vfta){ 3217 mask = 1; 3218 for (j = 0; j < 32; j++){ 3219 if(vfta & mask) 3220 igbvf_set_vfta(hw, 3221 (uint16_t)((i<<5)+j), on); 3222 mask<<=1; 3223 } 3224 } 3225 } 3226 3227 } 3228 3229 static int 3230 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 3231 { 3232 struct e1000_hw *hw = 3233 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3234 struct e1000_vfta * shadow_vfta = 3235 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 3236 uint32_t vid_idx = 0; 3237 uint32_t vid_bit = 0; 3238 int ret = 0; 3239 3240 PMD_INIT_FUNC_TRACE(); 3241 3242 /* vind is not used in the VF driver, set to 0, check ixgbe_set_vfta_vf */ 3243 ret = igbvf_set_vfta(hw, vlan_id, !!on); 3244 if(ret){ 3245 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 3246 return ret; 3247 } 3248 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 3249 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 3250 3251 /* Save what we set and restore it after device reset */ 3252 if
(on) 3253 shadow_vfta->vfta[vid_idx] |= vid_bit; 3254 else 3255 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 3256 3257 return 0; 3258 } 3259 3260 static void 3261 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) 3262 { 3263 struct e1000_hw *hw = 3264 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3265 3266 /* index is not used by rar_set() */ 3267 hw->mac.ops.rar_set(hw, (void *)addr, 0); 3268 } 3269 3270 3271 static int 3272 eth_igb_rss_reta_update(struct rte_eth_dev *dev, 3273 struct rte_eth_rss_reta_entry64 *reta_conf, 3274 uint16_t reta_size) 3275 { 3276 uint8_t i, j, mask; 3277 uint32_t reta, r; 3278 uint16_t idx, shift; 3279 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3280 3281 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3282 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3283 "(%d) doesn't match the number hardware can supported " 3284 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); 3285 return -EINVAL; 3286 } 3287 3288 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { 3289 idx = i / RTE_RETA_GROUP_SIZE; 3290 shift = i % RTE_RETA_GROUP_SIZE; 3291 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3292 IGB_4_BIT_MASK); 3293 if (!mask) 3294 continue; 3295 if (mask == IGB_4_BIT_MASK) 3296 r = 0; 3297 else 3298 r = E1000_READ_REG(hw, E1000_RETA(i >> 2)); 3299 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) { 3300 if (mask & (0x1 << j)) 3301 reta |= reta_conf[idx].reta[shift + j] << 3302 (CHAR_BIT * j); 3303 else 3304 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j)); 3305 } 3306 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta); 3307 } 3308 3309 return 0; 3310 } 3311 3312 static int 3313 eth_igb_rss_reta_query(struct rte_eth_dev *dev, 3314 struct rte_eth_rss_reta_entry64 *reta_conf, 3315 uint16_t reta_size) 3316 { 3317 uint8_t i, j, mask; 3318 uint32_t reta; 3319 uint16_t idx, shift; 3320 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3321 3322 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3323 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3324 "(%d) doesn't match the number hardware can supported " 3325 "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); 3326 return -EINVAL; 3327 } 3328 3329 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) { 3330 idx = i / RTE_RETA_GROUP_SIZE; 3331 shift = i % RTE_RETA_GROUP_SIZE; 3332 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3333 IGB_4_BIT_MASK); 3334 if (!mask) 3335 continue; 3336 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2)); 3337 for (j = 0; j < IGB_4_BIT_WIDTH; j++) { 3338 if (mask & (0x1 << j)) 3339 reta_conf[idx].reta[shift + j] = 3340 ((reta >> (CHAR_BIT * j)) & 3341 IGB_8_BIT_MASK); 3342 } 3343 } 3344 3345 return 0; 3346 } 3347 3348 #define MAC_TYPE_FILTER_SUP(type) do {\ 3349 if ((type) != e1000_82580 && (type) != e1000_i350 &&\ 3350 (type) != e1000_82576)\ 3351 return -ENOTSUP;\ 3352 } while (0) 3353 3354 static int 3355 eth_igb_syn_filter_set(struct rte_eth_dev *dev, 3356 struct rte_eth_syn_filter *filter, 3357 bool add) 3358 { 3359 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3360 uint32_t synqf, rfctl; 3361 3362 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) 3363 return -EINVAL; 3364 3365 synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); 3366 3367 if (add) { 3368 if (synqf & E1000_SYN_FILTER_ENABLE) 3369 return -EINVAL; 3370 3371 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) & 3372 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE); 3373 3374 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 3375 if (filter->hig_pri) 
3376 rfctl |= E1000_RFCTL_SYNQFP; 3377 else 3378 rfctl &= ~E1000_RFCTL_SYNQFP; 3379 3380 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); 3381 } else { 3382 if (!(synqf & E1000_SYN_FILTER_ENABLE)) 3383 return -ENOENT; 3384 synqf = 0; 3385 } 3386 3387 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf); 3388 E1000_WRITE_FLUSH(hw); 3389 return 0; 3390 } 3391 3392 static int 3393 eth_igb_syn_filter_get(struct rte_eth_dev *dev, 3394 struct rte_eth_syn_filter *filter) 3395 { 3396 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3397 uint32_t synqf, rfctl; 3398 3399 synqf = E1000_READ_REG(hw, E1000_SYNQF(0)); 3400 if (synqf & E1000_SYN_FILTER_ENABLE) { 3401 rfctl = E1000_READ_REG(hw, E1000_RFCTL); 3402 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0; 3403 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >> 3404 E1000_SYN_FILTER_QUEUE_SHIFT); 3405 return 0; 3406 } 3407 3408 return -ENOENT; 3409 } 3410 3411 static int 3412 eth_igb_syn_filter_handle(struct rte_eth_dev *dev, 3413 enum rte_filter_op filter_op, 3414 void *arg) 3415 { 3416 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3417 int ret; 3418 3419 MAC_TYPE_FILTER_SUP(hw->mac.type); 3420 3421 if (filter_op == RTE_ETH_FILTER_NOP) 3422 return 0; 3423 3424 if (arg == NULL) { 3425 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 3426 filter_op); 3427 return -EINVAL; 3428 } 3429 3430 switch (filter_op) { 3431 case RTE_ETH_FILTER_ADD: 3432 ret = eth_igb_syn_filter_set(dev, 3433 (struct rte_eth_syn_filter *)arg, 3434 TRUE); 3435 break; 3436 case RTE_ETH_FILTER_DELETE: 3437 ret = eth_igb_syn_filter_set(dev, 3438 (struct rte_eth_syn_filter *)arg, 3439 FALSE); 3440 break; 3441 case RTE_ETH_FILTER_GET: 3442 ret = eth_igb_syn_filter_get(dev, 3443 (struct rte_eth_syn_filter *)arg); 3444 break; 3445 default: 3446 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); 3447 ret = -EINVAL; 3448 break; 3449 } 3450 3451 return ret; 3452 } 3453 3454 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\ 3455 if ((type) != e1000_82580 && (type) != e1000_i350)\ 3456 return -ENOSYS; \ 3457 } while (0) 3458 3459 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/ 3460 static inline int 3461 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter, 3462 struct e1000_2tuple_filter_info *filter_info) 3463 { 3464 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) 3465 return -EINVAL; 3466 if (filter->priority > E1000_2TUPLE_MAX_PRI) 3467 return -EINVAL; /* filter index is out of range. */ 3468 if (filter->tcp_flags > TCP_FLAG_ALL) 3469 return -EINVAL; /* flags is invalid. 
*/ 3470 3471 switch (filter->dst_port_mask) { 3472 case UINT16_MAX: 3473 filter_info->dst_port_mask = 0; 3474 filter_info->dst_port = filter->dst_port; 3475 break; 3476 case 0: 3477 filter_info->dst_port_mask = 1; 3478 break; 3479 default: 3480 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 3481 return -EINVAL; 3482 } 3483 3484 switch (filter->proto_mask) { 3485 case UINT8_MAX: 3486 filter_info->proto_mask = 0; 3487 filter_info->proto = filter->proto; 3488 break; 3489 case 0: 3490 filter_info->proto_mask = 1; 3491 break; 3492 default: 3493 PMD_DRV_LOG(ERR, "invalid protocol mask."); 3494 return -EINVAL; 3495 } 3496 3497 filter_info->priority = (uint8_t)filter->priority; 3498 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) 3499 filter_info->tcp_flags = filter->tcp_flags; 3500 else 3501 filter_info->tcp_flags = 0; 3502 3503 return 0; 3504 } 3505 3506 static inline struct e1000_2tuple_filter * 3507 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list, 3508 struct e1000_2tuple_filter_info *key) 3509 { 3510 struct e1000_2tuple_filter *it; 3511 3512 TAILQ_FOREACH(it, filter_list, entries) { 3513 if (memcmp(key, &it->filter_info, 3514 sizeof(struct e1000_2tuple_filter_info)) == 0) { 3515 return it; 3516 } 3517 } 3518 return NULL; 3519 } 3520 3521 /* 3522 * igb_add_2tuple_filter - add a 2tuple filter 3523 * 3524 * @param 3525 * dev: Pointer to struct rte_eth_dev. 3526 * ntuple_filter: pointer to the filter that will be added. 3527 * 3528 * @return 3529 * - On success, zero. 3530 * - On failure, a negative value. 3531 */ 3532 static int 3533 igb_add_2tuple_filter(struct rte_eth_dev *dev, 3534 struct rte_eth_ntuple_filter *ntuple_filter) 3535 { 3536 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3537 struct e1000_filter_info *filter_info = 3538 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3539 struct e1000_2tuple_filter *filter; 3540 uint32_t ttqf = E1000_TTQF_DISABLE_MASK; 3541 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP; 3542 int i, ret; 3543 3544 filter = rte_zmalloc("e1000_2tuple_filter", 3545 sizeof(struct e1000_2tuple_filter), 0); 3546 if (filter == NULL) 3547 return -ENOMEM; 3548 3549 ret = ntuple_filter_to_2tuple(ntuple_filter, 3550 &filter->filter_info); 3551 if (ret < 0) { 3552 rte_free(filter); 3553 return ret; 3554 } 3555 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list, 3556 &filter->filter_info) != NULL) { 3557 PMD_DRV_LOG(ERR, "filter exists."); 3558 rte_free(filter); 3559 return -EEXIST; 3560 } 3561 filter->queue = ntuple_filter->queue; 3562 3563 /* 3564 * look for an unused 2tuple filter index, 3565 * and insert the filter into the list. 3566 */ 3567 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) { 3568 if (!(filter_info->twotuple_mask & (1 << i))) { 3569 filter_info->twotuple_mask |= 1 << i; 3570 filter->index = i; 3571 TAILQ_INSERT_TAIL(&filter_info->twotuple_list, 3572 filter, 3573 entries); 3574 break; 3575 } 3576 } 3577 if (i >= E1000_MAX_TTQF_FILTERS) { 3578 PMD_DRV_LOG(ERR, "2tuple filters are full."); 3579 rte_free(filter); 3580 return -ENOSYS; 3581 } 3582 3583 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); 3584 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare.
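A dst_port_mask of 1 therefore requests the port-bypass bit (E1000_IMIR_PORT_BP) below so that the destination port is ignored.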
*/ 3585 imir |= E1000_IMIR_PORT_BP; 3586 else 3587 imir &= ~E1000_IMIR_PORT_BP; 3588 3589 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; 3590 3591 ttqf |= E1000_TTQF_QUEUE_ENABLE; 3592 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT); 3593 ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK); 3594 if (filter->filter_info.proto_mask == 0) 3595 ttqf &= ~E1000_TTQF_MASK_ENABLE; 3596 3597 /* tcp flags bits setting. */ 3598 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) { 3599 if (filter->filter_info.tcp_flags & TCP_URG_FLAG) 3600 imir_ext |= E1000_IMIREXT_CTRL_URG; 3601 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG) 3602 imir_ext |= E1000_IMIREXT_CTRL_ACK; 3603 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG) 3604 imir_ext |= E1000_IMIREXT_CTRL_PSH; 3605 if (filter->filter_info.tcp_flags & TCP_RST_FLAG) 3606 imir_ext |= E1000_IMIREXT_CTRL_RST; 3607 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG) 3608 imir_ext |= E1000_IMIREXT_CTRL_SYN; 3609 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG) 3610 imir_ext |= E1000_IMIREXT_CTRL_FIN; 3611 } else 3612 imir_ext |= E1000_IMIREXT_CTRL_BP; 3613 E1000_WRITE_REG(hw, E1000_IMIR(i), imir); 3614 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf); 3615 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext); 3616 return 0; 3617 } 3618 3619 /* 3620 * igb_remove_2tuple_filter - remove a 2tuple filter 3621 * 3622 * @param 3623 * dev: Pointer to struct rte_eth_dev. 3624 * ntuple_filter: pointer to the filter that will be removed. 3625 * 3626 * @return 3627 * - On success, zero. 3628 * - On failure, a negative value. 3629 */ 3630 static int 3631 igb_remove_2tuple_filter(struct rte_eth_dev *dev, 3632 struct rte_eth_ntuple_filter *ntuple_filter) 3633 { 3634 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3635 struct e1000_filter_info *filter_info = 3636 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3637 struct e1000_2tuple_filter_info filter_2tuple; 3638 struct e1000_2tuple_filter *filter; 3639 int ret; 3640 3641 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info)); 3642 ret = ntuple_filter_to_2tuple(ntuple_filter, 3643 &filter_2tuple); 3644 if (ret < 0) 3645 return ret; 3646 3647 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list, 3648 &filter_2tuple); 3649 if (filter == NULL) { 3650 PMD_DRV_LOG(ERR, "filter doesn't exist."); 3651 return -ENOENT; 3652 } 3653 3654 filter_info->twotuple_mask &= ~(1 << filter->index); 3655 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); 3656 3657 3658 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); 3659 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); 3660 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); /* free the filter only after its index has been used to clear the registers */ rte_free(filter); 3661 return 0; 3662 } 3663 3664 static inline struct e1000_flex_filter * 3665 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list, 3666 struct e1000_flex_filter_info *key) 3667 { 3668 struct e1000_flex_filter *it; 3669 3670 TAILQ_FOREACH(it, filter_list, entries) { 3671 if (memcmp(key, &it->filter_info, 3672 sizeof(struct e1000_flex_filter_info)) == 0) 3673 return it; 3674 } 3675 3676 return NULL; 3677 } 3678 3679 static int 3680 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, 3681 struct rte_eth_flex_filter *filter, 3682 bool add) 3683 { 3684 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3685 struct e1000_filter_info *filter_info = 3686 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 3687 struct e1000_flex_filter
static int
eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter, *it;
	uint32_t wufc, queueing, mask;
	uint32_t reg_off;
	uint8_t shift, i, j = 0;

	flex_filter = rte_zmalloc("e1000_flex_filter",
			sizeof(struct e1000_flex_filter), 0);
	if (flex_filter == NULL)
		return -ENOMEM;

	flex_filter->filter_info.len = filter->len;
	flex_filter->filter_info.priority = filter->priority;
	memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
	for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
		mask = 0;
		/* reverse bits in flex filter's mask */
		for (shift = 0; shift < CHAR_BIT; shift++) {
			if (filter->mask[i] & (0x01 << shift))
				mask |= (0x80 >> shift);
		}
		flex_filter->filter_info.mask[i] = mask;
	}

	wufc = E1000_READ_REG(hw, E1000_WUFC);

	if (add) {
		if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
				&flex_filter->filter_info) != NULL) {
			PMD_DRV_LOG(ERR, "filter exists.");
			rte_free(flex_filter);
			return -EEXIST;
		}
		flex_filter->queue = filter->queue;
		/*
		 * look for an unused flex filter index
		 * and insert the filter into the list.
		 */
		for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
			if (!(filter_info->flex_mask & (1 << i))) {
				filter_info->flex_mask |= 1 << i;
				flex_filter->index = i;
				TAILQ_INSERT_TAIL(&filter_info->flex_list,
						  flex_filter,
						  entries);
				break;
			}
		}
		if (i >= E1000_MAX_FLEX_FILTERS) {
			PMD_DRV_LOG(ERR, "flex filters are full.");
			rte_free(flex_filter);
			return -ENOSYS;
		}

		/* Compute the register offset from the index allocated above,
		 * not from the zero-initialized index of the new filter.
		 */
		if (flex_filter->index < E1000_MAX_FHFT)
			reg_off = E1000_FHFT(flex_filter->index);
		else
			reg_off = E1000_FHFT_EXT(flex_filter->index -
						 E1000_MAX_FHFT);

		E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
				(E1000_WUFC_FLX0 << flex_filter->index));
		queueing = filter->len |
			(filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
			(filter->priority << E1000_FHFT_QUEUEING_PRIO_SHIFT);
		E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
				queueing);
		for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
			E1000_WRITE_REG(hw, reg_off,
					flex_filter->filter_info.dwords[j]);
			reg_off += sizeof(uint32_t);
			E1000_WRITE_REG(hw, reg_off,
					flex_filter->filter_info.dwords[++j]);
			reg_off += sizeof(uint32_t);
			E1000_WRITE_REG(hw, reg_off,
				(uint32_t)flex_filter->filter_info.mask[i]);
			reg_off += sizeof(uint32_t) * 2;
			++j;
		}
	} else {
		it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
				&flex_filter->filter_info);
		if (it == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			rte_free(flex_filter);
			return -ENOENT;
		}

		/* Clear the registers of the filter actually being removed. */
		if (it->index < E1000_MAX_FHFT)
			reg_off = E1000_FHFT(it->index);
		else
			reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);

		for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
			E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
		E1000_WRITE_REG(hw, E1000_WUFC, wufc &
			(~(E1000_WUFC_FLX0 << it->index)));

		filter_info->flex_mask &= ~(1 << it->index);
		TAILQ_REMOVE(&filter_info->flex_list, it, entries);
		rte_free(it);
		rte_free(flex_filter);
	}

	return 0;
}
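/*
 * Illustrative (untested) application-side sketch of adding a flex filter
 * through the generic filter API, handled by eth_igb_flex_filter_handle()
 * below.  Pattern, mask and queue values are examples only:
 *
 *	struct rte_eth_flex_filter flt;
 *
 *	memset(&flt, 0, sizeof(flt));
 *	flt.len = 16;                    // must be a multiple of 8 bytes
 *	memcpy(flt.bytes, pattern, 16);  // bytes to match from frame start
 *	flt.mask[0] = 0xff;              // compare the first 8-byte group
 *	flt.mask[1] = 0x0f;              // compare part of the second group
 *	flt.priority = 1;
 *	flt.queue = 2;                   // RX queue that receives matches
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FLEXIBLE,
 *				RTE_ETH_FILTER_ADD, &flt);
 */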
static int
eth_igb_get_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter flex_filter, *it;
	uint32_t wufc, queueing, wufc_en = 0;

	memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
	flex_filter.filter_info.len = filter->len;
	flex_filter.filter_info.priority = filter->priority;
	memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
	memcpy(flex_filter.filter_info.mask, filter->mask,
		RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);

	it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
				&flex_filter.filter_info);
	if (it == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	wufc = E1000_READ_REG(hw, E1000_WUFC);
	wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);

	if ((wufc & wufc_en) == wufc_en) {
		uint32_t reg_off = 0;
		if (it->index < E1000_MAX_FHFT)
			reg_off = E1000_FHFT(it->index);
		else
			reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);

		queueing = E1000_READ_REG(hw,
				reg_off + E1000_FHFT_QUEUEING_OFFSET);
		filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
		filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
			E1000_FHFT_QUEUEING_PRIO_SHIFT;
		filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
			E1000_FHFT_QUEUEING_QUEUE_SHIFT;
		return 0;
	}
	return -ENOENT;
}

static int
eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_flex_filter *filter;
	int ret = 0;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return ret;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
			    filter_op);
		return -EINVAL;
	}

	filter = (struct rte_eth_flex_filter *)arg;
	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
	    || filter->len % sizeof(uint64_t) != 0) {
		PMD_DRV_LOG(ERR, "filter's length is out of range");
		return -EINVAL;
	}
	if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
		PMD_DRV_LOG(ERR, "filter's priority is out of range");
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = eth_igb_get_flex_filter(dev, filter);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}
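/*
 * 5tuple filters (82576 only) match on source/destination IP address,
 * source/destination port and protocol.  They are programmed through the
 * FTQF/DAQF/SAQF/SPQF register sets, with IMIR/IMIREXT handling the
 * destination port, priority and optional TCP flag matching, as done in
 * igb_add_5tuple_filter_82576() below.
 */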
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info */
static inline int
ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
			struct e1000_5tuple_filter_info *filter_info)
{
	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
		return -EINVAL;
	if (filter->priority > E1000_2TUPLE_MAX_PRI)
		return -EINVAL;  /* priority is out of range. */
	if (filter->tcp_flags > TCP_FLAG_ALL)
		return -EINVAL;  /* flags are invalid. */

	switch (filter->dst_ip_mask) {
	case UINT32_MAX:
		filter_info->dst_ip_mask = 0;
		filter_info->dst_ip = filter->dst_ip;
		break;
	case 0:
		filter_info->dst_ip_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
		return -EINVAL;
	}

	switch (filter->src_ip_mask) {
	case UINT32_MAX:
		filter_info->src_ip_mask = 0;
		filter_info->src_ip = filter->src_ip;
		break;
	case 0:
		filter_info->src_ip_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
		return -EINVAL;
	}

	switch (filter->dst_port_mask) {
	case UINT16_MAX:
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
		break;
	case 0:
		filter_info->dst_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
		return -EINVAL;
	}

	switch (filter->src_port_mask) {
	case UINT16_MAX:
		filter_info->src_port_mask = 0;
		filter_info->src_port = filter->src_port;
		break;
	case 0:
		filter_info->src_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid src_port mask.");
		return -EINVAL;
	}

	switch (filter->proto_mask) {
	case UINT8_MAX:
		filter_info->proto_mask = 0;
		filter_info->proto = filter->proto;
		break;
	case 0:
		filter_info->proto_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid protocol mask.");
		return -EINVAL;
	}

	filter_info->priority = (uint8_t)filter->priority;
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
		filter_info->tcp_flags = filter->tcp_flags;
	else
		filter_info->tcp_flags = 0;

	return 0;
}

static inline struct e1000_5tuple_filter *
igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
			struct e1000_5tuple_filter_info *key)
{
	struct e1000_5tuple_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct e1000_5tuple_filter_info)) == 0) {
			return it;
		}
	}
	return NULL;
}

/*
 * igb_add_5tuple_filter_82576 - add a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
3993 */ 3994 static int 3995 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev, 3996 struct rte_eth_ntuple_filter *ntuple_filter) 3997 { 3998 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3999 struct e1000_filter_info *filter_info = 4000 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4001 struct e1000_5tuple_filter *filter; 4002 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK; 4003 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP; 4004 uint8_t i; 4005 int ret; 4006 4007 filter = rte_zmalloc("e1000_5tuple_filter", 4008 sizeof(struct e1000_5tuple_filter), 0); 4009 if (filter == NULL) 4010 return -ENOMEM; 4011 4012 ret = ntuple_filter_to_5tuple_82576(ntuple_filter, 4013 &filter->filter_info); 4014 if (ret < 0) { 4015 rte_free(filter); 4016 return ret; 4017 } 4018 4019 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list, 4020 &filter->filter_info) != NULL) { 4021 PMD_DRV_LOG(ERR, "filter exists."); 4022 rte_free(filter); 4023 return -EEXIST; 4024 } 4025 filter->queue = ntuple_filter->queue; 4026 4027 /* 4028 * look for an unused 5tuple filter index, 4029 * and insert the filter to list. 4030 */ 4031 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) { 4032 if (!(filter_info->fivetuple_mask & (1 << i))) { 4033 filter_info->fivetuple_mask |= 1 << i; 4034 filter->index = i; 4035 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 4036 filter, 4037 entries); 4038 break; 4039 } 4040 } 4041 if (i >= E1000_MAX_FTQF_FILTERS) { 4042 PMD_DRV_LOG(ERR, "5tuple filters are full."); 4043 rte_free(filter); 4044 return -ENOSYS; 4045 } 4046 4047 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK; 4048 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */ 4049 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP; 4050 if (filter->filter_info.dst_ip_mask == 0) 4051 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP; 4052 if (filter->filter_info.src_port_mask == 0) 4053 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; 4054 if (filter->filter_info.proto_mask == 0) 4055 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; 4056 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) & 4057 E1000_FTQF_QUEUE_MASK; 4058 ftqf |= E1000_FTQF_QUEUE_ENABLE; 4059 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf); 4060 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip); 4061 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip); 4062 4063 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT; 4064 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf); 4065 4066 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT); 4067 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */ 4068 imir |= E1000_IMIR_PORT_BP; 4069 else 4070 imir &= ~E1000_IMIR_PORT_BP; 4071 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT; 4072 4073 /* tcp flags bits setting. 
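	 * When TCP flag matching was requested, each requested flag sets the
	 * corresponding IMIREXT control bit below; otherwise flag comparison
	 * is bypassed altogether via E1000_IMIREXT_CTRL_BP.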
	 */
	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_URG;
		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_ACK;
		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_PSH;
		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_RST;
		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_SYN;
		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_FIN;
	} else
		imir_ext |= E1000_IMIREXT_CTRL_BP;
	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
	return 0;
}

/*
 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be removed.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
				struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter_info filter_5tuple;
	struct e1000_5tuple_filter *filter;
	int ret;

	memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
					    &filter_5tuple);
	if (ret < 0)
		return ret;

	filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
						&filter_5tuple);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	filter_info->fivetuple_mask &= ~(1 << filter->index);
	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);

	E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
			E1000_FTQF_VF_BP | E1000_FTQF_MASK);
	E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);

	/* Free the filter only after its index is no longer needed. */
	rte_free(filter);
	return 0;
}

static int
eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t rctl;
	struct e1000_hw *hw;
	struct rte_eth_dev_info dev_info;
	uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
				     VLAN_TAG_SIZE);

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

#ifdef RTE_LIBRTE_82571_SUPPORT
	/* XXX: not bigger than max_rx_pktlen */
	if (hw->mac.type == e1000_82571)
		return -ENOTSUP;
#endif
	eth_igb_infos_get(dev, &dev_info);

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) ||
	    (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* refuse mtu that requires the support of scattered packets when this
	 * feature has not been enabled before.
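	 * A frame larger than the data room of a single RX mbuf
	 * (min_rx_buf_size - RTE_PKTMBUF_HEADROOM) can only be received
	 * with scatter enabled.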
*/ 4168 if (!dev->data->scattered_rx && 4169 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) 4170 return -EINVAL; 4171 4172 rctl = E1000_READ_REG(hw, E1000_RCTL); 4173 4174 /* switch to jumbo mode if needed */ 4175 if (frame_size > ETHER_MAX_LEN) { 4176 dev->data->dev_conf.rxmode.jumbo_frame = 1; 4177 rctl |= E1000_RCTL_LPE; 4178 } else { 4179 dev->data->dev_conf.rxmode.jumbo_frame = 0; 4180 rctl &= ~E1000_RCTL_LPE; 4181 } 4182 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 4183 4184 /* update max frame size */ 4185 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 4186 4187 E1000_WRITE_REG(hw, E1000_RLPML, 4188 dev->data->dev_conf.rxmode.max_rx_pkt_len); 4189 4190 return 0; 4191 } 4192 4193 /* 4194 * igb_add_del_ntuple_filter - add or delete a ntuple filter 4195 * 4196 * @param 4197 * dev: Pointer to struct rte_eth_dev. 4198 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 4199 * add: if true, add filter, if false, remove filter 4200 * 4201 * @return 4202 * - On success, zero. 4203 * - On failure, a negative value. 4204 */ 4205 static int 4206 igb_add_del_ntuple_filter(struct rte_eth_dev *dev, 4207 struct rte_eth_ntuple_filter *ntuple_filter, 4208 bool add) 4209 { 4210 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4211 int ret; 4212 4213 switch (ntuple_filter->flags) { 4214 case RTE_5TUPLE_FLAGS: 4215 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4216 if (hw->mac.type != e1000_82576) 4217 return -ENOTSUP; 4218 if (add) 4219 ret = igb_add_5tuple_filter_82576(dev, 4220 ntuple_filter); 4221 else 4222 ret = igb_remove_5tuple_filter_82576(dev, 4223 ntuple_filter); 4224 break; 4225 case RTE_2TUPLE_FLAGS: 4226 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG): 4227 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350) 4228 return -ENOTSUP; 4229 if (add) 4230 ret = igb_add_2tuple_filter(dev, ntuple_filter); 4231 else 4232 ret = igb_remove_2tuple_filter(dev, ntuple_filter); 4233 break; 4234 default: 4235 ret = -EINVAL; 4236 break; 4237 } 4238 4239 return ret; 4240 } 4241 4242 /* 4243 * igb_get_ntuple_filter - get a ntuple filter 4244 * 4245 * @param 4246 * dev: Pointer to struct rte_eth_dev. 4247 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 4248 * 4249 * @return 4250 * - On success, zero. 4251 * - On failure, a negative value. 
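 *    - (-ENOTSUP) if the filter type is not supported by the MAC type
 *      (5tuple filters require 82576, 2tuple filters require 82580/i350).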
 */
static int
igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter_info filter_5tuple;
	struct e1000_2tuple_filter_info filter_2tuple;
	struct e1000_5tuple_filter *p_5tuple_filter;
	struct e1000_2tuple_filter *p_2tuple_filter;
	int ret;

	switch (ntuple_filter->flags) {
	case RTE_5TUPLE_FLAGS:
	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82576)
			return -ENOTSUP;
		memset(&filter_5tuple,
			0,
			sizeof(struct e1000_5tuple_filter_info));
		ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
						    &filter_5tuple);
		if (ret < 0)
			return ret;
		p_5tuple_filter = igb_5tuple_filter_lookup_82576(
					&filter_info->fivetuple_list,
					&filter_5tuple);
		if (p_5tuple_filter == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			return -ENOENT;
		}
		ntuple_filter->queue = p_5tuple_filter->queue;
		break;
	case RTE_2TUPLE_FLAGS:
	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
			return -ENOTSUP;
		memset(&filter_2tuple,
			0,
			sizeof(struct e1000_2tuple_filter_info));
		ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
		if (ret < 0)
			return ret;
		p_2tuple_filter = igb_2tuple_filter_lookup(
					&filter_info->twotuple_list,
					&filter_2tuple);
		if (p_2tuple_filter == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			return -ENOENT;
		}
		ntuple_filter->queue = p_2tuple_filter->queue;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	/* Propagate the result so that unsupported flag combinations fail. */
	return ret;
}

/*
 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be performed
4318 * @arg: a pointer to specific structure corresponding to the filter_op 4319 */ 4320 static int 4321 igb_ntuple_filter_handle(struct rte_eth_dev *dev, 4322 enum rte_filter_op filter_op, 4323 void *arg) 4324 { 4325 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4326 int ret; 4327 4328 MAC_TYPE_FILTER_SUP(hw->mac.type); 4329 4330 if (filter_op == RTE_ETH_FILTER_NOP) 4331 return 0; 4332 4333 if (arg == NULL) { 4334 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 4335 filter_op); 4336 return -EINVAL; 4337 } 4338 4339 switch (filter_op) { 4340 case RTE_ETH_FILTER_ADD: 4341 ret = igb_add_del_ntuple_filter(dev, 4342 (struct rte_eth_ntuple_filter *)arg, 4343 TRUE); 4344 break; 4345 case RTE_ETH_FILTER_DELETE: 4346 ret = igb_add_del_ntuple_filter(dev, 4347 (struct rte_eth_ntuple_filter *)arg, 4348 FALSE); 4349 break; 4350 case RTE_ETH_FILTER_GET: 4351 ret = igb_get_ntuple_filter(dev, 4352 (struct rte_eth_ntuple_filter *)arg); 4353 break; 4354 default: 4355 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 4356 ret = -EINVAL; 4357 break; 4358 } 4359 return ret; 4360 } 4361 4362 static inline int 4363 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info, 4364 uint16_t ethertype) 4365 { 4366 int i; 4367 4368 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 4369 if (filter_info->ethertype_filters[i] == ethertype && 4370 (filter_info->ethertype_mask & (1 << i))) 4371 return i; 4372 } 4373 return -1; 4374 } 4375 4376 static inline int 4377 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info, 4378 uint16_t ethertype) 4379 { 4380 int i; 4381 4382 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) { 4383 if (!(filter_info->ethertype_mask & (1 << i))) { 4384 filter_info->ethertype_mask |= 1 << i; 4385 filter_info->ethertype_filters[i] = ethertype; 4386 return i; 4387 } 4388 } 4389 return -1; 4390 } 4391 4392 static inline int 4393 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info, 4394 uint8_t idx) 4395 { 4396 if (idx >= E1000_MAX_ETQF_FILTERS) 4397 return -1; 4398 filter_info->ethertype_mask &= ~(1 << idx); 4399 filter_info->ethertype_filters[idx] = 0; 4400 return idx; 4401 } 4402 4403 4404 static int 4405 igb_add_del_ethertype_filter(struct rte_eth_dev *dev, 4406 struct rte_eth_ethertype_filter *filter, 4407 bool add) 4408 { 4409 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4410 struct e1000_filter_info *filter_info = 4411 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4412 uint32_t etqf = 0; 4413 int ret; 4414 4415 if (filter->ether_type == ETHER_TYPE_IPv4 || 4416 filter->ether_type == ETHER_TYPE_IPv6) { 4417 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 4418 " ethertype filter.", filter->ether_type); 4419 return -EINVAL; 4420 } 4421 4422 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 4423 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 4424 return -EINVAL; 4425 } 4426 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 4427 PMD_DRV_LOG(ERR, "drop option is unsupported."); 4428 return -EINVAL; 4429 } 4430 4431 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); 4432 if (ret >= 0 && add) { 4433 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 4434 filter->ether_type); 4435 return -EEXIST; 4436 } 4437 if (ret < 0 && !add) { 4438 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4439 filter->ether_type); 4440 return -ENOENT; 4441 } 4442 4443 if (add) { 4444 ret = igb_ethertype_filter_insert(filter_info, 4445 filter->ether_type); 4446 if (ret < 0) { 4447 
PMD_DRV_LOG(ERR, "ethertype filters are full."); 4448 return -ENOSYS; 4449 } 4450 4451 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE; 4452 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE); 4453 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT; 4454 } else { 4455 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret); 4456 if (ret < 0) 4457 return -ENOSYS; 4458 } 4459 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf); 4460 E1000_WRITE_FLUSH(hw); 4461 4462 return 0; 4463 } 4464 4465 static int 4466 igb_get_ethertype_filter(struct rte_eth_dev *dev, 4467 struct rte_eth_ethertype_filter *filter) 4468 { 4469 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4470 struct e1000_filter_info *filter_info = 4471 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 4472 uint32_t etqf; 4473 int ret; 4474 4475 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type); 4476 if (ret < 0) { 4477 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4478 filter->ether_type); 4479 return -ENOENT; 4480 } 4481 4482 etqf = E1000_READ_REG(hw, E1000_ETQF(ret)); 4483 if (etqf & E1000_ETQF_FILTER_ENABLE) { 4484 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE; 4485 filter->flags = 0; 4486 filter->queue = (etqf & E1000_ETQF_QUEUE) >> 4487 E1000_ETQF_QUEUE_SHIFT; 4488 return 0; 4489 } 4490 4491 return -ENOENT; 4492 } 4493 4494 /* 4495 * igb_ethertype_filter_handle - Handle operations for ethertype filter. 4496 * @dev: pointer to rte_eth_dev structure 4497 * @filter_op:operation will be taken. 4498 * @arg: a pointer to specific structure corresponding to the filter_op 4499 */ 4500 static int 4501 igb_ethertype_filter_handle(struct rte_eth_dev *dev, 4502 enum rte_filter_op filter_op, 4503 void *arg) 4504 { 4505 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4506 int ret; 4507 4508 MAC_TYPE_FILTER_SUP(hw->mac.type); 4509 4510 if (filter_op == RTE_ETH_FILTER_NOP) 4511 return 0; 4512 4513 if (arg == NULL) { 4514 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 4515 filter_op); 4516 return -EINVAL; 4517 } 4518 4519 switch (filter_op) { 4520 case RTE_ETH_FILTER_ADD: 4521 ret = igb_add_del_ethertype_filter(dev, 4522 (struct rte_eth_ethertype_filter *)arg, 4523 TRUE); 4524 break; 4525 case RTE_ETH_FILTER_DELETE: 4526 ret = igb_add_del_ethertype_filter(dev, 4527 (struct rte_eth_ethertype_filter *)arg, 4528 FALSE); 4529 break; 4530 case RTE_ETH_FILTER_GET: 4531 ret = igb_get_ethertype_filter(dev, 4532 (struct rte_eth_ethertype_filter *)arg); 4533 break; 4534 default: 4535 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 4536 ret = -EINVAL; 4537 break; 4538 } 4539 return ret; 4540 } 4541 4542 static int 4543 eth_igb_filter_ctrl(struct rte_eth_dev *dev, 4544 enum rte_filter_type filter_type, 4545 enum rte_filter_op filter_op, 4546 void *arg) 4547 { 4548 int ret = -EINVAL; 4549 4550 switch (filter_type) { 4551 case RTE_ETH_FILTER_NTUPLE: 4552 ret = igb_ntuple_filter_handle(dev, filter_op, arg); 4553 break; 4554 case RTE_ETH_FILTER_ETHERTYPE: 4555 ret = igb_ethertype_filter_handle(dev, filter_op, arg); 4556 break; 4557 case RTE_ETH_FILTER_SYN: 4558 ret = eth_igb_syn_filter_handle(dev, filter_op, arg); 4559 break; 4560 case RTE_ETH_FILTER_FLEXIBLE: 4561 ret = eth_igb_flex_filter_handle(dev, filter_op, arg); 4562 break; 4563 default: 4564 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", 4565 filter_type); 4566 break; 4567 } 4568 4569 return ret; 4570 } 4571 4572 static int 4573 eth_igb_set_mc_addr_list(struct rte_eth_dev 
*dev, 4574 struct ether_addr *mc_addr_set, 4575 uint32_t nb_mc_addr) 4576 { 4577 struct e1000_hw *hw; 4578 4579 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4580 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); 4581 return 0; 4582 } 4583 4584 static uint64_t 4585 igb_read_systime_cyclecounter(struct rte_eth_dev *dev) 4586 { 4587 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4588 uint64_t systime_cycles; 4589 4590 switch (hw->mac.type) { 4591 case e1000_i210: 4592 case e1000_i211: 4593 /* 4594 * Need to read System Time Residue Register to be able 4595 * to read the other two registers. 4596 */ 4597 E1000_READ_REG(hw, E1000_SYSTIMR); 4598 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 4599 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4600 systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) 4601 * NSEC_PER_SEC; 4602 break; 4603 case e1000_82580: 4604 case e1000_i350: 4605 case e1000_i354: 4606 /* 4607 * Need to read System Time Residue Register to be able 4608 * to read the other two registers. 4609 */ 4610 E1000_READ_REG(hw, E1000_SYSTIMR); 4611 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4612 /* Only the 8 LSB are valid. */ 4613 systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH) 4614 & 0xff) << 32; 4615 break; 4616 default: 4617 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML); 4618 systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH) 4619 << 32; 4620 break; 4621 } 4622 4623 return systime_cycles; 4624 } 4625 4626 static uint64_t 4627 igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 4628 { 4629 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4630 uint64_t rx_tstamp_cycles; 4631 4632 switch (hw->mac.type) { 4633 case e1000_i210: 4634 case e1000_i211: 4635 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 4636 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 4637 rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) 4638 * NSEC_PER_SEC; 4639 break; 4640 case e1000_82580: 4641 case e1000_i350: 4642 case e1000_i354: 4643 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 4644 /* Only the 8 LSB are valid. */ 4645 rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH) 4646 & 0xff) << 32; 4647 break; 4648 default: 4649 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL); 4650 rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH) 4651 << 32; 4652 break; 4653 } 4654 4655 return rx_tstamp_cycles; 4656 } 4657 4658 static uint64_t 4659 igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 4660 { 4661 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4662 uint64_t tx_tstamp_cycles; 4663 4664 switch (hw->mac.type) { 4665 case e1000_i210: 4666 case e1000_i211: 4667 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 4668 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 4669 tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) 4670 * NSEC_PER_SEC; 4671 break; 4672 case e1000_82580: 4673 case e1000_i350: 4674 case e1000_i354: 4675 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 4676 /* Only the 8 LSB are valid. 
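		 * TXSTMPH holds the upper bits; together with TXSTMPL this
		 * forms the 40-bit timestamp that matches the 40-bit
		 * cyclecounter mask set up in igb_start_timecounters().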
*/ 4677 tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH) 4678 & 0xff) << 32; 4679 break; 4680 default: 4681 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL); 4682 tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH) 4683 << 32; 4684 break; 4685 } 4686 4687 return tx_tstamp_cycles; 4688 } 4689 4690 static void 4691 igb_start_timecounters(struct rte_eth_dev *dev) 4692 { 4693 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4694 struct e1000_adapter *adapter = 4695 (struct e1000_adapter *)dev->data->dev_private; 4696 uint32_t incval = 1; 4697 uint32_t shift = 0; 4698 uint64_t mask = E1000_CYCLECOUNTER_MASK; 4699 4700 switch (hw->mac.type) { 4701 case e1000_82580: 4702 case e1000_i350: 4703 case e1000_i354: 4704 /* 32 LSB bits + 8 MSB bits = 40 bits */ 4705 mask = (1ULL << 40) - 1; 4706 /* fall-through */ 4707 case e1000_i210: 4708 case e1000_i211: 4709 /* 4710 * Start incrementing the register 4711 * used to timestamp PTP packets. 4712 */ 4713 E1000_WRITE_REG(hw, E1000_TIMINCA, incval); 4714 break; 4715 case e1000_82576: 4716 incval = E1000_INCVALUE_82576; 4717 shift = IGB_82576_TSYNC_SHIFT; 4718 E1000_WRITE_REG(hw, E1000_TIMINCA, 4719 E1000_INCPERIOD_82576 | incval); 4720 break; 4721 default: 4722 /* Not supported */ 4723 return; 4724 } 4725 4726 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 4727 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4728 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4729 4730 adapter->systime_tc.cc_mask = mask; 4731 adapter->systime_tc.cc_shift = shift; 4732 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 4733 4734 adapter->rx_tstamp_tc.cc_mask = mask; 4735 adapter->rx_tstamp_tc.cc_shift = shift; 4736 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4737 4738 adapter->tx_tstamp_tc.cc_mask = mask; 4739 adapter->tx_tstamp_tc.cc_shift = shift; 4740 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4741 } 4742 4743 static int 4744 igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 4745 { 4746 struct e1000_adapter *adapter = 4747 (struct e1000_adapter *)dev->data->dev_private; 4748 4749 adapter->systime_tc.nsec += delta; 4750 adapter->rx_tstamp_tc.nsec += delta; 4751 adapter->tx_tstamp_tc.nsec += delta; 4752 4753 return 0; 4754 } 4755 4756 static int 4757 igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 4758 { 4759 uint64_t ns; 4760 struct e1000_adapter *adapter = 4761 (struct e1000_adapter *)dev->data->dev_private; 4762 4763 ns = rte_timespec_to_ns(ts); 4764 4765 /* Set the timecounters to a new value. */ 4766 adapter->systime_tc.nsec = ns; 4767 adapter->rx_tstamp_tc.nsec = ns; 4768 adapter->tx_tstamp_tc.nsec = ns; 4769 4770 return 0; 4771 } 4772 4773 static int 4774 igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 4775 { 4776 uint64_t ns, systime_cycles; 4777 struct e1000_adapter *adapter = 4778 (struct e1000_adapter *)dev->data->dev_private; 4779 4780 systime_cycles = igb_read_systime_cyclecounter(dev); 4781 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 4782 *ts = rte_ns_to_timespec(ns); 4783 4784 return 0; 4785 } 4786 4787 static int 4788 igb_timesync_enable(struct rte_eth_dev *dev) 4789 { 4790 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4791 uint32_t tsync_ctl; 4792 uint32_t tsauxc; 4793 4794 /* Stop the timesync system time. */ 4795 E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0); 4796 /* Reset the timesync system time value. 
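	 * The counter registers are cleared while TIMINCA is zero; the
	 * igb_start_timecounters() call below reprograms the increment so
	 * SYSTIM starts counting again from zero.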
	 */
	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
		/* fall-through */
	case e1000_82576:
		E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
		E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
		break;
	default:
		/* Not supported. */
		return -ENOTSUP;
	}

	/* Enable the system time, as it is not on by default. */
	tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
	tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
	E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);

	igb_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
			(ETHER_TYPE_1588 |
			 E1000_ETQF_FILTER_ENABLE |
			 E1000_ETQF_1588));

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

	return 0;
}

static int
igb_timesync_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);

	/* Stop incrementing the System Time registers.
*/ 4860 E1000_WRITE_REG(hw, E1000_TIMINCA, 0); 4861 4862 return 0; 4863 } 4864 4865 static int 4866 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 4867 struct timespec *timestamp, 4868 uint32_t flags __rte_unused) 4869 { 4870 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4871 struct e1000_adapter *adapter = 4872 (struct e1000_adapter *)dev->data->dev_private; 4873 uint32_t tsync_rxctl; 4874 uint64_t rx_tstamp_cycles; 4875 uint64_t ns; 4876 4877 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL); 4878 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0) 4879 return -EINVAL; 4880 4881 rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev); 4882 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 4883 *timestamp = rte_ns_to_timespec(ns); 4884 4885 return 0; 4886 } 4887 4888 static int 4889 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 4890 struct timespec *timestamp) 4891 { 4892 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4893 struct e1000_adapter *adapter = 4894 (struct e1000_adapter *)dev->data->dev_private; 4895 uint32_t tsync_txctl; 4896 uint64_t tx_tstamp_cycles; 4897 uint64_t ns; 4898 4899 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); 4900 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0) 4901 return -EINVAL; 4902 4903 tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev); 4904 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 4905 *timestamp = rte_ns_to_timespec(ns); 4906 4907 return 0; 4908 } 4909 4910 static int 4911 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused) 4912 { 4913 int count = 0; 4914 int g_ind = 0; 4915 const struct reg_info *reg_group; 4916 4917 while ((reg_group = igb_regs[g_ind++])) 4918 count += igb_reg_group_count(reg_group); 4919 4920 return count; 4921 } 4922 4923 static int 4924 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 4925 { 4926 int count = 0; 4927 int g_ind = 0; 4928 const struct reg_info *reg_group; 4929 4930 while ((reg_group = igbvf_regs[g_ind++])) 4931 count += igb_reg_group_count(reg_group); 4932 4933 return count; 4934 } 4935 4936 static int 4937 eth_igb_get_regs(struct rte_eth_dev *dev, 4938 struct rte_dev_reg_info *regs) 4939 { 4940 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4941 uint32_t *data = regs->data; 4942 int g_ind = 0; 4943 int count = 0; 4944 const struct reg_info *reg_group; 4945 4946 if (data == NULL) { 4947 regs->length = eth_igb_get_reg_length(dev); 4948 regs->width = sizeof(uint32_t); 4949 return 0; 4950 } 4951 4952 /* Support only full register dump */ 4953 if ((regs->length == 0) || 4954 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) { 4955 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 4956 hw->device_id; 4957 while ((reg_group = igb_regs[g_ind++])) 4958 count += igb_read_regs_group(dev, &data[count], 4959 reg_group); 4960 return 0; 4961 } 4962 4963 return -ENOTSUP; 4964 } 4965 4966 static int 4967 igbvf_get_regs(struct rte_eth_dev *dev, 4968 struct rte_dev_reg_info *regs) 4969 { 4970 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4971 uint32_t *data = regs->data; 4972 int g_ind = 0; 4973 int count = 0; 4974 const struct reg_info *reg_group; 4975 4976 if (data == NULL) { 4977 regs->length = igbvf_get_reg_length(dev); 4978 regs->width = sizeof(uint32_t); 4979 return 0; 4980 } 4981 4982 /* Support only full register dump */ 4983 if ((regs->length == 0) || 4984 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) { 
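		/* Encode MAC type, revision id and device id into the reported register set version. */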
4985 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 4986 hw->device_id; 4987 while ((reg_group = igbvf_regs[g_ind++])) 4988 count += igb_read_regs_group(dev, &data[count], 4989 reg_group); 4990 return 0; 4991 } 4992 4993 return -ENOTSUP; 4994 } 4995 4996 static int 4997 eth_igb_get_eeprom_length(struct rte_eth_dev *dev) 4998 { 4999 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5000 5001 /* Return unit is byte count */ 5002 return hw->nvm.word_size * 2; 5003 } 5004 5005 static int 5006 eth_igb_get_eeprom(struct rte_eth_dev *dev, 5007 struct rte_dev_eeprom_info *in_eeprom) 5008 { 5009 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5010 struct e1000_nvm_info *nvm = &hw->nvm; 5011 uint16_t *data = in_eeprom->data; 5012 int first, length; 5013 5014 first = in_eeprom->offset >> 1; 5015 length = in_eeprom->length >> 1; 5016 if ((first >= hw->nvm.word_size) || 5017 ((first + length) >= hw->nvm.word_size)) 5018 return -EINVAL; 5019 5020 in_eeprom->magic = hw->vendor_id | 5021 ((uint32_t)hw->device_id << 16); 5022 5023 if ((nvm->ops.read) == NULL) 5024 return -ENOTSUP; 5025 5026 return nvm->ops.read(hw, first, length, data); 5027 } 5028 5029 static int 5030 eth_igb_set_eeprom(struct rte_eth_dev *dev, 5031 struct rte_dev_eeprom_info *in_eeprom) 5032 { 5033 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5034 struct e1000_nvm_info *nvm = &hw->nvm; 5035 uint16_t *data = in_eeprom->data; 5036 int first, length; 5037 5038 first = in_eeprom->offset >> 1; 5039 length = in_eeprom->length >> 1; 5040 if ((first >= hw->nvm.word_size) || 5041 ((first + length) >= hw->nvm.word_size)) 5042 return -EINVAL; 5043 5044 in_eeprom->magic = (uint32_t)hw->vendor_id | 5045 ((uint32_t)hw->device_id << 16); 5046 5047 if ((nvm->ops.write) == NULL) 5048 return -ENOTSUP; 5049 return nvm->ops.write(hw, first, length, data); 5050 } 5051 5052 static struct rte_driver pmd_igb_drv = { 5053 .type = PMD_PDEV, 5054 .init = rte_igb_pmd_init, 5055 }; 5056 5057 static struct rte_driver pmd_igbvf_drv = { 5058 .type = PMD_PDEV, 5059 .init = rte_igbvf_pmd_init, 5060 }; 5061 5062 static int 5063 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5064 { 5065 struct e1000_hw *hw = 5066 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5067 uint32_t mask = 1 << queue_id; 5068 5069 E1000_WRITE_REG(hw, E1000_EIMC, mask); 5070 E1000_WRITE_FLUSH(hw); 5071 5072 return 0; 5073 } 5074 5075 static int 5076 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5077 { 5078 struct e1000_hw *hw = 5079 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5080 uint32_t mask = 1 << queue_id; 5081 uint32_t regval; 5082 5083 regval = E1000_READ_REG(hw, E1000_EIMS); 5084 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask); 5085 E1000_WRITE_FLUSH(hw); 5086 5087 rte_intr_enable(&dev->pci_dev->intr_handle); 5088 5089 return 0; 5090 } 5091 5092 static void 5093 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, 5094 uint8_t index, uint8_t offset) 5095 { 5096 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 5097 5098 /* clear bits */ 5099 val &= ~((uint32_t)0xFF << offset); 5100 5101 /* write vector and valid bit */ 5102 val |= (msix_vector | E1000_IVAR_VALID) << offset; 5103 5104 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val); 5105 } 5106 5107 static void 5108 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction, 5109 uint8_t queue, uint8_t msix_vector) 5110 { 5111 uint32_t tmp = 0; 5112 5113 if (hw->mac.type == 
e1000_82575) { 5114 if (direction == 0) 5115 tmp = E1000_EICR_RX_QUEUE0 << queue; 5116 else if (direction == 1) 5117 tmp = E1000_EICR_TX_QUEUE0 << queue; 5118 E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp); 5119 } else if (hw->mac.type == e1000_82576) { 5120 if ((direction == 0) || (direction == 1)) 5121 eth_igb_write_ivar(hw, msix_vector, queue & 0x7, 5122 ((queue & 0x8) << 1) + 5123 8 * direction); 5124 } else if ((hw->mac.type == e1000_82580) || 5125 (hw->mac.type == e1000_i350) || 5126 (hw->mac.type == e1000_i354) || 5127 (hw->mac.type == e1000_i210) || 5128 (hw->mac.type == e1000_i211)) { 5129 if ((direction == 0) || (direction == 1)) 5130 eth_igb_write_ivar(hw, msix_vector, 5131 queue >> 1, 5132 ((queue & 0x1) << 4) + 5133 8 * direction); 5134 } 5135 } 5136 5137 /* Sets up the hardware to generate MSI-X interrupts properly 5138 * @hw 5139 * board private structure 5140 */ 5141 static void 5142 eth_igb_configure_msix_intr(struct rte_eth_dev *dev) 5143 { 5144 int queue_id; 5145 uint32_t tmpval, regval, intr_mask; 5146 struct e1000_hw *hw = 5147 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5148 uint32_t vec = E1000_MISC_VEC_ID; 5149 uint32_t base = E1000_MISC_VEC_ID; 5150 uint32_t misc_shift = 0; 5151 5152 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 5153 5154 /* won't configure msix register if no mapping is done 5155 * between intr vector and event fd 5156 */ 5157 if (!rte_intr_dp_is_en(intr_handle)) 5158 return; 5159 5160 if (rte_intr_allow_others(intr_handle)) { 5161 vec = base = E1000_RX_VEC_START; 5162 misc_shift = 1; 5163 } 5164 5165 /* set interrupt vector for other causes */ 5166 if (hw->mac.type == e1000_82575) { 5167 tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT); 5168 /* enable MSI-X PBA support */ 5169 tmpval |= E1000_CTRL_EXT_PBA_CLR; 5170 5171 /* Auto-Mask interrupts upon ICR read */ 5172 tmpval |= E1000_CTRL_EXT_EIAME; 5173 tmpval |= E1000_CTRL_EXT_IRCA; 5174 5175 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval); 5176 5177 /* enable msix_other interrupt */ 5178 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER); 5179 regval = E1000_READ_REG(hw, E1000_EIAC); 5180 E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER); 5181 regval = E1000_READ_REG(hw, E1000_EIAM); 5182 E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER); 5183 } else if ((hw->mac.type == e1000_82576) || 5184 (hw->mac.type == e1000_82580) || 5185 (hw->mac.type == e1000_i350) || 5186 (hw->mac.type == e1000_i354) || 5187 (hw->mac.type == e1000_i210) || 5188 (hw->mac.type == e1000_i211)) { 5189 /* turn on MSI-X capability first */ 5190 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | 5191 E1000_GPIE_PBA | E1000_GPIE_EIAME | 5192 E1000_GPIE_NSICR); 5193 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << 5194 misc_shift; 5195 regval = E1000_READ_REG(hw, E1000_EIAC); 5196 E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask); 5197 5198 /* enable msix_other interrupt */ 5199 regval = E1000_READ_REG(hw, E1000_EIMS); 5200 E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask); 5201 tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8; 5202 E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval); 5203 } 5204 5205 /* use EIAM to auto-mask when MSI-X interrupt 5206 * is asserted, this saves a register write for every interrupt 5207 */ 5208 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << 5209 misc_shift; 5210 regval = E1000_READ_REG(hw, E1000_EIAM); 5211 E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask); 5212 5213 for (queue_id = 0; queue_id < 
dev->data->nb_rx_queues; queue_id++) { 5214 eth_igb_assign_msix_vector(hw, 0, queue_id, vec); 5215 intr_handle->intr_vec[queue_id] = vec; 5216 if (vec < base + intr_handle->nb_efd - 1) 5217 vec++; 5218 } 5219 5220 E1000_WRITE_FLUSH(hw); 5221 } 5222 5223 PMD_REGISTER_DRIVER(pmd_igb_drv, igb); 5224 DRIVER_REGISTER_PCI_TABLE(igb, pci_id_igb_map); 5225 PMD_REGISTER_DRIVER(pmd_igbvf_drv, igbvf); 5226 DRIVER_REGISTER_PCI_TABLE(igbvf, pci_id_igbvf_map); 5227