/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

#define EM_EIAC			0x000DC

/* Round x up to a multiple of y, e.g. PMD_ROUNDUP(3036, 1024) == 3072. */
#define PMD_ROUNDUP(x,y)	(((x) + (y) - 1)/(y) * (y))


static int eth_em_configure(struct rte_eth_dev *dev);
static int eth_em_start(struct rte_eth_dev *dev);
static int eth_em_stop(struct rte_eth_dev *dev);
static int eth_em_close(struct rte_eth_dev *dev);
static int eth_em_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_em_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_em_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_em_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_em_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int eth_em_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_em_stats_reset(struct rte_eth_dev *dev);
static int eth_em_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_em_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_em_interrupt_action(struct rte_eth_dev *dev,
				   struct rte_intr_handle *handle);
static void eth_em_interrupt_handler(void *param);

static int em_hw_init(struct e1000_hw *hw);
static int em_hardware_init(struct e1000_hw *hw);
static void em_hw_control_acquire(struct e1000_hw *hw);
static void em_hw_control_release(struct e1000_hw *hw);
static void em_init_manageability(struct e1000_hw *hw);
static void em_release_manageability(struct e1000_hw *hw);

static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev);

static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static void em_lsc_intr_disable(struct e1000_hw *hw);
static void em_rxq_intr_enable(struct e1000_hw *hw);
static void em_rxq_intr_disable(struct e1000_hw *hw);

static int eth_em_led_on(struct rte_eth_dev *dev);
static int eth_em_led_off(struct rte_eth_dev *dev);
static int em_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_em_rar_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr,
			uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
					 struct rte_ether_addr *addr);

static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
				   struct rte_ether_addr *mc_addr_set,
				   uint32_t nb_mc_addr);

#define EM_FC_PAUSE_TIME 0x0680
#define EM_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

static enum e1000_fc_mode em_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_em_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82540EM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_QUAD) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571PT_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER_LP) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82573L) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH2_LV_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LBG_I219_LM3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM5) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V5) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM6) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V6) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM7) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V7) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops eth_em_ops = {
	.dev_configure        = eth_em_configure,
	.dev_start            = eth_em_start,
	.dev_stop             = eth_em_stop,
	.dev_close            = eth_em_close,
	.promiscuous_enable   = eth_em_promiscuous_enable,
	.promiscuous_disable  = eth_em_promiscuous_disable,
	.allmulticast_enable  = eth_em_allmulticast_enable,
	.allmulticast_disable = eth_em_allmulticast_disable,
	.link_update          = eth_em_link_update,
	.stats_get            = eth_em_stats_get,
	.stats_reset          = eth_em_stats_reset,
	.dev_infos_get        = eth_em_infos_get,
	.mtu_set              = eth_em_mtu_set,
	.vlan_filter_set      = eth_em_vlan_filter_set,
	.vlan_offload_set     = eth_em_vlan_offload_set,
	.rx_queue_setup       = eth_em_rx_queue_setup,
	.rx_queue_release     = eth_em_rx_queue_release,
	.tx_queue_setup       = eth_em_tx_queue_setup,
	.tx_queue_release     = eth_em_tx_queue_release,
	.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_em_rx_queue_intr_disable,
	.dev_led_on           = eth_em_led_on,
	.dev_led_off          = eth_em_led_off,
	.flow_ctrl_get        = eth_em_flow_ctrl_get,
	.flow_ctrl_set        = eth_em_flow_ctrl_set,
	.mac_addr_set         = eth_em_default_mac_addr_set,
	.mac_addr_add         = eth_em_rar_set,
	.mac_addr_remove      = eth_em_rar_clear,
	.set_mc_addr_list     = eth_em_set_mc_addr_list,
	.rxq_info_get         = em_rxq_info_get,
	.txq_info_get         = em_txq_info_get,
};


/**
 * eth_em_dev_is_ich8 - Check for ICH8 device
 * @hw: pointer to the HW structure
 *
 * return TRUE for ICH8, otherwise FALSE
 **/
static bool
eth_em_dev_is_ich8(struct e1000_hw *hw)
{
	DEBUGFUNC("eth_em_dev_is_ich8");

	switch (hw->device_id) {
	case E1000_DEV_ID_PCH2_LV_LM:
	case E1000_DEV_ID_PCH_LPT_I217_LM:
	case E1000_DEV_ID_PCH_LPT_I217_V:
	case E1000_DEV_ID_PCH_LPTLP_I218_LM:
	case E1000_DEV_ID_PCH_LPTLP_I218_V:
	case E1000_DEV_ID_PCH_I218_V2:
	case E1000_DEV_ID_PCH_I218_LM2:
	case E1000_DEV_ID_PCH_I218_V3:
	case E1000_DEV_ID_PCH_I218_LM3:
	case E1000_DEV_ID_PCH_SPT_I219_LM:
	case E1000_DEV_ID_PCH_SPT_I219_V:
	case E1000_DEV_ID_PCH_SPT_I219_LM2:
	case E1000_DEV_ID_PCH_SPT_I219_V2:
	case E1000_DEV_ID_PCH_LBG_I219_LM3:
	case E1000_DEV_ID_PCH_SPT_I219_LM4:
	case E1000_DEV_ID_PCH_SPT_I219_V4:
	case E1000_DEV_ID_PCH_SPT_I219_LM5:
	case E1000_DEV_ID_PCH_SPT_I219_V5:
	case E1000_DEV_ID_PCH_CNP_I219_LM6:
	case E1000_DEV_ID_PCH_CNP_I219_V6:
	case E1000_DEV_ID_PCH_CNP_I219_LM7:
	case E1000_DEV_ID_PCH_CNP_I219_V7:
		return 1;
	default:
		return 0;
	}
}
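/*
 * eth_em_dev_is_ich8() keys off hw->device_id, which eth_em_dev_init()
 * below copies from the PCI ID before the check runs. The ICH8-family
 * parts listed in the switch keep their NVM in a flash region behind a
 * second BAR, which is why dev_init maps mem_resource[1] for them in
 * addition to the register BAR in mem_resource[0].
 */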
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);

	eth_dev->dev_ops = &eth_em_ops;
	eth_dev->rx_queue_count = eth_em_rx_queue_count;
	eth_dev->rx_descriptor_status = eth_em_rx_descriptor_status;
	eth_dev->tx_descriptor_status = eth_em_tx_descriptor_status;
	eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
	eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
	eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst =
				(eth_rx_burst_t)&eth_em_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;
	adapter->stopped = 0;

	/* For ICH8 support we'll need to map the flash memory BAR */
	if (eth_em_dev_is_ich8(hw))
		hw->flash_address = (void *)pci_dev->mem_resource[1].addr;

	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
			em_hw_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
			"failed to init HW",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
		return -ENODEV;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000", RTE_ETHER_ADDR_LEN *
			hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
		eth_dev->data->mac_addrs);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
			eth_em_interrupt_handler, eth_dev);

	return 0;
}

static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_em_close(eth_dev);

	return 0;
}

static int eth_em_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_em_dev_init);
}

static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_em_dev_uninit);
}

static struct rte_pci_driver rte_em_pmd = {
	.id_table = pci_id_em_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_em_pci_probe,
	.remove = eth_em_pci_remove,
};
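/*
 * RTE_PCI_DRV_NEED_MAPPING asks the PCI bus driver to map the device's
 * BARs before probing, and RTE_PCI_DRV_INTR_LSC advertises link-status
 * interrupt support. rte_eth_dev_pci_generic_probe() allocates one
 * struct e1000_adapter as the per-port private data area and then
 * invokes eth_em_dev_init() on the new ethdev.
 */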
static int
em_hw_init(struct e1000_hw *hw)
{
	int diag;

	diag = hw->mac.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "MAC Initialization Error");
		return diag;
	}
	diag = hw->nvm.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "NVM Initialization Error");
		return diag;
	}
	diag = hw->phy.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "PHY Initialization Error");
		return diag;
	}
	(void) e1000_get_bus_info(hw);

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	e1000_init_script_state_82541(hw, TRUE);
	e1000_set_tbi_compatibility_82543(hw, TRUE);

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address afterwards.
	 */
	e1000_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		diag = e1000_validate_nvm_checksum(hw);
		if (diag < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			goto error;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	diag = e1000_read_mac_addr(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		goto error;
	}

	/* Now initialize the hardware */
	diag = em_hardware_init(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		goto error;
	}

	hw->mac.get_link_status = 1;

	/* Indicate SOL/IDER usage */
	diag = e1000_check_reset_block(hw);
	if (diag < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			"SOL/IDER session");
	}
	return 0;

error:
	em_hw_control_release(hw);
	return diag;
}

static int
eth_em_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;

	return 0;
}
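/*
 * A note on units: the E1000_PBA_* constants used below encode the Rx
 * share of the on-chip packet buffer in kilobytes (e.g. E1000_PBA_22K
 * gives 22 KB to Rx, leaving 18 KB of a 40 KB buffer for Tx), and
 * em_get_rx_buffer_size() later converts the same PBA field back to
 * bytes with a "<< 10".
 */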
static void
em_set_pba(struct e1000_hw *hw)
{
	uint32_t pba;

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * After the 82547 the buffer was reduced to 40K.
	 */
	switch (hw->mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2:
		/* 82547: Total Packet Buffer is 40K */
		pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case e1000_82574:
	case e1000_82583:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	case e1000_ich9lan:
	case e1000_ich10lan:
		pba = E1000_PBA_10K;
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
		pba = E1000_PBA_26K;
		break;
	default:
		pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
	}

	E1000_WRITE_REG(hw, E1000_PBA, pba);
}

static void
eth_em_rxtx_control(struct rte_eth_dev *dev,
		    bool enable)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tctl, rctl;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	if (enable) {
		/* enable Tx/Rx */
		tctl |= E1000_TCTL_EN;
		rctl |= E1000_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~E1000_TCTL_EN;
		rctl &= ~E1000_RCTL_EN;
	}
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);
}
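/*
 * Start-up sequence implemented below: stop the port, power up the PHY,
 * program the packet-buffer split (PBA) and the default MAC address,
 * re-run the hardware init, then wire up Rx-queue/LSC interrupts and
 * the Tx/Rx rings before enabling traffic and reporting link state.
 */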
static int
eth_em_start(struct rte_eth_dev *dev)
{
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	ret = eth_em_stop(dev);
	if (ret != 0)
		return ret;

	e1000_power_up_phy(hw);

	/* Set default PBA value */
	em_set_pba(hw);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset. We make a duplicate
	 * in RAR[14] for that eventuality, which assures
	 * the interface continues to function.
	 */
	if (hw->mac.type == e1000_82571) {
		e1000_set_laa_state_82571(hw, TRUE);
		e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}

	E1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN);

	/* Configure for OS presence */
	em_init_manageability(hw);

	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
					" intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}

		/* enable rx interrupt */
		em_rxq_intr_enable(hw);
	}

	eth_em_tx_init(dev);

	ret = eth_em_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		em_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
			RTE_ETH_VLAN_EXTEND_MASK;
	ret = eth_em_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
		em_dev_clear_queues(dev);
		return ret;
	}

	/* Set Interrupt Throttling Rate to maximum allowed value. */
	E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		hw->mac.autoneg = 1;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & RTE_ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0) {
			ret = eth_em_interrupt_setup(dev);
			if (ret) {
				PMD_INIT_LOG(ERR, "Unable to setup interrupts");
				em_dev_clear_queues(dev);
				return ret;
			}
		}
	} else {
		rte_intr_callback_unregister(intr_handle,
						eth_em_interrupt_handler,
						(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}
	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0)
		eth_em_rxq_interrupt_setup(dev);

	rte_intr_enable(intr_handle);

	adapter->stopped = 0;

	eth_em_rxtx_control(dev, true);
	eth_em_link_update(dev, 0);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	em_dev_clear_queues(dev);
	return -EINVAL;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static int
eth_em_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	dev->data->dev_started = 0;

	eth_em_rxtx_control(dev, false);
	em_rxq_intr_disable(hw);
	em_lsc_intr_disable(hw);

	e1000_reset_hw(hw);

	/* Flush desc rings for i219 */
	if (hw->mac.type == e1000_pch_spt || hw->mac.type == e1000_pch_cnp)
		em_flush_desc_rings(dev);

	if (hw->mac.type >= e1000_82544)
		E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Power down the phy. Needed to make the link go down */
	e1000_power_down_phy(hw);

	em_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_em_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	return 0;
}

static int
eth_em_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = eth_em_stop(dev);
	adapter->stopped = 1;
	em_dev_free_queues(dev);
	e1000_phy_hw_reset(hw);
	em_release_manageability(hw);
	em_hw_control_release(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     eth_em_interrupt_handler, dev);

	return ret;
}

static int
em_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	/* PBA[15:0] holds the Rx allocation in KB; convert to bytes. */
	rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10);
	return rx_buf_size;
}
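/*
 * Worked example for the watermark arithmetic in em_hardware_init()
 * below, assuming a 32 KB Rx buffer as reported by
 * em_get_rx_buffer_size():
 *
 *	high_water = 32768 - PMD_ROUNDUP(2 * 1518, 1024)
 *		   = 32768 - 3072 = 29696 bytes
 *	low_water  = 29696 - 1500 = 28196 bytes
 *
 * i.e. XOFF is sent once less than two rounded-up full-size frames of
 * headroom remain, and XON once roughly one frame has been drained.
 */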
/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
em_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Issue a global reset */
	e1000_reset_hw(hw);

	/* Let the firmware know the OS is in control */
	em_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = em_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size -
		PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);
	hw->fc.low_water = hw->fc.high_water - 1500;

	if (hw->mac.type == e1000_80003es2lan)
		hw->fc.pause_time = UINT16_MAX;
	else
		hw->fc.pause_time = EM_FC_PAUSE_TIME;

	hw->fc.send_xon = 1;

	/* Set Flow control, use the tunable location if sane */
	if (em_fc_setting <= e1000_fc_full)
		hw->fc.requested_mode = em_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Workaround: no TX flow ctrl for PCH */
	if (hw->mac.type == e1000_pchlan)
		hw->fc.requested_mode = e1000_fc_rx_pause;

	/* Override: PCH2LAN requires these magic watermark values */
	if (hw->mac.type == e1000_pch2lan) {
		hw->fc.high_water = 0x5C20;
		hw->fc.low_water = 0x5048;
		hw->fc.pause_time = 0x0650;
		hw->fc.refresh_time = 0x0400;
	} else if (hw->mac.type == e1000_pch_lpt ||
		   hw->mac.type == e1000_pch_spt ||
		   hw->mac.type == e1000_pch_cnp) {
		hw->fc.requested_mode = e1000_fc_full;
	}

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;
	e1000_check_for_link(hw);
	return 0;
}
/* This function is based on em_update_stats_counters() in e1000/if_em.c */
static int
eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int pause_frames;

	if (hw->phy.media_type == e1000_media_type_copper ||
			(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/*
	 * For the 64-bit byte counters the low dword must be read first.
	 * Both registers clear on the read of the high dword.
	 */

	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->tor += E1000_READ_REG(hw, E1000_TORH);
	stats->tot += E1000_READ_REG(hw, E1000_TOTH);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	if (hw->mac.type >= e1000_82571) {
		stats->iac += E1000_READ_REG(hw, E1000_IAC);
		stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
		stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
		stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
		stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
		stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
		stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
		stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
		stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
	}

	if (hw->mac.type >= e1000_82543) {
		stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
		stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
		stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
	}

	if (rte_stats == NULL)
		return -EINVAL;

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs + stats->rlec +
		stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes = stats->gorc;
	rte_stats->obytes = stats->gotc;
	return 0;
}
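/*
 * Most of the counters read above are clear-on-read, so eth_em_stats_get()
 * accumulates them into the software e1000_hw_stats copy. stats_reset
 * exploits this: calling eth_em_stats_get(dev, NULL) drains (and thereby
 * clears) the hardware registers before the software totals are zeroed.
 */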
static int
eth_em_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *hw_stats =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_em_stats_get(dev, NULL);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	em_rxq_intr_enable(hw);
	rte_intr_ack(intr_handle);

	return 0;
}

static int
eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	em_rxq_intr_disable(hw);

	return 0;
}

uint32_t
em_get_max_pktlen(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_82574:
	case e1000_80003es2lan: /* 9K Jumbo Frame size */
	case e1000_82583:
		return 0x2412; /* 9234 bytes */
	case e1000_pchlan:
		return 0x1000; /* 4096 bytes */
	/* Adapters that do not support jumbo frames */
	case e1000_ich8lan:
		return RTE_ETHER_MAX_LEN;
	default:
		return MAX_JUMBO_FRAME_SIZE;
	}
}
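/*
 * For illustration: an application retrieves the limits reported below
 * through the generic ethdev API, for example:
 *
 *	struct rte_eth_dev_info info;
 *	rte_eth_dev_info_get(port_id, &info);
 *
 * after which info.max_rx_queues == info.max_tx_queues == 1 for this PMD
 * (see the comment inside eth_em_infos_get() for why).
 */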
static int
eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;

	/*
	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
	 * Unfortunately, all these nics have just one TX context.
	 * So we have few choices for TX:
	 * - Use just one TX queue.
	 * - Allow cksum offload only for one TX queue.
	 * - Don't allow TX cksum offload at all.
	 * For now, option #1 was chosen.
	 * To use second RX queue we have to use extended RX descriptor
	 * (Multiple Receive Queues are mutually exclusive with UDP
	 * fragmentation and are not supported when a legacy receive
	 * descriptor format is used).
	 * Which means separate RX routines - as legacy nics (82540, 82545)
	 * don't support extended RXD.
	 * To avoid it we support just one RX queue for now (no RSS).
	 */

	dev_info->max_rx_queues = 1;
	dev_info->max_tx_queues = 1;

	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa();
	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa() |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
	dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
				    dev_info->tx_queue_offload_capa;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = E1000_MAX_RING_DESC,
		.nb_min = E1000_MIN_RING_DESC,
		.nb_align = EM_RXD_ALIGN,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = E1000_MAX_RING_DESC,
		.nb_min = E1000_MIN_RING_DESC,
		.nb_align = EM_TXD_ALIGN,
		.nb_seg_max = EM_TX_MAX_SEG,
		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
	};

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
			RTE_ETH_LINK_SPEED_1G;

	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	/* Preferred queue parameters */
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = 256;
	dev_info->default_rxportconf.ring_size = 256;

	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;
	int link_up, count;

	link_up = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_up = !hw->mac.get_link_status;
			break;

		case e1000_media_type_fiber:
			e1000_check_for_link(hw);
			link_up = (E1000_READ_REG(hw, E1000_STATUS) &
					E1000_STATUS_LU);
			break;

		case e1000_media_type_internal_serdes:
			e1000_check_for_link(hw);
			link_up = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_up || wait_to_complete == 0)
			break;
		rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_up) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				RTE_ETH_LINK_FULL_DUPLEX :
				RTE_ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = RTE_ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				RTE_ETH_LINK_SPEED_FIXED);
	} else {
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_autoneg = RTE_ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
/*
 * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
 */
static void
em_hw_control_acquire(struct e1000_hw *hw)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware know the driver has taken over */
	if (hw->mac.type == e1000_82573) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);

	} else {
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/*
 * em_hw_control_release resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions of the
 * f/w this means that the network i/f is closed.
 */
static void
em_hw_control_release(struct e1000_hw *hw)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware take over control of h/w */
	if (hw->mac.type == e1000_82573) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else {
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/*
 * Bit of a misnomer: what this really means is
 * to enable OS management of the system, i.e.
 * to disable special hardware management features.
 */
static void
em_init_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;  /* Mng Port 623 */
		manc2h |= 1 << 6;  /* Mng Port 664 */
		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to the hardware management
 * controller if there is one.
 */
static void
em_release_manageability(struct e1000_hw *hw)
{
	uint32_t manc;

	if (e1000_enable_mng_pass_thru(hw)) {
		manc = E1000_READ_REG(hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

static int
eth_em_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}

static int
eth_em_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);
	if (dev->data->all_multicast == 1)
		rctl |= E1000_RCTL_MPE;
	else
		rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}

static int
eth_em_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}

static int
eth_em_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}

static int
eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
			E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
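/*
 * VFTA indexing above, worked through (assuming the usual layout of one
 * bit per VLAN across 32-bit table words, i.e. E1000_VFTA_ENTRY_SHIFT
 * == 5 and E1000_VFTA_ENTRY_BIT_SHIFT_MASK == 0x1F): for vlan_id 100,
 * vid_idx = 100 >> 5 = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4, so
 * bit 4 of VFTA word 3 controls acceptance of VLAN 100.
 */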
static void
em_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* Filter Table Disable */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg &= ~E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
em_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* restore vfta from local copy */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}

static void
em_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Disable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Enable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static int
eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			em_vlan_hw_strip_enable(dev);
		else
			em_vlan_hw_strip_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			em_vlan_hw_filter_enable(dev);
		else
			em_vlan_hw_filter_disable(dev);
	}

	return 0;
}

/*
 * It enables the interrupt mask and then enables the interrupt.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t regval;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* clear interrupt */
	E1000_READ_REG(hw, E1000_ICR);
	regval = E1000_READ_REG(hw, E1000_IMS);
	E1000_WRITE_REG(hw, E1000_IMS,
			regval | E1000_ICR_LSC | E1000_ICR_OTHER);
	return 0;
}

/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_READ_REG(hw, E1000_ICR);
	em_rxq_intr_enable(hw);
	return 0;
}

/*
 * It enables the receive packet interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_rxq_intr_enable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);
}

/*
 * It disables the LSC interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_lsc_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC | E1000_IMS_OTHER);
	E1000_WRITE_FLUSH(hw);
}
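/*
 * A note on the mask registers used by these helpers: writing a 1 to a
 * bit in IMS (Interrupt Mask Set) enables that interrupt cause, while
 * writing a 1 to the same bit in IMC (Interrupt Mask Clear) disables
 * it, so enabling/disabling never needs a read-modify-write cycle.
 */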
/*
 * It disables the receive packet interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_rxq_intr_disable(struct e1000_hw *hw)
{
	E1000_READ_REG(hw, E1000_ICR);
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);
}

/*
 * It reads ICR to get the interrupt causes, checks them and sets a bit
 * flag to request a link status update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	/* read-on-clear nic registers here */
	icr = E1000_READ_REG(hw, E1000_ICR);
	if (icr & E1000_ICR_LSC) {
		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	}

	return 0;
}

/*
 * It executes link_update after knowing an interrupt is present.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_action(struct rte_eth_dev *dev,
			struct rte_intr_handle *intr_handle)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link;
	int ret;

	if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
		return -1;

	intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
	rte_intr_ack(intr_handle);

	/* set get_link_status to check register later */
	hw->mac.get_link_status = 1;
	ret = eth_em_link_update(dev, 0);

	/* check if link has changed */
	if (ret < 0)
		return 0;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
			     dev->data->port_id, link.link_speed,
			     link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
			     "full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	return 0;
}

/**
 * Interrupt handler which shall be registered at first.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
eth_em_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_em_interrupt_get_status(dev);
	eth_em_interrupt_action(dev, dev->intr_handle);
	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
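/*
 * End-to-end LSC path, for illustration: the handler above re-reads ICR,
 * refreshes the cached link state, and rte_eth_dev_callback_process()
 * then invokes any callback the application registered, e.g. with
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_cb, NULL);
 *
 * where lsc_cb is an application-supplied rte_eth_dev_cb_fn.
 */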
static int
eth_em_led_on(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_em_led_off(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to actual setting of
	 * the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	if (ctrl & E1000_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & E1000_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_ETH_FC_NONE;

	return 0;
}

static int
eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	int err;
	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
		e1000_fc_none,
		e1000_fc_rx_pause,
		e1000_fc_tx_pause,
		e1000_fc_full
	};
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;
	rx_buf_size = em_get_rx_buffer_size(hw);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if ((fc_conf->high_water > max_high_water) ||
	    (fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
		PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = e1000_setup_link_generic(hw);
	if (err == E1000_SUCCESS) {
		/*
		 * Check if we want to forward MAC frames - the driver
		 * doesn't have native capability to do that, so we'll
		 * write the registers ourselves.
		 */
		rctl = E1000_READ_REG(hw, E1000_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= E1000_RCTL_PMCF;
		else
			rctl &= ~E1000_RCTL_PMCF;

		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
	return -EIO;
}
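/*
 * The mac_addr_add/mac_addr_remove ops below manage the Receive Address
 * Register (RAR) array; hw->mac.rar_entry_count such filters exist, with
 * RAR[0] holding the default station address. "Clearing" an entry is
 * simply programming it with the all-zero address.
 */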
static int
eth_em_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, __rte_unused uint32_t pool)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return e1000_rar_set(hw, mac_addr->addr_bytes, index);
}

static void
eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[RTE_ETHER_ADDR_LEN];
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(addr, 0, sizeof(addr));

	e1000_rar_set(hw, addr, index);
}

static int
eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *addr)
{
	eth_em_rar_clear(dev, 0);

	return eth_em_rar_set(dev, (void *)addr, 0, 0);
}

static int
eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct e1000_hw *hw;
	uint32_t frame_size;
	uint32_t rctl;

	frame_size = mtu + E1000_ETH_OVERHEAD;

	/*
	 * If the device is started, refuse an MTU that requires scattered
	 * Rx support when that feature has not been enabled beforehand.
	 */
	if (dev->data->dev_started && !dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	rctl = E1000_READ_REG(hw, E1000_RCTL);

	/* switch to jumbo mode if needed */
	if (mtu > RTE_ETHER_MTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}

static int
eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}

RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
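/*
 * Usage note, for illustration: the three macros above register the PMD
 * as "net_e1000_em", publish its PCI ID table for tooling such as
 * dpdk-pmdinfo, and declare the kernel modules the device may be bound
 * to. A matching port is then picked up automatically at EAL init, e.g.
 * (hypothetical PCI address):
 *
 *	dpdk-testpmd -a 0000:03:00.0 -- -i
 */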