/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

#define EM_EIAC			0x000DC

#define PMD_ROUNDUP(x,y)	(((x) + (y) - 1)/(y) * (y))
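/*
 * Worked example (illustrative only): PMD_ROUNDUP rounds x up to the next
 * multiple of y. With x = ETHER_MAX_LEN * 2 = 3036 and y = 1024, as used
 * for the flow-control high-water reservation further below:
 *
 *	PMD_ROUNDUP(3036, 1024) = ((3036 + 1023) / 1024) * 1024
 *	                        = (4059 / 1024) * 1024	(integer division: 3)
 *	                        = 3072
 */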

static int eth_em_configure(struct rte_eth_dev *dev);
static int eth_em_start(struct rte_eth_dev *dev);
static void eth_em_stop(struct rte_eth_dev *dev);
static void eth_em_close(struct rte_eth_dev *dev);
static void eth_em_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_em_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_em_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_em_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_em_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void eth_em_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static void eth_em_stats_reset(struct rte_eth_dev *dev);
static void eth_em_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_em_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_em_interrupt_action(struct rte_eth_dev *dev);
static void eth_em_interrupt_handler(struct rte_intr_handle *handle,
				void *param);

static int em_hw_init(struct e1000_hw *hw);
static int em_hardware_init(struct e1000_hw *hw);
static void em_hw_control_acquire(struct e1000_hw *hw);
static void em_hw_control_release(struct e1000_hw *hw);
static void em_init_manageability(struct e1000_hw *hw);
static void em_release_manageability(struct e1000_hw *hw);

static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev);

/*
static void eth_em_vlan_filter_set(struct rte_eth_dev *dev,
					uint16_t vlan_id, int on);
*/

static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static void em_lsc_intr_disable(struct e1000_hw *hw);
static void em_rxq_intr_enable(struct e1000_hw *hw);
static void em_rxq_intr_disable(struct e1000_hw *hw);

static int eth_em_led_on(struct rte_eth_dev *dev);
static int eth_em_led_off(struct rte_eth_dev *dev);

static int em_get_rx_buffer_size(struct e1000_hw *hw);
static void eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);

static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
				   struct ether_addr *mc_addr_set,
				   uint32_t nb_mc_addr);

#define EM_FC_PAUSE_TIME 0x0680
#define EM_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_em_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82540EM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_QUAD) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571PT_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER_LP) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82573L) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops eth_em_ops = {
	.dev_configure        = eth_em_configure,
	.dev_start            = eth_em_start,
	.dev_stop             = eth_em_stop,
	.dev_close            = eth_em_close,
	.promiscuous_enable   = eth_em_promiscuous_enable,
	.promiscuous_disable  = eth_em_promiscuous_disable,
	.allmulticast_enable  = eth_em_allmulticast_enable,
	.allmulticast_disable = eth_em_allmulticast_disable,
	.link_update          = eth_em_link_update,
	.stats_get            = eth_em_stats_get,
	.stats_reset          = eth_em_stats_reset,
	.dev_infos_get        = eth_em_infos_get,
	.mtu_set              = eth_em_mtu_set,
	.vlan_filter_set      = eth_em_vlan_filter_set,
	.vlan_offload_set     = eth_em_vlan_offload_set,
	.rx_queue_setup       = eth_em_rx_queue_setup,
	.rx_queue_release     = eth_em_rx_queue_release,
	.rx_queue_count       = eth_em_rx_queue_count,
	.rx_descriptor_done   = eth_em_rx_descriptor_done,
	.tx_queue_setup       = eth_em_tx_queue_setup,
	.tx_queue_release     = eth_em_tx_queue_release,
	.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_em_rx_queue_intr_disable,
	.dev_led_on           = eth_em_led_on,
	.dev_led_off          = eth_em_led_off,
	.flow_ctrl_get        = eth_em_flow_ctrl_get,
	.flow_ctrl_set        = eth_em_flow_ctrl_set,
	.mac_addr_add         = eth_em_rar_set,
	.mac_addr_remove      = eth_em_rar_clear,
	.set_mc_addr_list     = eth_em_set_mc_addr_list,
	.rxq_info_get         = em_rxq_info_get,
	.txq_info_get         = em_txq_info_get,
};

/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer to be filled with the current link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the new link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
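/*
 * Note on the two helpers above: they rely on struct rte_eth_link fitting
 * in a single aligned 64-bit word, so one compare-and-set copies the whole
 * link status atomically. The cmpset uses the freshly read *dst as the
 * "expected" value, so it can only fail (returning -1 here) if another
 * thread modified the destination between that read and the cmpset itself.
 * Illustrative caller (hypothetical):
 *
 *	struct rte_eth_link snapshot;
 *
 *	if (rte_em_dev_atomic_read_link_status(dev, &snapshot) == 0)
 *		printf("speed %u Mbps\n", (unsigned)snapshot.link_speed);
 */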
/**
 *  eth_em_dev_is_ich8 - Check for ICH8 device
 *  @hw: pointer to the HW structure
 *
 *  return TRUE for ICH8, otherwise FALSE
 **/
static bool
eth_em_dev_is_ich8(struct e1000_hw *hw)
{
	DEBUGFUNC("eth_em_dev_is_ich8");

	switch (hw->device_id) {
	case E1000_DEV_ID_PCH_LPT_I217_LM:
	case E1000_DEV_ID_PCH_LPT_I217_V:
	case E1000_DEV_ID_PCH_LPTLP_I218_LM:
	case E1000_DEV_ID_PCH_LPTLP_I218_V:
	case E1000_DEV_ID_PCH_I218_V2:
	case E1000_DEV_ID_PCH_I218_LM2:
	case E1000_DEV_ID_PCH_I218_V3:
	case E1000_DEV_ID_PCH_I218_LM3:
		return 1;
	default:
		return 0;
	}
}

static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);

	pci_dev = eth_dev->pci_dev;

	eth_dev->dev_ops = &eth_em_ops;
	eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
	eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst =
				(eth_rx_burst_t)&eth_em_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;
	adapter->stopped = 0;

	/* For ICH8 support we'll need to map the flash memory BAR */
	if (eth_em_dev_is_ich8(hw))
		hw->flash_address = (void *)pci_dev->mem_resource[1].addr;

	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
			em_hw_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
			"failed to init HW",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
		return -ENODEV;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
			hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
		eth_dev->data->mac_addrs);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&(pci_dev->intr_handle),
		eth_em_interrupt_handler, (void *)eth_dev);

	return 0;
}
static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	pci_dev = eth_dev->pci_dev;

	if (adapter->stopped == 0)
		eth_em_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(&(pci_dev->intr_handle));
	rte_intr_callback_unregister(&(pci_dev->intr_handle),
		eth_em_interrupt_handler, (void *)eth_dev);

	return 0;
}

static struct eth_driver rte_em_pmd = {
	.pci_drv = {
		.id_table = pci_id_em_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_DETACHABLE,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_em_dev_init,
	.eth_dev_uninit = eth_em_dev_uninit,
	.dev_private_size = sizeof(struct e1000_adapter),
};

static int
em_hw_init(struct e1000_hw *hw)
{
	int diag;

	diag = hw->mac.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "MAC Initialization Error");
		return diag;
	}
	diag = hw->nvm.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "NVM Initialization Error");
		return diag;
	}
	diag = hw->phy.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "PHY Initialization Error");
		return diag;
	}
	(void) e1000_get_bus_info(hw);

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	e1000_init_script_state_82541(hw, TRUE);
	e1000_set_tbi_compatibility_82543(hw, TRUE);

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address from the hardware afterwards.
	 */
	e1000_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again;
		 * if it fails a second time, it's a real issue.
		 */
		diag = e1000_validate_nvm_checksum(hw);
		if (diag < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			goto error;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	diag = e1000_read_mac_addr(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		goto error;
	}

	/* Now initialize the hardware */
	diag = em_hardware_init(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		goto error;
	}

	hw->mac.get_link_status = 1;

	/* Indicate SOL/IDER usage */
	diag = e1000_check_reset_block(hw);
	if (diag < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			"SOL/IDER session");
	}
	return 0;

error:
	em_hw_control_release(hw);
	return diag;
}

static int
eth_em_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static void
em_set_pba(struct e1000_hw *hw)
{
	uint32_t pba;

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * After the 82547 the buffer was reduced to 40K.
	 */
	switch (hw->mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2:
		/* 82547: Total Packet Buffer is 40K */
		pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case e1000_82574:
	case e1000_82583:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	case e1000_ich9lan:
	case e1000_ich10lan:
		pba = E1000_PBA_10K;
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		pba = E1000_PBA_26K;
		break;
	default:
		pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
	}

	E1000_WRITE_REG(hw, E1000_PBA, pba);
}

static int
eth_em_start(struct rte_eth_dev *dev)
{
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	eth_em_stop(dev);

	e1000_power_up_phy(hw);

	/* Set default PBA value */
	em_set_pba(hw);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset. We make a duplicate
	 * in RAR[14] for that eventuality; this assures
	 * the interface continues to function.
	 */
	if (hw->mac.type == e1000_82571) {
		e1000_set_laa_state_82571(hw, TRUE);
		e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);

	/* Configure for OS presence */
	em_init_manageability(hw);

	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
					dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
						" intr_vec\n", dev->data->nb_rx_queues);
			return -ENOMEM;
		}

		/* enable rx interrupt */
		em_rxq_intr_enable(hw);
	}

	eth_em_tx_init(dev);

	ret = eth_em_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		em_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
			ETH_VLAN_EXTEND_MASK;
	eth_em_vlan_offload_set(dev, mask);

	/* Set Interrupt Throttling Rate to maximum allowed value. */
	E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);
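	/*
	 * dev_conf.link_speeds follows the generic ethdev convention: 0
	 * (ETH_LINK_SPEED_AUTONEG) means autonegotiate everything;
	 * otherwise it is an OR of ETH_LINK_SPEED_* flags, optionally with
	 * ETH_LINK_SPEED_FIXED to disable autonegotiation. For example, a
	 * hypothetical application wanting fixed 100 Mb/s full duplex
	 * would configure, before dev_start:
	 *
	 *	dev->data->dev_conf.link_speeds =
	 *		ETH_LINK_SPEED_100M | ETH_LINK_SPEED_FIXED;
	 */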
	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0) {
			ret = eth_em_interrupt_setup(dev);
			if (ret) {
				PMD_INIT_LOG(ERR, "Unable to setup interrupts");
				em_dev_clear_queues(dev);
				return ret;
			}
		}
	} else {
		rte_intr_callback_unregister(intr_handle,
						eth_em_interrupt_handler,
						(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "LSC won't be enabled because of"
					" no interrupt multiplexing");
	}
	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0)
		eth_em_rxq_interrupt_setup(dev);

	rte_intr_enable(intr_handle);

	adapter->stopped = 0;

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	em_dev_clear_queues(dev);
	return -EINVAL;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_em_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;

	em_rxq_intr_disable(hw);
	em_lsc_intr_disable(hw);

	e1000_reset_hw(hw);
	if (hw->mac.type >= e1000_82544)
		E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Power down the phy. Needed to make the link go down */
	e1000_power_down_phy(hw);

	em_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_em_dev_atomic_write_link_status(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_em_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

static void
eth_em_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);

	eth_em_stop(dev);
	adapter->stopped = 1;
	em_dev_free_queues(dev);
	e1000_phy_hw_reset(hw);
	em_release_manageability(hw);
	em_hw_control_release(hw);
}

static int
em_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10);
	return rx_buf_size;
}
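/*
 * Worked example for em_get_rx_buffer_size() (illustrative): the low 16
 * bits of PBA hold the receive allocation in KB, as written by
 * em_set_pba(). Assuming PBA reads back as E1000_PBA_32K (0x0020 = 32):
 *
 *	rx_buf_size = (0x0020 & UINT16_MAX) << 10 = 32 * 1024 = 32768 bytes
 */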
/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
em_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Issue a global reset */
	e1000_reset_hw(hw);

	/* Let the firmware know the OS is in control */
	em_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = em_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
	hw->fc.low_water = hw->fc.high_water - 1500;

	if (hw->mac.type == e1000_80003es2lan)
		hw->fc.pause_time = UINT16_MAX;
	else
		hw->fc.pause_time = EM_FC_PAUSE_TIME;

	hw->fc.send_xon = 1;

	/* Set Flow control, use the tunable location if sane */
	if (em_fc_setting <= e1000_fc_full)
		hw->fc.requested_mode = em_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Workaround: no TX flow ctrl for PCH */
	if (hw->mac.type == e1000_pchlan)
		hw->fc.requested_mode = e1000_fc_rx_pause;

	/* Override - settings for PCH2LAN; yes, it's magic :) */
	if (hw->mac.type == e1000_pch2lan) {
		hw->fc.high_water = 0x5C20;
		hw->fc.low_water = 0x5048;
		hw->fc.pause_time = 0x0650;
		hw->fc.refresh_time = 0x0400;
	} else if (hw->mac.type == e1000_pch_lpt) {
		hw->fc.requested_mode = e1000_fc_full;
	}

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;
	e1000_check_for_link(hw);
	return 0;
}
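/*
 * Worked example for the watermarks above (illustrative, assuming a part
 * whose PBA gives a 32 KB receive buffer):
 *
 *	rx_buf_size       = 32768
 *	hw->fc.high_water = 32768 - PMD_ROUNDUP(1518 * 2, 1024)
 *	                  = 32768 - 3072 = 29696
 *	hw->fc.low_water  = 29696 - 1500 = 28196
 *
 * i.e. XOFF is sent once less than ~3 KB (two max-size frames, rounded up
 * to 1 KB) remains free, and XON resumes after 1500 bytes have drained.
 */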
/* This function is based on em_update_stats_counters() in e1000/if_em.c */
static void
eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int pause_frames;

	if (hw->phy.media_type == e1000_media_type_copper ||
			(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/*
	 * For the 64-bit byte counters the low dword must be read first.
	 * Both registers clear on the read of the high dword.
	 */
	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->tor += E1000_READ_REG(hw, E1000_TORH);
	stats->tot += E1000_READ_REG(hw, E1000_TOTH);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	if (hw->mac.type >= e1000_82571) {
		stats->iac += E1000_READ_REG(hw, E1000_IAC);
		stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
		stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
		stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
		stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
		stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
		stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
		stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
		stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
	}

	if (hw->mac.type >= e1000_82543) {
		stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
		stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
		stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
	}

	if (rte_stats == NULL)
		return;

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs +
			     stats->rlec + stats->ruc + stats->roc +
			     stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes   = stats->gorc;
	rte_stats->obytes   = stats->gotc;
}

static void
eth_em_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *hw_stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_em_stats_get(dev, NULL);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
}
static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	em_rxq_intr_enable(hw);
	rte_intr_enable(&dev->pci_dev->intr_handle);

	return 0;
}

static int
eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	em_rxq_intr_disable(hw);

	return 0;
}

static uint32_t
em_get_max_pktlen(const struct e1000_hw *hw)
{
	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_82574:
	case e1000_80003es2lan: /* 9K Jumbo Frame size */
	case e1000_82583:
		return 0x2412;
	case e1000_pchlan:
		return 0x1000;
	/* Adapters that do not support jumbo frames */
	case e1000_ich8lan:
		return ETHER_MAX_LEN;
	default:
		return MAX_JUMBO_FRAME_SIZE;
	}
}
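/*
 * For reference (illustrative arithmetic): 0x2412 = 9234 bytes, i.e. the
 * 9K jumbo frame limit of those parts, and 0x1000 = 4096 bytes for PCH.
 * e1000_ich8lan is capped at ETHER_MAX_LEN (1518) since it does not
 * support jumbo frames at all.
 */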
static void
eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;

	/*
	 * Starting with 631xESB, hw supports 2 TX/RX queues per port.
	 * Unfortunately, all these NICs have just one TX context.
	 * So we have few choices for TX:
	 * - Use just one TX queue.
	 * - Allow cksum offload only for one TX queue.
	 * - Don't allow TX cksum offload at all.
	 * For now, option #1 was chosen.
	 * To use the second RX queue we have to use the extended RX
	 * descriptor (Multiple Receive Queues are mutually exclusive with
	 * UDP fragmentation and are not supported when a legacy receive
	 * descriptor format is used).
	 * That means separate RX routines, as legacy NICs (82540, 82545)
	 * don't support the extended RXD.
	 * To avoid that, we support just one RX queue for now (no RSS).
	 */

	dev_info->max_rx_queues = 1;
	dev_info->max_tx_queues = 1;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = E1000_MAX_RING_DESC,
		.nb_min = E1000_MIN_RING_DESC,
		.nb_align = EM_RXD_ALIGN,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = E1000_MAX_RING_DESC,
		.nb_min = E1000_MIN_RING_DESC,
		.nb_align = EM_TXD_ALIGN,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G;
}

/* return 0 means link status changed, -1 means not changed */
static int
eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case e1000_media_type_fiber:
			e1000_check_for_link(hw);
			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
					E1000_STATUS_LU);
			break;

		case e1000_media_type_internal_serdes:
			e1000_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));
	rte_em_dev_atomic_read_link_status(dev, &link);
	old = link;

	/* Now we check if a transition has happened */
	if (link_check && (link.link_status == ETH_LINK_DOWN)) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);
	} else if (!link_check && (link.link_status == ETH_LINK_UP)) {
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_SPEED_FIXED;
	}
	rte_em_dev_atomic_write_link_status(dev, &link);

	/* not changed */
	if (old.link_status == link.link_status)
		return -1;

	/* changed */
	return 0;
}
/*
 * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
 */
static void
em_hw_control_acquire(struct e1000_hw *hw)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware know the driver has taken over */
	if (hw->mac.type == e1000_82573) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);

	} else {
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/*
 * em_hw_control_release resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions of the
 * f/w this means that the network i/f is closed.
 */
static void
em_hw_control_release(struct e1000_hw *hw)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware take over control of h/w */
	if (hw->mac.type == e1000_82573) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else {
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 */
static void
em_init_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;  /* Mng Port 623 */
		manc2h |= 1 << 6;  /* Mng Port 664 */
		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to hardware management
 * controller if there is one.
 */
static void
em_release_manageability(struct e1000_hw *hw)
{
	uint32_t manc;

	if (e1000_enable_mng_pass_thru(hw)) {
		manc = E1000_READ_REG(hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}
static void
eth_em_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_em_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);
	if (dev->data->all_multicast == 1)
		rctl |= E1000_RCTL_MPE;
	else
		rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_em_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_em_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static int
eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
			E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
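/*
 * Worked example for eth_em_vlan_filter_set() (illustrative, assuming the
 * usual E1000_VFTA_ENTRY_SHIFT = 5, entry mask 0x7F and bit-shift mask
 * 0x1F): for vlan_id = 100 (0b1100100),
 *
 *	vid_idx = (100 >> 5) & 0x7F = 3
 *	vid_bit = 1 << (100 & 0x1F) = 1 << 4
 *
 * so bit 4 of VFTA[3] controls whether VLAN 100 passes the filter.
 */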
static void
em_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* Filter Table Disable */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg &= ~E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
em_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* restore vfta from local copy */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}

static void
em_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Disable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Enable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			em_vlan_hw_strip_enable(dev);
		else
			em_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			em_vlan_hw_filter_enable(dev);
		else
			em_vlan_hw_filter_disable(dev);
	}
}

/*
 * It enables the interrupt mask and then enables the interrupt.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t regval;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* clear interrupt */
	E1000_READ_REG(hw, E1000_ICR);
	regval = E1000_READ_REG(hw, E1000_IMS);
	E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
	return 0;
}

/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_READ_REG(hw, E1000_ICR);
	em_rxq_intr_enable(hw);
	return 0;
}

/*
 * It enables the receive packet interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 */
static void
em_rxq_intr_enable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);
}

/*
 * It disables the link status change (LSC) interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 */
static void
em_lsc_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC);
	E1000_WRITE_FLUSH(hw);
}

/*
 * It disables the receive packet interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 */
static void
em_rxq_intr_disable(struct e1000_hw *hw)
{
	E1000_READ_REG(hw, E1000_ICR);
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);
}
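/*
 * A note on the register usage above (descriptive, per the e1000
 * interrupt scheme): ICR is read-to-clear and reports pending causes;
 * writing a bit to IMS *sets* that bit in the interrupt mask (enables the
 * cause), while writing the same bit to IMC *clears* it (disables the
 * cause). That is why enabling uses E1000_IMS and disabling uses
 * E1000_IMC with the same E1000_IMS_RXT0 / E1000_IMS_LSC bit values.
 */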
/*
 * It reads ICR and gets interrupt causes, checks them and sets a bit flag
 * to update the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	/* read-on-clear nic registers here */
	icr = E1000_READ_REG(hw, E1000_ICR);
	if (icr & E1000_ICR_LSC) {
		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	}

	return 0;
}

/*
 * It executes link_update after knowing an interrupt is present.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_action(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	uint32_t tctl, rctl;
	struct rte_eth_link link;
	int ret;

	if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
		return -1;

	intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
	rte_intr_enable(&(dev->pci_dev->intr_handle));

	/* set get_link_status to check register later */
	hw->mac.get_link_status = 1;
	ret = eth_em_link_update(dev, 0);

	/* check if link has changed */
	if (ret < 0)
		return 0;

	memset(&link, 0, sizeof(link));
	rte_em_dev_atomic_read_link_status(dev, &link);
	if (link.link_status) {
		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
			     dev->data->port_id, (unsigned)link.link_speed,
			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			     "full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
		     dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
		     dev->pci_dev->addr.devid, dev->pci_dev->addr.function);

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	if (link.link_status) {
		/* enable Tx/Rx */
		tctl |= E1000_TCTL_EN;
		rctl |= E1000_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~E1000_TCTL_EN;
		rctl &= ~E1000_RCTL_EN;
	}
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);

	return 0;
}

/**
 * Interrupt handler which shall be registered at first.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
eth_em_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
							void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_em_interrupt_get_status(dev);
	eth_em_interrupt_action(dev);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
}

static int
eth_em_led_on(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_em_led_off(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to actual setting of
	 * the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	if (ctrl & E1000_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & E1000_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static int
eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	int err;
	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
		e1000_fc_none,
		e1000_fc_rx_pause,
		e1000_fc_tx_pause,
		e1000_fc_full
	};
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;
	rx_buf_size = em_get_rx_buffer_size(hw);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - ETHER_MAX_LEN;
	if ((fc_conf->high_water > max_high_water) ||
	    (fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
		PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
	hw->fc.pause_time     = fc_conf->pause_time;
	hw->fc.high_water     = fc_conf->high_water;
	hw->fc.low_water      = fc_conf->low_water;
	hw->fc.send_xon       = fc_conf->send_xon;

	err = e1000_setup_link_generic(hw);
	if (err == E1000_SUCCESS) {
		/* check if we want to forward MAC frames - driver doesn't have
		 * native capability to do that, so we'll write the registers
		 * ourselves */
		rctl = E1000_READ_REG(hw, E1000_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= E1000_RCTL_PMCF;
		else
			rctl &= ~E1000_RCTL_PMCF;

		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
	return -EIO;
}

static void
eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, __rte_unused uint32_t pool)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	e1000_rar_set(hw, mac_addr->addr_bytes, index);
}

static void
eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[ETHER_ADDR_LEN];
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(addr, 0, sizeof(addr));

	e1000_rar_set(hw, addr, index);
}
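/*
 * eth_em_mtu_set() below derives the on-wire frame size from the MTU as
 * mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE (14 + 4 + 4 = 22
 * bytes of overhead). Illustrative arithmetic: a hypothetical MTU of 9000
 * gives frame_size = 9022, which exceeds ETHER_MAX_LEN (1518) and thus
 * switches the port into jumbo mode (RCTL.LPE set).
 */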
static int
eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	struct e1000_hw *hw;
	uint32_t frame_size;
	uint32_t rctl;

	eth_em_infos_get(dev, &dev_info);
	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* refuse mtu that requires the support of scattered packets when this
	 * feature has not been enabled before. */
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	rctl = E1000_READ_REG(hw, E1000_RCTL);

	/* switch to jumbo mode if needed */
	if (frame_size > ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
		rctl |= E1000_RCTL_LPE;
	} else {
		dev->data->dev_conf.rxmode.jumbo_frame = 0;
		rctl &= ~E1000_RCTL_LPE;
	}
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	return 0;
}

static int
eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
			struct ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}

DRIVER_REGISTER_PCI(net_e1000_em, rte_em_pmd.pci_drv);
DRIVER_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);