/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

#define EM_EIAC 0x000DC

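/*
 * Round x up to the nearest multiple of y;
 * e.g. PMD_ROUNDUP(3036, 1024) == 3072.
 */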
#define PMD_ROUNDUP(x,y) (((x) + (y) - 1)/(y) * (y))

static int eth_em_configure(struct rte_eth_dev *dev);
static int eth_em_start(struct rte_eth_dev *dev);
static void eth_em_stop(struct rte_eth_dev *dev);
static void eth_em_close(struct rte_eth_dev *dev);
static int eth_em_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_em_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_em_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_em_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_em_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int eth_em_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_em_stats_reset(struct rte_eth_dev *dev);
static int eth_em_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_em_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_em_interrupt_action(struct rte_eth_dev *dev,
				struct rte_intr_handle *handle);
static void eth_em_interrupt_handler(void *param);

static int em_hw_init(struct e1000_hw *hw);
static int em_hardware_init(struct e1000_hw *hw);
static void em_hw_control_acquire(struct e1000_hw *hw);
static void em_hw_control_release(struct e1000_hw *hw);
static void em_init_manageability(struct e1000_hw *hw);
static void em_release_manageability(struct e1000_hw *hw);

static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev);

static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static void em_lsc_intr_disable(struct e1000_hw *hw);
static void em_rxq_intr_enable(struct e1000_hw *hw);
static void em_rxq_intr_disable(struct e1000_hw *hw);

static int eth_em_led_on(struct rte_eth_dev *dev);
static int eth_em_led_off(struct rte_eth_dev *dev);

static int em_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_em_rar_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *mac_addr,
			uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
					struct rte_ether_addr *addr);

static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
				struct rte_ether_addr *mc_addr_set,
				uint32_t nb_mc_addr);

#define EM_FC_PAUSE_TIME 0x0680
#define EM_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

static enum e1000_fc_mode em_fc_setting = e1000_fc_full;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_em_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82540EM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_QUAD) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571PT_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER_LP) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82573L) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH2_LV_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LBG_I219_LM3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM5) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V5) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM6) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V6) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM7) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V7) },
	{ .vendor_id = 0, /* sentinel */ },
};

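/*
 * Generic device operations. Note that the rx/tx burst handlers are not
 * set here: eth_em_dev_init() assigns them directly on the rte_eth_dev
 * structure.
 */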
static const struct eth_dev_ops eth_em_ops = {
	.dev_configure = eth_em_configure,
	.dev_start = eth_em_start,
	.dev_stop = eth_em_stop,
	.dev_close = eth_em_close,
	.promiscuous_enable = eth_em_promiscuous_enable,
	.promiscuous_disable = eth_em_promiscuous_disable,
	.allmulticast_enable = eth_em_allmulticast_enable,
	.allmulticast_disable = eth_em_allmulticast_disable,
	.link_update = eth_em_link_update,
	.stats_get = eth_em_stats_get,
	.stats_reset = eth_em_stats_reset,
	.dev_infos_get = eth_em_infos_get,
	.mtu_set = eth_em_mtu_set,
	.vlan_filter_set = eth_em_vlan_filter_set,
	.vlan_offload_set = eth_em_vlan_offload_set,
	.rx_queue_setup = eth_em_rx_queue_setup,
	.rx_queue_release = eth_em_rx_queue_release,
	.rx_queue_count = eth_em_rx_queue_count,
	.rx_descriptor_done = eth_em_rx_descriptor_done,
	.rx_descriptor_status = eth_em_rx_descriptor_status,
	.tx_descriptor_status = eth_em_tx_descriptor_status,
	.tx_queue_setup = eth_em_tx_queue_setup,
	.tx_queue_release = eth_em_tx_queue_release,
	.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_em_rx_queue_intr_disable,
	.dev_led_on = eth_em_led_on,
	.dev_led_off = eth_em_led_off,
	.flow_ctrl_get = eth_em_flow_ctrl_get,
	.flow_ctrl_set = eth_em_flow_ctrl_set,
	.mac_addr_set = eth_em_default_mac_addr_set,
	.mac_addr_add = eth_em_rar_set,
	.mac_addr_remove = eth_em_rar_clear,
	.set_mc_addr_list = eth_em_set_mc_addr_list,
	.rxq_info_get = em_rxq_info_get,
	.txq_info_get = em_txq_info_get,
};

/**
 *  eth_em_dev_is_ich8 - Check for ICH8 device
 *  @hw: pointer to the HW structure
 *
 *  return TRUE for ICH8, otherwise FALSE
 **/
static bool
eth_em_dev_is_ich8(struct e1000_hw *hw)
{
	DEBUGFUNC("eth_em_dev_is_ich8");

	switch (hw->device_id) {
	case E1000_DEV_ID_PCH2_LV_LM:
	case E1000_DEV_ID_PCH_LPT_I217_LM:
	case E1000_DEV_ID_PCH_LPT_I217_V:
	case E1000_DEV_ID_PCH_LPTLP_I218_LM:
	case E1000_DEV_ID_PCH_LPTLP_I218_V:
	case E1000_DEV_ID_PCH_I218_V2:
	case E1000_DEV_ID_PCH_I218_LM2:
	case E1000_DEV_ID_PCH_I218_V3:
	case E1000_DEV_ID_PCH_I218_LM3:
	case E1000_DEV_ID_PCH_SPT_I219_LM:
	case E1000_DEV_ID_PCH_SPT_I219_V:
	case E1000_DEV_ID_PCH_SPT_I219_LM2:
	case E1000_DEV_ID_PCH_SPT_I219_V2:
	case E1000_DEV_ID_PCH_LBG_I219_LM3:
	case E1000_DEV_ID_PCH_SPT_I219_LM4:
	case E1000_DEV_ID_PCH_SPT_I219_V4:
	case E1000_DEV_ID_PCH_SPT_I219_LM5:
	case E1000_DEV_ID_PCH_SPT_I219_V5:
	case E1000_DEV_ID_PCH_CNP_I219_LM6:
	case E1000_DEV_ID_PCH_CNP_I219_V6:
	case E1000_DEV_ID_PCH_CNP_I219_LM7:
	case E1000_DEV_ID_PCH_CNP_I219_V7:
		return 1;
	default:
		return 0;
	}
}

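/*
 * Per-port initialisation, invoked by eth_em_pci_probe() through
 * rte_eth_dev_pci_generic_probe().
 */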
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);

	eth_dev->dev_ops = &eth_em_ops;
	eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
	eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
	eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst =
				(eth_rx_burst_t)&eth_em_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;
	adapter->stopped = 0;

	/* For ICH8 support we'll need to map the flash memory BAR */
	if (eth_em_dev_is_ich8(hw))
		hw->flash_address = (void *)pci_dev->mem_resource[1].addr;

	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
			em_hw_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
			"failed to init HW",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
		return -ENODEV;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000", RTE_ETHER_ADDR_LEN *
			hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
		eth_dev->data->mac_addrs);

	/* Pass the information to rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

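	/* Register the LSC interrupt callback. From this point the EAL
	 * interrupt thread may invoke eth_em_interrupt_handler(), which
	 * reads ICR and, on a link-status change, refreshes the cached
	 * link state and notifies RTE_ETH_EVENT_INTR_LSC subscribers.
	 */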
	rte_intr_callback_register(intr_handle,
				   eth_em_interrupt_handler, eth_dev);

	return 0;
}

static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	eth_em_close(eth_dev);

	return 0;
}

static int eth_em_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_em_dev_init);
}

static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_em_dev_uninit);
}

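/*
 * RTE_PCI_DRV_NEED_MAPPING asks the PCI bus driver to map the device BARs;
 * RTE_PCI_DRV_INTR_LSC advertises link-status-change interrupt support.
 */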
static struct rte_pci_driver rte_em_pmd = {
	.id_table = pci_id_em_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_em_pci_probe,
	.remove = eth_em_pci_remove,
};

static int
em_hw_init(struct e1000_hw *hw)
{
	int diag;

	diag = hw->mac.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "MAC Initialization Error");
		return diag;
	}
	diag = hw->nvm.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "NVM Initialization Error");
		return diag;
	}
	diag = hw->phy.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "PHY Initialization Error");
		return diag;
	}
	(void) e1000_get_bus_info(hw);

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	e1000_init_script_state_82541(hw, TRUE);
	e1000_set_tbi_compatibility_82543(hw, TRUE);

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state; this is important in reading the nvm
	 * and mac from that.
	 */
	e1000_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again;
		 * if it fails a second time it's a real issue.
		 */
		diag = e1000_validate_nvm_checksum(hw);
		if (diag < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			goto error;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	diag = e1000_read_mac_addr(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		goto error;
	}

	/* Now initialize the hardware */
	diag = em_hardware_init(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		goto error;
	}

	hw->mac.get_link_status = 1;

	/* Indicate SOL/IDER usage */
	diag = e1000_check_reset_block(hw);
	if (diag < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			"SOL/IDER session");
	}
	return 0;

error:
	em_hw_control_release(hw);
	return diag;
}

static int
eth_em_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;

	return 0;
}

static void
em_set_pba(struct e1000_hw *hw)
{
	uint32_t pba;

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * After the 82547 the buffer was reduced to 40K.
	 */
	switch (hw->mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2:
		/* 82547: Total Packet Buffer is 40K */
		pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case e1000_82574:
	case e1000_82583:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	case e1000_ich9lan:
	case e1000_ich10lan:
		pba = E1000_PBA_10K;
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
		pba = E1000_PBA_26K;
		break;
	default:
		pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
	}

	E1000_WRITE_REG(hw, E1000_PBA, pba);
}

static void
eth_em_rxtx_control(struct rte_eth_dev *dev,
		    bool enable)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tctl, rctl;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	if (enable) {
		/* enable Tx/Rx */
		tctl |= E1000_TCTL_EN;
		rctl |= E1000_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~E1000_TCTL_EN;
		rctl &= ~E1000_RCTL_EN;
	}
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);
}

static int
eth_em_start(struct rte_eth_dev *dev)
{
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	eth_em_stop(dev);

	e1000_power_up_phy(hw);

	/* Set default PBA value */
	em_set_pba(hw);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset. We make a duplicate
	 * in RAR[14] for that eventuality; this assures
	 * the interface continues to function.
	 */
	if (hw->mac.type == e1000_82571) {
		e1000_set_laa_state_82571(hw, TRUE);
		e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}

	E1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN);

	/* Configure for OS presence */
	em_init_manageability(hw);

	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}

		/* enable rx interrupt */
		em_rxq_intr_enable(hw);
	}

	eth_em_tx_init(dev);

	ret = eth_em_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		em_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
	       ETH_VLAN_EXTEND_MASK;
	ret = eth_em_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
		em_dev_clear_queues(dev);
		return ret;
	}

	/* Set Interrupt Throttling Rate to maximum allowed value. */
	E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);
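	/* Assuming the usual 256 ns ITR granularity on these parts, 0xFFFF
	 * corresponds to roughly a 16.8 ms minimum gap between interrupts,
	 * i.e. the strongest moderation the register allows.
	 */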

	/* Setup link speed and duplex */
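	/* link_speeds semantics (from rte_ethdev): 0 requests full
	 * autonegotiation; otherwise it is a bitmask of ETH_LINK_SPEED_*
	 * values, and ETH_LINK_SPEED_FIXED forces the single selected
	 * speed/duplex, e.g. ETH_LINK_SPEED_100M | ETH_LINK_SPEED_FIXED
	 * forces 100 Mb/s full-duplex.
	 */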
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		hw->mac.autoneg = 1;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0) {
			ret = eth_em_interrupt_setup(dev);
			if (ret) {
				PMD_INIT_LOG(ERR, "Unable to setup interrupts");
				em_dev_clear_queues(dev);
				return ret;
			}
		}
	} else {
		rte_intr_callback_unregister(intr_handle,
					     eth_em_interrupt_handler,
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}
	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0)
		eth_em_rxq_interrupt_setup(dev);

	rte_intr_enable(intr_handle);

	adapter->stopped = 0;

	eth_em_rxtx_control(dev, true);
	eth_em_link_update(dev, 0);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	em_dev_clear_queues(dev);
	return -EINVAL;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_em_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	eth_em_rxtx_control(dev, false);
	em_rxq_intr_disable(hw);
	em_lsc_intr_disable(hw);

	e1000_reset_hw(hw);

	/* Flush desc rings for i219 */
	if (hw->mac.type == e1000_pch_spt || hw->mac.type == e1000_pch_cnp)
		em_flush_desc_rings(dev);

	if (hw->mac.type >= e1000_82544)
		E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Power down the phy. Needed to make the link go down */
	e1000_power_down_phy(hw);

	em_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_em_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

static void
eth_em_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	eth_em_stop(dev);
	adapter->stopped = 1;
	em_dev_free_queues(dev);
	e1000_phy_hw_reset(hw);
	em_release_manageability(hw);
	em_hw_control_release(hw);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     eth_em_interrupt_handler, dev);
}

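/*
 * The low 16 bits of the PBA register hold the Rx packet buffer
 * allocation in KB; e.g. a PBA value of 0x0020 yields 0x20 << 10
 * = 32768 bytes.
 */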
static int
em_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10);
	return rx_buf_size;
}

/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
em_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Issue a global reset */
	e1000_reset_hw(hw);

	/* Let the firmware know the OS is in control */
	em_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = em_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size -
		PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);
	hw->fc.low_water = hw->fc.high_water - 1500;
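	/*
	 * Worked example, assuming a 32 KB Rx buffer: high_water =
	 * 32768 - PMD_ROUNDUP(2 * 1518, 1024) = 32768 - 3072 = 29696,
	 * and low_water = 29696 - 1500 = 28196.
	 */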

	if (hw->mac.type == e1000_80003es2lan)
		hw->fc.pause_time = UINT16_MAX;
	else
		hw->fc.pause_time = EM_FC_PAUSE_TIME;

	hw->fc.send_xon = 1;

	/* Set Flow control, use the tunable location if sane */
	if (em_fc_setting <= e1000_fc_full)
		hw->fc.requested_mode = em_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Workaround: no TX flow ctrl for PCH */
	if (hw->mac.type == e1000_pchlan)
		hw->fc.requested_mode = e1000_fc_rx_pause;

	/* Override - device-specific flow control settings for PCH2LAN */
	if (hw->mac.type == e1000_pch2lan) {
		hw->fc.high_water = 0x5C20;
		hw->fc.low_water = 0x5048;
		hw->fc.pause_time = 0x0650;
		hw->fc.refresh_time = 0x0400;
	} else if (hw->mac.type == e1000_pch_lpt ||
		   hw->mac.type == e1000_pch_spt ||
		   hw->mac.type == e1000_pch_cnp) {
		hw->fc.requested_mode = e1000_fc_full;
	}

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;
	e1000_check_for_link(hw);
	return 0;
}

/* This function is based on em_update_stats_counters() in e1000/if_em.c */
static int
eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int pause_frames;

	if (hw->phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/*
	 * For the 64-bit byte counters the low dword must be read first.
	 * Both registers clear on the read of the high dword.
	 */

	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->tor += E1000_READ_REG(hw, E1000_TORH);
	stats->tot += E1000_READ_REG(hw, E1000_TOTH);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	if (hw->mac.type >= e1000_82571) {
		stats->iac += E1000_READ_REG(hw, E1000_IAC);
		stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
		stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
		stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
		stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
		stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
		stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
		stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
		stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
	}

	if (hw->mac.type >= e1000_82543) {
		stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
		stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
		stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
	}

	if (rte_stats == NULL)
		return -EINVAL;

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs +
			     stats->rlec + stats->ruc + stats->roc +
			     stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes = stats->gorc;
	rte_stats->obytes = stats->gotc;
	return 0;
}

static int
eth_em_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *hw_stats =
		E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_em_stats_get(dev, NULL);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	em_rxq_intr_enable(hw);
	rte_intr_ack(intr_handle);

	return 0;
}

static int
eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	em_rxq_intr_disable(hw);

	return 0;
}

uint32_t
em_get_max_pktlen(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_82574:
	case e1000_80003es2lan: /* 9K Jumbo Frame size */
	case e1000_82583:
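		/* 0x2412 is 9234 bytes, i.e. presumably the 9K (9216-byte)
		 * payload noted above plus 18 bytes of L2 header and CRC.
		 */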
		return 0x2412;
	case e1000_pchlan:
		return 0x1000;
	/* Adapters that do not support jumbo frames */
	case e1000_ich8lan:
		return RTE_ETHER_MAX_LEN;
	default:
		return MAX_JUMBO_FRAME_SIZE;
	}
}

static int
eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;

	/*
	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
	 * Unfortunately, all these NICs have just one TX context.
	 * So we have few choices for TX:
	 * - Use just one TX queue.
	 * - Allow cksum offload only for one TX queue.
	 * - Don't allow TX cksum offload at all.
	 * For now, option #1 was chosen.
	 * To use the second RX queue we have to use the extended RX descriptor
	 * (Multiple Receive Queues are mutually exclusive with UDP
	 * fragmentation and are not supported when a legacy receive
	 * descriptor format is used).
	 * That means separate RX routines - as legacy NICs (82540, 82545)
	 * don't support extended RXDs.
	 * To avoid that we support just one RX queue for now (no RSS).
	 */

	dev_info->max_rx_queues = 1;
	dev_info->max_tx_queues = 1;

	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
	dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
				    dev_info->tx_queue_offload_capa;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = E1000_MAX_RING_DESC,
		.nb_min = E1000_MIN_RING_DESC,
		.nb_align = EM_RXD_ALIGN,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = E1000_MAX_RING_DESC,
		.nb_min = E1000_MIN_RING_DESC,
		.nb_align = EM_TXD_ALIGN,
		.nb_seg_max = EM_TX_MAX_SEG,
		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G;

	/* Preferred queue parameters */
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = 256;
	dev_info->default_rxportconf.ring_size = 256;

	return 0;
}

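/*
 * When wait_to_complete is set, this polls for link for up to
 * EM_LINK_UPDATE_CHECK_TIMEOUT * EM_LINK_UPDATE_CHECK_INTERVAL
 * = 90 * 100 ms = 9 s.
 */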
/* return 0 means link status changed, -1 means not changed */
static int
eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;
	int link_up, count;

	link_up = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_up = !hw->mac.get_link_status;
			break;

		case e1000_media_type_fiber:
			e1000_check_for_link(hw);
			link_up = (E1000_READ_REG(hw, E1000_STATUS) &
					E1000_STATUS_LU);
			break;

		case e1000_media_type_internal_serdes:
			e1000_check_for_link(hw);
			link_up = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_up || wait_to_complete == 0)
			break;
		rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_up) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);
	} else {
		link.link_speed = ETH_SPEED_NUM_NONE;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

/*
 * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
 */
static void
em_hw_control_acquire(struct e1000_hw *hw)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware know the driver has taken over */
	if (hw->mac.type == e1000_82573) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);

	} else {
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/*
 * em_hw_control_release resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions of the
 * f/w this means that the network i/f is closed.
 */
static void
em_hw_control_release(struct e1000_hw *hw)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware take over control of h/w */
	if (hw->mac.type == e1000_82573) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else {
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 */
static void
em_init_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;  /* Mng Port 623 */
		manc2h |= 1 << 6;  /* Mng Port 664 */
		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to hardware management
 * controller if there is one.
 */
static void
em_release_manageability(struct e1000_hw *hw)
{
	uint32_t manc;

	if (e1000_enable_mng_pass_thru(hw)) {
		manc = E1000_READ_REG(hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

static int
eth_em_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}

static int
eth_em_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);
	if (dev->data->all_multicast == 1)
		rctl |= E1000_RCTL_MPE;
	else
		rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}

static int
eth_em_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}

static int
eth_em_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}

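/*
 * The VFTA is a 128 x 32-bit bitmap covering all 4096 VLAN IDs.
 * A shadow copy is kept in the private port data so that
 * em_vlan_hw_filter_enable() can restore the whole table.
 */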
static int
eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
			E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
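	/* Example, assuming the usual 32-bit entries: vlan_id 1000 maps to
	 * vid_idx = 1000 >> 5 = 31 and vid_bit = 1 << (1000 & 0x1F) = 1 << 8.
	 */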
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
em_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* Filter Table Disable */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg &= ~E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
em_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* restore vfta from local copy */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}

static void
em_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Disable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Enable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

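/*
 * 'mask' selects which VLAN offload settings to re-evaluate
 * (ETH_VLAN_STRIP_MASK, ETH_VLAN_FILTER_MASK); the desired state itself
 * is read from dev->data->dev_conf.rxmode.offloads.
 */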
static int
eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			em_vlan_hw_strip_enable(dev);
		else
			em_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			em_vlan_hw_filter_enable(dev);
		else
			em_vlan_hw_filter_disable(dev);
	}

	return 0;
}

/*
 * It enables the interrupt mask and then enables the interrupt.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t regval;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* clear interrupt */
	E1000_READ_REG(hw, E1000_ICR);
	regval = E1000_READ_REG(hw, E1000_IMS);
	E1000_WRITE_REG(hw, E1000_IMS,
			regval | E1000_ICR_LSC | E1000_ICR_OTHER);
	return 0;
}

/*
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_READ_REG(hw, E1000_ICR);
	em_rxq_intr_enable(hw);
	return 0;
}

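/*
 * IMS and IMC are the interrupt mask set/clear registers: writing a
 * cause bit to IMS enables that interrupt, writing it to IMC masks it;
 * reading ICR clears the currently latched causes.
 */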
/*
 * Enables the receive packet interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_rxq_intr_enable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);
}

/*
 * Disables the link status change (LSC) interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_lsc_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC | E1000_IMS_OTHER);
	E1000_WRITE_FLUSH(hw);
}

/*
 * Disables the receive packet interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_rxq_intr_disable(struct e1000_hw *hw)
{
	E1000_READ_REG(hw, E1000_ICR);
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);
}

/*
 * Reads ICR to get the interrupt causes, checks them and sets a bit flag
 * to update the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	/* read-on-clear nic registers here */
	icr = E1000_READ_REG(hw, E1000_ICR);
	if (icr & E1000_ICR_LSC)
		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Executes link_update after knowing an interrupt is present.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_action(struct rte_eth_dev *dev,
			struct rte_intr_handle *intr_handle)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link;
	int ret;

	if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
		return -1;

	intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
	rte_intr_ack(intr_handle);

	/* set get_link_status to check register later */
	hw->mac.get_link_status = 1;
	ret = eth_em_link_update(dev, 0);

	/* check if link has changed */
	if (ret < 0)
		return 0;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
			     dev->data->port_id, link.link_speed,
			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			     "full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	return 0;
}

/**
 * Interrupt handler which shall be registered at first.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
eth_em_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_em_interrupt_get_status(dev);
	eth_em_interrupt_action(dev, dev->intr_handle);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int
eth_em_led_on(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_em_led_off(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
}

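/*
 * Report the flow-control mode that is actually in effect (the CTRL
 * register's TFCE/RFCE bits) rather than the requested configuration.
 */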
static int
eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to actual setting of
	 * the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	if (ctrl & E1000_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & E1000_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static int
eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	int err;
	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
		e1000_fc_none,
		e1000_fc_rx_pause,
		e1000_fc_tx_pause,
		e1000_fc_full
	};
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;
	rx_buf_size = em_get_rx_buffer_size(hw);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if ((fc_conf->high_water > max_high_water) ||
	    (fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
		PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = e1000_setup_link_generic(hw);
	if (err == E1000_SUCCESS) {
		/* check if we want to forward MAC frames - driver doesn't have
		 * native capability to do that, so we'll write the registers
		 * ourselves */

		rctl = E1000_READ_REG(hw, E1000_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= E1000_RCTL_PMCF;
		else
			rctl &= ~E1000_RCTL_PMCF;

		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
	return -EIO;
}

static int
eth_em_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, __rte_unused uint32_t pool)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return e1000_rar_set(hw, mac_addr->addr_bytes, index);
}

static void
eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[RTE_ETHER_ADDR_LEN];
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(addr, 0, sizeof(addr));

	e1000_rar_set(hw, addr, index);
}

static int
eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *addr)
{
	eth_em_rar_clear(dev, 0);

	return eth_em_rar_set(dev, (void *)addr, 0, 0);
}

static int
eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	struct e1000_hw *hw;
	uint32_t frame_size;
	uint32_t rctl;
	int ret;

	ret = eth_em_infos_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
		VLAN_TAG_SIZE;
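	/* e.g. with the standard 1500-byte MTU: 1500 + 14 (Ethernet header)
	 * + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
	 */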

	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* refuse mtu that requires the support of scattered packets when this
	 * feature has not been enabled before. */
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	rctl = E1000_READ_REG(hw, E1000_RCTL);

	/* switch to jumbo mode if needed */
	if (frame_size > RTE_ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl |= E1000_RCTL_LPE;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl &= ~E1000_RCTL_LPE;
	}
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	return 0;
}

static int
eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}

RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");

/* see e1000_logs.c */
RTE_INIT(igb_init_log)
{
	e1000_igb_init_log();
}