/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

#define EM_EIAC 0x000DC
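/*
 * Round x up to the next multiple of y using integer arithmetic only;
 * e.g. PMD_ROUNDUP(3036, 1024) == 3072. Used below when sizing the
 * flow-control high-water mark.
 */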
#define PMD_ROUNDUP(x,y) (((x) + (y) - 1)/(y) * (y))


static int eth_em_configure(struct rte_eth_dev *dev);
static int eth_em_start(struct rte_eth_dev *dev);
static void eth_em_stop(struct rte_eth_dev *dev);
static void eth_em_close(struct rte_eth_dev *dev);
static void eth_em_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_em_promiscuous_disable(struct rte_eth_dev *dev);
static void eth_em_allmulticast_enable(struct rte_eth_dev *dev);
static void eth_em_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_em_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int eth_em_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static void eth_em_stats_reset(struct rte_eth_dev *dev);
static void eth_em_infos_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int eth_em_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int eth_em_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_em_interrupt_action(struct rte_eth_dev *dev,
				   struct rte_intr_handle *handle);
static void eth_em_interrupt_handler(void *param);

static int em_hw_init(struct e1000_hw *hw);
static int em_hardware_init(struct e1000_hw *hw);
static void em_hw_control_acquire(struct e1000_hw *hw);
static void em_hw_control_release(struct e1000_hw *hw);
static void em_init_manageability(struct e1000_hw *hw);
static void em_release_manageability(struct e1000_hw *hw);

static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_em_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void em_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void em_vlan_hw_strip_disable(struct rte_eth_dev *dev);

/*
static void eth_em_vlan_filter_set(struct rte_eth_dev *dev,
					uint16_t vlan_id, int on);
*/

static int eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static void em_lsc_intr_disable(struct e1000_hw *hw);
static void em_rxq_intr_enable(struct e1000_hw *hw);
static void em_rxq_intr_disable(struct e1000_hw *hw);

static int eth_em_led_on(struct rte_eth_dev *dev);
static int eth_em_led_off(struct rte_eth_dev *dev);

static int em_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
			  uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);

static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
				   struct ether_addr *mc_addr_set,
				   uint32_t nb_mc_addr);

#define EM_FC_PAUSE_TIME 0x0680
#define EM_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define EM_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_em_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82540EM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_QUAD) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571PT_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER_LP) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82573L) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LBG_I219_LM3) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM5) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V5) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM6) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V6) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM7) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V7) },
	{ .vendor_id = 0, /* sentinel */ },
};
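/*
 * The ops table below wires this PMD into the generic ethdev API;
 * callbacks the device cannot support are simply left unset (NULL).
 */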
static const struct eth_dev_ops eth_em_ops = {
	.dev_configure = eth_em_configure,
	.dev_start = eth_em_start,
	.dev_stop = eth_em_stop,
	.dev_close = eth_em_close,
	.promiscuous_enable = eth_em_promiscuous_enable,
	.promiscuous_disable = eth_em_promiscuous_disable,
	.allmulticast_enable = eth_em_allmulticast_enable,
	.allmulticast_disable = eth_em_allmulticast_disable,
	.link_update = eth_em_link_update,
	.stats_get = eth_em_stats_get,
	.stats_reset = eth_em_stats_reset,
	.dev_infos_get = eth_em_infos_get,
	.mtu_set = eth_em_mtu_set,
	.vlan_filter_set = eth_em_vlan_filter_set,
	.vlan_offload_set = eth_em_vlan_offload_set,
	.rx_queue_setup = eth_em_rx_queue_setup,
	.rx_queue_release = eth_em_rx_queue_release,
	.rx_queue_count = eth_em_rx_queue_count,
	.rx_descriptor_done = eth_em_rx_descriptor_done,
	.rx_descriptor_status = eth_em_rx_descriptor_status,
	.tx_descriptor_status = eth_em_tx_descriptor_status,
	.tx_queue_setup = eth_em_tx_queue_setup,
	.tx_queue_release = eth_em_tx_queue_release,
	.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_em_rx_queue_intr_disable,
	.dev_led_on = eth_em_led_on,
	.dev_led_off = eth_em_led_off,
	.flow_ctrl_get = eth_em_flow_ctrl_get,
	.flow_ctrl_set = eth_em_flow_ctrl_set,
	.mac_addr_add = eth_em_rar_set,
	.mac_addr_remove = eth_em_rar_clear,
	.set_mc_addr_list = eth_em_set_mc_addr_list,
	.rxq_info_get = em_rxq_info_get,
	.txq_info_get = em_txq_info_get,
};

/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *  - Pointer to the structure rte_eth_dev to read from.
 *  - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *  - On success, zero.
 *  - On failure, negative value.
 */
static inline int
rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *  - Pointer to the structure rte_eth_dev to write to.
 *  - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *  - On success, zero.
 *  - On failure, negative value.
 */
static inline int
rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 *  eth_em_dev_is_ich8 - Check for ICH8 device
 *  @hw: pointer to the HW structure
 *
 *  return TRUE for ICH8, otherwise FALSE
 **/
static bool
eth_em_dev_is_ich8(struct e1000_hw *hw)
{
	DEBUGFUNC("eth_em_dev_is_ich8");

	switch (hw->device_id) {
	case E1000_DEV_ID_PCH_LPT_I217_LM:
	case E1000_DEV_ID_PCH_LPT_I217_V:
	case E1000_DEV_ID_PCH_LPTLP_I218_LM:
	case E1000_DEV_ID_PCH_LPTLP_I218_V:
	case E1000_DEV_ID_PCH_I218_V2:
	case E1000_DEV_ID_PCH_I218_LM2:
	case E1000_DEV_ID_PCH_I218_V3:
	case E1000_DEV_ID_PCH_I218_LM3:
	case E1000_DEV_ID_PCH_SPT_I219_LM:
	case E1000_DEV_ID_PCH_SPT_I219_V:
	case E1000_DEV_ID_PCH_SPT_I219_LM2:
	case E1000_DEV_ID_PCH_SPT_I219_V2:
	case E1000_DEV_ID_PCH_LBG_I219_LM3:
	case E1000_DEV_ID_PCH_SPT_I219_LM4:
	case E1000_DEV_ID_PCH_SPT_I219_V4:
	case E1000_DEV_ID_PCH_SPT_I219_LM5:
	case E1000_DEV_ID_PCH_SPT_I219_V5:
	case E1000_DEV_ID_PCH_CNP_I219_LM6:
	case E1000_DEV_ID_PCH_CNP_I219_V6:
	case E1000_DEV_ID_PCH_CNP_I219_LM7:
	case E1000_DEV_ID_PCH_CNP_I219_V7:
		return 1;
	default:
		return 0;
	}
}
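/*
 * Per-port initialisation: map the BARs, run the shared e1000 HW init,
 * allocate the MAC address table and register the LSC interrupt handler.
 * Secondary processes only pick the matching Rx burst function and
 * return, since the primary process has already done the rest.
 */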
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);

	eth_dev->dev_ops = &eth_em_ops;
	eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
	eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
	eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst =
				(eth_rx_burst_t)&eth_em_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;
	adapter->stopped = 0;

	/* For ICH8 support we'll need to map the flash memory BAR */
	if (eth_em_dev_is_ich8(hw))
		hw->flash_address = (void *)pci_dev->mem_resource[1].addr;

	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS ||
			em_hw_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
			"failed to init HW",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
		return -ENODEV;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
			hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			"store MAC addresses",
			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.addr,
		eth_dev->data->mac_addrs);

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   eth_em_interrupt_handler, eth_dev);

	return 0;
}

static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (adapter->stopped == 0)
		eth_em_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     eth_em_interrupt_handler, eth_dev);

	return 0;
}
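/*
 * Probe/remove are delegated to the generic ethdev PCI helpers, which
 * allocate (and free) an rte_eth_dev carrying sizeof(struct e1000_adapter)
 * bytes of private data and then invoke the init/uninit routines above.
 */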
static int eth_em_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_em_dev_init);
}

static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_em_dev_uninit);
}

static struct rte_pci_driver rte_em_pmd = {
	.id_table = pci_id_em_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_em_pci_probe,
	.remove = eth_em_pci_remove,
};

static int
em_hw_init(struct e1000_hw *hw)
{
	int diag;

	diag = hw->mac.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "MAC Initialization Error");
		return diag;
	}
	diag = hw->nvm.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "NVM Initialization Error");
		return diag;
	}
	diag = hw->phy.ops.init_params(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "PHY Initialization Error");
		return diag;
	}
	(void) e1000_get_bus_info(hw);

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	e1000_init_script_state_82541(hw, TRUE);
	e1000_set_tbi_compatibility_82543(hw, TRUE);

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state; this is important in reading the nvm
	 * and mac from that.
	 */
	e1000_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again;
		 * if it fails a second time, it's a real issue.
		 */
		diag = e1000_validate_nvm_checksum(hw);
		if (diag < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			goto error;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	diag = e1000_read_mac_addr(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		goto error;
	}

	/* Now initialize the hardware */
	diag = em_hardware_init(hw);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		goto error;
	}

	hw->mac.get_link_status = 1;

	/* Indicate SOL/IDER usage */
	diag = e1000_check_reset_block(hw);
	if (diag < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			"SOL/IDER session");
	}
	return 0;

error:
	em_hw_control_release(hw);
	return diag;
}

static int
eth_em_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static void
em_set_pba(struct e1000_hw *hw)
{
	uint32_t pba;

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * After the 82547 the buffer was reduced to 40K.
	 */
	switch (hw->mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2:
		/* 82547: Total Packet Buffer is 40K */
		pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case e1000_82574:
	case e1000_82583:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;
	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;
	case e1000_ich9lan:
	case e1000_ich10lan:
		pba = E1000_PBA_10K;
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
		pba = E1000_PBA_26K;
		break;
	default:
		pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
	}

	E1000_WRITE_REG(hw, E1000_PBA, pba);
}

static int
eth_em_start(struct rte_eth_dev *dev)
{
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	eth_em_stop(dev);

	e1000_power_up_phy(hw);

	/* Set default PBA value */
	em_set_pba(hw);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset; we make a duplicate
	 * in RAR[14] for that eventuality, which assures
	 * the interface continues to function.
	 */
	if (hw->mac.type == e1000_82571) {
		e1000_set_laa_state_82571(hw, TRUE);
		e1000_rar_set(hw, hw->mac.addr, E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}

	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);

	/* Configure for OS presence */
	em_init_manageability(hw);

	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
					dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
						" intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}

		/* enable rx interrupt */
		em_rxq_intr_enable(hw);
	}

	eth_em_tx_init(dev);

	ret = eth_em_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		em_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
			ETH_VLAN_EXTEND_MASK;
	ret = eth_em_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to update vlan offload");
		em_dev_clear_queues(dev);
		return ret;
	}

	/* Set Interrupt Throttling Rate to maximum allowed value. */
	E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);
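	/*
	 * The rte link_speeds bitmap is translated into the PHY autoneg
	 * advertisement mask below; a single speed combined with
	 * ETH_LINK_SPEED_FIXED disables autonegotiation and forces that
	 * speed/duplex instead.
	 */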
	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		hw->mac.autoneg = 1;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0) {
			ret = eth_em_interrupt_setup(dev);
			if (ret) {
				PMD_INIT_LOG(ERR, "Unable to setup interrupts");
				em_dev_clear_queues(dev);
				return ret;
			}
		}
	} else {
		rte_intr_callback_unregister(intr_handle,
						eth_em_interrupt_handler,
						(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex\n");
	}
	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0)
		eth_em_rxq_interrupt_setup(dev);

	rte_intr_enable(intr_handle);

	adapter->stopped = 0;

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	em_dev_clear_queues(dev);
	return -EINVAL;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_em_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	em_rxq_intr_disable(hw);
	em_lsc_intr_disable(hw);

	e1000_reset_hw(hw);
	if (hw->mac.type >= e1000_82544)
		E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Power down the phy. Needed to make the link go down */
	e1000_power_down_phy(hw);

	em_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_em_dev_atomic_write_link_status(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_em_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

static void
eth_em_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);

	eth_em_stop(dev);
	adapter->stopped = 1;
	em_dev_free_queues(dev);
	e1000_phy_hw_reset(hw);
	em_release_manageability(hw);
	em_hw_control_release(hw);
}
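/*
 * The low 16 bits of the PBA register hold the Rx packet buffer
 * allocation in KB; shifting left by 10 converts that to bytes.
 */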
static int
em_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;

	rx_buf_size = ((E1000_READ_REG(hw, E1000_PBA) & UINT16_MAX) << 10);
	return rx_buf_size;
}

/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
em_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Issue a global reset */
	e1000_reset_hw(hw);

	/* Let the firmware know the OS is in control */
	em_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
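	/*
	 * For example, with a 32 KB Rx allocation this works out to
	 * high_water = 32768 - PMD_ROUNDUP(1518 * 2, 1024) = 29696 bytes
	 * and low_water = 28196 bytes.
	 */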
	rx_buf_size = em_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
	hw->fc.low_water = hw->fc.high_water - 1500;

	if (hw->mac.type == e1000_80003es2lan)
		hw->fc.pause_time = UINT16_MAX;
	else
		hw->fc.pause_time = EM_FC_PAUSE_TIME;

	hw->fc.send_xon = 1;

	/* Set Flow control, use the tunable location if sane */
	if (em_fc_setting <= e1000_fc_full)
		hw->fc.requested_mode = em_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Workaround: no TX flow ctrl for PCH */
	if (hw->mac.type == e1000_pchlan)
		hw->fc.requested_mode = e1000_fc_rx_pause;

	/* Override - settings for PCH2LAN, ya its magic :) */
	if (hw->mac.type == e1000_pch2lan) {
		hw->fc.high_water = 0x5C20;
		hw->fc.low_water = 0x5048;
		hw->fc.pause_time = 0x0650;
		hw->fc.refresh_time = 0x0400;
	} else if (hw->mac.type == e1000_pch_lpt ||
		   hw->mac.type == e1000_pch_spt ||
		   hw->mac.type == e1000_pch_cnp) {
		hw->fc.requested_mode = e1000_fc_full;
	}

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;
	e1000_check_for_link(hw);
	return 0;
}

/* This function is based on em_update_stats_counters() in e1000/if_em.c */
static int
eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	int pause_frames;

	if (hw->phy.media_type == e1000_media_type_copper ||
			(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/*
	 * For the 64-bit byte counters the low dword must be read first.
	 * Both registers clear on the read of the high dword.
	 */
	stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
	stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
	stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);

	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->tor += E1000_READ_REG(hw, E1000_TORH);
	stats->tot += E1000_READ_REG(hw, E1000_TOTH);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);

	/* Interrupt Counts */

	if (hw->mac.type >= e1000_82571) {
		stats->iac += E1000_READ_REG(hw, E1000_IAC);
		stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
		stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
		stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
		stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
		stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
		stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
		stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
		stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
	}

	if (hw->mac.type >= e1000_82543) {
		stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
		stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
		stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
		stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
		stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
		stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
	}

	if (rte_stats == NULL)
		return -EINVAL;

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs +
			     stats->rlec + stats->ruc + stats->roc +
			     stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes = stats->gorc;
	rte_stats->obytes = stats->gotc;
	return 0;
}

static void
eth_em_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *hw_stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_em_stats_get(dev, NULL);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
}

static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	em_rxq_intr_enable(hw);
	rte_intr_enable(intr_handle);

	return 0;
}

static int
eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	em_rxq_intr_disable(hw);

	return 0;
}
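/*
 * Maximum supported frame length per MAC type. Note that 0x2412 is
 * 9234 bytes: a 9K jumbo payload plus the Ethernet header and CRC.
 */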
static uint32_t
em_get_max_pktlen(const struct e1000_hw *hw)
{
	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
	case e1000_82574:
	case e1000_80003es2lan: /* 9K Jumbo Frame size */
	case e1000_82583:
		return 0x2412;
	case e1000_pchlan:
		return 0x1000;
	/* Adapters that do not support jumbo frames */
	case e1000_ich8lan:
		return ETHER_MAX_LEN;
	default:
		return MAX_JUMBO_FRAME_SIZE;
	}
}

static void
eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	/*
	 * Starting with 631xESB, hw supports 2 TX/RX queues per port.
	 * Unfortunately, all these NICs have just one TX context.
	 * So we have few choices for TX:
	 * - Use just one TX queue.
	 * - Allow cksum offload only for one TX queue.
	 * - Don't allow TX cksum offload at all.
	 * For now, option #1 was chosen.
	 * To use a second RX queue we have to use the extended RX descriptor
	 * (Multiple Receive Queues are mutually exclusive with UDP
	 * fragmentation and are not supported when a legacy receive
	 * descriptor format is used).
	 * That means separate RX routines, as legacy NICs (82540, 82545)
	 * don't support extended RXD.
	 * To avoid that we support just one RX queue for now (no RSS).
	 */
	dev_info->max_rx_queues = 1;
	dev_info->max_tx_queues = 1;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = E1000_MAX_RING_DESC,
		.nb_min = E1000_MIN_RING_DESC,
		.nb_align = EM_RXD_ALIGN,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = E1000_MAX_RING_DESC,
		.nb_min = E1000_MIN_RING_DESC,
		.nb_align = EM_TXD_ALIGN,
		.nb_seg_max = EM_TX_MAX_SEG,
		.nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G;
}

/* return 0 means link status changed, -1 means not changed */
static int
eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < EM_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case e1000_media_type_fiber:
			e1000_check_for_link(hw);
			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
					E1000_STATUS_LU);
			break;

		case e1000_media_type_internal_serdes:
			e1000_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));
	rte_em_dev_atomic_read_link_status(dev, &link);
	old = link;

	/* Now we check if a transition has happened */
	if (link_check && (link.link_status == ETH_LINK_DOWN)) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);
	} else if (!link_check && (link.link_status == ETH_LINK_UP)) {
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}
	rte_em_dev_atomic_write_link_status(dev, &link);

	/* not changed */
	if (old.link_status == link.link_status)
		return -1;

	/* changed */
	return 0;
}

/*
 * em_hw_control_acquire sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
 */
static void
em_hw_control_acquire(struct e1000_hw *hw)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware know the driver has taken over */
	if (hw->mac.type == e1000_82573) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);

	} else {
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/*
 * em_hw_control_release resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions of the
 * f/w this means that the network i/f is closed.
 */
static void
em_hw_control_release(struct e1000_hw *hw)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware take over control of h/w */
	if (hw->mac.type == e1000_82573) {
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else {
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 */
static void
em_init_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;  /* Mng Port 623 */
		manc2h |= 1 << 6;  /* Mng Port 664 */
		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to hardware management
 * controller if there is one.
 */
static void
em_release_manageability(struct e1000_hw *hw)
{
	uint32_t manc;

	if (e1000_enable_mng_pass_thru(hw)) {
		manc = E1000_READ_REG(hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}

static void
eth_em_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_em_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);
	if (dev->data->all_multicast == 1)
		rctl |= E1000_RCTL_MPE;
	else
		rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_em_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}

static void
eth_em_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
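/*
 * The 4096-bit VLAN filter table (VFTA) is laid out as 128 32-bit words:
 * bits 11:5 of the VLAN ID select the word, bits 4:0 select the bit
 * within it. For example, VLAN ID 100 maps to word 3, bit 4.
 */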
static int
eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
			      E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
em_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* Filter Table Disable */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg &= ~E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
em_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* restore vfta from local copy */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}

static void
em_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Disable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static void
em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Enable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}

static int
eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			em_vlan_hw_strip_enable(dev);
		else
			em_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			em_vlan_hw_filter_enable(dev);
		else
			em_vlan_hw_filter_disable(dev);
	}

	return 0;
}

/*
 * It enables the interrupt mask and then enables the interrupt.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t regval;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* clear interrupt */
	E1000_READ_REG(hw, E1000_ICR);
	regval = E1000_READ_REG(hw, E1000_IMS);
	E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
	return 0;
}

/*
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	E1000_READ_REG(hw, E1000_ICR);
	em_rxq_intr_enable(hw);
	return 0;
}

/*
 * It enables the receive packet interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_rxq_intr_enable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);
}

/*
 * It disables the lsc interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_lsc_intr_disable(struct e1000_hw *hw)
{
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_LSC);
	E1000_WRITE_FLUSH(hw);
}

/*
 * It disables the receive packet interrupt.
 *
 * @param hw
 *  Pointer to struct e1000_hw
 *
 * @return
 */
static void
em_rxq_intr_disable(struct e1000_hw *hw)
{
	E1000_READ_REG(hw, E1000_ICR);
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);
}

/*
 * It reads the ICR register to get the interrupt causes, checks them,
 * and sets a bit flag to request a link status update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	/* read-on-clear nic registers here */
	icr = E1000_READ_REG(hw, E1000_ICR);
	if (icr & E1000_ICR_LSC) {
		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	}

	return 0;
}

/*
 * It executes link_update after knowing an interrupt is present.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_em_interrupt_action(struct rte_eth_dev *dev,
			struct rte_intr_handle *intr_handle)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	uint32_t tctl, rctl;
	struct rte_eth_link link;
	int ret;

	if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
		return -1;

	intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
	rte_intr_enable(intr_handle);

	/* set get_link_status to check register later */
	hw->mac.get_link_status = 1;
	ret = eth_em_link_update(dev, 0);

	/* check if link has changed */
	if (ret < 0)
		return 0;

	memset(&link, 0, sizeof(link));
	rte_em_dev_atomic_read_link_status(dev, &link);
	if (link.link_status) {
		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
			     dev->data->port_id, link.link_speed,
			     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			     "full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	if (link.link_status) {
		/* enable Tx/Rx */
		tctl |= E1000_TCTL_EN;
		rctl |= E1000_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~E1000_TCTL_EN;
		rctl &= ~E1000_RCTL_EN;
	}
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);

	return 0;
}

/**
 * Interrupt handler which shall be registered at first.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
eth_em_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_em_interrupt_get_status(dev);
	eth_em_interrupt_action(dev, dev->intr_handle);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
}

static int
eth_em_led_on(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_em_led_off(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
}
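/*
 * Note that flow_ctrl_get reports the *negotiated* state from the CTRL
 * register's TFCE/RFCE bits, which after autonegotiation may differ from
 * the mode that was requested via flow_ctrl_set.
 */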
static int
eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to actual setting of
	 * the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	if (ctrl & E1000_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & E1000_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static int
eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	int err;
	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
		e1000_fc_none,
		e1000_fc_rx_pause,
		e1000_fc_tx_pause,
		e1000_fc_full
	};
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;
	rx_buf_size = em_get_rx_buffer_size(hw);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - ETHER_MAX_LEN;
	if ((fc_conf->high_water > max_high_water) ||
	    (fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
		PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = e1000_setup_link_generic(hw);
	if (err == E1000_SUCCESS) {

		/* check if we want to forward MAC frames - driver doesn't have
		 * native capability to do that, so we'll write the registers
		 * ourselves */

		rctl = E1000_READ_REG(hw, E1000_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= E1000_RCTL_PMCF;
		else
			rctl &= ~E1000_RCTL_PMCF;

		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
	return -EIO;
}

static int
eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, __rte_unused uint32_t pool)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return e1000_rar_set(hw, mac_addr->addr_bytes, index);
}

static void
eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[ETHER_ADDR_LEN];
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(addr, 0, sizeof(addr));

	e1000_rar_set(hw, addr, index);
}

static int
eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	struct e1000_hw *hw;
	uint32_t frame_size;
	uint32_t rctl;

	eth_em_infos_get(dev, &dev_info);
	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* refuse mtu that requires the support of scattered packets when this
	 * feature has not been enabled before. */
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	rctl = E1000_READ_REG(hw, E1000_RCTL);

	/* switch to jumbo mode if needed */
	if (frame_size > ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
		rctl |= E1000_RCTL_LPE;
	} else {
		dev->data->dev_conf.rxmode.jumbo_frame = 0;
		rctl &= ~E1000_RCTL_LPE;
	}
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	return 0;
}

static int
eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
			struct ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}

RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");