1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2019-2020 Intel Corporation 3 */ 4 5 #include <stdint.h> 6 #include <string.h> 7 8 #include <rte_string_fns.h> 9 #include <rte_pci.h> 10 #include <rte_bus_pci.h> 11 #include <ethdev_driver.h> 12 #include <ethdev_pci.h> 13 #include <rte_malloc.h> 14 #include <rte_alarm.h> 15 16 #include "igc_logs.h" 17 #include "igc_txrx.h" 18 #include "igc_filter.h" 19 #include "igc_flow.h" 20 21 #define IGC_INTEL_VENDOR_ID 0x8086 22 23 /* 24 * The overhead from MTU to max frame size. 25 * Considering VLAN so tag needs to be counted. 26 */ 27 #define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \ 28 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE) 29 30 #define IGC_FC_PAUSE_TIME 0x0680 31 #define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ 32 #define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ 33 34 #define IGC_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET 35 #define IGC_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET 36 #define IGC_MSIX_OTHER_INTR_VEC 0 /* MSI-X other interrupt vector */ 37 #define IGC_FLAG_NEED_LINK_UPDATE (1u << 0) /* need update link */ 38 39 #define IGC_DEFAULT_RX_FREE_THRESH 32 40 41 #define IGC_DEFAULT_RX_PTHRESH 8 42 #define IGC_DEFAULT_RX_HTHRESH 8 43 #define IGC_DEFAULT_RX_WTHRESH 4 44 45 #define IGC_DEFAULT_TX_PTHRESH 8 46 #define IGC_DEFAULT_TX_HTHRESH 1 47 #define IGC_DEFAULT_TX_WTHRESH 16 48 49 /* MSI-X other interrupt vector */ 50 #define IGC_MSIX_OTHER_INTR_VEC 0 51 52 /* External VLAN Enable bit mask */ 53 #define IGC_CTRL_EXT_EXT_VLAN (1u << 26) 54 55 /* Speed select */ 56 #define IGC_CTRL_SPEED_MASK (7u << 8) 57 #define IGC_CTRL_SPEED_2500 (6u << 8) 58 59 /* External VLAN Ether Type bit mask and shift */ 60 #define IGC_VET_EXT 0xFFFF0000 61 #define IGC_VET_EXT_SHIFT 16 62 63 /* Force EEE Auto-negotiation */ 64 #define IGC_EEER_EEE_FRC_AN (1u << 28) 65 66 /* Per Queue Good Packets Received Count */ 67 #define IGC_PQGPRC(idx) (0x10010 + 0x100 * (idx)) 68 /* Per Queue Good Octets Received Count */ 69 #define IGC_PQGORC(idx) (0x10018 + 0x100 * (idx)) 70 /* Per Queue Good Octets Transmitted Count */ 71 #define IGC_PQGOTC(idx) (0x10034 + 0x100 * (idx)) 72 /* Per Queue Multicast Packets Received Count */ 73 #define IGC_PQMPRC(idx) (0x10038 + 0x100 * (idx)) 74 /* Transmit Queue Drop Packet Count */ 75 #define IGC_TQDPC(idx) (0xe030 + 0x40 * (idx)) 76 77 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 78 #define U32_0_IN_U64 0 /* lower bytes of u64 */ 79 #define U32_1_IN_U64 1 /* higher bytes of u64 */ 80 #else 81 #define U32_0_IN_U64 1 82 #define U32_1_IN_U64 0 83 #endif 84 85 #define IGC_ALARM_INTERVAL 8000000u 86 /* us, about 13.6s some per-queue registers will wrap around back to 0. */ 87 88 static const struct rte_eth_desc_lim rx_desc_lim = { 89 .nb_max = IGC_MAX_RXD, 90 .nb_min = IGC_MIN_RXD, 91 .nb_align = IGC_RXD_ALIGN, 92 }; 93 94 static const struct rte_eth_desc_lim tx_desc_lim = { 95 .nb_max = IGC_MAX_TXD, 96 .nb_min = IGC_MIN_TXD, 97 .nb_align = IGC_TXD_ALIGN, 98 .nb_seg_max = IGC_TX_MAX_SEG, 99 .nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG, 100 }; 101 102 static const struct rte_pci_id pci_id_igc_map[] = { 103 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) }, 104 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) }, 105 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) }, 106 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) }, 107 { .vendor_id = 0, /* sentinel */ }, 108 }; 109 110 /* store statistics names and its offset in stats structure */ 111 struct rte_igc_xstats_name_off { 112 char name[RTE_ETH_XSTATS_NAME_SIZE]; 113 unsigned int offset; 114 }; 115 116 static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = { 117 {"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)}, 118 {"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)}, 119 {"rx_errors", offsetof(struct igc_hw_stats, rxerrc)}, 120 {"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)}, 121 {"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)}, 122 {"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)}, 123 {"tx_excessive_collision_packets", offsetof(struct igc_hw_stats, 124 ecol)}, 125 {"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)}, 126 {"tx_total_collisions", offsetof(struct igc_hw_stats, colc)}, 127 {"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)}, 128 {"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)}, 129 {"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)}, 130 {"rx_length_errors", offsetof(struct igc_hw_stats, rlec)}, 131 {"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)}, 132 {"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)}, 133 {"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)}, 134 {"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)}, 135 {"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats, 136 fcruc)}, 137 {"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)}, 138 {"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)}, 139 {"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)}, 140 {"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)}, 141 {"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats, 142 prc1023)}, 143 {"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats, 144 prc1522)}, 145 {"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)}, 146 {"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)}, 147 {"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)}, 148 {"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)}, 149 {"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)}, 150 {"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)}, 151 {"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)}, 152 {"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)}, 153 {"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)}, 154 {"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)}, 155 {"rx_total_packets", offsetof(struct igc_hw_stats, tpr)}, 156 {"tx_total_packets", offsetof(struct igc_hw_stats, tpt)}, 157 {"rx_total_bytes", offsetof(struct igc_hw_stats, tor)}, 158 {"tx_total_bytes", offsetof(struct igc_hw_stats, tot)}, 159 {"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)}, 160 {"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)}, 161 {"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)}, 162 {"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)}, 163 {"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats, 164 ptc1023)}, 165 {"tx_size_1023_to_max_packets", offsetof(struct igc_hw_stats, 166 ptc1522)}, 167 {"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)}, 168 {"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)}, 169 {"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)}, 170 {"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)}, 171 {"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)}, 172 {"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)}, 173 {"rx_descriptor_lower_threshold", 174 offsetof(struct igc_hw_stats, icrxdmtc)}, 175 }; 176 177 #define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \ 178 sizeof(rte_igc_stats_strings[0])) 179 180 static int eth_igc_configure(struct rte_eth_dev *dev); 181 static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete); 182 static int eth_igc_stop(struct rte_eth_dev *dev); 183 static int eth_igc_start(struct rte_eth_dev *dev); 184 static int eth_igc_set_link_up(struct rte_eth_dev *dev); 185 static int eth_igc_set_link_down(struct rte_eth_dev *dev); 186 static int eth_igc_close(struct rte_eth_dev *dev); 187 static int eth_igc_reset(struct rte_eth_dev *dev); 188 static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev); 189 static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev); 190 static int eth_igc_fw_version_get(struct rte_eth_dev *dev, 191 char *fw_version, size_t fw_size); 192 static int eth_igc_infos_get(struct rte_eth_dev *dev, 193 struct rte_eth_dev_info *dev_info); 194 static int eth_igc_led_on(struct rte_eth_dev *dev); 195 static int eth_igc_led_off(struct rte_eth_dev *dev); 196 static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev); 197 static int eth_igc_rar_set(struct rte_eth_dev *dev, 198 struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool); 199 static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index); 200 static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev, 201 struct rte_ether_addr *addr); 202 static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev, 203 struct rte_ether_addr *mc_addr_set, 204 uint32_t nb_mc_addr); 205 static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev); 206 static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev); 207 static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 208 static int eth_igc_stats_get(struct rte_eth_dev *dev, 209 struct rte_eth_stats *rte_stats); 210 static int eth_igc_xstats_get(struct rte_eth_dev *dev, 211 struct rte_eth_xstat *xstats, unsigned int n); 212 static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, 213 const uint64_t *ids, 214 uint64_t *values, unsigned int n); 215 static int eth_igc_xstats_get_names(struct rte_eth_dev *dev, 216 struct rte_eth_xstat_name *xstats_names, 217 unsigned int size); 218 static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev, 219 struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, 220 unsigned int limit); 221 static int eth_igc_xstats_reset(struct rte_eth_dev *dev); 222 static int 223 eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev, 224 uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx); 225 static int 226 eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); 227 static int 228 eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); 229 static int 230 eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); 231 static int 232 eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); 233 static int eth_igc_rss_reta_update(struct rte_eth_dev *dev, 234 struct rte_eth_rss_reta_entry64 *reta_conf, 235 uint16_t reta_size); 236 static int eth_igc_rss_reta_query(struct rte_eth_dev *dev, 237 struct rte_eth_rss_reta_entry64 *reta_conf, 238 uint16_t reta_size); 239 static int eth_igc_rss_hash_update(struct rte_eth_dev *dev, 240 struct rte_eth_rss_conf *rss_conf); 241 static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev, 242 struct rte_eth_rss_conf *rss_conf); 243 static int 244 eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); 245 static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask); 246 static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev, 247 enum rte_vlan_type vlan_type, uint16_t tpid); 248 249 static const struct eth_dev_ops eth_igc_ops = { 250 .dev_configure = eth_igc_configure, 251 .link_update = eth_igc_link_update, 252 .dev_stop = eth_igc_stop, 253 .dev_start = eth_igc_start, 254 .dev_close = eth_igc_close, 255 .dev_reset = eth_igc_reset, 256 .dev_set_link_up = eth_igc_set_link_up, 257 .dev_set_link_down = eth_igc_set_link_down, 258 .promiscuous_enable = eth_igc_promiscuous_enable, 259 .promiscuous_disable = eth_igc_promiscuous_disable, 260 .allmulticast_enable = eth_igc_allmulticast_enable, 261 .allmulticast_disable = eth_igc_allmulticast_disable, 262 .fw_version_get = eth_igc_fw_version_get, 263 .dev_infos_get = eth_igc_infos_get, 264 .dev_led_on = eth_igc_led_on, 265 .dev_led_off = eth_igc_led_off, 266 .dev_supported_ptypes_get = eth_igc_supported_ptypes_get, 267 .mtu_set = eth_igc_mtu_set, 268 .mac_addr_add = eth_igc_rar_set, 269 .mac_addr_remove = eth_igc_rar_clear, 270 .mac_addr_set = eth_igc_default_mac_addr_set, 271 .set_mc_addr_list = eth_igc_set_mc_addr_list, 272 273 .rx_queue_setup = eth_igc_rx_queue_setup, 274 .rx_queue_release = eth_igc_rx_queue_release, 275 .tx_queue_setup = eth_igc_tx_queue_setup, 276 .tx_queue_release = eth_igc_tx_queue_release, 277 .tx_done_cleanup = eth_igc_tx_done_cleanup, 278 .rxq_info_get = eth_igc_rxq_info_get, 279 .txq_info_get = eth_igc_txq_info_get, 280 .stats_get = eth_igc_stats_get, 281 .xstats_get = eth_igc_xstats_get, 282 .xstats_get_by_id = eth_igc_xstats_get_by_id, 283 .xstats_get_names_by_id = eth_igc_xstats_get_names_by_id, 284 .xstats_get_names = eth_igc_xstats_get_names, 285 .stats_reset = eth_igc_xstats_reset, 286 .xstats_reset = eth_igc_xstats_reset, 287 .queue_stats_mapping_set = eth_igc_queue_stats_mapping_set, 288 .rx_queue_intr_enable = eth_igc_rx_queue_intr_enable, 289 .rx_queue_intr_disable = eth_igc_rx_queue_intr_disable, 290 .flow_ctrl_get = eth_igc_flow_ctrl_get, 291 .flow_ctrl_set = eth_igc_flow_ctrl_set, 292 .reta_update = eth_igc_rss_reta_update, 293 .reta_query = eth_igc_rss_reta_query, 294 .rss_hash_update = eth_igc_rss_hash_update, 295 .rss_hash_conf_get = eth_igc_rss_hash_conf_get, 296 .vlan_filter_set = eth_igc_vlan_filter_set, 297 .vlan_offload_set = eth_igc_vlan_offload_set, 298 .vlan_tpid_set = eth_igc_vlan_tpid_set, 299 .vlan_strip_queue_set = eth_igc_vlan_strip_queue_set, 300 .filter_ctrl = eth_igc_filter_ctrl, 301 }; 302 303 /* 304 * multiple queue mode checking 305 */ 306 static int 307 igc_check_mq_mode(struct rte_eth_dev *dev) 308 { 309 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 310 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 311 312 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 313 PMD_INIT_LOG(ERR, "SRIOV is not supported."); 314 return -EINVAL; 315 } 316 317 if (rx_mq_mode != ETH_MQ_RX_NONE && 318 rx_mq_mode != ETH_MQ_RX_RSS) { 319 /* RSS together with VMDq not supported*/ 320 PMD_INIT_LOG(ERR, "RX mode %d is not supported.", 321 rx_mq_mode); 322 return -EINVAL; 323 } 324 325 /* To no break software that set invalid mode, only display 326 * warning if invalid mode is used. 327 */ 328 if (tx_mq_mode != ETH_MQ_TX_NONE) 329 PMD_INIT_LOG(WARNING, 330 "TX mode %d is not supported. Due to meaningless in this driver, just ignore", 331 tx_mq_mode); 332 333 return 0; 334 } 335 336 static int 337 eth_igc_configure(struct rte_eth_dev *dev) 338 { 339 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 340 int ret; 341 342 PMD_INIT_FUNC_TRACE(); 343 344 ret = igc_check_mq_mode(dev); 345 if (ret != 0) 346 return ret; 347 348 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE; 349 return 0; 350 } 351 352 static int 353 eth_igc_set_link_up(struct rte_eth_dev *dev) 354 { 355 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 356 357 if (hw->phy.media_type == igc_media_type_copper) 358 igc_power_up_phy(hw); 359 else 360 igc_power_up_fiber_serdes_link(hw); 361 return 0; 362 } 363 364 static int 365 eth_igc_set_link_down(struct rte_eth_dev *dev) 366 { 367 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 368 369 if (hw->phy.media_type == igc_media_type_copper) 370 igc_power_down_phy(hw); 371 else 372 igc_shutdown_fiber_serdes_link(hw); 373 return 0; 374 } 375 376 /* 377 * disable other interrupt 378 */ 379 static void 380 igc_intr_other_disable(struct rte_eth_dev *dev) 381 { 382 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 383 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 384 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 385 386 if (rte_intr_allow_others(intr_handle) && 387 dev->data->dev_conf.intr_conf.lsc) { 388 IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC); 389 } 390 391 IGC_WRITE_REG(hw, IGC_IMC, ~0); 392 IGC_WRITE_FLUSH(hw); 393 } 394 395 /* 396 * enable other interrupt 397 */ 398 static inline void 399 igc_intr_other_enable(struct rte_eth_dev *dev) 400 { 401 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 402 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 403 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 404 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 405 406 if (rte_intr_allow_others(intr_handle) && 407 dev->data->dev_conf.intr_conf.lsc) { 408 IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC); 409 } 410 411 IGC_WRITE_REG(hw, IGC_IMS, intr->mask); 412 IGC_WRITE_FLUSH(hw); 413 } 414 415 /* 416 * It reads ICR and gets interrupt causes, check it and set a bit flag 417 * to update link status. 418 */ 419 static void 420 eth_igc_interrupt_get_status(struct rte_eth_dev *dev) 421 { 422 uint32_t icr; 423 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 424 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 425 426 /* read-on-clear nic registers here */ 427 icr = IGC_READ_REG(hw, IGC_ICR); 428 429 intr->flags = 0; 430 if (icr & IGC_ICR_LSC) 431 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE; 432 } 433 434 /* return 0 means link status changed, -1 means not changed */ 435 static int 436 eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete) 437 { 438 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 439 struct rte_eth_link link; 440 int link_check, count; 441 442 link_check = 0; 443 hw->mac.get_link_status = 1; 444 445 /* possible wait-to-complete in up to 9 seconds */ 446 for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) { 447 /* Read the real link status */ 448 switch (hw->phy.media_type) { 449 case igc_media_type_copper: 450 /* Do the work to read phy */ 451 igc_check_for_link(hw); 452 link_check = !hw->mac.get_link_status; 453 break; 454 455 case igc_media_type_fiber: 456 igc_check_for_link(hw); 457 link_check = (IGC_READ_REG(hw, IGC_STATUS) & 458 IGC_STATUS_LU); 459 break; 460 461 case igc_media_type_internal_serdes: 462 igc_check_for_link(hw); 463 link_check = hw->mac.serdes_has_link; 464 break; 465 466 default: 467 break; 468 } 469 if (link_check || wait_to_complete == 0) 470 break; 471 rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL); 472 } 473 memset(&link, 0, sizeof(link)); 474 475 /* Now we check if a transition has happened */ 476 if (link_check) { 477 uint16_t duplex, speed; 478 hw->mac.ops.get_link_up_info(hw, &speed, &duplex); 479 link.link_duplex = (duplex == FULL_DUPLEX) ? 480 ETH_LINK_FULL_DUPLEX : 481 ETH_LINK_HALF_DUPLEX; 482 link.link_speed = speed; 483 link.link_status = ETH_LINK_UP; 484 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 485 ETH_LINK_SPEED_FIXED); 486 487 if (speed == SPEED_2500) { 488 uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG); 489 if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) { 490 tipg &= ~IGC_TIPG_IPGT_MASK; 491 tipg |= 0x0b; 492 IGC_WRITE_REG(hw, IGC_TIPG, tipg); 493 } 494 } 495 } else { 496 link.link_speed = 0; 497 link.link_duplex = ETH_LINK_HALF_DUPLEX; 498 link.link_status = ETH_LINK_DOWN; 499 link.link_autoneg = ETH_LINK_FIXED; 500 } 501 502 return rte_eth_linkstatus_set(dev, &link); 503 } 504 505 /* 506 * It executes link_update after knowing an interrupt is present. 507 */ 508 static void 509 eth_igc_interrupt_action(struct rte_eth_dev *dev) 510 { 511 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 512 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 513 struct rte_eth_link link; 514 int ret; 515 516 if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) { 517 intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 518 519 /* set get_link_status to check register later */ 520 ret = eth_igc_link_update(dev, 0); 521 522 /* check if link has changed */ 523 if (ret < 0) 524 return; 525 526 rte_eth_linkstatus_get(dev, &link); 527 if (link.link_status) 528 PMD_DRV_LOG(INFO, 529 " Port %d: Link Up - speed %u Mbps - %s", 530 dev->data->port_id, 531 (unsigned int)link.link_speed, 532 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 533 "full-duplex" : "half-duplex"); 534 else 535 PMD_DRV_LOG(INFO, " Port %d: Link Down", 536 dev->data->port_id); 537 538 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 539 pci_dev->addr.domain, 540 pci_dev->addr.bus, 541 pci_dev->addr.devid, 542 pci_dev->addr.function); 543 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 544 } 545 } 546 547 /* 548 * Interrupt handler which shall be registered at first. 549 * 550 * @handle 551 * Pointer to interrupt handle. 552 * @param 553 * The address of parameter (struct rte_eth_dev *) registered before. 554 */ 555 static void 556 eth_igc_interrupt_handler(void *param) 557 { 558 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 559 560 eth_igc_interrupt_get_status(dev); 561 eth_igc_interrupt_action(dev); 562 } 563 564 static void igc_read_queue_stats_register(struct rte_eth_dev *dev); 565 566 /* 567 * Update the queue status every IGC_ALARM_INTERVAL time. 568 * @param 569 * The address of parameter (struct rte_eth_dev *) registered before. 570 */ 571 static void 572 igc_update_queue_stats_handler(void *param) 573 { 574 struct rte_eth_dev *dev = param; 575 igc_read_queue_stats_register(dev); 576 rte_eal_alarm_set(IGC_ALARM_INTERVAL, 577 igc_update_queue_stats_handler, dev); 578 } 579 580 /* 581 * rx,tx enable/disable 582 */ 583 static void 584 eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable) 585 { 586 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 587 uint32_t tctl, rctl; 588 589 tctl = IGC_READ_REG(hw, IGC_TCTL); 590 rctl = IGC_READ_REG(hw, IGC_RCTL); 591 592 if (enable) { 593 /* enable Tx/Rx */ 594 tctl |= IGC_TCTL_EN; 595 rctl |= IGC_RCTL_EN; 596 } else { 597 /* disable Tx/Rx */ 598 tctl &= ~IGC_TCTL_EN; 599 rctl &= ~IGC_RCTL_EN; 600 } 601 IGC_WRITE_REG(hw, IGC_TCTL, tctl); 602 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 603 IGC_WRITE_FLUSH(hw); 604 } 605 606 /* 607 * This routine disables all traffic on the adapter by issuing a 608 * global reset on the MAC. 609 */ 610 static int 611 eth_igc_stop(struct rte_eth_dev *dev) 612 { 613 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev); 614 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 615 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 616 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 617 struct rte_eth_link link; 618 619 dev->data->dev_started = 0; 620 adapter->stopped = 1; 621 622 /* disable receive and transmit */ 623 eth_igc_rxtx_control(dev, false); 624 625 /* disable all MSI-X interrupts */ 626 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f); 627 IGC_WRITE_FLUSH(hw); 628 629 /* clear all MSI-X interrupts */ 630 IGC_WRITE_REG(hw, IGC_EICR, 0x1f); 631 632 igc_intr_other_disable(dev); 633 634 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev); 635 636 /* disable intr eventfd mapping */ 637 rte_intr_disable(intr_handle); 638 639 igc_reset_hw(hw); 640 641 /* disable all wake up */ 642 IGC_WRITE_REG(hw, IGC_WUC, 0); 643 644 /* disable checking EEE operation in MAC loopback mode */ 645 igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN); 646 647 /* Set bit for Go Link disconnect */ 648 igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT, 649 IGC_82580_PM_GO_LINKD); 650 651 /* Power down the phy. Needed to make the link go Down */ 652 eth_igc_set_link_down(dev); 653 654 igc_dev_clear_queues(dev); 655 656 /* clear the recorded link status */ 657 memset(&link, 0, sizeof(link)); 658 rte_eth_linkstatus_set(dev, &link); 659 660 if (!rte_intr_allow_others(intr_handle)) 661 /* resume to the default handler */ 662 rte_intr_callback_register(intr_handle, 663 eth_igc_interrupt_handler, 664 (void *)dev); 665 666 /* Clean datapath event and queue/vec mapping */ 667 rte_intr_efd_disable(intr_handle); 668 if (intr_handle->intr_vec != NULL) { 669 rte_free(intr_handle->intr_vec); 670 intr_handle->intr_vec = NULL; 671 } 672 673 return 0; 674 } 675 676 /* 677 * write interrupt vector allocation register 678 * @hw 679 * board private structure 680 * @queue_index 681 * queue index, valid 0,1,2,3 682 * @tx 683 * tx:1, rx:0 684 * @msix_vector 685 * msix-vector, valid 0,1,2,3,4 686 */ 687 static void 688 igc_write_ivar(struct igc_hw *hw, uint8_t queue_index, 689 bool tx, uint8_t msix_vector) 690 { 691 uint8_t offset = 0; 692 uint8_t reg_index = queue_index >> 1; 693 uint32_t val; 694 695 /* 696 * IVAR(0) 697 * bit31...24 bit23...16 bit15...8 bit7...0 698 * TX1 RX1 TX0 RX0 699 * 700 * IVAR(1) 701 * bit31...24 bit23...16 bit15...8 bit7...0 702 * TX3 RX3 TX2 RX2 703 */ 704 705 if (tx) 706 offset = 8; 707 708 if (queue_index & 1) 709 offset += 16; 710 711 val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index); 712 713 /* clear bits */ 714 val &= ~((uint32_t)0xFF << offset); 715 716 /* write vector and valid bit */ 717 val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset; 718 719 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val); 720 } 721 722 /* Sets up the hardware to generate MSI-X interrupts properly 723 * @hw 724 * board private structure 725 */ 726 static void 727 igc_configure_msix_intr(struct rte_eth_dev *dev) 728 { 729 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 730 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 731 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 732 733 uint32_t intr_mask; 734 uint32_t vec = IGC_MISC_VEC_ID; 735 uint32_t base = IGC_MISC_VEC_ID; 736 uint32_t misc_shift = 0; 737 int i; 738 739 /* won't configure msix register if no mapping is done 740 * between intr vector and event fd 741 */ 742 if (!rte_intr_dp_is_en(intr_handle)) 743 return; 744 745 if (rte_intr_allow_others(intr_handle)) { 746 base = IGC_RX_VEC_START; 747 vec = base; 748 misc_shift = 1; 749 } 750 751 /* turn on MSI-X capability first */ 752 IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | 753 IGC_GPIE_PBA | IGC_GPIE_EIAME | 754 IGC_GPIE_NSICR); 755 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << 756 misc_shift; 757 758 if (dev->data->dev_conf.intr_conf.lsc) 759 intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC); 760 761 /* enable msix auto-clear */ 762 igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask); 763 764 /* set other cause interrupt vector */ 765 igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC, 766 (uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8); 767 768 /* enable auto-mask */ 769 igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask); 770 771 for (i = 0; i < dev->data->nb_rx_queues; i++) { 772 igc_write_ivar(hw, i, 0, vec); 773 intr_handle->intr_vec[i] = vec; 774 if (vec < base + intr_handle->nb_efd - 1) 775 vec++; 776 } 777 778 IGC_WRITE_FLUSH(hw); 779 } 780 781 /** 782 * It enables the interrupt mask and then enable the interrupt. 783 * 784 * @dev 785 * Pointer to struct rte_eth_dev. 786 * @on 787 * Enable or Disable 788 */ 789 static void 790 igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 791 { 792 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 793 794 if (on) 795 intr->mask |= IGC_ICR_LSC; 796 else 797 intr->mask &= ~IGC_ICR_LSC; 798 } 799 800 /* 801 * It enables the interrupt. 802 * It will be called once only during nic initialized. 803 */ 804 static void 805 igc_rxq_interrupt_setup(struct rte_eth_dev *dev) 806 { 807 uint32_t mask; 808 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 809 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 810 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 811 int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0; 812 813 /* won't configure msix register if no mapping is done 814 * between intr vector and event fd 815 */ 816 if (!rte_intr_dp_is_en(intr_handle)) 817 return; 818 819 mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift; 820 IGC_WRITE_REG(hw, IGC_EIMS, mask); 821 } 822 823 /* 824 * Get hardware rx-buffer size. 825 */ 826 static inline int 827 igc_get_rx_buffer_size(struct igc_hw *hw) 828 { 829 return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10; 830 } 831 832 /* 833 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. 834 * For ASF and Pass Through versions of f/w this means 835 * that the driver is loaded. 836 */ 837 static void 838 igc_hw_control_acquire(struct igc_hw *hw) 839 { 840 uint32_t ctrl_ext; 841 842 /* Let firmware know the driver has taken over */ 843 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); 844 IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 845 } 846 847 /* 848 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit. 849 * For ASF and Pass Through versions of f/w this means that the 850 * driver is no longer loaded. 851 */ 852 static void 853 igc_hw_control_release(struct igc_hw *hw) 854 { 855 uint32_t ctrl_ext; 856 857 /* Let firmware taken over control of h/w */ 858 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); 859 IGC_WRITE_REG(hw, IGC_CTRL_EXT, 860 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 861 } 862 863 static int 864 igc_hardware_init(struct igc_hw *hw) 865 { 866 uint32_t rx_buf_size; 867 int diag; 868 869 /* Let the firmware know the OS is in control */ 870 igc_hw_control_acquire(hw); 871 872 /* Issue a global reset */ 873 igc_reset_hw(hw); 874 875 /* disable all wake up */ 876 IGC_WRITE_REG(hw, IGC_WUC, 0); 877 878 /* 879 * Hardware flow control 880 * - High water mark should allow for at least two standard size (1518) 881 * frames to be received after sending an XOFF. 882 * - Low water mark works best when it is very near the high water mark. 883 * This allows the receiver to restart by sending XON when it has 884 * drained a bit. Here we use an arbitrary value of 1500 which will 885 * restart after one full frame is pulled from the buffer. There 886 * could be several smaller frames in the buffer and if so they will 887 * not trigger the XON until their total number reduces the buffer 888 * by 1500. 889 */ 890 rx_buf_size = igc_get_rx_buffer_size(hw); 891 hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2); 892 hw->fc.low_water = hw->fc.high_water - 1500; 893 hw->fc.pause_time = IGC_FC_PAUSE_TIME; 894 hw->fc.send_xon = 1; 895 hw->fc.requested_mode = igc_fc_full; 896 897 diag = igc_init_hw(hw); 898 if (diag < 0) 899 return diag; 900 901 igc_get_phy_info(hw); 902 igc_check_for_link(hw); 903 904 return 0; 905 } 906 907 static int 908 eth_igc_start(struct rte_eth_dev *dev) 909 { 910 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 911 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev); 912 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 913 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 914 uint32_t *speeds; 915 int ret; 916 917 PMD_INIT_FUNC_TRACE(); 918 919 /* disable all MSI-X interrupts */ 920 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f); 921 IGC_WRITE_FLUSH(hw); 922 923 /* clear all MSI-X interrupts */ 924 IGC_WRITE_REG(hw, IGC_EICR, 0x1f); 925 926 /* disable uio/vfio intr/eventfd mapping */ 927 if (!adapter->stopped) 928 rte_intr_disable(intr_handle); 929 930 /* Power up the phy. Needed to make the link go Up */ 931 eth_igc_set_link_up(dev); 932 933 /* Put the address into the Receive Address Array */ 934 igc_rar_set(hw, hw->mac.addr, 0); 935 936 /* Initialize the hardware */ 937 if (igc_hardware_init(hw)) { 938 PMD_DRV_LOG(ERR, "Unable to initialize the hardware"); 939 return -EIO; 940 } 941 adapter->stopped = 0; 942 943 /* check and configure queue intr-vector mapping */ 944 if (rte_intr_cap_multiple(intr_handle) && 945 dev->data->dev_conf.intr_conf.rxq) { 946 uint32_t intr_vector = dev->data->nb_rx_queues; 947 if (rte_intr_efd_enable(intr_handle, intr_vector)) 948 return -1; 949 } 950 951 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 952 intr_handle->intr_vec = rte_zmalloc("intr_vec", 953 dev->data->nb_rx_queues * sizeof(int), 0); 954 if (intr_handle->intr_vec == NULL) { 955 PMD_DRV_LOG(ERR, 956 "Failed to allocate %d rx_queues intr_vec", 957 dev->data->nb_rx_queues); 958 return -ENOMEM; 959 } 960 } 961 962 /* configure msix for rx interrupt */ 963 igc_configure_msix_intr(dev); 964 965 igc_tx_init(dev); 966 967 /* This can fail when allocating mbufs for descriptor rings */ 968 ret = igc_rx_init(dev); 969 if (ret) { 970 PMD_DRV_LOG(ERR, "Unable to initialize RX hardware"); 971 igc_dev_clear_queues(dev); 972 return ret; 973 } 974 975 igc_clear_hw_cntrs_base_generic(hw); 976 977 /* VLAN Offload Settings */ 978 eth_igc_vlan_offload_set(dev, 979 ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 980 ETH_VLAN_EXTEND_MASK); 981 982 /* Setup link speed and duplex */ 983 speeds = &dev->data->dev_conf.link_speeds; 984 if (*speeds == ETH_LINK_SPEED_AUTONEG) { 985 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500; 986 hw->mac.autoneg = 1; 987 } else { 988 int num_speeds = 0; 989 bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0; 990 991 /* Reset */ 992 hw->phy.autoneg_advertised = 0; 993 994 if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | 995 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | 996 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | 997 ETH_LINK_SPEED_FIXED)) { 998 num_speeds = -1; 999 goto error_invalid_config; 1000 } 1001 if (*speeds & ETH_LINK_SPEED_10M_HD) { 1002 hw->phy.autoneg_advertised |= ADVERTISE_10_HALF; 1003 num_speeds++; 1004 } 1005 if (*speeds & ETH_LINK_SPEED_10M) { 1006 hw->phy.autoneg_advertised |= ADVERTISE_10_FULL; 1007 num_speeds++; 1008 } 1009 if (*speeds & ETH_LINK_SPEED_100M_HD) { 1010 hw->phy.autoneg_advertised |= ADVERTISE_100_HALF; 1011 num_speeds++; 1012 } 1013 if (*speeds & ETH_LINK_SPEED_100M) { 1014 hw->phy.autoneg_advertised |= ADVERTISE_100_FULL; 1015 num_speeds++; 1016 } 1017 if (*speeds & ETH_LINK_SPEED_1G) { 1018 hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL; 1019 num_speeds++; 1020 } 1021 if (*speeds & ETH_LINK_SPEED_2_5G) { 1022 hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL; 1023 num_speeds++; 1024 } 1025 if (num_speeds == 0 || (!autoneg && num_speeds > 1)) 1026 goto error_invalid_config; 1027 1028 /* Set/reset the mac.autoneg based on the link speed, 1029 * fixed or not 1030 */ 1031 if (!autoneg) { 1032 hw->mac.autoneg = 0; 1033 hw->mac.forced_speed_duplex = 1034 hw->phy.autoneg_advertised; 1035 } else { 1036 hw->mac.autoneg = 1; 1037 } 1038 } 1039 1040 igc_setup_link(hw); 1041 1042 if (rte_intr_allow_others(intr_handle)) { 1043 /* check if lsc interrupt is enabled */ 1044 if (dev->data->dev_conf.intr_conf.lsc) 1045 igc_lsc_interrupt_setup(dev, 1); 1046 else 1047 igc_lsc_interrupt_setup(dev, 0); 1048 } else { 1049 rte_intr_callback_unregister(intr_handle, 1050 eth_igc_interrupt_handler, 1051 (void *)dev); 1052 if (dev->data->dev_conf.intr_conf.lsc) 1053 PMD_DRV_LOG(INFO, 1054 "LSC won't enable because of no intr multiplex"); 1055 } 1056 1057 /* enable uio/vfio intr/eventfd mapping */ 1058 rte_intr_enable(intr_handle); 1059 1060 rte_eal_alarm_set(IGC_ALARM_INTERVAL, 1061 igc_update_queue_stats_handler, dev); 1062 1063 /* check if rxq interrupt is enabled */ 1064 if (dev->data->dev_conf.intr_conf.rxq && 1065 rte_intr_dp_is_en(intr_handle)) 1066 igc_rxq_interrupt_setup(dev); 1067 1068 /* resume enabled intr since hw reset */ 1069 igc_intr_other_enable(dev); 1070 1071 eth_igc_rxtx_control(dev, true); 1072 eth_igc_link_update(dev, 0); 1073 1074 /* configure MAC-loopback mode */ 1075 if (dev->data->dev_conf.lpbk_mode == 1) { 1076 uint32_t reg_val; 1077 1078 reg_val = IGC_READ_REG(hw, IGC_CTRL); 1079 reg_val &= ~IGC_CTRL_SPEED_MASK; 1080 reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD | 1081 IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500; 1082 IGC_WRITE_REG(hw, IGC_CTRL, reg_val); 1083 1084 igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN); 1085 } 1086 1087 return 0; 1088 1089 error_invalid_config: 1090 PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u", 1091 dev->data->dev_conf.link_speeds, dev->data->port_id); 1092 igc_dev_clear_queues(dev); 1093 return -EINVAL; 1094 } 1095 1096 static int 1097 igc_reset_swfw_lock(struct igc_hw *hw) 1098 { 1099 int ret_val; 1100 1101 /* 1102 * Do mac ops initialization manually here, since we will need 1103 * some function pointers set by this call. 1104 */ 1105 ret_val = igc_init_mac_params(hw); 1106 if (ret_val) 1107 return ret_val; 1108 1109 /* 1110 * SMBI lock should not fail in this early stage. If this is the case, 1111 * it is due to an improper exit of the application. 1112 * So force the release of the faulty lock. 1113 */ 1114 if (igc_get_hw_semaphore_generic(hw) < 0) 1115 PMD_DRV_LOG(DEBUG, "SMBI lock released"); 1116 1117 igc_put_hw_semaphore_generic(hw); 1118 1119 if (hw->mac.ops.acquire_swfw_sync != NULL) { 1120 uint16_t mask; 1121 1122 /* 1123 * Phy lock should not fail in this early stage. 1124 * If this is the case, it is due to an improper exit of the 1125 * application. So force the release of the faulty lock. 1126 */ 1127 mask = IGC_SWFW_PHY0_SM; 1128 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { 1129 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", 1130 hw->bus.func); 1131 } 1132 hw->mac.ops.release_swfw_sync(hw, mask); 1133 1134 /* 1135 * This one is more tricky since it is common to all ports; but 1136 * swfw_sync retries last long enough (1s) to be almost sure 1137 * that if lock can not be taken it is due to an improper lock 1138 * of the semaphore. 1139 */ 1140 mask = IGC_SWFW_EEP_SM; 1141 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) 1142 PMD_DRV_LOG(DEBUG, "SWFW common locks released"); 1143 1144 hw->mac.ops.release_swfw_sync(hw, mask); 1145 } 1146 1147 return IGC_SUCCESS; 1148 } 1149 1150 /* 1151 * free all rx/tx queues. 1152 */ 1153 static void 1154 igc_dev_free_queues(struct rte_eth_dev *dev) 1155 { 1156 uint16_t i; 1157 1158 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1159 eth_igc_rx_queue_release(dev->data->rx_queues[i]); 1160 dev->data->rx_queues[i] = NULL; 1161 } 1162 dev->data->nb_rx_queues = 0; 1163 1164 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1165 eth_igc_tx_queue_release(dev->data->tx_queues[i]); 1166 dev->data->tx_queues[i] = NULL; 1167 } 1168 dev->data->nb_tx_queues = 0; 1169 } 1170 1171 static int 1172 eth_igc_close(struct rte_eth_dev *dev) 1173 { 1174 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1175 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1176 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1177 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev); 1178 int retry = 0; 1179 int ret = 0; 1180 1181 PMD_INIT_FUNC_TRACE(); 1182 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1183 return 0; 1184 1185 if (!adapter->stopped) 1186 ret = eth_igc_stop(dev); 1187 1188 igc_flow_flush(dev, NULL); 1189 igc_clear_all_filter(dev); 1190 1191 igc_intr_other_disable(dev); 1192 do { 1193 int ret = rte_intr_callback_unregister(intr_handle, 1194 eth_igc_interrupt_handler, dev); 1195 if (ret >= 0 || ret == -ENOENT || ret == -EINVAL) 1196 break; 1197 1198 PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret); 1199 DELAY(200 * 1000); /* delay 200ms */ 1200 } while (retry++ < 5); 1201 1202 igc_phy_hw_reset(hw); 1203 igc_hw_control_release(hw); 1204 igc_dev_free_queues(dev); 1205 1206 /* Reset any pending lock */ 1207 igc_reset_swfw_lock(hw); 1208 1209 return ret; 1210 } 1211 1212 static void 1213 igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev) 1214 { 1215 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1216 1217 hw->vendor_id = pci_dev->id.vendor_id; 1218 hw->device_id = pci_dev->id.device_id; 1219 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; 1220 hw->subsystem_device_id = pci_dev->id.subsystem_device_id; 1221 } 1222 1223 static int 1224 eth_igc_dev_init(struct rte_eth_dev *dev) 1225 { 1226 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1227 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); 1228 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1229 int i, error = 0; 1230 1231 PMD_INIT_FUNC_TRACE(); 1232 dev->dev_ops = ð_igc_ops; 1233 dev->rx_descriptor_done = eth_igc_rx_descriptor_done; 1234 dev->rx_queue_count = eth_igc_rx_queue_count; 1235 dev->rx_descriptor_status = eth_igc_rx_descriptor_status; 1236 dev->tx_descriptor_status = eth_igc_tx_descriptor_status; 1237 1238 /* 1239 * for secondary processes, we don't initialize any further as primary 1240 * has already done this work. Only check we don't need a different 1241 * RX function. 1242 */ 1243 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1244 return 0; 1245 1246 rte_eth_copy_pci_info(dev, pci_dev); 1247 dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1248 1249 hw->back = pci_dev; 1250 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1251 1252 igc_identify_hardware(dev, pci_dev); 1253 if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) { 1254 error = -EIO; 1255 goto err_late; 1256 } 1257 1258 igc_get_bus_info(hw); 1259 1260 /* Reset any pending lock */ 1261 if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) { 1262 error = -EIO; 1263 goto err_late; 1264 } 1265 1266 /* Finish initialization */ 1267 if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) { 1268 error = -EIO; 1269 goto err_late; 1270 } 1271 1272 hw->mac.autoneg = 1; 1273 hw->phy.autoneg_wait_to_complete = 0; 1274 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500; 1275 1276 /* Copper options */ 1277 if (hw->phy.media_type == igc_media_type_copper) { 1278 hw->phy.mdix = 0; /* AUTO_ALL_MODES */ 1279 hw->phy.disable_polarity_correction = 0; 1280 hw->phy.ms_type = igc_ms_hw_default; 1281 } 1282 1283 /* 1284 * Start from a known state, this is important in reading the nvm 1285 * and mac from that. 1286 */ 1287 igc_reset_hw(hw); 1288 1289 /* Make sure we have a good EEPROM before we read from it */ 1290 if (igc_validate_nvm_checksum(hw) < 0) { 1291 /* 1292 * Some PCI-E parts fail the first check due to 1293 * the link being in sleep state, call it again, 1294 * if it fails a second time its a real issue. 1295 */ 1296 if (igc_validate_nvm_checksum(hw) < 0) { 1297 PMD_INIT_LOG(ERR, "EEPROM checksum invalid"); 1298 error = -EIO; 1299 goto err_late; 1300 } 1301 } 1302 1303 /* Read the permanent MAC address out of the EEPROM */ 1304 if (igc_read_mac_addr(hw) != 0) { 1305 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address"); 1306 error = -EIO; 1307 goto err_late; 1308 } 1309 1310 /* Allocate memory for storing MAC addresses */ 1311 dev->data->mac_addrs = rte_zmalloc("igc", 1312 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0); 1313 if (dev->data->mac_addrs == NULL) { 1314 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC", 1315 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count); 1316 error = -ENOMEM; 1317 goto err_late; 1318 } 1319 1320 /* Copy the permanent MAC address */ 1321 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, 1322 &dev->data->mac_addrs[0]); 1323 1324 /* Now initialize the hardware */ 1325 if (igc_hardware_init(hw) != 0) { 1326 PMD_INIT_LOG(ERR, "Hardware initialization failed"); 1327 rte_free(dev->data->mac_addrs); 1328 dev->data->mac_addrs = NULL; 1329 error = -ENODEV; 1330 goto err_late; 1331 } 1332 1333 hw->mac.get_link_status = 1; 1334 igc->stopped = 0; 1335 1336 /* Indicate SOL/IDER usage */ 1337 if (igc_check_reset_block(hw) < 0) 1338 PMD_INIT_LOG(ERR, 1339 "PHY reset is blocked due to SOL/IDER session."); 1340 1341 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x", 1342 dev->data->port_id, pci_dev->id.vendor_id, 1343 pci_dev->id.device_id); 1344 1345 rte_intr_callback_register(&pci_dev->intr_handle, 1346 eth_igc_interrupt_handler, (void *)dev); 1347 1348 /* enable uio/vfio intr/eventfd mapping */ 1349 rte_intr_enable(&pci_dev->intr_handle); 1350 1351 /* enable support intr */ 1352 igc_intr_other_enable(dev); 1353 1354 /* initiate queue status */ 1355 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1356 igc->txq_stats_map[i] = -1; 1357 igc->rxq_stats_map[i] = -1; 1358 } 1359 1360 igc_flow_init(dev); 1361 igc_clear_all_filter(dev); 1362 return 0; 1363 1364 err_late: 1365 igc_hw_control_release(hw); 1366 return error; 1367 } 1368 1369 static int 1370 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev) 1371 { 1372 PMD_INIT_FUNC_TRACE(); 1373 eth_igc_close(eth_dev); 1374 return 0; 1375 } 1376 1377 static int 1378 eth_igc_reset(struct rte_eth_dev *dev) 1379 { 1380 int ret; 1381 1382 PMD_INIT_FUNC_TRACE(); 1383 1384 ret = eth_igc_dev_uninit(dev); 1385 if (ret) 1386 return ret; 1387 1388 return eth_igc_dev_init(dev); 1389 } 1390 1391 static int 1392 eth_igc_promiscuous_enable(struct rte_eth_dev *dev) 1393 { 1394 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1395 uint32_t rctl; 1396 1397 rctl = IGC_READ_REG(hw, IGC_RCTL); 1398 rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE); 1399 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1400 return 0; 1401 } 1402 1403 static int 1404 eth_igc_promiscuous_disable(struct rte_eth_dev *dev) 1405 { 1406 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1407 uint32_t rctl; 1408 1409 rctl = IGC_READ_REG(hw, IGC_RCTL); 1410 rctl &= (~IGC_RCTL_UPE); 1411 if (dev->data->all_multicast == 1) 1412 rctl |= IGC_RCTL_MPE; 1413 else 1414 rctl &= (~IGC_RCTL_MPE); 1415 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1416 return 0; 1417 } 1418 1419 static int 1420 eth_igc_allmulticast_enable(struct rte_eth_dev *dev) 1421 { 1422 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1423 uint32_t rctl; 1424 1425 rctl = IGC_READ_REG(hw, IGC_RCTL); 1426 rctl |= IGC_RCTL_MPE; 1427 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1428 return 0; 1429 } 1430 1431 static int 1432 eth_igc_allmulticast_disable(struct rte_eth_dev *dev) 1433 { 1434 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1435 uint32_t rctl; 1436 1437 if (dev->data->promiscuous == 1) 1438 return 0; /* must remain in all_multicast mode */ 1439 1440 rctl = IGC_READ_REG(hw, IGC_RCTL); 1441 rctl &= (~IGC_RCTL_MPE); 1442 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1443 return 0; 1444 } 1445 1446 static int 1447 eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 1448 size_t fw_size) 1449 { 1450 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1451 struct igc_fw_version fw; 1452 int ret; 1453 1454 igc_get_fw_version(hw, &fw); 1455 1456 /* if option rom is valid, display its version too */ 1457 if (fw.or_valid) { 1458 ret = snprintf(fw_version, fw_size, 1459 "%d.%d, 0x%08x, %d.%d.%d", 1460 fw.eep_major, fw.eep_minor, fw.etrack_id, 1461 fw.or_major, fw.or_build, fw.or_patch); 1462 /* no option rom */ 1463 } else { 1464 if (fw.etrack_id != 0X0000) { 1465 ret = snprintf(fw_version, fw_size, 1466 "%d.%d, 0x%08x", 1467 fw.eep_major, fw.eep_minor, 1468 fw.etrack_id); 1469 } else { 1470 ret = snprintf(fw_version, fw_size, 1471 "%d.%d.%d", 1472 fw.eep_major, fw.eep_minor, 1473 fw.eep_build); 1474 } 1475 } 1476 1477 ret += 1; /* add the size of '\0' */ 1478 if (fw_size < (u32)ret) 1479 return ret; 1480 else 1481 return 0; 1482 } 1483 1484 static int 1485 eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 1486 { 1487 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1488 1489 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ 1490 dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE; 1491 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 1492 dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL; 1493 dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL; 1494 dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; 1495 1496 dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM; 1497 dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM; 1498 dev_info->max_vmdq_pools = 0; 1499 1500 dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t); 1501 dev_info->reta_size = ETH_RSS_RETA_SIZE_128; 1502 dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL; 1503 1504 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1505 .rx_thresh = { 1506 .pthresh = IGC_DEFAULT_RX_PTHRESH, 1507 .hthresh = IGC_DEFAULT_RX_HTHRESH, 1508 .wthresh = IGC_DEFAULT_RX_WTHRESH, 1509 }, 1510 .rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH, 1511 .rx_drop_en = 0, 1512 .offloads = 0, 1513 }; 1514 1515 dev_info->default_txconf = (struct rte_eth_txconf) { 1516 .tx_thresh = { 1517 .pthresh = IGC_DEFAULT_TX_PTHRESH, 1518 .hthresh = IGC_DEFAULT_TX_HTHRESH, 1519 .wthresh = IGC_DEFAULT_TX_WTHRESH, 1520 }, 1521 .offloads = 0, 1522 }; 1523 1524 dev_info->rx_desc_lim = rx_desc_lim; 1525 dev_info->tx_desc_lim = tx_desc_lim; 1526 1527 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | 1528 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | 1529 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G; 1530 1531 dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD; 1532 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 1533 return 0; 1534 } 1535 1536 static int 1537 eth_igc_led_on(struct rte_eth_dev *dev) 1538 { 1539 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1540 1541 return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP; 1542 } 1543 1544 static int 1545 eth_igc_led_off(struct rte_eth_dev *dev) 1546 { 1547 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1548 1549 return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP; 1550 } 1551 1552 static const uint32_t * 1553 eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev) 1554 { 1555 static const uint32_t ptypes[] = { 1556 /* refers to rx_desc_pkt_info_to_pkt_type() */ 1557 RTE_PTYPE_L2_ETHER, 1558 RTE_PTYPE_L3_IPV4, 1559 RTE_PTYPE_L3_IPV4_EXT, 1560 RTE_PTYPE_L3_IPV6, 1561 RTE_PTYPE_L3_IPV6_EXT, 1562 RTE_PTYPE_L4_TCP, 1563 RTE_PTYPE_L4_UDP, 1564 RTE_PTYPE_L4_SCTP, 1565 RTE_PTYPE_TUNNEL_IP, 1566 RTE_PTYPE_INNER_L3_IPV6, 1567 RTE_PTYPE_INNER_L3_IPV6_EXT, 1568 RTE_PTYPE_INNER_L4_TCP, 1569 RTE_PTYPE_INNER_L4_UDP, 1570 RTE_PTYPE_UNKNOWN 1571 }; 1572 1573 return ptypes; 1574 } 1575 1576 static int 1577 eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1578 { 1579 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1580 uint32_t frame_size = mtu + IGC_ETH_OVERHEAD; 1581 uint32_t rctl; 1582 1583 /* if extend vlan has been enabled */ 1584 if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN) 1585 frame_size += VLAN_TAG_SIZE; 1586 1587 /* check that mtu is within the allowed range */ 1588 if (mtu < RTE_ETHER_MIN_MTU || 1589 frame_size > MAX_RX_JUMBO_FRAME_SIZE) 1590 return -EINVAL; 1591 1592 /* 1593 * If device is started, refuse mtu that requires the support of 1594 * scattered packets when this feature has not been enabled before. 1595 */ 1596 if (dev->data->dev_started && !dev->data->scattered_rx && 1597 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 1598 PMD_INIT_LOG(ERR, "Stop port first."); 1599 return -EINVAL; 1600 } 1601 1602 rctl = IGC_READ_REG(hw, IGC_RCTL); 1603 1604 /* switch to jumbo mode if needed */ 1605 if (mtu > RTE_ETHER_MTU) { 1606 dev->data->dev_conf.rxmode.offloads |= 1607 DEV_RX_OFFLOAD_JUMBO_FRAME; 1608 rctl |= IGC_RCTL_LPE; 1609 } else { 1610 dev->data->dev_conf.rxmode.offloads &= 1611 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 1612 rctl &= ~IGC_RCTL_LPE; 1613 } 1614 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1615 1616 /* update max frame size */ 1617 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 1618 1619 IGC_WRITE_REG(hw, IGC_RLPML, 1620 dev->data->dev_conf.rxmode.max_rx_pkt_len); 1621 1622 return 0; 1623 } 1624 1625 static int 1626 eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 1627 uint32_t index, uint32_t pool) 1628 { 1629 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1630 1631 igc_rar_set(hw, mac_addr->addr_bytes, index); 1632 RTE_SET_USED(pool); 1633 return 0; 1634 } 1635 1636 static void 1637 eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index) 1638 { 1639 uint8_t addr[RTE_ETHER_ADDR_LEN]; 1640 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1641 1642 memset(addr, 0, sizeof(addr)); 1643 igc_rar_set(hw, addr, index); 1644 } 1645 1646 static int 1647 eth_igc_default_mac_addr_set(struct rte_eth_dev *dev, 1648 struct rte_ether_addr *addr) 1649 { 1650 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1651 igc_rar_set(hw, addr->addr_bytes, 0); 1652 return 0; 1653 } 1654 1655 static int 1656 eth_igc_set_mc_addr_list(struct rte_eth_dev *dev, 1657 struct rte_ether_addr *mc_addr_set, 1658 uint32_t nb_mc_addr) 1659 { 1660 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1661 igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); 1662 return 0; 1663 } 1664 1665 /* 1666 * Read hardware registers 1667 */ 1668 static void 1669 igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats) 1670 { 1671 int pause_frames; 1672 1673 uint64_t old_gprc = stats->gprc; 1674 uint64_t old_gptc = stats->gptc; 1675 uint64_t old_tpr = stats->tpr; 1676 uint64_t old_tpt = stats->tpt; 1677 uint64_t old_rpthc = stats->rpthc; 1678 uint64_t old_hgptc = stats->hgptc; 1679 1680 stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS); 1681 stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC); 1682 stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC); 1683 stats->mpc += IGC_READ_REG(hw, IGC_MPC); 1684 stats->scc += IGC_READ_REG(hw, IGC_SCC); 1685 stats->ecol += IGC_READ_REG(hw, IGC_ECOL); 1686 1687 stats->mcc += IGC_READ_REG(hw, IGC_MCC); 1688 stats->latecol += IGC_READ_REG(hw, IGC_LATECOL); 1689 stats->colc += IGC_READ_REG(hw, IGC_COLC); 1690 1691 stats->dc += IGC_READ_REG(hw, IGC_DC); 1692 stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS); 1693 stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC); 1694 stats->rlec += IGC_READ_REG(hw, IGC_RLEC); 1695 stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC); 1696 stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC); 1697 1698 /* 1699 * For watchdog management we need to know if we have been 1700 * paused during the last interval, so capture that here. 1701 */ 1702 pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC); 1703 stats->xoffrxc += pause_frames; 1704 stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC); 1705 stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC); 1706 stats->prc64 += IGC_READ_REG(hw, IGC_PRC64); 1707 stats->prc127 += IGC_READ_REG(hw, IGC_PRC127); 1708 stats->prc255 += IGC_READ_REG(hw, IGC_PRC255); 1709 stats->prc511 += IGC_READ_REG(hw, IGC_PRC511); 1710 stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023); 1711 stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522); 1712 stats->gprc += IGC_READ_REG(hw, IGC_GPRC); 1713 stats->bprc += IGC_READ_REG(hw, IGC_BPRC); 1714 stats->mprc += IGC_READ_REG(hw, IGC_MPRC); 1715 stats->gptc += IGC_READ_REG(hw, IGC_GPTC); 1716 1717 /* For the 64-bit byte counters the low dword must be read first. */ 1718 /* Both registers clear on the read of the high dword */ 1719 1720 /* Workaround CRC bytes included in size, take away 4 bytes/packet */ 1721 stats->gorc += IGC_READ_REG(hw, IGC_GORCL); 1722 stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32); 1723 stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN; 1724 stats->gotc += IGC_READ_REG(hw, IGC_GOTCL); 1725 stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32); 1726 stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN; 1727 1728 stats->rnbc += IGC_READ_REG(hw, IGC_RNBC); 1729 stats->ruc += IGC_READ_REG(hw, IGC_RUC); 1730 stats->rfc += IGC_READ_REG(hw, IGC_RFC); 1731 stats->roc += IGC_READ_REG(hw, IGC_ROC); 1732 stats->rjc += IGC_READ_REG(hw, IGC_RJC); 1733 1734 stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC); 1735 stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC); 1736 stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC); 1737 stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC); 1738 stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC); 1739 stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC); 1740 stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC); 1741 1742 stats->tpr += IGC_READ_REG(hw, IGC_TPR); 1743 stats->tpt += IGC_READ_REG(hw, IGC_TPT); 1744 1745 stats->tor += IGC_READ_REG(hw, IGC_TORL); 1746 stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32); 1747 stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 1748 stats->tot += IGC_READ_REG(hw, IGC_TOTL); 1749 stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32); 1750 stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN; 1751 1752 stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64); 1753 stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127); 1754 stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255); 1755 stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511); 1756 stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023); 1757 stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522); 1758 stats->mptc += IGC_READ_REG(hw, IGC_MPTC); 1759 stats->bptc += IGC_READ_REG(hw, IGC_BPTC); 1760 stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC); 1761 1762 stats->iac += IGC_READ_REG(hw, IGC_IAC); 1763 stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC); 1764 stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC); 1765 stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC); 1766 1767 /* Host to Card Statistics */ 1768 stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL); 1769 stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32); 1770 stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN; 1771 stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL); 1772 stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32); 1773 stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN; 1774 stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS); 1775 } 1776 1777 /* 1778 * Write 0 to all queue status registers 1779 */ 1780 static void 1781 igc_reset_queue_stats_register(struct igc_hw *hw) 1782 { 1783 int i; 1784 1785 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1786 IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0); 1787 IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0); 1788 IGC_WRITE_REG(hw, IGC_PQGORC(i), 0); 1789 IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0); 1790 IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0); 1791 IGC_WRITE_REG(hw, IGC_RQDPC(i), 0); 1792 IGC_WRITE_REG(hw, IGC_TQDPC(i), 0); 1793 } 1794 } 1795 1796 /* 1797 * Read all hardware queue status registers 1798 */ 1799 static void 1800 igc_read_queue_stats_register(struct rte_eth_dev *dev) 1801 { 1802 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1803 struct igc_hw_queue_stats *queue_stats = 1804 IGC_DEV_PRIVATE_QUEUE_STATS(dev); 1805 int i; 1806 1807 /* 1808 * This register is not cleared on read. Furthermore, the register wraps 1809 * around back to 0x00000000 on the next increment when reaching a value 1810 * of 0xFFFFFFFF and then continues normal count operation. 1811 */ 1812 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1813 union { 1814 u64 ddword; 1815 u32 dword[2]; 1816 } value; 1817 u32 tmp; 1818 1819 /* 1820 * Read the register first, if the value is smaller than that 1821 * previous read, that mean the register has been overflowed, 1822 * then we add the high 4 bytes by 1 and replace the low 4 1823 * bytes by the new value. 1824 */ 1825 tmp = IGC_READ_REG(hw, IGC_PQGPRC(i)); 1826 value.ddword = queue_stats->pqgprc[i]; 1827 if (value.dword[U32_0_IN_U64] > tmp) 1828 value.dword[U32_1_IN_U64]++; 1829 value.dword[U32_0_IN_U64] = tmp; 1830 queue_stats->pqgprc[i] = value.ddword; 1831 1832 tmp = IGC_READ_REG(hw, IGC_PQGPTC(i)); 1833 value.ddword = queue_stats->pqgptc[i]; 1834 if (value.dword[U32_0_IN_U64] > tmp) 1835 value.dword[U32_1_IN_U64]++; 1836 value.dword[U32_0_IN_U64] = tmp; 1837 queue_stats->pqgptc[i] = value.ddword; 1838 1839 tmp = IGC_READ_REG(hw, IGC_PQGORC(i)); 1840 value.ddword = queue_stats->pqgorc[i]; 1841 if (value.dword[U32_0_IN_U64] > tmp) 1842 value.dword[U32_1_IN_U64]++; 1843 value.dword[U32_0_IN_U64] = tmp; 1844 queue_stats->pqgorc[i] = value.ddword; 1845 1846 tmp = IGC_READ_REG(hw, IGC_PQGOTC(i)); 1847 value.ddword = queue_stats->pqgotc[i]; 1848 if (value.dword[U32_0_IN_U64] > tmp) 1849 value.dword[U32_1_IN_U64]++; 1850 value.dword[U32_0_IN_U64] = tmp; 1851 queue_stats->pqgotc[i] = value.ddword; 1852 1853 tmp = IGC_READ_REG(hw, IGC_PQMPRC(i)); 1854 value.ddword = queue_stats->pqmprc[i]; 1855 if (value.dword[U32_0_IN_U64] > tmp) 1856 value.dword[U32_1_IN_U64]++; 1857 value.dword[U32_0_IN_U64] = tmp; 1858 queue_stats->pqmprc[i] = value.ddword; 1859 1860 tmp = IGC_READ_REG(hw, IGC_RQDPC(i)); 1861 value.ddword = queue_stats->rqdpc[i]; 1862 if (value.dword[U32_0_IN_U64] > tmp) 1863 value.dword[U32_1_IN_U64]++; 1864 value.dword[U32_0_IN_U64] = tmp; 1865 queue_stats->rqdpc[i] = value.ddword; 1866 1867 tmp = IGC_READ_REG(hw, IGC_TQDPC(i)); 1868 value.ddword = queue_stats->tqdpc[i]; 1869 if (value.dword[U32_0_IN_U64] > tmp) 1870 value.dword[U32_1_IN_U64]++; 1871 value.dword[U32_0_IN_U64] = tmp; 1872 queue_stats->tqdpc[i] = value.ddword; 1873 } 1874 } 1875 1876 static int 1877 eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 1878 { 1879 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); 1880 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1881 struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev); 1882 struct igc_hw_queue_stats *queue_stats = 1883 IGC_DEV_PRIVATE_QUEUE_STATS(dev); 1884 int i; 1885 1886 /* 1887 * Cancel status handler since it will read the queue status registers 1888 */ 1889 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev); 1890 1891 /* Read status register */ 1892 igc_read_queue_stats_register(dev); 1893 igc_read_stats_registers(hw, stats); 1894 1895 if (rte_stats == NULL) { 1896 /* Restart queue status handler */ 1897 rte_eal_alarm_set(IGC_ALARM_INTERVAL, 1898 igc_update_queue_stats_handler, dev); 1899 return -EINVAL; 1900 } 1901 1902 /* Rx Errors */ 1903 rte_stats->imissed = stats->mpc; 1904 rte_stats->ierrors = stats->crcerrs + 1905 stats->rlec + stats->ruc + stats->roc + 1906 stats->rxerrc + stats->algnerrc; 1907 1908 /* Tx Errors */ 1909 rte_stats->oerrors = stats->ecol + stats->latecol; 1910 1911 rte_stats->ipackets = stats->gprc; 1912 rte_stats->opackets = stats->gptc; 1913 rte_stats->ibytes = stats->gorc; 1914 rte_stats->obytes = stats->gotc; 1915 1916 /* Get per-queue statuses */ 1917 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1918 /* GET TX queue statuses */ 1919 int map_id = igc->txq_stats_map[i]; 1920 if (map_id >= 0) { 1921 rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i]; 1922 rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i]; 1923 } 1924 /* Get RX queue statuses */ 1925 map_id = igc->rxq_stats_map[i]; 1926 if (map_id >= 0) { 1927 rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i]; 1928 rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i]; 1929 rte_stats->q_errors[map_id] += queue_stats->rqdpc[i]; 1930 } 1931 } 1932 1933 /* Restart queue status handler */ 1934 rte_eal_alarm_set(IGC_ALARM_INTERVAL, 1935 igc_update_queue_stats_handler, dev); 1936 return 0; 1937 } 1938 1939 static int 1940 eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1941 unsigned int n) 1942 { 1943 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1944 struct igc_hw_stats *hw_stats = 1945 IGC_DEV_PRIVATE_STATS(dev); 1946 unsigned int i; 1947 1948 igc_read_stats_registers(hw, hw_stats); 1949 1950 if (n < IGC_NB_XSTATS) 1951 return IGC_NB_XSTATS; 1952 1953 /* If this is a reset xstats is NULL, and we have cleared the 1954 * registers by reading them. 1955 */ 1956 if (!xstats) 1957 return 0; 1958 1959 /* Extended stats */ 1960 for (i = 0; i < IGC_NB_XSTATS; i++) { 1961 xstats[i].id = i; 1962 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 1963 rte_igc_stats_strings[i].offset); 1964 } 1965 1966 return IGC_NB_XSTATS; 1967 } 1968 1969 static int 1970 eth_igc_xstats_reset(struct rte_eth_dev *dev) 1971 { 1972 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1973 struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev); 1974 struct igc_hw_queue_stats *queue_stats = 1975 IGC_DEV_PRIVATE_QUEUE_STATS(dev); 1976 1977 /* Cancel queue status handler for avoid conflict */ 1978 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev); 1979 1980 /* HW registers are cleared on read */ 1981 igc_reset_queue_stats_register(hw); 1982 igc_read_stats_registers(hw, hw_stats); 1983 1984 /* Reset software totals */ 1985 memset(hw_stats, 0, sizeof(*hw_stats)); 1986 memset(queue_stats, 0, sizeof(*queue_stats)); 1987 1988 /* Restart the queue status handler */ 1989 rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler, 1990 dev); 1991 1992 return 0; 1993 } 1994 1995 static int 1996 eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1997 struct rte_eth_xstat_name *xstats_names, unsigned int size) 1998 { 1999 unsigned int i; 2000 2001 if (xstats_names == NULL) 2002 return IGC_NB_XSTATS; 2003 2004 if (size < IGC_NB_XSTATS) { 2005 PMD_DRV_LOG(ERR, "not enough buffers!"); 2006 return IGC_NB_XSTATS; 2007 } 2008 2009 for (i = 0; i < IGC_NB_XSTATS; i++) 2010 strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name, 2011 sizeof(xstats_names[i].name)); 2012 2013 return IGC_NB_XSTATS; 2014 } 2015 2016 static int 2017 eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev, 2018 struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, 2019 unsigned int limit) 2020 { 2021 unsigned int i; 2022 2023 if (!ids) 2024 return eth_igc_xstats_get_names(dev, xstats_names, limit); 2025 2026 for (i = 0; i < limit; i++) { 2027 if (ids[i] >= IGC_NB_XSTATS) { 2028 PMD_DRV_LOG(ERR, "id value isn't valid"); 2029 return -EINVAL; 2030 } 2031 strlcpy(xstats_names[i].name, 2032 rte_igc_stats_strings[ids[i]].name, 2033 sizeof(xstats_names[i].name)); 2034 } 2035 return limit; 2036 } 2037 2038 static int 2039 eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 2040 uint64_t *values, unsigned int n) 2041 { 2042 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2043 struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev); 2044 unsigned int i; 2045 2046 igc_read_stats_registers(hw, hw_stats); 2047 2048 if (!ids) { 2049 if (n < IGC_NB_XSTATS) 2050 return IGC_NB_XSTATS; 2051 2052 /* If this is a reset xstats is NULL, and we have cleared the 2053 * registers by reading them. 2054 */ 2055 if (!values) 2056 return 0; 2057 2058 /* Extended stats */ 2059 for (i = 0; i < IGC_NB_XSTATS; i++) 2060 values[i] = *(uint64_t *)(((char *)hw_stats) + 2061 rte_igc_stats_strings[i].offset); 2062 2063 return IGC_NB_XSTATS; 2064 2065 } else { 2066 for (i = 0; i < n; i++) { 2067 if (ids[i] >= IGC_NB_XSTATS) { 2068 PMD_DRV_LOG(ERR, "id value isn't valid"); 2069 return -EINVAL; 2070 } 2071 values[i] = *(uint64_t *)(((char *)hw_stats) + 2072 rte_igc_stats_strings[ids[i]].offset); 2073 } 2074 return n; 2075 } 2076 } 2077 2078 static int 2079 eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev, 2080 uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx) 2081 { 2082 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); 2083 2084 /* check queue id is valid */ 2085 if (queue_id >= IGC_QUEUE_PAIRS_NUM) { 2086 PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u", 2087 queue_id, IGC_QUEUE_PAIRS_NUM - 1); 2088 return -EINVAL; 2089 } 2090 2091 /* store the mapping status id */ 2092 if (is_rx) 2093 igc->rxq_stats_map[queue_id] = stat_idx; 2094 else 2095 igc->txq_stats_map[queue_id] = stat_idx; 2096 2097 return 0; 2098 } 2099 2100 static int 2101 eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 2102 { 2103 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2104 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2105 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2106 uint32_t vec = IGC_MISC_VEC_ID; 2107 2108 if (rte_intr_allow_others(intr_handle)) 2109 vec = IGC_RX_VEC_START; 2110 2111 uint32_t mask = 1u << (queue_id + vec); 2112 2113 IGC_WRITE_REG(hw, IGC_EIMC, mask); 2114 IGC_WRITE_FLUSH(hw); 2115 2116 return 0; 2117 } 2118 2119 static int 2120 eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 2121 { 2122 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2123 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2124 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2125 uint32_t vec = IGC_MISC_VEC_ID; 2126 2127 if (rte_intr_allow_others(intr_handle)) 2128 vec = IGC_RX_VEC_START; 2129 2130 uint32_t mask = 1u << (queue_id + vec); 2131 2132 IGC_WRITE_REG(hw, IGC_EIMS, mask); 2133 IGC_WRITE_FLUSH(hw); 2134 2135 rte_intr_enable(intr_handle); 2136 2137 return 0; 2138 } 2139 2140 static int 2141 eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2142 { 2143 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2144 uint32_t ctrl; 2145 int tx_pause; 2146 int rx_pause; 2147 2148 fc_conf->pause_time = hw->fc.pause_time; 2149 fc_conf->high_water = hw->fc.high_water; 2150 fc_conf->low_water = hw->fc.low_water; 2151 fc_conf->send_xon = hw->fc.send_xon; 2152 fc_conf->autoneg = hw->mac.autoneg; 2153 2154 /* 2155 * Return rx_pause and tx_pause status according to actual setting of 2156 * the TFCE and RFCE bits in the CTRL register. 2157 */ 2158 ctrl = IGC_READ_REG(hw, IGC_CTRL); 2159 if (ctrl & IGC_CTRL_TFCE) 2160 tx_pause = 1; 2161 else 2162 tx_pause = 0; 2163 2164 if (ctrl & IGC_CTRL_RFCE) 2165 rx_pause = 1; 2166 else 2167 rx_pause = 0; 2168 2169 if (rx_pause && tx_pause) 2170 fc_conf->mode = RTE_FC_FULL; 2171 else if (rx_pause) 2172 fc_conf->mode = RTE_FC_RX_PAUSE; 2173 else if (tx_pause) 2174 fc_conf->mode = RTE_FC_TX_PAUSE; 2175 else 2176 fc_conf->mode = RTE_FC_NONE; 2177 2178 return 0; 2179 } 2180 2181 static int 2182 eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 2183 { 2184 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2185 uint32_t rx_buf_size; 2186 uint32_t max_high_water; 2187 uint32_t rctl; 2188 int err; 2189 2190 if (fc_conf->autoneg != hw->mac.autoneg) 2191 return -ENOTSUP; 2192 2193 rx_buf_size = igc_get_rx_buffer_size(hw); 2194 PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 2195 2196 /* At least reserve one Ethernet frame for watermark */ 2197 max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN; 2198 if (fc_conf->high_water > max_high_water || 2199 fc_conf->high_water < fc_conf->low_water) { 2200 PMD_DRV_LOG(ERR, 2201 "Incorrect high(%u)/low(%u) water value, max is %u", 2202 fc_conf->high_water, fc_conf->low_water, 2203 max_high_water); 2204 return -EINVAL; 2205 } 2206 2207 switch (fc_conf->mode) { 2208 case RTE_FC_NONE: 2209 hw->fc.requested_mode = igc_fc_none; 2210 break; 2211 case RTE_FC_RX_PAUSE: 2212 hw->fc.requested_mode = igc_fc_rx_pause; 2213 break; 2214 case RTE_FC_TX_PAUSE: 2215 hw->fc.requested_mode = igc_fc_tx_pause; 2216 break; 2217 case RTE_FC_FULL: 2218 hw->fc.requested_mode = igc_fc_full; 2219 break; 2220 default: 2221 PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode); 2222 return -EINVAL; 2223 } 2224 2225 hw->fc.pause_time = fc_conf->pause_time; 2226 hw->fc.high_water = fc_conf->high_water; 2227 hw->fc.low_water = fc_conf->low_water; 2228 hw->fc.send_xon = fc_conf->send_xon; 2229 2230 err = igc_setup_link_generic(hw); 2231 if (err == IGC_SUCCESS) { 2232 /** 2233 * check if we want to forward MAC frames - driver doesn't have 2234 * native capability to do that, so we'll write the registers 2235 * ourselves 2236 **/ 2237 rctl = IGC_READ_REG(hw, IGC_RCTL); 2238 2239 /* set or clear MFLCN.PMCF bit depending on configuration */ 2240 if (fc_conf->mac_ctrl_frame_fwd != 0) 2241 rctl |= IGC_RCTL_PMCF; 2242 else 2243 rctl &= ~IGC_RCTL_PMCF; 2244 2245 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 2246 IGC_WRITE_FLUSH(hw); 2247 2248 return 0; 2249 } 2250 2251 PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err); 2252 return -EIO; 2253 } 2254 2255 static int 2256 eth_igc_rss_reta_update(struct rte_eth_dev *dev, 2257 struct rte_eth_rss_reta_entry64 *reta_conf, 2258 uint16_t reta_size) 2259 { 2260 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2261 uint16_t i; 2262 2263 if (reta_size != ETH_RSS_RETA_SIZE_128) { 2264 PMD_DRV_LOG(ERR, 2265 "The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)", 2266 reta_size, ETH_RSS_RETA_SIZE_128); 2267 return -EINVAL; 2268 } 2269 2270 RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE); 2271 2272 /* set redirection table */ 2273 for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) { 2274 union igc_rss_reta_reg reta, reg; 2275 uint16_t idx, shift; 2276 uint8_t j, mask; 2277 2278 idx = i / RTE_RETA_GROUP_SIZE; 2279 shift = i % RTE_RETA_GROUP_SIZE; 2280 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 2281 IGC_RSS_RDT_REG_SIZE_MASK); 2282 2283 /* if no need to update the register */ 2284 if (!mask || 2285 shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE)) 2286 continue; 2287 2288 /* check mask whether need to read the register value first */ 2289 if (mask == IGC_RSS_RDT_REG_SIZE_MASK) 2290 reg.dword = 0; 2291 else 2292 reg.dword = IGC_READ_REG_LE_VALUE(hw, 2293 IGC_RETA(i / IGC_RSS_RDT_REG_SIZE)); 2294 2295 /* update the register */ 2296 RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE); 2297 for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) { 2298 if (mask & (1u << j)) 2299 reta.bytes[j] = 2300 (uint8_t)reta_conf[idx].reta[shift + j]; 2301 else 2302 reta.bytes[j] = reg.bytes[j]; 2303 } 2304 IGC_WRITE_REG_LE_VALUE(hw, 2305 IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword); 2306 } 2307 2308 return 0; 2309 } 2310 2311 static int 2312 eth_igc_rss_reta_query(struct rte_eth_dev *dev, 2313 struct rte_eth_rss_reta_entry64 *reta_conf, 2314 uint16_t reta_size) 2315 { 2316 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2317 uint16_t i; 2318 2319 if (reta_size != ETH_RSS_RETA_SIZE_128) { 2320 PMD_DRV_LOG(ERR, 2321 "The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)", 2322 reta_size, ETH_RSS_RETA_SIZE_128); 2323 return -EINVAL; 2324 } 2325 2326 RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE); 2327 2328 /* read redirection table */ 2329 for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) { 2330 union igc_rss_reta_reg reta; 2331 uint16_t idx, shift; 2332 uint8_t j, mask; 2333 2334 idx = i / RTE_RETA_GROUP_SIZE; 2335 shift = i % RTE_RETA_GROUP_SIZE; 2336 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 2337 IGC_RSS_RDT_REG_SIZE_MASK); 2338 2339 /* if no need to read register */ 2340 if (!mask || 2341 shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE)) 2342 continue; 2343 2344 /* read register and get the queue index */ 2345 RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE); 2346 reta.dword = IGC_READ_REG_LE_VALUE(hw, 2347 IGC_RETA(i / IGC_RSS_RDT_REG_SIZE)); 2348 for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) { 2349 if (mask & (1u << j)) 2350 reta_conf[idx].reta[shift + j] = reta.bytes[j]; 2351 } 2352 } 2353 2354 return 0; 2355 } 2356 2357 static int 2358 eth_igc_rss_hash_update(struct rte_eth_dev *dev, 2359 struct rte_eth_rss_conf *rss_conf) 2360 { 2361 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2362 igc_hw_rss_hash_set(hw, rss_conf); 2363 return 0; 2364 } 2365 2366 static int 2367 eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev, 2368 struct rte_eth_rss_conf *rss_conf) 2369 { 2370 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2371 uint32_t *hash_key = (uint32_t *)rss_conf->rss_key; 2372 uint32_t mrqc; 2373 uint64_t rss_hf; 2374 2375 if (hash_key != NULL) { 2376 int i; 2377 2378 /* if not enough space for store hash key */ 2379 if (rss_conf->rss_key_len != IGC_HKEY_SIZE) { 2380 PMD_DRV_LOG(ERR, 2381 "RSS hash key size %u in parameter doesn't match the hardware hash key size %u", 2382 rss_conf->rss_key_len, IGC_HKEY_SIZE); 2383 return -EINVAL; 2384 } 2385 2386 /* read RSS key from register */ 2387 for (i = 0; i < IGC_HKEY_MAX_INDEX; i++) 2388 hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i)); 2389 } 2390 2391 /* get RSS functions configured in MRQC register */ 2392 mrqc = IGC_READ_REG(hw, IGC_MRQC); 2393 if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0) 2394 return 0; 2395 2396 rss_hf = 0; 2397 if (mrqc & IGC_MRQC_RSS_FIELD_IPV4) 2398 rss_hf |= ETH_RSS_IPV4; 2399 if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP) 2400 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 2401 if (mrqc & IGC_MRQC_RSS_FIELD_IPV6) 2402 rss_hf |= ETH_RSS_IPV6; 2403 if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX) 2404 rss_hf |= ETH_RSS_IPV6_EX; 2405 if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP) 2406 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 2407 if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX) 2408 rss_hf |= ETH_RSS_IPV6_TCP_EX; 2409 if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP) 2410 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; 2411 if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP) 2412 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; 2413 if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX) 2414 rss_hf |= ETH_RSS_IPV6_UDP_EX; 2415 2416 rss_conf->rss_hf |= rss_hf; 2417 return 0; 2418 } 2419 2420 static int 2421 eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 2422 { 2423 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2424 struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev); 2425 uint32_t vfta; 2426 uint32_t vid_idx; 2427 uint32_t vid_bit; 2428 2429 vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK; 2430 vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK); 2431 vfta = shadow_vfta->vfta[vid_idx]; 2432 if (on) 2433 vfta |= vid_bit; 2434 else 2435 vfta &= ~vid_bit; 2436 IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta); 2437 2438 /* update local VFTA copy */ 2439 shadow_vfta->vfta[vid_idx] = vfta; 2440 2441 return 0; 2442 } 2443 2444 static void 2445 igc_vlan_hw_filter_disable(struct rte_eth_dev *dev) 2446 { 2447 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2448 igc_read_reg_check_clear_bits(hw, IGC_RCTL, 2449 IGC_RCTL_CFIEN | IGC_RCTL_VFE); 2450 } 2451 2452 static void 2453 igc_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2454 { 2455 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2456 struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev); 2457 uint32_t reg_val; 2458 int i; 2459 2460 /* Filter Table Enable, CFI not used for packet acceptance */ 2461 reg_val = IGC_READ_REG(hw, IGC_RCTL); 2462 reg_val &= ~IGC_RCTL_CFIEN; 2463 reg_val |= IGC_RCTL_VFE; 2464 IGC_WRITE_REG(hw, IGC_RCTL, reg_val); 2465 2466 /* restore VFTA table */ 2467 for (i = 0; i < IGC_VFTA_SIZE; i++) 2468 IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]); 2469 } 2470 2471 static void 2472 igc_vlan_hw_strip_disable(struct rte_eth_dev *dev) 2473 { 2474 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2475 2476 igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME); 2477 } 2478 2479 static void 2480 igc_vlan_hw_strip_enable(struct rte_eth_dev *dev) 2481 { 2482 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2483 2484 igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME); 2485 } 2486 2487 static int 2488 igc_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2489 { 2490 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2491 uint32_t ctrl_ext; 2492 2493 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); 2494 2495 /* if extend vlan hasn't been enabled */ 2496 if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0) 2497 return 0; 2498 2499 if ((dev->data->dev_conf.rxmode.offloads & 2500 DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) 2501 goto write_ext_vlan; 2502 2503 /* Update maximum packet length */ 2504 if (dev->data->dev_conf.rxmode.max_rx_pkt_len < 2505 RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) { 2506 PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u", 2507 dev->data->dev_conf.rxmode.max_rx_pkt_len, 2508 VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU); 2509 return -EINVAL; 2510 } 2511 dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE; 2512 IGC_WRITE_REG(hw, IGC_RLPML, 2513 dev->data->dev_conf.rxmode.max_rx_pkt_len); 2514 2515 write_ext_vlan: 2516 IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN); 2517 return 0; 2518 } 2519 2520 static int 2521 igc_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2522 { 2523 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2524 uint32_t ctrl_ext; 2525 2526 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); 2527 2528 /* if extend vlan has been enabled */ 2529 if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) 2530 return 0; 2531 2532 if ((dev->data->dev_conf.rxmode.offloads & 2533 DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) 2534 goto write_ext_vlan; 2535 2536 /* Update maximum packet length */ 2537 if (dev->data->dev_conf.rxmode.max_rx_pkt_len > 2538 MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) { 2539 PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u", 2540 dev->data->dev_conf.rxmode.max_rx_pkt_len + 2541 VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE); 2542 return -EINVAL; 2543 } 2544 dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE; 2545 IGC_WRITE_REG(hw, IGC_RLPML, 2546 dev->data->dev_conf.rxmode.max_rx_pkt_len); 2547 2548 write_ext_vlan: 2549 IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN); 2550 return 0; 2551 } 2552 2553 static int 2554 eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2555 { 2556 struct rte_eth_rxmode *rxmode; 2557 2558 rxmode = &dev->data->dev_conf.rxmode; 2559 if (mask & ETH_VLAN_STRIP_MASK) { 2560 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2561 igc_vlan_hw_strip_enable(dev); 2562 else 2563 igc_vlan_hw_strip_disable(dev); 2564 } 2565 2566 if (mask & ETH_VLAN_FILTER_MASK) { 2567 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 2568 igc_vlan_hw_filter_enable(dev); 2569 else 2570 igc_vlan_hw_filter_disable(dev); 2571 } 2572 2573 if (mask & ETH_VLAN_EXTEND_MASK) { 2574 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2575 return igc_vlan_hw_extend_enable(dev); 2576 else 2577 return igc_vlan_hw_extend_disable(dev); 2578 } 2579 2580 return 0; 2581 } 2582 2583 static int 2584 eth_igc_vlan_tpid_set(struct rte_eth_dev *dev, 2585 enum rte_vlan_type vlan_type, 2586 uint16_t tpid) 2587 { 2588 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 2589 uint32_t reg_val; 2590 2591 /* only outer TPID of double VLAN can be configured*/ 2592 if (vlan_type == ETH_VLAN_TYPE_OUTER) { 2593 reg_val = IGC_READ_REG(hw, IGC_VET); 2594 reg_val = (reg_val & (~IGC_VET_EXT)) | 2595 ((uint32_t)tpid << IGC_VET_EXT_SHIFT); 2596 IGC_WRITE_REG(hw, IGC_VET, reg_val); 2597 2598 return 0; 2599 } 2600 2601 /* all other TPID values are read-only*/ 2602 PMD_DRV_LOG(ERR, "Not supported"); 2603 return -ENOTSUP; 2604 } 2605 2606 static int 2607 eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2608 struct rte_pci_device *pci_dev) 2609 { 2610 PMD_INIT_FUNC_TRACE(); 2611 return rte_eth_dev_pci_generic_probe(pci_dev, 2612 sizeof(struct igc_adapter), eth_igc_dev_init); 2613 } 2614 2615 static int 2616 eth_igc_pci_remove(struct rte_pci_device *pci_dev) 2617 { 2618 PMD_INIT_FUNC_TRACE(); 2619 return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit); 2620 } 2621 2622 static struct rte_pci_driver rte_igc_pmd = { 2623 .id_table = pci_id_igc_map, 2624 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2625 .probe = eth_igc_pci_probe, 2626 .remove = eth_igc_pci_remove, 2627 }; 2628 2629 RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd); 2630 RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map); 2631 RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci"); 2632