1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2019-2020 Intel Corporation 3 */ 4 5 #include <stdint.h> 6 #include <string.h> 7 8 #include <rte_string_fns.h> 9 #include <rte_pci.h> 10 #include <rte_bus_pci.h> 11 #include <ethdev_driver.h> 12 #include <ethdev_pci.h> 13 #include <rte_malloc.h> 14 #include <rte_alarm.h> 15 16 #include "igc_logs.h" 17 #include "igc_txrx.h" 18 #include "igc_filter.h" 19 #include "igc_flow.h" 20 21 #define IGC_INTEL_VENDOR_ID 0x8086 22 23 #define IGC_FC_PAUSE_TIME 0x0680 24 #define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ 25 #define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ 26 27 #define IGC_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET 28 #define IGC_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET 29 #define IGC_MSIX_OTHER_INTR_VEC 0 /* MSI-X other interrupt vector */ 30 #define IGC_FLAG_NEED_LINK_UPDATE (1u << 0) /* need update link */ 31 32 #define IGC_DEFAULT_RX_FREE_THRESH 32 33 34 #define IGC_DEFAULT_RX_PTHRESH 8 35 #define IGC_DEFAULT_RX_HTHRESH 8 36 #define IGC_DEFAULT_RX_WTHRESH 4 37 38 #define IGC_DEFAULT_TX_PTHRESH 8 39 #define IGC_DEFAULT_TX_HTHRESH 1 40 #define IGC_DEFAULT_TX_WTHRESH 16 41 42 /* MSI-X other interrupt vector */ 43 #define IGC_MSIX_OTHER_INTR_VEC 0 44 45 /* External VLAN Enable bit mask */ 46 #define IGC_CTRL_EXT_EXT_VLAN (1u << 26) 47 48 /* Speed select */ 49 #define IGC_CTRL_SPEED_MASK (7u << 8) 50 #define IGC_CTRL_SPEED_2500 (6u << 8) 51 52 /* External VLAN Ether Type bit mask and shift */ 53 #define IGC_VET_EXT 0xFFFF0000 54 #define IGC_VET_EXT_SHIFT 16 55 56 /* Force EEE Auto-negotiation */ 57 #define IGC_EEER_EEE_FRC_AN (1u << 28) 58 59 /* Per Queue Good Packets Received Count */ 60 #define IGC_PQGPRC(idx) (0x10010 + 0x100 * (idx)) 61 /* Per Queue Good Octets Received Count */ 62 #define IGC_PQGORC(idx) (0x10018 + 0x100 * (idx)) 63 /* Per Queue Good Octets Transmitted Count */ 64 #define IGC_PQGOTC(idx) (0x10034 + 0x100 * (idx)) 65 /* Per Queue Multicast Packets Received Count */ 66 #define IGC_PQMPRC(idx) (0x10038 + 0x100 * (idx)) 67 /* Transmit Queue Drop Packet Count */ 68 #define IGC_TQDPC(idx) (0xe030 + 0x40 * (idx)) 69 70 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 71 #define U32_0_IN_U64 0 /* lower bytes of u64 */ 72 #define U32_1_IN_U64 1 /* higher bytes of u64 */ 73 #else 74 #define U32_0_IN_U64 1 75 #define U32_1_IN_U64 0 76 #endif 77 78 #define IGC_ALARM_INTERVAL 8000000u 79 /* us, about 13.6s some per-queue registers will wrap around back to 0. 
*/ 80 81 static const struct rte_eth_desc_lim rx_desc_lim = { 82 .nb_max = IGC_MAX_RXD, 83 .nb_min = IGC_MIN_RXD, 84 .nb_align = IGC_RXD_ALIGN, 85 }; 86 87 static const struct rte_eth_desc_lim tx_desc_lim = { 88 .nb_max = IGC_MAX_TXD, 89 .nb_min = IGC_MIN_TXD, 90 .nb_align = IGC_TXD_ALIGN, 91 .nb_seg_max = IGC_TX_MAX_SEG, 92 .nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG, 93 }; 94 95 static const struct rte_pci_id pci_id_igc_map[] = { 96 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) }, 97 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) }, 98 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) }, 99 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) }, 100 { .vendor_id = 0, /* sentinel */ }, 101 }; 102 103 /* store statistics names and its offset in stats structure */ 104 struct rte_igc_xstats_name_off { 105 char name[RTE_ETH_XSTATS_NAME_SIZE]; 106 unsigned int offset; 107 }; 108 109 static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = { 110 {"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)}, 111 {"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)}, 112 {"rx_errors", offsetof(struct igc_hw_stats, rxerrc)}, 113 {"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)}, 114 {"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)}, 115 {"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)}, 116 {"tx_excessive_collision_packets", offsetof(struct igc_hw_stats, 117 ecol)}, 118 {"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)}, 119 {"tx_total_collisions", offsetof(struct igc_hw_stats, colc)}, 120 {"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)}, 121 {"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)}, 122 {"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)}, 123 {"rx_length_errors", offsetof(struct igc_hw_stats, rlec)}, 124 {"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)}, 125 {"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)}, 126 {"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)}, 127 {"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)}, 128 {"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats, 129 fcruc)}, 130 {"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)}, 131 {"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)}, 132 {"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)}, 133 {"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)}, 134 {"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats, 135 prc1023)}, 136 {"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats, 137 prc1522)}, 138 {"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)}, 139 {"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)}, 140 {"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)}, 141 {"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)}, 142 {"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)}, 143 {"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)}, 144 {"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)}, 145 {"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)}, 146 {"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)}, 147 {"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)}, 148 {"rx_total_packets", offsetof(struct igc_hw_stats, tpr)}, 149 {"tx_total_packets", offsetof(struct igc_hw_stats, tpt)}, 150 {"rx_total_bytes", offsetof(struct igc_hw_stats, tor)}, 151 {"tx_total_bytes", offsetof(struct 
igc_hw_stats, tot)}, 152 {"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)}, 153 {"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)}, 154 {"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)}, 155 {"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)}, 156 {"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats, 157 ptc1023)}, 158 {"tx_size_1023_to_max_packets", offsetof(struct igc_hw_stats, 159 ptc1522)}, 160 {"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)}, 161 {"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)}, 162 {"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)}, 163 {"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)}, 164 {"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)}, 165 {"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)}, 166 {"rx_descriptor_lower_threshold", 167 offsetof(struct igc_hw_stats, icrxdmtc)}, 168 }; 169 170 #define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \ 171 sizeof(rte_igc_stats_strings[0])) 172 173 static int eth_igc_configure(struct rte_eth_dev *dev); 174 static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete); 175 static int eth_igc_stop(struct rte_eth_dev *dev); 176 static int eth_igc_start(struct rte_eth_dev *dev); 177 static int eth_igc_set_link_up(struct rte_eth_dev *dev); 178 static int eth_igc_set_link_down(struct rte_eth_dev *dev); 179 static int eth_igc_close(struct rte_eth_dev *dev); 180 static int eth_igc_reset(struct rte_eth_dev *dev); 181 static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev); 182 static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev); 183 static int eth_igc_fw_version_get(struct rte_eth_dev *dev, 184 char *fw_version, size_t fw_size); 185 static int eth_igc_infos_get(struct rte_eth_dev *dev, 186 struct rte_eth_dev_info *dev_info); 187 static int eth_igc_led_on(struct rte_eth_dev *dev); 188 static int eth_igc_led_off(struct rte_eth_dev *dev); 189 static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev); 190 static int eth_igc_rar_set(struct rte_eth_dev *dev, 191 struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool); 192 static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index); 193 static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev, 194 struct rte_ether_addr *addr); 195 static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev, 196 struct rte_ether_addr *mc_addr_set, 197 uint32_t nb_mc_addr); 198 static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev); 199 static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev); 200 static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 201 static int eth_igc_stats_get(struct rte_eth_dev *dev, 202 struct rte_eth_stats *rte_stats); 203 static int eth_igc_xstats_get(struct rte_eth_dev *dev, 204 struct rte_eth_xstat *xstats, unsigned int n); 205 static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, 206 const uint64_t *ids, 207 uint64_t *values, unsigned int n); 208 static int eth_igc_xstats_get_names(struct rte_eth_dev *dev, 209 struct rte_eth_xstat_name *xstats_names, 210 unsigned int size); 211 static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev, 212 const uint64_t *ids, struct rte_eth_xstat_name *xstats_names, 213 unsigned int limit); 214 static int eth_igc_xstats_reset(struct rte_eth_dev *dev); 215 static int 216 eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev, 217 uint16_t queue_id, uint8_t stat_idx, 
uint8_t is_rx); 218 static int 219 eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); 220 static int 221 eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); 222 static int 223 eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); 224 static int 225 eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); 226 static int eth_igc_rss_reta_update(struct rte_eth_dev *dev, 227 struct rte_eth_rss_reta_entry64 *reta_conf, 228 uint16_t reta_size); 229 static int eth_igc_rss_reta_query(struct rte_eth_dev *dev, 230 struct rte_eth_rss_reta_entry64 *reta_conf, 231 uint16_t reta_size); 232 static int eth_igc_rss_hash_update(struct rte_eth_dev *dev, 233 struct rte_eth_rss_conf *rss_conf); 234 static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev, 235 struct rte_eth_rss_conf *rss_conf); 236 static int 237 eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); 238 static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask); 239 static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev, 240 enum rte_vlan_type vlan_type, uint16_t tpid); 241 242 static const struct eth_dev_ops eth_igc_ops = { 243 .dev_configure = eth_igc_configure, 244 .link_update = eth_igc_link_update, 245 .dev_stop = eth_igc_stop, 246 .dev_start = eth_igc_start, 247 .dev_close = eth_igc_close, 248 .dev_reset = eth_igc_reset, 249 .dev_set_link_up = eth_igc_set_link_up, 250 .dev_set_link_down = eth_igc_set_link_down, 251 .promiscuous_enable = eth_igc_promiscuous_enable, 252 .promiscuous_disable = eth_igc_promiscuous_disable, 253 .allmulticast_enable = eth_igc_allmulticast_enable, 254 .allmulticast_disable = eth_igc_allmulticast_disable, 255 .fw_version_get = eth_igc_fw_version_get, 256 .dev_infos_get = eth_igc_infos_get, 257 .dev_led_on = eth_igc_led_on, 258 .dev_led_off = eth_igc_led_off, 259 .dev_supported_ptypes_get = eth_igc_supported_ptypes_get, 260 .mtu_set = eth_igc_mtu_set, 261 .mac_addr_add = eth_igc_rar_set, 262 .mac_addr_remove = eth_igc_rar_clear, 263 .mac_addr_set = eth_igc_default_mac_addr_set, 264 .set_mc_addr_list = eth_igc_set_mc_addr_list, 265 266 .rx_queue_setup = eth_igc_rx_queue_setup, 267 .rx_queue_release = eth_igc_rx_queue_release, 268 .tx_queue_setup = eth_igc_tx_queue_setup, 269 .tx_queue_release = eth_igc_tx_queue_release, 270 .tx_done_cleanup = eth_igc_tx_done_cleanup, 271 .rxq_info_get = eth_igc_rxq_info_get, 272 .txq_info_get = eth_igc_txq_info_get, 273 .stats_get = eth_igc_stats_get, 274 .xstats_get = eth_igc_xstats_get, 275 .xstats_get_by_id = eth_igc_xstats_get_by_id, 276 .xstats_get_names_by_id = eth_igc_xstats_get_names_by_id, 277 .xstats_get_names = eth_igc_xstats_get_names, 278 .stats_reset = eth_igc_xstats_reset, 279 .xstats_reset = eth_igc_xstats_reset, 280 .queue_stats_mapping_set = eth_igc_queue_stats_mapping_set, 281 .rx_queue_intr_enable = eth_igc_rx_queue_intr_enable, 282 .rx_queue_intr_disable = eth_igc_rx_queue_intr_disable, 283 .flow_ctrl_get = eth_igc_flow_ctrl_get, 284 .flow_ctrl_set = eth_igc_flow_ctrl_set, 285 .reta_update = eth_igc_rss_reta_update, 286 .reta_query = eth_igc_rss_reta_query, 287 .rss_hash_update = eth_igc_rss_hash_update, 288 .rss_hash_conf_get = eth_igc_rss_hash_conf_get, 289 .vlan_filter_set = eth_igc_vlan_filter_set, 290 .vlan_offload_set = eth_igc_vlan_offload_set, 291 .vlan_tpid_set = eth_igc_vlan_tpid_set, 292 .vlan_strip_queue_set = eth_igc_vlan_strip_queue_set, 293 .flow_ops_get = eth_igc_flow_ops_get, 294 }; 295 296 /* 297 * multiple 
queue mode checking 298 */ 299 static int 300 igc_check_mq_mode(struct rte_eth_dev *dev) 301 { 302 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 303 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 304 305 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 306 PMD_INIT_LOG(ERR, "SRIOV is not supported."); 307 return -EINVAL; 308 } 309 310 if (rx_mq_mode != RTE_ETH_MQ_RX_NONE && 311 rx_mq_mode != RTE_ETH_MQ_RX_RSS) { 312 /* RSS together with VMDq not supported*/ 313 PMD_INIT_LOG(ERR, "RX mode %d is not supported.", 314 rx_mq_mode); 315 return -EINVAL; 316 } 317 318 /* To no break software that set invalid mode, only display 319 * warning if invalid mode is used. 320 */ 321 if (tx_mq_mode != RTE_ETH_MQ_TX_NONE) 322 PMD_INIT_LOG(WARNING, 323 "TX mode %d is not supported. Due to meaningless in this driver, just ignore", 324 tx_mq_mode); 325 326 return 0; 327 } 328 329 static int 330 eth_igc_configure(struct rte_eth_dev *dev) 331 { 332 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 333 int ret; 334 335 PMD_INIT_FUNC_TRACE(); 336 337 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 338 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 339 340 ret = igc_check_mq_mode(dev); 341 if (ret != 0) 342 return ret; 343 344 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE; 345 return 0; 346 } 347 348 static int 349 eth_igc_set_link_up(struct rte_eth_dev *dev) 350 { 351 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 352 353 if (hw->phy.media_type == igc_media_type_copper) 354 igc_power_up_phy(hw); 355 else 356 igc_power_up_fiber_serdes_link(hw); 357 return 0; 358 } 359 360 static int 361 eth_igc_set_link_down(struct rte_eth_dev *dev) 362 { 363 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 364 365 if (hw->phy.media_type == igc_media_type_copper) 366 igc_power_down_phy(hw); 367 else 368 igc_shutdown_fiber_serdes_link(hw); 369 return 0; 370 } 371 372 /* 373 * disable other interrupt 374 */ 375 static void 376 igc_intr_other_disable(struct rte_eth_dev *dev) 377 { 378 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 379 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 380 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 381 382 if (rte_intr_allow_others(intr_handle) && 383 dev->data->dev_conf.intr_conf.lsc) { 384 IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC); 385 } 386 387 IGC_WRITE_REG(hw, IGC_IMC, ~0); 388 IGC_WRITE_FLUSH(hw); 389 } 390 391 /* 392 * enable other interrupt 393 */ 394 static inline void 395 igc_intr_other_enable(struct rte_eth_dev *dev) 396 { 397 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 398 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 399 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 400 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 401 402 if (rte_intr_allow_others(intr_handle) && 403 dev->data->dev_conf.intr_conf.lsc) { 404 IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC); 405 } 406 407 IGC_WRITE_REG(hw, IGC_IMS, intr->mask); 408 IGC_WRITE_FLUSH(hw); 409 } 410 411 /* 412 * It reads ICR and gets interrupt causes, check it and set a bit flag 413 * to update link status. 
414 */ 415 static void 416 eth_igc_interrupt_get_status(struct rte_eth_dev *dev) 417 { 418 uint32_t icr; 419 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 420 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 421 422 /* read-on-clear nic registers here */ 423 icr = IGC_READ_REG(hw, IGC_ICR); 424 425 intr->flags = 0; 426 if (icr & IGC_ICR_LSC) 427 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE; 428 } 429 430 /* return 0 means link status changed, -1 means not changed */ 431 static int 432 eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete) 433 { 434 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 435 struct rte_eth_link link; 436 int link_check, count; 437 438 link_check = 0; 439 hw->mac.get_link_status = 1; 440 441 /* possible wait-to-complete in up to 9 seconds */ 442 for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) { 443 /* Read the real link status */ 444 switch (hw->phy.media_type) { 445 case igc_media_type_copper: 446 /* Do the work to read phy */ 447 igc_check_for_link(hw); 448 link_check = !hw->mac.get_link_status; 449 break; 450 451 case igc_media_type_fiber: 452 igc_check_for_link(hw); 453 link_check = (IGC_READ_REG(hw, IGC_STATUS) & 454 IGC_STATUS_LU); 455 break; 456 457 case igc_media_type_internal_serdes: 458 igc_check_for_link(hw); 459 link_check = hw->mac.serdes_has_link; 460 break; 461 462 default: 463 break; 464 } 465 if (link_check || wait_to_complete == 0) 466 break; 467 rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL); 468 } 469 memset(&link, 0, sizeof(link)); 470 471 /* Now we check if a transition has happened */ 472 if (link_check) { 473 uint16_t duplex, speed; 474 hw->mac.ops.get_link_up_info(hw, &speed, &duplex); 475 link.link_duplex = (duplex == FULL_DUPLEX) ? 476 RTE_ETH_LINK_FULL_DUPLEX : 477 RTE_ETH_LINK_HALF_DUPLEX; 478 link.link_speed = speed; 479 link.link_status = RTE_ETH_LINK_UP; 480 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 481 RTE_ETH_LINK_SPEED_FIXED); 482 483 if (speed == SPEED_2500) { 484 uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG); 485 if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) { 486 tipg &= ~IGC_TIPG_IPGT_MASK; 487 tipg |= 0x0b; 488 IGC_WRITE_REG(hw, IGC_TIPG, tipg); 489 } 490 } 491 } else { 492 link.link_speed = 0; 493 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 494 link.link_status = RTE_ETH_LINK_DOWN; 495 link.link_autoneg = RTE_ETH_LINK_FIXED; 496 } 497 498 return rte_eth_linkstatus_set(dev, &link); 499 } 500 501 /* 502 * It executes link_update after knowing an interrupt is present. 503 */ 504 static void 505 eth_igc_interrupt_action(struct rte_eth_dev *dev) 506 { 507 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 508 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 509 struct rte_eth_link link; 510 int ret; 511 512 if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) { 513 intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 514 515 /* set get_link_status to check register later */ 516 ret = eth_igc_link_update(dev, 0); 517 518 /* check if link has changed */ 519 if (ret < 0) 520 return; 521 522 rte_eth_linkstatus_get(dev, &link); 523 if (link.link_status) 524 PMD_DRV_LOG(INFO, 525 " Port %d: Link Up - speed %u Mbps - %s", 526 dev->data->port_id, 527 (unsigned int)link.link_speed, 528 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ? 
529 "full-duplex" : "half-duplex"); 530 else 531 PMD_DRV_LOG(INFO, " Port %d: Link Down", 532 dev->data->port_id); 533 534 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 535 pci_dev->addr.domain, 536 pci_dev->addr.bus, 537 pci_dev->addr.devid, 538 pci_dev->addr.function); 539 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 540 } 541 } 542 543 /* 544 * Interrupt handler which shall be registered at first. 545 * 546 * @handle 547 * Pointer to interrupt handle. 548 * @param 549 * The address of parameter (struct rte_eth_dev *) registered before. 550 */ 551 static void 552 eth_igc_interrupt_handler(void *param) 553 { 554 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 555 556 eth_igc_interrupt_get_status(dev); 557 eth_igc_interrupt_action(dev); 558 } 559 560 static void igc_read_queue_stats_register(struct rte_eth_dev *dev); 561 562 /* 563 * Update the queue status every IGC_ALARM_INTERVAL time. 564 * @param 565 * The address of parameter (struct rte_eth_dev *) registered before. 566 */ 567 static void 568 igc_update_queue_stats_handler(void *param) 569 { 570 struct rte_eth_dev *dev = param; 571 igc_read_queue_stats_register(dev); 572 rte_eal_alarm_set(IGC_ALARM_INTERVAL, 573 igc_update_queue_stats_handler, dev); 574 } 575 576 /* 577 * rx,tx enable/disable 578 */ 579 static void 580 eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable) 581 { 582 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 583 uint32_t tctl, rctl; 584 585 tctl = IGC_READ_REG(hw, IGC_TCTL); 586 rctl = IGC_READ_REG(hw, IGC_RCTL); 587 588 if (enable) { 589 /* enable Tx/Rx */ 590 tctl |= IGC_TCTL_EN; 591 rctl |= IGC_RCTL_EN; 592 } else { 593 /* disable Tx/Rx */ 594 tctl &= ~IGC_TCTL_EN; 595 rctl &= ~IGC_RCTL_EN; 596 } 597 IGC_WRITE_REG(hw, IGC_TCTL, tctl); 598 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 599 IGC_WRITE_FLUSH(hw); 600 } 601 602 /* 603 * This routine disables all traffic on the adapter by issuing a 604 * global reset on the MAC. 605 */ 606 static int 607 eth_igc_stop(struct rte_eth_dev *dev) 608 { 609 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev); 610 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 611 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 612 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 613 struct rte_eth_link link; 614 615 dev->data->dev_started = 0; 616 adapter->stopped = 1; 617 618 /* disable receive and transmit */ 619 eth_igc_rxtx_control(dev, false); 620 621 /* disable all MSI-X interrupts */ 622 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f); 623 IGC_WRITE_FLUSH(hw); 624 625 /* clear all MSI-X interrupts */ 626 IGC_WRITE_REG(hw, IGC_EICR, 0x1f); 627 628 igc_intr_other_disable(dev); 629 630 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev); 631 632 /* disable intr eventfd mapping */ 633 rte_intr_disable(intr_handle); 634 635 igc_reset_hw(hw); 636 637 /* disable all wake up */ 638 IGC_WRITE_REG(hw, IGC_WUC, 0); 639 640 /* disable checking EEE operation in MAC loopback mode */ 641 igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN); 642 643 /* Set bit for Go Link disconnect */ 644 igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT, 645 IGC_82580_PM_GO_LINKD); 646 647 /* Power down the phy. 
Needed to make the link go Down */ 648 eth_igc_set_link_down(dev); 649 650 igc_dev_clear_queues(dev); 651 652 /* clear the recorded link status */ 653 memset(&link, 0, sizeof(link)); 654 rte_eth_linkstatus_set(dev, &link); 655 656 if (!rte_intr_allow_others(intr_handle)) 657 /* resume to the default handler */ 658 rte_intr_callback_register(intr_handle, 659 eth_igc_interrupt_handler, 660 (void *)dev); 661 662 /* Clean datapath event and queue/vec mapping */ 663 rte_intr_efd_disable(intr_handle); 664 rte_intr_vec_list_free(intr_handle); 665 666 return 0; 667 } 668 669 /* 670 * write interrupt vector allocation register 671 * @hw 672 * board private structure 673 * @queue_index 674 * queue index, valid 0,1,2,3 675 * @tx 676 * tx:1, rx:0 677 * @msix_vector 678 * msix-vector, valid 0,1,2,3,4 679 */ 680 static void 681 igc_write_ivar(struct igc_hw *hw, uint8_t queue_index, 682 bool tx, uint8_t msix_vector) 683 { 684 uint8_t offset = 0; 685 uint8_t reg_index = queue_index >> 1; 686 uint32_t val; 687 688 /* 689 * IVAR(0) 690 * bit31...24 bit23...16 bit15...8 bit7...0 691 * TX1 RX1 TX0 RX0 692 * 693 * IVAR(1) 694 * bit31...24 bit23...16 bit15...8 bit7...0 695 * TX3 RX3 TX2 RX2 696 */ 697 698 if (tx) 699 offset = 8; 700 701 if (queue_index & 1) 702 offset += 16; 703 704 val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index); 705 706 /* clear bits */ 707 val &= ~((uint32_t)0xFF << offset); 708 709 /* write vector and valid bit */ 710 val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset; 711 712 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val); 713 } 714 715 /* Sets up the hardware to generate MSI-X interrupts properly 716 * @hw 717 * board private structure 718 */ 719 static void 720 igc_configure_msix_intr(struct rte_eth_dev *dev) 721 { 722 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 723 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 724 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 725 726 uint32_t intr_mask; 727 uint32_t vec = IGC_MISC_VEC_ID; 728 uint32_t base = IGC_MISC_VEC_ID; 729 uint32_t misc_shift = 0; 730 int i; 731 732 /* won't configure msix register if no mapping is done 733 * between intr vector and event fd 734 */ 735 if (!rte_intr_dp_is_en(intr_handle)) 736 return; 737 738 if (rte_intr_allow_others(intr_handle)) { 739 base = IGC_RX_VEC_START; 740 vec = base; 741 misc_shift = 1; 742 } 743 744 /* turn on MSI-X capability first */ 745 IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | 746 IGC_GPIE_PBA | IGC_GPIE_EIAME | 747 IGC_GPIE_NSICR); 748 intr_mask = RTE_LEN2MASK(rte_intr_nb_efd_get(intr_handle), 749 uint32_t) << misc_shift; 750 751 if (dev->data->dev_conf.intr_conf.lsc) 752 intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC); 753 754 /* enable msix auto-clear */ 755 igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask); 756 757 /* set other cause interrupt vector */ 758 igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC, 759 (uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8); 760 761 /* enable auto-mask */ 762 igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask); 763 764 for (i = 0; i < dev->data->nb_rx_queues; i++) { 765 igc_write_ivar(hw, i, 0, vec); 766 rte_intr_vec_list_index_set(intr_handle, i, vec); 767 if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1) 768 vec++; 769 } 770 771 IGC_WRITE_FLUSH(hw); 772 } 773 774 /** 775 * It enables the interrupt mask and then enable the interrupt. 776 * 777 * @dev 778 * Pointer to struct rte_eth_dev. 
779 * @on 780 * Enable or Disable 781 */ 782 static void 783 igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 784 { 785 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 786 787 if (on) 788 intr->mask |= IGC_ICR_LSC; 789 else 790 intr->mask &= ~IGC_ICR_LSC; 791 } 792 793 /* 794 * It enables the interrupt. 795 * It will be called once only during nic initialized. 796 */ 797 static void 798 igc_rxq_interrupt_setup(struct rte_eth_dev *dev) 799 { 800 uint32_t mask; 801 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 802 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 803 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 804 int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0; 805 806 /* won't configure msix register if no mapping is done 807 * between intr vector and event fd 808 */ 809 if (!rte_intr_dp_is_en(intr_handle)) 810 return; 811 812 mask = RTE_LEN2MASK(rte_intr_nb_efd_get(intr_handle), uint32_t) 813 << misc_shift; 814 IGC_WRITE_REG(hw, IGC_EIMS, mask); 815 } 816 817 /* 818 * Get hardware rx-buffer size. 819 */ 820 static inline int 821 igc_get_rx_buffer_size(struct igc_hw *hw) 822 { 823 return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10; 824 } 825 826 /* 827 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. 828 * For ASF and Pass Through versions of f/w this means 829 * that the driver is loaded. 830 */ 831 static void 832 igc_hw_control_acquire(struct igc_hw *hw) 833 { 834 uint32_t ctrl_ext; 835 836 /* Let firmware know the driver has taken over */ 837 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); 838 IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 839 } 840 841 /* 842 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit. 843 * For ASF and Pass Through versions of f/w this means that the 844 * driver is no longer loaded. 845 */ 846 static void 847 igc_hw_control_release(struct igc_hw *hw) 848 { 849 uint32_t ctrl_ext; 850 851 /* Let firmware taken over control of h/w */ 852 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); 853 IGC_WRITE_REG(hw, IGC_CTRL_EXT, 854 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 855 } 856 857 static int 858 igc_hardware_init(struct igc_hw *hw) 859 { 860 uint32_t rx_buf_size; 861 int diag; 862 863 /* Let the firmware know the OS is in control */ 864 igc_hw_control_acquire(hw); 865 866 /* Issue a global reset */ 867 igc_reset_hw(hw); 868 869 /* disable all wake up */ 870 IGC_WRITE_REG(hw, IGC_WUC, 0); 871 872 /* 873 * Hardware flow control 874 * - High water mark should allow for at least two standard size (1518) 875 * frames to be received after sending an XOFF. 876 * - Low water mark works best when it is very near the high water mark. 877 * This allows the receiver to restart by sending XON when it has 878 * drained a bit. Here we use an arbitrary value of 1500 which will 879 * restart after one full frame is pulled from the buffer. There 880 * could be several smaller frames in the buffer and if so they will 881 * not trigger the XON until their total number reduces the buffer 882 * by 1500. 
883 */ 884 rx_buf_size = igc_get_rx_buffer_size(hw); 885 hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2); 886 hw->fc.low_water = hw->fc.high_water - 1500; 887 hw->fc.pause_time = IGC_FC_PAUSE_TIME; 888 hw->fc.send_xon = 1; 889 hw->fc.requested_mode = igc_fc_full; 890 891 diag = igc_init_hw(hw); 892 if (diag < 0) 893 return diag; 894 895 igc_get_phy_info(hw); 896 igc_check_for_link(hw); 897 898 return 0; 899 } 900 901 static int 902 eth_igc_start(struct rte_eth_dev *dev) 903 { 904 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 905 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev); 906 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 907 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 908 uint32_t *speeds; 909 int ret; 910 911 PMD_INIT_FUNC_TRACE(); 912 913 /* disable all MSI-X interrupts */ 914 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f); 915 IGC_WRITE_FLUSH(hw); 916 917 /* clear all MSI-X interrupts */ 918 IGC_WRITE_REG(hw, IGC_EICR, 0x1f); 919 920 /* disable uio/vfio intr/eventfd mapping */ 921 if (!adapter->stopped) 922 rte_intr_disable(intr_handle); 923 924 /* Power up the phy. Needed to make the link go Up */ 925 eth_igc_set_link_up(dev); 926 927 /* Put the address into the Receive Address Array */ 928 igc_rar_set(hw, hw->mac.addr, 0); 929 930 /* Initialize the hardware */ 931 if (igc_hardware_init(hw)) { 932 PMD_DRV_LOG(ERR, "Unable to initialize the hardware"); 933 return -EIO; 934 } 935 adapter->stopped = 0; 936 937 /* check and configure queue intr-vector mapping */ 938 if (rte_intr_cap_multiple(intr_handle) && 939 dev->data->dev_conf.intr_conf.rxq) { 940 uint32_t intr_vector = dev->data->nb_rx_queues; 941 if (rte_intr_efd_enable(intr_handle, intr_vector)) 942 return -1; 943 } 944 945 if (rte_intr_dp_is_en(intr_handle)) { 946 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", 947 dev->data->nb_rx_queues)) { 948 PMD_DRV_LOG(ERR, 949 "Failed to allocate %d rx_queues intr_vec", 950 dev->data->nb_rx_queues); 951 return -ENOMEM; 952 } 953 } 954 955 /* configure msix for rx interrupt */ 956 igc_configure_msix_intr(dev); 957 958 igc_tx_init(dev); 959 960 /* This can fail when allocating mbufs for descriptor rings */ 961 ret = igc_rx_init(dev); 962 if (ret) { 963 PMD_DRV_LOG(ERR, "Unable to initialize RX hardware"); 964 igc_dev_clear_queues(dev); 965 return ret; 966 } 967 968 igc_clear_hw_cntrs_base_generic(hw); 969 970 /* VLAN Offload Settings */ 971 eth_igc_vlan_offload_set(dev, 972 RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | 973 RTE_ETH_VLAN_EXTEND_MASK); 974 975 /* Setup link speed and duplex */ 976 speeds = &dev->data->dev_conf.link_speeds; 977 if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) { 978 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500; 979 hw->mac.autoneg = 1; 980 } else { 981 int num_speeds = 0; 982 983 if (*speeds & RTE_ETH_LINK_SPEED_FIXED) { 984 PMD_DRV_LOG(ERR, 985 "Force speed mode currently not supported"); 986 igc_dev_clear_queues(dev); 987 return -EINVAL; 988 } 989 990 hw->phy.autoneg_advertised = 0; 991 hw->mac.autoneg = 1; 992 993 if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M | 994 RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M | 995 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) { 996 num_speeds = -1; 997 goto error_invalid_config; 998 } 999 if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) { 1000 hw->phy.autoneg_advertised |= ADVERTISE_10_HALF; 1001 num_speeds++; 1002 } 1003 if (*speeds & RTE_ETH_LINK_SPEED_10M) { 1004 hw->phy.autoneg_advertised |= ADVERTISE_10_FULL; 1005 num_speeds++; 1006 } 1007 if (*speeds 
& RTE_ETH_LINK_SPEED_100M_HD) { 1008 hw->phy.autoneg_advertised |= ADVERTISE_100_HALF; 1009 num_speeds++; 1010 } 1011 if (*speeds & RTE_ETH_LINK_SPEED_100M) { 1012 hw->phy.autoneg_advertised |= ADVERTISE_100_FULL; 1013 num_speeds++; 1014 } 1015 if (*speeds & RTE_ETH_LINK_SPEED_1G) { 1016 hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL; 1017 num_speeds++; 1018 } 1019 if (*speeds & RTE_ETH_LINK_SPEED_2_5G) { 1020 hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL; 1021 num_speeds++; 1022 } 1023 if (num_speeds == 0) 1024 goto error_invalid_config; 1025 } 1026 1027 igc_setup_link(hw); 1028 1029 if (rte_intr_allow_others(intr_handle)) { 1030 /* check if lsc interrupt is enabled */ 1031 if (dev->data->dev_conf.intr_conf.lsc) 1032 igc_lsc_interrupt_setup(dev, 1); 1033 else 1034 igc_lsc_interrupt_setup(dev, 0); 1035 } else { 1036 rte_intr_callback_unregister(intr_handle, 1037 eth_igc_interrupt_handler, 1038 (void *)dev); 1039 if (dev->data->dev_conf.intr_conf.lsc) 1040 PMD_DRV_LOG(INFO, 1041 "LSC won't enable because of no intr multiplex"); 1042 } 1043 1044 /* enable uio/vfio intr/eventfd mapping */ 1045 rte_intr_enable(intr_handle); 1046 1047 rte_eal_alarm_set(IGC_ALARM_INTERVAL, 1048 igc_update_queue_stats_handler, dev); 1049 1050 /* check if rxq interrupt is enabled */ 1051 if (dev->data->dev_conf.intr_conf.rxq && 1052 rte_intr_dp_is_en(intr_handle)) 1053 igc_rxq_interrupt_setup(dev); 1054 1055 /* resume enabled intr since hw reset */ 1056 igc_intr_other_enable(dev); 1057 1058 eth_igc_rxtx_control(dev, true); 1059 eth_igc_link_update(dev, 0); 1060 1061 /* configure MAC-loopback mode */ 1062 if (dev->data->dev_conf.lpbk_mode == 1) { 1063 uint32_t reg_val; 1064 1065 reg_val = IGC_READ_REG(hw, IGC_CTRL); 1066 reg_val &= ~IGC_CTRL_SPEED_MASK; 1067 reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD | 1068 IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500; 1069 IGC_WRITE_REG(hw, IGC_CTRL, reg_val); 1070 1071 igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN); 1072 } 1073 1074 return 0; 1075 1076 error_invalid_config: 1077 PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u", 1078 dev->data->dev_conf.link_speeds, dev->data->port_id); 1079 igc_dev_clear_queues(dev); 1080 return -EINVAL; 1081 } 1082 1083 static int 1084 igc_reset_swfw_lock(struct igc_hw *hw) 1085 { 1086 int ret_val; 1087 1088 /* 1089 * Do mac ops initialization manually here, since we will need 1090 * some function pointers set by this call. 1091 */ 1092 ret_val = igc_init_mac_params(hw); 1093 if (ret_val) 1094 return ret_val; 1095 1096 /* 1097 * SMBI lock should not fail in this early stage. If this is the case, 1098 * it is due to an improper exit of the application. 1099 * So force the release of the faulty lock. 1100 */ 1101 if (igc_get_hw_semaphore_generic(hw) < 0) 1102 PMD_DRV_LOG(DEBUG, "SMBI lock released"); 1103 1104 igc_put_hw_semaphore_generic(hw); 1105 1106 if (hw->mac.ops.acquire_swfw_sync != NULL) { 1107 uint16_t mask; 1108 1109 /* 1110 * Phy lock should not fail in this early stage. 1111 * If this is the case, it is due to an improper exit of the 1112 * application. So force the release of the faulty lock. 
1113 */ 1114 mask = IGC_SWFW_PHY0_SM; 1115 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) { 1116 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", 1117 hw->bus.func); 1118 } 1119 hw->mac.ops.release_swfw_sync(hw, mask); 1120 1121 /* 1122 * This one is more tricky since it is common to all ports; but 1123 * swfw_sync retries last long enough (1s) to be almost sure 1124 * that if lock can not be taken it is due to an improper lock 1125 * of the semaphore. 1126 */ 1127 mask = IGC_SWFW_EEP_SM; 1128 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) 1129 PMD_DRV_LOG(DEBUG, "SWFW common locks released"); 1130 1131 hw->mac.ops.release_swfw_sync(hw, mask); 1132 } 1133 1134 return IGC_SUCCESS; 1135 } 1136 1137 /* 1138 * free all rx/tx queues. 1139 */ 1140 static void 1141 igc_dev_free_queues(struct rte_eth_dev *dev) 1142 { 1143 uint16_t i; 1144 1145 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1146 eth_igc_rx_queue_release(dev, i); 1147 dev->data->rx_queues[i] = NULL; 1148 } 1149 dev->data->nb_rx_queues = 0; 1150 1151 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1152 eth_igc_tx_queue_release(dev, i); 1153 dev->data->tx_queues[i] = NULL; 1154 } 1155 dev->data->nb_tx_queues = 0; 1156 } 1157 1158 static int 1159 eth_igc_close(struct rte_eth_dev *dev) 1160 { 1161 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1162 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 1163 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1164 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev); 1165 int retry = 0; 1166 int ret = 0; 1167 1168 PMD_INIT_FUNC_TRACE(); 1169 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1170 return 0; 1171 1172 if (!adapter->stopped) 1173 ret = eth_igc_stop(dev); 1174 1175 igc_flow_flush(dev, NULL); 1176 igc_clear_all_filter(dev); 1177 1178 igc_intr_other_disable(dev); 1179 do { 1180 int ret = rte_intr_callback_unregister(intr_handle, 1181 eth_igc_interrupt_handler, dev); 1182 if (ret >= 0 || ret == -ENOENT || ret == -EINVAL) 1183 break; 1184 1185 PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret); 1186 DELAY(200 * 1000); /* delay 200ms */ 1187 } while (retry++ < 5); 1188 1189 igc_phy_hw_reset(hw); 1190 igc_hw_control_release(hw); 1191 igc_dev_free_queues(dev); 1192 1193 /* Reset any pending lock */ 1194 igc_reset_swfw_lock(hw); 1195 1196 return ret; 1197 } 1198 1199 static void 1200 igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev) 1201 { 1202 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1203 1204 hw->vendor_id = pci_dev->id.vendor_id; 1205 hw->device_id = pci_dev->id.device_id; 1206 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; 1207 hw->subsystem_device_id = pci_dev->id.subsystem_device_id; 1208 } 1209 1210 static int 1211 eth_igc_dev_init(struct rte_eth_dev *dev) 1212 { 1213 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1214 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); 1215 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1216 int i, error = 0; 1217 1218 PMD_INIT_FUNC_TRACE(); 1219 dev->dev_ops = ð_igc_ops; 1220 dev->rx_queue_count = eth_igc_rx_queue_count; 1221 dev->rx_descriptor_status = eth_igc_rx_descriptor_status; 1222 dev->tx_descriptor_status = eth_igc_tx_descriptor_status; 1223 1224 /* 1225 * for secondary processes, we don't initialize any further as primary 1226 * has already done this work. Only check we don't need a different 1227 * RX function. 
1228 */ 1229 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1230 return 0; 1231 1232 rte_eth_copy_pci_info(dev, pci_dev); 1233 dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1234 1235 hw->back = pci_dev; 1236 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1237 1238 igc_identify_hardware(dev, pci_dev); 1239 if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) { 1240 error = -EIO; 1241 goto err_late; 1242 } 1243 1244 igc_get_bus_info(hw); 1245 1246 /* Reset any pending lock */ 1247 if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) { 1248 error = -EIO; 1249 goto err_late; 1250 } 1251 1252 /* Finish initialization */ 1253 if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) { 1254 error = -EIO; 1255 goto err_late; 1256 } 1257 1258 hw->mac.autoneg = 1; 1259 hw->phy.autoneg_wait_to_complete = 0; 1260 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500; 1261 1262 /* Copper options */ 1263 if (hw->phy.media_type == igc_media_type_copper) { 1264 hw->phy.mdix = 0; /* AUTO_ALL_MODES */ 1265 hw->phy.disable_polarity_correction = 0; 1266 hw->phy.ms_type = igc_ms_hw_default; 1267 } 1268 1269 /* 1270 * Start from a known state, this is important in reading the nvm 1271 * and mac from that. 1272 */ 1273 igc_reset_hw(hw); 1274 1275 /* Make sure we have a good EEPROM before we read from it */ 1276 if (igc_validate_nvm_checksum(hw) < 0) { 1277 /* 1278 * Some PCI-E parts fail the first check due to 1279 * the link being in sleep state, call it again, 1280 * if it fails a second time its a real issue. 1281 */ 1282 if (igc_validate_nvm_checksum(hw) < 0) { 1283 PMD_INIT_LOG(ERR, "EEPROM checksum invalid"); 1284 error = -EIO; 1285 goto err_late; 1286 } 1287 } 1288 1289 /* Read the permanent MAC address out of the EEPROM */ 1290 if (igc_read_mac_addr(hw) != 0) { 1291 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address"); 1292 error = -EIO; 1293 goto err_late; 1294 } 1295 1296 /* Allocate memory for storing MAC addresses */ 1297 dev->data->mac_addrs = rte_zmalloc("igc", 1298 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0); 1299 if (dev->data->mac_addrs == NULL) { 1300 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC", 1301 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count); 1302 error = -ENOMEM; 1303 goto err_late; 1304 } 1305 1306 /* Copy the permanent MAC address */ 1307 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, 1308 &dev->data->mac_addrs[0]); 1309 1310 /* Now initialize the hardware */ 1311 if (igc_hardware_init(hw) != 0) { 1312 PMD_INIT_LOG(ERR, "Hardware initialization failed"); 1313 rte_free(dev->data->mac_addrs); 1314 dev->data->mac_addrs = NULL; 1315 error = -ENODEV; 1316 goto err_late; 1317 } 1318 1319 hw->mac.get_link_status = 1; 1320 igc->stopped = 0; 1321 1322 /* Indicate SOL/IDER usage */ 1323 if (igc_check_reset_block(hw) < 0) 1324 PMD_INIT_LOG(ERR, 1325 "PHY reset is blocked due to SOL/IDER session."); 1326 1327 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x", 1328 dev->data->port_id, pci_dev->id.vendor_id, 1329 pci_dev->id.device_id); 1330 1331 rte_intr_callback_register(pci_dev->intr_handle, 1332 eth_igc_interrupt_handler, (void *)dev); 1333 1334 /* enable uio/vfio intr/eventfd mapping */ 1335 rte_intr_enable(pci_dev->intr_handle); 1336 1337 /* enable support intr */ 1338 igc_intr_other_enable(dev); 1339 1340 /* initiate queue status */ 1341 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1342 igc->txq_stats_map[i] = -1; 1343 igc->rxq_stats_map[i] = -1; 1344 } 1345 1346 igc_flow_init(dev); 1347 igc_clear_all_filter(dev); 1348 return 0; 1349 
1350 err_late: 1351 igc_hw_control_release(hw); 1352 return error; 1353 } 1354 1355 static int 1356 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev) 1357 { 1358 PMD_INIT_FUNC_TRACE(); 1359 eth_igc_close(eth_dev); 1360 return 0; 1361 } 1362 1363 static int 1364 eth_igc_reset(struct rte_eth_dev *dev) 1365 { 1366 int ret; 1367 1368 PMD_INIT_FUNC_TRACE(); 1369 1370 ret = eth_igc_dev_uninit(dev); 1371 if (ret) 1372 return ret; 1373 1374 return eth_igc_dev_init(dev); 1375 } 1376 1377 static int 1378 eth_igc_promiscuous_enable(struct rte_eth_dev *dev) 1379 { 1380 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1381 uint32_t rctl; 1382 1383 rctl = IGC_READ_REG(hw, IGC_RCTL); 1384 rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE); 1385 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1386 return 0; 1387 } 1388 1389 static int 1390 eth_igc_promiscuous_disable(struct rte_eth_dev *dev) 1391 { 1392 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1393 uint32_t rctl; 1394 1395 rctl = IGC_READ_REG(hw, IGC_RCTL); 1396 rctl &= (~IGC_RCTL_UPE); 1397 if (dev->data->all_multicast == 1) 1398 rctl |= IGC_RCTL_MPE; 1399 else 1400 rctl &= (~IGC_RCTL_MPE); 1401 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1402 return 0; 1403 } 1404 1405 static int 1406 eth_igc_allmulticast_enable(struct rte_eth_dev *dev) 1407 { 1408 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1409 uint32_t rctl; 1410 1411 rctl = IGC_READ_REG(hw, IGC_RCTL); 1412 rctl |= IGC_RCTL_MPE; 1413 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1414 return 0; 1415 } 1416 1417 static int 1418 eth_igc_allmulticast_disable(struct rte_eth_dev *dev) 1419 { 1420 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1421 uint32_t rctl; 1422 1423 if (dev->data->promiscuous == 1) 1424 return 0; /* must remain in all_multicast mode */ 1425 1426 rctl = IGC_READ_REG(hw, IGC_RCTL); 1427 rctl &= (~IGC_RCTL_MPE); 1428 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1429 return 0; 1430 } 1431 1432 static int 1433 eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 1434 size_t fw_size) 1435 { 1436 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1437 struct igc_fw_version fw; 1438 int ret; 1439 1440 igc_get_fw_version(hw, &fw); 1441 1442 /* if option rom is valid, display its version too */ 1443 if (fw.or_valid) { 1444 ret = snprintf(fw_version, fw_size, 1445 "%d.%d, 0x%08x, %d.%d.%d", 1446 fw.eep_major, fw.eep_minor, fw.etrack_id, 1447 fw.or_major, fw.or_build, fw.or_patch); 1448 /* no option rom */ 1449 } else { 1450 if (fw.etrack_id != 0X0000) { 1451 ret = snprintf(fw_version, fw_size, 1452 "%d.%d, 0x%08x", 1453 fw.eep_major, fw.eep_minor, 1454 fw.etrack_id); 1455 } else { 1456 ret = snprintf(fw_version, fw_size, 1457 "%d.%d.%d", 1458 fw.eep_major, fw.eep_minor, 1459 fw.eep_build); 1460 } 1461 } 1462 if (ret < 0) 1463 return -EINVAL; 1464 1465 ret += 1; /* add the size of '\0' */ 1466 if (fw_size < (size_t)ret) 1467 return ret; 1468 else 1469 return 0; 1470 } 1471 1472 static int 1473 eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 1474 { 1475 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1476 1477 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. 
*/ 1478 dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE; 1479 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 1480 dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL; 1481 dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL; 1482 dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 1483 1484 dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM; 1485 dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM; 1486 dev_info->max_vmdq_pools = 0; 1487 1488 dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t); 1489 dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128; 1490 dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL; 1491 1492 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1493 .rx_thresh = { 1494 .pthresh = IGC_DEFAULT_RX_PTHRESH, 1495 .hthresh = IGC_DEFAULT_RX_HTHRESH, 1496 .wthresh = IGC_DEFAULT_RX_WTHRESH, 1497 }, 1498 .rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH, 1499 .rx_drop_en = 0, 1500 .offloads = 0, 1501 }; 1502 1503 dev_info->default_txconf = (struct rte_eth_txconf) { 1504 .tx_thresh = { 1505 .pthresh = IGC_DEFAULT_TX_PTHRESH, 1506 .hthresh = IGC_DEFAULT_TX_HTHRESH, 1507 .wthresh = IGC_DEFAULT_TX_WTHRESH, 1508 }, 1509 .offloads = 0, 1510 }; 1511 1512 dev_info->rx_desc_lim = rx_desc_lim; 1513 dev_info->tx_desc_lim = tx_desc_lim; 1514 1515 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M | 1516 RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M | 1517 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G; 1518 1519 dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD; 1520 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 1521 return 0; 1522 } 1523 1524 static int 1525 eth_igc_led_on(struct rte_eth_dev *dev) 1526 { 1527 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1528 1529 return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP; 1530 } 1531 1532 static int 1533 eth_igc_led_off(struct rte_eth_dev *dev) 1534 { 1535 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1536 1537 return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP; 1538 } 1539 1540 static const uint32_t * 1541 eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev) 1542 { 1543 static const uint32_t ptypes[] = { 1544 /* refers to rx_desc_pkt_info_to_pkt_type() */ 1545 RTE_PTYPE_L2_ETHER, 1546 RTE_PTYPE_L3_IPV4, 1547 RTE_PTYPE_L3_IPV4_EXT, 1548 RTE_PTYPE_L3_IPV6, 1549 RTE_PTYPE_L3_IPV6_EXT, 1550 RTE_PTYPE_L4_TCP, 1551 RTE_PTYPE_L4_UDP, 1552 RTE_PTYPE_L4_SCTP, 1553 RTE_PTYPE_TUNNEL_IP, 1554 RTE_PTYPE_INNER_L3_IPV6, 1555 RTE_PTYPE_INNER_L3_IPV6_EXT, 1556 RTE_PTYPE_INNER_L4_TCP, 1557 RTE_PTYPE_INNER_L4_UDP, 1558 RTE_PTYPE_UNKNOWN 1559 }; 1560 1561 return ptypes; 1562 } 1563 1564 static int 1565 eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1566 { 1567 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1568 uint32_t frame_size = mtu + IGC_ETH_OVERHEAD; 1569 uint32_t rctl; 1570 1571 /* if extend vlan has been enabled */ 1572 if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN) 1573 frame_size += VLAN_TAG_SIZE; 1574 1575 /* 1576 * If device is started, refuse mtu that requires the support of 1577 * scattered packets when this feature has not been enabled before. 
1578 */ 1579 if (dev->data->dev_started && !dev->data->scattered_rx && 1580 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 1581 PMD_INIT_LOG(ERR, "Stop port first."); 1582 return -EINVAL; 1583 } 1584 1585 rctl = IGC_READ_REG(hw, IGC_RCTL); 1586 if (mtu > RTE_ETHER_MTU) 1587 rctl |= IGC_RCTL_LPE; 1588 else 1589 rctl &= ~IGC_RCTL_LPE; 1590 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1591 1592 IGC_WRITE_REG(hw, IGC_RLPML, frame_size); 1593 1594 return 0; 1595 } 1596 1597 static int 1598 eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 1599 uint32_t index, uint32_t pool) 1600 { 1601 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1602 1603 igc_rar_set(hw, mac_addr->addr_bytes, index); 1604 RTE_SET_USED(pool); 1605 return 0; 1606 } 1607 1608 static void 1609 eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index) 1610 { 1611 uint8_t addr[RTE_ETHER_ADDR_LEN]; 1612 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1613 1614 memset(addr, 0, sizeof(addr)); 1615 igc_rar_set(hw, addr, index); 1616 } 1617 1618 static int 1619 eth_igc_default_mac_addr_set(struct rte_eth_dev *dev, 1620 struct rte_ether_addr *addr) 1621 { 1622 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1623 igc_rar_set(hw, addr->addr_bytes, 0); 1624 return 0; 1625 } 1626 1627 static int 1628 eth_igc_set_mc_addr_list(struct rte_eth_dev *dev, 1629 struct rte_ether_addr *mc_addr_set, 1630 uint32_t nb_mc_addr) 1631 { 1632 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1633 igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); 1634 return 0; 1635 } 1636 1637 /* 1638 * Read hardware registers 1639 */ 1640 static void 1641 igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats) 1642 { 1643 int pause_frames; 1644 1645 uint64_t old_gprc = stats->gprc; 1646 uint64_t old_gptc = stats->gptc; 1647 uint64_t old_tpr = stats->tpr; 1648 uint64_t old_tpt = stats->tpt; 1649 uint64_t old_rpthc = stats->rpthc; 1650 uint64_t old_hgptc = stats->hgptc; 1651 1652 stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS); 1653 stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC); 1654 stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC); 1655 stats->mpc += IGC_READ_REG(hw, IGC_MPC); 1656 stats->scc += IGC_READ_REG(hw, IGC_SCC); 1657 stats->ecol += IGC_READ_REG(hw, IGC_ECOL); 1658 1659 stats->mcc += IGC_READ_REG(hw, IGC_MCC); 1660 stats->latecol += IGC_READ_REG(hw, IGC_LATECOL); 1661 stats->colc += IGC_READ_REG(hw, IGC_COLC); 1662 1663 stats->dc += IGC_READ_REG(hw, IGC_DC); 1664 stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS); 1665 stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC); 1666 stats->rlec += IGC_READ_REG(hw, IGC_RLEC); 1667 stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC); 1668 stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC); 1669 1670 /* 1671 * For watchdog management we need to know if we have been 1672 * paused during the last interval, so capture that here. 
1673 */ 1674 pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC); 1675 stats->xoffrxc += pause_frames; 1676 stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC); 1677 stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC); 1678 stats->prc64 += IGC_READ_REG(hw, IGC_PRC64); 1679 stats->prc127 += IGC_READ_REG(hw, IGC_PRC127); 1680 stats->prc255 += IGC_READ_REG(hw, IGC_PRC255); 1681 stats->prc511 += IGC_READ_REG(hw, IGC_PRC511); 1682 stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023); 1683 stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522); 1684 stats->gprc += IGC_READ_REG(hw, IGC_GPRC); 1685 stats->bprc += IGC_READ_REG(hw, IGC_BPRC); 1686 stats->mprc += IGC_READ_REG(hw, IGC_MPRC); 1687 stats->gptc += IGC_READ_REG(hw, IGC_GPTC); 1688 1689 /* For the 64-bit byte counters the low dword must be read first. */ 1690 /* Both registers clear on the read of the high dword */ 1691 1692 /* Workaround CRC bytes included in size, take away 4 bytes/packet */ 1693 stats->gorc += IGC_READ_REG(hw, IGC_GORCL); 1694 stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32); 1695 stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN; 1696 stats->gotc += IGC_READ_REG(hw, IGC_GOTCL); 1697 stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32); 1698 stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN; 1699 1700 stats->rnbc += IGC_READ_REG(hw, IGC_RNBC); 1701 stats->ruc += IGC_READ_REG(hw, IGC_RUC); 1702 stats->rfc += IGC_READ_REG(hw, IGC_RFC); 1703 stats->roc += IGC_READ_REG(hw, IGC_ROC); 1704 stats->rjc += IGC_READ_REG(hw, IGC_RJC); 1705 1706 stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC); 1707 stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC); 1708 stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC); 1709 stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC); 1710 stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC); 1711 stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC); 1712 stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC); 1713 1714 stats->tpr += IGC_READ_REG(hw, IGC_TPR); 1715 stats->tpt += IGC_READ_REG(hw, IGC_TPT); 1716 1717 stats->tor += IGC_READ_REG(hw, IGC_TORL); 1718 stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32); 1719 stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 1720 stats->tot += IGC_READ_REG(hw, IGC_TOTL); 1721 stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32); 1722 stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN; 1723 1724 stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64); 1725 stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127); 1726 stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255); 1727 stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511); 1728 stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023); 1729 stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522); 1730 stats->mptc += IGC_READ_REG(hw, IGC_MPTC); 1731 stats->bptc += IGC_READ_REG(hw, IGC_BPTC); 1732 stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC); 1733 1734 stats->iac += IGC_READ_REG(hw, IGC_IAC); 1735 stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC); 1736 stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC); 1737 stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC); 1738 1739 /* Host to Card Statistics */ 1740 stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL); 1741 stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32); 1742 stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN; 1743 stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL); 1744 stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32); 1745 stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN; 1746 stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS); 1747 } 1748 1749 /* 1750 * Write 0 to all queue status 
registers 1751 */ 1752 static void 1753 igc_reset_queue_stats_register(struct igc_hw *hw) 1754 { 1755 int i; 1756 1757 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1758 IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0); 1759 IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0); 1760 IGC_WRITE_REG(hw, IGC_PQGORC(i), 0); 1761 IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0); 1762 IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0); 1763 IGC_WRITE_REG(hw, IGC_RQDPC(i), 0); 1764 IGC_WRITE_REG(hw, IGC_TQDPC(i), 0); 1765 } 1766 } 1767 1768 /* 1769 * Read all hardware queue status registers 1770 */ 1771 static void 1772 igc_read_queue_stats_register(struct rte_eth_dev *dev) 1773 { 1774 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1775 struct igc_hw_queue_stats *queue_stats = 1776 IGC_DEV_PRIVATE_QUEUE_STATS(dev); 1777 int i; 1778 1779 /* 1780 * This register is not cleared on read. Furthermore, the register wraps 1781 * around back to 0x00000000 on the next increment when reaching a value 1782 * of 0xFFFFFFFF and then continues normal count operation. 1783 */ 1784 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1785 union { 1786 u64 ddword; 1787 u32 dword[2]; 1788 } value; 1789 u32 tmp; 1790 1791 /* 1792 * Read the register first, if the value is smaller than that 1793 * previous read, that mean the register has been overflowed, 1794 * then we add the high 4 bytes by 1 and replace the low 4 1795 * bytes by the new value. 1796 */ 1797 tmp = IGC_READ_REG(hw, IGC_PQGPRC(i)); 1798 value.ddword = queue_stats->pqgprc[i]; 1799 if (value.dword[U32_0_IN_U64] > tmp) 1800 value.dword[U32_1_IN_U64]++; 1801 value.dword[U32_0_IN_U64] = tmp; 1802 queue_stats->pqgprc[i] = value.ddword; 1803 1804 tmp = IGC_READ_REG(hw, IGC_PQGPTC(i)); 1805 value.ddword = queue_stats->pqgptc[i]; 1806 if (value.dword[U32_0_IN_U64] > tmp) 1807 value.dword[U32_1_IN_U64]++; 1808 value.dword[U32_0_IN_U64] = tmp; 1809 queue_stats->pqgptc[i] = value.ddword; 1810 1811 tmp = IGC_READ_REG(hw, IGC_PQGORC(i)); 1812 value.ddword = queue_stats->pqgorc[i]; 1813 if (value.dword[U32_0_IN_U64] > tmp) 1814 value.dword[U32_1_IN_U64]++; 1815 value.dword[U32_0_IN_U64] = tmp; 1816 queue_stats->pqgorc[i] = value.ddword; 1817 1818 tmp = IGC_READ_REG(hw, IGC_PQGOTC(i)); 1819 value.ddword = queue_stats->pqgotc[i]; 1820 if (value.dword[U32_0_IN_U64] > tmp) 1821 value.dword[U32_1_IN_U64]++; 1822 value.dword[U32_0_IN_U64] = tmp; 1823 queue_stats->pqgotc[i] = value.ddword; 1824 1825 tmp = IGC_READ_REG(hw, IGC_PQMPRC(i)); 1826 value.ddword = queue_stats->pqmprc[i]; 1827 if (value.dword[U32_0_IN_U64] > tmp) 1828 value.dword[U32_1_IN_U64]++; 1829 value.dword[U32_0_IN_U64] = tmp; 1830 queue_stats->pqmprc[i] = value.ddword; 1831 1832 tmp = IGC_READ_REG(hw, IGC_RQDPC(i)); 1833 value.ddword = queue_stats->rqdpc[i]; 1834 if (value.dword[U32_0_IN_U64] > tmp) 1835 value.dword[U32_1_IN_U64]++; 1836 value.dword[U32_0_IN_U64] = tmp; 1837 queue_stats->rqdpc[i] = value.ddword; 1838 1839 tmp = IGC_READ_REG(hw, IGC_TQDPC(i)); 1840 value.ddword = queue_stats->tqdpc[i]; 1841 if (value.dword[U32_0_IN_U64] > tmp) 1842 value.dword[U32_1_IN_U64]++; 1843 value.dword[U32_0_IN_U64] = tmp; 1844 queue_stats->tqdpc[i] = value.ddword; 1845 } 1846 } 1847 1848 static int 1849 eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 1850 { 1851 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); 1852 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1853 struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev); 1854 struct igc_hw_queue_stats *queue_stats = 1855 IGC_DEV_PRIVATE_QUEUE_STATS(dev); 1856 int i; 1857 1858 /* 1859 * Cancel status handler 
static int
eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
    struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
    struct igc_hw_queue_stats *queue_stats =
        IGC_DEV_PRIVATE_QUEUE_STATS(dev);
    int i;

    /*
     * Cancel the statistics handler first, since it also reads the queue
     * statistics registers.
     */
    rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

    /* Read the statistics registers */
    igc_read_queue_stats_register(dev);
    igc_read_stats_registers(hw, stats);

    if (rte_stats == NULL) {
        /* Restart the queue statistics handler */
        rte_eal_alarm_set(IGC_ALARM_INTERVAL,
                igc_update_queue_stats_handler, dev);
        return -EINVAL;
    }

    /* Rx errors */
    rte_stats->imissed = stats->mpc;
    rte_stats->ierrors = stats->crcerrs + stats->rlec +
            stats->rxerrc + stats->algnerrc;

    /* Tx errors */
    rte_stats->oerrors = stats->ecol + stats->latecol;

    rte_stats->ipackets = stats->gprc;
    rte_stats->opackets = stats->gptc;
    rte_stats->ibytes = stats->gorc;
    rte_stats->obytes = stats->gotc;

    /* Get per-queue statistics */
    for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
        /* Get TX queue statistics */
        int map_id = igc->txq_stats_map[i];
        if (map_id >= 0) {
            rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
            rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
        }
        /* Get RX queue statistics */
        map_id = igc->rxq_stats_map[i];
        if (map_id >= 0) {
            rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
            rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
            rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
        }
    }

    /* Restart the queue statistics handler */
    rte_eal_alarm_set(IGC_ALARM_INTERVAL,
            igc_update_queue_stats_handler, dev);
    return 0;
}

static int
eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
    unsigned int n)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
    unsigned int i;

    igc_read_stats_registers(hw, hw_stats);

    if (n < IGC_NB_XSTATS)
        return IGC_NB_XSTATS;

    /* If this is a reset, xstats is NULL and we have already cleared the
     * registers by reading them.
     */
    if (!xstats)
        return 0;

    /* Extended stats */
    for (i = 0; i < IGC_NB_XSTATS; i++) {
        xstats[i].id = i;
        xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
            rte_igc_stats_strings[i].offset);
    }

    return IGC_NB_XSTATS;
}
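/*
 * Usage sketch (illustrative, not part of the driver): applications normally
 * size their buffer by probing first, relying on the "return the required
 * count when the buffer is too small" contract implemented above:
 *
 *     int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstat *buf = malloc(sizeof(*buf) * nb);
 *     rte_eth_xstats_get(port_id, buf, nb);
 */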
static int
eth_igc_xstats_reset(struct rte_eth_dev *dev)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
    struct igc_hw_queue_stats *queue_stats =
        IGC_DEV_PRIVATE_QUEUE_STATS(dev);

    /* Cancel the queue statistics handler to avoid conflicting accesses */
    rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

    /* HW registers are cleared on read */
    igc_reset_queue_stats_register(hw);
    igc_read_stats_registers(hw, hw_stats);

    /* Reset software totals */
    memset(hw_stats, 0, sizeof(*hw_stats));
    memset(queue_stats, 0, sizeof(*queue_stats));

    /* Restart the queue statistics handler */
    rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
            dev);

    return 0;
}

static int
eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
    struct rte_eth_xstat_name *xstats_names, unsigned int size)
{
    unsigned int i;

    if (xstats_names == NULL)
        return IGC_NB_XSTATS;

    if (size < IGC_NB_XSTATS) {
        PMD_DRV_LOG(ERR, "not enough buffers!");
        return IGC_NB_XSTATS;
    }

    for (i = 0; i < IGC_NB_XSTATS; i++)
        strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
            sizeof(xstats_names[i].name));

    return IGC_NB_XSTATS;
}

static int
eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
    const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
    unsigned int limit)
{
    unsigned int i;

    if (!ids)
        return eth_igc_xstats_get_names(dev, xstats_names, limit);

    for (i = 0; i < limit; i++) {
        if (ids[i] >= IGC_NB_XSTATS) {
            PMD_DRV_LOG(ERR, "id value isn't valid");
            return -EINVAL;
        }
        strlcpy(xstats_names[i].name,
            rte_igc_stats_strings[ids[i]].name,
            sizeof(xstats_names[i].name));
    }
    return limit;
}
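/*
 * Usage sketch (illustrative): the by-id variants let an application fetch a
 * single counter without copying the whole table, e.g. for "rx_crc_errors":
 *
 *     uint64_t id, value;
 *     rte_eth_xstats_get_id_by_name(port_id, "rx_crc_errors", &id);
 *     rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
 */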
static int
eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
    uint64_t *values, unsigned int n)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
    unsigned int i;

    igc_read_stats_registers(hw, hw_stats);

    if (!ids) {
        if (n < IGC_NB_XSTATS)
            return IGC_NB_XSTATS;

        /* If this is a reset, values is NULL and we have already
         * cleared the registers by reading them.
         */
        if (!values)
            return 0;

        /* Extended stats */
        for (i = 0; i < IGC_NB_XSTATS; i++)
            values[i] = *(uint64_t *)(((char *)hw_stats) +
                rte_igc_stats_strings[i].offset);

        return IGC_NB_XSTATS;

    } else {
        for (i = 0; i < n; i++) {
            if (ids[i] >= IGC_NB_XSTATS) {
                PMD_DRV_LOG(ERR, "id value isn't valid");
                return -EINVAL;
            }
            values[i] = *(uint64_t *)(((char *)hw_stats) +
                rte_igc_stats_strings[ids[i]].offset);
        }
        return n;
    }
}

static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
    uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
{
    struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);

    /* check that the queue id is valid */
    if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
        PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
            queue_id, IGC_QUEUE_PAIRS_NUM - 1);
        return -EINVAL;
    }

    /* store the statistics mapping index for this queue */
    if (is_rx)
        igc->rxq_stats_map[queue_id] = stat_idx;
    else
        igc->txq_stats_map[queue_id] = stat_idx;

    return 0;
}

static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
    uint32_t vec = IGC_MISC_VEC_ID;

    if (rte_intr_allow_others(intr_handle))
        vec = IGC_RX_VEC_START;

    uint32_t mask = 1u << (queue_id + vec);

    IGC_WRITE_REG(hw, IGC_EIMC, mask);
    IGC_WRITE_FLUSH(hw);

    return 0;
}

static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
    uint32_t vec = IGC_MISC_VEC_ID;

    if (rte_intr_allow_others(intr_handle))
        vec = IGC_RX_VEC_START;

    uint32_t mask = 1u << (queue_id + vec);

    IGC_WRITE_REG(hw, IGC_EIMS, mask);
    IGC_WRITE_FLUSH(hw);

    rte_intr_enable(intr_handle);

    return 0;
}

static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    uint32_t ctrl;
    int tx_pause;
    int rx_pause;

    fc_conf->pause_time = hw->fc.pause_time;
    fc_conf->high_water = hw->fc.high_water;
    fc_conf->low_water = hw->fc.low_water;
    fc_conf->send_xon = hw->fc.send_xon;
    fc_conf->autoneg = hw->mac.autoneg;

    /*
     * Return rx_pause and tx_pause status according to the actual setting
     * of the TFCE and RFCE bits in the CTRL register.
     */
    ctrl = IGC_READ_REG(hw, IGC_CTRL);
    if (ctrl & IGC_CTRL_TFCE)
        tx_pause = 1;
    else
        tx_pause = 0;

    if (ctrl & IGC_CTRL_RFCE)
        rx_pause = 1;
    else
        rx_pause = 0;

    if (rx_pause && tx_pause)
        fc_conf->mode = RTE_ETH_FC_FULL;
    else if (rx_pause)
        fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
    else if (tx_pause)
        fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
    else
        fc_conf->mode = RTE_ETH_FC_NONE;

    return 0;
}
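/*
 * Configuration sketch (illustrative, example values only): the setter below
 * requires high_water <= rx_buf_size - RTE_ETHER_MAX_LEN and
 * low_water <= high_water, with autoneg matching the current MAC setting:
 *
 *     struct rte_eth_fc_conf fc = {
 *         .mode = RTE_ETH_FC_FULL,
 *         .high_water = 1500,
 *         .low_water = 750,
 *         .pause_time = IGC_FC_PAUSE_TIME,
 *         .autoneg = 1,
 *     };
 *     rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */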
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    uint32_t rx_buf_size;
    uint32_t max_high_water;
    uint32_t rctl;
    int err;

    if (fc_conf->autoneg != hw->mac.autoneg)
        return -ENOTSUP;

    rx_buf_size = igc_get_rx_buffer_size(hw);
    PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

    /* Reserve room for at least one Ethernet frame above the high watermark */
    max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
    if (fc_conf->high_water > max_high_water ||
        fc_conf->high_water < fc_conf->low_water) {
        PMD_DRV_LOG(ERR,
            "Incorrect high(%u)/low(%u) water value, max is %u",
            fc_conf->high_water, fc_conf->low_water,
            max_high_water);
        return -EINVAL;
    }

    switch (fc_conf->mode) {
    case RTE_ETH_FC_NONE:
        hw->fc.requested_mode = igc_fc_none;
        break;
    case RTE_ETH_FC_RX_PAUSE:
        hw->fc.requested_mode = igc_fc_rx_pause;
        break;
    case RTE_ETH_FC_TX_PAUSE:
        hw->fc.requested_mode = igc_fc_tx_pause;
        break;
    case RTE_ETH_FC_FULL:
        hw->fc.requested_mode = igc_fc_full;
        break;
    default:
        PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
        return -EINVAL;
    }

    hw->fc.pause_time = fc_conf->pause_time;
    hw->fc.high_water = fc_conf->high_water;
    hw->fc.low_water = fc_conf->low_water;
    hw->fc.send_xon = fc_conf->send_xon;

    err = igc_setup_link_generic(hw);
    if (err == IGC_SUCCESS) {
        /*
         * Check whether MAC control frames should be forwarded. The
         * driver has no native capability for that, so write the
         * register directly.
         */
        rctl = IGC_READ_REG(hw, IGC_RCTL);

        /* set or clear the RCTL.PMCF bit depending on configuration */
        if (fc_conf->mac_ctrl_frame_fwd != 0)
            rctl |= IGC_RCTL_PMCF;
        else
            rctl &= ~IGC_RCTL_PMCF;

        IGC_WRITE_REG(hw, IGC_RCTL, rctl);
        IGC_WRITE_FLUSH(hw);

        return 0;
    }

    PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
    return -EIO;
}

static int
eth_igc_rss_reta_update(struct rte_eth_dev *dev,
    struct rte_eth_rss_reta_entry64 *reta_conf,
    uint16_t reta_size)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    uint16_t i;

    if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
        PMD_DRV_LOG(ERR,
            "The size of the configured RSS redirection table(%d) doesn't match the size supported by hardware(%d)",
            reta_size, RTE_ETH_RSS_RETA_SIZE_128);
        return -EINVAL;
    }

    RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

    /* set redirection table */
    for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
        union igc_rss_reta_reg reta, reg;
        uint16_t idx, shift;
        uint8_t j, mask;

        idx = i / RTE_ETH_RETA_GROUP_SIZE;
        shift = i % RTE_ETH_RETA_GROUP_SIZE;
        mask = (uint8_t)((reta_conf[idx].mask >> shift) &
            IGC_RSS_RDT_REG_SIZE_MASK);

        /* if there is no need to update this register */
        if (!mask ||
            shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
            continue;

        /* check whether the current register value must be read first */
        if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
            reg.dword = 0;
        else
            reg.dword = IGC_READ_REG_LE_VALUE(hw,
                IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));

        /* update the register */
        RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
        for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
            if (mask & (1u << j))
                reta.bytes[j] =
                    (uint8_t)reta_conf[idx].reta[shift + j];
            else
                reta.bytes[j] = reg.bytes[j];
        }
        IGC_WRITE_REG_LE_VALUE(hw,
            IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
    }

    return 0;
}
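/*
 * Layout note (illustrative): the 128-entry redirection table is spread over
 * 32-bit RETA registers holding IGC_RSS_RDT_REG_SIZE entries each, one byte
 * per queue index. For example, assuming four entries per register, table
 * entry 10 is byte (10 % 4) = 2 of RETA(10 / 4) = RETA(2), and is selected by
 * bit 10 of reta_conf[0].mask in the rte_eth_dev_rss_reta_update() call.
 */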
static int
eth_igc_rss_reta_query(struct rte_eth_dev *dev,
    struct rte_eth_rss_reta_entry64 *reta_conf,
    uint16_t reta_size)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    uint16_t i;

    if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
        PMD_DRV_LOG(ERR,
            "The size of the configured RSS redirection table(%d) doesn't match the size supported by hardware(%d)",
            reta_size, RTE_ETH_RSS_RETA_SIZE_128);
        return -EINVAL;
    }

    RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

    /* read redirection table */
    for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
        union igc_rss_reta_reg reta;
        uint16_t idx, shift;
        uint8_t j, mask;

        idx = i / RTE_ETH_RETA_GROUP_SIZE;
        shift = i % RTE_ETH_RETA_GROUP_SIZE;
        mask = (uint8_t)((reta_conf[idx].mask >> shift) &
            IGC_RSS_RDT_REG_SIZE_MASK);

        /* if there is no need to read this register */
        if (!mask ||
            shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
            continue;

        /* read the register and extract the queue indices */
        RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
        reta.dword = IGC_READ_REG_LE_VALUE(hw,
            IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
        for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
            if (mask & (1u << j))
                reta_conf[idx].reta[shift + j] = reta.bytes[j];
        }
    }

    return 0;
}

static int
eth_igc_rss_hash_update(struct rte_eth_dev *dev,
    struct rte_eth_rss_conf *rss_conf)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

    igc_hw_rss_hash_set(hw, rss_conf);
    return 0;
}

static int
eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
    struct rte_eth_rss_conf *rss_conf)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
    uint32_t mrqc;
    uint64_t rss_hf;

    if (hash_key != NULL) {
        int i;

        /* the key buffer must match the hardware hash key size exactly */
        if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
            PMD_DRV_LOG(ERR,
                "RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
                rss_conf->rss_key_len, IGC_HKEY_SIZE);
            return -EINVAL;
        }

        /* read the RSS key from the registers */
        for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
            hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
    }

    /* get the RSS functions configured in the MRQC register */
    mrqc = IGC_READ_REG(hw, IGC_MRQC);
    if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
        return 0;

    rss_hf = 0;
    if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
        rss_hf |= RTE_ETH_RSS_IPV4;
    if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
        rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
    if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
        rss_hf |= RTE_ETH_RSS_IPV6;
    if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
        rss_hf |= RTE_ETH_RSS_IPV6_EX;
    if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
        rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
    if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
        rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
    if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
        rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
    if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
        rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
    if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
        rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;

    rss_conf->rss_hf |= rss_hf;
    return 0;
}
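/*
 * Usage sketch (illustrative): querying the RSS configuration above requires
 * a key buffer of exactly IGC_HKEY_SIZE bytes, otherwise -EINVAL is returned:
 *
 *     uint8_t key[IGC_HKEY_SIZE];
 *     struct rte_eth_rss_conf conf = {
 *         .rss_key = key,
 *         .rss_key_len = sizeof(key),
 *     };
 *     rte_eth_dev_rss_hash_conf_get(port_id, &conf);
 */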
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
    uint32_t vfta;
    uint32_t vid_idx;
    uint32_t vid_bit;

    vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
    vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
    vfta = shadow_vfta->vfta[vid_idx];
    if (on)
        vfta |= vid_bit;
    else
        vfta &= ~vid_bit;
    IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);

    /* update local VFTA copy */
    shadow_vfta->vfta[vid_idx] = vfta;

    return 0;
}

static void
igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

    igc_read_reg_check_clear_bits(hw, IGC_RCTL,
            IGC_RCTL_CFIEN | IGC_RCTL_VFE);
}

static void
igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
    uint32_t reg_val;
    int i;

    /* Filter Table Enable, CFI not used for packet acceptance */
    reg_val = IGC_READ_REG(hw, IGC_RCTL);
    reg_val &= ~IGC_RCTL_CFIEN;
    reg_val |= IGC_RCTL_VFE;
    IGC_WRITE_REG(hw, IGC_RCTL, reg_val);

    /* restore VFTA table */
    for (i = 0; i < IGC_VFTA_SIZE; i++)
        IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
}

static void
igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

    igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static void
igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

    igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static int
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
    uint32_t ctrl_ext;

    ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

    /* if extend vlan hasn't been enabled */
    if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
        return 0;

    /* Update maximum packet length */
    if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
        PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
            frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
        return -EINVAL;
    }
    IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);

    IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
    return 0;
}
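/*
 * Note (illustrative summary of the extend enable/disable helpers): RLPML
 * holds the maximum accepted packet length. When extended (QinQ) VLAN is
 * enabled the full frame_size is programmed; when it is disabled the limit
 * shrinks by VLAN_TAG_SIZE, since the outer tag is no longer expected on the
 * wire.
 */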
static int
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
    uint32_t ctrl_ext;

    ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

    /* if extend vlan has been enabled */
    if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
        return 0;

    /* Update maximum packet length */
    if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
        PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
            frame_size, MAX_RX_JUMBO_FRAME_SIZE);
        return -EINVAL;
    }
    IGC_WRITE_REG(hw, IGC_RLPML, frame_size);

    IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
    return 0;
}

static int
eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
    struct rte_eth_rxmode *rxmode;

    rxmode = &dev->data->dev_conf.rxmode;
    if (mask & RTE_ETH_VLAN_STRIP_MASK) {
        if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
            igc_vlan_hw_strip_enable(dev);
        else
            igc_vlan_hw_strip_disable(dev);
    }

    if (mask & RTE_ETH_VLAN_FILTER_MASK) {
        if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
            igc_vlan_hw_filter_enable(dev);
        else
            igc_vlan_hw_filter_disable(dev);
    }

    if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
        if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
            return igc_vlan_hw_extend_enable(dev);
        else
            return igc_vlan_hw_extend_disable(dev);
    }

    return 0;
}

static int
eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
    enum rte_vlan_type vlan_type,
    uint16_t tpid)
{
    struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
    uint32_t reg_val;

    /* only outer TPID of double VLAN can be configured */
    if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
        reg_val = IGC_READ_REG(hw, IGC_VET);
        reg_val = (reg_val & (~IGC_VET_EXT)) |
            ((uint32_t)tpid << IGC_VET_EXT_SHIFT);
        IGC_WRITE_REG(hw, IGC_VET, reg_val);

        return 0;
    }

    /* all other TPID values are read-only */
    PMD_DRV_LOG(ERR, "Not supported");
    return -ENOTSUP;
}

static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
    struct rte_pci_device *pci_dev)
{
    PMD_INIT_FUNC_TRACE();
    return rte_eth_dev_pci_generic_probe(pci_dev,
        sizeof(struct igc_adapter), eth_igc_dev_init);
}

static int
eth_igc_pci_remove(struct rte_pci_device *pci_dev)
{
    PMD_INIT_FUNC_TRACE();
    return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
}

static struct rte_pci_driver rte_igc_pmd = {
    .id_table = pci_id_igc_map,
    .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
    .probe = eth_igc_pci_probe,
    .remove = eth_igc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");