/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */

#include <stdint.h>
#include <string.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "igc_logs.h"
#include "igc_txrx.h"
#include "igc_filter.h"
#include "igc_flow.h"

#define IGC_INTEL_VENDOR_ID		0x8086

#define IGC_FC_PAUSE_TIME		0x0680
#define IGC_LINK_UPDATE_CHECK_TIMEOUT	90  /* 9s */
#define IGC_LINK_UPDATE_CHECK_INTERVAL	100 /* ms */

#define IGC_MISC_VEC_ID			RTE_INTR_VEC_ZERO_OFFSET
#define IGC_RX_VEC_START		RTE_INTR_VEC_RXTX_OFFSET
#define IGC_MSIX_OTHER_INTR_VEC		0   /* MSI-X other interrupt vector */
#define IGC_FLAG_NEED_LINK_UPDATE	(1u << 0)	/* need to update link */

#define IGC_DEFAULT_RX_FREE_THRESH	32

#define IGC_DEFAULT_RX_PTHRESH		8
#define IGC_DEFAULT_RX_HTHRESH		8
#define IGC_DEFAULT_RX_WTHRESH		4

#define IGC_DEFAULT_TX_PTHRESH		8
#define IGC_DEFAULT_TX_HTHRESH		1
#define IGC_DEFAULT_TX_WTHRESH		16

/* External VLAN Enable bit mask */
#define IGC_CTRL_EXT_EXT_VLAN		(1u << 26)

/* Speed select */
#define IGC_CTRL_SPEED_MASK		(7u << 8)
#define IGC_CTRL_SPEED_2500		(6u << 8)

/* External VLAN Ether Type bit mask and shift */
#define IGC_VET_EXT			0xFFFF0000
#define IGC_VET_EXT_SHIFT		16

/* Force EEE Auto-negotiation */
#define IGC_EEER_EEE_FRC_AN		(1u << 28)

/* Per Queue Good Packets Received Count */
#define IGC_PQGPRC(idx)		(0x10010 + 0x100 * (idx))
/* Per Queue Good Octets Received Count */
#define IGC_PQGORC(idx)		(0x10018 + 0x100 * (idx))
/* Per Queue Good Octets Transmitted Count */
#define IGC_PQGOTC(idx)		(0x10034 + 0x100 * (idx))
/* Per Queue Multicast Packets Received Count */
#define IGC_PQMPRC(idx)		(0x10038 + 0x100 * (idx))
/* Transmit Queue Drop Packet Count */
#define IGC_TQDPC(idx)		(0xe030 + 0x40 * (idx))

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define U32_0_IN_U64		0	/* lower bytes of u64 */
#define U32_1_IN_U64		1	/* higher bytes of u64 */
#else
#define U32_0_IN_U64		1
#define U32_1_IN_U64		0
#endif

#define IGC_ALARM_INTERVAL	8000000u
/* us; some per-queue registers wrap around back to 0 about every 13.6s,
 * so poll them every 8 seconds.
 */
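
/* Rx/Tx descriptor ring limits reported through eth_igc_infos_get(). */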
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IGC_MAX_RXD,
	.nb_min = IGC_MIN_RXD,
	.nb_align = IGC_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IGC_MAX_TXD,
	.nb_min = IGC_MIN_TXD,
	.nb_align = IGC_TXD_ALIGN,
	.nb_seg_max = IGC_TX_MAX_SEG,
	.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
};

static const struct rte_pci_id pci_id_igc_map[] = {
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K)  },
	{ .vendor_id = 0, /* sentinel */ },
};

/* store statistics names and their offsets in the stats structure */
struct rte_igc_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)},
	{"rx_errors", offsetof(struct igc_hw_stats, rxerrc)},
	{"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct igc_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct igc_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)},
	{"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)},
	{"rx_length_errors", offsetof(struct igc_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)},
	{"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)},
	{"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct igc_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct igc_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct igc_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct igc_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		ptc1023)},
	{"tx_size_1023_to_max_packets", offsetof(struct igc_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)},
	{"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)},
	{"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)},
	{"rx_descriptor_lower_threshold",
		offsetof(struct igc_hw_stats, icrxdmtc)},
};

#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \
		sizeof(rte_igc_stats_strings[0]))

static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static int eth_igc_set_link_up(struct rte_eth_dev *dev);
static int eth_igc_set_link_down(struct rte_eth_dev *dev);
static int eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
		char *fw_version, size_t fw_size);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int eth_igc_led_on(struct rte_eth_dev *dev);
static int eth_igc_led_off(struct rte_eth_dev *dev);
static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igc_rar_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);
static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_igc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *rte_stats);
static int eth_igc_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned int n);
static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
		unsigned int limit);
static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type, uint16_t tpid);

static const struct eth_dev_ops eth_igc_ops = {
	.dev_configure		= eth_igc_configure,
	.link_update		= eth_igc_link_update,
	.dev_stop		= eth_igc_stop,
	.dev_start		= eth_igc_start,
	.dev_close		= eth_igc_close,
	.dev_reset		= eth_igc_reset,
	.dev_set_link_up	= eth_igc_set_link_up,
	.dev_set_link_down	= eth_igc_set_link_down,
	.promiscuous_enable	= eth_igc_promiscuous_enable,
	.promiscuous_disable	= eth_igc_promiscuous_disable,
	.allmulticast_enable	= eth_igc_allmulticast_enable,
	.allmulticast_disable	= eth_igc_allmulticast_disable,
	.fw_version_get		= eth_igc_fw_version_get,
	.dev_infos_get		= eth_igc_infos_get,
	.dev_led_on		= eth_igc_led_on,
	.dev_led_off		= eth_igc_led_off,
	.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
	.mtu_set		= eth_igc_mtu_set,
	.mac_addr_add		= eth_igc_rar_set,
	.mac_addr_remove	= eth_igc_rar_clear,
	.mac_addr_set		= eth_igc_default_mac_addr_set,
	.set_mc_addr_list	= eth_igc_set_mc_addr_list,

	.rx_queue_setup		= eth_igc_rx_queue_setup,
	.rx_queue_release	= eth_igc_rx_queue_release,
	.tx_queue_setup		= eth_igc_tx_queue_setup,
	.tx_queue_release	= eth_igc_tx_queue_release,
	.tx_done_cleanup	= eth_igc_tx_done_cleanup,
	.rxq_info_get		= eth_igc_rxq_info_get,
	.txq_info_get		= eth_igc_txq_info_get,
	.stats_get		= eth_igc_stats_get,
	.xstats_get		= eth_igc_xstats_get,
	.xstats_get_by_id	= eth_igc_xstats_get_by_id,
	.xstats_get_names_by_id	= eth_igc_xstats_get_names_by_id,
	.xstats_get_names	= eth_igc_xstats_get_names,
	.stats_reset		= eth_igc_xstats_reset,
	.xstats_reset		= eth_igc_xstats_reset,
	.queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
	.rx_queue_intr_enable	= eth_igc_rx_queue_intr_enable,
	.rx_queue_intr_disable	= eth_igc_rx_queue_intr_disable,
	.flow_ctrl_get		= eth_igc_flow_ctrl_get,
	.flow_ctrl_set		= eth_igc_flow_ctrl_set,
	.reta_update		= eth_igc_rss_reta_update,
	.reta_query		= eth_igc_rss_reta_query,
	.rss_hash_update	= eth_igc_rss_hash_update,
	.rss_hash_conf_get	= eth_igc_rss_hash_conf_get,
	.vlan_filter_set	= eth_igc_vlan_filter_set,
	.vlan_offload_set	= eth_igc_vlan_offload_set,
	.vlan_tpid_set		= eth_igc_vlan_tpid_set,
	.vlan_strip_queue_set	= eth_igc_vlan_strip_queue_set,
	.flow_ops_get		= eth_igc_flow_ops_get,
};

/*
 * multiple queue mode checking
 */
static int
igc_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		PMD_INIT_LOG(ERR, "SRIOV is not supported.");
		return -EINVAL;
	}

	if (rx_mq_mode != ETH_MQ_RX_NONE &&
		rx_mq_mode != ETH_MQ_RX_RSS) {
		/* RSS together with VMDq not supported */
		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				rx_mq_mode);
		return -EINVAL;
	}

	/* To avoid breaking software that sets an invalid mode, only
	 * display a warning if an invalid mode is used.
	 */
	if (tx_mq_mode != ETH_MQ_TX_NONE)
		PMD_INIT_LOG(WARNING,
			"TX mode %d is not supported; it has no effect in this driver and is ignored.",
			tx_mq_mode);

	return 0;
}

static int
eth_igc_configure(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	ret = igc_check_mq_mode(dev);
	if (ret != 0)
		return ret;

	intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
	return 0;
}

static int
eth_igc_set_link_up(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_up_phy(hw);
	else
		igc_power_up_fiber_serdes_link(hw);
	return 0;
}

static int
eth_igc_set_link_down(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_down_phy(hw);
	else
		igc_shutdown_fiber_serdes_link(hw);
	return 0;
}

/*
 * disable other interrupts
 */
static void
igc_intr_other_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMC, ~0);
	IGC_WRITE_FLUSH(hw);
}

/*
 * enable other interrupts
 */
static inline void
igc_intr_other_enable(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
	IGC_WRITE_FLUSH(hw);
}

/*
 * It reads ICR to get the interrupt causes, checks them, and sets a bit
 * flag when the link status needs to be updated.
 */
static void
eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	/* read-on-clear nic registers here */
	icr = IGC_READ_REG(hw, IGC_ICR);

	intr->flags = 0;
	if (icr & IGC_ICR_LSC)
		intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
}

/* return 0 means link status changed, -1 means not changed */
static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case igc_media_type_copper:
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case igc_media_type_fiber:
			igc_check_for_link(hw);
			link_check = (IGC_READ_REG(hw, IGC_STATUS) &
					IGC_STATUS_LU);
			break;

		case igc_media_type_internal_serdes:
			igc_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

		if (speed == SPEED_2500) {
			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
			if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
				tipg &= ~IGC_TIPG_IPGT_MASK;
				tipg |= 0x0b;
				IGC_WRITE_REG(hw, IGC_TIPG, tipg);
			}
		}
	} else {
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

/*
 * It executes link_update after knowing an interrupt is present.
 */
static void
eth_igc_interrupt_action(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		ret = eth_igc_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s",
				dev->data->port_id,
				(unsigned int)link.link_speed,
				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
		else
			PMD_DRV_LOG(INFO, " Port %d: Link Down",
				dev->data->port_id);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
}

/*
 * Interrupt handler which shall be registered at first.
 *
 * @handle
 *  Pointer to interrupt handle.
 * @param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
eth_igc_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igc_interrupt_get_status(dev);
	eth_igc_interrupt_action(dev);
}

static void igc_read_queue_stats_register(struct rte_eth_dev *dev);

/*
 * Update the queue status every IGC_ALARM_INTERVAL.
 * @param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
igc_update_queue_stats_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	igc_read_queue_stats_register(dev);
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
}

/*
 * rx,tx enable/disable
 */
static void
eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t tctl, rctl;

	tctl = IGC_READ_REG(hw, IGC_TCTL);
	rctl = IGC_READ_REG(hw, IGC_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= IGC_TCTL_EN;
		rctl |= IGC_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~IGC_TCTL_EN;
		rctl &= ~IGC_RCTL_EN;
	}
	IGC_WRITE_REG(hw, IGC_TCTL, tctl);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);
}

/*
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static int
eth_igc_stop(struct rte_eth_dev *dev)
{
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct rte_eth_link link;

	dev->data->dev_started = 0;
	adapter->stopped = 1;

	/* disable receive and transmit */
	eth_igc_rxtx_control(dev, false);

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	igc_intr_other_disable(dev);

	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* disable checking EEE operation in MAC loopback mode */
	igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);

	/* Set bit for Go Link disconnect */
	igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
			IGC_82580_PM_GO_LINKD);

	/* Power down the phy. Needed to make the link go down */
	eth_igc_set_link_down(dev);

	igc_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	return 0;
}

/*
 * write interrupt vector allocation register
 * @hw
 *  board private structure
 * @queue_index
 *  queue index, valid 0,1,2,3
 * @tx
 *  tx:1, rx:0
 * @msix_vector
 *  msix-vector, valid 0,1,2,3,4
 */
static void
igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,
		bool tx, uint8_t msix_vector)
{
	uint8_t offset = 0;
	uint8_t reg_index = queue_index >> 1;
	uint32_t val;

	/*
	 * IVAR(0)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX1		RX1		TX0		RX0
	 *
	 * IVAR(1)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX3		RX3		TX2		RX2
	 */

	if (tx)
		offset = 8;

	if (queue_index & 1)
		offset += 16;

	val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset;

	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);
}

/* Sets up the hardware to generate MSI-X interrupts properly
 * @hw
 *  board private structure
 */
static void
igc_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	uint32_t intr_mask;
	uint32_t vec = IGC_MISC_VEC_ID;
	uint32_t base = IGC_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	int i;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = IGC_RX_VEC_START;
		vec = base;
		misc_shift = 1;
	}

	/* turn on MSI-X capability first */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
				IGC_GPIE_PBA | IGC_GPIE_EIAME |
				IGC_GPIE_NSICR);
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
			misc_shift;

	if (dev->data->dev_conf.intr_conf.lsc)
		intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);

	/* enable msix auto-clear */
	igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);

	/* set other cause interrupt vector */
	igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
		(uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);

	/* enable auto-mask */
	igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		igc_write_ivar(hw, i, 0, vec);
		intr_handle->intr_vec[i] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	IGC_WRITE_FLUSH(hw);
}

/**
 * Enable or disable the link-status-change (LSC) bit in the software
 * interrupt mask; the mask is written to hardware by igc_intr_other_enable().
 *
 * @dev
 *  Pointer to struct rte_eth_dev.
 * @on
 *  Enable or Disable
 */
static void
igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	if (on)
		intr->mask |= IGC_ICR_LSC;
	else
		intr->mask &= ~IGC_ICR_LSC;
}

/*
 * It enables the rx-queue interrupts.
 * It is called only once during NIC initialization.
 */
static void
igc_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t mask;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift;
	IGC_WRITE_REG(hw, IGC_EIMS, mask);
}

/*
 * Get hardware rx-buffer size.
 */
static inline int
igc_get_rx_buffer_size(struct igc_hw *hw)
{
	return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
}

/*
 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igc_hw_control_acquire(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igc_hw_control_release(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT,
			ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}
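
/*
 * Bring the MAC to a known state: take over from firmware, issue a global
 * reset, disable wake-up, set up the flow-control thresholds described
 * below and run igc_init_hw().
 */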
static int
igc_hardware_init(struct igc_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igc_hw_control_acquire(hw);

	/* Issue a global reset */
	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/*
	 * Hardware flow control
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 */
	rx_buf_size = igc_get_rx_buffer_size(hw);
	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	hw->fc.requested_mode = igc_fc_full;

	diag = igc_init_hw(hw);
	if (diag < 0)
		return diag;

	igc_get_phy_info(hw);
	igc_check_for_link(hw);

	return 0;
}
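
/*
 * Start the device: power up the PHY, re-initialize the hardware, set up
 * MSI-X and rx-queue interrupts, configure Tx/Rx rings, advertised link
 * speeds and optional MAC loopback, then enable Tx/Rx.
 */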
static int
eth_igc_start(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t *speeds;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	/* disable uio/vfio intr/eventfd mapping */
	if (!adapter->stopped)
		rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igc_set_link_up(dev);

	/* Put the address into the Receive Address Array */
	igc_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igc_hardware_init(hw)) {
		PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
		dev->data->dev_conf.intr_conf.rxq) {
		uint32_t intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for rx interrupt */
	igc_configure_msix_intr(dev);

	igc_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = igc_rx_init(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
		igc_dev_clear_queues(dev);
		return ret;
	}

	igc_clear_hw_cntrs_base_generic(hw);

	/* VLAN Offload Settings */
	eth_igc_vlan_offload_set(dev,
		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
		hw->mac.autoneg = 1;
	} else {
		int num_speeds = 0;

		if (*speeds & ETH_LINK_SPEED_FIXED) {
			PMD_DRV_LOG(ERR,
				"Force speed mode currently not supported");
			igc_dev_clear_queues(dev);
			return -EINVAL;
		}

		hw->phy.autoneg_advertised = 0;
		hw->mac.autoneg = 1;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_2_5G) {
			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
			num_speeds++;
		}
		if (num_speeds == 0)
			goto error_invalid_config;
	}

	igc_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc)
			igc_lsc_interrupt_setup(dev, 1);
		else
			igc_lsc_interrupt_setup(dev, 0);
	} else {
		rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc)
			PMD_DRV_LOG(INFO,
				"LSC won't enable because of no intr multiplex");
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq &&
			rte_intr_dp_is_en(intr_handle))
		igc_rxq_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	igc_intr_other_enable(dev);

	eth_igc_rxtx_control(dev, true);
	eth_igc_link_update(dev, 0);

	/* configure MAC-loopback mode */
	if (dev->data->dev_conf.lpbk_mode == 1) {
		uint32_t reg_val;

		reg_val = IGC_READ_REG(hw, IGC_CTRL);
		reg_val &= ~IGC_CTRL_SPEED_MASK;
		reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
			IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
		IGC_WRITE_REG(hw, IGC_CTRL, reg_val);

		igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
	}

	return 0;

error_invalid_config:
	PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
			dev->data->dev_conf.link_speeds, dev->data->port_id);
	igc_dev_clear_queues(dev);
	return -EINVAL;
}
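
/*
 * Release any software/firmware semaphores that may have been left taken
 * by an improper application exit, so that later acquisitions do not fail.
 */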
static int
igc_reset_swfw_lock(struct igc_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = igc_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (igc_get_hw_semaphore_generic(hw) < 0)
		PMD_DRV_LOG(DEBUG, "SMBI lock released");

	igc_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage.
		 * If this is the case, it is due to an improper exit of the
		 * application. So force the release of the faulty lock.
		 */
		mask = IGC_SWFW_PHY0_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = IGC_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return IGC_SUCCESS;
}

/*
 * free all rx/tx queues.
 */
static void
igc_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_igc_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_igc_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static int
eth_igc_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	int retry = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!adapter->stopped)
		ret = eth_igc_stop(dev);

	igc_flow_flush(dev, NULL);
	igc_clear_all_filter(dev);

	igc_intr_other_disable(dev);
	do {
		int ret = rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
			break;

		PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
		DELAY(200 * 1000); /* delay 200ms */
	} while (retry++ < 5);

	igc_phy_hw_reset(hw);
	igc_hw_control_release(hw);
	igc_dev_free_queues(dev);

	/* Reset any pending lock */
	igc_reset_swfw_lock(hw);

	return ret;
}

static void
igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
}
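
/*
 * Per-port init: map BAR registers, identify the NIC, validate the NVM,
 * read the permanent MAC address, initialize the hardware and register
 * the interrupt handler.
 */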
static int
eth_igc_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	int i, error = 0;

	PMD_INIT_FUNC_TRACE();
	dev->dev_ops = &eth_igc_ops;
	dev->rx_queue_count = eth_igc_rx_queue_count;
	dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
	dev->tx_descriptor_status = eth_igc_tx_descriptor_status;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->back = pci_dev;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igc_identify_hardware(dev, pci_dev);
	if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	igc_get_bus_info(hw);

	/* Reset any pending lock */
	if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = igc_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc("igc",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
				RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Now initialize the hardware */
	if (igc_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}

	hw->mac.get_link_status = 1;
	igc->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (igc_check_reset_block(hw) < 0)
		PMD_INIT_LOG(ERR,
			"PHY reset is blocked due to SOL/IDER session.");

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
			dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
			eth_igc_interrupt_handler, (void *)dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igc_intr_other_enable(dev);

	/* initiate queue status */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		igc->txq_stats_map[i] = -1;
		igc->rxq_stats_map[i] = -1;
	}

	igc_flow_init(dev);
	igc_clear_all_filter(dev);
	return 0;

err_late:
	igc_hw_control_release(hw);
	return error;
}
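
/* Uninitialize the port; all resources are released in eth_igc_close(). */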
static int
eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	eth_igc_close(eth_dev);
	return 0;
}

static int
eth_igc_reset(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = eth_igc_dev_uninit(dev);
	if (ret)
		return ret;

	return eth_igc_dev_init(dev);
}

static int
eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= IGC_RCTL_MPE;
	else
		rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= IGC_RCTL_MPE;
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return 0;	/* must remain in all_multicast mode */

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			size_t fw_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_fw_version fw;
	int ret;

	igc_get_fw_version(hw, &fw);

	/* if option rom is valid, display its version too */
	if (fw.or_valid) {
		ret = snprintf(fw_version, fw_size,
			"%d.%d, 0x%08x, %d.%d.%d",
			fw.eep_major, fw.eep_minor, fw.etrack_id,
			fw.or_major, fw.or_build, fw.or_patch);
	/* no option rom */
	} else {
		if (fw.etrack_id != 0X0000) {
			ret = snprintf(fw_version, fw_size,
				"%d.%d, 0x%08x",
				fw.eep_major, fw.eep_minor,
				fw.etrack_id);
		} else {
			ret = snprintf(fw_version, fw_size,
				"%d.%d.%d",
				fw.eep_major, fw.eep_minor,
				fw.eep_build);
		}
	}
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}
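
/* Report device capabilities, limits and default Rx/Tx configuration. */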
static int
eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_vmdq_pools = 0;

	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGC_DEFAULT_RX_PTHRESH,
			.hthresh = IGC_DEFAULT_RX_HTHRESH,
			.wthresh = IGC_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGC_DEFAULT_TX_PTHRESH,
			.hthresh = IGC_DEFAULT_TX_HTHRESH,
			.wthresh = IGC_DEFAULT_TX_WTHRESH,
		},
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;

	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	return 0;
}

static int
eth_igc_led_on(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_igc_led_off(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}

static const uint32_t *
eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rx_desc_pkt_info_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}
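
/*
 * Update the MTU: reject sizes that would require scattered Rx while the
 * port is running, toggle long-packet reception (RCTL.LPE) and program the
 * new maximum frame size into RLPML.
 */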
static int
eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
	uint32_t rctl;

	/* if extend vlan has been enabled */
	if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
		frame_size += VLAN_TAG_SIZE;

	/*
	 * If device is started, refuse mtu that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev->data->dev_started && !dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	if (mtu > RTE_ETHER_MTU)
		rctl |= IGC_RCTL_LPE;
	else
		rctl &= ~IGC_RCTL_LPE;
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);

	IGC_WRITE_REG(hw, IGC_RLPML, frame_size);

	return 0;
}

static int
eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_rar_set(hw, mac_addr->addr_bytes, index);
	RTE_SET_USED(pool);
	return 0;
}

static void
eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[RTE_ETHER_ADDR_LEN];
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	memset(addr, 0, sizeof(addr));
	igc_rar_set(hw, addr, index);
}

static int
eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_rar_set(hw, addr->addr_bytes, 0);
	return 0;
}

static int
eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}

/*
 * Read hardware registers
 */
static void
igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
{
	int pause_frames;

	uint64_t old_gprc = stats->gprc;
	uint64_t old_gptc = stats->gptc;
	uint64_t old_tpr = stats->tpr;
	uint64_t old_tpt = stats->tpt;
	uint64_t old_rpthc = stats->rpthc;
	uint64_t old_hgptc = stats->hgptc;

	stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
	stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
	stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
	stats->mpc += IGC_READ_REG(hw, IGC_MPC);
	stats->scc += IGC_READ_REG(hw, IGC_SCC);
	stats->ecol += IGC_READ_REG(hw, IGC_ECOL);

	stats->mcc += IGC_READ_REG(hw, IGC_MCC);
	stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
	stats->colc += IGC_READ_REG(hw, IGC_COLC);

	stats->dc += IGC_READ_REG(hw, IGC_DC);
	stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
	stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
	stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
	stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
	stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
	stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
	stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
	stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
	stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
	stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
	stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
	stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
	stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
	stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
	stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
	stats->gptc += IGC_READ_REG(hw, IGC_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	/* Workaround CRC bytes included in size, take away 4 bytes/packet */
	stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
	stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
	stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
	stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
	stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
	stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;

	stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
	stats->ruc += IGC_READ_REG(hw, IGC_RUC);
	stats->rfc += IGC_READ_REG(hw, IGC_RFC);
	stats->roc += IGC_READ_REG(hw, IGC_ROC);
	stats->rjc += IGC_READ_REG(hw, IGC_RJC);

	stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
	stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
	stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
	stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
	stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
	stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
	stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);

	stats->tpr += IGC_READ_REG(hw, IGC_TPR);
	stats->tpt += IGC_READ_REG(hw, IGC_TPT);

	stats->tor += IGC_READ_REG(hw, IGC_TORL);
	stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
	stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
	stats->tot += IGC_READ_REG(hw, IGC_TOTL);
	stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
	stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;

	stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
	stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
	stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
	stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
	stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
	stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
	stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
	stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
	stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);

	stats->iac += IGC_READ_REG(hw, IGC_IAC);
	stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);
	stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);
	stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);

	/* Host to Card Statistics */
	stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);
	stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);
	stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
	stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);
	stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);
	stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
	stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);
}

/*
 * Write 0 to all queue status registers
 */
static void
igc_reset_queue_stats_register(struct igc_hw *hw)
{
	int i;

	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGORC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);
		IGC_WRITE_REG(hw, IGC_RQDPC(i), 0);
		IGC_WRITE_REG(hw, IGC_TQDPC(i), 0);
	}
}

/*
 * Read all hardware queue status registers
 */
static void
igc_read_queue_stats_register(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_queue_stats *queue_stats =
				IGC_DEV_PRIVATE_QUEUE_STATS(dev);
	int i;

	/*
	 * These registers are not cleared on read. Furthermore, a register
	 * wraps around back to 0x00000000 on the next increment after
	 * reaching 0xFFFFFFFF and then continues normal count operation.
	 */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		union {
			u64 ddword;
			u32 dword[2];
		} value;
		u32 tmp;

		/*
		 * Read the register first; if the new value is smaller than
		 * the previous reading, the register has wrapped around, so
		 * increment the high 4 bytes by 1 and replace the low 4 bytes
		 * with the new value.
		 */
		tmp = IGC_READ_REG(hw, IGC_PQGPRC(i));
		value.ddword = queue_stats->pqgprc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgprc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQGPTC(i));
		value.ddword = queue_stats->pqgptc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgptc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQGORC(i));
		value.ddword = queue_stats->pqgorc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgorc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQGOTC(i));
		value.ddword = queue_stats->pqgotc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgotc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQMPRC(i));
		value.ddword = queue_stats->pqmprc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqmprc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_RQDPC(i));
		value.ddword = queue_stats->rqdpc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->rqdpc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_TQDPC(i));
		value.ddword = queue_stats->tqdpc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->tqdpc[i] = value.ddword;
	}
}
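
/*
 * Fill rte_eth_stats from the accumulated MAC and per-queue counters.
 * The periodic queue-stats alarm is paused while the registers are read
 * to avoid concurrent access, and re-armed before returning.
 */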
static int
eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);
	int i;

	/*
	 * Cancel the statistics handler since it will also read the queue
	 * statistics registers.
	 */
	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* Read the statistics registers */
	igc_read_queue_stats_register(dev);
	igc_read_stats_registers(hw, stats);

	if (rte_stats == NULL) {
		/* Restart the queue statistics handler */
		rte_eal_alarm_set(IGC_ALARM_INTERVAL,
				igc_update_queue_stats_handler, dev);
		return -EINVAL;
	}

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs + stats->rlec +
			stats->rxerrc + stats->algnerrc;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes = stats->gorc;
	rte_stats->obytes = stats->gotc;

	/* Get per-queue statistics */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		/* Get TX queue statistics */
		int map_id = igc->txq_stats_map[i];
		if (map_id >= 0) {
			rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
			rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
		}
		/* Get RX queue statistics */
		map_id = igc->rxq_stats_map[i];
		if (map_id >= 0) {
			rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
			rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
			rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
		}
	}

	/* Restart the queue statistics handler */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
	return 0;
}

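/*
 * Get extended statistics.
 * Follows the rte_eth xstats convention: if the supplied array is too small,
 * the required number of entries is returned; a NULL array returns 0, the
 * hardware registers having already been read into the software copy.
 */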
static int
eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats =
			IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	igc_read_stats_registers(hw, hw_stats);

	if (n < IGC_NB_XSTATS)
		return IGC_NB_XSTATS;

	/* If this is a reset request, xstats is NULL, and we have already
	 * cleared the registers by reading them.
	 */
	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i < IGC_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igc_stats_strings[i].offset);
	}

	return IGC_NB_XSTATS;
}

static int
eth_igc_xstats_reset(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);

	/* Cancel the queue statistics handler to avoid a conflict */
	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* HW registers are cleared on read */
	igc_reset_queue_stats_register(hw);
	igc_read_stats_registers(hw, hw_stats);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
	memset(queue_stats, 0, sizeof(*queue_stats));

	/* Restart the queue statistics handler */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
			dev);

	return 0;
}

static int
eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int size)
{
	unsigned int i;

	if (xstats_names == NULL)
		return IGC_NB_XSTATS;

	if (size < IGC_NB_XSTATS) {
		PMD_DRV_LOG(ERR, "not enough buffers!");
		return IGC_NB_XSTATS;
	}

	for (i = 0; i < IGC_NB_XSTATS; i++)
		strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
			sizeof(xstats_names[i].name));

	return IGC_NB_XSTATS;
}

static int
eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
		unsigned int limit)
{
	unsigned int i;

	if (!ids)
		return eth_igc_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= IGC_NB_XSTATS) {
			PMD_DRV_LOG(ERR, "id value isn't valid");
			return -EINVAL;
		}
		strlcpy(xstats_names[i].name,
			rte_igc_stats_strings[ids[i]].name,
			sizeof(xstats_names[i].name));
	}
	return limit;
}

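/*
 * Get extended statistics selected by an array of ids.
 * When ids is NULL all statistics are returned, mirroring
 * eth_igc_xstats_get() but filling a plain array of values.
 */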
static int
eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	igc_read_stats_registers(hw, hw_stats);

	if (!ids) {
		if (n < IGC_NB_XSTATS)
			return IGC_NB_XSTATS;

		/* If this is a reset request, values is NULL, and we have
		 * already cleared the registers by reading them.
		 */
		if (!values)
			return 0;

		/* Extended stats */
		for (i = 0; i < IGC_NB_XSTATS; i++)
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[i].offset);

		return IGC_NB_XSTATS;

	} else {
		for (i = 0; i < n; i++) {
			if (ids[i] >= IGC_NB_XSTATS) {
				PMD_DRV_LOG(ERR, "id value isn't valid");
				return -EINVAL;
			}
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[ids[i]].offset);
		}
		return n;
	}
}

static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);

	/* check that the queue id is valid */
	if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
		PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
			queue_id, IGC_QUEUE_PAIRS_NUM - 1);
		return -EINVAL;
	}

	/* store the queue-to-statistics mapping */
	if (is_rx)
		igc->rxq_stats_map[queue_id] = stat_idx;
	else
		igc->txq_stats_map[queue_id] = stat_idx;

	return 0;
}

static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMC, mask);
	IGC_WRITE_FLUSH(hw);

	return 0;
}

static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMS, mask);
	IGC_WRITE_FLUSH(hw);

	rte_intr_enable(intr_handle);

	return 0;
}

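/*
 * Get the current flow control configuration.
 * The reported rx_pause/tx_pause state reflects the RFCE/TFCE bits actually
 * programmed in the CTRL register rather than the requested mode.
 */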
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to the actual setting
	 * of the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	if (ctrl & IGC_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & IGC_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;
	int err;

	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;

	rx_buf_size = igc_get_rx_buffer_size(hw);
	PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* Reserve at least one Ethernet frame for the high watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if (fc_conf->high_water > max_high_water ||
		fc_conf->high_water < fc_conf->low_water) {
		PMD_DRV_LOG(ERR,
			"Incorrect high(%u)/low(%u) water value, max is %u",
			fc_conf->high_water, fc_conf->low_water,
			max_high_water);
		return -EINVAL;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		hw->fc.requested_mode = igc_fc_none;
		break;
	case RTE_FC_RX_PAUSE:
		hw->fc.requested_mode = igc_fc_rx_pause;
		break;
	case RTE_FC_TX_PAUSE:
		hw->fc.requested_mode = igc_fc_tx_pause;
		break;
	case RTE_FC_FULL:
		hw->fc.requested_mode = igc_fc_full;
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
		return -EINVAL;
	}

	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = igc_setup_link_generic(hw);
	if (err == IGC_SUCCESS) {
		/*
		 * Check whether MAC control frames should be forwarded.
		 * The driver has no native capability to do that, so write
		 * the register ourselves.
		 */
		rctl = IGC_READ_REG(hw, IGC_RCTL);

		/* set or clear the PMCF bit in RCTL depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= IGC_RCTL_PMCF;
		else
			rctl &= ~IGC_RCTL_PMCF;

		IGC_WRITE_REG(hw, IGC_RCTL, rctl);
		IGC_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
	return -EIO;
}

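/*
 * Update the RSS redirection table.
 * The 128-entry table is written one 32-bit RETA register (4 entries) at a
 * time; within each group of 4 entries, the bits of reta_conf[].mask select
 * which entries are replaced and which keep their current hardware value.
 */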
static int
eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured(%d) doesn't match the number supported by hardware(%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* set redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta, reg;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* if there is nothing to update in this register */
		if (!mask ||
			shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* check the mask to see whether the current register value
		 * needs to be read first
		 */
		if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
			reg.dword = 0;
		else
			reg.dword = IGC_READ_REG_LE_VALUE(hw,
					IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));

		/* update the register */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta.bytes[j] =
					(uint8_t)reta_conf[idx].reta[shift + j];
			else
				reta.bytes[j] = reg.bytes[j];
		}
		IGC_WRITE_REG_LE_VALUE(hw,
			IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
	}

	return 0;
}

static int
eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured(%d) doesn't match the number supported by hardware(%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* read redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* if there is nothing to read from this register */
		if (!mask ||
			shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* read the register and extract the queue indexes */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		reta.dword = IGC_READ_REG_LE_VALUE(hw,
				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta_conf[idx].reta[shift + j] = reta.bytes[j];
		}
	}

	return 0;
}

static int
eth_igc_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_hw_rss_hash_set(hw, rss_conf);
	return 0;
}

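/*
 * Get the current RSS hash configuration.
 * The hash key is read back from the RSSRK registers and the enabled hash
 * functions are derived from the field-enable bits of the MRQC register.
 */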
static int
eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t rss_hf;

	if (hash_key != NULL) {
		int i;

		/* if there is not enough space to store the hash key */
		if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
			PMD_DRV_LOG(ERR,
				"RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
				rss_conf->rss_key_len, IGC_HKEY_SIZE);
			return -EINVAL;
		}

		/* read the RSS key from the registers */
		for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
			hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
	}

	/* get the RSS functions configured in the MRQC register */
	mrqc = IGC_READ_REG(hw, IGC_MRQC);
	if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
		return 0;

	rss_hf = 0;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
		rss_hf |= ETH_RSS_IPV4;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
		rss_hf |= ETH_RSS_IPV6;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
		rss_hf |= ETH_RSS_IPV6_UDP_EX;

	rss_conf->rss_hf |= rss_hf;
	return 0;
}

static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
	vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
	vfta = shadow_vfta->vfta[vid_idx];
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);

	/* update the local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_read_reg_check_clear_bits(hw, IGC_RCTL,
			IGC_RCTL_CFIEN | IGC_RCTL_VFE);
}

static void
igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t reg_val;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg_val = IGC_READ_REG(hw, IGC_RCTL);
	reg_val &= ~IGC_RCTL_CFIEN;
	reg_val |= IGC_RCTL_VFE;
	IGC_WRITE_REG(hw, IGC_RCTL, reg_val);

	/* restore the VFTA table */
	for (i = 0; i < IGC_VFTA_SIZE; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
}

static void
igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static void
igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static int
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* if extended VLAN hasn't been enabled */
	if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
		return 0;

	/* Update maximum packet length */
	if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
			frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
		return -EINVAL;
	}
	IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);

	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

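/*
 * Enable extended (double) VLAN processing.
 * The maximum packet length written to RLPML is validated against the jumbo
 * frame limit before the EXT_VLAN bit is set in CTRL_EXT.
 */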
static int
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* if extended VLAN has already been enabled */
	if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
		return 0;

	/* Update maximum packet length */
	if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
			frame_size, MAX_RX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	IGC_WRITE_REG(hw, IGC_RLPML, frame_size);

	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

static int
eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			igc_vlan_hw_strip_enable(dev);
		else
			igc_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			igc_vlan_hw_filter_enable(dev);
		else
			igc_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			return igc_vlan_hw_extend_enable(dev);
		else
			return igc_vlan_hw_extend_disable(dev);
	}

	return 0;
}

static int
eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t reg_val;

	/* only the outer TPID of double VLAN can be configured */
	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		reg_val = IGC_READ_REG(hw, IGC_VET);
		reg_val = (reg_val & (~IGC_VET_EXT)) |
			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
		IGC_WRITE_REG(hw, IGC_VET, reg_val);

		return 0;
	}

	/* all other TPID values are read-only */
	PMD_DRV_LOG(ERR, "Not supported");
	return -ENOTSUP;
}

static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct igc_adapter), eth_igc_dev_init);
}

static int
eth_igc_pci_remove(struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
}

static struct rte_pci_driver rte_igc_pmd = {
	.id_table = pci_id_igc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igc_pci_probe,
	.remove = eth_igc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");

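/*
 * Example usage: with a supported I225 device bound to vfio-pci (or igb_uio /
 * uio_pci_generic), the PMD is probed automatically, e.g.:
 *   dpdk-testpmd -a 0000:03:00.0 -- -i
 * where 0000:03:00.0 stands for the device's PCI address.
 */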