1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2019-2020 Intel Corporation 3 */ 4 5 #include <stdint.h> 6 #include <string.h> 7 8 #include <rte_string_fns.h> 9 #include <rte_pci.h> 10 #include <rte_bus_pci.h> 11 #include <ethdev_driver.h> 12 #include <ethdev_pci.h> 13 #include <rte_malloc.h> 14 #include <rte_alarm.h> 15 16 #include "igc_logs.h" 17 #include "igc_txrx.h" 18 #include "igc_filter.h" 19 #include "igc_flow.h" 20 21 #define IGC_INTEL_VENDOR_ID 0x8086 22 23 #define IGC_FC_PAUSE_TIME 0x0680 24 #define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ 25 #define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ 26 27 #define IGC_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET 28 #define IGC_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET 29 #define IGC_MSIX_OTHER_INTR_VEC 0 /* MSI-X other interrupt vector */ 30 #define IGC_FLAG_NEED_LINK_UPDATE (1u << 0) /* need update link */ 31 32 #define IGC_DEFAULT_RX_FREE_THRESH 32 33 34 #define IGC_DEFAULT_RX_PTHRESH 8 35 #define IGC_DEFAULT_RX_HTHRESH 8 36 #define IGC_DEFAULT_RX_WTHRESH 4 37 38 #define IGC_DEFAULT_TX_PTHRESH 8 39 #define IGC_DEFAULT_TX_HTHRESH 1 40 #define IGC_DEFAULT_TX_WTHRESH 16 41 42 /* MSI-X other interrupt vector */ 43 #define IGC_MSIX_OTHER_INTR_VEC 0 44 45 /* External VLAN Enable bit mask */ 46 #define IGC_CTRL_EXT_EXT_VLAN (1u << 26) 47 48 /* Speed select */ 49 #define IGC_CTRL_SPEED_MASK (7u << 8) 50 #define IGC_CTRL_SPEED_2500 (6u << 8) 51 52 /* External VLAN Ether Type bit mask and shift */ 53 #define IGC_VET_EXT 0xFFFF0000 54 #define IGC_VET_EXT_SHIFT 16 55 56 /* Force EEE Auto-negotiation */ 57 #define IGC_EEER_EEE_FRC_AN (1u << 28) 58 59 /* Per Queue Good Packets Received Count */ 60 #define IGC_PQGPRC(idx) (0x10010 + 0x100 * (idx)) 61 /* Per Queue Good Octets Received Count */ 62 #define IGC_PQGORC(idx) (0x10018 + 0x100 * (idx)) 63 /* Per Queue Good Octets Transmitted Count */ 64 #define IGC_PQGOTC(idx) (0x10034 + 0x100 * (idx)) 65 /* Per Queue Multicast Packets Received Count */ 66 #define IGC_PQMPRC(idx) 
(0x10038 + 0x100 * (idx)) 67 /* Transmit Queue Drop Packet Count */ 68 #define IGC_TQDPC(idx) (0xe030 + 0x40 * (idx)) 69 70 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 71 #define U32_0_IN_U64 0 /* lower bytes of u64 */ 72 #define U32_1_IN_U64 1 /* higher bytes of u64 */ 73 #else 74 #define U32_0_IN_U64 1 75 #define U32_1_IN_U64 0 76 #endif 77 78 #define IGC_ALARM_INTERVAL 8000000u 79 /* us, about 13.6s some per-queue registers will wrap around back to 0. */ 80 81 static const struct rte_eth_desc_lim rx_desc_lim = { 82 .nb_max = IGC_MAX_RXD, 83 .nb_min = IGC_MIN_RXD, 84 .nb_align = IGC_RXD_ALIGN, 85 }; 86 87 static const struct rte_eth_desc_lim tx_desc_lim = { 88 .nb_max = IGC_MAX_TXD, 89 .nb_min = IGC_MIN_TXD, 90 .nb_align = IGC_TXD_ALIGN, 91 .nb_seg_max = IGC_TX_MAX_SEG, 92 .nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG, 93 }; 94 95 static const struct rte_pci_id pci_id_igc_map[] = { 96 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) }, 97 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) }, 98 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) }, 99 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) }, 100 { .vendor_id = 0, /* sentinel */ }, 101 }; 102 103 /* store statistics names and its offset in stats structure */ 104 struct rte_igc_xstats_name_off { 105 char name[RTE_ETH_XSTATS_NAME_SIZE]; 106 unsigned int offset; 107 }; 108 109 static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = { 110 {"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)}, 111 {"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)}, 112 {"rx_errors", offsetof(struct igc_hw_stats, rxerrc)}, 113 {"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)}, 114 {"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)}, 115 {"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)}, 116 {"tx_excessive_collision_packets", offsetof(struct igc_hw_stats, 117 ecol)}, 118 {"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)}, 119 
{"tx_total_collisions", offsetof(struct igc_hw_stats, colc)}, 120 {"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)}, 121 {"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)}, 122 {"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)}, 123 {"rx_length_errors", offsetof(struct igc_hw_stats, rlec)}, 124 {"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)}, 125 {"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)}, 126 {"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)}, 127 {"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)}, 128 {"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats, 129 fcruc)}, 130 {"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)}, 131 {"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)}, 132 {"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)}, 133 {"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)}, 134 {"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats, 135 prc1023)}, 136 {"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats, 137 prc1522)}, 138 {"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)}, 139 {"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)}, 140 {"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)}, 141 {"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)}, 142 {"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)}, 143 {"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)}, 144 {"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)}, 145 {"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)}, 146 {"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)}, 147 {"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)}, 148 {"rx_total_packets", offsetof(struct igc_hw_stats, tpr)}, 149 {"tx_total_packets", offsetof(struct igc_hw_stats, tpt)}, 150 {"rx_total_bytes", offsetof(struct igc_hw_stats, tor)}, 151 {"tx_total_bytes", 
offsetof(struct igc_hw_stats, tot)}, 152 {"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)}, 153 {"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)}, 154 {"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)}, 155 {"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)}, 156 {"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats, 157 ptc1023)}, 158 {"tx_size_1023_to_max_packets", offsetof(struct igc_hw_stats, 159 ptc1522)}, 160 {"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)}, 161 {"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)}, 162 {"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)}, 163 {"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)}, 164 {"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)}, 165 {"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)}, 166 {"rx_descriptor_lower_threshold", 167 offsetof(struct igc_hw_stats, icrxdmtc)}, 168 }; 169 170 #define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \ 171 sizeof(rte_igc_stats_strings[0])) 172 173 static int eth_igc_configure(struct rte_eth_dev *dev); 174 static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete); 175 static int eth_igc_stop(struct rte_eth_dev *dev); 176 static int eth_igc_start(struct rte_eth_dev *dev); 177 static int eth_igc_set_link_up(struct rte_eth_dev *dev); 178 static int eth_igc_set_link_down(struct rte_eth_dev *dev); 179 static int eth_igc_close(struct rte_eth_dev *dev); 180 static int eth_igc_reset(struct rte_eth_dev *dev); 181 static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev); 182 static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev); 183 static int eth_igc_fw_version_get(struct rte_eth_dev *dev, 184 char *fw_version, size_t fw_size); 185 static int eth_igc_infos_get(struct rte_eth_dev *dev, 186 struct rte_eth_dev_info *dev_info); 187 static int eth_igc_led_on(struct rte_eth_dev *dev); 188 static int 
eth_igc_led_off(struct rte_eth_dev *dev); 189 static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev); 190 static int eth_igc_rar_set(struct rte_eth_dev *dev, 191 struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool); 192 static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index); 193 static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev, 194 struct rte_ether_addr *addr); 195 static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev, 196 struct rte_ether_addr *mc_addr_set, 197 uint32_t nb_mc_addr); 198 static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev); 199 static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev); 200 static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 201 static int eth_igc_stats_get(struct rte_eth_dev *dev, 202 struct rte_eth_stats *rte_stats); 203 static int eth_igc_xstats_get(struct rte_eth_dev *dev, 204 struct rte_eth_xstat *xstats, unsigned int n); 205 static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, 206 const uint64_t *ids, 207 uint64_t *values, unsigned int n); 208 static int eth_igc_xstats_get_names(struct rte_eth_dev *dev, 209 struct rte_eth_xstat_name *xstats_names, 210 unsigned int size); 211 static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev, 212 const uint64_t *ids, struct rte_eth_xstat_name *xstats_names, 213 unsigned int limit); 214 static int eth_igc_xstats_reset(struct rte_eth_dev *dev); 215 static int 216 eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev, 217 uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx); 218 static int 219 eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); 220 static int 221 eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); 222 static int 223 eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); 224 static int 225 eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); 226 static int 
eth_igc_rss_reta_update(struct rte_eth_dev *dev, 227 struct rte_eth_rss_reta_entry64 *reta_conf, 228 uint16_t reta_size); 229 static int eth_igc_rss_reta_query(struct rte_eth_dev *dev, 230 struct rte_eth_rss_reta_entry64 *reta_conf, 231 uint16_t reta_size); 232 static int eth_igc_rss_hash_update(struct rte_eth_dev *dev, 233 struct rte_eth_rss_conf *rss_conf); 234 static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev, 235 struct rte_eth_rss_conf *rss_conf); 236 static int 237 eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); 238 static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask); 239 static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev, 240 enum rte_vlan_type vlan_type, uint16_t tpid); 241 242 static const struct eth_dev_ops eth_igc_ops = { 243 .dev_configure = eth_igc_configure, 244 .link_update = eth_igc_link_update, 245 .dev_stop = eth_igc_stop, 246 .dev_start = eth_igc_start, 247 .dev_close = eth_igc_close, 248 .dev_reset = eth_igc_reset, 249 .dev_set_link_up = eth_igc_set_link_up, 250 .dev_set_link_down = eth_igc_set_link_down, 251 .promiscuous_enable = eth_igc_promiscuous_enable, 252 .promiscuous_disable = eth_igc_promiscuous_disable, 253 .allmulticast_enable = eth_igc_allmulticast_enable, 254 .allmulticast_disable = eth_igc_allmulticast_disable, 255 .fw_version_get = eth_igc_fw_version_get, 256 .dev_infos_get = eth_igc_infos_get, 257 .dev_led_on = eth_igc_led_on, 258 .dev_led_off = eth_igc_led_off, 259 .dev_supported_ptypes_get = eth_igc_supported_ptypes_get, 260 .mtu_set = eth_igc_mtu_set, 261 .mac_addr_add = eth_igc_rar_set, 262 .mac_addr_remove = eth_igc_rar_clear, 263 .mac_addr_set = eth_igc_default_mac_addr_set, 264 .set_mc_addr_list = eth_igc_set_mc_addr_list, 265 266 .rx_queue_setup = eth_igc_rx_queue_setup, 267 .rx_queue_release = eth_igc_rx_queue_release, 268 .tx_queue_setup = eth_igc_tx_queue_setup, 269 .tx_queue_release = eth_igc_tx_queue_release, 270 .tx_done_cleanup = 
eth_igc_tx_done_cleanup, 271 .rxq_info_get = eth_igc_rxq_info_get, 272 .txq_info_get = eth_igc_txq_info_get, 273 .stats_get = eth_igc_stats_get, 274 .xstats_get = eth_igc_xstats_get, 275 .xstats_get_by_id = eth_igc_xstats_get_by_id, 276 .xstats_get_names_by_id = eth_igc_xstats_get_names_by_id, 277 .xstats_get_names = eth_igc_xstats_get_names, 278 .stats_reset = eth_igc_xstats_reset, 279 .xstats_reset = eth_igc_xstats_reset, 280 .queue_stats_mapping_set = eth_igc_queue_stats_mapping_set, 281 .rx_queue_intr_enable = eth_igc_rx_queue_intr_enable, 282 .rx_queue_intr_disable = eth_igc_rx_queue_intr_disable, 283 .flow_ctrl_get = eth_igc_flow_ctrl_get, 284 .flow_ctrl_set = eth_igc_flow_ctrl_set, 285 .reta_update = eth_igc_rss_reta_update, 286 .reta_query = eth_igc_rss_reta_query, 287 .rss_hash_update = eth_igc_rss_hash_update, 288 .rss_hash_conf_get = eth_igc_rss_hash_conf_get, 289 .vlan_filter_set = eth_igc_vlan_filter_set, 290 .vlan_offload_set = eth_igc_vlan_offload_set, 291 .vlan_tpid_set = eth_igc_vlan_tpid_set, 292 .vlan_strip_queue_set = eth_igc_vlan_strip_queue_set, 293 .flow_ops_get = eth_igc_flow_ops_get, 294 }; 295 296 /* 297 * multiple queue mode checking 298 */ 299 static int 300 igc_check_mq_mode(struct rte_eth_dev *dev) 301 { 302 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; 303 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode; 304 305 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 306 PMD_INIT_LOG(ERR, "SRIOV is not supported."); 307 return -EINVAL; 308 } 309 310 if (rx_mq_mode != ETH_MQ_RX_NONE && 311 rx_mq_mode != ETH_MQ_RX_RSS) { 312 /* RSS together with VMDq not supported*/ 313 PMD_INIT_LOG(ERR, "RX mode %d is not supported.", 314 rx_mq_mode); 315 return -EINVAL; 316 } 317 318 /* To no break software that set invalid mode, only display 319 * warning if invalid mode is used. 320 */ 321 if (tx_mq_mode != ETH_MQ_TX_NONE) 322 PMD_INIT_LOG(WARNING, 323 "TX mode %d is not supported. 
Due to meaningless in this driver, just ignore", 324 tx_mq_mode); 325 326 return 0; 327 } 328 329 static int 330 eth_igc_configure(struct rte_eth_dev *dev) 331 { 332 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 333 int ret; 334 335 PMD_INIT_FUNC_TRACE(); 336 337 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 338 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 339 340 ret = igc_check_mq_mode(dev); 341 if (ret != 0) 342 return ret; 343 344 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE; 345 return 0; 346 } 347 348 static int 349 eth_igc_set_link_up(struct rte_eth_dev *dev) 350 { 351 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 352 353 if (hw->phy.media_type == igc_media_type_copper) 354 igc_power_up_phy(hw); 355 else 356 igc_power_up_fiber_serdes_link(hw); 357 return 0; 358 } 359 360 static int 361 eth_igc_set_link_down(struct rte_eth_dev *dev) 362 { 363 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 364 365 if (hw->phy.media_type == igc_media_type_copper) 366 igc_power_down_phy(hw); 367 else 368 igc_shutdown_fiber_serdes_link(hw); 369 return 0; 370 } 371 372 /* 373 * disable other interrupt 374 */ 375 static void 376 igc_intr_other_disable(struct rte_eth_dev *dev) 377 { 378 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 379 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 380 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 381 382 if (rte_intr_allow_others(intr_handle) && 383 dev->data->dev_conf.intr_conf.lsc) { 384 IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC); 385 } 386 387 IGC_WRITE_REG(hw, IGC_IMC, ~0); 388 IGC_WRITE_FLUSH(hw); 389 } 390 391 /* 392 * enable other interrupt 393 */ 394 static inline void 395 igc_intr_other_enable(struct rte_eth_dev *dev) 396 { 397 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 398 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 399 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 400 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 401 402 
if (rte_intr_allow_others(intr_handle) && 403 dev->data->dev_conf.intr_conf.lsc) { 404 IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC); 405 } 406 407 IGC_WRITE_REG(hw, IGC_IMS, intr->mask); 408 IGC_WRITE_FLUSH(hw); 409 } 410 411 /* 412 * It reads ICR and gets interrupt causes, check it and set a bit flag 413 * to update link status. 414 */ 415 static void 416 eth_igc_interrupt_get_status(struct rte_eth_dev *dev) 417 { 418 uint32_t icr; 419 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 420 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 421 422 /* read-on-clear nic registers here */ 423 icr = IGC_READ_REG(hw, IGC_ICR); 424 425 intr->flags = 0; 426 if (icr & IGC_ICR_LSC) 427 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE; 428 } 429 430 /* return 0 means link status changed, -1 means not changed */ 431 static int 432 eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete) 433 { 434 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 435 struct rte_eth_link link; 436 int link_check, count; 437 438 link_check = 0; 439 hw->mac.get_link_status = 1; 440 441 /* possible wait-to-complete in up to 9 seconds */ 442 for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) { 443 /* Read the real link status */ 444 switch (hw->phy.media_type) { 445 case igc_media_type_copper: 446 /* Do the work to read phy */ 447 igc_check_for_link(hw); 448 link_check = !hw->mac.get_link_status; 449 break; 450 451 case igc_media_type_fiber: 452 igc_check_for_link(hw); 453 link_check = (IGC_READ_REG(hw, IGC_STATUS) & 454 IGC_STATUS_LU); 455 break; 456 457 case igc_media_type_internal_serdes: 458 igc_check_for_link(hw); 459 link_check = hw->mac.serdes_has_link; 460 break; 461 462 default: 463 break; 464 } 465 if (link_check || wait_to_complete == 0) 466 break; 467 rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL); 468 } 469 memset(&link, 0, sizeof(link)); 470 471 /* Now we check if a transition has happened */ 472 if (link_check) { 473 uint16_t duplex, speed; 474 
hw->mac.ops.get_link_up_info(hw, &speed, &duplex); 475 link.link_duplex = (duplex == FULL_DUPLEX) ? 476 ETH_LINK_FULL_DUPLEX : 477 ETH_LINK_HALF_DUPLEX; 478 link.link_speed = speed; 479 link.link_status = ETH_LINK_UP; 480 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 481 ETH_LINK_SPEED_FIXED); 482 483 if (speed == SPEED_2500) { 484 uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG); 485 if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) { 486 tipg &= ~IGC_TIPG_IPGT_MASK; 487 tipg |= 0x0b; 488 IGC_WRITE_REG(hw, IGC_TIPG, tipg); 489 } 490 } 491 } else { 492 link.link_speed = 0; 493 link.link_duplex = ETH_LINK_HALF_DUPLEX; 494 link.link_status = ETH_LINK_DOWN; 495 link.link_autoneg = ETH_LINK_FIXED; 496 } 497 498 return rte_eth_linkstatus_set(dev, &link); 499 } 500 501 /* 502 * It executes link_update after knowing an interrupt is present. 503 */ 504 static void 505 eth_igc_interrupt_action(struct rte_eth_dev *dev) 506 { 507 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 508 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 509 struct rte_eth_link link; 510 int ret; 511 512 if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) { 513 intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; 514 515 /* set get_link_status to check register later */ 516 ret = eth_igc_link_update(dev, 0); 517 518 /* check if link has changed */ 519 if (ret < 0) 520 return; 521 522 rte_eth_linkstatus_get(dev, &link); 523 if (link.link_status) 524 PMD_DRV_LOG(INFO, 525 " Port %d: Link Up - speed %u Mbps - %s", 526 dev->data->port_id, 527 (unsigned int)link.link_speed, 528 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 
529 "full-duplex" : "half-duplex"); 530 else 531 PMD_DRV_LOG(INFO, " Port %d: Link Down", 532 dev->data->port_id); 533 534 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 535 pci_dev->addr.domain, 536 pci_dev->addr.bus, 537 pci_dev->addr.devid, 538 pci_dev->addr.function); 539 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 540 } 541 } 542 543 /* 544 * Interrupt handler which shall be registered at first. 545 * 546 * @handle 547 * Pointer to interrupt handle. 548 * @param 549 * The address of parameter (struct rte_eth_dev *) registered before. 550 */ 551 static void 552 eth_igc_interrupt_handler(void *param) 553 { 554 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 555 556 eth_igc_interrupt_get_status(dev); 557 eth_igc_interrupt_action(dev); 558 } 559 560 static void igc_read_queue_stats_register(struct rte_eth_dev *dev); 561 562 /* 563 * Update the queue status every IGC_ALARM_INTERVAL time. 564 * @param 565 * The address of parameter (struct rte_eth_dev *) registered before. 566 */ 567 static void 568 igc_update_queue_stats_handler(void *param) 569 { 570 struct rte_eth_dev *dev = param; 571 igc_read_queue_stats_register(dev); 572 rte_eal_alarm_set(IGC_ALARM_INTERVAL, 573 igc_update_queue_stats_handler, dev); 574 } 575 576 /* 577 * rx,tx enable/disable 578 */ 579 static void 580 eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable) 581 { 582 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 583 uint32_t tctl, rctl; 584 585 tctl = IGC_READ_REG(hw, IGC_TCTL); 586 rctl = IGC_READ_REG(hw, IGC_RCTL); 587 588 if (enable) { 589 /* enable Tx/Rx */ 590 tctl |= IGC_TCTL_EN; 591 rctl |= IGC_RCTL_EN; 592 } else { 593 /* disable Tx/Rx */ 594 tctl &= ~IGC_TCTL_EN; 595 rctl &= ~IGC_RCTL_EN; 596 } 597 IGC_WRITE_REG(hw, IGC_TCTL, tctl); 598 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 599 IGC_WRITE_FLUSH(hw); 600 } 601 602 /* 603 * This routine disables all traffic on the adapter by issuing a 604 * global reset on the MAC. 
605 */ 606 static int 607 eth_igc_stop(struct rte_eth_dev *dev) 608 { 609 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev); 610 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 611 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 612 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 613 struct rte_eth_link link; 614 615 dev->data->dev_started = 0; 616 adapter->stopped = 1; 617 618 /* disable receive and transmit */ 619 eth_igc_rxtx_control(dev, false); 620 621 /* disable all MSI-X interrupts */ 622 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f); 623 IGC_WRITE_FLUSH(hw); 624 625 /* clear all MSI-X interrupts */ 626 IGC_WRITE_REG(hw, IGC_EICR, 0x1f); 627 628 igc_intr_other_disable(dev); 629 630 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev); 631 632 /* disable intr eventfd mapping */ 633 rte_intr_disable(intr_handle); 634 635 igc_reset_hw(hw); 636 637 /* disable all wake up */ 638 IGC_WRITE_REG(hw, IGC_WUC, 0); 639 640 /* disable checking EEE operation in MAC loopback mode */ 641 igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN); 642 643 /* Set bit for Go Link disconnect */ 644 igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT, 645 IGC_82580_PM_GO_LINKD); 646 647 /* Power down the phy. 
Needed to make the link go Down */ 648 eth_igc_set_link_down(dev); 649 650 igc_dev_clear_queues(dev); 651 652 /* clear the recorded link status */ 653 memset(&link, 0, sizeof(link)); 654 rte_eth_linkstatus_set(dev, &link); 655 656 if (!rte_intr_allow_others(intr_handle)) 657 /* resume to the default handler */ 658 rte_intr_callback_register(intr_handle, 659 eth_igc_interrupt_handler, 660 (void *)dev); 661 662 /* Clean datapath event and queue/vec mapping */ 663 rte_intr_efd_disable(intr_handle); 664 if (intr_handle->intr_vec != NULL) { 665 rte_free(intr_handle->intr_vec); 666 intr_handle->intr_vec = NULL; 667 } 668 669 return 0; 670 } 671 672 /* 673 * write interrupt vector allocation register 674 * @hw 675 * board private structure 676 * @queue_index 677 * queue index, valid 0,1,2,3 678 * @tx 679 * tx:1, rx:0 680 * @msix_vector 681 * msix-vector, valid 0,1,2,3,4 682 */ 683 static void 684 igc_write_ivar(struct igc_hw *hw, uint8_t queue_index, 685 bool tx, uint8_t msix_vector) 686 { 687 uint8_t offset = 0; 688 uint8_t reg_index = queue_index >> 1; 689 uint32_t val; 690 691 /* 692 * IVAR(0) 693 * bit31...24 bit23...16 bit15...8 bit7...0 694 * TX1 RX1 TX0 RX0 695 * 696 * IVAR(1) 697 * bit31...24 bit23...16 bit15...8 bit7...0 698 * TX3 RX3 TX2 RX2 699 */ 700 701 if (tx) 702 offset = 8; 703 704 if (queue_index & 1) 705 offset += 16; 706 707 val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index); 708 709 /* clear bits */ 710 val &= ~((uint32_t)0xFF << offset); 711 712 /* write vector and valid bit */ 713 val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset; 714 715 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val); 716 } 717 718 /* Sets up the hardware to generate MSI-X interrupts properly 719 * @hw 720 * board private structure 721 */ 722 static void 723 igc_configure_msix_intr(struct rte_eth_dev *dev) 724 { 725 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 726 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 727 struct rte_intr_handle *intr_handle = 
&pci_dev->intr_handle; 728 729 uint32_t intr_mask; 730 uint32_t vec = IGC_MISC_VEC_ID; 731 uint32_t base = IGC_MISC_VEC_ID; 732 uint32_t misc_shift = 0; 733 int i; 734 735 /* won't configure msix register if no mapping is done 736 * between intr vector and event fd 737 */ 738 if (!rte_intr_dp_is_en(intr_handle)) 739 return; 740 741 if (rte_intr_allow_others(intr_handle)) { 742 base = IGC_RX_VEC_START; 743 vec = base; 744 misc_shift = 1; 745 } 746 747 /* turn on MSI-X capability first */ 748 IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | 749 IGC_GPIE_PBA | IGC_GPIE_EIAME | 750 IGC_GPIE_NSICR); 751 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << 752 misc_shift; 753 754 if (dev->data->dev_conf.intr_conf.lsc) 755 intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC); 756 757 /* enable msix auto-clear */ 758 igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask); 759 760 /* set other cause interrupt vector */ 761 igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC, 762 (uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8); 763 764 /* enable auto-mask */ 765 igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask); 766 767 for (i = 0; i < dev->data->nb_rx_queues; i++) { 768 igc_write_ivar(hw, i, 0, vec); 769 intr_handle->intr_vec[i] = vec; 770 if (vec < base + intr_handle->nb_efd - 1) 771 vec++; 772 } 773 774 IGC_WRITE_FLUSH(hw); 775 } 776 777 /** 778 * It enables the interrupt mask and then enable the interrupt. 779 * 780 * @dev 781 * Pointer to struct rte_eth_dev. 782 * @on 783 * Enable or Disable 784 */ 785 static void 786 igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 787 { 788 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev); 789 790 if (on) 791 intr->mask |= IGC_ICR_LSC; 792 else 793 intr->mask &= ~IGC_ICR_LSC; 794 } 795 796 /* 797 * It enables the interrupt. 798 * It will be called once only during nic initialized. 
799 */ 800 static void 801 igc_rxq_interrupt_setup(struct rte_eth_dev *dev) 802 { 803 uint32_t mask; 804 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 805 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 806 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 807 int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0; 808 809 /* won't configure msix register if no mapping is done 810 * between intr vector and event fd 811 */ 812 if (!rte_intr_dp_is_en(intr_handle)) 813 return; 814 815 mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift; 816 IGC_WRITE_REG(hw, IGC_EIMS, mask); 817 } 818 819 /* 820 * Get hardware rx-buffer size. 821 */ 822 static inline int 823 igc_get_rx_buffer_size(struct igc_hw *hw) 824 { 825 return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10; 826 } 827 828 /* 829 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. 830 * For ASF and Pass Through versions of f/w this means 831 * that the driver is loaded. 832 */ 833 static void 834 igc_hw_control_acquire(struct igc_hw *hw) 835 { 836 uint32_t ctrl_ext; 837 838 /* Let firmware know the driver has taken over */ 839 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); 840 IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); 841 } 842 843 /* 844 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit. 845 * For ASF and Pass Through versions of f/w this means that the 846 * driver is no longer loaded. 
847 */ 848 static void 849 igc_hw_control_release(struct igc_hw *hw) 850 { 851 uint32_t ctrl_ext; 852 853 /* Let firmware taken over control of h/w */ 854 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); 855 IGC_WRITE_REG(hw, IGC_CTRL_EXT, 856 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); 857 } 858 859 static int 860 igc_hardware_init(struct igc_hw *hw) 861 { 862 uint32_t rx_buf_size; 863 int diag; 864 865 /* Let the firmware know the OS is in control */ 866 igc_hw_control_acquire(hw); 867 868 /* Issue a global reset */ 869 igc_reset_hw(hw); 870 871 /* disable all wake up */ 872 IGC_WRITE_REG(hw, IGC_WUC, 0); 873 874 /* 875 * Hardware flow control 876 * - High water mark should allow for at least two standard size (1518) 877 * frames to be received after sending an XOFF. 878 * - Low water mark works best when it is very near the high water mark. 879 * This allows the receiver to restart by sending XON when it has 880 * drained a bit. Here we use an arbitrary value of 1500 which will 881 * restart after one full frame is pulled from the buffer. There 882 * could be several smaller frames in the buffer and if so they will 883 * not trigger the XON until their total number reduces the buffer 884 * by 1500. 
*/
	/*
	 * Flow-control watermarks are derived from the actual RX packet
	 * buffer size: high water leaves room for two max-size frames,
	 * low water sits 1500 bytes (one MTU) below it.
	 */
	rx_buf_size = igc_get_rx_buffer_size(hw);
	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	hw->fc.requested_mode = igc_fc_full;

	diag = igc_init_hw(hw);
	if (diag < 0)
		return diag;

	igc_get_phy_info(hw);
	igc_check_for_link(hw);

	return 0;
}

/*
 * Start the device: power up the PHY, (re)initialize the hardware, set up
 * RX/TX rings and MSI-X vectors, program link speed advertisement and
 * optionally MAC loopback. Returns 0 on success or a negative errno.
 *
 * NOTE(review): the register-write ordering below (mask/clear interrupts
 * before PHY power-up and hardware init) follows the HW bring-up sequence;
 * do not reorder.
 */
static int
eth_igc_start(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t *speeds;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* disable all MSI-X interrupts (0x1f: all five vectors assumed;
	 * TODO confirm against vector allocation)
	 */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	/* disable uio/vfio intr/eventfd mapping */
	if (!adapter->stopped)
		rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igc_set_link_up(dev);

	/* Put the address into the Receive Address Array */
	igc_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igc_hardware_init(hw)) {
		PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
		dev->data->dev_conf.intr_conf.rxq) {
		uint32_t intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	/* allocate the per-RX-queue vector map on first start */
	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for rx interrupt */
	igc_configure_msix_intr(dev);

	igc_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = igc_rx_init(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
		igc_dev_clear_queues(dev);
		return ret;
	}

	igc_clear_hw_cntrs_base_generic(hw);

	/* VLAN Offload Settings */
	eth_igc_vlan_offload_set(dev,
		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
		hw->mac.autoneg = 1;
	} else {
		int num_speeds = 0;

		/* Forced (non-autoneg) speed is not supported by this PMD */
		if (*speeds & ETH_LINK_SPEED_FIXED) {
			PMD_DRV_LOG(ERR,
				"Force speed mode currently not supported");
			igc_dev_clear_queues(dev);
			return -EINVAL;
		}

		hw->phy.autoneg_advertised = 0;
		hw->mac.autoneg = 1;

		/* reject any speed bit outside the supported set */
		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_2_5G) {
			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
			num_speeds++;
		}
		if (num_speeds == 0)
			goto error_invalid_config;
	}

	igc_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc)
			igc_lsc_interrupt_setup(dev, 1);
		else
			igc_lsc_interrupt_setup(dev, 0);
	} else {
		/* no vector for LSC: fall back to polling, drop callback */
		rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc)
			PMD_DRV_LOG(INFO,
				"LSC won't enable because of no intr multiplex");
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* start the periodic queue-stats accumulator */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq &&
			rte_intr_dp_is_en(intr_handle))
		igc_rxq_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	igc_intr_other_enable(dev);

	eth_igc_rxtx_control(dev, true);
eth_igc_link_update(dev, 0);

	/* configure MAC-loopback mode */
	if (dev->data->dev_conf.lpbk_mode == 1) {
		uint32_t reg_val;

		/* force link up, full duplex, 2.5 Gbps for loopback */
		reg_val = IGC_READ_REG(hw, IGC_CTRL);
		reg_val &= ~IGC_CTRL_SPEED_MASK;
		reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
			IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
		IGC_WRITE_REG(hw, IGC_CTRL, reg_val);

		igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
	}

	return 0;

error_invalid_config:
	PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
			dev->data->dev_conf.link_speeds, dev->data->port_id);
	igc_dev_clear_queues(dev);
	return -EINVAL;
}

/*
 * Force-release HW semaphores/locks that may have been left held by an
 * application that exited without cleaning up. Always returns IGC_SUCCESS.
 */
static int
igc_reset_swfw_lock(struct igc_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = igc_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (igc_get_hw_semaphore_generic(hw) < 0)
		PMD_DRV_LOG(DEBUG, "SMBI lock released");

	igc_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage.
		 * If this is the case, it is due to an improper exit of the
		 * application. So force the release of the faulty lock.
		 */
		mask = IGC_SWFW_PHY0_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = IGC_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return IGC_SUCCESS;
}

/*
 * free all rx/tx queues.
 */
static void
igc_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_igc_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_igc_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

/*
 * Close the device: stop it if needed, flush flows/filters, unregister the
 * interrupt callback (with retries), reset the PHY and release HW control.
 * Primary process only; secondaries return immediately.
 */
static int
eth_igc_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	int retry = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!adapter->stopped)
		ret = eth_igc_stop(dev);

	igc_flow_flush(dev, NULL);
	igc_clear_all_filter(dev);

	igc_intr_other_disable(dev);
	do {
		/* note: this 'ret' intentionally shadows the outer one; the
		 * function's return value is the eth_igc_stop() result
		 */
		int ret = rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
			break;

PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
		DELAY(200 * 1000); /* delay 200ms */
	} while (retry++ < 5);

	igc_phy_hw_reset(hw);
	igc_hw_control_release(hw);
	igc_dev_free_queues(dev);

	/* Reset any pending lock */
	igc_reset_swfw_lock(hw);

	return ret;
}

/* Copy the PCI identification of the device into the shared HW struct. */
static void
igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
}

/*
 * Probe-time initialization: wire up dev_ops, reset the MAC/PHY to a known
 * state, validate the NVM, read the permanent MAC address and register the
 * interrupt handler. Returns 0 on success or a negative errno; on failure
 * HW control is released via the err_late path.
 */
static int
eth_igc_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	int i, error = 0;

	PMD_INIT_FUNC_TRACE();
	dev->dev_ops = &eth_igc_ops;
	dev->rx_queue_count = eth_igc_rx_queue_count;
	dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
	dev->tx_descriptor_status = eth_igc_tx_descriptor_status;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->back = pci_dev;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igc_identify_hardware(dev, pci_dev);
	if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	igc_get_bus_info(hw);

	/* Reset any pending lock */
	if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = igc_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc("igc",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Now initialize the hardware */
	if (igc_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}

	hw->mac.get_link_status = 1;
	igc->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (igc_check_reset_block(hw) < 0)
		PMD_INIT_LOG(ERR,
			"PHY reset is blocked due to SOL/IDER session.");

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
			dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
			eth_igc_interrupt_handler, (void *)dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igc_intr_other_enable(dev);

	/* initiate queue status: -1 means "no xstats mapping configured" */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		igc->txq_stats_map[i] = -1;
		igc->rxq_stats_map[i] = -1;
	}

	igc_flow_init(dev);
igc_clear_all_filter(dev); 1351 return 0; 1352 1353 err_late: 1354 igc_hw_control_release(hw); 1355 return error; 1356 } 1357 1358 static int 1359 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev) 1360 { 1361 PMD_INIT_FUNC_TRACE(); 1362 eth_igc_close(eth_dev); 1363 return 0; 1364 } 1365 1366 static int 1367 eth_igc_reset(struct rte_eth_dev *dev) 1368 { 1369 int ret; 1370 1371 PMD_INIT_FUNC_TRACE(); 1372 1373 ret = eth_igc_dev_uninit(dev); 1374 if (ret) 1375 return ret; 1376 1377 return eth_igc_dev_init(dev); 1378 } 1379 1380 static int 1381 eth_igc_promiscuous_enable(struct rte_eth_dev *dev) 1382 { 1383 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1384 uint32_t rctl; 1385 1386 rctl = IGC_READ_REG(hw, IGC_RCTL); 1387 rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE); 1388 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1389 return 0; 1390 } 1391 1392 static int 1393 eth_igc_promiscuous_disable(struct rte_eth_dev *dev) 1394 { 1395 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1396 uint32_t rctl; 1397 1398 rctl = IGC_READ_REG(hw, IGC_RCTL); 1399 rctl &= (~IGC_RCTL_UPE); 1400 if (dev->data->all_multicast == 1) 1401 rctl |= IGC_RCTL_MPE; 1402 else 1403 rctl &= (~IGC_RCTL_MPE); 1404 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1405 return 0; 1406 } 1407 1408 static int 1409 eth_igc_allmulticast_enable(struct rte_eth_dev *dev) 1410 { 1411 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1412 uint32_t rctl; 1413 1414 rctl = IGC_READ_REG(hw, IGC_RCTL); 1415 rctl |= IGC_RCTL_MPE; 1416 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1417 return 0; 1418 } 1419 1420 static int 1421 eth_igc_allmulticast_disable(struct rte_eth_dev *dev) 1422 { 1423 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1424 uint32_t rctl; 1425 1426 if (dev->data->promiscuous == 1) 1427 return 0; /* must remain in all_multicast mode */ 1428 1429 rctl = IGC_READ_REG(hw, IGC_RCTL); 1430 rctl &= (~IGC_RCTL_MPE); 1431 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1432 return 0; 1433 } 1434 1435 static int 1436 eth_igc_fw_version_get(struct rte_eth_dev *dev, char 
*fw_version, 1437 size_t fw_size) 1438 { 1439 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1440 struct igc_fw_version fw; 1441 int ret; 1442 1443 igc_get_fw_version(hw, &fw); 1444 1445 /* if option rom is valid, display its version too */ 1446 if (fw.or_valid) { 1447 ret = snprintf(fw_version, fw_size, 1448 "%d.%d, 0x%08x, %d.%d.%d", 1449 fw.eep_major, fw.eep_minor, fw.etrack_id, 1450 fw.or_major, fw.or_build, fw.or_patch); 1451 /* no option rom */ 1452 } else { 1453 if (fw.etrack_id != 0X0000) { 1454 ret = snprintf(fw_version, fw_size, 1455 "%d.%d, 0x%08x", 1456 fw.eep_major, fw.eep_minor, 1457 fw.etrack_id); 1458 } else { 1459 ret = snprintf(fw_version, fw_size, 1460 "%d.%d.%d", 1461 fw.eep_major, fw.eep_minor, 1462 fw.eep_build); 1463 } 1464 } 1465 if (ret < 0) 1466 return -EINVAL; 1467 1468 ret += 1; /* add the size of '\0' */ 1469 if (fw_size < (size_t)ret) 1470 return ret; 1471 else 1472 return 0; 1473 } 1474 1475 static int 1476 eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 1477 { 1478 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1479 1480 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. 
*/ 1481 dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE; 1482 dev_info->max_mac_addrs = hw->mac.rar_entry_count; 1483 dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL; 1484 dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL; 1485 dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; 1486 1487 dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM; 1488 dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM; 1489 dev_info->max_vmdq_pools = 0; 1490 1491 dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t); 1492 dev_info->reta_size = ETH_RSS_RETA_SIZE_128; 1493 dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL; 1494 1495 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1496 .rx_thresh = { 1497 .pthresh = IGC_DEFAULT_RX_PTHRESH, 1498 .hthresh = IGC_DEFAULT_RX_HTHRESH, 1499 .wthresh = IGC_DEFAULT_RX_WTHRESH, 1500 }, 1501 .rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH, 1502 .rx_drop_en = 0, 1503 .offloads = 0, 1504 }; 1505 1506 dev_info->default_txconf = (struct rte_eth_txconf) { 1507 .tx_thresh = { 1508 .pthresh = IGC_DEFAULT_TX_PTHRESH, 1509 .hthresh = IGC_DEFAULT_TX_HTHRESH, 1510 .wthresh = IGC_DEFAULT_TX_WTHRESH, 1511 }, 1512 .offloads = 0, 1513 }; 1514 1515 dev_info->rx_desc_lim = rx_desc_lim; 1516 dev_info->tx_desc_lim = tx_desc_lim; 1517 1518 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | 1519 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | 1520 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G; 1521 1522 dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD; 1523 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 1524 return 0; 1525 } 1526 1527 static int 1528 eth_igc_led_on(struct rte_eth_dev *dev) 1529 { 1530 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1531 1532 return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP; 1533 } 1534 1535 static int 1536 eth_igc_led_off(struct rte_eth_dev *dev) 1537 { 1538 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1539 1540 return igc_led_off(hw) == IGC_SUCCESS ? 
0 : -ENOTSUP; 1541 } 1542 1543 static const uint32_t * 1544 eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev) 1545 { 1546 static const uint32_t ptypes[] = { 1547 /* refers to rx_desc_pkt_info_to_pkt_type() */ 1548 RTE_PTYPE_L2_ETHER, 1549 RTE_PTYPE_L3_IPV4, 1550 RTE_PTYPE_L3_IPV4_EXT, 1551 RTE_PTYPE_L3_IPV6, 1552 RTE_PTYPE_L3_IPV6_EXT, 1553 RTE_PTYPE_L4_TCP, 1554 RTE_PTYPE_L4_UDP, 1555 RTE_PTYPE_L4_SCTP, 1556 RTE_PTYPE_TUNNEL_IP, 1557 RTE_PTYPE_INNER_L3_IPV6, 1558 RTE_PTYPE_INNER_L3_IPV6_EXT, 1559 RTE_PTYPE_INNER_L4_TCP, 1560 RTE_PTYPE_INNER_L4_UDP, 1561 RTE_PTYPE_UNKNOWN 1562 }; 1563 1564 return ptypes; 1565 } 1566 1567 static int 1568 eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1569 { 1570 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1571 uint32_t frame_size = mtu + IGC_ETH_OVERHEAD; 1572 uint32_t rctl; 1573 1574 /* if extend vlan has been enabled */ 1575 if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN) 1576 frame_size += VLAN_TAG_SIZE; 1577 1578 /* check that mtu is within the allowed range */ 1579 if (mtu < RTE_ETHER_MIN_MTU || 1580 frame_size > MAX_RX_JUMBO_FRAME_SIZE) 1581 return -EINVAL; 1582 1583 /* 1584 * If device is started, refuse mtu that requires the support of 1585 * scattered packets when this feature has not been enabled before. 
1586 */ 1587 if (dev->data->dev_started && !dev->data->scattered_rx && 1588 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 1589 PMD_INIT_LOG(ERR, "Stop port first."); 1590 return -EINVAL; 1591 } 1592 1593 rctl = IGC_READ_REG(hw, IGC_RCTL); 1594 if (mtu > RTE_ETHER_MTU) 1595 rctl |= IGC_RCTL_LPE; 1596 else 1597 rctl &= ~IGC_RCTL_LPE; 1598 IGC_WRITE_REG(hw, IGC_RCTL, rctl); 1599 1600 IGC_WRITE_REG(hw, IGC_RLPML, frame_size); 1601 1602 return 0; 1603 } 1604 1605 static int 1606 eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 1607 uint32_t index, uint32_t pool) 1608 { 1609 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1610 1611 igc_rar_set(hw, mac_addr->addr_bytes, index); 1612 RTE_SET_USED(pool); 1613 return 0; 1614 } 1615 1616 static void 1617 eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index) 1618 { 1619 uint8_t addr[RTE_ETHER_ADDR_LEN]; 1620 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1621 1622 memset(addr, 0, sizeof(addr)); 1623 igc_rar_set(hw, addr, index); 1624 } 1625 1626 static int 1627 eth_igc_default_mac_addr_set(struct rte_eth_dev *dev, 1628 struct rte_ether_addr *addr) 1629 { 1630 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1631 igc_rar_set(hw, addr->addr_bytes, 0); 1632 return 0; 1633 } 1634 1635 static int 1636 eth_igc_set_mc_addr_list(struct rte_eth_dev *dev, 1637 struct rte_ether_addr *mc_addr_set, 1638 uint32_t nb_mc_addr) 1639 { 1640 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1641 igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr); 1642 return 0; 1643 } 1644 1645 /* 1646 * Read hardware registers 1647 */ 1648 static void 1649 igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats) 1650 { 1651 int pause_frames; 1652 1653 uint64_t old_gprc = stats->gprc; 1654 uint64_t old_gptc = stats->gptc; 1655 uint64_t old_tpr = stats->tpr; 1656 uint64_t old_tpt = stats->tpt; 1657 uint64_t old_rpthc = stats->rpthc; 1658 uint64_t old_hgptc = stats->hgptc; 1659 1660 stats->crcerrs += 
IGC_READ_REG(hw, IGC_CRCERRS);
	stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
	stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
	stats->mpc += IGC_READ_REG(hw, IGC_MPC);
	stats->scc += IGC_READ_REG(hw, IGC_SCC);
	stats->ecol += IGC_READ_REG(hw, IGC_ECOL);

	stats->mcc += IGC_READ_REG(hw, IGC_MCC);
	stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
	stats->colc += IGC_READ_REG(hw, IGC_COLC);

	stats->dc += IGC_READ_REG(hw, IGC_DC);
	stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
	stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
	stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
	stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
	stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
	stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
	/* received packet size-bucket counters */
	stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
	stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
	stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
	stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
	stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
	stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
	stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
	stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
	stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
	stats->gptc += IGC_READ_REG(hw, IGC_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	/* Workaround CRC bytes included in size, take away 4 bytes/packet */
	stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
	stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
	stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
	stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
	stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
	stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;

	stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
	stats->ruc += IGC_READ_REG(hw, IGC_RUC);
	stats->rfc += IGC_READ_REG(hw, IGC_RFC);
	stats->roc += IGC_READ_REG(hw, IGC_ROC);
	stats->rjc += IGC_READ_REG(hw, IGC_RJC);

	/* management and BMC pass-through counters */
	stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
	stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
	stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
	stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
	stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
	stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
	stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);

	stats->tpr += IGC_READ_REG(hw, IGC_TPR);
	stats->tpt += IGC_READ_REG(hw, IGC_TPT);

	/* total octets, same CRC workaround as gorc/gotc above */
	stats->tor += IGC_READ_REG(hw, IGC_TORL);
	stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
	stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
	stats->tot += IGC_READ_REG(hw, IGC_TOTL);
	stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
	stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;

	/* transmitted packet size-bucket counters */
	stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
	stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
	stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
	stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
	stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
	stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
	stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
	stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC); 1741 1742 stats->iac += IGC_READ_REG(hw, IGC_IAC); 1743 stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC); 1744 stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC); 1745 stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC); 1746 1747 /* Host to Card Statistics */ 1748 stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL); 1749 stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32); 1750 stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN; 1751 stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL); 1752 stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32); 1753 stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN; 1754 stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS); 1755 } 1756 1757 /* 1758 * Write 0 to all queue status registers 1759 */ 1760 static void 1761 igc_reset_queue_stats_register(struct igc_hw *hw) 1762 { 1763 int i; 1764 1765 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1766 IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0); 1767 IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0); 1768 IGC_WRITE_REG(hw, IGC_PQGORC(i), 0); 1769 IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0); 1770 IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0); 1771 IGC_WRITE_REG(hw, IGC_RQDPC(i), 0); 1772 IGC_WRITE_REG(hw, IGC_TQDPC(i), 0); 1773 } 1774 } 1775 1776 /* 1777 * Read all hardware queue status registers 1778 */ 1779 static void 1780 igc_read_queue_stats_register(struct rte_eth_dev *dev) 1781 { 1782 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1783 struct igc_hw_queue_stats *queue_stats = 1784 IGC_DEV_PRIVATE_QUEUE_STATS(dev); 1785 int i; 1786 1787 /* 1788 * This register is not cleared on read. Furthermore, the register wraps 1789 * around back to 0x00000000 on the next increment when reaching a value 1790 * of 0xFFFFFFFF and then continues normal count operation. 
1791 */ 1792 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1793 union { 1794 u64 ddword; 1795 u32 dword[2]; 1796 } value; 1797 u32 tmp; 1798 1799 /* 1800 * Read the register first, if the value is smaller than that 1801 * previous read, that mean the register has been overflowed, 1802 * then we add the high 4 bytes by 1 and replace the low 4 1803 * bytes by the new value. 1804 */ 1805 tmp = IGC_READ_REG(hw, IGC_PQGPRC(i)); 1806 value.ddword = queue_stats->pqgprc[i]; 1807 if (value.dword[U32_0_IN_U64] > tmp) 1808 value.dword[U32_1_IN_U64]++; 1809 value.dword[U32_0_IN_U64] = tmp; 1810 queue_stats->pqgprc[i] = value.ddword; 1811 1812 tmp = IGC_READ_REG(hw, IGC_PQGPTC(i)); 1813 value.ddword = queue_stats->pqgptc[i]; 1814 if (value.dword[U32_0_IN_U64] > tmp) 1815 value.dword[U32_1_IN_U64]++; 1816 value.dword[U32_0_IN_U64] = tmp; 1817 queue_stats->pqgptc[i] = value.ddword; 1818 1819 tmp = IGC_READ_REG(hw, IGC_PQGORC(i)); 1820 value.ddword = queue_stats->pqgorc[i]; 1821 if (value.dword[U32_0_IN_U64] > tmp) 1822 value.dword[U32_1_IN_U64]++; 1823 value.dword[U32_0_IN_U64] = tmp; 1824 queue_stats->pqgorc[i] = value.ddword; 1825 1826 tmp = IGC_READ_REG(hw, IGC_PQGOTC(i)); 1827 value.ddword = queue_stats->pqgotc[i]; 1828 if (value.dword[U32_0_IN_U64] > tmp) 1829 value.dword[U32_1_IN_U64]++; 1830 value.dword[U32_0_IN_U64] = tmp; 1831 queue_stats->pqgotc[i] = value.ddword; 1832 1833 tmp = IGC_READ_REG(hw, IGC_PQMPRC(i)); 1834 value.ddword = queue_stats->pqmprc[i]; 1835 if (value.dword[U32_0_IN_U64] > tmp) 1836 value.dword[U32_1_IN_U64]++; 1837 value.dword[U32_0_IN_U64] = tmp; 1838 queue_stats->pqmprc[i] = value.ddword; 1839 1840 tmp = IGC_READ_REG(hw, IGC_RQDPC(i)); 1841 value.ddword = queue_stats->rqdpc[i]; 1842 if (value.dword[U32_0_IN_U64] > tmp) 1843 value.dword[U32_1_IN_U64]++; 1844 value.dword[U32_0_IN_U64] = tmp; 1845 queue_stats->rqdpc[i] = value.ddword; 1846 1847 tmp = IGC_READ_REG(hw, IGC_TQDPC(i)); 1848 value.ddword = queue_stats->tqdpc[i]; 1849 if 
(value.dword[U32_0_IN_U64] > tmp) 1850 value.dword[U32_1_IN_U64]++; 1851 value.dword[U32_0_IN_U64] = tmp; 1852 queue_stats->tqdpc[i] = value.ddword; 1853 } 1854 } 1855 1856 static int 1857 eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 1858 { 1859 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); 1860 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1861 struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev); 1862 struct igc_hw_queue_stats *queue_stats = 1863 IGC_DEV_PRIVATE_QUEUE_STATS(dev); 1864 int i; 1865 1866 /* 1867 * Cancel status handler since it will read the queue status registers 1868 */ 1869 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev); 1870 1871 /* Read status register */ 1872 igc_read_queue_stats_register(dev); 1873 igc_read_stats_registers(hw, stats); 1874 1875 if (rte_stats == NULL) { 1876 /* Restart queue status handler */ 1877 rte_eal_alarm_set(IGC_ALARM_INTERVAL, 1878 igc_update_queue_stats_handler, dev); 1879 return -EINVAL; 1880 } 1881 1882 /* Rx Errors */ 1883 rte_stats->imissed = stats->mpc; 1884 rte_stats->ierrors = stats->crcerrs + stats->rlec + 1885 stats->rxerrc + stats->algnerrc; 1886 1887 /* Tx Errors */ 1888 rte_stats->oerrors = stats->ecol + stats->latecol; 1889 1890 rte_stats->ipackets = stats->gprc; 1891 rte_stats->opackets = stats->gptc; 1892 rte_stats->ibytes = stats->gorc; 1893 rte_stats->obytes = stats->gotc; 1894 1895 /* Get per-queue statuses */ 1896 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1897 /* GET TX queue statuses */ 1898 int map_id = igc->txq_stats_map[i]; 1899 if (map_id >= 0) { 1900 rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i]; 1901 rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i]; 1902 } 1903 /* Get RX queue statuses */ 1904 map_id = igc->rxq_stats_map[i]; 1905 if (map_id >= 0) { 1906 rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i]; 1907 rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i]; 1908 rte_stats->q_errors[map_id] += queue_stats->rqdpc[i]; 1909 
} 1910 } 1911 1912 /* Restart queue status handler */ 1913 rte_eal_alarm_set(IGC_ALARM_INTERVAL, 1914 igc_update_queue_stats_handler, dev); 1915 return 0; 1916 } 1917 1918 static int 1919 eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1920 unsigned int n) 1921 { 1922 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1923 struct igc_hw_stats *hw_stats = 1924 IGC_DEV_PRIVATE_STATS(dev); 1925 unsigned int i; 1926 1927 igc_read_stats_registers(hw, hw_stats); 1928 1929 if (n < IGC_NB_XSTATS) 1930 return IGC_NB_XSTATS; 1931 1932 /* If this is a reset xstats is NULL, and we have cleared the 1933 * registers by reading them. 1934 */ 1935 if (!xstats) 1936 return 0; 1937 1938 /* Extended stats */ 1939 for (i = 0; i < IGC_NB_XSTATS; i++) { 1940 xstats[i].id = i; 1941 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 1942 rte_igc_stats_strings[i].offset); 1943 } 1944 1945 return IGC_NB_XSTATS; 1946 } 1947 1948 static int 1949 eth_igc_xstats_reset(struct rte_eth_dev *dev) 1950 { 1951 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1952 struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev); 1953 struct igc_hw_queue_stats *queue_stats = 1954 IGC_DEV_PRIVATE_QUEUE_STATS(dev); 1955 1956 /* Cancel queue status handler for avoid conflict */ 1957 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev); 1958 1959 /* HW registers are cleared on read */ 1960 igc_reset_queue_stats_register(hw); 1961 igc_read_stats_registers(hw, hw_stats); 1962 1963 /* Reset software totals */ 1964 memset(hw_stats, 0, sizeof(*hw_stats)); 1965 memset(queue_stats, 0, sizeof(*queue_stats)); 1966 1967 /* Restart the queue status handler */ 1968 rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler, 1969 dev); 1970 1971 return 0; 1972 } 1973 1974 static int 1975 eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1976 struct rte_eth_xstat_name *xstats_names, unsigned int size) 1977 { 1978 unsigned int i; 1979 1980 if (xstats_names == NULL) 1981 return 
IGC_NB_XSTATS;

	if (size < IGC_NB_XSTATS) {
		PMD_DRV_LOG(ERR, "not enough buffers!");
		return IGC_NB_XSTATS;
	}

	/* Copy each statistic name into the caller-supplied array. */
	for (i = 0; i < IGC_NB_XSTATS; i++)
		strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
			sizeof(xstats_names[i].name));

	return IGC_NB_XSTATS;
}

/*
 * Return the names of extended statistics selected by 'ids'.
 * A NULL 'ids' means "all statistics" and falls back to the full list.
 * Returns the number of names written, or -EINVAL on an out-of-range id.
 */
static int
eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
		unsigned int limit)
{
	unsigned int i;

	if (!ids)
		return eth_igc_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= IGC_NB_XSTATS) {
			PMD_DRV_LOG(ERR, "id value isn't valid");
			return -EINVAL;
		}
		strlcpy(xstats_names[i].name,
			rte_igc_stats_strings[ids[i]].name,
			sizeof(xstats_names[i].name));
	}
	return limit;
}

/*
 * Return extended statistic values selected by 'ids'.
 * NULL 'ids' requests all IGC_NB_XSTATS values; in that case a NULL
 * 'values' is treated as a stats reset (reading the registers above
 * already cleared them).  Returns the number of values written,
 * IGC_NB_XSTATS when 'n' is too small, or -EINVAL on a bad id.
 */
static int
eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	/* Latch the hardware counters into the software copy. */
	igc_read_stats_registers(hw, hw_stats);

	if (!ids) {
		if (n < IGC_NB_XSTATS)
			return IGC_NB_XSTATS;

		/* If this is a reset xstats is NULL, and we have cleared the
		 * registers by reading them.
		 */
		if (!values)
			return 0;

		/* Extended stats: each entry is a uint64_t located at a
		 * byte offset into igc_hw_stats given by the strings table.
		 */
		for (i = 0; i < IGC_NB_XSTATS; i++)
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[i].offset);

		return IGC_NB_XSTATS;

	} else {
		for (i = 0; i < n; i++) {
			if (ids[i] >= IGC_NB_XSTATS) {
				PMD_DRV_LOG(ERR, "id value isn't valid");
				return -EINVAL;
			}
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[ids[i]].offset);
		}
		return n;
	}
}

/*
 * Record the mapping of an RX or TX queue to a statistics register index.
 * The mapping is only stored in the adapter private data; it is consumed
 * elsewhere when per-queue statistics are gathered.
 */
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
		uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);

	/* check queue id is valid */
	if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
		PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
			queue_id, IGC_QUEUE_PAIRS_NUM - 1);
		return -EINVAL;
	}

	/* store the mapping status id */
	if (is_rx)
		igc->rxq_stats_map[queue_id] = stat_idx;
	else
		igc->txq_stats_map[queue_id] = stat_idx;

	return 0;
}

/*
 * Mask (disable) the MSI-X interrupt for one RX queue by writing the
 * queue's vector bit to the EIMC (interrupt mask clear) register.
 */
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	/* RX vectors start after the misc vector when other interrupts
	 * (LSC etc.) have their own vector.
	 */
	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMC, mask);
	IGC_WRITE_FLUSH(hw);

	return 0;
}

/*
 * Unmask (enable) the MSI-X interrupt for one RX queue by writing the
 * queue's vector bit to the EIMS (interrupt mask set) register, then
 * enable the interrupt at the host level.
 */
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMS, mask);
	IGC_WRITE_FLUSH(hw);

	rte_intr_enable(intr_handle);

	return 0;
}

/*
 * Report the current flow-control configuration.  Watermarks and pause
 * parameters come from the driver's cached hw->fc state; the actual
 * RX/TX pause mode is read back from the RFCE/TFCE bits of CTRL so it
 * reflects what the hardware is really doing.
 */
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to actual setting of
	 * the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	if (ctrl & IGC_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & IGC_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

/*
 * Apply a new flow-control configuration.  Validates the watermarks
 * against the RX buffer size, stores the requested mode in hw->fc and
 * re-runs link setup so the base driver programs it.  On success, the
 * MAC control-frame forwarding bit (RCTL.PMCF) is set or cleared per
 * fc_conf->mac_ctrl_frame_fwd since the base driver does not handle it.
 * Returns 0 on success, -ENOTSUP if autoneg setting conflicts,
 * -EINVAL on bad parameters, -EIO if link setup fails.
 */
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;
	int err;

	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;

	rx_buf_size = igc_get_rx_buffer_size(hw);
	PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if (fc_conf->high_water > max_high_water ||
		fc_conf->high_water < fc_conf->low_water) {
		PMD_DRV_LOG(ERR,
			"Incorrect high(%u)/low(%u) water value, max is %u",
			fc_conf->high_water, fc_conf->low_water,
			max_high_water);
		return -EINVAL;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		hw->fc.requested_mode = igc_fc_none;
		break;
	case RTE_FC_RX_PAUSE:
		hw->fc.requested_mode = igc_fc_rx_pause;
		break;
	case RTE_FC_TX_PAUSE:
		hw->fc.requested_mode = igc_fc_tx_pause;
		break;
	case RTE_FC_FULL:
		hw->fc.requested_mode = igc_fc_full;
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
		return -EINVAL;
	}

	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = igc_setup_link_generic(hw);
	if (err == IGC_SUCCESS) {
		/**
		 * check if we want to forward MAC frames - driver doesn't have
		 * native capability to do that, so we'll write the registers
		 * ourselves
		 **/
		rctl = IGC_READ_REG(hw, IGC_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= IGC_RCTL_PMCF;
		else
			rctl &= ~IGC_RCTL_PMCF;

		IGC_WRITE_REG(hw, IGC_RCTL, rctl);
		IGC_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
	return -EIO;
}

/*
 * Program the RSS redirection table.  The 128-entry table is written
 * IGC_RSS_RDT_REG_SIZE bytes at a time; for partially-masked register
 * groups the current register value is read first so unmasked entries
 * are preserved.  Groups whose mask straddles a RETA register boundary
 * are skipped.
 */
static int
eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* set redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta, reg;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* if no need to update the register */
		if (!mask ||
		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* check mask whether need to read the register value first */
		if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
			reg.dword = 0;
		else
			reg.dword = IGC_READ_REG_LE_VALUE(hw,
					IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));

		/* update the register */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta.bytes[j] =
					(uint8_t)reta_conf[idx].reta[shift + j];
			else
				reta.bytes[j] = reg.bytes[j];
		}
		IGC_WRITE_REG_LE_VALUE(hw,
			IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
	}

	return 0;
}

/*
 * Read back the RSS redirection table.  Only entries selected by the
 * caller's group masks are filled in; groups whose mask straddles a
 * RETA register boundary are skipped.
 */
static int
eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* read redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* if no need to read register */
		if (!mask ||
		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* read register and get the queue index */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		reta.dword = IGC_READ_REG_LE_VALUE(hw,
				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta_conf[idx].reta[shift + j] = reta.bytes[j];
		}
	}

	return 0;
}

/* Program the RSS hash key and enabled hash functions into hardware. */
static int
eth_igc_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_hw_rss_hash_set(hw, rss_conf);
	return 0;
}

/*
 * Report the current RSS configuration: the hash key is read from the
 * RSSRK registers (if the caller supplied a correctly-sized buffer) and
 * the enabled hash functions are decoded from the MRQC register bits.
 */
static int
eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t rss_hf;

	if (hash_key != NULL) {
		int i;

		/* if not enough space for store hash key */
		if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
			PMD_DRV_LOG(ERR,
				"RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
				rss_conf->rss_key_len, IGC_HKEY_SIZE);
			return -EINVAL;
		}

		/* read RSS key from register */
		for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
			hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
	}

	/* get RSS functions configured in MRQC register */
	mrqc = IGC_READ_REG(hw, IGC_MRQC);
	if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
		return 0;

	rss_hf = 0;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
		rss_hf |= ETH_RSS_IPV4;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
		rss_hf |= ETH_RSS_IPV6;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
		rss_hf |= ETH_RSS_IPV6_UDP_EX;

	rss_conf->rss_hf |= rss_hf;
	return 0;
}

/*
 * Add ('on' != 0) or remove a VLAN id from the hardware VLAN filter
 * table (VFTA), keeping the driver's shadow copy in sync.  Each VFTA
 * register holds 32 VLAN-id bits, selected via vid_idx/vid_bit.
 */
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
	vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
	vfta = shadow_vfta->vfta[vid_idx];
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

/* Disable hardware VLAN filtering (clear RCTL.VFE and RCTL.CFIEN). */
static void
igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_read_reg_check_clear_bits(hw, IGC_RCTL,
			IGC_RCTL_CFIEN | IGC_RCTL_VFE);
}

/*
 * Enable hardware VLAN filtering and reload the VLAN filter table from
 * the driver's shadow copy.
 */
static void
igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t reg_val;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg_val = IGC_READ_REG(hw, IGC_RCTL);
	reg_val &= ~IGC_RCTL_CFIEN;
	reg_val |= IGC_RCTL_VFE;
	IGC_WRITE_REG(hw, IGC_RCTL, reg_val);

	/* restore VFTA table */
	for (i = 0; i < IGC_VFTA_SIZE; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
}

/* Disable VLAN tag stripping (clear CTRL.VME). */
static void
igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

/* Enable VLAN tag stripping (set CTRL.VME). */
static void
igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

/*
 * Disable extended (double/QinQ) VLAN mode.  Shrinks the maximum
 * packet length (RLPML) by one VLAN tag before clearing
 * CTRL_EXT.EXT_VLAN.  No-op if extend VLAN was not enabled.
 */
static int
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* if extend vlan hasn't been enabled */
	if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
		return 0;

	/* Update maximum packet length */
	if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
			frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
		return -EINVAL;
	}
	IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);

	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

/*
 * Enable extended (double/QinQ) VLAN mode.  Programs the maximum packet
 * length (RLPML) before setting CTRL_EXT.EXT_VLAN.  No-op if extend
 * VLAN is already enabled.
 */
static int
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* if extend vlan has been enabled */
	if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
		return 0;

	/* Update maximum packet length */
	if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
			frame_size, MAX_RX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	IGC_WRITE_REG(hw, IGC_RLPML, frame_size);

	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

/*
 * Apply the VLAN offloads selected in dev_conf.rxmode for each offload
 * class flagged in 'mask' (strip / filter / extend).  Only the extend
 * path can fail.
 */
static int
eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			igc_vlan_hw_strip_enable(dev);
		else
			igc_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			igc_vlan_hw_filter_enable(dev);
		else
			igc_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			return igc_vlan_hw_extend_enable(dev);
		else
			return igc_vlan_hw_extend_disable(dev);
	}

	return 0;
}

/*
 * Set the TPID (Ethertype) used for the outer VLAN tag in double-VLAN
 * mode by rewriting the upper half of the VET register.  Inner and
 * single TPIDs are fixed in hardware, so anything other than
 * ETH_VLAN_TYPE_OUTER returns -ENOTSUP.
 */
static int
eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		      enum rte_vlan_type vlan_type,
		      uint16_t tpid)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t reg_val;

	/* only outer TPID of double VLAN can be configured*/
	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		reg_val = IGC_READ_REG(hw, IGC_VET);
		reg_val = (reg_val & (~IGC_VET_EXT)) |
			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
		IGC_WRITE_REG(hw, IGC_VET, reg_val);

		return 0;
	}

	/* all other TPID values are read-only*/
	PMD_DRV_LOG(ERR, "Not supported");
	return -ENOTSUP;
}

/* PCI probe callback: create an ethdev backed by an igc_adapter. */
static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct igc_adapter), eth_igc_dev_init);
}

/* PCI remove callback: tear down the ethdev created at probe time. */
static int
eth_igc_pci_remove(struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
}

/* PCI driver descriptor registered with the DPDK PCI bus. */
static struct rte_pci_driver rte_igc_pmd = {
	.id_table = pci_id_igc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igc_pci_probe,
	.remove = eth_igc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");