/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */

#include <stdint.h>
#include <string.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "igc_logs.h"
#include "igc_txrx.h"
#include "igc_filter.h"
#include "igc_flow.h"

#define IGC_INTEL_VENDOR_ID		0x8086

/*
 * The overhead from MTU to max frame size.
 * VLAN is considered, so the tag needs to be counted.
 */
#define IGC_ETH_OVERHEAD		(RTE_ETHER_HDR_LEN + \
					RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)

#define IGC_FC_PAUSE_TIME		0x0680
#define IGC_LINK_UPDATE_CHECK_TIMEOUT	90	/* 9s */
#define IGC_LINK_UPDATE_CHECK_INTERVAL	100	/* ms */

#define IGC_MISC_VEC_ID			RTE_INTR_VEC_ZERO_OFFSET
#define IGC_RX_VEC_START		RTE_INTR_VEC_RXTX_OFFSET
#define IGC_MSIX_OTHER_INTR_VEC		0	/* MSI-X other interrupt vector */
#define IGC_FLAG_NEED_LINK_UPDATE	(1u << 0)	/* need update link */

#define IGC_DEFAULT_RX_FREE_THRESH	32

#define IGC_DEFAULT_RX_PTHRESH		8
#define IGC_DEFAULT_RX_HTHRESH		8
#define IGC_DEFAULT_RX_WTHRESH		4

#define IGC_DEFAULT_TX_PTHRESH		8
#define IGC_DEFAULT_TX_HTHRESH		1
#define IGC_DEFAULT_TX_WTHRESH		16

/* MSI-X other interrupt vector */
#define IGC_MSIX_OTHER_INTR_VEC		0

/* External VLAN Enable bit mask */
#define IGC_CTRL_EXT_EXT_VLAN		(1u << 26)

/* Speed select */
#define IGC_CTRL_SPEED_MASK		(7u << 8)
#define IGC_CTRL_SPEED_2500		(6u << 8)

/* External VLAN Ether Type bit mask and shift */
#define IGC_VET_EXT			0xFFFF0000
#define IGC_VET_EXT_SHIFT		16

/* Force EEE Auto-negotiation */
#define IGC_EEER_EEE_FRC_AN		(1u << 28)

/* Per Queue Good Packets Received Count */
#define IGC_PQGPRC(idx)		(0x10010 + 0x100 * (idx))
/* Per Queue Good Octets Received Count */
#define IGC_PQGORC(idx)		(0x10018 + 0x100 * (idx))
/* Per Queue Good Octets Transmitted Count */
#define IGC_PQGOTC(idx)		(0x10034 + 0x100 * (idx))
/* Per Queue Multicast Packets Received Count */
#define IGC_PQMPRC(idx)		(0x10038 + 0x100 * (idx))
/* Transmit Queue Drop Packet Count */
#define IGC_TQDPC(idx)		(0xe030 + 0x40 * (idx))

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define U32_0_IN_U64		0	/* lower bytes of u64 */
#define U32_1_IN_U64		1	/* higher bytes of u64 */
#else
#define U32_0_IN_U64		1
#define U32_1_IN_U64		0
#endif

#define IGC_ALARM_INTERVAL	8000000u
/* us; some per-queue registers wrap around back to 0 after about 13.6s. */

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IGC_MAX_RXD,
	.nb_min = IGC_MIN_RXD,
	.nb_align = IGC_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IGC_MAX_TXD,
	.nb_min = IGC_MIN_TXD,
	.nb_align = IGC_TXD_ALIGN,
	.nb_seg_max = IGC_TX_MAX_SEG,
	.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
};

static const struct rte_pci_id pci_id_igc_map[] = {
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* store statistics names and their offsets in the stats structure */
struct rte_igc_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)},
	{"rx_errors", offsetof(struct igc_hw_stats, rxerrc)},
	{"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct igc_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct igc_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)},
	{"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)},
	{"rx_length_errors", offsetof(struct igc_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)},
	{"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)},
	{"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct igc_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct igc_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct igc_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct igc_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		ptc1023)},
	{"tx_size_1023_to_max_packets", offsetof(struct igc_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)},
	{"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)},
	{"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)},
	{"rx_descriptor_lower_threshold",
		offsetof(struct igc_hw_stats, icrxdmtc)},
};

#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \
		sizeof(rte_igc_stats_strings[0]))

static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static int eth_igc_set_link_up(struct rte_eth_dev *dev);
static int eth_igc_set_link_down(struct rte_eth_dev *dev);
static int eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
		char *fw_version, size_t fw_size);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int eth_igc_led_on(struct rte_eth_dev *dev);
static int eth_igc_led_off(struct rte_eth_dev *dev);
static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igc_rar_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);
static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_igc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *rte_stats);
static int eth_igc_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned int n);
static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
		unsigned int limit);
static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type, uint16_t tpid);

static const struct eth_dev_ops eth_igc_ops = {
	.dev_configure		= eth_igc_configure,
	.link_update		= eth_igc_link_update,
	.dev_stop		= eth_igc_stop,
	.dev_start		= eth_igc_start,
	.dev_close		= eth_igc_close,
	.dev_reset		= eth_igc_reset,
	.dev_set_link_up	= eth_igc_set_link_up,
	.dev_set_link_down	= eth_igc_set_link_down,
	.promiscuous_enable	= eth_igc_promiscuous_enable,
	.promiscuous_disable	= eth_igc_promiscuous_disable,
	.allmulticast_enable	= eth_igc_allmulticast_enable,
	.allmulticast_disable	= eth_igc_allmulticast_disable,
	.fw_version_get		= eth_igc_fw_version_get,
	.dev_infos_get		= eth_igc_infos_get,
	.dev_led_on		= eth_igc_led_on,
	.dev_led_off		= eth_igc_led_off,
	.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
	.mtu_set		= eth_igc_mtu_set,
	.mac_addr_add		= eth_igc_rar_set,
	.mac_addr_remove	= eth_igc_rar_clear,
	.mac_addr_set		= eth_igc_default_mac_addr_set,
	.set_mc_addr_list	= eth_igc_set_mc_addr_list,

	.rx_queue_setup		= eth_igc_rx_queue_setup,
	.rx_queue_release	= eth_igc_rx_queue_release,
	.tx_queue_setup		= eth_igc_tx_queue_setup,
	.tx_queue_release	= eth_igc_tx_queue_release,
	.tx_done_cleanup	= eth_igc_tx_done_cleanup,
	.rxq_info_get		= eth_igc_rxq_info_get,
	.txq_info_get		= eth_igc_txq_info_get,
	.stats_get		= eth_igc_stats_get,
	.xstats_get		= eth_igc_xstats_get,
	.xstats_get_by_id	= eth_igc_xstats_get_by_id,
	.xstats_get_names_by_id	= eth_igc_xstats_get_names_by_id,
	.xstats_get_names	= eth_igc_xstats_get_names,
	.stats_reset		= eth_igc_xstats_reset,
	.xstats_reset		= eth_igc_xstats_reset,
	.queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
	.rx_queue_intr_enable	= eth_igc_rx_queue_intr_enable,
	.rx_queue_intr_disable	= eth_igc_rx_queue_intr_disable,
	.flow_ctrl_get		= eth_igc_flow_ctrl_get,
	.flow_ctrl_set		= eth_igc_flow_ctrl_set,
	.reta_update		= eth_igc_rss_reta_update,
	.reta_query		= eth_igc_rss_reta_query,
	.rss_hash_update	= eth_igc_rss_hash_update,
	.rss_hash_conf_get	= eth_igc_rss_hash_conf_get,
	.vlan_filter_set	= eth_igc_vlan_filter_set,
	.vlan_offload_set	= eth_igc_vlan_offload_set,
	.vlan_tpid_set		= eth_igc_vlan_tpid_set,
	.vlan_strip_queue_set	= eth_igc_vlan_strip_queue_set,
	.flow_ops_get		= eth_igc_flow_ops_get,
};

/*
 * multiple queue mode checking
 */
static int
igc_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		PMD_INIT_LOG(ERR, "SRIOV is not supported.");
		return -EINVAL;
	}

	if (rx_mq_mode != ETH_MQ_RX_NONE &&
		rx_mq_mode != ETH_MQ_RX_RSS) {
		/* RSS together with VMDq not supported */
		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				rx_mq_mode);
		return -EINVAL;
	}

	/* To not break software that sets an invalid mode, only display
	 * a warning if an invalid mode is used.
	 */
	if (tx_mq_mode != ETH_MQ_TX_NONE)
		PMD_INIT_LOG(WARNING,
			"TX mode %d is not supported. Due to meaningless in this driver, just ignore",
			tx_mq_mode);

	return 0;
}

static int
eth_igc_configure(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	ret = igc_check_mq_mode(dev);
	if (ret != 0)
		return ret;

	intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
	return 0;
}

static int
eth_igc_set_link_up(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_up_phy(hw);
	else
		igc_power_up_fiber_serdes_link(hw);
	return 0;
}

static int
eth_igc_set_link_down(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_down_phy(hw);
	else
		igc_shutdown_fiber_serdes_link(hw);
	return 0;
}

/*
 * disable other interrupt
 */
static void
igc_intr_other_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMC, ~0);
	IGC_WRITE_FLUSH(hw);
}

/*
 * enable other interrupt
 */
static inline void
igc_intr_other_enable(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
	IGC_WRITE_FLUSH(hw);
}

/*
 * It reads ICR and gets interrupt causes, checks them and sets a bit flag
 * to update link status.
 */
static void
eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	/* read-on-clear nic registers here */
	icr = IGC_READ_REG(hw, IGC_ICR);

	intr->flags = 0;
	if (icr & IGC_ICR_LSC)
		intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
}

/* return 0 means link status changed, -1 means not changed */
static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case igc_media_type_copper:
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case igc_media_type_fiber:
			igc_check_for_link(hw);
			link_check = (IGC_READ_REG(hw, IGC_STATUS) &
					IGC_STATUS_LU);
			break;

		case igc_media_type_internal_serdes:
			igc_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

		if (speed == SPEED_2500) {
			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
			if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
				tipg &= ~IGC_TIPG_IPGT_MASK;
				tipg |= 0x0b;
				IGC_WRITE_REG(hw, IGC_TIPG, tipg);
			}
		}
	} else {
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

/*
 * It executes link_update after knowing an interrupt is present.
 */
static void
eth_igc_interrupt_action(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		ret = eth_igc_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s",
				dev->data->port_id,
				(unsigned int)link.link_speed,
				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
		else
			PMD_DRV_LOG(INFO, " Port %d: Link Down",
				dev->data->port_id);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
}

/*
 * Interrupt handler that is registered first.
 *
 * @handle
 *  Pointer to interrupt handle.
 * @param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
eth_igc_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igc_interrupt_get_status(dev);
	eth_igc_interrupt_action(dev);
}

static void igc_read_queue_stats_register(struct rte_eth_dev *dev);

/*
 * Update the queue status every IGC_ALARM_INTERVAL microseconds.
 * @param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
igc_update_queue_stats_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	igc_read_queue_stats_register(dev);
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
}

/*
 * rx,tx enable/disable
 */
static void
eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t tctl, rctl;

	tctl = IGC_READ_REG(hw, IGC_TCTL);
	rctl = IGC_READ_REG(hw, IGC_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= IGC_TCTL_EN;
		rctl |= IGC_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~IGC_TCTL_EN;
		rctl &= ~IGC_RCTL_EN;
	}
	IGC_WRITE_REG(hw, IGC_TCTL, tctl);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);
}

/*
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static int
eth_igc_stop(struct rte_eth_dev *dev)
{
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct rte_eth_link link;

	dev->data->dev_started = 0;
	adapter->stopped = 1;

	/* disable receive and transmit */
	eth_igc_rxtx_control(dev, false);

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	igc_intr_other_disable(dev);

	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* disable checking EEE operation in MAC loopback mode */
	igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);

	/* Set bit for Go Link disconnect */
	igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
			IGC_82580_PM_GO_LINKD);

	/* Power down the phy. Needed to make the link go Down */
	eth_igc_set_link_down(dev);

	igc_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	return 0;
}

/*
 * write interrupt vector allocation register
 * @hw
 *  board private structure
 * @queue_index
 *  queue index, valid 0,1,2,3
 * @tx
 *  tx:1, rx:0
 * @msix_vector
 *  msix-vector, valid 0,1,2,3,4
 */
static void
igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,
		bool tx, uint8_t msix_vector)
{
	uint8_t offset = 0;
	uint8_t reg_index = queue_index >> 1;
	uint32_t val;

	/*
	 * IVAR(0)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX1		RX1		TX0		RX0
	 *
	 * IVAR(1)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX3		RX3		TX2		RX2
	 */

	if (tx)
		offset = 8;

	if (queue_index & 1)
		offset += 16;

	val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset;

	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);
}

/* Sets up the hardware to generate MSI-X interrupts properly
 * @hw
 *  board private structure
 */
static void
igc_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	uint32_t intr_mask;
	uint32_t vec = IGC_MISC_VEC_ID;
	uint32_t base = IGC_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	int i;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = IGC_RX_VEC_START;
		vec = base;
		misc_shift = 1;
	}

	/* turn on MSI-X capability first */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
				IGC_GPIE_PBA | IGC_GPIE_EIAME |
				IGC_GPIE_NSICR);
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
			misc_shift;

	if (dev->data->dev_conf.intr_conf.lsc)
		intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);

	/* enable msix auto-clear */
	igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);

	/* set other cause interrupt vector */
	igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
		(uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);

	/* enable auto-mask */
	igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		igc_write_ivar(hw, i, 0, vec);
		intr_handle->intr_vec[i] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	IGC_WRITE_FLUSH(hw);
}

/**
 * It enables the interrupt mask and then enables the interrupt.
 *
 * @dev
 *  Pointer to struct rte_eth_dev.
 * @on
 *  Enable or Disable
 */
static void
igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	if (on)
		intr->mask |= IGC_ICR_LSC;
	else
		intr->mask &= ~IGC_ICR_LSC;
}

/*
 * It enables the interrupt.
 * It will be called only once during NIC initialization.
 */
static void
igc_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t mask;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift;
	IGC_WRITE_REG(hw, IGC_EIMS, mask);
}

/*
 * Get hardware rx-buffer size.
 */
static inline int
igc_get_rx_buffer_size(struct igc_hw *hw)
{
	return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
}

/*
 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igc_hw_control_acquire(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igc_hw_control_release(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT,
			ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

static int
igc_hardware_init(struct igc_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igc_hw_control_acquire(hw);

	/* Issue a global reset */
	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/*
	 * Hardware flow control
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 */
	rx_buf_size = igc_get_rx_buffer_size(hw);
	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	hw->fc.requested_mode = igc_fc_full;

	diag = igc_init_hw(hw);
	if (diag < 0)
		return diag;

	igc_get_phy_info(hw);
	igc_check_for_link(hw);

	return 0;
}

static int
eth_igc_start(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t *speeds;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	/* disable uio/vfio intr/eventfd mapping */
	if (!adapter->stopped)
		rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igc_set_link_up(dev);

	/* Put the address into the Receive Address Array */
	igc_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igc_hardware_init(hw)) {
		PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
		dev->data->dev_conf.intr_conf.rxq) {
		uint32_t intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for rx interrupt */
	igc_configure_msix_intr(dev);

	igc_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = igc_rx_init(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
		igc_dev_clear_queues(dev);
		return ret;
	}

	igc_clear_hw_cntrs_base_generic(hw);

	/* VLAN Offload Settings */
	eth_igc_vlan_offload_set(dev,
		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
		hw->mac.autoneg = 1;
	} else {
		int num_speeds = 0;

		if (*speeds & ETH_LINK_SPEED_FIXED) {
			PMD_DRV_LOG(ERR,
				"Force speed mode currently not supported");
			igc_dev_clear_queues(dev);
			return -EINVAL;
		}

		hw->phy.autoneg_advertised = 0;
		hw->mac.autoneg = 1;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_2_5G) {
			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
			num_speeds++;
		}
		if (num_speeds == 0)
			goto error_invalid_config;
	}

	igc_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc)
			igc_lsc_interrupt_setup(dev, 1);
		else
			igc_lsc_interrupt_setup(dev, 0);
	} else {
		rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc)
			PMD_DRV_LOG(INFO,
				"LSC won't enable because of no intr multiplex");
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq &&
			rte_intr_dp_is_en(intr_handle))
		igc_rxq_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	igc_intr_other_enable(dev);

	eth_igc_rxtx_control(dev, true);
	eth_igc_link_update(dev, 0);

	/* configure MAC-loopback mode */
	if (dev->data->dev_conf.lpbk_mode == 1) {
		uint32_t reg_val;

		reg_val = IGC_READ_REG(hw, IGC_CTRL);
		reg_val &= ~IGC_CTRL_SPEED_MASK;
		reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
			IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
		IGC_WRITE_REG(hw, IGC_CTRL, reg_val);

		igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
	}

	return 0;

error_invalid_config:
	PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
			dev->data->dev_conf.link_speeds, dev->data->port_id);
	igc_dev_clear_queues(dev);
	return -EINVAL;
}

static int
igc_reset_swfw_lock(struct igc_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = igc_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (igc_get_hw_semaphore_generic(hw) < 0)
		PMD_DRV_LOG(DEBUG, "SMBI lock released");

	igc_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage.
		 * If this is the case, it is due to an improper exit of the
		 * application. So force the release of the faulty lock.
		 */
		mask = IGC_SWFW_PHY0_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = IGC_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return IGC_SUCCESS;
}

/*
 * free all rx/tx queues.
 */
static void
igc_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_igc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_igc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static int
eth_igc_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	int retry = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!adapter->stopped)
		ret = eth_igc_stop(dev);

	igc_flow_flush(dev, NULL);
	igc_clear_all_filter(dev);

	igc_intr_other_disable(dev);
	do {
		int ret = rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
			break;

		PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
		DELAY(200 * 1000); /* delay 200ms */
	} while (retry++ < 5);

	igc_phy_hw_reset(hw);
	igc_hw_control_release(hw);
	igc_dev_free_queues(dev);

	/* Reset any pending lock */
	igc_reset_swfw_lock(hw);

	return ret;
}

static void
igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
}

static int
eth_igc_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	int i, error = 0;

	PMD_INIT_FUNC_TRACE();
	dev->dev_ops = &eth_igc_ops;
	dev->rx_descriptor_done = eth_igc_rx_descriptor_done;
	dev->rx_queue_count = eth_igc_rx_queue_count;
	dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
	dev->tx_descriptor_status = eth_igc_tx_descriptor_status;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->back = pci_dev;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igc_identify_hardware(dev, pci_dev);
	if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	igc_get_bus_info(hw);

	/* Reset any pending lock */
	if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = igc_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc("igc",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
				RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Now initialize the hardware */
	if (igc_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}

	hw->mac.get_link_status = 1;
	igc->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (igc_check_reset_block(hw) < 0)
		PMD_INIT_LOG(ERR,
			"PHY reset is blocked due to SOL/IDER session.");

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
			dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
			eth_igc_interrupt_handler, (void *)dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igc_intr_other_enable(dev);

	/* initialize queue stats mapping */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		igc->txq_stats_map[i] = -1;
		igc->rxq_stats_map[i] = -1;
	}

	igc_flow_init(dev);
	igc_clear_all_filter(dev);
	return 0;

err_late:
	igc_hw_control_release(hw);
	return error;
}

static int
eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	eth_igc_close(eth_dev);
	return 0;
}

static int
eth_igc_reset(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = eth_igc_dev_uninit(dev);
	if (ret)
		return ret;

	return eth_igc_dev_init(dev);
}

static int
eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= IGC_RCTL_MPE;
	else
		rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= IGC_RCTL_MPE;
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return 0;	/* must remain in all_multicast mode */

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		size_t fw_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_fw_version fw;
	int ret;

	igc_get_fw_version(hw, &fw);

	/* if option rom is valid, display its version too */
	if (fw.or_valid) {
		ret = snprintf(fw_version, fw_size,
			 "%d.%d, 0x%08x, %d.%d.%d",
			 fw.eep_major, fw.eep_minor, fw.etrack_id,
			 fw.or_major, fw.or_build, fw.or_patch);
	/* no option rom */
	} else {
		if (fw.etrack_id != 0X0000) {
			ret = snprintf(fw_version, fw_size,
				 "%d.%d, 0x%08x",
				 fw.eep_major, fw.eep_minor,
				 fw.etrack_id);
		} else {
			ret = snprintf(fw_version, fw_size,
				 "%d.%d.%d",
				 fw.eep_major, fw.eep_minor,
				 fw.eep_build);
		}
	}
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static int
eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_vmdq_pools = 0;

	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGC_DEFAULT_RX_PTHRESH,
			.hthresh = IGC_DEFAULT_RX_HTHRESH,
			.wthresh = IGC_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGC_DEFAULT_TX_PTHRESH,
			.hthresh = IGC_DEFAULT_TX_HTHRESH,
			.wthresh = IGC_DEFAULT_TX_WTHRESH,
		},
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;

	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	return 0;
}

static int
eth_igc_led_on(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_igc_led_off(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}

static const uint32_t *
eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rx_desc_pkt_info_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

static int
eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
	uint32_t rctl;

	/* if extend vlan has been enabled */
	if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
		frame_size += VLAN_TAG_SIZE;

	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU ||
		frame_size > MAX_RX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	/*
	 * If device is started, refuse mtu that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev->data->dev_started && !dev->data->scattered_rx &&
		frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	rctl = IGC_READ_REG(hw, IGC_RCTL);

	/* switch to jumbo mode if needed */
	if (mtu > RTE_ETHER_MTU) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl |= IGC_RCTL_LPE;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl &= ~IGC_RCTL_LPE;
	}
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

	return 0;
}

static int
eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_rar_set(hw, mac_addr->addr_bytes, index);
	RTE_SET_USED(pool);
	return 0;
}

static void
eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[RTE_ETHER_ADDR_LEN];
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	memset(addr, 0, sizeof(addr));
	igc_rar_set(hw, addr, index);
}

static int
eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_rar_set(hw, addr->addr_bytes, 0);
	return 0;
}

static int
eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}

/*
 * Read hardware registers
 */
static void
igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
{
	int pause_frames;

	uint64_t old_gprc = stats->gprc;
	uint64_t old_gptc = stats->gptc;
	uint64_t old_tpr = stats->tpr;
	uint64_t old_tpt = stats->tpt;
	uint64_t old_rpthc = stats->rpthc;
	uint64_t old_hgptc = stats->hgptc;

	stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
	stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
	stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
	stats->mpc += IGC_READ_REG(hw, IGC_MPC);
	stats->scc += IGC_READ_REG(hw, IGC_SCC);
	stats->ecol += IGC_READ_REG(hw, IGC_ECOL);

	stats->mcc += IGC_READ_REG(hw, IGC_MCC);
	stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
	stats->colc += IGC_READ_REG(hw, IGC_COLC);

	stats->dc += IGC_READ_REG(hw, IGC_DC);
	stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
	stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
	stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
	stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
	stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
1700 */ 1701 pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC); 1702 stats->xoffrxc += pause_frames; 1703 stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC); 1704 stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC); 1705 stats->prc64 += IGC_READ_REG(hw, IGC_PRC64); 1706 stats->prc127 += IGC_READ_REG(hw, IGC_PRC127); 1707 stats->prc255 += IGC_READ_REG(hw, IGC_PRC255); 1708 stats->prc511 += IGC_READ_REG(hw, IGC_PRC511); 1709 stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023); 1710 stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522); 1711 stats->gprc += IGC_READ_REG(hw, IGC_GPRC); 1712 stats->bprc += IGC_READ_REG(hw, IGC_BPRC); 1713 stats->mprc += IGC_READ_REG(hw, IGC_MPRC); 1714 stats->gptc += IGC_READ_REG(hw, IGC_GPTC); 1715 1716 /* For the 64-bit byte counters the low dword must be read first. */ 1717 /* Both registers clear on the read of the high dword */ 1718 1719 /* Workaround CRC bytes included in size, take away 4 bytes/packet */ 1720 stats->gorc += IGC_READ_REG(hw, IGC_GORCL); 1721 stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32); 1722 stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN; 1723 stats->gotc += IGC_READ_REG(hw, IGC_GOTCL); 1724 stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32); 1725 stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN; 1726 1727 stats->rnbc += IGC_READ_REG(hw, IGC_RNBC); 1728 stats->ruc += IGC_READ_REG(hw, IGC_RUC); 1729 stats->rfc += IGC_READ_REG(hw, IGC_RFC); 1730 stats->roc += IGC_READ_REG(hw, IGC_ROC); 1731 stats->rjc += IGC_READ_REG(hw, IGC_RJC); 1732 1733 stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC); 1734 stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC); 1735 stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC); 1736 stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC); 1737 stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC); 1738 stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC); 1739 stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC); 1740 1741 stats->tpr += IGC_READ_REG(hw, IGC_TPR); 1742 stats->tpt += IGC_READ_REG(hw, IGC_TPT); 1743 1744 stats->tor += IGC_READ_REG(hw, IGC_TORL); 1745 stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32); 1746 stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 1747 stats->tot += IGC_READ_REG(hw, IGC_TOTL); 1748 stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32); 1749 stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN; 1750 1751 stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64); 1752 stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127); 1753 stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255); 1754 stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511); 1755 stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023); 1756 stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522); 1757 stats->mptc += IGC_READ_REG(hw, IGC_MPTC); 1758 stats->bptc += IGC_READ_REG(hw, IGC_BPTC); 1759 stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC); 1760 1761 stats->iac += IGC_READ_REG(hw, IGC_IAC); 1762 stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC); 1763 stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC); 1764 stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC); 1765 1766 /* Host to Card Statistics */ 1767 stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL); 1768 stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32); 1769 stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN; 1770 stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL); 1771 stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32); 1772 stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN; 1773 stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS); 1774 } 1775 1776 /* 1777 * Write 0 to all queue status 
registers 1778 */ 1779 static void 1780 igc_reset_queue_stats_register(struct igc_hw *hw) 1781 { 1782 int i; 1783 1784 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1785 IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0); 1786 IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0); 1787 IGC_WRITE_REG(hw, IGC_PQGORC(i), 0); 1788 IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0); 1789 IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0); 1790 IGC_WRITE_REG(hw, IGC_RQDPC(i), 0); 1791 IGC_WRITE_REG(hw, IGC_TQDPC(i), 0); 1792 } 1793 } 1794 1795 /* 1796 * Read all hardware queue status registers 1797 */ 1798 static void 1799 igc_read_queue_stats_register(struct rte_eth_dev *dev) 1800 { 1801 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1802 struct igc_hw_queue_stats *queue_stats = 1803 IGC_DEV_PRIVATE_QUEUE_STATS(dev); 1804 int i; 1805 1806 /* 1807 * This register is not cleared on read. Furthermore, the register wraps 1808 * around back to 0x00000000 on the next increment when reaching a value 1809 * of 0xFFFFFFFF and then continues normal count operation. 1810 */ 1811 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) { 1812 union { 1813 u64 ddword; 1814 u32 dword[2]; 1815 } value; 1816 u32 tmp; 1817 1818 /* 1819 * Read the register first, if the value is smaller than that 1820 * previous read, that mean the register has been overflowed, 1821 * then we add the high 4 bytes by 1 and replace the low 4 1822 * bytes by the new value. 1823 */ 1824 tmp = IGC_READ_REG(hw, IGC_PQGPRC(i)); 1825 value.ddword = queue_stats->pqgprc[i]; 1826 if (value.dword[U32_0_IN_U64] > tmp) 1827 value.dword[U32_1_IN_U64]++; 1828 value.dword[U32_0_IN_U64] = tmp; 1829 queue_stats->pqgprc[i] = value.ddword; 1830 1831 tmp = IGC_READ_REG(hw, IGC_PQGPTC(i)); 1832 value.ddword = queue_stats->pqgptc[i]; 1833 if (value.dword[U32_0_IN_U64] > tmp) 1834 value.dword[U32_1_IN_U64]++; 1835 value.dword[U32_0_IN_U64] = tmp; 1836 queue_stats->pqgptc[i] = value.ddword; 1837 1838 tmp = IGC_READ_REG(hw, IGC_PQGORC(i)); 1839 value.ddword = queue_stats->pqgorc[i]; 1840 if (value.dword[U32_0_IN_U64] > tmp) 1841 value.dword[U32_1_IN_U64]++; 1842 value.dword[U32_0_IN_U64] = tmp; 1843 queue_stats->pqgorc[i] = value.ddword; 1844 1845 tmp = IGC_READ_REG(hw, IGC_PQGOTC(i)); 1846 value.ddword = queue_stats->pqgotc[i]; 1847 if (value.dword[U32_0_IN_U64] > tmp) 1848 value.dword[U32_1_IN_U64]++; 1849 value.dword[U32_0_IN_U64] = tmp; 1850 queue_stats->pqgotc[i] = value.ddword; 1851 1852 tmp = IGC_READ_REG(hw, IGC_PQMPRC(i)); 1853 value.ddword = queue_stats->pqmprc[i]; 1854 if (value.dword[U32_0_IN_U64] > tmp) 1855 value.dword[U32_1_IN_U64]++; 1856 value.dword[U32_0_IN_U64] = tmp; 1857 queue_stats->pqmprc[i] = value.ddword; 1858 1859 tmp = IGC_READ_REG(hw, IGC_RQDPC(i)); 1860 value.ddword = queue_stats->rqdpc[i]; 1861 if (value.dword[U32_0_IN_U64] > tmp) 1862 value.dword[U32_1_IN_U64]++; 1863 value.dword[U32_0_IN_U64] = tmp; 1864 queue_stats->rqdpc[i] = value.ddword; 1865 1866 tmp = IGC_READ_REG(hw, IGC_TQDPC(i)); 1867 value.ddword = queue_stats->tqdpc[i]; 1868 if (value.dword[U32_0_IN_U64] > tmp) 1869 value.dword[U32_1_IN_U64]++; 1870 value.dword[U32_0_IN_U64] = tmp; 1871 queue_stats->tqdpc[i] = value.ddword; 1872 } 1873 } 1874 1875 static int 1876 eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) 1877 { 1878 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev); 1879 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev); 1880 struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev); 1881 struct igc_hw_queue_stats *queue_stats = 1882 IGC_DEV_PRIVATE_QUEUE_STATS(dev); 1883 int i; 1884 1885 /* 1886 * Cancel status handler 
static int
eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);
	int i;

	/*
	 * Cancel the statistics update alarm handler, since it also reads
	 * the queue statistics registers.
	 */
	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* Read the statistics registers */
	igc_read_queue_stats_register(dev);
	igc_read_stats_registers(hw, stats);

	if (rte_stats == NULL) {
		/* Restart the queue statistics update alarm */
		rte_eal_alarm_set(IGC_ALARM_INTERVAL,
				igc_update_queue_stats_handler, dev);
		return -EINVAL;
	}

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs + stats->rlec +
			stats->rxerrc + stats->algnerrc;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes = stats->gorc;
	rte_stats->obytes = stats->gotc;

	/* Get per-queue statistics */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		/* Get TX queue statistics */
		int map_id = igc->txq_stats_map[i];
		if (map_id >= 0) {
			rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
			rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
		}
		/* Get RX queue statistics */
		map_id = igc->rxq_stats_map[i];
		if (map_id >= 0) {
			rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
			rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
			rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
		}
	}

	/* Restart the queue statistics update alarm */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
	return 0;
}
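/*
 * Usage sketch (application side, not part of this driver): mapping a queue
 * to a statistics counter and reading it back through the generic ethdev API
 * ends up in eth_igc_queue_stats_mapping_set() and eth_igc_stats_get().
 * "port_id" is assumed to be a started igc port; error handling is omitted.
 *
 *	struct rte_eth_stats stats;
 *
 *	rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0);
 *	rte_eth_dev_set_tx_queue_stats_mapping(port_id, 0, 0);
 *	if (rte_eth_stats_get(port_id, &stats) == 0) {
 *		uint64_t rxq0_pkts = stats.q_ipackets[0];
 *		uint64_t txq0_pkts = stats.q_opackets[0];
 *	}
 */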
static int
eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats =
			IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	igc_read_stats_registers(hw, hw_stats);

	if (n < IGC_NB_XSTATS)
		return IGC_NB_XSTATS;

	/* If this call is a reset, xstats is NULL and the registers have
	 * already been cleared by reading them above.
	 */
	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i < IGC_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igc_stats_strings[i].offset);
	}

	return IGC_NB_XSTATS;
}

static int
eth_igc_xstats_reset(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);

	/* Cancel the queue statistics update alarm to avoid a conflict */
	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/*
	 * The per-queue registers are reset by writing zero; the remaining
	 * statistics registers are cleared by reading them.
	 */
	igc_reset_queue_stats_register(hw);
	igc_read_stats_registers(hw, hw_stats);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
	memset(queue_stats, 0, sizeof(*queue_stats));

	/* Restart the queue statistics update alarm */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
			dev);

	return 0;
}

static int
eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int size)
{
	unsigned int i;

	if (xstats_names == NULL)
		return IGC_NB_XSTATS;

	if (size < IGC_NB_XSTATS) {
		PMD_DRV_LOG(ERR, "not enough buffers!");
		return IGC_NB_XSTATS;
	}

	for (i = 0; i < IGC_NB_XSTATS; i++)
		strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
			sizeof(xstats_names[i].name));

	return IGC_NB_XSTATS;
}

static int
eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i;

	if (!ids)
		return eth_igc_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= IGC_NB_XSTATS) {
			PMD_DRV_LOG(ERR, "id value isn't valid");
			return -EINVAL;
		}
		strlcpy(xstats_names[i].name,
			rte_igc_stats_strings[ids[i]].name,
			sizeof(xstats_names[i].name));
	}
	return limit;
}
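/*
 * Usage sketch (application side, not part of this driver): the two-call
 * pattern the xstats callbacks above and below are written for - first query
 * the number of counters, then fetch names and values. xstats[i].value then
 * belongs to the counter called names[xstats[i].id].name. Allocation checks
 * and freeing are omitted for brevity; "port_id" is assumed valid.
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xstats = malloc(sizeof(*xstats) * nb);
 *	struct rte_eth_xstat_name *names = malloc(sizeof(*names) * nb);
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, xstats, nb);
 */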
static int
eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
	uint64_t *values, unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	igc_read_stats_registers(hw, hw_stats);

	if (!ids) {
		if (n < IGC_NB_XSTATS)
			return IGC_NB_XSTATS;

		/* If this call is a reset, values is NULL and the registers
		 * have already been cleared by reading them above.
		 */
		if (!values)
			return 0;

		/* Extended stats */
		for (i = 0; i < IGC_NB_XSTATS; i++)
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[i].offset);

		return IGC_NB_XSTATS;

	} else {
		for (i = 0; i < n; i++) {
			if (ids[i] >= IGC_NB_XSTATS) {
				PMD_DRV_LOG(ERR, "id value isn't valid");
				return -EINVAL;
			}
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[ids[i]].offset);
		}
		return n;
	}
}

static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);

	/* check that the queue id is valid */
	if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
		PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
			queue_id, IGC_QUEUE_PAIRS_NUM - 1);
		return -EINVAL;
	}

	/* store the queue-to-statistics-counter mapping */
	if (is_rx)
		igc->rxq_stats_map[queue_id] = stat_idx;
	else
		igc->txq_stats_map[queue_id] = stat_idx;

	return 0;
}

static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMC, mask);
	IGC_WRITE_FLUSH(hw);

	return 0;
}

static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMS, mask);
	IGC_WRITE_FLUSH(hw);

	rte_intr_enable(intr_handle);

	return 0;
}
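/*
 * Usage sketch (application side, not part of this driver): the EIMS/EIMC
 * writes above back the generic RX interrupt control API. A polling loop
 * typically re-arms the queue interrupt before sleeping and masks it again
 * once traffic resumes. "port_id"/"queue_id" are assumed valid and the
 * device must have been configured with intr_conf.rxq set.
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	(wait for the interrupt event, e.g. via rte_epoll_wait)
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	(resume polling with rte_eth_rx_burst)
 */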
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to the actual setting
	 * of the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	if (ctrl & IGC_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & IGC_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;
	int err;

	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;

	rx_buf_size = igc_get_rx_buffer_size(hw);
	PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* Reserve at least one Ethernet frame for the high watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if (fc_conf->high_water > max_high_water ||
		fc_conf->high_water < fc_conf->low_water) {
		PMD_DRV_LOG(ERR,
			"Incorrect high(%u)/low(%u) water value, max is %u",
			fc_conf->high_water, fc_conf->low_water,
			max_high_water);
		return -EINVAL;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		hw->fc.requested_mode = igc_fc_none;
		break;
	case RTE_FC_RX_PAUSE:
		hw->fc.requested_mode = igc_fc_rx_pause;
		break;
	case RTE_FC_TX_PAUSE:
		hw->fc.requested_mode = igc_fc_tx_pause;
		break;
	case RTE_FC_FULL:
		hw->fc.requested_mode = igc_fc_full;
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
		return -EINVAL;
	}

	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = igc_setup_link_generic(hw);
	if (err == IGC_SUCCESS) {
		/*
		 * Check whether MAC control frames should be forwarded.
		 * The shared code has no native capability for this, so
		 * write the register directly.
		 */
		rctl = IGC_READ_REG(hw, IGC_RCTL);

		/* set or clear the RCTL.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= IGC_RCTL_PMCF;
		else
			rctl &= ~IGC_RCTL_PMCF;

		IGC_WRITE_REG(hw, IGC_RCTL, rctl);
		IGC_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
	return -EIO;
}
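/*
 * Usage sketch (application side, not part of this driver): a read-modify-
 * write through the generic flow control API, which lands in
 * eth_igc_flow_ctrl_get()/eth_igc_flow_ctrl_set() above. The values are
 * illustrative only and must respect the watermark limits checked above.
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *	fc_conf.mode = RTE_FC_FULL;
 *	fc_conf.pause_time = IGC_FC_PAUSE_TIME;
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */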
static int
eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The configured RSS redirection table size (%d) doesn't match the size supported by hardware (%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* set redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta, reg;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* skip this register if there is nothing to update */
		if (!mask ||
			shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* check whether the current register value must be read first */
		if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
			reg.dword = 0;
		else
			reg.dword = IGC_READ_REG_LE_VALUE(hw,
					IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));

		/* update the register */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta.bytes[j] =
					(uint8_t)reta_conf[idx].reta[shift + j];
			else
				reta.bytes[j] = reg.bytes[j];
		}
		IGC_WRITE_REG_LE_VALUE(hw,
			IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
	}

	return 0;
}

static int
eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The configured RSS redirection table size (%d) doesn't match the size supported by hardware (%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* read redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* skip this register if none of its entries are requested */
		if (!mask ||
			shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* read the register and extract the queue indices */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		reta.dword = IGC_READ_REG_LE_VALUE(hw,
				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta_conf[idx].reta[shift + j] = reta.bytes[j];
		}
	}

	return 0;
}

static int
eth_igc_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_hw_rss_hash_set(hw, rss_conf);
	return 0;
}
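/*
 * Usage sketch (application side, not part of this driver): spreading the
 * 128 redirection entries handled above across "nb_rxq" queues in a
 * round-robin fashion; "port_id" and "nb_rxq" are illustrative.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
 *						RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, ETH_RSS_RETA_SIZE_128);
 */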
static int
eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t rss_hf;

	if (hash_key != NULL) {
		int i;

		/* the caller's buffer must match the hardware key size */
		if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
			PMD_DRV_LOG(ERR,
				"RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
				rss_conf->rss_key_len, IGC_HKEY_SIZE);
			return -EINVAL;
		}

		/* read the RSS key from the registers */
		for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
			hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
	}

	/* get the RSS functions configured in the MRQC register */
	mrqc = IGC_READ_REG(hw, IGC_MRQC);
	if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
		return 0;

	rss_hf = 0;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
		rss_hf |= ETH_RSS_IPV4;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
		rss_hf |= ETH_RSS_IPV6;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
		rss_hf |= ETH_RSS_IPV6_UDP_EX;

	rss_conf->rss_hf |= rss_hf;
	return 0;
}

static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
	vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
	vfta = shadow_vfta->vfta[vid_idx];
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
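/*
 * Usage sketch (application side, not part of this driver): allowing a single
 * VLAN through the hardware filter programmed above. With 32 VLAN IDs per
 * 32-bit VFTA entry, VLAN ID 100 lands in entry 3 (100 >> 5), bit 4
 * (100 & 0x1f). The VLAN filter offload must also be enabled.
 *
 *	rte_eth_dev_set_vlan_offload(port_id,
 *		rte_eth_dev_get_vlan_offload(port_id) | ETH_VLAN_FILTER_OFFLOAD);
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);
 */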
static void
igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_read_reg_check_clear_bits(hw, IGC_RCTL,
			IGC_RCTL_CFIEN | IGC_RCTL_VFE);
}

static void
igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t reg_val;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg_val = IGC_READ_REG(hw, IGC_RCTL);
	reg_val &= ~IGC_RCTL_CFIEN;
	reg_val |= IGC_RCTL_VFE;
	IGC_WRITE_REG(hw, IGC_RCTL, reg_val);

	/* restore VFTA table */
	for (i = 0; i < IGC_VFTA_SIZE; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
}

static void
igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static void
igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static int
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* nothing to do if extended VLAN is already disabled */
	if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
		return 0;

	if ((dev->data->dev_conf.rxmode.offloads &
		DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
		goto write_ext_vlan;

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
		RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
			dev->data->dev_conf.rxmode.max_rx_pkt_len,
			VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
		return -EINVAL;
	}
	dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

write_ext_vlan:
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

static int
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* nothing to do if extended VLAN is already enabled */
	if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
		return 0;

	if ((dev->data->dev_conf.rxmode.offloads &
		DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
		goto write_ext_vlan;

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
		MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
			VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

write_ext_vlan:
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

static int
eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			igc_vlan_hw_strip_enable(dev);
		else
			igc_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			igc_vlan_hw_filter_enable(dev);
		else
			igc_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			return igc_vlan_hw_extend_enable(dev);
		else
			return igc_vlan_hw_extend_disable(dev);
	}

	return 0;
}

static int
eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t reg_val;

	/* only the outer TPID of a double VLAN can be configured */
	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		reg_val = IGC_READ_REG(hw, IGC_VET);
		reg_val = (reg_val & (~IGC_VET_EXT)) |
			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
		IGC_WRITE_REG(hw, IGC_VET, reg_val);

		return 0;
	}

	/* all other TPID values are read-only */
	PMD_DRV_LOG(ERR, "Not supported");
	return -ENOTSUP;
}

static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct igc_adapter), eth_igc_dev_init);
}

static int
eth_igc_pci_remove(struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
}

static struct rte_pci_driver rte_igc_pmd = {
	.id_table = pci_id_igc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igc_pci_probe,
	.remove = eth_igc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");