/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
#include "txgbe_regs_group.h"

static const struct reg_info txgbe_regs_general[] = {
	{TXGBE_RST, 1, 1, "TXGBE_RST"},
	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static struct reg_info txgbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_dcb[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *txgbe_regs_others[] = {
	txgbe_regs_general,
	txgbe_regs_nvm,
	txgbe_regs_interrupt,
	txgbe_regs_fctl_others,
	txgbe_regs_rxdma,
	txgbe_regs_rx,
	txgbe_regs_tx,
	txgbe_regs_wakeup,
	txgbe_regs_dcb,
	txgbe_regs_mac,
	txgbe_regs_diagnostic,
	NULL};

static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

static int txgbe_filter_restore(struct rte_eth_dev *dev);
static void
txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);

#define TXGBE_SET_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define TXGBE_GET_HWSTRIP(h, q, r) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* Flow Director */
	HW_XSTAT(flow_director_added_filters),
	HW_XSTAT(flow_director_removed_filters),
	HW_XSTAT(flow_director_filter_add_errors),
	HW_XSTAT(flow_director_filter_remove_errors),
	HW_XSTAT(flow_director_matched_filters),
	HW_XSTAT(flow_director_missed_filters),

	/* FCoE */
	HW_XSTAT(rx_fcoe_crc_errors),
	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
	HW_XSTAT(rx_fcoe_dropped),
	HW_XSTAT(rx_fcoe_packets),
	HW_XSTAT(tx_fcoe_packets),
	HW_XSTAT(rx_fcoe_bytes),
	HW_XSTAT(tx_fcoe_bytes),
	HW_XSTAT(rx_fcoe_no_ddp),
	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),

	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
			   sizeof(rte_txgbe_stats_strings[0]))

/* Per-priority statistics */
#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
	UP_XSTAT(rx_up_packets),
	UP_XSTAT(tx_up_packets),
	UP_XSTAT(rx_up_bytes),
	UP_XSTAT(tx_up_bytes),
	UP_XSTAT(rx_up_drop_packets),

	UP_XSTAT(tx_up_xon_packets),
	UP_XSTAT(rx_up_xon_packets),
	UP_XSTAT(tx_up_xoff_packets),
	UP_XSTAT(rx_up_xoff_packets),
	UP_XSTAT(rx_up_dropped),
	UP_XSTAT(rx_up_mbuf_alloc_errors),
	UP_XSTAT(tx_up_xon2off_packets),
};

#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
			   sizeof(rte_txgbe_up_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
			   sizeof(rte_txgbe_qp_strings[0]))

static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:
		return 1;
	default:
		return 0;
	}
}

static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
	txgbe_flush(hw);

	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}

static int
txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_stat_mappings *stat_mappings =
		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if (hw->mac.type != txgbe_mac_raptor)
		return -ENOSYS;

	if (stat_idx & !QMAP_FIELD_RESERVED_BITS_MASK)
		return -EIO;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= TXGBE_NB_STAT_MAPPING) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsm[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsm[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
	return 0;
}

static void
txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
{
	int i;
	u8 bwgp;
	struct txgbe_dcb_tc_config *tc;

	UNREFERENCED_PARAMETER(hw);

	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->pfc = txgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
	}
	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities */
	dcb_config->support.capabilities = 0xFF;
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
txgbe_swfw_lock_reset(struct txgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These locks are trickier since they are common to all ports; but
	 * swfw_sync retries for long enough (1s) to be almost sure that if
	 * the lock cannot be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = TXGBE_MNGSEM_SWPHY |
	       TXGBE_MNGSEM_SWMBX |
	       TXGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	uint16_t csum;
	int err, i, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->rx_queue_count = txgbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not be initialized by the primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
		16, TXGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process */
	if (txgbe_ipsec_ctx_create(eth_dev))
		return -ENOMEM;
#endif

	/* Initialize DCB configuration */
	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
	txgbe_dcb_init(hw, dcb_config);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = txgbe_fc_full;
	hw->fc.current_mode = txgbe_fc_full;
	hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
		hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
	}
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);

	/*
	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in txgbe_identify_phy() for all devices,
	 * but for non-copper devices, txgbe_identify_sfp_module() is
	 * also called. See txgbe_identify_phy(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the bios. A delay of 200ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		rte_delay_ms(200);
		err = hw->mac.init_hw(hw);
	}

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
		err = 0;

	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM.  Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	}
	if (err) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	txgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	txgbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
		RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = txgbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
	txgbe_flush(hw);

	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   txgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	txgbe_enable_intr(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct txgbe_filter_info));

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* initialize flow director filter list & hash */
	txgbe_fdir_filter_init(eth_dev);

	/* initialize l2 tunnel filter list & hash */
	txgbe_l2_tn_filter_init(eth_dev);

	/* initialize flow filter lists */
	txgbe_filterlist_init();

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));

	/* initialize Traffic Manager configuration */
	txgbe_tm_conf_init(eth_dev);

	return 0;
}

static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbe_dev_close(eth_dev);

	return 0;
}

static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
	struct txgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple,
			     entries);
		rte_free(p_5tuple);
	}
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);

	return 0;
}

static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
	struct txgbe_fdir_filter *fdir_filter;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_handle)
		rte_hash_free(fdir_info->hash_handle);

	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	return 0;
}

static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
	struct txgbe_l2_tn_filter *l2_tn_filter;

	if (l2_tn_info->hash_map)
		rte_free(l2_tn_info->hash_map);
	if (l2_tn_info->hash_handle)
		rte_hash_free(l2_tn_info->hash_handle);

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
			     l2_tn_filter,
			     entries);
		rte_free(l2_tn_filter);
	}

	return 0;
}

static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = TXGBE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct txgbe_atr_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", TDEV_NAME(eth_dev));
	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("txgbe",
					  sizeof(struct txgbe_fdir_filter *) *
					  TXGBE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		return -ENOMEM;
	}
	fdir_info->mask_added = FALSE;

	return 0;
}

static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters l2_tn_hash_params = {
		.name = l2_tn_hash_name,
		.entries = TXGBE_MAX_L2_TN_FILTER_NUM,
		.key_len = sizeof(struct txgbe_l2_tn_key),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&l2_tn_info->l2_tn_list);
	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
		 "l2_tn_%s", TDEV_NAME(eth_dev));
	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
	if (!l2_tn_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
		return -EINVAL;
	}
	l2_tn_info->hash_map = rte_zmalloc("txgbe",
					   sizeof(struct txgbe_l2_tn_filter *) *
					   TXGBE_MAX_L2_TN_FILTER_NUM,
					   0);
	if (!l2_tn_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for L2 TN hash map!");
		return -ENOMEM;
	}
	l2_tn_info->e_tag_en = FALSE;
	l2_tn_info->e_tag_fwd_en = FALSE;
	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;

	return 0;
}

static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					       &eth_da);
		if (retval)
			return retval;
	} else {
		memset(&eth_da, 0, sizeof(eth_da));
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
				    sizeof(struct txgbe_adapter),
				    eth_dev_pci_specific_init, pci_dev,
				    eth_txgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	return 0;
}

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return -ENODEV;

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};

static int
txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

static void
txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		txgbe_vlan_hw_strip_enable(dev, queue);
	else
		txgbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			!(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg |= TXGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg &= ~TXGBE_RXCFG_VLAN;
	}
	rxcfg &= ~TXGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		txgbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
		txgbe_dev_rx_queue_start(dev, queue);
	}
}

static int
txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, TXGBE_PORTCTL);

	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR, "Inner type is not supported"
				    " by single VLAN");
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_LSB_MASK,
				TXGBE_TAGTPID_LSB(tpid));
		}
		break;
	case ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, TXGBE_EXTAG,
				TXGBE_EXTAG_VLAN_MASK,
				TXGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_MSB_MASK,
				TXGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);
}

void
txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
	vlnctrl |= TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
	struct txgbe_rx_queue *rxq;

	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		TXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = PKT_RX_VLAN;
		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl &= ~TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record this setting for HW strip per queue */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl |= TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record this setting for HW strip per queue */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
	ctrl &= ~TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}

static void
txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl |= TXGBE_PORTCTL_VLANEXT;
	if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
	    txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ctrl |= TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}

void
txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct txgbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			txgbe_vlan_strip_queue_set(dev, i, 1);
		else
			txgbe_vlan_strip_queue_set(dev, i, 0);
	}
}

void
txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct txgbe_rx_queue *rxq;

	if (mask & ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK)
		txgbe_vlan_hw_strip_config(dev);

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			txgbe_vlan_hw_filter_enable(dev);
		else
			txgbe_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			txgbe_vlan_hw_extend_enable(dev);
		else
			txgbe_vlan_hw_extend_disable(dev);
	}

	return 0;
}

static int
txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	txgbe_config_vlan_strip_on_all_queues(dev, mask);

	txgbe_vlan_offload_config(dev, mask);

	return 0;
}

static void
txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);

	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
	wr32(hw, TXGBE_VLANCTL, vlanctrl);
}

static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	switch (nb_rx_q) {
	case 1:
	case 2:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
		break;
	case 4:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
		break;
	default:
		return -EINVAL;
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	return 0;
}

static int
txgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
					" unsupported mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case ETH_MQ_RX_RSS:
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						" invalid queue number"
						" for VMDQ RSS, allowed"
						" values are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if no mq mode was configured, use the default scheme */
			dev->data->dev_conf.rxmode.mq_mode =
				ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode =
				ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" nb_rx_q=%d nb_tx_q=%d queue number"
					" must be less than or equal to %d.",
					nb_rx_q, nb_tx_q,
					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
					  " not supported.");
			return -EINVAL;
		}
		/* check configuration for VMDQ+DCB mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools must be %d or %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
						TXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools != %d and"
						" nb_queue_pools != %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}
	}
	return 0;
}

static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = txgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
	 * allocation Rx preconditions, it will be reset.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	uint32_t gpie;

	gpie = rd32(hw, TXGBE_GPIOINTEN);
	gpie |= TXGBE_GPIOBIT_6;
	wr32(hw, TXGBE_GPIOINTEN, gpie);
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}

int
txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			uint16_t tx_rate, uint64_t q_msk)
{
	struct txgbe_hw *hw;
	struct txgbe_vf_info *vfinfo;
	struct rte_eth_link link;
	uint8_t nb_q_per_pool;
	uint32_t queue_stride;
	uint32_t queue_idx, idx = 0, vf_idx;
	uint32_t queue_end;
	uint16_t total_rate = 0;
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
	if (ret < 0)
		return ret;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (tx_rate > link.link_speed)
		return -EINVAL;

	if (q_msk == 0)
		return 0;

	hw = TXGBE_DEV_HW(dev);
	vfinfo = *(TXGBE_DEV_VFDATA(dev));
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	queue_idx = vf * queue_stride;
	queue_end = queue_idx + nb_q_per_pool - 1;
	if (queue_end >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (vfinfo) {
		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
			if (vf_idx == vf)
				continue;
			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
				idx++)
				total_rate += vfinfo[vf_idx].tx_rate[idx];
		}
	} else {
		return -EINVAL;
	}

	/* Store tx_rate for this vf. */
	for (idx = 0; idx < nb_q_per_pool; idx++) {
		if (((uint64_t)0x1 << idx) & q_msk) {
			if (vfinfo[vf].tx_rate[idx] != tx_rate)
				vfinfo[vf].tx_rate[idx] = tx_rate;
			total_rate += tx_rate;
		}
	}

	if (total_rate > dev->data->dev_link.link_speed) {
		/* Reset the stored TX rate of the VF if it would exceed
		 * the link speed.
		 */
		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
		return -EINVAL;
	}

	/* Set ARBTXRATE of each queue/pool for vf X */
	for (; queue_idx <= queue_end; queue_idx++) {
		if (0x1 & q_msk)
			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
		q_msk = q_msk >> 1;
	}

	return 0;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
txgbe_dev_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = 0;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint16_t vf, idx;
	uint32_t *link_speeds;
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

	PMD_INIT_FUNC_TRACE();

	/* TXGBE devices don't support:
	 * - half duplex (checked afterwards for valid speeds)
	 * - fixed speed: TODO implement
	 */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
			"Invalid link_speeds for port %u, fixed speed not supported",
			dev->data->port_id);
		return -EINVAL;
	}

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	txgbe_stop_hw(hw);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = txgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	txgbe_pf_host_configure(dev);

	txgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	txgbe_configure_msix(dev);

	/* initialize transmission unit */
	txgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = txgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	err = txgbe_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		txgbe_vmdq_vlan_hw_filter_enable(dev);
	}

	/* Configure DCB hw */
	txgbe_configure_pb(dev);
	txgbe_configure_port(dev);
	txgbe_configure_dcb(dev);

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		err = txgbe_fdir_configure(dev);
		if (err)
			goto error;
	}

	/* Restore vf rate limit */
	if (vfinfo != NULL) {
		for (vf = 0; vf < pci_dev->max_vfs; vf++)
			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					txgbe_set_vf_rate_limit(dev, vf,
						vfinfo[vf].tx_rate[idx],
						1 << idx);
	}

	err = txgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.setup_sfp(hw);
		if (err)
			goto error;
	}

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	dev->data->dev_link.link_status = link_up;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_10G;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed = (TXGBE_LINK_SPEED_100M_FULL |
			 TXGBE_LINK_SPEED_1GB_FULL |
			 TXGBE_LINK_SPEED_10GB_FULL);
	} else {
		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= TXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= TXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= TXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= TXGBE_LINK_SPEED_100M_FULL;
	}

	err = hw->mac.setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
		txgbe_dev_macsec_interrupt_setup(dev);
		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     txgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		txgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	txgbe_enable_intr(dev);
	txgbe_l2_tunnel_conf(dev);
	txgbe_filter_restore(dev);

	if (tm_conf->root && !tm_conf->committed)
		PMD_DRV_LOG(WARNING,
			    "please call hierarchy_commit() "
			    "before starting the port");

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	txgbe_dev_link_update(dev, 0);

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	txgbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
txgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int vf;
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	txgbe_disable_intr(hw);

	/* reset the NIC */
	txgbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	txgbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
	}

	txgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   txgbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	/* reset hierarchy commit */
	tm_conf->committed = false;

	adapter->rss_reta_updated = 0;
	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}

/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

/*
 * Reset and stop device.
1905 */ 1906 static int 1907 txgbe_dev_close(struct rte_eth_dev *dev) 1908 { 1909 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 1910 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 1911 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1912 int retries = 0; 1913 int ret; 1914 1915 PMD_INIT_FUNC_TRACE(); 1916 1917 txgbe_pf_reset_hw(hw); 1918 1919 ret = txgbe_dev_stop(dev); 1920 1921 txgbe_dev_free_queues(dev); 1922 1923 /* reprogram the RAR[0] in case user changed it. */ 1924 txgbe_set_rar(hw, 0, hw->mac.addr, 0, true); 1925 1926 /* Unlock any pending hardware semaphore */ 1927 txgbe_swfw_lock_reset(hw); 1928 1929 /* disable uio intr before callback unregister */ 1930 rte_intr_disable(intr_handle); 1931 1932 do { 1933 ret = rte_intr_callback_unregister(intr_handle, 1934 txgbe_dev_interrupt_handler, dev); 1935 if (ret >= 0 || ret == -ENOENT) { 1936 break; 1937 } else if (ret != -EAGAIN) { 1938 PMD_INIT_LOG(ERR, 1939 "intr callback unregister failed: %d", 1940 ret); 1941 } 1942 rte_delay_ms(100); 1943 } while (retries++ < (10 + TXGBE_LINK_UP_TIME)); 1944 1945 /* cancel the delay handler before remove dev */ 1946 rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev); 1947 1948 /* uninitialize PF if max_vfs not zero */ 1949 txgbe_pf_host_uninit(dev); 1950 1951 rte_free(dev->data->mac_addrs); 1952 dev->data->mac_addrs = NULL; 1953 1954 rte_free(dev->data->hash_mac_addrs); 1955 dev->data->hash_mac_addrs = NULL; 1956 1957 /* remove all the fdir filters & hash */ 1958 txgbe_fdir_filter_uninit(dev); 1959 1960 /* remove all the L2 tunnel filters & hash */ 1961 txgbe_l2_tn_filter_uninit(dev); 1962 1963 /* Remove all ntuple filters of the device */ 1964 txgbe_ntuple_filter_uninit(dev); 1965 1966 /* clear all the filters list */ 1967 txgbe_filterlist_flush(); 1968 1969 /* Remove all Traffic Manager configuration */ 1970 txgbe_tm_conf_uninit(dev); 1971 1972 #ifdef RTE_LIB_SECURITY 1973 rte_free(dev->security_ctx); 1974 #endif 1975 1976 return ret; 1977 } 1978 1979 /* 1980 * Reset PF device. 1981 */ 1982 static int 1983 txgbe_dev_reset(struct rte_eth_dev *dev) 1984 { 1985 int ret; 1986 1987 /* When a DPDK PMD PF begin to reset PF port, it should notify all 1988 * its VF to make them align with it. The detailed notification 1989 * mechanism is PMD specific. As to txgbe PF, it is rather complex. 1990 * To avoid unexpected behavior in VF, currently reset of PF with 1991 * SR-IOV activation is not supported. It might be supported later. 
1992 */ 1993 if (dev->data->sriov.active) 1994 return -ENOTSUP; 1995 1996 ret = eth_txgbe_dev_uninit(dev); 1997 if (ret) 1998 return ret; 1999 2000 ret = eth_txgbe_dev_init(dev, NULL); 2001 2002 return ret; 2003 } 2004 2005 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \ 2006 { \ 2007 uint32_t current_counter = rd32(hw, reg); \ 2008 if (current_counter < last_counter) \ 2009 current_counter += 0x100000000LL; \ 2010 if (!hw->offset_loaded) \ 2011 last_counter = current_counter; \ 2012 counter = current_counter - last_counter; \ 2013 counter &= 0xFFFFFFFFLL; \ 2014 } 2015 2016 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2017 { \ 2018 uint64_t current_counter_lsb = rd32(hw, reg_lsb); \ 2019 uint64_t current_counter_msb = rd32(hw, reg_msb); \ 2020 uint64_t current_counter = (current_counter_msb << 32) | \ 2021 current_counter_lsb; \ 2022 if (current_counter < last_counter) \ 2023 current_counter += 0x1000000000LL; \ 2024 if (!hw->offset_loaded) \ 2025 last_counter = current_counter; \ 2026 counter = current_counter - last_counter; \ 2027 counter &= 0xFFFFFFFFFLL; \ 2028 } 2029 2030 void 2031 txgbe_read_stats_registers(struct txgbe_hw *hw, 2032 struct txgbe_hw_stats *hw_stats) 2033 { 2034 unsigned int i; 2035 2036 /* QP Stats */ 2037 for (i = 0; i < hw->nb_rx_queues; i++) { 2038 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i), 2039 hw->qp_last[i].rx_qp_packets, 2040 hw_stats->qp[i].rx_qp_packets); 2041 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i), 2042 hw->qp_last[i].rx_qp_bytes, 2043 hw_stats->qp[i].rx_qp_bytes); 2044 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i), 2045 hw->qp_last[i].rx_qp_mc_packets, 2046 hw_stats->qp[i].rx_qp_mc_packets); 2047 } 2048 2049 for (i = 0; i < hw->nb_tx_queues; i++) { 2050 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i), 2051 hw->qp_last[i].tx_qp_packets, 2052 hw_stats->qp[i].tx_qp_packets); 2053 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i), 2054 hw->qp_last[i].tx_qp_bytes, 2055 hw_stats->qp[i].tx_qp_bytes); 2056 } 2057 /* PB Stats */ 2058 for (i = 0; i < TXGBE_MAX_UP; i++) { 2059 hw_stats->up[i].rx_up_xon_packets += 2060 rd32(hw, TXGBE_PBRXUPXON(i)); 2061 hw_stats->up[i].rx_up_xoff_packets += 2062 rd32(hw, TXGBE_PBRXUPXOFF(i)); 2063 hw_stats->up[i].tx_up_xon_packets += 2064 rd32(hw, TXGBE_PBTXUPXON(i)); 2065 hw_stats->up[i].tx_up_xoff_packets += 2066 rd32(hw, TXGBE_PBTXUPXOFF(i)); 2067 hw_stats->up[i].tx_up_xon2off_packets += 2068 rd32(hw, TXGBE_PBTXUPOFF(i)); 2069 hw_stats->up[i].rx_up_dropped += 2070 rd32(hw, TXGBE_PBRXMISS(i)); 2071 } 2072 hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON); 2073 hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF); 2074 hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON); 2075 hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF); 2076 2077 /* DMA Stats */ 2078 hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT); 2079 hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT); 2080 2081 hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL); 2082 hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL); 2083 hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP); 2084 hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP); 2085 2086 /* MAC Stats */ 2087 hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL); 2088 hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL); 2089 hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL); 2090 2091 hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL); 2092 hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL); 2093 
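	/*
	 * Note on the UPDATE_QP_COUNTER_32bit/36bit macros used above: the
	 * per-queue hardware counters are free running, so a current value
	 * smaller than the previous snapshot means the register wrapped and
	 * one full wrap interval must be added back before subtracting.
	 * A minimal sketch of the same idea as a helper (illustrative only,
	 * not part of the driver; 'wrap' is 1ULL << 32 or 1ULL << 36):
	 *
	 *	static inline uint64_t
	 *	counter_delta(uint64_t cur, uint64_t last, uint64_t wrap)
	 *	{
	 *		if (cur < last)
	 *			cur += wrap;
	 *		return (cur - last) & (wrap - 1);
	 *	}
	 */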
hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL); 2094 2095 hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL); 2096 hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL); 2097 2098 hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L); 2099 hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L); 2100 hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L); 2101 hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L); 2102 hw_stats->rx_size_512_to_1023_packets += 2103 rd64(hw, TXGBE_MACRX512TO1023L); 2104 hw_stats->rx_size_1024_to_max_packets += 2105 rd64(hw, TXGBE_MACRX1024TOMAXL); 2106 hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L); 2107 hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L); 2108 hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L); 2109 hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L); 2110 hw_stats->tx_size_512_to_1023_packets += 2111 rd64(hw, TXGBE_MACTX512TO1023L); 2112 hw_stats->tx_size_1024_to_max_packets += 2113 rd64(hw, TXGBE_MACTX1024TOMAXL); 2114 2115 hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL); 2116 hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE); 2117 hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER); 2118 2119 /* MNG Stats */ 2120 hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS); 2121 hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC); 2122 hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG); 2123 hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG); 2124 2125 /* FCoE Stats */ 2126 hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC); 2127 hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST); 2128 hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC); 2129 hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC); 2130 hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC); 2131 hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC); 2132 hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC); 2133 2134 /* Flow Director Stats */ 2135 hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH); 2136 hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS); 2137 hw_stats->flow_director_added_filters += 2138 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED)); 2139 hw_stats->flow_director_removed_filters += 2140 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED)); 2141 hw_stats->flow_director_filter_add_errors += 2142 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL)); 2143 hw_stats->flow_director_filter_remove_errors += 2144 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL)); 2145 2146 /* MACsec Stats */ 2147 hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT); 2148 hw_stats->tx_macsec_pkts_encrypted += 2149 rd32(hw, TXGBE_LSECTX_ENCPKT); 2150 hw_stats->tx_macsec_pkts_protected += 2151 rd32(hw, TXGBE_LSECTX_PROTPKT); 2152 hw_stats->tx_macsec_octets_encrypted += 2153 rd32(hw, TXGBE_LSECTX_ENCOCT); 2154 hw_stats->tx_macsec_octets_protected += 2155 rd32(hw, TXGBE_LSECTX_PROTOCT); 2156 hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT); 2157 hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT); 2158 hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT); 2159 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT); 2160 hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT); 2161 hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT); 2162 
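	/* The per-SA MACsec counters further below are accumulated over the
	 * two SA register instances (index 0 and 1).
	 */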
hw_stats->rx_macsec_sc_pkts_unchecked += 2163 rd32(hw, TXGBE_LSECRX_UNCHKPKT); 2164 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT); 2165 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT); 2166 for (i = 0; i < 2; i++) { 2167 hw_stats->rx_macsec_sa_pkts_ok += 2168 rd32(hw, TXGBE_LSECRX_OKPKT(i)); 2169 hw_stats->rx_macsec_sa_pkts_invalid += 2170 rd32(hw, TXGBE_LSECRX_INVPKT(i)); 2171 hw_stats->rx_macsec_sa_pkts_notvalid += 2172 rd32(hw, TXGBE_LSECRX_BADPKT(i)); 2173 } 2174 hw_stats->rx_macsec_sa_pkts_unusedsa += 2175 rd32(hw, TXGBE_LSECRX_INVSAPKT); 2176 hw_stats->rx_macsec_sa_pkts_notusingsa += 2177 rd32(hw, TXGBE_LSECRX_BADSAPKT); 2178 2179 hw_stats->rx_total_missed_packets = 0; 2180 for (i = 0; i < TXGBE_MAX_UP; i++) { 2181 hw_stats->rx_total_missed_packets += 2182 hw_stats->up[i].rx_up_dropped; 2183 } 2184 } 2185 2186 static int 2187 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 2188 { 2189 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2190 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2191 struct txgbe_stat_mappings *stat_mappings = 2192 TXGBE_DEV_STAT_MAPPINGS(dev); 2193 uint32_t i, j; 2194 2195 txgbe_read_stats_registers(hw, hw_stats); 2196 2197 if (stats == NULL) 2198 return -EINVAL; 2199 2200 /* Fill out the rte_eth_stats statistics structure */ 2201 stats->ipackets = hw_stats->rx_packets; 2202 stats->ibytes = hw_stats->rx_bytes; 2203 stats->opackets = hw_stats->tx_packets; 2204 stats->obytes = hw_stats->tx_bytes; 2205 2206 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets)); 2207 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets)); 2208 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes)); 2209 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes)); 2210 memset(&stats->q_errors, 0, sizeof(stats->q_errors)); 2211 for (i = 0; i < TXGBE_MAX_QP; i++) { 2212 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG; 2213 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8; 2214 uint32_t q_map; 2215 2216 q_map = (stat_mappings->rqsm[n] >> offset) 2217 & QMAP_FIELD_RESERVED_BITS_MASK; 2218 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS 2219 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS); 2220 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets; 2221 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes; 2222 2223 q_map = (stat_mappings->tqsm[n] >> offset) 2224 & QMAP_FIELD_RESERVED_BITS_MASK; 2225 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS 2226 ? 
q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS); 2227 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets; 2228 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes; 2229 } 2230 2231 /* Rx Errors */ 2232 stats->imissed = hw_stats->rx_total_missed_packets + 2233 hw_stats->rx_dma_drop; 2234 stats->ierrors = hw_stats->rx_crc_errors + 2235 hw_stats->rx_mac_short_packet_dropped + 2236 hw_stats->rx_length_errors + 2237 hw_stats->rx_undersize_errors + 2238 hw_stats->rx_oversize_errors + 2239 hw_stats->rx_drop_packets + 2240 hw_stats->rx_illegal_byte_errors + 2241 hw_stats->rx_error_bytes + 2242 hw_stats->rx_fragment_errors + 2243 hw_stats->rx_fcoe_crc_errors + 2244 hw_stats->rx_fcoe_mbuf_allocation_errors; 2245 2246 /* Tx Errors */ 2247 stats->oerrors = 0; 2248 return 0; 2249 } 2250 2251 static int 2252 txgbe_dev_stats_reset(struct rte_eth_dev *dev) 2253 { 2254 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2255 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2256 2257 /* HW registers are cleared on read */ 2258 hw->offset_loaded = 0; 2259 txgbe_dev_stats_get(dev, NULL); 2260 hw->offset_loaded = 1; 2261 2262 /* Reset software totals */ 2263 memset(hw_stats, 0, sizeof(*hw_stats)); 2264 2265 return 0; 2266 } 2267 2268 /* This function calculates the number of xstats based on the current config */ 2269 static unsigned 2270 txgbe_xstats_calc_num(struct rte_eth_dev *dev) 2271 { 2272 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues); 2273 return TXGBE_NB_HW_STATS + 2274 TXGBE_NB_UP_STATS * TXGBE_MAX_UP + 2275 TXGBE_NB_QP_STATS * nb_queues; 2276 } 2277 2278 static inline int 2279 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size) 2280 { 2281 int nb, st; 2282 2283 /* Extended stats from txgbe_hw_stats */ 2284 if (id < TXGBE_NB_HW_STATS) { 2285 snprintf(name, size, "[hw]%s", 2286 rte_txgbe_stats_strings[id].name); 2287 return 0; 2288 } 2289 id -= TXGBE_NB_HW_STATS; 2290 2291 /* Priority Stats */ 2292 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) { 2293 nb = id / TXGBE_NB_UP_STATS; 2294 st = id % TXGBE_NB_UP_STATS; 2295 snprintf(name, size, "[p%u]%s", nb, 2296 rte_txgbe_up_strings[st].name); 2297 return 0; 2298 } 2299 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP; 2300 2301 /* Queue Stats */ 2302 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) { 2303 nb = id / TXGBE_NB_QP_STATS; 2304 st = id % TXGBE_NB_QP_STATS; 2305 snprintf(name, size, "[q%u]%s", nb, 2306 rte_txgbe_qp_strings[st].name); 2307 return 0; 2308 } 2309 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP; 2310 2311 return -(int)(id + 1); 2312 } 2313 2314 static inline int 2315 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset) 2316 { 2317 int nb, st; 2318 2319 /* Extended stats from txgbe_hw_stats */ 2320 if (id < TXGBE_NB_HW_STATS) { 2321 *offset = rte_txgbe_stats_strings[id].offset; 2322 return 0; 2323 } 2324 id -= TXGBE_NB_HW_STATS; 2325 2326 /* Priority Stats */ 2327 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) { 2328 nb = id / TXGBE_NB_UP_STATS; 2329 st = id % TXGBE_NB_UP_STATS; 2330 *offset = rte_txgbe_up_strings[st].offset + 2331 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t)); 2332 return 0; 2333 } 2334 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP; 2335 2336 /* Queue Stats */ 2337 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) { 2338 nb = id / TXGBE_NB_QP_STATS; 2339 st = id % TXGBE_NB_QP_STATS; 2340 *offset = rte_txgbe_qp_strings[st].offset + 2341 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t)); 2342 return 0; 2343 } 2344 2345 return -1; 2346 } 2347 2348 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev, 2349 struct rte_eth_xstat_name 
*xstats_names, unsigned int limit) 2350 { 2351 unsigned int i, count; 2352 2353 count = txgbe_xstats_calc_num(dev); 2354 if (xstats_names == NULL) 2355 return count; 2356 2357 /* Note: limit >= cnt_stats checked upstream 2358 * in rte_eth_xstats_names() 2359 */ 2360 limit = min(limit, count); 2361 2362 /* Extended stats from txgbe_hw_stats */ 2363 for (i = 0; i < limit; i++) { 2364 if (txgbe_get_name_by_id(i, xstats_names[i].name, 2365 sizeof(xstats_names[i].name))) { 2366 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); 2367 break; 2368 } 2369 } 2370 2371 return i; 2372 } 2373 2374 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, 2375 struct rte_eth_xstat_name *xstats_names, 2376 const uint64_t *ids, 2377 unsigned int limit) 2378 { 2379 unsigned int i; 2380 2381 if (ids == NULL) 2382 return txgbe_dev_xstats_get_names(dev, xstats_names, limit); 2383 2384 for (i = 0; i < limit; i++) { 2385 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name, 2386 sizeof(xstats_names[i].name))) { 2387 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); 2388 return -1; 2389 } 2390 } 2391 2392 return i; 2393 } 2394 2395 static int 2396 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 2397 unsigned int limit) 2398 { 2399 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2400 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2401 unsigned int i, count; 2402 2403 txgbe_read_stats_registers(hw, hw_stats); 2404 2405 /* If this is a reset xstats is NULL, and we have cleared the 2406 * registers by reading them. 2407 */ 2408 count = txgbe_xstats_calc_num(dev); 2409 if (xstats == NULL) 2410 return count; 2411 2412 limit = min(limit, txgbe_xstats_calc_num(dev)); 2413 2414 /* Extended stats from txgbe_hw_stats */ 2415 for (i = 0; i < limit; i++) { 2416 uint32_t offset = 0; 2417 2418 if (txgbe_get_offset_by_id(i, &offset)) { 2419 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); 2420 break; 2421 } 2422 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset); 2423 xstats[i].id = i; 2424 } 2425 2426 return i; 2427 } 2428 2429 static int 2430 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values, 2431 unsigned int limit) 2432 { 2433 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2434 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2435 unsigned int i, count; 2436 2437 txgbe_read_stats_registers(hw, hw_stats); 2438 2439 /* If this is a reset xstats is NULL, and we have cleared the 2440 * registers by reading them. 
2441 */ 2442 count = txgbe_xstats_calc_num(dev); 2443 if (values == NULL) 2444 return count; 2445 2446 limit = min(limit, txgbe_xstats_calc_num(dev)); 2447 2448 /* Extended stats from txgbe_hw_stats */ 2449 for (i = 0; i < limit; i++) { 2450 uint32_t offset; 2451 2452 if (txgbe_get_offset_by_id(i, &offset)) { 2453 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); 2454 break; 2455 } 2456 values[i] = *(uint64_t *)(((char *)hw_stats) + offset); 2457 } 2458 2459 return i; 2460 } 2461 2462 static int 2463 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 2464 uint64_t *values, unsigned int limit) 2465 { 2466 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2467 unsigned int i; 2468 2469 if (ids == NULL) 2470 return txgbe_dev_xstats_get_(dev, values, limit); 2471 2472 for (i = 0; i < limit; i++) { 2473 uint32_t offset; 2474 2475 if (txgbe_get_offset_by_id(ids[i], &offset)) { 2476 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); 2477 break; 2478 } 2479 values[i] = *(uint64_t *)(((char *)hw_stats) + offset); 2480 } 2481 2482 return i; 2483 } 2484 2485 static int 2486 txgbe_dev_xstats_reset(struct rte_eth_dev *dev) 2487 { 2488 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2489 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2490 2491 /* HW registers are cleared on read */ 2492 hw->offset_loaded = 0; 2493 txgbe_read_stats_registers(hw, hw_stats); 2494 hw->offset_loaded = 1; 2495 2496 /* Reset software totals */ 2497 memset(hw_stats, 0, sizeof(*hw_stats)); 2498 2499 return 0; 2500 } 2501 2502 static int 2503 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2504 { 2505 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2506 u16 eeprom_verh, eeprom_verl; 2507 u32 etrack_id; 2508 int ret; 2509 2510 hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh); 2511 hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl); 2512 2513 etrack_id = (eeprom_verh << 16) | eeprom_verl; 2514 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); 2515 2516 ret += 1; /* add the size of '\0' */ 2517 if (fw_size < (u32)ret) 2518 return ret; 2519 else 2520 return 0; 2521 } 2522 2523 static int 2524 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 2525 { 2526 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2527 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2528 2529 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 2530 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 2531 dev_info->min_rx_bufsize = 1024; 2532 dev_info->max_rx_pktlen = 15872; 2533 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 2534 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC; 2535 dev_info->max_vfs = pci_dev->max_vfs; 2536 dev_info->max_vmdq_pools = ETH_64_POOLS; 2537 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 2538 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev); 2539 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) | 2540 dev_info->rx_queue_offload_capa); 2541 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev); 2542 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev); 2543 2544 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2545 .rx_thresh = { 2546 .pthresh = TXGBE_DEFAULT_RX_PTHRESH, 2547 .hthresh = TXGBE_DEFAULT_RX_HTHRESH, 2548 .wthresh = TXGBE_DEFAULT_RX_WTHRESH, 2549 }, 2550 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH, 2551 .rx_drop_en = 0, 2552 .offloads = 0, 2553 }; 2554 2555 dev_info->default_txconf = (struct rte_eth_txconf) { 2556 .tx_thresh 
= { 2557 .pthresh = TXGBE_DEFAULT_TX_PTHRESH, 2558 .hthresh = TXGBE_DEFAULT_TX_HTHRESH, 2559 .wthresh = TXGBE_DEFAULT_TX_WTHRESH, 2560 }, 2561 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH, 2562 .offloads = 0, 2563 }; 2564 2565 dev_info->rx_desc_lim = rx_desc_lim; 2566 dev_info->tx_desc_lim = tx_desc_lim; 2567 2568 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 2569 dev_info->reta_size = ETH_RSS_RETA_SIZE_128; 2570 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL; 2571 2572 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; 2573 dev_info->speed_capa |= ETH_LINK_SPEED_100M; 2574 2575 /* Driver-preferred Rx/Tx parameters */ 2576 dev_info->default_rxportconf.burst_size = 32; 2577 dev_info->default_txportconf.burst_size = 32; 2578 dev_info->default_rxportconf.nb_queues = 1; 2579 dev_info->default_txportconf.nb_queues = 1; 2580 dev_info->default_rxportconf.ring_size = 256; 2581 dev_info->default_txportconf.ring_size = 256; 2582 2583 return 0; 2584 } 2585 2586 const uint32_t * 2587 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 2588 { 2589 if (dev->rx_pkt_burst == txgbe_recv_pkts || 2590 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc || 2591 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc || 2592 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc) 2593 return txgbe_get_supported_ptypes(); 2594 2595 return NULL; 2596 } 2597 2598 void 2599 txgbe_dev_setup_link_alarm_handler(void *param) 2600 { 2601 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2602 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2603 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 2604 u32 speed; 2605 bool autoneg = false; 2606 2607 speed = hw->phy.autoneg_advertised; 2608 if (!speed) 2609 hw->mac.get_link_capabilities(hw, &speed, &autoneg); 2610 2611 hw->mac.setup_link(hw, speed, true); 2612 2613 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; 2614 } 2615 2616 /* return 0 means link status changed, -1 means not changed */ 2617 int 2618 txgbe_dev_link_update_share(struct rte_eth_dev *dev, 2619 int wait_to_complete) 2620 { 2621 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2622 struct rte_eth_link link; 2623 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; 2624 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 2625 bool link_up; 2626 int err; 2627 int wait = 1; 2628 2629 memset(&link, 0, sizeof(link)); 2630 link.link_status = ETH_LINK_DOWN; 2631 link.link_speed = ETH_SPEED_NUM_NONE; 2632 link.link_duplex = ETH_LINK_HALF_DUPLEX; 2633 link.link_autoneg = ETH_LINK_AUTONEG; 2634 2635 hw->mac.get_link_status = true; 2636 2637 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG) 2638 return rte_eth_linkstatus_set(dev, &link); 2639 2640 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 2641 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 2642 wait = 0; 2643 2644 err = hw->mac.check_link(hw, &link_speed, &link_up, wait); 2645 2646 if (err != 0) { 2647 link.link_speed = ETH_SPEED_NUM_100M; 2648 link.link_duplex = ETH_LINK_FULL_DUPLEX; 2649 return rte_eth_linkstatus_set(dev, &link); 2650 } 2651 2652 if (link_up == 0) { 2653 if (hw->phy.media_type == txgbe_media_type_fiber) { 2654 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; 2655 rte_eal_alarm_set(10, 2656 txgbe_dev_setup_link_alarm_handler, dev); 2657 } 2658 return rte_eth_linkstatus_set(dev, &link); 2659 } 2660 2661 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; 2662 link.link_status = ETH_LINK_UP; 2663 link.link_duplex = ETH_LINK_FULL_DUPLEX; 2664 2665 switch (link_speed) { 2666 default: 2667 case 
TXGBE_LINK_SPEED_UNKNOWN: 2668 link.link_duplex = ETH_LINK_FULL_DUPLEX; 2669 link.link_speed = ETH_SPEED_NUM_100M; 2670 break; 2671 2672 case TXGBE_LINK_SPEED_100M_FULL: 2673 link.link_speed = ETH_SPEED_NUM_100M; 2674 break; 2675 2676 case TXGBE_LINK_SPEED_1GB_FULL: 2677 link.link_speed = ETH_SPEED_NUM_1G; 2678 break; 2679 2680 case TXGBE_LINK_SPEED_2_5GB_FULL: 2681 link.link_speed = ETH_SPEED_NUM_2_5G; 2682 break; 2683 2684 case TXGBE_LINK_SPEED_5GB_FULL: 2685 link.link_speed = ETH_SPEED_NUM_5G; 2686 break; 2687 2688 case TXGBE_LINK_SPEED_10GB_FULL: 2689 link.link_speed = ETH_SPEED_NUM_10G; 2690 break; 2691 } 2692 2693 return rte_eth_linkstatus_set(dev, &link); 2694 } 2695 2696 static int 2697 txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 2698 { 2699 return txgbe_dev_link_update_share(dev, wait_to_complete); 2700 } 2701 2702 static int 2703 txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 2704 { 2705 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2706 uint32_t fctrl; 2707 2708 fctrl = rd32(hw, TXGBE_PSRCTL); 2709 fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP); 2710 wr32(hw, TXGBE_PSRCTL, fctrl); 2711 2712 return 0; 2713 } 2714 2715 static int 2716 txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 2717 { 2718 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2719 uint32_t fctrl; 2720 2721 fctrl = rd32(hw, TXGBE_PSRCTL); 2722 fctrl &= (~TXGBE_PSRCTL_UCP); 2723 if (dev->data->all_multicast == 1) 2724 fctrl |= TXGBE_PSRCTL_MCP; 2725 else 2726 fctrl &= (~TXGBE_PSRCTL_MCP); 2727 wr32(hw, TXGBE_PSRCTL, fctrl); 2728 2729 return 0; 2730 } 2731 2732 static int 2733 txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 2734 { 2735 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2736 uint32_t fctrl; 2737 2738 fctrl = rd32(hw, TXGBE_PSRCTL); 2739 fctrl |= TXGBE_PSRCTL_MCP; 2740 wr32(hw, TXGBE_PSRCTL, fctrl); 2741 2742 return 0; 2743 } 2744 2745 static int 2746 txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 2747 { 2748 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2749 uint32_t fctrl; 2750 2751 if (dev->data->promiscuous == 1) 2752 return 0; /* must remain in all_multicast mode */ 2753 2754 fctrl = rd32(hw, TXGBE_PSRCTL); 2755 fctrl &= (~TXGBE_PSRCTL_MCP); 2756 wr32(hw, TXGBE_PSRCTL, fctrl); 2757 2758 return 0; 2759 } 2760 2761 /** 2762 * It clears the interrupt causes and enables the interrupt. 2763 * It will be called once only during nic initialized. 2764 * 2765 * @param dev 2766 * Pointer to struct rte_eth_dev. 2767 * @param on 2768 * Enable or Disable. 2769 * 2770 * @return 2771 * - On success, zero. 2772 * - On failure, a negative value. 2773 */ 2774 static int 2775 txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 2776 { 2777 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 2778 2779 txgbe_dev_link_status_print(dev); 2780 if (on) 2781 intr->mask_misc |= TXGBE_ICRMISC_LSC; 2782 else 2783 intr->mask_misc &= ~TXGBE_ICRMISC_LSC; 2784 2785 return 0; 2786 } 2787 2788 /** 2789 * It clears the interrupt causes and enables the interrupt. 2790 * It will be called once only during nic initialized. 2791 * 2792 * @param dev 2793 * Pointer to struct rte_eth_dev. 2794 * 2795 * @return 2796 * - On success, zero. 2797 * - On failure, a negative value. 
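 *
 * Note: like the LSC and MACsec setup helpers, this only updates the
 * driver's software copy of the interrupt mask; the mask is presumably
 * written to hardware later by txgbe_enable_intr() when the port starts.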
2798 */ 2799 static int 2800 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) 2801 { 2802 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 2803 2804 intr->mask[0] |= TXGBE_ICR_MASK; 2805 intr->mask[1] |= TXGBE_ICR_MASK; 2806 2807 return 0; 2808 } 2809 2810 /** 2811 * It clears the interrupt causes and enables the interrupt. 2812 * It will be called once only during nic initialized. 2813 * 2814 * @param dev 2815 * Pointer to struct rte_eth_dev. 2816 * 2817 * @return 2818 * - On success, zero. 2819 * - On failure, a negative value. 2820 */ 2821 static int 2822 txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) 2823 { 2824 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 2825 2826 intr->mask_misc |= TXGBE_ICRMISC_LNKSEC; 2827 2828 return 0; 2829 } 2830 2831 /* 2832 * It reads ICR and sets flag (TXGBE_ICRMISC_LSC) for the link_update. 2833 * 2834 * @param dev 2835 * Pointer to struct rte_eth_dev. 2836 * 2837 * @return 2838 * - On success, zero. 2839 * - On failure, a negative value. 2840 */ 2841 static int 2842 txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 2843 { 2844 uint32_t eicr; 2845 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2846 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 2847 2848 /* clear all cause mask */ 2849 txgbe_disable_intr(hw); 2850 2851 /* read-on-clear nic registers here */ 2852 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC]; 2853 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 2854 2855 intr->flags = 0; 2856 2857 /* set flag for async link update */ 2858 if (eicr & TXGBE_ICRMISC_LSC) 2859 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; 2860 2861 if (eicr & TXGBE_ICRMISC_VFMBX) 2862 intr->flags |= TXGBE_FLAG_MAILBOX; 2863 2864 if (eicr & TXGBE_ICRMISC_LNKSEC) 2865 intr->flags |= TXGBE_FLAG_MACSEC; 2866 2867 if (eicr & TXGBE_ICRMISC_GPIO) 2868 intr->flags |= TXGBE_FLAG_PHY_INTERRUPT; 2869 2870 return 0; 2871 } 2872 2873 /** 2874 * It gets and then prints the link status. 2875 * 2876 * @param dev 2877 * Pointer to struct rte_eth_dev. 2878 * 2879 * @return 2880 * - On success, zero. 2881 * - On failure, a negative value. 2882 */ 2883 static void 2884 txgbe_dev_link_status_print(struct rte_eth_dev *dev) 2885 { 2886 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2887 struct rte_eth_link link; 2888 2889 rte_eth_linkstatus_get(dev, &link); 2890 2891 if (link.link_status) { 2892 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 2893 (int)(dev->data->port_id), 2894 (unsigned int)link.link_speed, 2895 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 2896 "full-duplex" : "half-duplex"); 2897 } else { 2898 PMD_INIT_LOG(INFO, " Port %d: Link Down", 2899 (int)(dev->data->port_id)); 2900 } 2901 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 2902 pci_dev->addr.domain, 2903 pci_dev->addr.bus, 2904 pci_dev->addr.devid, 2905 pci_dev->addr.function); 2906 } 2907 2908 /* 2909 * It executes link_update after knowing an interrupt occurred. 2910 * 2911 * @param dev 2912 * Pointer to struct rte_eth_dev. 2913 * 2914 * @return 2915 * - On success, zero. 2916 * - On failure, a negative value. 
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & TXGBE_FLAG_MAILBOX) {
		txgbe_pf_mbx_process(dev);
		intr->flags &= ~TXGBE_FLAG_MAILBOX;
	}

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		/* link likely to come up */
		if (!link.link_status)
			/* handle it 1 sec later, wait for it to be stable */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* link likely to go down */
		else
			/* handle it 4 sec later, wait for it to be stable */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}

/**
 * Interrupt handler which shall be registered as an alarm callback for
 * delayed handling of a specific interrupt, waiting until the NIC state is
 * stable. As the NIC interrupt state is not stable for txgbe right after
 * the link goes down, it needs to wait 4 seconds to get a stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
2985 * 2986 * @return 2987 * void 2988 */ 2989 static void 2990 txgbe_dev_interrupt_delayed_handler(void *param) 2991 { 2992 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2993 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2994 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2995 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 2996 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2997 uint32_t eicr; 2998 2999 txgbe_disable_intr(hw); 3000 3001 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC]; 3002 if (eicr & TXGBE_ICRMISC_VFMBX) 3003 txgbe_pf_mbx_process(dev); 3004 3005 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) { 3006 hw->phy.handle_lasi(hw); 3007 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT; 3008 } 3009 3010 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) { 3011 txgbe_dev_link_update(dev, 0); 3012 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE; 3013 txgbe_dev_link_status_print(dev); 3014 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 3015 NULL); 3016 } 3017 3018 if (intr->flags & TXGBE_FLAG_MACSEC) { 3019 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, 3020 NULL); 3021 intr->flags &= ~TXGBE_FLAG_MACSEC; 3022 } 3023 3024 /* restore original mask */ 3025 intr->mask_misc = intr->mask_misc_orig; 3026 intr->mask_misc_orig = 0; 3027 3028 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 3029 txgbe_enable_intr(dev); 3030 rte_intr_enable(intr_handle); 3031 } 3032 3033 /** 3034 * Interrupt handler triggered by NIC for handling 3035 * specific interrupt. 3036 * 3037 * @param handle 3038 * Pointer to interrupt handle. 3039 * @param param 3040 * The address of parameter (struct rte_eth_dev *) registered before. 3041 * 3042 * @return 3043 * void 3044 */ 3045 static void 3046 txgbe_dev_interrupt_handler(void *param) 3047 { 3048 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 3049 3050 txgbe_dev_interrupt_get_status(dev); 3051 txgbe_dev_interrupt_action(dev, dev->intr_handle); 3052 } 3053 3054 static int 3055 txgbe_dev_led_on(struct rte_eth_dev *dev) 3056 { 3057 struct txgbe_hw *hw; 3058 3059 hw = TXGBE_DEV_HW(dev); 3060 return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP; 3061 } 3062 3063 static int 3064 txgbe_dev_led_off(struct rte_eth_dev *dev) 3065 { 3066 struct txgbe_hw *hw; 3067 3068 hw = TXGBE_DEV_HW(dev); 3069 return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP; 3070 } 3071 3072 static int 3073 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3074 { 3075 struct txgbe_hw *hw; 3076 uint32_t mflcn_reg; 3077 uint32_t fccfg_reg; 3078 int rx_pause; 3079 int tx_pause; 3080 3081 hw = TXGBE_DEV_HW(dev); 3082 3083 fc_conf->pause_time = hw->fc.pause_time; 3084 fc_conf->high_water = hw->fc.high_water[0]; 3085 fc_conf->low_water = hw->fc.low_water[0]; 3086 fc_conf->send_xon = hw->fc.send_xon; 3087 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 3088 3089 /* 3090 * Return rx_pause status according to actual setting of 3091 * RXFCCFG register. 3092 */ 3093 mflcn_reg = rd32(hw, TXGBE_RXFCCFG); 3094 if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC)) 3095 rx_pause = 1; 3096 else 3097 rx_pause = 0; 3098 3099 /* 3100 * Return tx_pause status according to actual setting of 3101 * TXFCCFG register. 
3102 */ 3103 fccfg_reg = rd32(hw, TXGBE_TXFCCFG); 3104 if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC)) 3105 tx_pause = 1; 3106 else 3107 tx_pause = 0; 3108 3109 if (rx_pause && tx_pause) 3110 fc_conf->mode = RTE_FC_FULL; 3111 else if (rx_pause) 3112 fc_conf->mode = RTE_FC_RX_PAUSE; 3113 else if (tx_pause) 3114 fc_conf->mode = RTE_FC_TX_PAUSE; 3115 else 3116 fc_conf->mode = RTE_FC_NONE; 3117 3118 return 0; 3119 } 3120 3121 static int 3122 txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3123 { 3124 struct txgbe_hw *hw; 3125 int err; 3126 uint32_t rx_buf_size; 3127 uint32_t max_high_water; 3128 enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = { 3129 txgbe_fc_none, 3130 txgbe_fc_rx_pause, 3131 txgbe_fc_tx_pause, 3132 txgbe_fc_full 3133 }; 3134 3135 PMD_INIT_FUNC_TRACE(); 3136 3137 hw = TXGBE_DEV_HW(dev); 3138 rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0)); 3139 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 3140 3141 /* 3142 * At least reserve one Ethernet frame for watermark 3143 * high_water/low_water in kilo bytes for txgbe 3144 */ 3145 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10; 3146 if (fc_conf->high_water > max_high_water || 3147 fc_conf->high_water < fc_conf->low_water) { 3148 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 3149 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 3150 return -EINVAL; 3151 } 3152 3153 hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode]; 3154 hw->fc.pause_time = fc_conf->pause_time; 3155 hw->fc.high_water[0] = fc_conf->high_water; 3156 hw->fc.low_water[0] = fc_conf->low_water; 3157 hw->fc.send_xon = fc_conf->send_xon; 3158 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 3159 3160 err = txgbe_fc_enable(hw); 3161 3162 /* Not negotiated is not an error case */ 3163 if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) { 3164 wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK, 3165 (fc_conf->mac_ctrl_frame_fwd 3166 ? 
TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP)); 3167 txgbe_flush(hw); 3168 3169 return 0; 3170 } 3171 3172 PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err); 3173 return -EIO; 3174 } 3175 3176 static int 3177 txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 3178 struct rte_eth_pfc_conf *pfc_conf) 3179 { 3180 int err; 3181 uint32_t rx_buf_size; 3182 uint32_t max_high_water; 3183 uint8_t tc_num; 3184 uint8_t map[TXGBE_DCB_UP_MAX] = { 0 }; 3185 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3186 struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev); 3187 3188 enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = { 3189 txgbe_fc_none, 3190 txgbe_fc_rx_pause, 3191 txgbe_fc_tx_pause, 3192 txgbe_fc_full 3193 }; 3194 3195 PMD_INIT_FUNC_TRACE(); 3196 3197 txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map); 3198 tc_num = map[pfc_conf->priority]; 3199 rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num)); 3200 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 3201 /* 3202 * At least reserve one Ethernet frame for watermark 3203 * high_water/low_water in kilo bytes for txgbe 3204 */ 3205 max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10; 3206 if (pfc_conf->fc.high_water > max_high_water || 3207 pfc_conf->fc.high_water <= pfc_conf->fc.low_water) { 3208 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 3209 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 3210 return -EINVAL; 3211 } 3212 3213 hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode]; 3214 hw->fc.pause_time = pfc_conf->fc.pause_time; 3215 hw->fc.send_xon = pfc_conf->fc.send_xon; 3216 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 3217 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 3218 3219 err = txgbe_dcb_pfc_enable(hw, tc_num); 3220 3221 /* Not negotiated is not an error case */ 3222 if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) 3223 return 0; 3224 3225 PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err); 3226 return -EIO; 3227 } 3228 3229 int 3230 txgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 3231 struct rte_eth_rss_reta_entry64 *reta_conf, 3232 uint16_t reta_size) 3233 { 3234 uint8_t i, j, mask; 3235 uint32_t reta; 3236 uint16_t idx, shift; 3237 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 3238 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3239 3240 PMD_INIT_FUNC_TRACE(); 3241 3242 if (!txgbe_rss_update_sp(hw->mac.type)) { 3243 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 3244 "NIC."); 3245 return -ENOTSUP; 3246 } 3247 3248 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3249 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3250 "(%d) doesn't match the number hardware can supported " 3251 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); 3252 return -EINVAL; 3253 } 3254 3255 for (i = 0; i < reta_size; i += 4) { 3256 idx = i / RTE_RETA_GROUP_SIZE; 3257 shift = i % RTE_RETA_GROUP_SIZE; 3258 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF); 3259 if (!mask) 3260 continue; 3261 3262 reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2); 3263 for (j = 0; j < 4; j++) { 3264 if (RS8(mask, j, 0x1)) { 3265 reta &= ~(MS32(8 * j, 0xFF)); 3266 reta |= LS32(reta_conf[idx].reta[shift + j], 3267 8 * j, 0xFF); 3268 } 3269 } 3270 wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta); 3271 } 3272 adapter->rss_reta_updated = 1; 3273 3274 return 0; 3275 } 3276 3277 int 3278 txgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 3279 struct rte_eth_rss_reta_entry64 *reta_conf, 3280 uint16_t reta_size) 3281 { 3282 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3283 
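	/*
	 * The 128-entry redirection table is packed four one-byte entries
	 * per 32-bit TXGBE_REG_RSSTBL register; reta_conf[].mask selects
	 * which entries of each 4-entry group are accessed.  Illustrative
	 * sketch of how a single entry is extracted (mirrors the loop below):
	 *
	 *	uint32_t reg   = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
	 *	uint16_t entry = (uint16_t)RS32(reg, 8 * j, 0xFF);
	 */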
uint8_t i, j, mask; 3284 uint32_t reta; 3285 uint16_t idx, shift; 3286 3287 PMD_INIT_FUNC_TRACE(); 3288 3289 if (reta_size != ETH_RSS_RETA_SIZE_128) { 3290 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3291 "(%d) doesn't match the number hardware can supported " 3292 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128); 3293 return -EINVAL; 3294 } 3295 3296 for (i = 0; i < reta_size; i += 4) { 3297 idx = i / RTE_RETA_GROUP_SIZE; 3298 shift = i % RTE_RETA_GROUP_SIZE; 3299 mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF); 3300 if (!mask) 3301 continue; 3302 3303 reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2); 3304 for (j = 0; j < 4; j++) { 3305 if (RS8(mask, j, 0x1)) 3306 reta_conf[idx].reta[shift + j] = 3307 (uint16_t)RS32(reta, 8 * j, 0xFF); 3308 } 3309 } 3310 3311 return 0; 3312 } 3313 3314 static int 3315 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 3316 uint32_t index, uint32_t pool) 3317 { 3318 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3319 uint32_t enable_addr = 1; 3320 3321 return txgbe_set_rar(hw, index, mac_addr->addr_bytes, 3322 pool, enable_addr); 3323 } 3324 3325 static void 3326 txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 3327 { 3328 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3329 3330 txgbe_clear_rar(hw, index); 3331 } 3332 3333 static int 3334 txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 3335 { 3336 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3337 3338 txgbe_remove_rar(dev, 0); 3339 txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); 3340 3341 return 0; 3342 } 3343 3344 static int 3345 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 3346 { 3347 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3348 struct rte_eth_dev_info dev_info; 3349 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 3350 struct rte_eth_dev_data *dev_data = dev->data; 3351 int ret; 3352 3353 ret = txgbe_dev_info_get(dev, &dev_info); 3354 if (ret != 0) 3355 return ret; 3356 3357 /* check that mtu is within the allowed range */ 3358 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 3359 return -EINVAL; 3360 3361 /* If device is started, refuse mtu that requires the support of 3362 * scattered packets when this feature has not been enabled before. 
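	 * (i.e. when frame_size plus two VLAN tags no longer fits in a single
	 * mbuf data area: dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM.)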
3363 */ 3364 if (dev_data->dev_started && !dev_data->scattered_rx && 3365 (frame_size + 2 * TXGBE_VLAN_TAG_SIZE > 3366 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 3367 PMD_INIT_LOG(ERR, "Stop port first."); 3368 return -EINVAL; 3369 } 3370 3371 /* update max frame size */ 3372 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 3373 3374 if (hw->mode) 3375 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, 3376 TXGBE_FRAME_SIZE_MAX); 3377 else 3378 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, 3379 TXGBE_FRMSZ_MAX(frame_size)); 3380 3381 return 0; 3382 } 3383 3384 static uint32_t 3385 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr) 3386 { 3387 uint32_t vector = 0; 3388 3389 switch (hw->mac.mc_filter_type) { 3390 case 0: /* use bits [47:36] of the address */ 3391 vector = ((uc_addr->addr_bytes[4] >> 4) | 3392 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 3393 break; 3394 case 1: /* use bits [46:35] of the address */ 3395 vector = ((uc_addr->addr_bytes[4] >> 3) | 3396 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 3397 break; 3398 case 2: /* use bits [45:34] of the address */ 3399 vector = ((uc_addr->addr_bytes[4] >> 2) | 3400 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 3401 break; 3402 case 3: /* use bits [43:32] of the address */ 3403 vector = ((uc_addr->addr_bytes[4]) | 3404 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 3405 break; 3406 default: /* Invalid mc_filter_type */ 3407 break; 3408 } 3409 3410 /* vector can only be 12-bits or boundary will be exceeded */ 3411 vector &= 0xFFF; 3412 return vector; 3413 } 3414 3415 static int 3416 txgbe_uc_hash_table_set(struct rte_eth_dev *dev, 3417 struct rte_ether_addr *mac_addr, uint8_t on) 3418 { 3419 uint32_t vector; 3420 uint32_t uta_idx; 3421 uint32_t reg_val; 3422 uint32_t uta_mask; 3423 uint32_t psrctl; 3424 3425 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3426 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev); 3427 3428 /* The UTA table only exists on pf hardware */ 3429 if (hw->mac.type < txgbe_mac_raptor) 3430 return -ENOTSUP; 3431 3432 vector = txgbe_uta_vector(hw, mac_addr); 3433 uta_idx = (vector >> 5) & 0x7F; 3434 uta_mask = 0x1UL << (vector & 0x1F); 3435 3436 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask)) 3437 return 0; 3438 3439 reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx)); 3440 if (on) { 3441 uta_info->uta_in_use++; 3442 reg_val |= uta_mask; 3443 uta_info->uta_shadow[uta_idx] |= uta_mask; 3444 } else { 3445 uta_info->uta_in_use--; 3446 reg_val &= ~uta_mask; 3447 uta_info->uta_shadow[uta_idx] &= ~uta_mask; 3448 } 3449 3450 wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val); 3451 3452 psrctl = rd32(hw, TXGBE_PSRCTL); 3453 if (uta_info->uta_in_use > 0) 3454 psrctl |= TXGBE_PSRCTL_UCHFENA; 3455 else 3456 psrctl &= ~TXGBE_PSRCTL_UCHFENA; 3457 3458 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK; 3459 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); 3460 wr32(hw, TXGBE_PSRCTL, psrctl); 3461 3462 return 0; 3463 } 3464 3465 static int 3466 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 3467 { 3468 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3469 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev); 3470 uint32_t psrctl; 3471 int i; 3472 3473 /* The UTA table only exists on pf hardware */ 3474 if (hw->mac.type < txgbe_mac_raptor) 3475 return -ENOTSUP; 3476 3477 if (on) { 3478 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 3479 uta_info->uta_shadow[i] = ~0; 3480 wr32(hw, TXGBE_UCADDRTBL(i), ~0); 3481 } 3482 } else { 3483 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 3484 
uta_info->uta_shadow[i] = 0; 3485 wr32(hw, TXGBE_UCADDRTBL(i), 0); 3486 } 3487 } 3488 3489 psrctl = rd32(hw, TXGBE_PSRCTL); 3490 if (on) 3491 psrctl |= TXGBE_PSRCTL_UCHFENA; 3492 else 3493 psrctl &= ~TXGBE_PSRCTL_UCHFENA; 3494 3495 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK; 3496 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); 3497 wr32(hw, TXGBE_PSRCTL, psrctl); 3498 3499 return 0; 3500 } 3501 3502 uint32_t 3503 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 3504 { 3505 uint32_t new_val = orig_val; 3506 3507 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) 3508 new_val |= TXGBE_POOLETHCTL_UTA; 3509 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) 3510 new_val |= TXGBE_POOLETHCTL_MCHA; 3511 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) 3512 new_val |= TXGBE_POOLETHCTL_UCHA; 3513 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) 3514 new_val |= TXGBE_POOLETHCTL_BCA; 3515 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) 3516 new_val |= TXGBE_POOLETHCTL_MCP; 3517 3518 return new_val; 3519 } 3520 3521 static int 3522 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 3523 { 3524 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3525 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3526 uint32_t mask; 3527 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3528 3529 if (queue_id < 32) { 3530 mask = rd32(hw, TXGBE_IMS(0)); 3531 mask &= (1 << queue_id); 3532 wr32(hw, TXGBE_IMS(0), mask); 3533 } else if (queue_id < 64) { 3534 mask = rd32(hw, TXGBE_IMS(1)); 3535 mask &= (1 << (queue_id - 32)); 3536 wr32(hw, TXGBE_IMS(1), mask); 3537 } 3538 rte_intr_enable(intr_handle); 3539 3540 return 0; 3541 } 3542 3543 static int 3544 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 3545 { 3546 uint32_t mask; 3547 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3548 3549 if (queue_id < 32) { 3550 mask = rd32(hw, TXGBE_IMS(0)); 3551 mask &= ~(1 << queue_id); 3552 wr32(hw, TXGBE_IMS(0), mask); 3553 } else if (queue_id < 64) { 3554 mask = rd32(hw, TXGBE_IMS(1)); 3555 mask &= ~(1 << (queue_id - 32)); 3556 wr32(hw, TXGBE_IMS(1), mask); 3557 } 3558 3559 return 0; 3560 } 3561 3562 /** 3563 * set the IVAR registers, mapping interrupt causes to vectors 3564 * @param hw 3565 * pointer to txgbe_hw struct 3566 * @direction 3567 * 0 for Rx, 1 for Tx, -1 for other causes 3568 * @queue 3569 * queue to map the corresponding interrupt to 3570 * @msix_vector 3571 * the vector to map to the corresponding queue 3572 */ 3573 void 3574 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, 3575 uint8_t queue, uint8_t msix_vector) 3576 { 3577 uint32_t tmp, idx; 3578 3579 if (direction == -1) { 3580 /* other causes */ 3581 msix_vector |= TXGBE_IVARMISC_VLD; 3582 idx = 0; 3583 tmp = rd32(hw, TXGBE_IVARMISC); 3584 tmp &= ~(0xFF << idx); 3585 tmp |= (msix_vector << idx); 3586 wr32(hw, TXGBE_IVARMISC, tmp); 3587 } else { 3588 /* rx or tx causes */ 3589 /* Workround for ICR lost */ 3590 idx = ((16 * (queue & 1)) + (8 * direction)); 3591 tmp = rd32(hw, TXGBE_IVAR(queue >> 1)); 3592 tmp &= ~(0xFF << idx); 3593 tmp |= (msix_vector << idx); 3594 wr32(hw, TXGBE_IVAR(queue >> 1), tmp); 3595 } 3596 } 3597 3598 /** 3599 * Sets up the hardware to properly generate MSI-X interrupts 3600 * @hw 3601 * board private structure 3602 */ 3603 static void 3604 txgbe_configure_msix(struct rte_eth_dev *dev) 3605 { 3606 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3607 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3608 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3609 uint32_t queue_id, base = 
TXGBE_MISC_VEC_ID;
	uint32_t vec = TXGBE_MISC_VEC_ID;
	uint32_t gpie;

	/* Won't configure the MSI-X registers if no mapping has been done
	 * between interrupt vectors and event fds; but if MSI-X has been
	 * enabled already, auto clean, auto mask and throttling still need
	 * to be configured.
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
		     queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
	     TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
	     | TXGBE_ITR_WRDSA);
}

int
txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
			   uint16_t queue_idx, uint16_t tx_rate)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t bcnrc_val;

	if (queue_idx >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (tx_rate != 0) {
		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
	} else {
		bcnrc_val = 0;
	}

	/*
	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
3673 */ 3674 wr32(hw, TXGBE_ARBTXMMW, 0x14); 3675 3676 /* Set ARBTXRATE of queue X */ 3677 wr32(hw, TXGBE_ARBPOOLIDX, queue_idx); 3678 wr32(hw, TXGBE_ARBTXRATE, bcnrc_val); 3679 txgbe_flush(hw); 3680 3681 return 0; 3682 } 3683 3684 int 3685 txgbe_syn_filter_set(struct rte_eth_dev *dev, 3686 struct rte_eth_syn_filter *filter, 3687 bool add) 3688 { 3689 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3690 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 3691 uint32_t syn_info; 3692 uint32_t synqf; 3693 3694 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) 3695 return -EINVAL; 3696 3697 syn_info = filter_info->syn_info; 3698 3699 if (add) { 3700 if (syn_info & TXGBE_SYNCLS_ENA) 3701 return -EINVAL; 3702 synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue); 3703 synqf |= TXGBE_SYNCLS_ENA; 3704 3705 if (filter->hig_pri) 3706 synqf |= TXGBE_SYNCLS_HIPRIO; 3707 else 3708 synqf &= ~TXGBE_SYNCLS_HIPRIO; 3709 } else { 3710 synqf = rd32(hw, TXGBE_SYNCLS); 3711 if (!(syn_info & TXGBE_SYNCLS_ENA)) 3712 return -ENOENT; 3713 synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA); 3714 } 3715 3716 filter_info->syn_info = synqf; 3717 wr32(hw, TXGBE_SYNCLS, synqf); 3718 txgbe_flush(hw); 3719 return 0; 3720 } 3721 3722 static inline enum txgbe_5tuple_protocol 3723 convert_protocol_type(uint8_t protocol_value) 3724 { 3725 if (protocol_value == IPPROTO_TCP) 3726 return TXGBE_5TF_PROT_TCP; 3727 else if (protocol_value == IPPROTO_UDP) 3728 return TXGBE_5TF_PROT_UDP; 3729 else if (protocol_value == IPPROTO_SCTP) 3730 return TXGBE_5TF_PROT_SCTP; 3731 else 3732 return TXGBE_5TF_PROT_NONE; 3733 } 3734 3735 /* inject a 5-tuple filter to HW */ 3736 static inline void 3737 txgbe_inject_5tuple_filter(struct rte_eth_dev *dev, 3738 struct txgbe_5tuple_filter *filter) 3739 { 3740 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3741 int i; 3742 uint32_t ftqf, sdpqf; 3743 uint32_t l34timir = 0; 3744 uint32_t mask = TXGBE_5TFCTL0_MASK; 3745 3746 i = filter->index; 3747 sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port)); 3748 sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port)); 3749 3750 ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto); 3751 ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority); 3752 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ 3753 mask &= ~TXGBE_5TFCTL0_MSADDR; 3754 if (filter->filter_info.dst_ip_mask == 0) 3755 mask &= ~TXGBE_5TFCTL0_MDADDR; 3756 if (filter->filter_info.src_port_mask == 0) 3757 mask &= ~TXGBE_5TFCTL0_MSPORT; 3758 if (filter->filter_info.dst_port_mask == 0) 3759 mask &= ~TXGBE_5TFCTL0_MDPORT; 3760 if (filter->filter_info.proto_mask == 0) 3761 mask &= ~TXGBE_5TFCTL0_MPROTO; 3762 ftqf |= mask; 3763 ftqf |= TXGBE_5TFCTL0_MPOOL; 3764 ftqf |= TXGBE_5TFCTL0_ENA; 3765 3766 wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip)); 3767 wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip)); 3768 wr32(hw, TXGBE_5TFPORT(i), sdpqf); 3769 wr32(hw, TXGBE_5TFCTL0(i), ftqf); 3770 3771 l34timir |= TXGBE_5TFCTL1_QP(filter->queue); 3772 wr32(hw, TXGBE_5TFCTL1(i), l34timir); 3773 } 3774 3775 /* 3776 * add a 5tuple filter 3777 * 3778 * @param 3779 * dev: Pointer to struct rte_eth_dev. 3780 * index: the index the filter allocates. 3781 * filter: pointer to the filter that will be added. 3782 * rx_queue: the queue id the filter assigned to. 3783 * 3784 * @return 3785 * - On success, zero. 3786 * - On failure, a negative value. 
3787 */ 3788 static int 3789 txgbe_add_5tuple_filter(struct rte_eth_dev *dev, 3790 struct txgbe_5tuple_filter *filter) 3791 { 3792 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 3793 int i, idx, shift; 3794 3795 /* 3796 * look for an unused 5tuple filter index, 3797 * and insert the filter to list. 3798 */ 3799 for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) { 3800 idx = i / (sizeof(uint32_t) * NBBY); 3801 shift = i % (sizeof(uint32_t) * NBBY); 3802 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 3803 filter_info->fivetuple_mask[idx] |= 1 << shift; 3804 filter->index = i; 3805 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 3806 filter, 3807 entries); 3808 break; 3809 } 3810 } 3811 if (i >= TXGBE_MAX_FTQF_FILTERS) { 3812 PMD_DRV_LOG(ERR, "5tuple filters are full."); 3813 return -ENOSYS; 3814 } 3815 3816 txgbe_inject_5tuple_filter(dev, filter); 3817 3818 return 0; 3819 } 3820 3821 /* 3822 * remove a 5tuple filter 3823 * 3824 * @param 3825 * dev: Pointer to struct rte_eth_dev. 3826 * filter: the pointer of the filter will be removed. 3827 */ 3828 static void 3829 txgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 3830 struct txgbe_5tuple_filter *filter) 3831 { 3832 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3833 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 3834 uint16_t index = filter->index; 3835 3836 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 3837 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 3838 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 3839 rte_free(filter); 3840 3841 wr32(hw, TXGBE_5TFDADDR(index), 0); 3842 wr32(hw, TXGBE_5TFSADDR(index), 0); 3843 wr32(hw, TXGBE_5TFPORT(index), 0); 3844 wr32(hw, TXGBE_5TFCTL0(index), 0); 3845 wr32(hw, TXGBE_5TFCTL1(index), 0); 3846 } 3847 3848 static inline struct txgbe_5tuple_filter * 3849 txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list, 3850 struct txgbe_5tuple_filter_info *key) 3851 { 3852 struct txgbe_5tuple_filter *it; 3853 3854 TAILQ_FOREACH(it, filter_list, entries) { 3855 if (memcmp(key, &it->filter_info, 3856 sizeof(struct txgbe_5tuple_filter_info)) == 0) { 3857 return it; 3858 } 3859 } 3860 return NULL; 3861 } 3862 3863 /* translate elements in struct rte_eth_ntuple_filter 3864 * to struct txgbe_5tuple_filter_info 3865 */ 3866 static inline int 3867 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 3868 struct txgbe_5tuple_filter_info *filter_info) 3869 { 3870 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM || 3871 filter->priority > TXGBE_5TUPLE_MAX_PRI || 3872 filter->priority < TXGBE_5TUPLE_MIN_PRI) 3873 return -EINVAL; 3874 3875 switch (filter->dst_ip_mask) { 3876 case UINT32_MAX: 3877 filter_info->dst_ip_mask = 0; 3878 filter_info->dst_ip = filter->dst_ip; 3879 break; 3880 case 0: 3881 filter_info->dst_ip_mask = 1; 3882 break; 3883 default: 3884 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 3885 return -EINVAL; 3886 } 3887 3888 switch (filter->src_ip_mask) { 3889 case UINT32_MAX: 3890 filter_info->src_ip_mask = 0; 3891 filter_info->src_ip = filter->src_ip; 3892 break; 3893 case 0: 3894 filter_info->src_ip_mask = 1; 3895 break; 3896 default: 3897 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 3898 return -EINVAL; 3899 } 3900 3901 switch (filter->dst_port_mask) { 3902 case UINT16_MAX: 3903 filter_info->dst_port_mask = 0; 3904 filter_info->dst_port = filter->dst_port; 3905 break; 3906 case 0: 3907 filter_info->dst_port_mask = 1; 3908 break; 3909 default: 3910 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 3911 return -EINVAL; 3912 } 3913 
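/* Note the inverted mask convention used throughout this translation:
 * a fully-set rte mask (UINT32_MAX/UINT16_MAX/UINT8_MAX) becomes a
 * filter_info mask of 0 ("compare this field"), while an rte mask of 0
 * becomes 1 ("ignore"), matching the 5TFCTL0 mask bits programmed in
 * txgbe_inject_5tuple_filter().
 */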
3914 switch (filter->src_port_mask) { 3915 case UINT16_MAX: 3916 filter_info->src_port_mask = 0; 3917 filter_info->src_port = filter->src_port; 3918 break; 3919 case 0: 3920 filter_info->src_port_mask = 1; 3921 break; 3922 default: 3923 PMD_DRV_LOG(ERR, "invalid src_port mask."); 3924 return -EINVAL; 3925 } 3926 3927 switch (filter->proto_mask) { 3928 case UINT8_MAX: 3929 filter_info->proto_mask = 0; 3930 filter_info->proto = 3931 convert_protocol_type(filter->proto); 3932 break; 3933 case 0: 3934 filter_info->proto_mask = 1; 3935 break; 3936 default: 3937 PMD_DRV_LOG(ERR, "invalid protocol mask."); 3938 return -EINVAL; 3939 } 3940 3941 filter_info->priority = (uint8_t)filter->priority; 3942 return 0; 3943 } 3944 3945 /* 3946 * add or delete a ntuple filter 3947 * 3948 * @param 3949 * dev: Pointer to struct rte_eth_dev. 3950 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 3951 * add: if true, add filter, if false, remove filter 3952 * 3953 * @return 3954 * - On success, zero. 3955 * - On failure, a negative value. 3956 */ 3957 int 3958 txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 3959 struct rte_eth_ntuple_filter *ntuple_filter, 3960 bool add) 3961 { 3962 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 3963 struct txgbe_5tuple_filter_info filter_5tuple; 3964 struct txgbe_5tuple_filter *filter; 3965 int ret; 3966 3967 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 3968 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 3969 return -EINVAL; 3970 } 3971 3972 memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info)); 3973 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 3974 if (ret < 0) 3975 return ret; 3976 3977 filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 3978 &filter_5tuple); 3979 if (filter != NULL && add) { 3980 PMD_DRV_LOG(ERR, "filter exists."); 3981 return -EEXIST; 3982 } 3983 if (filter == NULL && !add) { 3984 PMD_DRV_LOG(ERR, "filter doesn't exist."); 3985 return -ENOENT; 3986 } 3987 3988 if (add) { 3989 filter = rte_zmalloc("txgbe_5tuple_filter", 3990 sizeof(struct txgbe_5tuple_filter), 0); 3991 if (filter == NULL) 3992 return -ENOMEM; 3993 rte_memcpy(&filter->filter_info, 3994 &filter_5tuple, 3995 sizeof(struct txgbe_5tuple_filter_info)); 3996 filter->queue = ntuple_filter->queue; 3997 ret = txgbe_add_5tuple_filter(dev, filter); 3998 if (ret < 0) { 3999 rte_free(filter); 4000 return ret; 4001 } 4002 } else { 4003 txgbe_remove_5tuple_filter(dev, filter); 4004 } 4005 4006 return 0; 4007 } 4008 4009 int 4010 txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 4011 struct rte_eth_ethertype_filter *filter, 4012 bool add) 4013 { 4014 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4015 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 4016 uint32_t etqf = 0; 4017 uint32_t etqs = 0; 4018 int ret; 4019 struct txgbe_ethertype_filter ethertype_filter; 4020 4021 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) 4022 return -EINVAL; 4023 4024 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 4025 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 4026 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 4027 " ethertype filter.", filter->ether_type); 4028 return -EINVAL; 4029 } 4030 4031 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 4032 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 4033 return -EINVAL; 4034 } 4035 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 4036 PMD_DRV_LOG(ERR, "drop option is unsupported."); 4037 return -EINVAL; 4038 } 4039 4040 ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 
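/* The lookup returns the slot index (>= 0) if this ether_type is already
 * programmed, or a negative value if it is not; the checks below use that
 * result to reject duplicate adds and removals of non-existent filters.
 */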
4041 if (ret >= 0 && add) { 4042 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 4043 filter->ether_type); 4044 return -EEXIST; 4045 } 4046 if (ret < 0 && !add) { 4047 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4048 filter->ether_type); 4049 return -ENOENT; 4050 } 4051 4052 if (add) { 4053 etqf = TXGBE_ETFLT_ENA; 4054 etqf |= TXGBE_ETFLT_ETID(filter->ether_type); 4055 etqs |= TXGBE_ETCLS_QPID(filter->queue); 4056 etqs |= TXGBE_ETCLS_QENA; 4057 4058 ethertype_filter.ethertype = filter->ether_type; 4059 ethertype_filter.etqf = etqf; 4060 ethertype_filter.etqs = etqs; 4061 ethertype_filter.conf = FALSE; 4062 ret = txgbe_ethertype_filter_insert(filter_info, 4063 &ethertype_filter); 4064 if (ret < 0) { 4065 PMD_DRV_LOG(ERR, "ethertype filters are full."); 4066 return -ENOSPC; 4067 } 4068 } else { 4069 ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); 4070 if (ret < 0) 4071 return -ENOSYS; 4072 } 4073 wr32(hw, TXGBE_ETFLT(ret), etqf); 4074 wr32(hw, TXGBE_ETCLS(ret), etqs); 4075 txgbe_flush(hw); 4076 4077 return 0; 4078 } 4079 4080 static int 4081 txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev, 4082 enum rte_filter_type filter_type, 4083 enum rte_filter_op filter_op, 4084 void *arg) 4085 { 4086 int ret = 0; 4087 4088 switch (filter_type) { 4089 case RTE_ETH_FILTER_GENERIC: 4090 if (filter_op != RTE_ETH_FILTER_GET) 4091 return -EINVAL; 4092 *(const void **)arg = &txgbe_flow_ops; 4093 break; 4094 default: 4095 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", 4096 filter_type); 4097 ret = -EINVAL; 4098 break; 4099 } 4100 4101 return ret; 4102 } 4103 4104 static u8 * 4105 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw, 4106 u8 **mc_addr_ptr, u32 *vmdq) 4107 { 4108 u8 *mc_addr; 4109 4110 *vmdq = 0; 4111 mc_addr = *mc_addr_ptr; 4112 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 4113 return mc_addr; 4114 } 4115 4116 int 4117 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 4118 struct rte_ether_addr *mc_addr_set, 4119 uint32_t nb_mc_addr) 4120 { 4121 struct txgbe_hw *hw; 4122 u8 *mc_addr_list; 4123 4124 hw = TXGBE_DEV_HW(dev); 4125 mc_addr_list = (u8 *)mc_addr_set; 4126 return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 4127 txgbe_dev_addr_list_itr, TRUE); 4128 } 4129 4130 static uint64_t 4131 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 4132 { 4133 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4134 uint64_t systime_cycles; 4135 4136 systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL); 4137 systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32; 4138 4139 return systime_cycles; 4140 } 4141 4142 static uint64_t 4143 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 4144 { 4145 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4146 uint64_t rx_tstamp_cycles; 4147 4148 /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */ 4149 rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL); 4150 rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32; 4151 4152 return rx_tstamp_cycles; 4153 } 4154 4155 static uint64_t 4156 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 4157 { 4158 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4159 uint64_t tx_tstamp_cycles; 4160 4161 /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds.
*/ 4162 tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL); 4163 tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32; 4164 4165 return tx_tstamp_cycles; 4166 } 4167 4168 static void 4169 txgbe_start_timecounters(struct rte_eth_dev *dev) 4170 { 4171 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4172 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4173 struct rte_eth_link link; 4174 uint32_t incval = 0; 4175 uint32_t shift = 0; 4176 4177 /* Get current link speed. */ 4178 txgbe_dev_link_update(dev, 1); 4179 rte_eth_linkstatus_get(dev, &link); 4180 4181 switch (link.link_speed) { 4182 case ETH_SPEED_NUM_100M: 4183 incval = TXGBE_INCVAL_100; 4184 shift = TXGBE_INCVAL_SHIFT_100; 4185 break; 4186 case ETH_SPEED_NUM_1G: 4187 incval = TXGBE_INCVAL_1GB; 4188 shift = TXGBE_INCVAL_SHIFT_1GB; 4189 break; 4190 case ETH_SPEED_NUM_10G: 4191 default: 4192 incval = TXGBE_INCVAL_10GB; 4193 shift = TXGBE_INCVAL_SHIFT_10GB; 4194 break; 4195 } 4196 4197 wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2)); 4198 4199 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 4200 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4201 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4202 4203 adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK; 4204 adapter->systime_tc.cc_shift = shift; 4205 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 4206 4207 adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK; 4208 adapter->rx_tstamp_tc.cc_shift = shift; 4209 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4210 4211 adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK; 4212 adapter->tx_tstamp_tc.cc_shift = shift; 4213 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4214 } 4215 4216 static int 4217 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 4218 { 4219 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4220 4221 adapter->systime_tc.nsec += delta; 4222 adapter->rx_tstamp_tc.nsec += delta; 4223 adapter->tx_tstamp_tc.nsec += delta; 4224 4225 return 0; 4226 } 4227 4228 static int 4229 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 4230 { 4231 uint64_t ns; 4232 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4233 4234 ns = rte_timespec_to_ns(ts); 4235 /* Set the timecounters to a new value. */ 4236 adapter->systime_tc.nsec = ns; 4237 adapter->rx_tstamp_tc.nsec = ns; 4238 adapter->tx_tstamp_tc.nsec = ns; 4239 4240 return 0; 4241 } 4242 4243 static int 4244 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 4245 { 4246 uint64_t ns, systime_cycles; 4247 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4248 4249 systime_cycles = txgbe_read_systime_cyclecounter(dev); 4250 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 4251 *ts = rte_ns_to_timespec(ns); 4252 4253 return 0; 4254 } 4255 4256 static int 4257 txgbe_timesync_enable(struct rte_eth_dev *dev) 4258 { 4259 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4260 uint32_t tsync_ctl; 4261 4262 /* Stop the timesync system time. */ 4263 wr32(hw, TXGBE_TSTIMEINC, 0x0); 4264 /* Reset the timesync system time value. */ 4265 wr32(hw, TXGBE_TSTIMEL, 0x0); 4266 wr32(hw, TXGBE_TSTIMEH, 0x0); 4267 4268 txgbe_start_timecounters(dev); 4269 4270 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 4271 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 4272 RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588); 4273 4274 /* Enable timestamping of received PTP packets. 
*/ 4275 tsync_ctl = rd32(hw, TXGBE_TSRXCTL); 4276 tsync_ctl |= TXGBE_TSRXCTL_ENA; 4277 wr32(hw, TXGBE_TSRXCTL, tsync_ctl); 4278 4279 /* Enable timestamping of transmitted PTP packets. */ 4280 tsync_ctl = rd32(hw, TXGBE_TSTXCTL); 4281 tsync_ctl |= TXGBE_TSTXCTL_ENA; 4282 wr32(hw, TXGBE_TSTXCTL, tsync_ctl); 4283 4284 txgbe_flush(hw); 4285 4286 return 0; 4287 } 4288 4289 static int 4290 txgbe_timesync_disable(struct rte_eth_dev *dev) 4291 { 4292 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4293 uint32_t tsync_ctl; 4294 4295 /* Disable timestamping of transmitted PTP packets. */ 4296 tsync_ctl = rd32(hw, TXGBE_TSTXCTL); 4297 tsync_ctl &= ~TXGBE_TSTXCTL_ENA; 4298 wr32(hw, TXGBE_TSTXCTL, tsync_ctl); 4299 4300 /* Disable timestamping of received PTP packets. */ 4301 tsync_ctl = rd32(hw, TXGBE_TSRXCTL); 4302 tsync_ctl &= ~TXGBE_TSRXCTL_ENA; 4303 wr32(hw, TXGBE_TSRXCTL, tsync_ctl); 4304 4305 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 4306 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0); 4307 4308 /* Stop incrementing the System Time registers. */ 4309 wr32(hw, TXGBE_TSTIMEINC, 0); 4310 4311 return 0; 4312 } 4313 4314 static int 4315 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 4316 struct timespec *timestamp, 4317 uint32_t flags __rte_unused) 4318 { 4319 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4320 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4321 uint32_t tsync_rxctl; 4322 uint64_t rx_tstamp_cycles; 4323 uint64_t ns; 4324 4325 tsync_rxctl = rd32(hw, TXGBE_TSRXCTL); 4326 if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0) 4327 return -EINVAL; 4328 4329 rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev); 4330 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 4331 *timestamp = rte_ns_to_timespec(ns); 4332 4333 return 0; 4334 } 4335 4336 static int 4337 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 4338 struct timespec *timestamp) 4339 { 4340 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4341 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4342 uint32_t tsync_txctl; 4343 uint64_t tx_tstamp_cycles; 4344 uint64_t ns; 4345 4346 tsync_txctl = rd32(hw, TXGBE_TSTXCTL); 4347 if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0) 4348 return -EINVAL; 4349 4350 tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev); 4351 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 4352 *timestamp = rte_ns_to_timespec(ns); 4353 4354 return 0; 4355 } 4356 4357 static int 4358 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused) 4359 { 4360 int count = 0; 4361 int g_ind = 0; 4362 const struct reg_info *reg_group; 4363 const struct reg_info **reg_set = txgbe_regs_others; 4364 4365 while ((reg_group = reg_set[g_ind++])) 4366 count += txgbe_regs_group_count(reg_group); 4367 4368 return count; 4369 } 4370 4371 static int 4372 txgbe_get_regs(struct rte_eth_dev *dev, 4373 struct rte_dev_reg_info *regs) 4374 { 4375 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4376 uint32_t *data = regs->data; 4377 int g_ind = 0; 4378 int count = 0; 4379 const struct reg_info *reg_group; 4380 const struct reg_info **reg_set = txgbe_regs_others; 4381 4382 if (data == NULL) { 4383 regs->length = txgbe_get_reg_length(dev); 4384 regs->width = sizeof(uint32_t); 4385 return 0; 4386 } 4387 4388 /* Support only full register dump */ 4389 if (regs->length == 0 || 4390 regs->length == (uint32_t)txgbe_get_reg_length(dev)) { 4391 regs->version = hw->mac.type << 24 | 4392 hw->revision_id << 16 | 4393 hw->device_id; 4394 while ((reg_group = reg_set[g_ind++])) 4395
count += txgbe_read_regs_group(dev, &data[count], 4396 reg_group); 4397 return 0; 4398 } 4399 4400 return -ENOTSUP; 4401 } 4402 4403 static int 4404 txgbe_get_eeprom_length(struct rte_eth_dev *dev) 4405 { 4406 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4407 4408 /* Return unit is byte count */ 4409 return hw->rom.word_size * 2; 4410 } 4411 4412 static int 4413 txgbe_get_eeprom(struct rte_eth_dev *dev, 4414 struct rte_dev_eeprom_info *in_eeprom) 4415 { 4416 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4417 struct txgbe_rom_info *eeprom = &hw->rom; 4418 uint16_t *data = in_eeprom->data; 4419 int first, length; 4420 4421 first = in_eeprom->offset >> 1; 4422 length = in_eeprom->length >> 1; 4423 if (first > hw->rom.word_size || 4424 ((first + length) > hw->rom.word_size)) 4425 return -EINVAL; 4426 4427 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 4428 4429 return eeprom->readw_buffer(hw, first, length, data); 4430 } 4431 4432 static int 4433 txgbe_set_eeprom(struct rte_eth_dev *dev, 4434 struct rte_dev_eeprom_info *in_eeprom) 4435 { 4436 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4437 struct txgbe_rom_info *eeprom = &hw->rom; 4438 uint16_t *data = in_eeprom->data; 4439 int first, length; 4440 4441 first = in_eeprom->offset >> 1; 4442 length = in_eeprom->length >> 1; 4443 if (first > hw->rom.word_size || 4444 ((first + length) > hw->rom.word_size)) 4445 return -EINVAL; 4446 4447 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 4448 4449 return eeprom->writew_buffer(hw, first, length, data); 4450 } 4451 4452 static int 4453 txgbe_get_module_info(struct rte_eth_dev *dev, 4454 struct rte_eth_dev_module_info *modinfo) 4455 { 4456 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4457 uint32_t status; 4458 uint8_t sff8472_rev, addr_mode; 4459 bool page_swap = false; 4460 4461 /* Check whether we support SFF-8472 or not */ 4462 status = hw->phy.read_i2c_eeprom(hw, 4463 TXGBE_SFF_SFF_8472_COMP, 4464 &sff8472_rev); 4465 if (status != 0) 4466 return -EIO; 4467 4468 /* addressing mode is not supported */ 4469 status = hw->phy.read_i2c_eeprom(hw, 4470 TXGBE_SFF_SFF_8472_SWAP, 4471 &addr_mode); 4472 if (status != 0) 4473 return -EIO; 4474 4475 if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { 4476 PMD_DRV_LOG(ERR, 4477 "Address change required to access page 0xA2, " 4478 "but not supported. Please report the module " 4479 "type to the driver maintainers."); 4480 page_swap = true; 4481 } 4482 4483 if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) { 4484 /* We have a SFP, but it does not support SFF-8472 */ 4485 modinfo->type = RTE_ETH_MODULE_SFF_8079; 4486 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 4487 } else { 4488 /* We have a SFP which supports a revision of SFF-8472. 
*/ 4489 modinfo->type = RTE_ETH_MODULE_SFF_8472; 4490 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 4491 } 4492 4493 return 0; 4494 } 4495 4496 static int 4497 txgbe_get_module_eeprom(struct rte_eth_dev *dev, 4498 struct rte_dev_eeprom_info *info) 4499 { 4500 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4501 uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID; 4502 uint8_t databyte = 0xFF; 4503 uint8_t *data = info->data; 4504 uint32_t i = 0; 4505 4506 if (info->length == 0) 4507 return -EINVAL; 4508 4509 for (i = info->offset; i < info->offset + info->length; i++) { 4510 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 4511 status = hw->phy.read_i2c_eeprom(hw, i, &databyte); 4512 else 4513 status = hw->phy.read_i2c_sff8472(hw, i, &databyte); 4514 4515 if (status != 0) 4516 return -EIO; 4517 4518 data[i - info->offset] = databyte; 4519 } 4520 4521 return 0; 4522 } 4523 4524 bool 4525 txgbe_rss_update_sp(enum txgbe_mac_type mac_type) 4526 { 4527 switch (mac_type) { 4528 case txgbe_mac_raptor: 4529 case txgbe_mac_raptor_vf: 4530 return 1; 4531 default: 4532 return 0; 4533 } 4534 } 4535 4536 static int 4537 txgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 4538 struct rte_eth_dcb_info *dcb_info) 4539 { 4540 struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev); 4541 struct txgbe_dcb_tc_config *tc; 4542 struct rte_eth_dcb_tc_queue_mapping *tc_queue; 4543 uint8_t nb_tcs; 4544 uint8_t i, j; 4545 4546 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) 4547 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 4548 else 4549 dcb_info->nb_tcs = 1; 4550 4551 tc_queue = &dcb_info->tc_queue; 4552 nb_tcs = dcb_info->nb_tcs; 4553 4554 if (dcb_config->vt_mode) { /* vt is enabled */ 4555 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 4556 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 4557 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 4558 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 4559 if (RTE_ETH_DEV_SRIOV(dev).active > 0) { 4560 for (j = 0; j < nb_tcs; j++) { 4561 tc_queue->tc_rxq[0][j].base = j; 4562 tc_queue->tc_rxq[0][j].nb_queue = 1; 4563 tc_queue->tc_txq[0][j].base = j; 4564 tc_queue->tc_txq[0][j].nb_queue = 1; 4565 } 4566 } else { 4567 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 4568 for (j = 0; j < nb_tcs; j++) { 4569 tc_queue->tc_rxq[i][j].base = 4570 i * nb_tcs + j; 4571 tc_queue->tc_rxq[i][j].nb_queue = 1; 4572 tc_queue->tc_txq[i][j].base = 4573 i * nb_tcs + j; 4574 tc_queue->tc_txq[i][j].nb_queue = 1; 4575 } 4576 } 4577 } 4578 } else { /* vt is disabled */ 4579 struct rte_eth_dcb_rx_conf *rx_conf = 4580 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 4581 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 4582 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 4583 if (dcb_info->nb_tcs == ETH_4_TCS) { 4584 for (i = 0; i < dcb_info->nb_tcs; i++) { 4585 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 4586 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 4587 } 4588 dcb_info->tc_queue.tc_txq[0][0].base = 0; 4589 dcb_info->tc_queue.tc_txq[0][1].base = 64; 4590 dcb_info->tc_queue.tc_txq[0][2].base = 96; 4591 dcb_info->tc_queue.tc_txq[0][3].base = 112; 4592 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 4593 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 4594 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 4595 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 4596 } else if (dcb_info->nb_tcs == ETH_8_TCS) { 4597 for (i = 0; i < dcb_info->nb_tcs; i++) { 4598 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 4599 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 4600 } 4601 dcb_info->tc_queue.tc_txq[0][0].base = 
0; 4602 dcb_info->tc_queue.tc_txq[0][1].base = 32; 4603 dcb_info->tc_queue.tc_txq[0][2].base = 64; 4604 dcb_info->tc_queue.tc_txq[0][3].base = 80; 4605 dcb_info->tc_queue.tc_txq[0][4].base = 96; 4606 dcb_info->tc_queue.tc_txq[0][5].base = 104; 4607 dcb_info->tc_queue.tc_txq[0][6].base = 112; 4608 dcb_info->tc_queue.tc_txq[0][7].base = 120; 4609 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 4610 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 4611 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 4612 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 4613 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 4614 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 4615 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 4616 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 4617 } 4618 } 4619 for (i = 0; i < dcb_info->nb_tcs; i++) { 4620 tc = &dcb_config->tc_config[i]; 4621 dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent; 4622 } 4623 return 0; 4624 } 4625 4626 /* Update e-tag ether type */ 4627 static int 4628 txgbe_update_e_tag_eth_type(struct txgbe_hw *hw, 4629 uint16_t ether_type) 4630 { 4631 uint32_t etag_etype; 4632 4633 etag_etype = rd32(hw, TXGBE_EXTAG); 4634 etag_etype &= ~TXGBE_EXTAG_ETAG_MASK; 4635 etag_etype |= ether_type; 4636 wr32(hw, TXGBE_EXTAG, etag_etype); 4637 txgbe_flush(hw); 4638 4639 return 0; 4640 } 4641 4642 /* Enable e-tag tunnel */ 4643 static int 4644 txgbe_e_tag_enable(struct txgbe_hw *hw) 4645 { 4646 uint32_t etag_etype; 4647 4648 etag_etype = rd32(hw, TXGBE_PORTCTL); 4649 etag_etype |= TXGBE_PORTCTL_ETAG; 4650 wr32(hw, TXGBE_PORTCTL, etag_etype); 4651 txgbe_flush(hw); 4652 4653 return 0; 4654 } 4655 4656 static int 4657 txgbe_e_tag_filter_del(struct rte_eth_dev *dev, 4658 struct txgbe_l2_tunnel_conf *l2_tunnel) 4659 { 4660 int ret = 0; 4661 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4662 uint32_t i, rar_entries; 4663 uint32_t rar_low, rar_high; 4664 4665 rar_entries = hw->mac.num_rar_entries; 4666 4667 for (i = 1; i < rar_entries; i++) { 4668 wr32(hw, TXGBE_ETHADDRIDX, i); 4669 rar_high = rd32(hw, TXGBE_ETHADDRH); 4670 rar_low = rd32(hw, TXGBE_ETHADDRL); 4671 if ((rar_high & TXGBE_ETHADDRH_VLD) && 4672 (rar_high & TXGBE_ETHADDRH_ETAG) && 4673 (TXGBE_ETHADDRL_ETAG(rar_low) == 4674 l2_tunnel->tunnel_id)) { 4675 wr32(hw, TXGBE_ETHADDRL, 0); 4676 wr32(hw, TXGBE_ETHADDRH, 0); 4677 4678 txgbe_clear_vmdq(hw, i, BIT_MASK32); 4679 4680 return ret; 4681 } 4682 } 4683 4684 return ret; 4685 } 4686 4687 static int 4688 txgbe_e_tag_filter_add(struct rte_eth_dev *dev, 4689 struct txgbe_l2_tunnel_conf *l2_tunnel) 4690 { 4691 int ret = 0; 4692 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4693 uint32_t i, rar_entries; 4694 uint32_t rar_low, rar_high; 4695 4696 /* One entry for one tunnel. Try to remove potential existing entry. */ 4697 txgbe_e_tag_filter_del(dev, l2_tunnel); 4698 4699 rar_entries = hw->mac.num_rar_entries; 4700 4701 for (i = 1; i < rar_entries; i++) { 4702 wr32(hw, TXGBE_ETHADDRIDX, i); 4703 rar_high = rd32(hw, TXGBE_ETHADDRH); 4704 if (rar_high & TXGBE_ETHADDRH_VLD) { 4705 continue; 4706 } else { 4707 txgbe_set_vmdq(hw, i, l2_tunnel->pool); 4708 rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG; 4709 rar_low = l2_tunnel->tunnel_id; 4710 4711 wr32(hw, TXGBE_ETHADDRL, rar_low); 4712 wr32(hw, TXGBE_ETHADDRH, rar_high); 4713 4714 return ret; 4715 } 4716 } 4717 4718 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
4719 " Please remove a rule before adding a new one."); 4720 return -EINVAL; 4721 } 4722 4723 static inline struct txgbe_l2_tn_filter * 4724 txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info, 4725 struct txgbe_l2_tn_key *key) 4726 { 4727 int ret; 4728 4729 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 4730 if (ret < 0) 4731 return NULL; 4732 4733 return l2_tn_info->hash_map[ret]; 4734 } 4735 4736 static inline int 4737 txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info, 4738 struct txgbe_l2_tn_filter *l2_tn_filter) 4739 { 4740 int ret; 4741 4742 ret = rte_hash_add_key(l2_tn_info->hash_handle, 4743 &l2_tn_filter->key); 4744 4745 if (ret < 0) { 4746 PMD_DRV_LOG(ERR, 4747 "Failed to insert L2 tunnel filter" 4748 " to hash table %d!", 4749 ret); 4750 return ret; 4751 } 4752 4753 l2_tn_info->hash_map[ret] = l2_tn_filter; 4754 4755 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 4756 4757 return 0; 4758 } 4759 4760 static inline int 4761 txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info, 4762 struct txgbe_l2_tn_key *key) 4763 { 4764 int ret; 4765 struct txgbe_l2_tn_filter *l2_tn_filter; 4766 4767 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 4768 4769 if (ret < 0) { 4770 PMD_DRV_LOG(ERR, 4771 "No such L2 tunnel filter to delete %d!", 4772 ret); 4773 return ret; 4774 } 4775 4776 l2_tn_filter = l2_tn_info->hash_map[ret]; 4777 l2_tn_info->hash_map[ret] = NULL; 4778 4779 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 4780 rte_free(l2_tn_filter); 4781 4782 return 0; 4783 } 4784 4785 /* Add l2 tunnel filter */ 4786 int 4787 txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 4788 struct txgbe_l2_tunnel_conf *l2_tunnel, 4789 bool restore) 4790 { 4791 int ret; 4792 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); 4793 struct txgbe_l2_tn_key key; 4794 struct txgbe_l2_tn_filter *node; 4795 4796 if (!restore) { 4797 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 4798 key.tn_id = l2_tunnel->tunnel_id; 4799 4800 node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key); 4801 4802 if (node) { 4803 PMD_DRV_LOG(ERR, 4804 "The L2 tunnel filter already exists!"); 4805 return -EINVAL; 4806 } 4807 4808 node = rte_zmalloc("txgbe_l2_tn", 4809 sizeof(struct txgbe_l2_tn_filter), 4810 0); 4811 if (!node) 4812 return -ENOMEM; 4813 4814 rte_memcpy(&node->key, 4815 &key, 4816 sizeof(struct txgbe_l2_tn_key)); 4817 node->pool = l2_tunnel->pool; 4818 ret = txgbe_insert_l2_tn_filter(l2_tn_info, node); 4819 if (ret < 0) { 4820 rte_free(node); 4821 return ret; 4822 } 4823 } 4824 4825 switch (l2_tunnel->l2_tunnel_type) { 4826 case RTE_L2_TUNNEL_TYPE_E_TAG: 4827 ret = txgbe_e_tag_filter_add(dev, l2_tunnel); 4828 break; 4829 default: 4830 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 4831 ret = -EINVAL; 4832 break; 4833 } 4834 4835 if (!restore && ret < 0) 4836 (void)txgbe_remove_l2_tn_filter(l2_tn_info, &key); 4837 4838 return ret; 4839 } 4840 4841 /* Delete l2 tunnel filter */ 4842 int 4843 txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 4844 struct txgbe_l2_tunnel_conf *l2_tunnel) 4845 { 4846 int ret; 4847 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); 4848 struct txgbe_l2_tn_key key; 4849 4850 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 4851 key.tn_id = l2_tunnel->tunnel_id; 4852 ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key); 4853 if (ret < 0) 4854 return ret; 4855 4856 switch (l2_tunnel->l2_tunnel_type) { 4857 case RTE_L2_TUNNEL_TYPE_E_TAG: 4858 ret = txgbe_e_tag_filter_del(dev, l2_tunnel); 4859 break; 
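/* Only E-tag (RTE_L2_TUNNEL_TYPE_E_TAG) tunnels are handled here; any
 * other tunnel type falls through to the error path below.
 */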
4860 default: 4861 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 4862 ret = -EINVAL; 4863 break; 4864 } 4865 4866 return ret; 4867 } 4868 4869 static int 4870 txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 4871 { 4872 int ret = 0; 4873 uint32_t ctrl; 4874 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4875 4876 ctrl = rd32(hw, TXGBE_POOLCTL); 4877 ctrl &= ~TXGBE_POOLCTL_MODE_MASK; 4878 if (en) 4879 ctrl |= TXGBE_PSRPOOL_MODE_ETAG; 4880 wr32(hw, TXGBE_POOLCTL, ctrl); 4881 4882 return ret; 4883 } 4884 4885 /* Add UDP tunneling port */ 4886 static int 4887 txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 4888 struct rte_eth_udp_tunnel *udp_tunnel) 4889 { 4890 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4891 int ret = 0; 4892 4893 if (udp_tunnel == NULL) 4894 return -EINVAL; 4895 4896 switch (udp_tunnel->prot_type) { 4897 case RTE_TUNNEL_TYPE_VXLAN: 4898 if (udp_tunnel->udp_port == 0) { 4899 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 4900 ret = -EINVAL; 4901 break; 4902 } 4903 wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port); 4904 wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port); 4905 break; 4906 case RTE_TUNNEL_TYPE_GENEVE: 4907 if (udp_tunnel->udp_port == 0) { 4908 PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed."); 4909 ret = -EINVAL; 4910 break; 4911 } 4912 wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port); 4913 break; 4914 case RTE_TUNNEL_TYPE_TEREDO: 4915 if (udp_tunnel->udp_port == 0) { 4916 PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed."); 4917 ret = -EINVAL; 4918 break; 4919 } 4920 wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port); 4921 break; 4922 default: 4923 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 4924 ret = -EINVAL; 4925 break; 4926 } 4927 4928 txgbe_flush(hw); 4929 4930 return ret; 4931 } 4932 4933 /* Remove UDP tunneling port */ 4934 static int 4935 txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 4936 struct rte_eth_udp_tunnel *udp_tunnel) 4937 { 4938 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4939 int ret = 0; 4940 uint16_t cur_port; 4941 4942 if (udp_tunnel == NULL) 4943 return -EINVAL; 4944 4945 switch (udp_tunnel->prot_type) { 4946 case RTE_TUNNEL_TYPE_VXLAN: 4947 cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT); 4948 if (cur_port != udp_tunnel->udp_port) { 4949 PMD_DRV_LOG(ERR, "Port %u does not exist.", 4950 udp_tunnel->udp_port); 4951 ret = -EINVAL; 4952 break; 4953 } 4954 wr32(hw, TXGBE_VXLANPORT, 0); 4955 wr32(hw, TXGBE_VXLANPORTGPE, 0); 4956 break; 4957 case RTE_TUNNEL_TYPE_GENEVE: 4958 cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT); 4959 if (cur_port != udp_tunnel->udp_port) { 4960 PMD_DRV_LOG(ERR, "Port %u does not exist.", 4961 udp_tunnel->udp_port); 4962 ret = -EINVAL; 4963 break; 4964 } 4965 wr32(hw, TXGBE_GENEVEPORT, 0); 4966 break; 4967 case RTE_TUNNEL_TYPE_TEREDO: 4968 cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT); 4969 if (cur_port != udp_tunnel->udp_port) { 4970 PMD_DRV_LOG(ERR, "Port %u does not exist.", 4971 udp_tunnel->udp_port); 4972 ret = -EINVAL; 4973 break; 4974 } 4975 wr32(hw, TXGBE_TEREDOPORT, 0); 4976 break; 4977 default: 4978 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 4979 ret = -EINVAL; 4980 break; 4981 } 4982 4983 txgbe_flush(hw); 4984 4985 return ret; 4986 } 4987 4988 /* restore n-tuple filter */ 4989 static inline void 4990 txgbe_ntuple_filter_restore(struct rte_eth_dev *dev) 4991 { 4992 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 4993 struct txgbe_5tuple_filter *node; 4994 4995 TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { 4996 txgbe_inject_5tuple_filter(dev, node); 4997 } 4998 
} 4999 5000 /* restore ethernet type filter */ 5001 static inline void 5002 txgbe_ethertype_filter_restore(struct rte_eth_dev *dev) 5003 { 5004 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5005 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5006 int i; 5007 5008 for (i = 0; i < TXGBE_ETF_ID_MAX; i++) { 5009 if (filter_info->ethertype_mask & (1 << i)) { 5010 wr32(hw, TXGBE_ETFLT(i), 5011 filter_info->ethertype_filters[i].etqf); 5012 wr32(hw, TXGBE_ETCLS(i), 5013 filter_info->ethertype_filters[i].etqs); 5014 txgbe_flush(hw); 5015 } 5016 } 5017 } 5018 5019 /* restore SYN filter */ 5020 static inline void 5021 txgbe_syn_filter_restore(struct rte_eth_dev *dev) 5022 { 5023 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5024 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5025 uint32_t synqf; 5026 5027 synqf = filter_info->syn_info; 5028 5029 if (synqf & TXGBE_SYNCLS_ENA) { 5030 wr32(hw, TXGBE_SYNCLS, synqf); 5031 txgbe_flush(hw); 5032 } 5033 } 5034 5035 /* restore L2 tunnel filter */ 5036 static inline void 5037 txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) 5038 { 5039 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); 5040 struct txgbe_l2_tn_filter *node; 5041 struct txgbe_l2_tunnel_conf l2_tn_conf; 5042 5043 TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { 5044 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; 5045 l2_tn_conf.tunnel_id = node->key.tn_id; 5046 l2_tn_conf.pool = node->pool; 5047 (void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); 5048 } 5049 } 5050 5051 /* restore rss filter */ 5052 static inline void 5053 txgbe_rss_filter_restore(struct rte_eth_dev *dev) 5054 { 5055 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5056 5057 if (filter_info->rss_info.conf.queue_num) 5058 txgbe_config_rss_filter(dev, 5059 &filter_info->rss_info, TRUE); 5060 } 5061 5062 static int 5063 txgbe_filter_restore(struct rte_eth_dev *dev) 5064 { 5065 txgbe_ntuple_filter_restore(dev); 5066 txgbe_ethertype_filter_restore(dev); 5067 txgbe_syn_filter_restore(dev); 5068 txgbe_fdir_filter_restore(dev); 5069 txgbe_l2_tn_filter_restore(dev); 5070 txgbe_rss_filter_restore(dev); 5071 5072 return 0; 5073 } 5074 5075 static void 5076 txgbe_l2_tunnel_conf(struct rte_eth_dev *dev) 5077 { 5078 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); 5079 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5080 5081 if (l2_tn_info->e_tag_en) 5082 (void)txgbe_e_tag_enable(hw); 5083 5084 if (l2_tn_info->e_tag_fwd_en) 5085 (void)txgbe_e_tag_forwarding_en_dis(dev, 1); 5086 5087 (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); 5088 } 5089 5090 /* remove all the n-tuple filters */ 5091 void 5092 txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) 5093 { 5094 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5095 struct txgbe_5tuple_filter *p_5tuple; 5096 5097 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) 5098 txgbe_remove_5tuple_filter(dev, p_5tuple); 5099 } 5100 5101 /* remove all the ether type filters */ 5102 void 5103 txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) 5104 { 5105 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5106 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5107 int i; 5108 5109 for (i = 0; i < TXGBE_ETF_ID_MAX; i++) { 5110 if (filter_info->ethertype_mask & (1 << i) && 5111 !filter_info->ethertype_filters[i].conf) { 5112 (void)txgbe_ethertype_filter_remove(filter_info, 5113 (uint8_t)i); 5114 wr32(hw, TXGBE_ETFLT(i), 0); 5115 wr32(hw, TXGBE_ETCLS(i), 0); 5116 txgbe_flush(hw); 
5117 } 5118 } 5119 } 5120 5121 /* remove the SYN filter */ 5122 void 5123 txgbe_clear_syn_filter(struct rte_eth_dev *dev) 5124 { 5125 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5126 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5127 5128 if (filter_info->syn_info & TXGBE_SYNCLS_ENA) { 5129 filter_info->syn_info = 0; 5130 5131 wr32(hw, TXGBE_SYNCLS, 0); 5132 txgbe_flush(hw); 5133 } 5134 } 5135 5136 /* remove all the L2 tunnel filters */ 5137 int 5138 txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) 5139 { 5140 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); 5141 struct txgbe_l2_tn_filter *l2_tn_filter; 5142 struct txgbe_l2_tunnel_conf l2_tn_conf; 5143 int ret = 0; 5144 5145 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 5146 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; 5147 l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; 5148 l2_tn_conf.pool = l2_tn_filter->pool; 5149 ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); 5150 if (ret < 0) 5151 return ret; 5152 } 5153 5154 return 0; 5155 } 5156 5157 static const struct eth_dev_ops txgbe_eth_dev_ops = { 5158 .dev_configure = txgbe_dev_configure, 5159 .dev_infos_get = txgbe_dev_info_get, 5160 .dev_start = txgbe_dev_start, 5161 .dev_stop = txgbe_dev_stop, 5162 .dev_set_link_up = txgbe_dev_set_link_up, 5163 .dev_set_link_down = txgbe_dev_set_link_down, 5164 .dev_close = txgbe_dev_close, 5165 .dev_reset = txgbe_dev_reset, 5166 .promiscuous_enable = txgbe_dev_promiscuous_enable, 5167 .promiscuous_disable = txgbe_dev_promiscuous_disable, 5168 .allmulticast_enable = txgbe_dev_allmulticast_enable, 5169 .allmulticast_disable = txgbe_dev_allmulticast_disable, 5170 .link_update = txgbe_dev_link_update, 5171 .stats_get = txgbe_dev_stats_get, 5172 .xstats_get = txgbe_dev_xstats_get, 5173 .xstats_get_by_id = txgbe_dev_xstats_get_by_id, 5174 .stats_reset = txgbe_dev_stats_reset, 5175 .xstats_reset = txgbe_dev_xstats_reset, 5176 .xstats_get_names = txgbe_dev_xstats_get_names, 5177 .xstats_get_names_by_id = txgbe_dev_xstats_get_names_by_id, 5178 .queue_stats_mapping_set = txgbe_dev_queue_stats_mapping_set, 5179 .fw_version_get = txgbe_fw_version_get, 5180 .dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get, 5181 .mtu_set = txgbe_dev_mtu_set, 5182 .vlan_filter_set = txgbe_vlan_filter_set, 5183 .vlan_tpid_set = txgbe_vlan_tpid_set, 5184 .vlan_offload_set = txgbe_vlan_offload_set, 5185 .vlan_strip_queue_set = txgbe_vlan_strip_queue_set, 5186 .rx_queue_start = txgbe_dev_rx_queue_start, 5187 .rx_queue_stop = txgbe_dev_rx_queue_stop, 5188 .tx_queue_start = txgbe_dev_tx_queue_start, 5189 .tx_queue_stop = txgbe_dev_tx_queue_stop, 5190 .rx_queue_setup = txgbe_dev_rx_queue_setup, 5191 .rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable, 5192 .rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable, 5193 .rx_queue_release = txgbe_dev_rx_queue_release, 5194 .tx_queue_setup = txgbe_dev_tx_queue_setup, 5195 .tx_queue_release = txgbe_dev_tx_queue_release, 5196 .dev_led_on = txgbe_dev_led_on, 5197 .dev_led_off = txgbe_dev_led_off, 5198 .flow_ctrl_get = txgbe_flow_ctrl_get, 5199 .flow_ctrl_set = txgbe_flow_ctrl_set, 5200 .priority_flow_ctrl_set = txgbe_priority_flow_ctrl_set, 5201 .mac_addr_add = txgbe_add_rar, 5202 .mac_addr_remove = txgbe_remove_rar, 5203 .mac_addr_set = txgbe_set_default_mac_addr, 5204 .uc_hash_table_set = txgbe_uc_hash_table_set, 5205 .uc_all_hash_table_set = txgbe_uc_all_hash_table_set, 5206 .set_queue_rate_limit = txgbe_set_queue_rate_limit, 5207 .reta_update = 
txgbe_dev_rss_reta_update, 5208 .reta_query = txgbe_dev_rss_reta_query, 5209 .rss_hash_update = txgbe_dev_rss_hash_update, 5210 .rss_hash_conf_get = txgbe_dev_rss_hash_conf_get, 5211 .filter_ctrl = txgbe_dev_filter_ctrl, 5212 .set_mc_addr_list = txgbe_dev_set_mc_addr_list, 5213 .rxq_info_get = txgbe_rxq_info_get, 5214 .txq_info_get = txgbe_txq_info_get, 5215 .timesync_enable = txgbe_timesync_enable, 5216 .timesync_disable = txgbe_timesync_disable, 5217 .timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp, 5218 .timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp, 5219 .get_reg = txgbe_get_regs, 5220 .get_eeprom_length = txgbe_get_eeprom_length, 5221 .get_eeprom = txgbe_get_eeprom, 5222 .set_eeprom = txgbe_set_eeprom, 5223 .get_module_info = txgbe_get_module_info, 5224 .get_module_eeprom = txgbe_get_module_eeprom, 5225 .get_dcb_info = txgbe_dev_get_dcb_info, 5226 .timesync_adjust_time = txgbe_timesync_adjust_time, 5227 .timesync_read_time = txgbe_timesync_read_time, 5228 .timesync_write_time = txgbe_timesync_write_time, 5229 .udp_tunnel_port_add = txgbe_dev_udp_tunnel_port_add, 5230 .udp_tunnel_port_del = txgbe_dev_udp_tunnel_port_del, 5231 .tm_ops_get = txgbe_tm_ops_get, 5232 .tx_done_cleanup = txgbe_dev_tx_done_cleanup, 5233 }; 5234 5235 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd); 5236 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map); 5237 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci"); 5238 5239 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE); 5240 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE); 5241 5242 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX 5243 RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG); 5244 #endif 5245 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX 5246 RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG); 5247 #endif 5248 5249 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE 5250 RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG); 5251 #endif 5252
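/*
 * Illustrative sketch only, not part of the driver: applications reach the
 * dev_ops above through the generic ethdev API. Assuming 'port_id' identifies
 * a started port bound to this PMD, the PTP hooks registered in
 * txgbe_eth_dev_ops (.timesync_enable / .timesync_read_rx_timestamp) map onto
 * calls such as:
 *
 *	struct timespec ts;
 *
 *	if (rte_eth_timesync_enable(port_id) == 0 &&
 *	    rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("Rx PTP timestamp: %ld.%09ld\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */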