/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
#include "txgbe_regs_group.h"

static const struct reg_info txgbe_regs_general[] = {
	{TXGBE_RST, 1, 1, "TXGBE_RST"},
	{TXGBE_STAT, 1, 1, "TXGBE_STAT"},
	{TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
	{TXGBE_SDP, 1, 1, "TXGBE_SDP"},
	{TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
	{TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_nvm[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_fctl_others[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_rx[] = {
	{0, 0, 0, ""}
};

static struct reg_info txgbe_regs_tx[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_wakeup[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_dcb[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_mac[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbe_regs_diagnostic[] = {
	{0, 0, 0, ""},
};

/* PF registers */
static const struct reg_info *txgbe_regs_others[] = {
				txgbe_regs_general,
				txgbe_regs_nvm,
				txgbe_regs_interrupt,
				txgbe_regs_fctl_others,
				txgbe_regs_rxdma,
				txgbe_regs_rx,
				txgbe_regs_tx,
				txgbe_regs_wakeup,
				txgbe_regs_dcb,
				txgbe_regs_mac,
				txgbe_regs_diagnostic,
				NULL};

static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
					uint16_t queue);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
					  struct rte_intr_handle *handle);
static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
				      struct rte_intr_handle *handle);
static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);

static int txgbe_filter_restore(struct rte_eth_dev *dev);
static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);

#define TXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define TXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define TXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
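
/*
 * Worked example for the bitmap macros above (assuming 32-bit bitmap
 * words, with NBBY = 8 bits per byte): for queue q = 35,
 * idx = 35 / 32 = 1 and bit = 35 % 32 = 3, so TXGBE_SET_HWSTRIP(h, 35)
 * sets bit 3 of (h)->bitmap[1].
 */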

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_SP1000) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

#define HW_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, m)}
#define HW_XSTAT_NAME(m, n) {n, offsetof(struct txgbe_hw_stats, m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = {
	/* MNG RxTx */
	HW_XSTAT(mng_bmc2host_packets),
	HW_XSTAT(mng_host2bmc_packets),
	/* Basic RxTx */
	HW_XSTAT(rx_packets),
	HW_XSTAT(tx_packets),
	HW_XSTAT(rx_bytes),
	HW_XSTAT(tx_bytes),
	HW_XSTAT(rx_total_bytes),
	HW_XSTAT(rx_total_packets),
	HW_XSTAT(tx_total_packets),
	HW_XSTAT(rx_total_missed_packets),
	HW_XSTAT(rx_broadcast_packets),
	HW_XSTAT(rx_multicast_packets),
	HW_XSTAT(rx_management_packets),
	HW_XSTAT(tx_management_packets),
	HW_XSTAT(rx_management_dropped),

	/* Basic Error */
	HW_XSTAT(rx_crc_errors),
	HW_XSTAT(rx_illegal_byte_errors),
	HW_XSTAT(rx_error_bytes),
	HW_XSTAT(rx_mac_short_packet_dropped),
	HW_XSTAT(rx_length_errors),
	HW_XSTAT(rx_undersize_errors),
	HW_XSTAT(rx_fragment_errors),
	HW_XSTAT(rx_oversize_errors),
	HW_XSTAT(rx_jabber_errors),
	HW_XSTAT(rx_l3_l4_xsum_error),
	HW_XSTAT(mac_local_errors),
	HW_XSTAT(mac_remote_errors),

	/* Flow Director */
	HW_XSTAT(flow_director_added_filters),
	HW_XSTAT(flow_director_removed_filters),
	HW_XSTAT(flow_director_filter_add_errors),
	HW_XSTAT(flow_director_filter_remove_errors),
	HW_XSTAT(flow_director_matched_filters),
	HW_XSTAT(flow_director_missed_filters),

	/* FCoE */
	HW_XSTAT(rx_fcoe_crc_errors),
	HW_XSTAT(rx_fcoe_mbuf_allocation_errors),
	HW_XSTAT(rx_fcoe_dropped),
	HW_XSTAT(rx_fcoe_packets),
	HW_XSTAT(tx_fcoe_packets),
	HW_XSTAT(rx_fcoe_bytes),
	HW_XSTAT(tx_fcoe_bytes),
	HW_XSTAT(rx_fcoe_no_ddp),
	HW_XSTAT(rx_fcoe_no_ddp_ext_buff),

	/* MACSEC */
	HW_XSTAT(tx_macsec_pkts_untagged),
	HW_XSTAT(tx_macsec_pkts_encrypted),
	HW_XSTAT(tx_macsec_pkts_protected),
	HW_XSTAT(tx_macsec_octets_encrypted),
	HW_XSTAT(tx_macsec_octets_protected),
	HW_XSTAT(rx_macsec_pkts_untagged),
	HW_XSTAT(rx_macsec_pkts_badtag),
	HW_XSTAT(rx_macsec_pkts_nosci),
	HW_XSTAT(rx_macsec_pkts_unknownsci),
	HW_XSTAT(rx_macsec_octets_decrypted),
	HW_XSTAT(rx_macsec_octets_validated),
	HW_XSTAT(rx_macsec_sc_pkts_unchecked),
	HW_XSTAT(rx_macsec_sc_pkts_delayed),
	HW_XSTAT(rx_macsec_sc_pkts_late),
	HW_XSTAT(rx_macsec_sa_pkts_ok),
	HW_XSTAT(rx_macsec_sa_pkts_invalid),
	HW_XSTAT(rx_macsec_sa_pkts_notvalid),
	HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
	HW_XSTAT(rx_macsec_sa_pkts_notusingsa),

	/* MAC RxTx */
	HW_XSTAT(rx_size_64_packets),
	HW_XSTAT(rx_size_65_to_127_packets),
	HW_XSTAT(rx_size_128_to_255_packets),
	HW_XSTAT(rx_size_256_to_511_packets),
	HW_XSTAT(rx_size_512_to_1023_packets),
	HW_XSTAT(rx_size_1024_to_max_packets),
	HW_XSTAT(tx_size_64_packets),
	HW_XSTAT(tx_size_65_to_127_packets),
	HW_XSTAT(tx_size_128_to_255_packets),
	HW_XSTAT(tx_size_256_to_511_packets),
	HW_XSTAT(tx_size_512_to_1023_packets),
	HW_XSTAT(tx_size_1024_to_max_packets),

	/* Flow Control */
	HW_XSTAT(tx_xon_packets),
	HW_XSTAT(rx_xon_packets),
	HW_XSTAT(tx_xoff_packets),
	HW_XSTAT(rx_xoff_packets),

	HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
	HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
	HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
	HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
};

#define TXGBE_NB_HW_STATS (sizeof(rte_txgbe_stats_strings) / \
			   sizeof(rte_txgbe_stats_strings[0]))
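
/*
 * The name/offset pairs above are consumed generically when xstats are
 * read: the value lives at the recorded offset from the base of
 * struct txgbe_hw_stats. A minimal sketch (hypothetical helper, assuming
 * the member holding the offset is named "offset"):
 *
 *	static uint64_t
 *	txgbe_hw_xstat_value(struct txgbe_hw_stats *stats, unsigned int i)
 *	{
 *		return *(uint64_t *)((char *)stats +
 *				rte_txgbe_stats_strings[i].offset);
 *	}
 */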

/* Per-priority statistics */
#define UP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, up[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_up_strings[] = {
	UP_XSTAT(rx_up_packets),
	UP_XSTAT(tx_up_packets),
	UP_XSTAT(rx_up_bytes),
	UP_XSTAT(tx_up_bytes),
	UP_XSTAT(rx_up_drop_packets),

	UP_XSTAT(tx_up_xon_packets),
	UP_XSTAT(rx_up_xon_packets),
	UP_XSTAT(tx_up_xoff_packets),
	UP_XSTAT(rx_up_xoff_packets),
	UP_XSTAT(rx_up_dropped),
	UP_XSTAT(rx_up_mbuf_alloc_errors),
	UP_XSTAT(tx_up_xon2off_packets),
};

#define TXGBE_NB_UP_STATS (sizeof(rte_txgbe_up_strings) / \
			   sizeof(rte_txgbe_up_strings[0]))

/* Per-queue statistics */
#define QP_XSTAT(m) {#m, offsetof(struct txgbe_hw_stats, qp[0].m)}
static const struct rte_txgbe_xstats_name_off rte_txgbe_qp_strings[] = {
	QP_XSTAT(rx_qp_packets),
	QP_XSTAT(tx_qp_packets),
	QP_XSTAT(rx_qp_bytes),
	QP_XSTAT(tx_qp_bytes),
	QP_XSTAT(rx_qp_mc_packets),
};

#define TXGBE_NB_QP_STATS (sizeof(rte_txgbe_qp_strings) / \
			   sizeof(rte_txgbe_qp_strings[0]))

static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:
		return 1;
	default:
		return 0;
	}
}

static inline int32_t
txgbe_pf_reset_hw(struct txgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = hw->mac.reset_hw(hw);

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
	txgbe_flush(hw);

	if (status == TXGBE_ERR_SFP_NOT_PRESENT)
		status = 0;
	return status;
}

static inline void
txgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	wr32(hw, TXGBE_IENMISC, intr->mask_misc);
	wr32(hw, TXGBE_IMC(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMC(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}

static void
txgbe_disable_intr(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, TXGBE_IENMISC, ~BIT_MASK32);
	wr32(hw, TXGBE_IMS(0), TXGBE_IMC_MASK);
	wr32(hw, TXGBE_IMS(1), TXGBE_IMC_MASK);
	txgbe_flush(hw);
}
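
/*
 * Note on the interrupt helpers above: from the way they are paired,
 * writing TXGBE_IMC(n) appears to clear (unmask) interrupt bits while
 * writing TXGBE_IMS(n) sets (masks) them; txgbe_enable_intr() also
 * restores the miscellaneous mask saved in intr->mask_misc. This reading
 * is inferred from the code, not from a datasheet.
 */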

static int
txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_stat_mappings *stat_mappings =
		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if (hw->mac.type != txgbe_mac_raptor)
		return -ENOSYS;

	/* stat_idx must fit within a single QMAP field */
	if (stat_idx & ~QMAP_FIELD_RESERVED_BITS_MASK)
		return -EIO;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= TXGBE_NB_STAT_MAPPING) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsm[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsm[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
	return 0;
}
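
/*
 * Worked example for the mapping above (assuming 8-bit QMAP fields,
 * i.e. QSM_REG_NB_BITS_PER_QMAP_FIELD == 8 and
 * NB_QMAP_FIELDS_PER_QSM_REG == 4): mapping queue_id 5 to stat_idx 2
 * gives n = 5 / 4 = 1 and offset = 5 % 4 = 1, so bits 15:8 of RQSMR[1]
 * (or TQSM[1] for TX) are cleared and then set to 2.
 */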

static void
txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
{
	int i;
	u8 bwgp;
	struct txgbe_dcb_tc_config *tc;

	UNREFERENCED_PARAMETER(hw);

	dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
	dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
	bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
		tc->pfc = txgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
		dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
		dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
	}
	dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities */
	dcb_config->support.capabilities = 0xFF;
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
txgbe_swfw_lock_reset(struct txgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to make it almost certain
	 * that if the lock cannot be taken it is due to an improperly held
	 * semaphore.
	 */
	mask = TXGBE_MNGSEM_SWPHY |
	       TXGBE_MNGSEM_SWMBX |
	       TXGBE_MNGSEM_SWFLASH;
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}

static int
txgbe_handle_devarg(__rte_unused const char *key, const char *value,
		    void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*n = (uint16_t)strtoul(value, NULL, 10);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -1;

	return 0;
}

static void
txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	u16 auto_neg = 1;
	u16 poll = 0;
	u16 present = 0;
	u16 sgmii = 0;
	u16 ffe_set = 0;
	u16 ffe_main = 27;
	u16 ffe_pre = 8;
	u16 ffe_post = 44;

	if (devargs == NULL)
		goto null;

	kvlist = rte_kvargs_parse(devargs->args, txgbe_valid_arguments);
	if (kvlist == NULL)
		goto null;

	rte_kvargs_process(kvlist, TXGBE_DEVARG_BP_AUTO,
			   &txgbe_handle_devarg, &auto_neg);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_POLL,
			   &txgbe_handle_devarg, &poll);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_PRESENT,
			   &txgbe_handle_devarg, &present);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_KX_SGMII,
			   &txgbe_handle_devarg, &sgmii);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_SET,
			   &txgbe_handle_devarg, &ffe_set);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_MAIN,
			   &txgbe_handle_devarg, &ffe_main);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_PRE,
			   &txgbe_handle_devarg, &ffe_pre);
	rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_POST,
			   &txgbe_handle_devarg, &ffe_post);
	rte_kvargs_free(kvlist);

null:
	hw->devarg.auto_neg = auto_neg;
	hw->devarg.poll = poll;
	hw->devarg.present = present;
	hw->devarg.sgmii = sgmii;
	hw->phy.ffe_set = ffe_set;
	hw->phy.ffe_main = ffe_main;
	hw->phy.ffe_pre = ffe_pre;
	hw->phy.ffe_post = ffe_post;
}
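
/*
 * Devargs usage sketch. The key strings come from the TXGBE_DEVARG_*
 * macros in the header, so treat the exact names here as illustrative:
 *
 *	dpdk-testpmd -a 0000:01:00.0,auto_neg=1,poll=0,present=0,sgmii=0
 *
 * Keys that are absent or fail to parse keep the defaults set above.
 */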

static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
	struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	uint16_t csum;
	int err, i, ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;
	eth_dev->rx_queue_count = txgbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not be initialized by the primary process
		 */
		if (eth_dev->data->tx_queues) {
			uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
		16, TXGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	txgbe_parse_devargs(hw, pci_dev->device.devargs);
	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process */
	if (txgbe_ipsec_ctx_create(eth_dev))
		return -ENOMEM;
#endif

	/* Initialize DCB configuration */
	memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
	txgbe_dcb_init(hw, dcb_config);

	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = txgbe_fc_full;
	hw->fc.current_mode = txgbe_fc_full;
	hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
		hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
	}
	hw->fc.send_xon = 1;

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);

	/*
	 * Devices with copper phys will fail to initialise if txgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in txgbe_identify_phy() for all devices,
	 * but for non-copper devices, txgbe_identify_sfp_module() is
	 * also called. See txgbe_identify_phy(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the BIOS. A delay of 200ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		rte_delay_ms(200);
		err = hw->mac.init_hw(hw);
	}

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
		err = 0;

	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	}
	if (err) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Reset the hw statistics */
	txgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	txgbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = txgbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = rd32(hw, TXGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= TXGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= TXGBE_PORTCTL_RSTDONE;
	wr32(hw, TXGBE_PORTCTL, ctrl_ext);
	txgbe_flush(hw);

	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   txgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	txgbe_enable_intr(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct txgbe_filter_info));

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* initialize flow director filter list & hash */
	txgbe_fdir_filter_init(eth_dev);

	/* initialize l2 tunnel filter list & hash */
	txgbe_l2_tn_filter_init(eth_dev);

	/* initialize flow filter lists */
	txgbe_filterlist_init();

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));

	/* initialize Traffic Manager configuration */
	txgbe_tm_conf_init(eth_dev);

	return 0;
}

static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbe_dev_close(eth_dev);

	return 0;
}

static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
	struct txgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple,
			     entries);
		rte_free(p_5tuple);
	}
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);

	return 0;
}

static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
	struct txgbe_fdir_filter *fdir_filter;

	rte_free(fdir_info->hash_map);
	rte_hash_free(fdir_info->hash_handle);

	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	return 0;
}

static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
	struct txgbe_l2_tn_filter *l2_tn_filter;

	rte_free(l2_tn_info->hash_map);
	rte_hash_free(l2_tn_info->hash_handle);

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
			     l2_tn_filter,
			     entries);
		rte_free(l2_tn_filter);
	}

	return 0;
}

static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = TXGBE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct txgbe_atr_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", TDEV_NAME(eth_dev));
	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("txgbe",
					  sizeof(struct txgbe_fdir_filter *) *
					  TXGBE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		return -ENOMEM;
	}
	fdir_info->mask_added = FALSE;

	return 0;
}
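
/*
 * Both filter-table init helpers (flow director above, L2 tunnel below)
 * follow the same pattern: an rte_hash keyed on the filter's match fields
 * yields a slot index, hash_map[] maps that index back to the filter
 * object, and a TAILQ keeps all filters iterable for restore and flush.
 * This description is inferred from the structures set up here.
 */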

static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
{
	struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters l2_tn_hash_params = {
		.name = l2_tn_hash_name,
		.entries = TXGBE_MAX_L2_TN_FILTER_NUM,
		.key_len = sizeof(struct txgbe_l2_tn_key),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&l2_tn_info->l2_tn_list);
	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
		 "l2_tn_%s", TDEV_NAME(eth_dev));
	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
	if (!l2_tn_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
		return -EINVAL;
	}
	l2_tn_info->hash_map = rte_zmalloc("txgbe",
					   sizeof(struct txgbe_l2_tn_filter *) *
					   TXGBE_MAX_L2_TN_FILTER_NUM,
					   0);
	if (!l2_tn_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for L2 TN hash map!");
		return -ENOMEM;
	}
	l2_tn_info->e_tag_en = FALSE;
	l2_tn_info->e_tag_fwd_en = FALSE;
	l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;

	return 0;
}

static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
				  sizeof(struct txgbe_adapter),
				  eth_dev_pci_specific_init, pci_dev,
				  eth_txgbe_dev_init, NULL);
}

static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return 0;

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};

static int
txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
	vfta = rd32(hw, TXGBE_VLANTBL(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	wr32(hw, TXGBE_VLANTBL(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
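
/*
 * VFTA indexing example for the function above: the table is an array of
 * 32-bit words, so for vlan_id = 100, vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4; enabling VLAN 100 sets bit 4 of
 * TXGBE_VLANTBL(3) and mirrors it into the shadow_vfta copy.
 */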

static void
txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rx_queue *rxq;
	bool restart;
	uint32_t rxcfg, rxbal, rxbah;

	if (on)
		txgbe_vlan_hw_strip_enable(dev, queue);
	else
		txgbe_vlan_hw_strip_disable(dev, queue);

	rxq = dev->data->rx_queues[queue];
	rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
	rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
	rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			!(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg |= TXGBE_RXCFG_VLAN;
	} else {
		restart = (rxcfg & TXGBE_RXCFG_ENA) &&
			(rxcfg & TXGBE_RXCFG_VLAN);
		rxcfg &= ~TXGBE_RXCFG_VLAN;
	}
	rxcfg &= ~TXGBE_RXCFG_ENA;

	if (restart) {
		/* set vlan strip for ring */
		txgbe_dev_rx_queue_stop(dev, queue);
		wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal);
		wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah);
		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg);
		txgbe_dev_rx_queue_start(dev, queue);
	}
}

static int
txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret = 0;
	uint32_t portctrl, vlan_ext, qinq;

	portctrl = rd32(hw, TXGBE_PORTCTL);

	vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
	qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		if (vlan_ext) {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR,
				    "Inner type is not supported by single VLAN");
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_LSB_MASK,
				TXGBE_TAGTPID_LSB(tpid));
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		if (vlan_ext) {
			/* Only the high 16 bits are valid */
			wr32m(hw, TXGBE_EXTAG,
				TXGBE_EXTAG_VLAN_MASK,
				TXGBE_EXTAG_VLAN(tpid));
		} else {
			wr32m(hw, TXGBE_VLANCTL,
				TXGBE_VLANCTL_TPID_MASK,
				TXGBE_VLANCTL_TPID(tpid));
			wr32m(hw, TXGBE_DMATXCTRL,
				TXGBE_DMATXCTRL_TPID_MASK,
				TXGBE_DMATXCTRL_TPID(tpid));
		}

		if (qinq) {
			wr32m(hw, TXGBE_TAGTPID(0),
				TXGBE_TAGTPID_MSB_MASK,
				TXGBE_TAGTPID_MSB(tpid));
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		return -EINVAL;
	}

	return ret;
}

void
txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);
}

void
txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = rd32(hw, TXGBE_VLANCTL);
	vlnctrl &= ~TXGBE_VLANCTL_CFIENA;
	vlnctrl |= TXGBE_VLANCTL_VFE;
	wr32(hw, TXGBE_VLANCTL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < TXGBE_VFTA_SIZE; i++)
		wr32(hw, TXGBE_VLANTBL(i), shadow_vfta->vfta[i]);
}

void
txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(dev);
	struct txgbe_rx_queue *rxq;

	if (queue >= TXGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		TXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		TXGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
		rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl &= ~TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record this setting for HW strip per queue */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
txgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	ctrl |= TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), ctrl);

	/* record this setting for HW strip per queue */
	txgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl &= ~TXGBE_PORTCTL_VLANEXT;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}

static void
txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl |= TXGBE_PORTCTL_VLANEXT;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}

static void
txgbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl &= ~TXGBE_PORTCTL_QINQ;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}

static void
txgbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	ctrl = rd32(hw, TXGBE_PORTCTL);
	ctrl |= TXGBE_PORTCTL_QINQ | TXGBE_PORTCTL_VLANEXT;
	wr32(hw, TXGBE_PORTCTL, ctrl);
}

void
txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct txgbe_rx_queue *rxq;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			txgbe_vlan_strip_queue_set(dev, i, 1);
		else
			txgbe_vlan_strip_queue_set(dev, i, 0);
	}
}

void
txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct txgbe_rx_queue *rxq;

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & RTE_ETH_VLAN_STRIP_MASK)
		txgbe_vlan_hw_strip_config(dev);

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			txgbe_vlan_hw_filter_enable(dev);
		else
			txgbe_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			txgbe_vlan_hw_extend_enable(dev);
		else
			txgbe_vlan_hw_extend_disable(dev);
	}

	if (mask & RTE_ETH_QINQ_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
			txgbe_qinq_hw_strip_enable(dev);
		else
			txgbe_qinq_hw_strip_disable(dev);
	}

	return 0;
}

static int
txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	txgbe_config_vlan_strip_on_all_queues(dev, mask);

	txgbe_vlan_offload_config(dev, mask);

	return 0;
}

static void
txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	/* VLNCTL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);

	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
	wr32(hw, TXGBE_VLANCTL, vlanctrl);
}

static int
txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	switch (nb_rx_q) {
	case 1:
	case 2:
		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
		break;
	case 4:
		RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
		break;
	default:
		return -EINVAL;
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	return 0;
}
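
/*
 * Example of the pool math above (a sketch, assuming
 * TXGBE_MAX_RX_QUEUE_NUM == 128): with nb_rx_q == 2 the device is split
 * into RTE_ETH_64_POOLS, so nb_q_per_pool = 128 / 64 = 2 and the PF's
 * default pool queues start at max_vfs * 2.
 */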
"VMDQ+DCB+RSS mq_mode is" 1415 " not supported."); 1416 return -EINVAL; 1417 } 1418 /* check configuration for vmdb+dcb mode */ 1419 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) { 1420 const struct rte_eth_vmdq_dcb_conf *conf; 1421 1422 if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) { 1423 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 1424 TXGBE_VMDQ_DCB_NB_QUEUES); 1425 return -EINVAL; 1426 } 1427 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 1428 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || 1429 conf->nb_queue_pools == RTE_ETH_32_POOLS)) { 1430 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 1431 " nb_queue_pools must be %d or %d.", 1432 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); 1433 return -EINVAL; 1434 } 1435 } 1436 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) { 1437 const struct rte_eth_vmdq_dcb_tx_conf *conf; 1438 1439 if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) { 1440 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 1441 TXGBE_VMDQ_DCB_NB_QUEUES); 1442 return -EINVAL; 1443 } 1444 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 1445 if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS || 1446 conf->nb_queue_pools == RTE_ETH_32_POOLS)) { 1447 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 1448 " nb_queue_pools != %d and" 1449 " nb_queue_pools != %d.", 1450 RTE_ETH_16_POOLS, RTE_ETH_32_POOLS); 1451 return -EINVAL; 1452 } 1453 } 1454 1455 /* For DCB mode check our configuration before we go further */ 1456 if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) { 1457 const struct rte_eth_dcb_rx_conf *conf; 1458 1459 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 1460 if (!(conf->nb_tcs == RTE_ETH_4_TCS || 1461 conf->nb_tcs == RTE_ETH_8_TCS)) { 1462 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 1463 " and nb_tcs != %d.", 1464 RTE_ETH_4_TCS, RTE_ETH_8_TCS); 1465 return -EINVAL; 1466 } 1467 } 1468 1469 if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) { 1470 const struct rte_eth_dcb_tx_conf *conf; 1471 1472 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 1473 if (!(conf->nb_tcs == RTE_ETH_4_TCS || 1474 conf->nb_tcs == RTE_ETH_8_TCS)) { 1475 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 1476 " and nb_tcs != %d.", 1477 RTE_ETH_4_TCS, RTE_ETH_8_TCS); 1478 return -EINVAL; 1479 } 1480 } 1481 } 1482 return 0; 1483 } 1484 1485 static int 1486 txgbe_dev_configure(struct rte_eth_dev *dev) 1487 { 1488 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 1489 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 1490 int ret; 1491 1492 PMD_INIT_FUNC_TRACE(); 1493 1494 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 1495 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 1496 1497 /* multiple queue mode checking */ 1498 ret = txgbe_check_mq_mode(dev); 1499 if (ret != 0) { 1500 PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.", 1501 ret); 1502 return ret; 1503 } 1504 1505 /* set flag to update link status after init */ 1506 intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE; 1507 1508 /* 1509 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 1510 * allocation Rx preconditions we will reset it. 

static int
txgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = txgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
	 * allocation Rx preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static void
txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	uint32_t gpie;

	gpie = rd32(hw, TXGBE_GPIOINTEN);
	gpie |= TXGBE_GPIOBIT_6;
	wr32(hw, TXGBE_GPIOINTEN, gpie);
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
	intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
}

int
txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			uint16_t tx_rate, uint64_t q_msk)
{
	struct txgbe_hw *hw;
	struct txgbe_vf_info *vfinfo;
	struct rte_eth_link link;
	uint8_t nb_q_per_pool;
	uint32_t queue_stride;
	uint32_t queue_idx, idx = 0, vf_idx;
	uint32_t queue_end;
	uint16_t total_rate = 0;
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
	if (ret < 0)
		return ret;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (tx_rate > link.link_speed)
		return -EINVAL;

	if (q_msk == 0)
		return 0;

	hw = TXGBE_DEV_HW(dev);
	vfinfo = *(TXGBE_DEV_VFDATA(dev));
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	queue_idx = vf * queue_stride;
	queue_end = queue_idx + nb_q_per_pool - 1;
	if (queue_end >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (vfinfo) {
		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
			if (vf_idx == vf)
				continue;
			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
				idx++)
				total_rate += vfinfo[vf_idx].tx_rate[idx];
		}
	} else {
		return -EINVAL;
	}

	/* Store tx_rate for this vf. */
	for (idx = 0; idx < nb_q_per_pool; idx++) {
		if (((uint64_t)0x1 << idx) & q_msk) {
			if (vfinfo[vf].tx_rate[idx] != tx_rate)
				vfinfo[vf].tx_rate[idx] = tx_rate;
			total_rate += tx_rate;
		}
	}

	if (total_rate > dev->data->dev_link.link_speed) {
		/* Reset the stored TX rates of the VF if they would exceed
		 * the link speed.
		 */
		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
		return -EINVAL;
	}

	/* Set ARBTXRATE of each queue/pool for vf X */
	for (; queue_idx <= queue_end; queue_idx++) {
		if (0x1 & q_msk)
			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
		q_msk = q_msk >> 1;
	}

	return 0;
}
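
/*
 * q_msk example for the function above: q_msk = 0x3 applies tx_rate to
 * queues 0 and 1 of the given VF; the final loop walks the VF's queue
 * range and programs ARBTXRATE only for queues whose bit is set in q_msk.
 */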

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
txgbe_dev_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = 0;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint16_t vf, idx;
	uint32_t *link_speeds;
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

	PMD_INIT_FUNC_TRACE();

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	txgbe_stop_hw(hw);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = txgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;
	hw->dev_start = true;

	/* configure PF module if SRIOV enabled */
	txgbe_pf_host_configure(dev);

	txgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}
	/* configure msix for sleep until rx interrupt */
	txgbe_configure_msix(dev);

	/* initialize transmission unit */
	txgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = txgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
		RTE_ETH_VLAN_EXTEND_MASK;
	err = txgbe_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		txgbe_vmdq_vlan_hw_filter_enable(dev);
	}

	/* Configure DCB hw */
	txgbe_configure_pb(dev);
	txgbe_configure_port(dev);
	txgbe_configure_dcb(dev);

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		err = txgbe_fdir_configure(dev);
		if (err)
			goto error;
	}

	/* Restore vf rate limit */
	if (vfinfo != NULL) {
		for (vf = 0; vf < pci_dev->max_vfs; vf++)
			for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					txgbe_set_vf_rate_limit(dev, vf,
						vfinfo[vf].tx_rate[idx],
						1 << idx);
	}

	err = txgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		goto skip_link_setup;

	if (txgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.setup_sfp(hw);
		if (err)
			goto error;
	}

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
	}

	if ((hw->subsystem_device_id & 0xFF) != TXGBE_DEV_ID_KR_KX_KX4)
		err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	dev->data->dev_link.link_status = link_up;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
			RTE_ETH_LINK_SPEED_10G;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = (TXGBE_LINK_SPEED_100M_FULL |
			 TXGBE_LINK_SPEED_1GB_FULL |
			 TXGBE_LINK_SPEED_10GB_FULL);
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
			speed |= TXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
			speed |= TXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
			speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= TXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= TXGBE_LINK_SPEED_100M_FULL;
	}

	err = hw->mac.setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		txgbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			txgbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			txgbe_dev_lsc_interrupt_setup(dev, FALSE);
		txgbe_dev_macsec_interrupt_setup(dev);
		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     txgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		txgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	txgbe_enable_intr(dev);
	txgbe_l2_tunnel_conf(dev);
	txgbe_filter_restore(dev);

	if (tm_conf->root && !tm_conf->committed)
		PMD_DRV_LOG(WARNING,
			    "please call hierarchy_commit() "
			    "before starting the port");

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	txgbe_dev_link_update(dev, 0);

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	txgbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
txgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int vf;
	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	txgbe_disable_intr(hw);

	/* reset the NIC */
	txgbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	txgbe_stop_hw(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
	}

	txgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   txgbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	/* reset hierarchy commit */
	tm_conf->committed = false;

	adapter->rss_reta_updated = 0;
	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;
	hw->dev_start = false;

	return 0;
}

/*
 * Set device link up: enable tx.
 */
static int
txgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn on the copper */
		hw->phy.set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		hw->mac.enable_tx_laser(hw);
		hw->dev_start = true;
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
txgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	if (hw->phy.media_type == txgbe_media_type_copper) {
		/* Turn off the copper */
		hw->phy.set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		hw->mac.disable_tx_laser(hw);
		hw->dev_start = false;
		txgbe_dev_link_update(dev, 0);
	}

	return 0;
}

/*
 * Reset and stop device.
 */
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	txgbe_pf_reset_hw(hw);

	ret = txgbe_dev_stop(dev);

	txgbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	txgbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	txgbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				txgbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + TXGBE_LINK_UP_TIME));

	/* cancel the delayed handler before removing the device */
	rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev);

	/* uninitialize the PF if max_vfs is not zero */
	txgbe_pf_host_uninit(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	/* remove all the fdir filters & hashes */
	txgbe_fdir_filter_uninit(dev);

	/* remove all the L2 tunnel filters & hashes */
	txgbe_l2_tn_filter_uninit(dev);

	/* Remove all ntuple filters of the device */
	txgbe_ntuple_filter_uninit(dev);

	/* clear all the filter lists */
	txgbe_filterlist_flush();

	/* Remove all Traffic Manager configuration */
	txgbe_tm_conf_uninit(dev);

#ifdef RTE_LIB_SECURITY
	rte_free(dev->security_ctx);
#endif

	return ret;
}

/*
 * Reset PF device.
 */
static int
txgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset the PF port, it should notify
	 * all of its VFs so that they stay aligned with it. The detailed
	 * notification mechanism is PMD specific; for the txgbe PF it is
	 * rather complex. To avoid unexpected behavior in the VFs, a reset
	 * of the PF with SR-IOV active is currently not supported. It might
	 * be supported later.
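	 * Note: the reset below is implemented as eth_txgbe_dev_uninit()
	 * followed by eth_txgbe_dev_init(), so the application has to fully
	 * reconfigure the port afterwards.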
2053 */ 2054 if (dev->data->sriov.active) 2055 return -ENOTSUP; 2056 2057 ret = eth_txgbe_dev_uninit(dev); 2058 if (ret) 2059 return ret; 2060 2061 ret = eth_txgbe_dev_init(dev, NULL); 2062 2063 return ret; 2064 } 2065 2066 #define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \ 2067 { \ 2068 uint32_t current_counter = rd32(hw, reg); \ 2069 if (current_counter < last_counter) \ 2070 current_counter += 0x100000000LL; \ 2071 if (!hw->offset_loaded) \ 2072 last_counter = current_counter; \ 2073 counter = current_counter - last_counter; \ 2074 counter &= 0xFFFFFFFFLL; \ 2075 } 2076 2077 #define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2078 { \ 2079 uint64_t current_counter_lsb = rd32(hw, reg_lsb); \ 2080 uint64_t current_counter_msb = rd32(hw, reg_msb); \ 2081 uint64_t current_counter = (current_counter_msb << 32) | \ 2082 current_counter_lsb; \ 2083 if (current_counter < last_counter) \ 2084 current_counter += 0x1000000000LL; \ 2085 if (!hw->offset_loaded) \ 2086 last_counter = current_counter; \ 2087 counter = current_counter - last_counter; \ 2088 counter &= 0xFFFFFFFFFLL; \ 2089 } 2090 2091 void 2092 txgbe_read_stats_registers(struct txgbe_hw *hw, 2093 struct txgbe_hw_stats *hw_stats) 2094 { 2095 unsigned int i; 2096 2097 /* QP Stats */ 2098 for (i = 0; i < hw->nb_rx_queues; i++) { 2099 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i), 2100 hw->qp_last[i].rx_qp_packets, 2101 hw_stats->qp[i].rx_qp_packets); 2102 UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i), 2103 hw->qp_last[i].rx_qp_bytes, 2104 hw_stats->qp[i].rx_qp_bytes); 2105 UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i), 2106 hw->qp_last[i].rx_qp_mc_packets, 2107 hw_stats->qp[i].rx_qp_mc_packets); 2108 } 2109 2110 for (i = 0; i < hw->nb_tx_queues; i++) { 2111 UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i), 2112 hw->qp_last[i].tx_qp_packets, 2113 hw_stats->qp[i].tx_qp_packets); 2114 UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i), 2115 hw->qp_last[i].tx_qp_bytes, 2116 hw_stats->qp[i].tx_qp_bytes); 2117 } 2118 /* PB Stats */ 2119 for (i = 0; i < TXGBE_MAX_UP; i++) { 2120 hw_stats->up[i].rx_up_xon_packets += 2121 rd32(hw, TXGBE_PBRXUPXON(i)); 2122 hw_stats->up[i].rx_up_xoff_packets += 2123 rd32(hw, TXGBE_PBRXUPXOFF(i)); 2124 hw_stats->up[i].tx_up_xon_packets += 2125 rd32(hw, TXGBE_PBTXUPXON(i)); 2126 hw_stats->up[i].tx_up_xoff_packets += 2127 rd32(hw, TXGBE_PBTXUPXOFF(i)); 2128 hw_stats->up[i].tx_up_xon2off_packets += 2129 rd32(hw, TXGBE_PBTXUPOFF(i)); 2130 hw_stats->up[i].rx_up_dropped += 2131 rd32(hw, TXGBE_PBRXMISS(i)); 2132 } 2133 hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON); 2134 hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF); 2135 hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON); 2136 hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF); 2137 2138 /* DMA Stats */ 2139 hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT); 2140 hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT); 2141 2142 hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL); 2143 hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL); 2144 hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP); 2145 hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP); 2146 2147 /* MAC Stats */ 2148 hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL); 2149 hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL); 2150 hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL); 2151 2152 hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL); 2153 hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL); 2154 
hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL); 2155 2156 hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL); 2157 hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL); 2158 2159 hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L); 2160 hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L); 2161 hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L); 2162 hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L); 2163 hw_stats->rx_size_512_to_1023_packets += 2164 rd64(hw, TXGBE_MACRX512TO1023L); 2165 hw_stats->rx_size_1024_to_max_packets += 2166 rd64(hw, TXGBE_MACRX1024TOMAXL); 2167 hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L); 2168 hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L); 2169 hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L); 2170 hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L); 2171 hw_stats->tx_size_512_to_1023_packets += 2172 rd64(hw, TXGBE_MACTX512TO1023L); 2173 hw_stats->tx_size_1024_to_max_packets += 2174 rd64(hw, TXGBE_MACTX1024TOMAXL); 2175 2176 hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL); 2177 hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE); 2178 hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER); 2179 2180 /* MNG Stats */ 2181 hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS); 2182 hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC); 2183 hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG); 2184 hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG); 2185 2186 /* FCoE Stats */ 2187 hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC); 2188 hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST); 2189 hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC); 2190 hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC); 2191 hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC); 2192 hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC); 2193 hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC); 2194 2195 /* Flow Director Stats */ 2196 hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH); 2197 hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS); 2198 hw_stats->flow_director_added_filters += 2199 TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED)); 2200 hw_stats->flow_director_removed_filters += 2201 TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED)); 2202 hw_stats->flow_director_filter_add_errors += 2203 TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL)); 2204 hw_stats->flow_director_filter_remove_errors += 2205 TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL)); 2206 2207 /* MACsec Stats */ 2208 hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT); 2209 hw_stats->tx_macsec_pkts_encrypted += 2210 rd32(hw, TXGBE_LSECTX_ENCPKT); 2211 hw_stats->tx_macsec_pkts_protected += 2212 rd32(hw, TXGBE_LSECTX_PROTPKT); 2213 hw_stats->tx_macsec_octets_encrypted += 2214 rd32(hw, TXGBE_LSECTX_ENCOCT); 2215 hw_stats->tx_macsec_octets_protected += 2216 rd32(hw, TXGBE_LSECTX_PROTOCT); 2217 hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT); 2218 hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT); 2219 hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT); 2220 hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT); 2221 hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT); 2222 hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT); 2223 
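	/* MACsec secure-channel (SC) and secure-association (SA) counters */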
hw_stats->rx_macsec_sc_pkts_unchecked += 2224 rd32(hw, TXGBE_LSECRX_UNCHKPKT); 2225 hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT); 2226 hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT); 2227 for (i = 0; i < 2; i++) { 2228 hw_stats->rx_macsec_sa_pkts_ok += 2229 rd32(hw, TXGBE_LSECRX_OKPKT(i)); 2230 hw_stats->rx_macsec_sa_pkts_invalid += 2231 rd32(hw, TXGBE_LSECRX_INVPKT(i)); 2232 hw_stats->rx_macsec_sa_pkts_notvalid += 2233 rd32(hw, TXGBE_LSECRX_BADPKT(i)); 2234 } 2235 hw_stats->rx_macsec_sa_pkts_unusedsa += 2236 rd32(hw, TXGBE_LSECRX_INVSAPKT); 2237 hw_stats->rx_macsec_sa_pkts_notusingsa += 2238 rd32(hw, TXGBE_LSECRX_BADSAPKT); 2239 2240 hw_stats->rx_total_missed_packets = 0; 2241 for (i = 0; i < TXGBE_MAX_UP; i++) { 2242 hw_stats->rx_total_missed_packets += 2243 hw_stats->up[i].rx_up_dropped; 2244 } 2245 } 2246 2247 static int 2248 txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 2249 { 2250 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2251 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2252 struct txgbe_stat_mappings *stat_mappings = 2253 TXGBE_DEV_STAT_MAPPINGS(dev); 2254 uint32_t i, j; 2255 2256 txgbe_read_stats_registers(hw, hw_stats); 2257 2258 if (stats == NULL) 2259 return -EINVAL; 2260 2261 /* Fill out the rte_eth_stats statistics structure */ 2262 stats->ipackets = hw_stats->rx_packets; 2263 stats->ibytes = hw_stats->rx_bytes; 2264 stats->opackets = hw_stats->tx_packets; 2265 stats->obytes = hw_stats->tx_bytes; 2266 2267 memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets)); 2268 memset(&stats->q_opackets, 0, sizeof(stats->q_opackets)); 2269 memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes)); 2270 memset(&stats->q_obytes, 0, sizeof(stats->q_obytes)); 2271 memset(&stats->q_errors, 0, sizeof(stats->q_errors)); 2272 for (i = 0; i < TXGBE_MAX_QP; i++) { 2273 uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG; 2274 uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8; 2275 uint32_t q_map; 2276 2277 q_map = (stat_mappings->rqsm[n] >> offset) 2278 & QMAP_FIELD_RESERVED_BITS_MASK; 2279 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS 2280 ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS); 2281 stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets; 2282 stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes; 2283 2284 q_map = (stat_mappings->tqsm[n] >> offset) 2285 & QMAP_FIELD_RESERVED_BITS_MASK; 2286 j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS 2287 ? 
q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS); 2288 stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets; 2289 stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes; 2290 } 2291 2292 /* Rx Errors */ 2293 stats->imissed = hw_stats->rx_total_missed_packets + 2294 hw_stats->rx_dma_drop; 2295 stats->ierrors = hw_stats->rx_crc_errors + 2296 hw_stats->rx_mac_short_packet_dropped + 2297 hw_stats->rx_length_errors + 2298 hw_stats->rx_undersize_errors + 2299 hw_stats->rx_oversize_errors + 2300 hw_stats->rx_drop_packets + 2301 hw_stats->rx_illegal_byte_errors + 2302 hw_stats->rx_error_bytes + 2303 hw_stats->rx_fragment_errors + 2304 hw_stats->rx_fcoe_crc_errors + 2305 hw_stats->rx_fcoe_mbuf_allocation_errors; 2306 2307 /* Tx Errors */ 2308 stats->oerrors = 0; 2309 return 0; 2310 } 2311 2312 static int 2313 txgbe_dev_stats_reset(struct rte_eth_dev *dev) 2314 { 2315 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2316 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2317 2318 /* HW registers are cleared on read */ 2319 hw->offset_loaded = 0; 2320 txgbe_dev_stats_get(dev, NULL); 2321 hw->offset_loaded = 1; 2322 2323 /* Reset software totals */ 2324 memset(hw_stats, 0, sizeof(*hw_stats)); 2325 2326 return 0; 2327 } 2328 2329 /* This function calculates the number of xstats based on the current config */ 2330 static unsigned 2331 txgbe_xstats_calc_num(struct rte_eth_dev *dev) 2332 { 2333 int nb_queues = max(dev->data->nb_rx_queues, dev->data->nb_tx_queues); 2334 return TXGBE_NB_HW_STATS + 2335 TXGBE_NB_UP_STATS * TXGBE_MAX_UP + 2336 TXGBE_NB_QP_STATS * nb_queues; 2337 } 2338 2339 static inline int 2340 txgbe_get_name_by_id(uint32_t id, char *name, uint32_t size) 2341 { 2342 int nb, st; 2343 2344 /* Extended stats from txgbe_hw_stats */ 2345 if (id < TXGBE_NB_HW_STATS) { 2346 snprintf(name, size, "[hw]%s", 2347 rte_txgbe_stats_strings[id].name); 2348 return 0; 2349 } 2350 id -= TXGBE_NB_HW_STATS; 2351 2352 /* Priority Stats */ 2353 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) { 2354 nb = id / TXGBE_NB_UP_STATS; 2355 st = id % TXGBE_NB_UP_STATS; 2356 snprintf(name, size, "[p%u]%s", nb, 2357 rte_txgbe_up_strings[st].name); 2358 return 0; 2359 } 2360 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP; 2361 2362 /* Queue Stats */ 2363 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) { 2364 nb = id / TXGBE_NB_QP_STATS; 2365 st = id % TXGBE_NB_QP_STATS; 2366 snprintf(name, size, "[q%u]%s", nb, 2367 rte_txgbe_qp_strings[st].name); 2368 return 0; 2369 } 2370 id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP; 2371 2372 return -(int)(id + 1); 2373 } 2374 2375 static inline int 2376 txgbe_get_offset_by_id(uint32_t id, uint32_t *offset) 2377 { 2378 int nb, st; 2379 2380 /* Extended stats from txgbe_hw_stats */ 2381 if (id < TXGBE_NB_HW_STATS) { 2382 *offset = rte_txgbe_stats_strings[id].offset; 2383 return 0; 2384 } 2385 id -= TXGBE_NB_HW_STATS; 2386 2387 /* Priority Stats */ 2388 if (id < TXGBE_NB_UP_STATS * TXGBE_MAX_UP) { 2389 nb = id / TXGBE_NB_UP_STATS; 2390 st = id % TXGBE_NB_UP_STATS; 2391 *offset = rte_txgbe_up_strings[st].offset + 2392 nb * (TXGBE_NB_UP_STATS * sizeof(uint64_t)); 2393 return 0; 2394 } 2395 id -= TXGBE_NB_UP_STATS * TXGBE_MAX_UP; 2396 2397 /* Queue Stats */ 2398 if (id < TXGBE_NB_QP_STATS * TXGBE_MAX_QP) { 2399 nb = id / TXGBE_NB_QP_STATS; 2400 st = id % TXGBE_NB_QP_STATS; 2401 *offset = rte_txgbe_qp_strings[st].offset + 2402 nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t)); 2403 return 0; 2404 } 2405 2406 return -1; 2407 } 2408 2409 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev, 2410 struct rte_eth_xstat_name 
*xstats_names, unsigned int limit) 2411 { 2412 unsigned int i, count; 2413 2414 count = txgbe_xstats_calc_num(dev); 2415 if (xstats_names == NULL) 2416 return count; 2417 2418 /* Note: limit >= cnt_stats checked upstream 2419 * in rte_eth_xstats_names() 2420 */ 2421 limit = min(limit, count); 2422 2423 /* Extended stats from txgbe_hw_stats */ 2424 for (i = 0; i < limit; i++) { 2425 if (txgbe_get_name_by_id(i, xstats_names[i].name, 2426 sizeof(xstats_names[i].name))) { 2427 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); 2428 break; 2429 } 2430 } 2431 2432 return i; 2433 } 2434 2435 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, 2436 const uint64_t *ids, 2437 struct rte_eth_xstat_name *xstats_names, 2438 unsigned int limit) 2439 { 2440 unsigned int i; 2441 2442 if (ids == NULL) 2443 return txgbe_dev_xstats_get_names(dev, xstats_names, limit); 2444 2445 for (i = 0; i < limit; i++) { 2446 if (txgbe_get_name_by_id(ids[i], xstats_names[i].name, 2447 sizeof(xstats_names[i].name))) { 2448 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); 2449 return -1; 2450 } 2451 } 2452 2453 return i; 2454 } 2455 2456 static int 2457 txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 2458 unsigned int limit) 2459 { 2460 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2461 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2462 unsigned int i, count; 2463 2464 txgbe_read_stats_registers(hw, hw_stats); 2465 2466 /* If this is a reset xstats is NULL, and we have cleared the 2467 * registers by reading them. 2468 */ 2469 count = txgbe_xstats_calc_num(dev); 2470 if (xstats == NULL) 2471 return count; 2472 2473 limit = min(limit, txgbe_xstats_calc_num(dev)); 2474 2475 /* Extended stats from txgbe_hw_stats */ 2476 for (i = 0; i < limit; i++) { 2477 uint32_t offset = 0; 2478 2479 if (txgbe_get_offset_by_id(i, &offset)) { 2480 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); 2481 break; 2482 } 2483 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + offset); 2484 xstats[i].id = i; 2485 } 2486 2487 return i; 2488 } 2489 2490 static int 2491 txgbe_dev_xstats_get_(struct rte_eth_dev *dev, uint64_t *values, 2492 unsigned int limit) 2493 { 2494 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2495 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2496 unsigned int i, count; 2497 2498 txgbe_read_stats_registers(hw, hw_stats); 2499 2500 /* If this is a reset xstats is NULL, and we have cleared the 2501 * registers by reading them. 
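	 * (Reading is what clears the hardware counters; the xstats reset in
	 * txgbe_dev_xstats_reset() relies on exactly this behavior.)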
2502 */ 2503 count = txgbe_xstats_calc_num(dev); 2504 if (values == NULL) 2505 return count; 2506 2507 limit = min(limit, txgbe_xstats_calc_num(dev)); 2508 2509 /* Extended stats from txgbe_hw_stats */ 2510 for (i = 0; i < limit; i++) { 2511 uint32_t offset; 2512 2513 if (txgbe_get_offset_by_id(i, &offset)) { 2514 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); 2515 break; 2516 } 2517 values[i] = *(uint64_t *)(((char *)hw_stats) + offset); 2518 } 2519 2520 return i; 2521 } 2522 2523 static int 2524 txgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 2525 uint64_t *values, unsigned int limit) 2526 { 2527 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2528 unsigned int i; 2529 2530 if (ids == NULL) 2531 return txgbe_dev_xstats_get_(dev, values, limit); 2532 2533 for (i = 0; i < limit; i++) { 2534 uint32_t offset; 2535 2536 if (txgbe_get_offset_by_id(ids[i], &offset)) { 2537 PMD_INIT_LOG(WARNING, "id value %d isn't valid", i); 2538 break; 2539 } 2540 values[i] = *(uint64_t *)(((char *)hw_stats) + offset); 2541 } 2542 2543 return i; 2544 } 2545 2546 static int 2547 txgbe_dev_xstats_reset(struct rte_eth_dev *dev) 2548 { 2549 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2550 struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev); 2551 2552 /* HW registers are cleared on read */ 2553 hw->offset_loaded = 0; 2554 txgbe_read_stats_registers(hw, hw_stats); 2555 hw->offset_loaded = 1; 2556 2557 /* Reset software totals */ 2558 memset(hw_stats, 0, sizeof(*hw_stats)); 2559 2560 return 0; 2561 } 2562 2563 static int 2564 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2565 { 2566 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2567 u32 etrack_id; 2568 int ret; 2569 2570 hw->phy.get_fw_version(hw, &etrack_id); 2571 2572 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); 2573 if (ret < 0) 2574 return -EINVAL; 2575 2576 ret += 1; /* add the size of '\0' */ 2577 if (fw_size < (size_t)ret) 2578 return ret; 2579 else 2580 return 0; 2581 } 2582 2583 static int 2584 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 2585 { 2586 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2587 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2588 2589 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 2590 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 2591 dev_info->min_rx_bufsize = 1024; 2592 dev_info->max_rx_pktlen = 15872; 2593 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 2594 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC; 2595 dev_info->max_vfs = pci_dev->max_vfs; 2596 dev_info->max_vmdq_pools = RTE_ETH_64_POOLS; 2597 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 2598 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; 2599 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev); 2600 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) | 2601 dev_info->rx_queue_offload_capa); 2602 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev); 2603 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev); 2604 2605 dev_info->default_rxconf = (struct rte_eth_rxconf) { 2606 .rx_thresh = { 2607 .pthresh = TXGBE_DEFAULT_RX_PTHRESH, 2608 .hthresh = TXGBE_DEFAULT_RX_HTHRESH, 2609 .wthresh = TXGBE_DEFAULT_RX_WTHRESH, 2610 }, 2611 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH, 2612 .rx_drop_en = 0, 2613 .offloads = 0, 2614 }; 2615 2616 dev_info->default_txconf = (struct rte_eth_txconf) { 2617 .tx_thresh = { 2618 .pthresh = TXGBE_DEFAULT_TX_PTHRESH, 2619 .hthresh = 
TXGBE_DEFAULT_TX_HTHRESH, 2620 .wthresh = TXGBE_DEFAULT_TX_WTHRESH, 2621 }, 2622 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH, 2623 .offloads = 0, 2624 }; 2625 2626 dev_info->rx_desc_lim = rx_desc_lim; 2627 dev_info->tx_desc_lim = tx_desc_lim; 2628 2629 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 2630 dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128; 2631 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL; 2632 2633 dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G; 2634 dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M; 2635 2636 /* Driver-preferred Rx/Tx parameters */ 2637 dev_info->default_rxportconf.burst_size = 32; 2638 dev_info->default_txportconf.burst_size = 32; 2639 dev_info->default_rxportconf.nb_queues = 1; 2640 dev_info->default_txportconf.nb_queues = 1; 2641 dev_info->default_rxportconf.ring_size = 256; 2642 dev_info->default_txportconf.ring_size = 256; 2643 2644 return 0; 2645 } 2646 2647 const uint32_t * 2648 txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 2649 { 2650 if (dev->rx_pkt_burst == txgbe_recv_pkts || 2651 dev->rx_pkt_burst == txgbe_recv_pkts_lro_single_alloc || 2652 dev->rx_pkt_burst == txgbe_recv_pkts_lro_bulk_alloc || 2653 dev->rx_pkt_burst == txgbe_recv_pkts_bulk_alloc) 2654 return txgbe_get_supported_ptypes(); 2655 2656 return NULL; 2657 } 2658 2659 void 2660 txgbe_dev_setup_link_alarm_handler(void *param) 2661 { 2662 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 2663 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2664 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 2665 u32 speed; 2666 bool autoneg = false; 2667 2668 speed = hw->phy.autoneg_advertised; 2669 if (!speed) 2670 hw->mac.get_link_capabilities(hw, &speed, &autoneg); 2671 2672 hw->mac.setup_link(hw, speed, true); 2673 2674 intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; 2675 } 2676 2677 /* return 0 means link status changed, -1 means not changed */ 2678 int 2679 txgbe_dev_link_update_share(struct rte_eth_dev *dev, 2680 int wait_to_complete) 2681 { 2682 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 2683 struct rte_eth_link link; 2684 u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; 2685 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 2686 bool link_up; 2687 int err; 2688 int wait = 1; 2689 2690 memset(&link, 0, sizeof(link)); 2691 link.link_status = RTE_ETH_LINK_DOWN; 2692 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 2693 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 2694 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 2695 RTE_ETH_LINK_SPEED_FIXED); 2696 2697 hw->mac.get_link_status = true; 2698 2699 if (intr->flags & TXGBE_FLAG_NEED_LINK_CONFIG) 2700 return rte_eth_linkstatus_set(dev, &link); 2701 2702 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 2703 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 2704 wait = 0; 2705 2706 err = hw->mac.check_link(hw, &link_speed, &link_up, wait); 2707 2708 if (err != 0) { 2709 link.link_speed = RTE_ETH_SPEED_NUM_100M; 2710 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 2711 return rte_eth_linkstatus_set(dev, &link); 2712 } 2713 2714 if (link_up == 0) { 2715 if ((hw->subsystem_device_id & 0xFF) == 2716 TXGBE_DEV_ID_KR_KX_KX4) { 2717 hw->mac.bp_down_event(hw); 2718 } else if (hw->phy.media_type == txgbe_media_type_fiber) { 2719 intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG; 2720 rte_eal_alarm_set(10, 2721 txgbe_dev_setup_link_alarm_handler, dev); 2722 } 2723 return rte_eth_linkstatus_set(dev, &link); 2724 } else if (!hw->dev_start) { 2725 return 
rte_eth_linkstatus_set(dev, &link);
	}

	intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case TXGBE_LINK_SPEED_UNKNOWN:
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_100M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		break;

	case TXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		break;

	case TXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
		break;

	case TXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_5G;
		break;

	case TXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}

static int
txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, TXGBE_PSRCTL);
	fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
	wr32(hw, TXGBE_PSRCTL, fctrl);

	return 0;
}

static int
txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, TXGBE_PSRCTL);
	fctrl &= (~TXGBE_PSRCTL_UCP);
	if (dev->data->all_multicast == 1)
		fctrl |= TXGBE_PSRCTL_MCP;
	else
		fctrl &= (~TXGBE_PSRCTL_MCP);
	wr32(hw, TXGBE_PSRCTL, fctrl);

	return 0;
}

static int
txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t fctrl;

	fctrl = rd32(hw, TXGBE_PSRCTL);
	fctrl |= TXGBE_PSRCTL_MCP;
	wr32(hw, TXGBE_PSRCTL, fctrl);

	return 0;
}

static int
txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t fctrl;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	fctrl = rd32(hw, TXGBE_PSRCTL);
	fctrl &= (~TXGBE_PSRCTL_MCP);
	wr32(hw, TXGBE_PSRCTL, fctrl);

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
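 *
 * Note: this only updates the software copy of the misc interrupt mask
 * (intr->mask_misc); the new mask takes effect in hardware the next time
 * txgbe_enable_intr() is called.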
 */
static int
txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbe_dev_link_status_print(dev);
	if (on)
		intr->mask_misc |= TXGBE_ICRMISC_LSC;
	else
		intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

	return 0;
}

static int
txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	u64 mask;

	mask = TXGBE_ICR_MASK;
	mask &= (1ULL << TXGBE_MISC_VEC_ID);
	intr->mask |= mask;
	intr->mask_misc |= TXGBE_ICRMISC_GPIO;
	intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	u64 mask;

	mask = TXGBE_ICR_MASK;
	mask &= ~((1ULL << TXGBE_RX_VEC_START) - 1);
	intr->mask |= mask;

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	intr->mask_misc |= TXGBE_ICRMISC_LNKSEC;

	return 0;
}

/*
 * It reads the ICR and sets a flag (TXGBE_ICRMISC_LSC) for link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
				struct rte_intr_handle *intr_handle)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	if (rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_UIO &&
	    rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX)
		wr32(hw, TXGBE_PX_INTA, 1);

	/* clear all cause mask */
	txgbe_disable_intr(hw);

	/* read-on-clear NIC registers here */
	eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & TXGBE_ICRMISC_LSC)
		intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & TXGBE_ICRMISC_ANDONE)
		intr->flags |= TXGBE_FLAG_NEED_AN_CONFIG;

	if (eicr & TXGBE_ICRMISC_VFMBX)
		intr->flags |= TXGBE_FLAG_MAILBOX;

	if (eicr & TXGBE_ICRMISC_LNKSEC)
		intr->flags |= TXGBE_FLAG_MACSEC;

	if (eicr & TXGBE_ICRMISC_GPIO)
		intr->flags |= TXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  void (the function only logs the status)
 */
static void
txgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}

/*
 * It executes link_update after an interrupt has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	int64_t timeout;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & TXGBE_FLAG_MAILBOX) {
		txgbe_pf_mbx_process(dev);
		intr->flags &= ~TXGBE_FLAG_MAILBOX;
	}

	if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) {
		hw->phy.handle_lasi(hw);
		intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & TXGBE_FLAG_NEED_AN_CONFIG) {
		if (hw->devarg.auto_neg == 1 && hw->devarg.poll == 0) {
			hw->mac.kr_handle(hw);
			intr->flags &= ~TXGBE_FLAG_NEED_AN_CONFIG;
		}
	}

	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before the link update,
		 * to predict what happens next
		 */
		rte_eth_linkstatus_get(dev, &link);

		txgbe_dev_link_update(dev, 0);

		/* link is likely to come up */
		if (!link.link_status)
			/* handle it 1 sec later, waiting for it to stabilize */
			timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
		/* link is likely to go down */
		else if ((hw->subsystem_device_id & 0xFF) ==
				TXGBE_DEV_ID_KR_KX_KX4 &&
				hw->devarg.auto_neg == 1)
			/* handle it 2 sec later for backplane AN73 */
			timeout = 2000;
		else
			/* handle it 4 sec later, waiting for it to stabilize */
			timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;

		txgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      txgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* only disable the lsc interrupt */
			intr->mask_misc &= ~TXGBE_ICRMISC_LSC;

			intr->mask_orig = intr->mask;
			/* disable all misc interrupts */
			intr->mask &= ~(1ULL << TXGBE_MISC_VEC_ID);
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	txgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}

/**
 * Interrupt handler which is registered as an alarm callback for the
 * delayed handling of a specific interrupt, waiting for the NIC state to
 * become stable. The txgbe interrupt state is not stable right after the
 * link goes down, so up to 4 seconds are needed to obtain a stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
3088 * @param param 3089 * The address of parameter (struct rte_eth_dev *) registered before. 3090 * 3091 * @return 3092 * void 3093 */ 3094 static void 3095 txgbe_dev_interrupt_delayed_handler(void *param) 3096 { 3097 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 3098 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3099 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 3100 struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev); 3101 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3102 uint32_t eicr; 3103 3104 txgbe_disable_intr(hw); 3105 3106 eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC]; 3107 if (eicr & TXGBE_ICRMISC_VFMBX) 3108 txgbe_pf_mbx_process(dev); 3109 3110 if (intr->flags & TXGBE_FLAG_PHY_INTERRUPT) { 3111 hw->phy.handle_lasi(hw); 3112 intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT; 3113 } 3114 3115 if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) { 3116 txgbe_dev_link_update(dev, 0); 3117 intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE; 3118 txgbe_dev_link_status_print(dev); 3119 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 3120 NULL); 3121 } 3122 3123 if (intr->flags & TXGBE_FLAG_MACSEC) { 3124 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, 3125 NULL); 3126 intr->flags &= ~TXGBE_FLAG_MACSEC; 3127 } 3128 3129 /* restore original mask */ 3130 intr->mask_misc |= TXGBE_ICRMISC_LSC; 3131 3132 intr->mask = intr->mask_orig; 3133 intr->mask_orig = 0; 3134 3135 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 3136 txgbe_enable_intr(dev); 3137 rte_intr_enable(intr_handle); 3138 } 3139 3140 /** 3141 * Interrupt handler triggered by NIC for handling 3142 * specific interrupt. 3143 * 3144 * @param handle 3145 * Pointer to interrupt handle. 3146 * @param param 3147 * The address of parameter (struct rte_eth_dev *) registered before. 3148 * 3149 * @return 3150 * void 3151 */ 3152 static void 3153 txgbe_dev_interrupt_handler(void *param) 3154 { 3155 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 3156 3157 txgbe_dev_interrupt_get_status(dev, dev->intr_handle); 3158 txgbe_dev_interrupt_action(dev, dev->intr_handle); 3159 } 3160 3161 static int 3162 txgbe_dev_led_on(struct rte_eth_dev *dev) 3163 { 3164 struct txgbe_hw *hw; 3165 3166 hw = TXGBE_DEV_HW(dev); 3167 return txgbe_led_on(hw, TXGBE_LEDCTL_ACTIVE) == 0 ? 0 : -ENOTSUP; 3168 } 3169 3170 static int 3171 txgbe_dev_led_off(struct rte_eth_dev *dev) 3172 { 3173 struct txgbe_hw *hw; 3174 3175 hw = TXGBE_DEV_HW(dev); 3176 return txgbe_led_off(hw, TXGBE_LEDCTL_ACTIVE) == 0 ? 0 : -ENOTSUP; 3177 } 3178 3179 static int 3180 txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3181 { 3182 struct txgbe_hw *hw; 3183 uint32_t mflcn_reg; 3184 uint32_t fccfg_reg; 3185 int rx_pause; 3186 int tx_pause; 3187 3188 hw = TXGBE_DEV_HW(dev); 3189 3190 fc_conf->pause_time = hw->fc.pause_time; 3191 fc_conf->high_water = hw->fc.high_water[0]; 3192 fc_conf->low_water = hw->fc.low_water[0]; 3193 fc_conf->send_xon = hw->fc.send_xon; 3194 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 3195 3196 /* 3197 * Return rx_pause status according to actual setting of 3198 * RXFCCFG register. 3199 */ 3200 mflcn_reg = rd32(hw, TXGBE_RXFCCFG); 3201 if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC)) 3202 rx_pause = 1; 3203 else 3204 rx_pause = 0; 3205 3206 /* 3207 * Return tx_pause status according to actual setting of 3208 * TXFCCFG register. 
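 * (Either link-level pause (FC) or priority pause (PFC) being enabled is
 * reported as tx_pause.)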
 */
	fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
	if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
		tx_pause = 1;
	else
		tx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_ETH_FC_NONE;

	return 0;
}

static int
txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct txgbe_hw *hw;
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
		txgbe_fc_none,
		txgbe_fc_rx_pause,
		txgbe_fc_tx_pause,
		txgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	hw = TXGBE_DEV_HW(dev);
	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/*
	 * Reserve at least one Ethernet frame for the watermark;
	 * high_water/low_water are expressed in kilobytes for txgbe.
	 */
	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
	if (fc_conf->high_water > max_high_water ||
	    fc_conf->high_water < fc_conf->low_water) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water[0] = fc_conf->high_water;
	hw->fc.low_water[0] = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;
	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;

	err = txgbe_fc_enable(hw);

	/* Not negotiated is not an error case */
	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
		wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
		      (fc_conf->mac_ctrl_frame_fwd
		       ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
		txgbe_flush(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
	return -EIO;
}

static int
txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf)
{
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint8_t tc_num;
	uint8_t map[TXGBE_DCB_UP_MAX] = { 0 };
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);

	enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
		txgbe_fc_none,
		txgbe_fc_rx_pause,
		txgbe_fc_tx_pause,
		txgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
	tc_num = map[pfc_conf->priority];
	rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
	/*
	 * Reserve at least one Ethernet frame for the watermark;
	 * high_water/low_water are expressed in kilobytes for txgbe.
	 */
	max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
	if (pfc_conf->fc.high_water > max_high_water ||
	    pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
	hw->fc.pause_time = pfc_conf->fc.pause_time;
	hw->fc.send_xon = pfc_conf->fc.send_xon;
	hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;

	err = txgbe_dcb_pfc_enable(hw, tc_num);

	/* Not negotiated is not an error case */
	if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
		return 0;

	PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
	return -EIO;
}

int
txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	if (!txgbe_rss_update_sp(hw->mac.type)) {
		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
			"NIC.");
		return -ENOTSUP;
	}

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) doesn't match the number the hardware can support "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1)) {
				reta &= ~(MS32(8 * j, 0xFF));
				reta |= LS32(reta_conf[idx].reta[shift + j],
						8 * j, 0xFF);
			}
		}
		wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
	}
	adapter->rss_reta_updated = 1;

	return 0;
}

int
txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct txgbe_hw *hw =
		TXGBE_DEV_HW(dev);
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
			"(%d) doesn't match the number the hardware can support "
			"(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += 4) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
		if (!mask)
			continue;

		reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
		for (j = 0; j < 4; j++) {
			if (RS8(mask, j, 0x1))
				reta_conf[idx].reta[shift + j] =
					(uint16_t)RS32(reta, 8 * j, 0xFF);
		}
	}

	return 0;
}

static int
txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t enable_addr = 1;

	return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
			     pool, enable_addr);
}

static void
txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	txgbe_clear_rar(hw, index);
}

static int
txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	txgbe_remove_rar(dev, 0);
	txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}

static int
txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	struct rte_eth_dev_data *dev_data = dev->data;

	/* If the device is started, refuse an MTU that requires scattered
	 * packet support when that feature has not been enabled beforehand.
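	 * (A frame larger than a single mbuf data area can only be received
	 * with scattered Rx, hence the min_rx_buf_size check below.)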
3460 */ 3461 if (dev_data->dev_started && !dev_data->scattered_rx && 3462 (frame_size + 2 * RTE_VLAN_HLEN > 3463 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 3464 PMD_INIT_LOG(ERR, "Stop port first."); 3465 return -EINVAL; 3466 } 3467 3468 if (hw->mode) 3469 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, 3470 TXGBE_FRAME_SIZE_MAX); 3471 else 3472 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, 3473 TXGBE_FRMSZ_MAX(frame_size)); 3474 3475 return 0; 3476 } 3477 3478 static uint32_t 3479 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr) 3480 { 3481 uint32_t vector = 0; 3482 3483 switch (hw->mac.mc_filter_type) { 3484 case 0: /* use bits [47:36] of the address */ 3485 vector = ((uc_addr->addr_bytes[4] >> 4) | 3486 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 3487 break; 3488 case 1: /* use bits [46:35] of the address */ 3489 vector = ((uc_addr->addr_bytes[4] >> 3) | 3490 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 3491 break; 3492 case 2: /* use bits [45:34] of the address */ 3493 vector = ((uc_addr->addr_bytes[4] >> 2) | 3494 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 3495 break; 3496 case 3: /* use bits [43:32] of the address */ 3497 vector = ((uc_addr->addr_bytes[4]) | 3498 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 3499 break; 3500 default: /* Invalid mc_filter_type */ 3501 break; 3502 } 3503 3504 /* vector can only be 12-bits or boundary will be exceeded */ 3505 vector &= 0xFFF; 3506 return vector; 3507 } 3508 3509 static int 3510 txgbe_uc_hash_table_set(struct rte_eth_dev *dev, 3511 struct rte_ether_addr *mac_addr, uint8_t on) 3512 { 3513 uint32_t vector; 3514 uint32_t uta_idx; 3515 uint32_t reg_val; 3516 uint32_t uta_mask; 3517 uint32_t psrctl; 3518 3519 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3520 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev); 3521 3522 /* The UTA table only exists on pf hardware */ 3523 if (hw->mac.type < txgbe_mac_raptor) 3524 return -ENOTSUP; 3525 3526 vector = txgbe_uta_vector(hw, mac_addr); 3527 uta_idx = (vector >> 5) & 0x7F; 3528 uta_mask = 0x1UL << (vector & 0x1F); 3529 3530 if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask)) 3531 return 0; 3532 3533 reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx)); 3534 if (on) { 3535 uta_info->uta_in_use++; 3536 reg_val |= uta_mask; 3537 uta_info->uta_shadow[uta_idx] |= uta_mask; 3538 } else { 3539 uta_info->uta_in_use--; 3540 reg_val &= ~uta_mask; 3541 uta_info->uta_shadow[uta_idx] &= ~uta_mask; 3542 } 3543 3544 wr32(hw, TXGBE_UCADDRTBL(uta_idx), reg_val); 3545 3546 psrctl = rd32(hw, TXGBE_PSRCTL); 3547 if (uta_info->uta_in_use > 0) 3548 psrctl |= TXGBE_PSRCTL_UCHFENA; 3549 else 3550 psrctl &= ~TXGBE_PSRCTL_UCHFENA; 3551 3552 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK; 3553 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); 3554 wr32(hw, TXGBE_PSRCTL, psrctl); 3555 3556 return 0; 3557 } 3558 3559 static int 3560 txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 3561 { 3562 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3563 struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev); 3564 uint32_t psrctl; 3565 int i; 3566 3567 /* The UTA table only exists on pf hardware */ 3568 if (hw->mac.type < txgbe_mac_raptor) 3569 return -ENOTSUP; 3570 3571 if (on) { 3572 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 3573 uta_info->uta_shadow[i] = ~0; 3574 wr32(hw, TXGBE_UCADDRTBL(i), ~0); 3575 } 3576 } else { 3577 for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 3578 uta_info->uta_shadow[i] = 0; 3579 wr32(hw, TXGBE_UCADDRTBL(i), 0); 3580 } 3581 } 3582 3583 psrctl = rd32(hw, 
TXGBE_PSRCTL); 3584 if (on) 3585 psrctl |= TXGBE_PSRCTL_UCHFENA; 3586 else 3587 psrctl &= ~TXGBE_PSRCTL_UCHFENA; 3588 3589 psrctl &= ~TXGBE_PSRCTL_ADHF12_MASK; 3590 psrctl |= TXGBE_PSRCTL_ADHF12(hw->mac.mc_filter_type); 3591 wr32(hw, TXGBE_PSRCTL, psrctl); 3592 3593 return 0; 3594 } 3595 3596 uint32_t 3597 txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 3598 { 3599 uint32_t new_val = orig_val; 3600 3601 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) 3602 new_val |= TXGBE_POOLETHCTL_UTA; 3603 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC) 3604 new_val |= TXGBE_POOLETHCTL_MCHA; 3605 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC) 3606 new_val |= TXGBE_POOLETHCTL_UCHA; 3607 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST) 3608 new_val |= TXGBE_POOLETHCTL_BCA; 3609 if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST) 3610 new_val |= TXGBE_POOLETHCTL_MCP; 3611 3612 return new_val; 3613 } 3614 3615 static int 3616 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 3617 { 3618 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3619 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 3620 uint32_t mask; 3621 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3622 3623 if (queue_id < 32) { 3624 mask = rd32(hw, TXGBE_IMS(0)); 3625 mask &= (1 << queue_id); 3626 wr32(hw, TXGBE_IMS(0), mask); 3627 } else if (queue_id < 64) { 3628 mask = rd32(hw, TXGBE_IMS(1)); 3629 mask &= (1 << (queue_id - 32)); 3630 wr32(hw, TXGBE_IMS(1), mask); 3631 } 3632 rte_intr_enable(intr_handle); 3633 3634 return 0; 3635 } 3636 3637 static int 3638 txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 3639 { 3640 uint32_t mask; 3641 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3642 3643 if (queue_id < 32) { 3644 mask = rd32(hw, TXGBE_IMS(0)); 3645 mask &= ~(1 << queue_id); 3646 wr32(hw, TXGBE_IMS(0), mask); 3647 } else if (queue_id < 64) { 3648 mask = rd32(hw, TXGBE_IMS(1)); 3649 mask &= ~(1 << (queue_id - 32)); 3650 wr32(hw, TXGBE_IMS(1), mask); 3651 } 3652 3653 return 0; 3654 } 3655 3656 /** 3657 * set the IVAR registers, mapping interrupt causes to vectors 3658 * @param hw 3659 * pointer to txgbe_hw struct 3660 * @direction 3661 * 0 for Rx, 1 for Tx, -1 for other causes 3662 * @queue 3663 * queue to map the corresponding interrupt to 3664 * @msix_vector 3665 * the vector to map to the corresponding queue 3666 */ 3667 void 3668 txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, 3669 uint8_t queue, uint8_t msix_vector) 3670 { 3671 uint32_t tmp, idx; 3672 3673 if (direction == -1) { 3674 /* other causes */ 3675 msix_vector |= TXGBE_IVARMISC_VLD; 3676 idx = 0; 3677 tmp = rd32(hw, TXGBE_IVARMISC); 3678 tmp &= ~(0xFF << idx); 3679 tmp |= (msix_vector << idx); 3680 wr32(hw, TXGBE_IVARMISC, tmp); 3681 } else { 3682 /* rx or tx causes */ 3683 /* Workaround for ICR lost */ 3684 idx = ((16 * (queue & 1)) + (8 * direction)); 3685 tmp = rd32(hw, TXGBE_IVAR(queue >> 1)); 3686 tmp &= ~(0xFF << idx); 3687 tmp |= (msix_vector << idx); 3688 wr32(hw, TXGBE_IVAR(queue >> 1), tmp); 3689 } 3690 } 3691 3692 /** 3693 * Sets up the hardware to properly generate MSI-X interrupts 3694 * @hw 3695 * board private structure 3696 */ 3697 static void 3698 txgbe_configure_msix(struct rte_eth_dev *dev) 3699 { 3700 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3701 struct rte_intr_handle *intr_handle = pci_dev->intr_handle; 3702 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3703 uint32_t queue_id, base = TXGBE_MISC_VEC_ID; 3704 uint32_t vec = TXGBE_MISC_VEC_ID; 3705 uint32_t gpie; 3706 3707 /* won't 
configure the MSI-X register if no mapping is done
 * between intr vector and event fd,
 * but if MSI-X has been enabled already, we need to configure
 * auto clean, auto mask and throttling.
 */
	gpie = rd32(hw, TXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & TXGBE_GPIE_MSIX))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	wr32(hw, TXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			txgbe_set_ivar_map(hw, 0, queue_id, vec);
			rte_intr_vec_list_index_set(intr_handle,
							queue_id, vec);
			if (vec < base + rte_intr_nb_efd_get(intr_handle)
			    - 1)
				vec++;
		}

		txgbe_set_ivar_map(hw, -1, 1, TXGBE_MISC_VEC_ID);
	}
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
			TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| TXGBE_ITR_WRDSA);
}

int
txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
			   uint16_t queue_idx, uint16_t tx_rate)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t bcnrc_val;

	if (queue_idx >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (tx_rate != 0) {
		bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
		bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
	} else {
		bcnrc_val = 0;
	}

	/*
	 * Set the global transmit compensation time to the MMW_SIZE in the
	 * ARBTXMMW register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
 */
	wr32(hw, TXGBE_ARBTXMMW, 0x14);

	/* Set ARBTXRATE of queue X */
	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
	wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
	txgbe_flush(hw);

	return 0;
}

int
txgbe_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
	uint32_t syn_info;
	uint32_t synqf;

	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
		return -EINVAL;

	syn_info = filter_info->syn_info;

	if (add) {
		if (syn_info & TXGBE_SYNCLS_ENA)
			return -EINVAL;
		synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
		synqf |= TXGBE_SYNCLS_ENA;

		if (filter->hig_pri)
			synqf |= TXGBE_SYNCLS_HIPRIO;
		else
			synqf &= ~TXGBE_SYNCLS_HIPRIO;
	} else {
		synqf = rd32(hw, TXGBE_SYNCLS);
		if (!(syn_info & TXGBE_SYNCLS_ENA))
			return -ENOENT;
		synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
	}

	filter_info->syn_info = synqf;
	wr32(hw, TXGBE_SYNCLS, synqf);
	txgbe_flush(hw);
	return 0;
}

static inline enum txgbe_5tuple_protocol
convert_protocol_type(uint8_t protocol_value)
{
	if (protocol_value == IPPROTO_TCP)
		return TXGBE_5TF_PROT_TCP;
	else if (protocol_value == IPPROTO_UDP)
		return TXGBE_5TF_PROT_UDP;
	else if (protocol_value == IPPROTO_SCTP)
		return TXGBE_5TF_PROT_SCTP;
	else
		return TXGBE_5TF_PROT_NONE;
}

/* inject a 5-tuple filter into HW */
static inline void
txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
			   struct txgbe_5tuple_filter *filter)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int i;
	uint32_t ftqf, sdpqf;
	uint32_t l34timir = 0;
	uint32_t mask = TXGBE_5TFCTL0_MASK;

	i = filter->index;
	sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
	sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));

	ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
	ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
		mask &= ~TXGBE_5TFCTL0_MSADDR;
	if (filter->filter_info.dst_ip_mask == 0)
		mask &= ~TXGBE_5TFCTL0_MDADDR;
	if (filter->filter_info.src_port_mask == 0)
		mask &= ~TXGBE_5TFCTL0_MSPORT;
	if (filter->filter_info.dst_port_mask == 0)
		mask &= ~TXGBE_5TFCTL0_MDPORT;
	if (filter->filter_info.proto_mask == 0)
		mask &= ~TXGBE_5TFCTL0_MPROTO;
	ftqf |= mask;
	ftqf |= TXGBE_5TFCTL0_MPOOL;
	ftqf |= TXGBE_5TFCTL0_ENA;

	wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
	wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
	wr32(hw, TXGBE_5TFPORT(i), sdpqf);
	wr32(hw, TXGBE_5TFCTL0(i), ftqf);

	l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
	wr32(hw, TXGBE_5TFCTL1(i), l34timir);
}

/*
 * add a 5-tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter that will be added; a free hardware filter
 *  index is allocated and stored in filter->index, and filter->queue is the
 *  Rx queue the matched packets are assigned to.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
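 *  - -ENOSYS when all TXGBE_MAX_FTQF_FILTERS hardware slots are already
 *    in use (see the allocation loop below).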
3883 */ 3884 static int 3885 txgbe_add_5tuple_filter(struct rte_eth_dev *dev, 3886 struct txgbe_5tuple_filter *filter) 3887 { 3888 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 3889 int i, idx, shift; 3890 3891 /* 3892 * look for an unused 5tuple filter index, 3893 * and insert the filter into the list. 3894 */ 3895 for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) { 3896 idx = i / (sizeof(uint32_t) * NBBY); 3897 shift = i % (sizeof(uint32_t) * NBBY); 3898 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 3899 filter_info->fivetuple_mask[idx] |= 1 << shift; 3900 filter->index = i; 3901 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 3902 filter, 3903 entries); 3904 break; 3905 } 3906 } 3907 if (i >= TXGBE_MAX_FTQF_FILTERS) { 3908 PMD_DRV_LOG(ERR, "5tuple filters are full."); 3909 return -ENOSYS; 3910 } 3911 3912 txgbe_inject_5tuple_filter(dev, filter); 3913 3914 return 0; 3915 } 3916 3917 /* 3918 * remove a 5tuple filter 3919 * 3920 * @param 3921 * dev: Pointer to struct rte_eth_dev. 3922 * filter: pointer to the filter that will be removed. 3923 */ 3924 static void 3925 txgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 3926 struct txgbe_5tuple_filter *filter) 3927 { 3928 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 3929 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 3930 uint16_t index = filter->index; 3931 3932 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 3933 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 3934 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 3935 rte_free(filter); 3936 3937 wr32(hw, TXGBE_5TFDADDR(index), 0); 3938 wr32(hw, TXGBE_5TFSADDR(index), 0); 3939 wr32(hw, TXGBE_5TFPORT(index), 0); 3940 wr32(hw, TXGBE_5TFCTL0(index), 0); 3941 wr32(hw, TXGBE_5TFCTL1(index), 0); 3942 } 3943 3944 static inline struct txgbe_5tuple_filter * 3945 txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list, 3946 struct txgbe_5tuple_filter_info *key) 3947 { 3948 struct txgbe_5tuple_filter *it; 3949 3950 TAILQ_FOREACH(it, filter_list, entries) { 3951 if (memcmp(key, &it->filter_info, 3952 sizeof(struct txgbe_5tuple_filter_info)) == 0) { 3953 return it; 3954 } 3955 } 3956 return NULL; 3957 } 3958 3959 /* translate elements in struct rte_eth_ntuple_filter 3960 * to struct txgbe_5tuple_filter_info 3961 */ 3962 static inline int 3963 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 3964 struct txgbe_5tuple_filter_info *filter_info) 3965 { 3966 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM || 3967 filter->priority > TXGBE_5TUPLE_MAX_PRI || 3968 filter->priority < TXGBE_5TUPLE_MIN_PRI) 3969 return -EINVAL; 3970 3971 switch (filter->dst_ip_mask) { 3972 case UINT32_MAX: 3973 filter_info->dst_ip_mask = 0; 3974 filter_info->dst_ip = filter->dst_ip; 3975 break; 3976 case 0: 3977 filter_info->dst_ip_mask = 1; 3978 break; 3979 default: 3980 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 3981 return -EINVAL; 3982 } 3983 3984 switch (filter->src_ip_mask) { 3985 case UINT32_MAX: 3986 filter_info->src_ip_mask = 0; 3987 filter_info->src_ip = filter->src_ip; 3988 break; 3989 case 0: 3990 filter_info->src_ip_mask = 1; 3991 break; 3992 default: 3993 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 3994 return -EINVAL; 3995 } 3996 3997 switch (filter->dst_port_mask) { 3998 case UINT16_MAX: 3999 filter_info->dst_port_mask = 0; 4000 filter_info->dst_port = filter->dst_port; 4001 break; 4002 case 0: 4003 filter_info->dst_port_mask = 1; 4004 break; 4005 default: 4006 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 4007 return -EINVAL; 4008 } 4009
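/* The remaining fields follow the same inverted convention as above: an all-ones rte_eth mask means "compare this field" (internal mask bit cleared to 0), while a zero mask means "ignore it" (internal mask bit set to 1). */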
4010 switch (filter->src_port_mask) { 4011 case UINT16_MAX: 4012 filter_info->src_port_mask = 0; 4013 filter_info->src_port = filter->src_port; 4014 break; 4015 case 0: 4016 filter_info->src_port_mask = 1; 4017 break; 4018 default: 4019 PMD_DRV_LOG(ERR, "invalid src_port mask."); 4020 return -EINVAL; 4021 } 4022 4023 switch (filter->proto_mask) { 4024 case UINT8_MAX: 4025 filter_info->proto_mask = 0; 4026 filter_info->proto = 4027 convert_protocol_type(filter->proto); 4028 break; 4029 case 0: 4030 filter_info->proto_mask = 1; 4031 break; 4032 default: 4033 PMD_DRV_LOG(ERR, "invalid protocol mask."); 4034 return -EINVAL; 4035 } 4036 4037 filter_info->priority = (uint8_t)filter->priority; 4038 return 0; 4039 } 4040 4041 /* 4042 * add or delete a ntuple filter 4043 * 4044 * @param 4045 * dev: Pointer to struct rte_eth_dev. 4046 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 4047 * add: if true, add filter, if false, remove filter 4048 * 4049 * @return 4050 * - On success, zero. 4051 * - On failure, a negative value. 4052 */ 4053 int 4054 txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 4055 struct rte_eth_ntuple_filter *ntuple_filter, 4056 bool add) 4057 { 4058 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 4059 struct txgbe_5tuple_filter_info filter_5tuple; 4060 struct txgbe_5tuple_filter *filter; 4061 int ret; 4062 4063 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 4064 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 4065 return -EINVAL; 4066 } 4067 4068 memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info)); 4069 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 4070 if (ret < 0) 4071 return ret; 4072 4073 filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 4074 &filter_5tuple); 4075 if (filter != NULL && add) { 4076 PMD_DRV_LOG(ERR, "filter exists."); 4077 return -EEXIST; 4078 } 4079 if (filter == NULL && !add) { 4080 PMD_DRV_LOG(ERR, "filter doesn't exist."); 4081 return -ENOENT; 4082 } 4083 4084 if (add) { 4085 filter = rte_zmalloc("txgbe_5tuple_filter", 4086 sizeof(struct txgbe_5tuple_filter), 0); 4087 if (filter == NULL) 4088 return -ENOMEM; 4089 rte_memcpy(&filter->filter_info, 4090 &filter_5tuple, 4091 sizeof(struct txgbe_5tuple_filter_info)); 4092 filter->queue = ntuple_filter->queue; 4093 ret = txgbe_add_5tuple_filter(dev, filter); 4094 if (ret < 0) { 4095 rte_free(filter); 4096 return ret; 4097 } 4098 } else { 4099 txgbe_remove_5tuple_filter(dev, filter); 4100 } 4101 4102 return 0; 4103 } 4104 4105 int 4106 txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 4107 struct rte_eth_ethertype_filter *filter, 4108 bool add) 4109 { 4110 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4111 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 4112 uint32_t etqf = 0; 4113 uint32_t etqs = 0; 4114 int ret; 4115 struct txgbe_ethertype_filter ethertype_filter; 4116 4117 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) 4118 return -EINVAL; 4119 4120 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 4121 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 4122 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 4123 " ethertype filter.", filter->ether_type); 4124 return -EINVAL; 4125 } 4126 4127 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 4128 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 4129 return -EINVAL; 4130 } 4131 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 4132 PMD_DRV_LOG(ERR, "drop option is unsupported."); 4133 return -EINVAL; 4134 } 4135 4136 ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 
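/* The lookup returns the ETQF slot index (>= 0) when this ether_type is already programmed, or a negative value when it is not; both the add and delete paths below key off that result. */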
4137 if (ret >= 0 && add) { 4138 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 4139 filter->ether_type); 4140 return -EEXIST; 4141 } 4142 if (ret < 0 && !add) { 4143 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 4144 filter->ether_type); 4145 return -ENOENT; 4146 } 4147 4148 if (add) { 4149 etqf = TXGBE_ETFLT_ENA; 4150 etqf |= TXGBE_ETFLT_ETID(filter->ether_type); 4151 etqs |= TXGBE_ETCLS_QPID(filter->queue); 4152 etqs |= TXGBE_ETCLS_QENA; 4153 4154 ethertype_filter.ethertype = filter->ether_type; 4155 ethertype_filter.etqf = etqf; 4156 ethertype_filter.etqs = etqs; 4157 ethertype_filter.conf = FALSE; 4158 ret = txgbe_ethertype_filter_insert(filter_info, 4159 &ethertype_filter); 4160 if (ret < 0) { 4161 PMD_DRV_LOG(ERR, "ethertype filters are full."); 4162 return -ENOSPC; 4163 } 4164 } else { 4165 ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); 4166 if (ret < 0) 4167 return -ENOSYS; 4168 } 4169 wr32(hw, TXGBE_ETFLT(ret), etqf); 4170 wr32(hw, TXGBE_ETCLS(ret), etqs); 4171 txgbe_flush(hw); 4172 4173 return 0; 4174 } 4175 4176 static int 4177 txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev, 4178 const struct rte_flow_ops **ops) 4179 { 4180 *ops = &txgbe_flow_ops; 4181 return 0; 4182 } 4183 4184 static u8 * 4185 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw, 4186 u8 **mc_addr_ptr, u32 *vmdq) 4187 { 4188 u8 *mc_addr; 4189 4190 *vmdq = 0; 4191 mc_addr = *mc_addr_ptr; 4192 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 4193 return mc_addr; 4194 } 4195 4196 int 4197 txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 4198 struct rte_ether_addr *mc_addr_set, 4199 uint32_t nb_mc_addr) 4200 { 4201 struct txgbe_hw *hw; 4202 u8 *mc_addr_list; 4203 4204 hw = TXGBE_DEV_HW(dev); 4205 mc_addr_list = (u8 *)mc_addr_set; 4206 return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 4207 txgbe_dev_addr_list_itr, TRUE); 4208 } 4209 4210 static uint64_t 4211 txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 4212 { 4213 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4214 uint64_t systime_cycles; 4215 4216 systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL); 4217 systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32; 4218 4219 return systime_cycles; 4220 } 4221 4222 static uint64_t 4223 txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 4224 { 4225 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4226 uint64_t rx_tstamp_cycles; 4227 4228 /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */ 4229 rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL); 4230 rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32; 4231 4232 return rx_tstamp_cycles; 4233 } 4234 4235 static uint64_t 4236 txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 4237 { 4238 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4239 uint64_t tx_tstamp_cycles; 4240 4241 /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */ 4242 tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL); 4243 tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32; 4244 4245 return tx_tstamp_cycles; 4246 } 4247 4248 static void 4249 txgbe_start_timecounters(struct rte_eth_dev *dev) 4250 { 4251 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4252 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4253 struct rte_eth_link link; 4254 uint32_t incval = 0; 4255 uint32_t shift = 0; 4256 4257 /* Get current link speed.
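 * (Editor's note) The speed selected below picks the TSTIMEINC
 * increment (incval) and the cyclecounter shift; for each of the
 * three timecounters, nsec_mask = (1ULL << shift) - 1 is kept in
 * step with that shift.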
*/ 4258 txgbe_dev_link_update(dev, 1); 4259 rte_eth_linkstatus_get(dev, &link); 4260 4261 switch (link.link_speed) { 4262 case RTE_ETH_SPEED_NUM_100M: 4263 incval = TXGBE_INCVAL_100; 4264 shift = TXGBE_INCVAL_SHIFT_100; 4265 break; 4266 case RTE_ETH_SPEED_NUM_1G: 4267 incval = TXGBE_INCVAL_1GB; 4268 shift = TXGBE_INCVAL_SHIFT_1GB; 4269 break; 4270 case RTE_ETH_SPEED_NUM_10G: 4271 default: 4272 incval = TXGBE_INCVAL_10GB; 4273 shift = TXGBE_INCVAL_SHIFT_10GB; 4274 break; 4275 } 4276 4277 wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2)); 4278 4279 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 4280 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4281 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 4282 4283 adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK; 4284 adapter->systime_tc.cc_shift = shift; 4285 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 4286 4287 adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK; 4288 adapter->rx_tstamp_tc.cc_shift = shift; 4289 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4290 4291 adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK; 4292 adapter->tx_tstamp_tc.cc_shift = shift; 4293 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 4294 } 4295 4296 static int 4297 txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 4298 { 4299 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4300 4301 adapter->systime_tc.nsec += delta; 4302 adapter->rx_tstamp_tc.nsec += delta; 4303 adapter->tx_tstamp_tc.nsec += delta; 4304 4305 return 0; 4306 } 4307 4308 static int 4309 txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 4310 { 4311 uint64_t ns; 4312 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4313 4314 ns = rte_timespec_to_ns(ts); 4315 /* Set the timecounters to a new value. */ 4316 adapter->systime_tc.nsec = ns; 4317 adapter->rx_tstamp_tc.nsec = ns; 4318 adapter->tx_tstamp_tc.nsec = ns; 4319 4320 return 0; 4321 } 4322 4323 static int 4324 txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 4325 { 4326 uint64_t ns, systime_cycles; 4327 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4328 4329 systime_cycles = txgbe_read_systime_cyclecounter(dev); 4330 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 4331 *ts = rte_ns_to_timespec(ns); 4332 4333 return 0; 4334 } 4335 4336 static int 4337 txgbe_timesync_enable(struct rte_eth_dev *dev) 4338 { 4339 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4340 uint32_t tsync_ctl; 4341 4342 /* Stop the timesync system time. */ 4343 wr32(hw, TXGBE_TSTIMEINC, 0x0); 4344 /* Reset the timesync system time value. */ 4345 wr32(hw, TXGBE_TSTIMEL, 0x0); 4346 wr32(hw, TXGBE_TSTIMEH, 0x0); 4347 4348 txgbe_start_timecounters(dev); 4349 4350 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 4351 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 4352 RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588); 4353 4354 /* Enable timestamping of received PTP packets. */ 4355 tsync_ctl = rd32(hw, TXGBE_TSRXCTL); 4356 tsync_ctl |= TXGBE_TSRXCTL_ENA; 4357 wr32(hw, TXGBE_TSRXCTL, tsync_ctl); 4358 4359 /* Enable timestamping of transmitted PTP packets. 
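 *
 * Application-side sketch (illustrative; port_id, ts and delta_ns are
 * placeholders), using the generic ethdev timesync API rather than
 * anything defined in this file:
 *
 *   rte_eth_timesync_enable(port_id);
 *   rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 *   rte_eth_timesync_adjust_time(port_id, delta_ns);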
*/ 4360 tsync_ctl = rd32(hw, TXGBE_TSTXCTL); 4361 tsync_ctl |= TXGBE_TSTXCTL_ENA; 4362 wr32(hw, TXGBE_TSTXCTL, tsync_ctl); 4363 4364 txgbe_flush(hw); 4365 4366 return 0; 4367 } 4368 4369 static int 4370 txgbe_timesync_disable(struct rte_eth_dev *dev) 4371 { 4372 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4373 uint32_t tsync_ctl; 4374 4375 /* Disable timestamping of transmitted PTP packets. */ 4376 tsync_ctl = rd32(hw, TXGBE_TSTXCTL); 4377 tsync_ctl &= ~TXGBE_TSTXCTL_ENA; 4378 wr32(hw, TXGBE_TSTXCTL, tsync_ctl); 4379 4380 /* Disable timestamping of received PTP packets. */ 4381 tsync_ctl = rd32(hw, TXGBE_TSRXCTL); 4382 tsync_ctl &= ~TXGBE_TSRXCTL_ENA; 4383 wr32(hw, TXGBE_TSRXCTL, tsync_ctl); 4384 4385 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 4386 wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0); 4387 4388 /* Stop incrementing the System Time registers. */ 4389 wr32(hw, TXGBE_TSTIMEINC, 0); 4390 4391 return 0; 4392 } 4393 4394 static int 4395 txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 4396 struct timespec *timestamp, 4397 uint32_t flags __rte_unused) 4398 { 4399 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4400 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4401 uint32_t tsync_rxctl; 4402 uint64_t rx_tstamp_cycles; 4403 uint64_t ns; 4404 4405 tsync_rxctl = rd32(hw, TXGBE_TSRXCTL); 4406 if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0) 4407 return -EINVAL; 4408 4409 rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev); 4410 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 4411 *timestamp = rte_ns_to_timespec(ns); 4412 4413 return 0; 4414 } 4415 4416 static int 4417 txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 4418 struct timespec *timestamp) 4419 { 4420 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4421 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev); 4422 uint32_t tsync_txctl; 4423 uint64_t tx_tstamp_cycles; 4424 uint64_t ns; 4425 4426 tsync_txctl = rd32(hw, TXGBE_TSTXCTL); 4427 if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0) 4428 return -EINVAL; 4429 4430 tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev); 4431 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 4432 *timestamp = rte_ns_to_timespec(ns); 4433 4434 return 0; 4435 } 4436 4437 static int 4438 txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused) 4439 { 4440 int count = 0; 4441 int g_ind = 0; 4442 const struct reg_info *reg_group; 4443 const struct reg_info **reg_set = txgbe_regs_others; 4444 4445 while ((reg_group = reg_set[g_ind++])) 4446 count += txgbe_regs_group_count(reg_group); 4447 4448 return count; 4449 } 4450 4451 static int 4452 txgbe_get_regs(struct rte_eth_dev *dev, 4453 struct rte_dev_reg_info *regs) 4454 { 4455 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4456 uint32_t *data = regs->data; 4457 int g_ind = 0; 4458 int count = 0; 4459 const struct reg_info *reg_group; 4460 const struct reg_info **reg_set = txgbe_regs_others; 4461 4462 if (data == NULL) { 4463 regs->length = txgbe_get_reg_length(dev); 4464 regs->width = sizeof(uint32_t); 4465 return 0; 4466 } 4467 4468 /* Support only full register dump */ 4469 if (regs->length == 0 || 4470 regs->length == (uint32_t)txgbe_get_reg_length(dev)) { 4471 regs->version = hw->mac.type << 24 | 4472 hw->revision_id << 16 | 4473 hw->device_id; 4474 while ((reg_group = reg_set[g_ind++])) 4475 count += txgbe_read_regs_group(dev, &data[count], 4476 reg_group); 4477 return 0; 4478 } 4479 4480 return -ENOTSUP; 4481 } 4482 4483 static int 4484 txgbe_get_eeprom_length(struct 
rte_eth_dev *dev) 4485 { 4486 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4487 4488 /* Return unit is byte count */ 4489 return hw->rom.word_size * 2; 4490 } 4491 4492 static int 4493 txgbe_get_eeprom(struct rte_eth_dev *dev, 4494 struct rte_dev_eeprom_info *in_eeprom) 4495 { 4496 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4497 struct txgbe_rom_info *eeprom = &hw->rom; 4498 uint16_t *data = in_eeprom->data; 4499 int first, length; 4500 4501 first = in_eeprom->offset >> 1; 4502 length = in_eeprom->length >> 1; 4503 if (first > hw->rom.word_size || 4504 ((first + length) > hw->rom.word_size)) 4505 return -EINVAL; 4506 4507 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 4508 4509 return eeprom->readw_buffer(hw, first, length, data); 4510 } 4511 4512 static int 4513 txgbe_set_eeprom(struct rte_eth_dev *dev, 4514 struct rte_dev_eeprom_info *in_eeprom) 4515 { 4516 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4517 struct txgbe_rom_info *eeprom = &hw->rom; 4518 uint16_t *data = in_eeprom->data; 4519 int first, length; 4520 4521 first = in_eeprom->offset >> 1; 4522 length = in_eeprom->length >> 1; 4523 if (first > hw->rom.word_size || 4524 ((first + length) > hw->rom.word_size)) 4525 return -EINVAL; 4526 4527 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 4528 4529 return eeprom->writew_buffer(hw, first, length, data); 4530 } 4531 4532 static int 4533 txgbe_get_module_info(struct rte_eth_dev *dev, 4534 struct rte_eth_dev_module_info *modinfo) 4535 { 4536 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4537 uint32_t status; 4538 uint8_t sff8472_rev, addr_mode; 4539 bool page_swap = false; 4540 4541 /* Check whether we support SFF-8472 or not */ 4542 status = hw->phy.read_i2c_eeprom(hw, 4543 TXGBE_SFF_SFF_8472_COMP, 4544 &sff8472_rev); 4545 if (status != 0) 4546 return -EIO; 4547 4548 /* addressing mode is not supported */ 4549 status = hw->phy.read_i2c_eeprom(hw, 4550 TXGBE_SFF_SFF_8472_SWAP, 4551 &addr_mode); 4552 if (status != 0) 4553 return -EIO; 4554 4555 if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) { 4556 PMD_DRV_LOG(ERR, 4557 "Address change required to access page 0xA2, " 4558 "but not supported. Please report the module " 4559 "type to the driver maintainers."); 4560 page_swap = true; 4561 } 4562 4563 if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) { 4564 /* We have a SFP, but it does not support SFF-8472 */ 4565 modinfo->type = RTE_ETH_MODULE_SFF_8079; 4566 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 4567 } else { 4568 /* We have a SFP which supports a revision of SFF-8472. 
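 * Both the A0h identification page and the A2h diagnostics page are
 * readable in this case, which is why eeprom_len below is the larger
 * RTE_ETH_MODULE_SFF_8472_LEN rather than the SFF-8079 length.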
*/ 4569 modinfo->type = RTE_ETH_MODULE_SFF_8472; 4570 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 4571 } 4572 4573 return 0; 4574 } 4575 4576 static int 4577 txgbe_get_module_eeprom(struct rte_eth_dev *dev, 4578 struct rte_dev_eeprom_info *info) 4579 { 4580 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4581 uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID; 4582 uint8_t databyte = 0xFF; 4583 uint8_t *data = info->data; 4584 uint32_t i = 0; 4585 4586 if (info->length == 0) 4587 return -EINVAL; 4588 4589 for (i = info->offset; i < info->offset + info->length; i++) { 4590 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 4591 status = hw->phy.read_i2c_eeprom(hw, i, &databyte); 4592 else 4593 status = hw->phy.read_i2c_sff8472(hw, i, &databyte); 4594 4595 if (status != 0) 4596 return -EIO; 4597 4598 data[i - info->offset] = databyte; 4599 } 4600 4601 return 0; 4602 } 4603 4604 bool 4605 txgbe_rss_update_sp(enum txgbe_mac_type mac_type) 4606 { 4607 switch (mac_type) { 4608 case txgbe_mac_raptor: 4609 case txgbe_mac_raptor_vf: 4610 return 1; 4611 default: 4612 return 0; 4613 } 4614 } 4615 4616 static int 4617 txgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 4618 struct rte_eth_dcb_info *dcb_info) 4619 { 4620 struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev); 4621 struct txgbe_dcb_tc_config *tc; 4622 struct rte_eth_dcb_tc_queue_mapping *tc_queue; 4623 uint8_t nb_tcs; 4624 uint8_t i, j; 4625 4626 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG) 4627 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 4628 else 4629 dcb_info->nb_tcs = 1; 4630 4631 tc_queue = &dcb_info->tc_queue; 4632 nb_tcs = dcb_info->nb_tcs; 4633 4634 if (dcb_config->vt_mode) { /* vt is enabled */ 4635 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 4636 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 4637 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) 4638 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 4639 if (RTE_ETH_DEV_SRIOV(dev).active > 0) { 4640 for (j = 0; j < nb_tcs; j++) { 4641 tc_queue->tc_rxq[0][j].base = j; 4642 tc_queue->tc_rxq[0][j].nb_queue = 1; 4643 tc_queue->tc_txq[0][j].base = j; 4644 tc_queue->tc_txq[0][j].nb_queue = 1; 4645 } 4646 } else { 4647 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 4648 for (j = 0; j < nb_tcs; j++) { 4649 tc_queue->tc_rxq[i][j].base = 4650 i * nb_tcs + j; 4651 tc_queue->tc_rxq[i][j].nb_queue = 1; 4652 tc_queue->tc_txq[i][j].base = 4653 i * nb_tcs + j; 4654 tc_queue->tc_txq[i][j].nb_queue = 1; 4655 } 4656 } 4657 } 4658 } else { /* vt is disabled */ 4659 struct rte_eth_dcb_rx_conf *rx_conf = 4660 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 4661 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) 4662 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 4663 if (dcb_info->nb_tcs == RTE_ETH_4_TCS) { 4664 for (i = 0; i < dcb_info->nb_tcs; i++) { 4665 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 4666 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 4667 } 4668 dcb_info->tc_queue.tc_txq[0][0].base = 0; 4669 dcb_info->tc_queue.tc_txq[0][1].base = 64; 4670 dcb_info->tc_queue.tc_txq[0][2].base = 96; 4671 dcb_info->tc_queue.tc_txq[0][3].base = 112; 4672 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 4673 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 4674 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 4675 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 4676 } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) { 4677 for (i = 0; i < dcb_info->nb_tcs; i++) { 4678 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 4679 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 4680 } 4681 
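/* Fixed TX queue layout for 8 TCs: TC0-TC1 get 32 queues each, TC2-TC3 get 16, and TC4-TC7 get 8, covering all 128 TX queues. */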
dcb_info->tc_queue.tc_txq[0][0].base = 0; 4682 dcb_info->tc_queue.tc_txq[0][1].base = 32; 4683 dcb_info->tc_queue.tc_txq[0][2].base = 64; 4684 dcb_info->tc_queue.tc_txq[0][3].base = 80; 4685 dcb_info->tc_queue.tc_txq[0][4].base = 96; 4686 dcb_info->tc_queue.tc_txq[0][5].base = 104; 4687 dcb_info->tc_queue.tc_txq[0][6].base = 112; 4688 dcb_info->tc_queue.tc_txq[0][7].base = 120; 4689 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 4690 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 4691 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 4692 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 4693 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 4694 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 4695 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 4696 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 4697 } 4698 } 4699 for (i = 0; i < dcb_info->nb_tcs; i++) { 4700 tc = &dcb_config->tc_config[i]; 4701 dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent; 4702 } 4703 return 0; 4704 } 4705 4706 /* Update e-tag ether type */ 4707 static int 4708 txgbe_update_e_tag_eth_type(struct txgbe_hw *hw, 4709 uint16_t ether_type) 4710 { 4711 uint32_t etag_etype; 4712 4713 etag_etype = rd32(hw, TXGBE_EXTAG); 4714 etag_etype &= ~TXGBE_EXTAG_ETAG_MASK; 4715 etag_etype |= ether_type; 4716 wr32(hw, TXGBE_EXTAG, etag_etype); 4717 txgbe_flush(hw); 4718 4719 return 0; 4720 } 4721 4722 /* Enable e-tag tunnel */ 4723 static int 4724 txgbe_e_tag_enable(struct txgbe_hw *hw) 4725 { 4726 uint32_t etag_etype; 4727 4728 etag_etype = rd32(hw, TXGBE_PORTCTL); 4729 etag_etype |= TXGBE_PORTCTL_ETAG; 4730 wr32(hw, TXGBE_PORTCTL, etag_etype); 4731 txgbe_flush(hw); 4732 4733 return 0; 4734 } 4735 4736 static int 4737 txgbe_e_tag_filter_del(struct rte_eth_dev *dev, 4738 struct txgbe_l2_tunnel_conf *l2_tunnel) 4739 { 4740 int ret = 0; 4741 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4742 uint32_t i, rar_entries; 4743 uint32_t rar_low, rar_high; 4744 4745 rar_entries = hw->mac.num_rar_entries; 4746 4747 for (i = 1; i < rar_entries; i++) { 4748 wr32(hw, TXGBE_ETHADDRIDX, i); 4749 rar_high = rd32(hw, TXGBE_ETHADDRH); 4750 rar_low = rd32(hw, TXGBE_ETHADDRL); 4751 if ((rar_high & TXGBE_ETHADDRH_VLD) && 4752 (rar_high & TXGBE_ETHADDRH_ETAG) && 4753 (TXGBE_ETHADDRL_ETAG(rar_low) == 4754 l2_tunnel->tunnel_id)) { 4755 wr32(hw, TXGBE_ETHADDRL, 0); 4756 wr32(hw, TXGBE_ETHADDRH, 0); 4757 4758 txgbe_clear_vmdq(hw, i, BIT_MASK32); 4759 4760 return ret; 4761 } 4762 } 4763 4764 return ret; 4765 } 4766 4767 static int 4768 txgbe_e_tag_filter_add(struct rte_eth_dev *dev, 4769 struct txgbe_l2_tunnel_conf *l2_tunnel) 4770 { 4771 int ret = 0; 4772 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4773 uint32_t i, rar_entries; 4774 uint32_t rar_low, rar_high; 4775 4776 /* One entry for one tunnel. Try to remove potential existing entry. */ 4777 txgbe_e_tag_filter_del(dev, l2_tunnel); 4778 4779 rar_entries = hw->mac.num_rar_entries; 4780 4781 for (i = 1; i < rar_entries; i++) { 4782 wr32(hw, TXGBE_ETHADDRIDX, i); 4783 rar_high = rd32(hw, TXGBE_ETHADDRH); 4784 if (rar_high & TXGBE_ETHADDRH_VLD) { 4785 continue; 4786 } else { 4787 txgbe_set_vmdq(hw, i, l2_tunnel->pool); 4788 rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG; 4789 rar_low = l2_tunnel->tunnel_id; 4790 4791 wr32(hw, TXGBE_ETHADDRL, rar_low); 4792 wr32(hw, TXGBE_ETHADDRH, rar_high); 4793 4794 return ret; 4795 } 4796 } 4797 4798 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
4799 " Please remove a rule before adding a new one."); 4800 return -EINVAL; 4801 } 4802 4803 static inline struct txgbe_l2_tn_filter * 4804 txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info, 4805 struct txgbe_l2_tn_key *key) 4806 { 4807 int ret; 4808 4809 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 4810 if (ret < 0) 4811 return NULL; 4812 4813 return l2_tn_info->hash_map[ret]; 4814 } 4815 4816 static inline int 4817 txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info, 4818 struct txgbe_l2_tn_filter *l2_tn_filter) 4819 { 4820 int ret; 4821 4822 ret = rte_hash_add_key(l2_tn_info->hash_handle, 4823 &l2_tn_filter->key); 4824 4825 if (ret < 0) { 4826 PMD_DRV_LOG(ERR, 4827 "Failed to insert L2 tunnel filter" 4828 " to hash table %d!", 4829 ret); 4830 return ret; 4831 } 4832 4833 l2_tn_info->hash_map[ret] = l2_tn_filter; 4834 4835 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 4836 4837 return 0; 4838 } 4839 4840 static inline int 4841 txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info, 4842 struct txgbe_l2_tn_key *key) 4843 { 4844 int ret; 4845 struct txgbe_l2_tn_filter *l2_tn_filter; 4846 4847 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 4848 4849 if (ret < 0) { 4850 PMD_DRV_LOG(ERR, 4851 "No such L2 tunnel filter to delete %d!", 4852 ret); 4853 return ret; 4854 } 4855 4856 l2_tn_filter = l2_tn_info->hash_map[ret]; 4857 l2_tn_info->hash_map[ret] = NULL; 4858 4859 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 4860 rte_free(l2_tn_filter); 4861 4862 return 0; 4863 } 4864 4865 /* Add l2 tunnel filter */ 4866 int 4867 txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 4868 struct txgbe_l2_tunnel_conf *l2_tunnel, 4869 bool restore) 4870 { 4871 int ret; 4872 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); 4873 struct txgbe_l2_tn_key key; 4874 struct txgbe_l2_tn_filter *node; 4875 4876 if (!restore) { 4877 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 4878 key.tn_id = l2_tunnel->tunnel_id; 4879 4880 node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key); 4881 4882 if (node) { 4883 PMD_DRV_LOG(ERR, 4884 "The L2 tunnel filter already exists!"); 4885 return -EINVAL; 4886 } 4887 4888 node = rte_zmalloc("txgbe_l2_tn", 4889 sizeof(struct txgbe_l2_tn_filter), 4890 0); 4891 if (!node) 4892 return -ENOMEM; 4893 4894 rte_memcpy(&node->key, 4895 &key, 4896 sizeof(struct txgbe_l2_tn_key)); 4897 node->pool = l2_tunnel->pool; 4898 ret = txgbe_insert_l2_tn_filter(l2_tn_info, node); 4899 if (ret < 0) { 4900 rte_free(node); 4901 return ret; 4902 } 4903 } 4904 4905 switch (l2_tunnel->l2_tunnel_type) { 4906 case RTE_ETH_L2_TUNNEL_TYPE_E_TAG: 4907 ret = txgbe_e_tag_filter_add(dev, l2_tunnel); 4908 break; 4909 default: 4910 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 4911 ret = -EINVAL; 4912 break; 4913 } 4914 4915 if (!restore && ret < 0) 4916 (void)txgbe_remove_l2_tn_filter(l2_tn_info, &key); 4917 4918 return ret; 4919 } 4920 4921 /* Delete l2 tunnel filter */ 4922 int 4923 txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 4924 struct txgbe_l2_tunnel_conf *l2_tunnel) 4925 { 4926 int ret; 4927 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); 4928 struct txgbe_l2_tn_key key; 4929 4930 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 4931 key.tn_id = l2_tunnel->tunnel_id; 4932 ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key); 4933 if (ret < 0) 4934 return ret; 4935 4936 switch (l2_tunnel->l2_tunnel_type) { 4937 case RTE_ETH_L2_TUNNEL_TYPE_E_TAG: 4938 ret = txgbe_e_tag_filter_del(dev, l2_tunnel); 4939 
break; 4940 default: 4941 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 4942 ret = -EINVAL; 4943 break; 4944 } 4945 4946 return ret; 4947 } 4948 4949 static int 4950 txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 4951 { 4952 int ret = 0; 4953 uint32_t ctrl; 4954 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4955 4956 ctrl = rd32(hw, TXGBE_POOLCTL); 4957 ctrl &= ~TXGBE_POOLCTL_MODE_MASK; 4958 if (en) 4959 ctrl |= TXGBE_PSRPOOL_MODE_ETAG; 4960 wr32(hw, TXGBE_POOLCTL, ctrl); 4961 4962 return ret; 4963 } 4964 4965 /* Add UDP tunneling port */ 4966 static int 4967 txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 4968 struct rte_eth_udp_tunnel *udp_tunnel) 4969 { 4970 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 4971 int ret = 0; 4972 4973 if (udp_tunnel == NULL) 4974 return -EINVAL; 4975 4976 switch (udp_tunnel->prot_type) { 4977 case RTE_ETH_TUNNEL_TYPE_VXLAN: 4978 if (udp_tunnel->udp_port == 0) { 4979 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 4980 ret = -EINVAL; 4981 break; 4982 } 4983 wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port); 4984 break; 4985 case RTE_ETH_TUNNEL_TYPE_GENEVE: 4986 if (udp_tunnel->udp_port == 0) { 4987 PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed."); 4988 ret = -EINVAL; 4989 break; 4990 } 4991 wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port); 4992 break; 4993 case RTE_ETH_TUNNEL_TYPE_TEREDO: 4994 if (udp_tunnel->udp_port == 0) { 4995 PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed."); 4996 ret = -EINVAL; 4997 break; 4998 } 4999 wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port); 5000 break; 5001 case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE: 5002 if (udp_tunnel->udp_port == 0) { 5003 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 5004 ret = -EINVAL; 5005 break; 5006 } 5007 wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port); 5008 break; 5009 default: 5010 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 5011 ret = -EINVAL; 5012 break; 5013 } 5014 5015 txgbe_flush(hw); 5016 5017 return ret; 5018 } 5019 5020 /* Remove UDP tunneling port */ 5021 static int 5022 txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 5023 struct rte_eth_udp_tunnel *udp_tunnel) 5024 { 5025 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5026 int ret = 0; 5027 uint16_t cur_port; 5028 5029 if (udp_tunnel == NULL) 5030 return -EINVAL; 5031 5032 switch (udp_tunnel->prot_type) { 5033 case RTE_ETH_TUNNEL_TYPE_VXLAN: 5034 cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT); 5035 if (cur_port != udp_tunnel->udp_port) { 5036 PMD_DRV_LOG(ERR, "Port %u does not exist.", 5037 udp_tunnel->udp_port); 5038 ret = -EINVAL; 5039 break; 5040 } 5041 wr32(hw, TXGBE_VXLANPORT, 0); 5042 break; 5043 case RTE_ETH_TUNNEL_TYPE_GENEVE: 5044 cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT); 5045 if (cur_port != udp_tunnel->udp_port) { 5046 PMD_DRV_LOG(ERR, "Port %u does not exist.", 5047 udp_tunnel->udp_port); 5048 ret = -EINVAL; 5049 break; 5050 } 5051 wr32(hw, TXGBE_GENEVEPORT, 0); 5052 break; 5053 case RTE_ETH_TUNNEL_TYPE_TEREDO: 5054 cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT); 5055 if (cur_port != udp_tunnel->udp_port) { 5056 PMD_DRV_LOG(ERR, "Port %u does not exist.", 5057 udp_tunnel->udp_port); 5058 ret = -EINVAL; 5059 break; 5060 } 5061 wr32(hw, TXGBE_TEREDOPORT, 0); 5062 break; 5063 case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE: 5064 cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE); 5065 if (cur_port != udp_tunnel->udp_port) { 5066 PMD_DRV_LOG(ERR, "Port %u does not exist.", 5067 udp_tunnel->udp_port); 5068 ret = -EINVAL; 5069 break; 5070 } 5071 wr32(hw, TXGBE_VXLANPORTGPE, 0); 5072 break; 5073 default: 5074 
PMD_DRV_LOG(ERR, "Invalid tunnel type"); 5075 ret = -EINVAL; 5076 break; 5077 } 5078 5079 txgbe_flush(hw); 5080 5081 return ret; 5082 } 5083 5084 /* restore n-tuple filter */ 5085 static inline void 5086 txgbe_ntuple_filter_restore(struct rte_eth_dev *dev) 5087 { 5088 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5089 struct txgbe_5tuple_filter *node; 5090 5091 TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { 5092 txgbe_inject_5tuple_filter(dev, node); 5093 } 5094 } 5095 5096 /* restore ethernet type filter */ 5097 static inline void 5098 txgbe_ethertype_filter_restore(struct rte_eth_dev *dev) 5099 { 5100 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5101 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5102 int i; 5103 5104 for (i = 0; i < TXGBE_ETF_ID_MAX; i++) { 5105 if (filter_info->ethertype_mask & (1 << i)) { 5106 wr32(hw, TXGBE_ETFLT(i), 5107 filter_info->ethertype_filters[i].etqf); 5108 wr32(hw, TXGBE_ETCLS(i), 5109 filter_info->ethertype_filters[i].etqs); 5110 txgbe_flush(hw); 5111 } 5112 } 5113 } 5114 5115 /* restore SYN filter */ 5116 static inline void 5117 txgbe_syn_filter_restore(struct rte_eth_dev *dev) 5118 { 5119 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5120 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5121 uint32_t synqf; 5122 5123 synqf = filter_info->syn_info; 5124 5125 if (synqf & TXGBE_SYNCLS_ENA) { 5126 wr32(hw, TXGBE_SYNCLS, synqf); 5127 txgbe_flush(hw); 5128 } 5129 } 5130 5131 /* restore L2 tunnel filter */ 5132 static inline void 5133 txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) 5134 { 5135 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); 5136 struct txgbe_l2_tn_filter *node; 5137 struct txgbe_l2_tunnel_conf l2_tn_conf; 5138 5139 TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { 5140 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; 5141 l2_tn_conf.tunnel_id = node->key.tn_id; 5142 l2_tn_conf.pool = node->pool; 5143 (void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); 5144 } 5145 } 5146 5147 /* restore rss filter */ 5148 static inline void 5149 txgbe_rss_filter_restore(struct rte_eth_dev *dev) 5150 { 5151 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5152 5153 if (filter_info->rss_info.conf.queue_num) 5154 txgbe_config_rss_filter(dev, 5155 &filter_info->rss_info, TRUE); 5156 } 5157 5158 static int 5159 txgbe_filter_restore(struct rte_eth_dev *dev) 5160 { 5161 txgbe_ntuple_filter_restore(dev); 5162 txgbe_ethertype_filter_restore(dev); 5163 txgbe_syn_filter_restore(dev); 5164 txgbe_fdir_filter_restore(dev); 5165 txgbe_l2_tn_filter_restore(dev); 5166 txgbe_rss_filter_restore(dev); 5167 5168 return 0; 5169 } 5170 5171 static void 5172 txgbe_l2_tunnel_conf(struct rte_eth_dev *dev) 5173 { 5174 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); 5175 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5176 5177 if (l2_tn_info->e_tag_en) 5178 (void)txgbe_e_tag_enable(hw); 5179 5180 if (l2_tn_info->e_tag_fwd_en) 5181 (void)txgbe_e_tag_forwarding_en_dis(dev, 1); 5182 5183 (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); 5184 } 5185 5186 /* remove all the n-tuple filters */ 5187 void 5188 txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) 5189 { 5190 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5191 struct txgbe_5tuple_filter *p_5tuple; 5192 5193 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) 5194 txgbe_remove_5tuple_filter(dev, p_5tuple); 5195 } 5196 5197 /* remove all the ether type filters */ 5198 void 5199 
txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) 5200 { 5201 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5202 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5203 int i; 5204 5205 for (i = 0; i < TXGBE_ETF_ID_MAX; i++) { 5206 if (filter_info->ethertype_mask & (1 << i) && 5207 !filter_info->ethertype_filters[i].conf) { 5208 (void)txgbe_ethertype_filter_remove(filter_info, 5209 (uint8_t)i); 5210 wr32(hw, TXGBE_ETFLT(i), 0); 5211 wr32(hw, TXGBE_ETCLS(i), 0); 5212 txgbe_flush(hw); 5213 } 5214 } 5215 } 5216 5217 /* remove the SYN filter */ 5218 void 5219 txgbe_clear_syn_filter(struct rte_eth_dev *dev) 5220 { 5221 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); 5222 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); 5223 5224 if (filter_info->syn_info & TXGBE_SYNCLS_ENA) { 5225 filter_info->syn_info = 0; 5226 5227 wr32(hw, TXGBE_SYNCLS, 0); 5228 txgbe_flush(hw); 5229 } 5230 } 5231 5232 /* remove all the L2 tunnel filters */ 5233 int 5234 txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) 5235 { 5236 struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev); 5237 struct txgbe_l2_tn_filter *l2_tn_filter; 5238 struct txgbe_l2_tunnel_conf l2_tn_conf; 5239 int ret = 0; 5240 5241 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 5242 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; 5243 l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; 5244 l2_tn_conf.pool = l2_tn_filter->pool; 5245 ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); 5246 if (ret < 0) 5247 return ret; 5248 } 5249 5250 return 0; 5251 } 5252 5253 static const struct eth_dev_ops txgbe_eth_dev_ops = { 5254 .dev_configure = txgbe_dev_configure, 5255 .dev_infos_get = txgbe_dev_info_get, 5256 .dev_start = txgbe_dev_start, 5257 .dev_stop = txgbe_dev_stop, 5258 .dev_set_link_up = txgbe_dev_set_link_up, 5259 .dev_set_link_down = txgbe_dev_set_link_down, 5260 .dev_close = txgbe_dev_close, 5261 .dev_reset = txgbe_dev_reset, 5262 .promiscuous_enable = txgbe_dev_promiscuous_enable, 5263 .promiscuous_disable = txgbe_dev_promiscuous_disable, 5264 .allmulticast_enable = txgbe_dev_allmulticast_enable, 5265 .allmulticast_disable = txgbe_dev_allmulticast_disable, 5266 .link_update = txgbe_dev_link_update, 5267 .stats_get = txgbe_dev_stats_get, 5268 .xstats_get = txgbe_dev_xstats_get, 5269 .xstats_get_by_id = txgbe_dev_xstats_get_by_id, 5270 .stats_reset = txgbe_dev_stats_reset, 5271 .xstats_reset = txgbe_dev_xstats_reset, 5272 .xstats_get_names = txgbe_dev_xstats_get_names, 5273 .xstats_get_names_by_id = txgbe_dev_xstats_get_names_by_id, 5274 .queue_stats_mapping_set = txgbe_dev_queue_stats_mapping_set, 5275 .fw_version_get = txgbe_fw_version_get, 5276 .dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get, 5277 .mtu_set = txgbe_dev_mtu_set, 5278 .vlan_filter_set = txgbe_vlan_filter_set, 5279 .vlan_tpid_set = txgbe_vlan_tpid_set, 5280 .vlan_offload_set = txgbe_vlan_offload_set, 5281 .vlan_strip_queue_set = txgbe_vlan_strip_queue_set, 5282 .rx_queue_start = txgbe_dev_rx_queue_start, 5283 .rx_queue_stop = txgbe_dev_rx_queue_stop, 5284 .tx_queue_start = txgbe_dev_tx_queue_start, 5285 .tx_queue_stop = txgbe_dev_tx_queue_stop, 5286 .rx_queue_setup = txgbe_dev_rx_queue_setup, 5287 .rx_queue_intr_enable = txgbe_dev_rx_queue_intr_enable, 5288 .rx_queue_intr_disable = txgbe_dev_rx_queue_intr_disable, 5289 .rx_queue_release = txgbe_dev_rx_queue_release, 5290 .tx_queue_setup = txgbe_dev_tx_queue_setup, 5291 .tx_queue_release = txgbe_dev_tx_queue_release, 5292 .dev_led_on = txgbe_dev_led_on, 5293 
.dev_led_off = txgbe_dev_led_off, 5294 .flow_ctrl_get = txgbe_flow_ctrl_get, 5295 .flow_ctrl_set = txgbe_flow_ctrl_set, 5296 .priority_flow_ctrl_set = txgbe_priority_flow_ctrl_set, 5297 .mac_addr_add = txgbe_add_rar, 5298 .mac_addr_remove = txgbe_remove_rar, 5299 .mac_addr_set = txgbe_set_default_mac_addr, 5300 .uc_hash_table_set = txgbe_uc_hash_table_set, 5301 .uc_all_hash_table_set = txgbe_uc_all_hash_table_set, 5302 .set_queue_rate_limit = txgbe_set_queue_rate_limit, 5303 .reta_update = txgbe_dev_rss_reta_update, 5304 .reta_query = txgbe_dev_rss_reta_query, 5305 .rss_hash_update = txgbe_dev_rss_hash_update, 5306 .rss_hash_conf_get = txgbe_dev_rss_hash_conf_get, 5307 .flow_ops_get = txgbe_dev_flow_ops_get, 5308 .set_mc_addr_list = txgbe_dev_set_mc_addr_list, 5309 .rxq_info_get = txgbe_rxq_info_get, 5310 .txq_info_get = txgbe_txq_info_get, 5311 .timesync_enable = txgbe_timesync_enable, 5312 .timesync_disable = txgbe_timesync_disable, 5313 .timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp, 5314 .timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp, 5315 .get_reg = txgbe_get_regs, 5316 .get_eeprom_length = txgbe_get_eeprom_length, 5317 .get_eeprom = txgbe_get_eeprom, 5318 .set_eeprom = txgbe_set_eeprom, 5319 .get_module_info = txgbe_get_module_info, 5320 .get_module_eeprom = txgbe_get_module_eeprom, 5321 .get_dcb_info = txgbe_dev_get_dcb_info, 5322 .timesync_adjust_time = txgbe_timesync_adjust_time, 5323 .timesync_read_time = txgbe_timesync_read_time, 5324 .timesync_write_time = txgbe_timesync_write_time, 5325 .udp_tunnel_port_add = txgbe_dev_udp_tunnel_port_add, 5326 .udp_tunnel_port_del = txgbe_dev_udp_tunnel_port_del, 5327 .tm_ops_get = txgbe_tm_ops_get, 5328 .tx_done_cleanup = txgbe_dev_tx_done_cleanup, 5329 }; 5330 5331 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd); 5332 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map); 5333 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci"); 5334 RTE_PMD_REGISTER_PARAM_STRING(net_txgbe, 5335 TXGBE_DEVARG_BP_AUTO "=<0|1>" 5336 TXGBE_DEVARG_KR_POLL "=<0|1>" 5337 TXGBE_DEVARG_KR_PRESENT "=<0|1>" 5338 TXGBE_DEVARG_KX_SGMII "=<0|1>" 5339 TXGBE_DEVARG_FFE_SET "=<0-4>" 5340 TXGBE_DEVARG_FFE_MAIN "=<uint16>" 5341 TXGBE_DEVARG_FFE_PRE "=<uint16>" 5342 TXGBE_DEVARG_FFE_POST "=<uint16>"); 5343 5344 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_init, init, NOTICE); 5345 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_driver, driver, NOTICE); 5346 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_bp, bp, NOTICE); 5347 5348 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX 5349 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_rx, rx, DEBUG); 5350 #endif 5351 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX 5352 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx, tx, DEBUG); 5353 #endif 5354 5355 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE 5356 RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx_free, tx_free, DEBUG); 5357 #endif 5358
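/*
 * Illustrative devargs usage (editor's sketch; the key names are assumed
 * to match the TXGBE_DEVARG_* macros above, e.g. auto_neg/poll/present/
 * sgmii per the txgbe guide, and the PCI address is a placeholder):
 *
 *   dpdk-testpmd -a 0000:01:00.0,auto_neg=1,poll=0 -- -i
 */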