/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"
#include "ulp_flow_db.h"
#include "rte_pmd_bnxt.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58812) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58814) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_ACCUM_STATS		"accum-stats"
#define BNXT_DEVARG_FLOW_XSTAT		"flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS	"max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR		"representor"
#define BNXT_DEVARG_REP_BASED_PF	"rep-based-pf"
#define BNXT_DEVARG_REP_IS_PF		"rep-is-pf"
#define BNXT_DEVARG_REP_Q_R2F		"rep-q-r2f"
#define BNXT_DEVARG_REP_Q_F2R		"rep-q-f2r"
#define BNXT_DEVARG_REP_FC_R2F		"rep-fc-r2f"
#define BNXT_DEVARG_REP_FC_F2R		"rep-fc-f2r"
#define BNXT_DEVARG_APP_ID		"app-id"

static const char *const bnxt_dev_args[] = {
	BNXT_DEVARG_REPRESENTOR,
	BNXT_DEVARG_ACCUM_STATS,
	BNXT_DEVARG_FLOW_XSTAT,
	BNXT_DEVARG_MAX_NUM_KFLOWS,
	BNXT_DEVARG_REP_BASED_PF,
	BNXT_DEVARG_REP_IS_PF,
	BNXT_DEVARG_REP_Q_R2F,
	BNXT_DEVARG_REP_Q_F2R,
	BNXT_DEVARG_REP_FC_R2F,
	BNXT_DEVARG_REP_FC_F2R,
	BNXT_DEVARG_APP_ID,
	NULL
};

/*
 * accum-stats == false to disable flow counter accumulation
 * accum-stats == true to enable flow counter accumulation
 */
#define BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)	((accum_stats) > 1)

/*
 * app-id = a non-negative 8-bit number
 */
#define BNXT_DEVARG_APP_ID_INVALID(val)			((val) > 255)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)	((flow_xstat) > 1)

/*
 * rep_is_pf == false to indicate VF representor
 * rep_is_pf == true to indicate PF representor
 */
#define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)	((rep_is_pf) > 1)

/*
 * rep_based_pf == Physical index of the PF
 */
#define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)	((rep_based_pf) > 15)

/*
 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction
 */
#define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)	((rep_q_r2f) > 3)

/*
 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction
 */
#define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)	((rep_q_f2r) > 3)

/*
 * rep_fc_r2f == Flow control for the representor to endpoint direction
 */
#define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)	((rep_fc_r2f) > 1)

/*
 * rep_fc_f2r == Flow control for the endpoint to representor direction
 */
#define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)	((rep_fc_f2r) > 1)

int bnxt_cfa_code_dynfield_offset = -1;

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *	   0 -> valid
 */
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
	if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
		return 1;
	return 0;
}

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
static void bnxt_free_rep_info(struct bnxt *bp);
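
/*
 * Return a negative errno while the device cannot accept new HWRM commands:
 * -EIO after a fatal error, -EBUSY while a firmware reset is in progress.
 */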
int is_bnxt_in_error(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
		return -EIO;
	if (bp->flags & BNXT_FLAG_FW_RESET)
		return -EBUSY;

	return 0;
}

/***********************/

/*
 * High level utility functions
 */

static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
	unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings,
					     BNXT_RSS_TBL_SIZE_P5);

	if (!BNXT_CHIP_P5(bp))
		return 1;

	return RTE_ALIGN_MUL_CEIL(num_rss_rings,
				  BNXT_RSS_ENTRIES_PER_CTX_P5) /
	       BNXT_RSS_ENTRIES_PER_CTX_P5;
}

uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
	if (!BNXT_CHIP_P5(bp))
		return HW_HASH_INDEX_SIZE;

	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5;
}

static void bnxt_free_parent_info(struct bnxt *bp)
{
	rte_free(bp->parent);
	bp->parent = NULL;
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
	rte_free(bp->pf);
	bp->pf = NULL;
}

static void bnxt_free_link_info(struct bnxt *bp)
{
	rte_free(bp->link_info);
	bp->link_info = NULL;
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return;

	rte_free(bp->leds);
	bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
	rte_free(bp->flow_stat);
	bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
	rte_free(bp->rx_cos_queue);
	bp->rx_cos_queue = NULL;
	rte_free(bp->tx_cos_queue);
	bp->tx_cos_queue = NULL;
}
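
/*
 * Counterpart of bnxt_alloc_mem(). With reconfig set (firmware reset or
 * recovery), the Rx/Tx rings created by the *_queue_setup callbacks are
 * kept intact.
 */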
static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	/* tx/rx rings are configured as part of *_queue_setup callbacks.
	 * If the number of rings changes across a fw update,
	 * we don't have much choice except to warn the user.
	 */
	if (!reconfig) {
		bnxt_free_stats(bp);
		bnxt_free_tx_rings(bp);
		bnxt_free_rx_rings(bp);
	}
	bnxt_free_async_cp_ring(bp);
	bnxt_free_rxtx_nq_ring(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_alloc_parent_info(struct bnxt *bp)
{
	bp->parent = rte_zmalloc("bnxt_parent_info",
				 sizeof(struct bnxt_parent_info), 0);
	if (bp->parent == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_pf_info(struct bnxt *bp)
{
	bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
	if (bp->pf == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_link_info(struct bnxt *bp)
{
	bp->link_info =
		rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
	if (bp->link_info == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return 0;

	bp->leds = rte_zmalloc("bnxt_leds",
			       BNXT_MAX_LED * sizeof(struct bnxt_led_info),
			       0);
	if (bp->leds == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_cos_queues(struct bnxt *bp)
{
	bp->rx_cos_queue =
		rte_zmalloc("bnxt_rx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->rx_cos_queue == NULL)
		return -ENOMEM;

	bp->tx_cos_queue =
		rte_zmalloc("bnxt_tx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->tx_cos_queue == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
{
	bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
				    sizeof(struct bnxt_flow_stat_info), 0);
	if (bp->flow_stat == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
	int rc;

	rc = bnxt_alloc_ring_grps(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_ring_struct(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_cp_ring(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rxtx_nq_ring(bp);
	if (rc)
		goto alloc_mem_err;

	if (BNXT_FLOW_XSTATS_EN(bp)) {
		rc = bnxt_alloc_flow_stats_info(bp);
		if (rc)
			goto alloc_mem_err;
	}

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, reconfig);
	return rc;
}
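
/*
 * Allocate and configure one VNIC in firmware: ring group, RSS contexts
 * (when RSS is enabled), VLAN strip setting, L2 filters, RSS table and
 * TPA/LRO mode.
 */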
static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	unsigned int j;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto err_out;

	PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
		    vnic_id, vnic, vnic->fw_grp_ids);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc)
		goto err_out;

	/* Alloc RSS context only if RSS mode is enabled */
	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
		int j, nr_ctxs = bnxt_rss_ctxts(bp);

		/* RSS table size in Thor is 512.
		 * Cap the max Rx rings to the same value.
		 */
		if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) {
			PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n",
				    bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5);
			goto err_out;
		}

		rc = 0;
		for (j = 0; j < nr_ctxs; j++) {
			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
			if (rc)
				break;
		}
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
				    vnic_id, j, rc);
			goto err_out;
		}
		vnic->num_lb_ctxts = nr_ctxs;
	}

	/*
	 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
	 * setting is not available at this time, it will not be
	 * configured correctly in the CFA.
	 */
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto err_out;

	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
	if (rc)
		goto err_out;

	for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
		rxq = bp->eth_dev->data->rx_queues[j];

		PMD_DRV_LOG(DEBUG,
			    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
			    j, rxq->vnic, rxq->vnic->fw_grp_ids);

		if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
			rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		else
			vnic->rx_queue_cnt++;
	}

	PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

	rc = bnxt_vnic_rss_configure(bp, vnic);
	if (rc)
		goto err_out;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
				    (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
				    true : false);
	if (rc)
		goto err_out;

	return 0;
err_out:
	PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
		    vnic_id, rc);
	return rc;
}
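
/*
 * Register the flow-counter DMA tables with firmware and enable the CFA
 * counter engine on the Rx/Tx "out" tables.
 */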
static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
				&bp->flow_stat->rx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
		    " rx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
		    bp->flow_stat->rx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
				&bp->flow_stat->rx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
		    " rx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
		    bp->flow_stat->rx_fc_out_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
				&bp->flow_stat->tx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
		    " tx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
		    bp->flow_stat->tx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
				&bp->flow_stat->tx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
		    " tx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
		    bp->flow_stat->tx_fc_out_tbl.ctx_id);

	memset(bp->flow_stat->rx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->rx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->rx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);
	if (rc)
		return rc;

	memset(bp->flow_stat->tx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->tx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->tx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);

	return rc;
}

static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size,
				  struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return -EINVAL;

	ctx->va = rte_zmalloc_socket(type, size, 0,
				     bp->eth_dev->device->numa_node);
	if (ctx->va == NULL)
		return -ENOMEM;
	rte_mem_lock_page(ctx->va);
	ctx->size = size;
	ctx->dma = rte_mem_virt2iova(ctx->va);
	if (ctx->dma == RTE_BAD_IOVA)
		return -ENOMEM;

	return 0;
}

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];
	uint16_t max_fc;
	int rc = 0;

	max_fc = bp->flow_stat->max_fc;

	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 4,
				    &bp->flow_stat->rx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 16,
				    &bp->flow_stat->rx_fc_out_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 4,
				    &bp->flow_stat->tx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 16,
				    &bp->flow_stat->tx_fc_out_tbl);
	if (rc)
		return rc;

	rc = bnxt_register_fc_ctx_mem(bp);

	return rc;
}

static int bnxt_init_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
	    !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
	    !BNXT_FLOW_XSTATS_EN(bp))
		return 0;

	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
	if (rc)
		return rc;

	rc = bnxt_init_fc_ctx_mem(bp);

	return rc;
}
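
/*
 * Sync the PHY configuration with firmware: force an update when the link
 * is reported down (or unconditionally on BCM957508-N2100, see below).
 */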
static int bnxt_update_phy_setting(struct bnxt *bp)
{
	struct rte_eth_link new;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to get link settings\n");
		return rc;
	}

	/*
	 * On BCM957508-N2100 adapters, FW will not allow any user other
	 * than BMC to shut down the port. The bnxt_get_hwrm_link_config()
	 * call always returns link up. Force a phy update in that case.
	 */
	if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
			return rc;
		}
	}

	return rc;
}

static void bnxt_free_prev_ring_stats(struct bnxt *bp)
{
	rte_free(bp->prev_rx_ring_stats);
	rte_free(bp->prev_tx_ring_stats);

	bp->prev_rx_ring_stats = NULL;
	bp->prev_tx_ring_stats = NULL;
}

static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
{
	bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats",
					     sizeof(struct bnxt_ring_stats) *
					     bp->rx_cp_nr_rings,
					     0);
	if (bp->prev_rx_ring_stats == NULL)
		return -ENOMEM;

	bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats",
					     sizeof(struct bnxt_ring_stats) *
					     bp->tx_cp_nr_rings,
					     0);
	if (bp->prev_tx_ring_stats == NULL)
		goto error;

	return 0;

error:
	bnxt_free_prev_ring_stats(bp);
	return -ENOMEM;
}

static int bnxt_start_nic(struct bnxt *bp)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	uint32_t queue_id, base = BNXT_MISC_VEC_ID;
	uint32_t vec = BNXT_MISC_VEC_ID;
	unsigned int i, j;
	int rc;

	if (bp->eth_dev->data->mtu > RTE_ETHER_MTU)
		bp->flags |= BNXT_FLAG_JUMBO;
	else
		bp->flags &= ~BNXT_FLAG_JUMBO;

	/* THOR does not support ring groups.
	 * But we will use the array to save RSS context IDs.
	 */
	if (BNXT_CHIP_P5(bp))
		bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;

	rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_hwrm_rings(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_alloc_all_hwrm_ring_grps(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
		goto skip_cosq_cfg;

	for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
		if (bp->rx_cos_queue[i].id != 0xff) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[j++];

			if (!vnic) {
				PMD_DRV_LOG(ERR,
					    "Num pools more than FW profile\n");
				rc = -EINVAL;
				goto err_out;
			}
			vnic->cos_queue_id = bp->rx_cos_queue[i].id;
			bp->rx_cosq_cnt++;
		}
	}

skip_cosq_cfg:
	rc = bnxt_mq_rx_configure(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
		goto err_out;
	}

	/* default vnic 0 */
	rc = bnxt_setup_one_vnic(bp, 0);
	if (rc)
		goto err_out;
	/* VNIC configuration */
	if (BNXT_RFS_NEEDS_VNIC(bp)) {
		for (i = 1; i < bp->nr_vnics; i++) {
			rc = bnxt_setup_one_vnic(bp, i);
			if (rc)
				goto err_out;
		}
	}

	for (j = 0; j < bp->tx_nr_rings; j++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[j];

		if (!txq->tx_deferred_start) {
			bp->eth_dev->data->tx_queue_state[j] =
				RTE_ETH_QUEUE_STATE_STARTED;
			txq->tx_started = true;
		}
	}

	for (j = 0; j < bp->rx_nr_rings; j++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[j];

		if (!rxq->rx_deferred_start) {
			bp->eth_dev->data->rx_queue_state[j] =
				RTE_ETH_QUEUE_STATE_STARTED;
			rxq->rx_started = true;
		}
	}
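
	/* Apply the default L2 Rx mask on VNIC 0 before enabling interrupts. */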
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "HWRM cfa l2 rx mask failure rc: %x\n", rc);
		goto err_out;
	}

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
	    bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = bp->eth_dev->data->nb_rx_queues;
		PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
		if (intr_vector > bp->rx_cp_nr_rings) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    bp->rx_cp_nr_rings);
			return -ENOTSUP;
		}
		rc = rte_intr_efd_enable(intr_handle, intr_vector);
		if (rc)
			return rc;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    bp->eth_dev->data->nb_rx_queues)) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
				    " intr_vec", bp->eth_dev->data->nb_rx_queues);
			rc = -ENOMEM;
			goto err_out;
		}
		PMD_DRV_LOG(DEBUG, "intr_handle->nb_efd = %d "
			    "intr_handle->max_intr = %d\n",
			    rte_intr_nb_efd_get(intr_handle),
			    rte_intr_max_intr_get(intr_handle));
		for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
		     queue_id++) {
			rte_intr_vec_list_index_set(intr_handle,
					queue_id, vec + BNXT_RX_VEC_START);
			if (vec < base + rte_intr_nb_efd_get(intr_handle)
			    - 1)
				vec++;
		}
	}

	/* enable uio/vfio intr/eventfd mapping */
	rc = rte_intr_enable(intr_handle);
#ifndef RTE_EXEC_ENV_FREEBSD
	/* In FreeBSD OS, nic_uio driver does not support interrupts */
	if (rc)
		goto err_out;
#endif

	rc = bnxt_update_phy_setting(bp);
	if (rc)
		goto err_out;

	bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0);
	if (!bp->mark_table)
		PMD_DRV_LOG(ERR, "Allocation of mark table failed\n");

	return 0;

err_out:
	/* Some of the error status returned by FW may not be from errno.h */
	if (rc > 0)
		rc = -EIO;

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp)
{
	bnxt_free_all_hwrm_resources(bp);
	bnxt_free_all_filters(bp);
	bnxt_free_all_vnics(bp);
	return 0;
}

/*
 * Device configuration and status function
 */
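
/*
 * Translate the firmware-supported PHY speed bits (NRZ and, when present,
 * PAM4) into RTE_ETH_LINK_SPEED_* capability flags.
 */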
uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
	uint32_t link_speed = 0;
	uint32_t speed_capa = 0;

	if (bp->link_info == NULL)
		return 0;

	link_speed = bp->link_info->support_speeds;

	/* If PAM4 is configured, use PAM4 supported speed */
	if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0)
		link_speed = bp->link_info->support_pam4_speeds;

	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
		speed_capa |= RTE_ETH_LINK_SPEED_100M;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
		speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
		speed_capa |= RTE_ETH_LINK_SPEED_1G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
		speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
		speed_capa |= RTE_ETH_LINK_SPEED_10G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
		speed_capa |= RTE_ETH_LINK_SPEED_20G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
		speed_capa |= RTE_ETH_LINK_SPEED_25G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
		speed_capa |= RTE_ETH_LINK_SPEED_40G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
		speed_capa |= RTE_ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
		speed_capa |= RTE_ETH_LINK_SPEED_100G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
		speed_capa |= RTE_ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
		speed_capa |= RTE_ETH_LINK_SPEED_100G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
		speed_capa |= RTE_ETH_LINK_SPEED_200G;

	if (bp->link_info->auto_mode ==
	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
		speed_capa |= RTE_ETH_LINK_SPEED_FIXED;

	return speed_capa;
}

static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
	struct bnxt *bp = eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = pdev->max_vfs;

	max_rx_rings = bnxt_max_rings(bp);
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
	dev_info->hash_key_size = HW_HASH_KEY_SIZE;
	max_vnics = bp->max_vnics;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
				    dev_info->tx_queue_offload_capa;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
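
	/* Switch info is exposed only where representors are possible:
	 * on the PF or on a trusted VF.
	 */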
	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
		dev_info->switch_info.name = eth_dev->device->name;
		dev_info->switch_info.domain_id = bp->switch_domain_id;
		dev_info->switch_info.port_id =
				BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF :
					      BNXT_SWITCH_PORT_ID_TRUSTED_VF;
	}

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 * need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* RTE_ETH_64_POOLS */
	vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;
found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;

	return 0;
}

/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}
		/* If a resource has already been allocated - in this case
		 * it is the async completion ring, free it. Reallocate it after
		 * resource reservation. This will ensure the resource counts
		 * are calculated correctly.
		 */

		pthread_mutex_lock(&bp->def_cp_lock);

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			bnxt_disable_int(bp);
			bnxt_free_cp_ring(bp, bp->async_cp_ring);
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			pthread_mutex_unlock(&bp->def_cp_lock);
			return -ENOSPC;
		}

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			rc = bnxt_alloc_async_cp_ring(bp);
			if (rc) {
				pthread_mutex_unlock(&bp->def_cp_lock);
				return rc;
			}
			bnxt_enable_int(bp);
		}

		pthread_mutex_unlock(&bp->def_cp_lock);
	}

	/* Inherit new configurations */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
	    + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx)
		goto resource_error;

	if (BNXT_HAS_RING_GRPS(bp) &&
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
		goto resource_error;

	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
	    bp->max_vnics < eth_dev->data->nb_rx_queues)
		goto resource_error;

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;

	bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);

	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}

void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			    eth_dev->data->port_id,
			    (uint32_t)link->link_speed,
			    (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
			    ("full-duplex") : ("half-duplex"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			    eth_dev->data->port_id);
}
/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
	uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
	uint16_t buf_size;
	int i;

	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		return 1;

	if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
		return 1;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				      RTE_PKTMBUF_HEADROOM);
		if (eth_dev->data->mtu + overhead > buf_size)
			return 1;
	}
	return 0;
}

static eth_rx_burst_t
bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	/* Disable vector mode RX for Stingray2 for now */
	if (BNXT_CHIP_SR2(bp)) {
		bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts;
	}

#if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \
	!defined(RTE_LIBRTE_IEEE1588)

	/* Vector mode receive cannot be enabled if scattered rx is in use. */
	if (eth_dev->data->scattered_rx)
		goto use_scalar_rx;

	/*
	 * Vector mode receive cannot be enabled if Truflow is enabled or if
	 * asynchronous completions and receive completions can be placed in
	 * the same completion ring.
	 */
	if (BNXT_TRUFLOW_EN(bp) || !BNXT_NUM_ASYNC_CPR(bp))
		goto use_scalar_rx;

	/*
	 * Vector mode receive cannot be enabled if any receive offloads outside
	 * a limited subset have been enabled.
	 */
	if (eth_dev->data->dev_conf.rxmode.offloads &
		~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		  RTE_ETH_RX_OFFLOAD_KEEP_CRC |
		  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		  RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		  RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
		  RTE_ETH_RX_OFFLOAD_RSS_HASH |
		  RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
		goto use_scalar_rx;

#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
	    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
		PMD_DRV_LOG(INFO,
			    "Using AVX2 vector mode receive for port %d\n",
			    eth_dev->data->port_id);
		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts_vec_avx2;
	}
#endif
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		PMD_DRV_LOG(INFO,
			    "Using SSE vector mode receive for port %d\n",
			    eth_dev->data->port_id);
		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts_vec;
	}

use_scalar_rx:
	PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.rxmode.offloads);
#endif
	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	return bnxt_recv_pkts;
}
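
/*
 * Select the Tx burst handler. The vector paths are used only when no Tx
 * offload other than MBUF_FAST_FREE is requested, scattered Rx is off and
 * Truflow is disabled.
 */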
static eth_tx_burst_t
bnxt_transmit_function(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	/* Disable vector mode TX for Stingray2 for now */
	if (BNXT_CHIP_SR2(bp))
		return bnxt_xmit_pkts;

#if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \
	!defined(RTE_LIBRTE_IEEE1588)
	uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;

	/*
	 * Vector mode transmit can be enabled only if not using scatter rx
	 * or tx offloads.
	 */
	if (eth_dev->data->scattered_rx ||
	    (offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
	    BNXT_TRUFLOW_EN(bp))
		goto use_scalar_tx;

#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
	    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
		PMD_DRV_LOG(INFO,
			    "Using AVX2 vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec_avx2;
	}
#endif
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		PMD_DRV_LOG(INFO,
			    "Using SSE vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec;
	}

use_scalar_tx:
	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    offloads);
#endif
	return bnxt_xmit_pkts;
}

static int bnxt_handle_if_change_status(struct bnxt *bp)
{
	int rc;

	/* Since fw has undergone a reset and lost all contexts,
	 * set fatal flag to not issue hwrm during cleanup
	 */
	bp->flags |= BNXT_FLAG_FATAL_ERROR;
	bnxt_uninit_resources(bp, true);

	/* clear fatal flag so that re-init happens */
	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
	rc = bnxt_init_resources(bp, true);

	bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;

	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -ENOTSUP;

	if (!bp->link_info->link_up)
		rc = bnxt_set_hwrm_link_config(bp, true);
	if (!rc)
		eth_dev->data->dev_link.link_status = 1;

	bnxt_print_link_info(eth_dev);
	return rc;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	if (!BNXT_SINGLE_PF(bp))
		return -ENOTSUP;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info->link_up = 0;

	return 0;
}

static void bnxt_free_switch_domain(struct bnxt *bp)
{
	int rc = 0;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)))
		return;

	rc = rte_eth_switch_domain_free(bp->switch_domain_id);
	if (rc)
		PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n",
			    bp->switch_domain_id, rc);
}

static void bnxt_ptp_get_current_time(void *arg)
{
	struct bnxt *bp = arg;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return;

	if (!ptp)
		return;

	bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
				&ptp->current_time);

	rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
	if (rc != 0) {
		PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n");
		bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
	}
}
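
/*
 * Arm a one-second EAL alarm that keeps ptp->current_time fresh;
 * bnxt_ptp_get_current_time() re-arms itself until the alarm is cancelled.
 */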
static int bnxt_schedule_ptp_alarm(struct bnxt *bp)
{
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	int rc;

	if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED)
		return 0;

	bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
				&ptp->current_time);

	rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
	return rc;
}

static void bnxt_cancel_ptp_alarm(struct bnxt *bp)
{
	if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) {
		rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp);
		bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
	}
}

static void bnxt_ptp_stop(struct bnxt *bp)
{
	bnxt_cancel_ptp_alarm(bp);
	bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED;
}

static int bnxt_ptp_start(struct bnxt *bp)
{
	int rc;

	rc = bnxt_schedule_ptp_alarm(bp);
	if (rc != 0) {
		PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n");
	} else {
		bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED;
		bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
	}

	return rc;
}

static int bnxt_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct rte_eth_link link;
	int ret;

	eth_dev->data->dev_started = 0;
	eth_dev->data->scattered_rx = 0;

	/* Prevent crashes when queues are still in use */
	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

	bnxt_disable_int(bp);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Stop the child representors for this device */
	ret = bnxt_rep_stop_all(bp);
	if (ret != 0)
		return ret;

	/* delete the bnxt ULP port details */
	bnxt_ulp_port_deinit(bp);

	bnxt_cancel_fw_health_check(bp);

	if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp))
		bnxt_cancel_ptp_alarm(bp);

	/* Do not bring link down during reset recovery */
	if (!is_bnxt_in_error(bp)) {
		bnxt_dev_set_link_down_op(eth_dev);
		/* Wait for link to be reset */
		if (BNXT_SINGLE_PF(bp))
			rte_delay_ms(500);
		/* clear the recorded link status */
		memset(&link, 0, sizeof(link));
		rte_eth_linkstatus_set(eth_dev, &link);
	}

	/* Clean queue intr-vector mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	bnxt_hwrm_port_clr_stats(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	/* Process any remaining notifications in default completion queue */
	bnxt_int_handler(eth_dev);
	bnxt_shutdown_nic(bp);
	bnxt_hwrm_if_change(bp, false);

	bnxt_free_prev_ring_stats(bp);
	rte_free(bp->mark_table);
	bp->mark_table = NULL;

	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	bp->rx_cosq_cnt = 0;
	/* All filters are deleted on a port stop. */
	if (BNXT_FLOW_XSTATS_EN(bp))
		bp->flow_stat->flow_count = 0;

	return 0;
}
/* Unload the driver, release resources */
static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	pthread_mutex_lock(&bp->err_recovery_lock);
	if (bp->flags & BNXT_FLAG_FW_RESET) {
		PMD_DRV_LOG(ERR,
			    "Adapter recovering from error...Please retry\n");
		pthread_mutex_unlock(&bp->err_recovery_lock);
		return -EAGAIN;
	}
	pthread_mutex_unlock(&bp->err_recovery_lock);

	return bnxt_dev_stop(eth_dev);
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int vlan_mask = 0;
	int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;

	if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
		return -EINVAL;
	}

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS)
		PMD_DRV_LOG(ERR,
			    "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			    bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	do {
		rc = bnxt_hwrm_if_change(bp, true);
		if (rc == 0 || rc != -EAGAIN)
			break;

		rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
	} while (retry_cnt--);

	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
		rc = bnxt_handle_if_change_status(bp);
		if (rc)
			return rc;
	}

	bnxt_enable_int(bp);

	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);

	rc = bnxt_start_nic(bp);
	if (rc)
		goto error;

	rc = bnxt_alloc_prev_ring_stats(bp);
	if (rc)
		goto error;

	eth_dev->data->dev_started = 1;

	bnxt_link_update_op(eth_dev, 1);

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
		vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
		vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	/* Initialize bnxt ULP port details */
	rc = bnxt_ulp_port_init(bp);
	if (rc)
		goto error;

	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

	bnxt_schedule_fw_health_check(bp);

	if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp))
		bnxt_schedule_ptp_alarm(bp);

	return 0;

error:
	bnxt_dev_stop(eth_dev);
	return rc;
}

static void
bnxt_uninit_locks(struct bnxt *bp)
{
	pthread_mutex_destroy(&bp->flow_lock);
	pthread_mutex_destroy(&bp->def_cp_lock);
	pthread_mutex_destroy(&bp->health_check_lock);
	pthread_mutex_destroy(&bp->err_recovery_lock);
	if (bp->rep_info) {
		pthread_mutex_destroy(&bp->rep_info->vfr_lock);
		pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
	}
}
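
/*
 * Release driver-private state: LED/COS/link/parent info, locks,
 * memzones, and VF/PF info.
 */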
static void bnxt_drv_uninit(struct bnxt *bp)
{
	bnxt_free_leds_info(bp);
	bnxt_free_cos_queues(bp);
	bnxt_free_link_info(bp);
	bnxt_free_parent_info(bp);
	bnxt_uninit_locks(bp);

	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
	bp->tx_mem_zone = NULL;
	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
	bp->rx_mem_zone = NULL;

	bnxt_free_vf_info(bp);
	bnxt_free_pf_info(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pthread_mutex_lock(&bp->err_recovery_lock);
	if (bp->flags & BNXT_FLAG_FW_RESET) {
		PMD_DRV_LOG(ERR,
			    "Adapter recovering from error...Please retry\n");
		pthread_mutex_unlock(&bp->err_recovery_lock);
		return -EAGAIN;
	}
	pthread_mutex_unlock(&bp->err_recovery_lock);

	/* cancel the recovery handler before remove dev */
	rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
	rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
	bnxt_cancel_fc_thread(bp);

	if (eth_dev->data->dev_started)
		ret = bnxt_dev_stop(eth_dev);

	bnxt_uninit_resources(bp, false);

	bnxt_drv_uninit(bp);

	return ret;
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t i;

	if (is_bnxt_in_error(bp))
		return;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < bp->nr_vnics; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);
			if (filter->mac_index == index) {
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				bnxt_free_filter(bp, filter);
			}
			filter = temp_filter;
		}
	}
}
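
/*
 * Create an L2 MAC filter on the given VNIC; a no-op if a filter for this
 * mac_index already exists.
 */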
static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			       struct rte_ether_addr *mac_addr, uint32_t index,
			       uint32_t pool)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			PMD_DRV_LOG(DEBUG,
				    "MAC addr already exists for pool %d\n",
				    pool);
			return 0;
		}
	}

	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
		return -ENODEV;
	}

	/* bnxt_alloc_filter copies the default MAC to filter->l2_addr, so if
	 * the MAC being programmed now is a different one, copy that address
	 * to filter->l2_addr instead.
	 */
	if (mac_addr)
		memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
	if (!rc) {
		filter->mac_index = index;
		if (filter->mac_index == 0)
			STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
		else
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	} else {
		bnxt_free_filter(bp, filter);
	}

	return rc;
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);

	return rc;
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_link new;
	int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT :
				     BNXT_MIN_LINK_WAIT_CNT;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	memset(&new, 0, sizeof(new));

	if (bp->link_info == NULL)
		goto out;

	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = RTE_ETH_LINK_SPEED_100M;
			new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR,
				    "Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}

		if (!wait_to_complete || new.link_status)
			break;

		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
	} while (cnt--);
	/* Only a single function PF can bring the phy down.
	 * When port is stopped, report link down for VF/MH/NPAR functions.
	 */
	if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started)
		memset(&new, 0, sizeof(new));

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_eth_linkstatus_set(eth_dev, &new);
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}
*/ 1978 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1979 { 1980 if (qid >= bp->rx_nr_rings) 1981 return NULL; 1982 1983 return bp->eth_dev->data->rx_queues[qid]; 1984 } 1985 1986 /* Return rxq corresponding to a given rss table ring/group ID. */ 1987 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 1988 { 1989 struct bnxt_rx_queue *rxq; 1990 unsigned int i; 1991 1992 if (!BNXT_HAS_RING_GRPS(bp)) { 1993 for (i = 0; i < bp->rx_nr_rings; i++) { 1994 rxq = bp->eth_dev->data->rx_queues[i]; 1995 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 1996 return rxq->index; 1997 } 1998 } else { 1999 for (i = 0; i < bp->rx_nr_rings; i++) { 2000 if (bp->grp_info[i].fw_grp_id == fwr) 2001 return i; 2002 } 2003 } 2004 2005 return INVALID_HW_RING_ID; 2006 } 2007 2008 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 2009 struct rte_eth_rss_reta_entry64 *reta_conf, 2010 uint16_t reta_size) 2011 { 2012 struct bnxt *bp = eth_dev->data->dev_private; 2013 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2014 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2015 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2016 uint16_t idx, sft; 2017 int i, rc; 2018 2019 rc = is_bnxt_in_error(bp); 2020 if (rc) 2021 return rc; 2022 2023 if (!vnic->rss_table) 2024 return -EINVAL; 2025 2026 if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) 2027 return -EINVAL; 2028 2029 if (reta_size != tbl_size) { 2030 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2031 "(%d) must equal the size supported by the hardware " 2032 "(%d)\n", reta_size, tbl_size); 2033 return -EINVAL; 2034 } 2035 2036 for (i = 0; i < reta_size; i++) { 2037 struct bnxt_rx_queue *rxq; 2038 2039 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2040 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2041 2042 if (!(reta_conf[idx].mask & (1ULL << sft))) 2043 continue; 2044 2045 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 2046 if (!rxq) { 2047 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 2048 return -EINVAL; 2049 } 2050 2051 if (BNXT_CHIP_P5(bp)) { 2052 vnic->rss_table[i * 2] = 2053 rxq->rx_ring->rx_ring_struct->fw_ring_id; 2054 vnic->rss_table[i * 2 + 1] = 2055 rxq->cp_ring->cp_ring_struct->fw_ring_id; 2056 } else { 2057 vnic->rss_table[i] = 2058 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 2059 } 2060 } 2061 2062 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2063 return rc; 2064 } 2065 2066 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 2067 struct rte_eth_rss_reta_entry64 *reta_conf, 2068 uint16_t reta_size) 2069 { 2070 struct bnxt *bp = eth_dev->data->dev_private; 2071 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2072 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2073 uint16_t idx, sft, i; 2074 int rc; 2075 2076 rc = is_bnxt_in_error(bp); 2077 if (rc) 2078 return rc; 2079 2080 if (!vnic) 2081 return -EINVAL; 2082 if (!vnic->rss_table) 2083 return -EINVAL; 2084 2085 if (reta_size != tbl_size) { 2086 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2087 "(%d) must equal the size supported by the hardware " 2088 "(%d)\n", reta_size, tbl_size); 2089 return -EINVAL; 2090 } 2091 2092 for (idx = 0, i = 0; i < reta_size; i++) { 2093 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2094 sft = i % RTE_ETH_RETA_GROUP_SIZE; 2095 2096 if (reta_conf[idx].mask & (1ULL << sft)) { 2097 uint16_t qid; 2098 2099 if (BNXT_CHIP_P5(bp)) 2100 qid = bnxt_rss_to_qid(bp, 2101 vnic->rss_table[i * 2]); 2102 else 2103 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 2104 2105 if (qid == 
INVALID_HW_RING_ID) { 2106 PMD_DRV_LOG(ERR, "Invalid entry in RSS table.\n"); 2107 return -EINVAL; 2108 } 2109 reta_conf[idx].reta[sft] = qid; 2110 } 2111 } 2112 2113 return 0; 2114 } 2115 2116 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 2117 struct rte_eth_rss_conf *rss_conf) 2118 { 2119 struct bnxt *bp = eth_dev->data->dev_private; 2120 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2121 struct bnxt_vnic_info *vnic; 2122 int rc; 2123 2124 rc = is_bnxt_in_error(bp); 2125 if (rc) 2126 return rc; 2127 2128 /* 2129 * If the RSS enablement requested here differs from the mode set up 2130 * in dev_configure, return -EINVAL 2131 */ 2132 if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 2133 if (!rss_conf->rss_hf) 2134 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 2135 } else { 2136 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 2137 return -EINVAL; 2138 } 2139 2140 bp->flags |= BNXT_FLAG_UPDATE_HASH; 2141 memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf, 2142 rss_conf, 2143 sizeof(*rss_conf)); 2144 2145 /* Update the default RSS VNIC(s) */ 2146 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2147 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 2148 vnic->hash_mode = 2149 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 2150 RTE_ETH_RSS_LEVEL(rss_conf->rss_hf)); 2151 2152 /* 2153 * If the hash key is not specified, reuse the previously configured 2154 * hash key 2155 */ 2156 if (!rss_conf->rss_key) 2157 goto rss_config; 2158 2159 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 2160 PMD_DRV_LOG(ERR, 2161 "Invalid hash key length, should be %d bytes\n", 2162 HW_HASH_KEY_SIZE); 2163 return -EINVAL; 2164 } 2165 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 2166 2167 rss_config: 2168 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2169 return rc; 2170 } 2171 2172 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 2173 struct rte_eth_rss_conf *rss_conf) 2174 { 2175 struct bnxt *bp = eth_dev->data->dev_private; 2176 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2177 int len, rc; 2178 uint32_t hash_types; 2179 2180 rc = is_bnxt_in_error(bp); 2181 if (rc) 2182 return rc; 2183 2184 /* RSS configuration is the same for all VNICs */ 2185 if (vnic && vnic->rss_hash_key) { 2186 if (rss_conf->rss_key) { 2187 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
2188 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 2189 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 2190 } 2191 2192 hash_types = vnic->hash_type; 2193 rss_conf->rss_hf = 0; 2194 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 2195 rss_conf->rss_hf |= RTE_ETH_RSS_IPV4; 2196 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 2197 } 2198 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 2199 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; 2200 hash_types &= 2201 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 2202 } 2203 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 2204 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP; 2205 hash_types &= 2206 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 2207 } 2208 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 2209 rss_conf->rss_hf |= RTE_ETH_RSS_IPV6; 2210 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 2211 } 2212 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 2213 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP; 2214 hash_types &= 2215 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 2216 } 2217 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 2218 rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; 2219 hash_types &= 2220 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 2221 } 2222 2223 rss_conf->rss_hf |= 2224 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 2225 2226 if (hash_types) { 2227 PMD_DRV_LOG(ERR, 2228 "Unknown RSS config from firmware (%08x), RSS disabled", 2229 vnic->hash_type); 2230 return -ENOTSUP; 2231 } 2232 } else { 2233 rss_conf->rss_hf = 0; 2234 } 2235 return 0; 2236 } 2237 2238 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2239 struct rte_eth_fc_conf *fc_conf) 2240 { 2241 struct bnxt *bp = dev->data->dev_private; 2242 struct rte_eth_link link_info; 2243 int rc; 2244 2245 rc = is_bnxt_in_error(bp); 2246 if (rc) 2247 return rc; 2248 2249 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2250 if (rc) 2251 return rc; 2252 2253 memset(fc_conf, 0, sizeof(*fc_conf)); 2254 if (bp->link_info->auto_pause) 2255 fc_conf->autoneg = 1; 2256 switch (bp->link_info->pause) { 2257 case 0: 2258 fc_conf->mode = RTE_ETH_FC_NONE; 2259 break; 2260 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2261 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2262 break; 2263 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2264 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2265 break; 2266 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2267 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2268 fc_conf->mode = RTE_ETH_FC_FULL; 2269 break; 2270 } 2271 return 0; 2272 } 2273 2274 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2275 struct rte_eth_fc_conf *fc_conf) 2276 { 2277 struct bnxt *bp = dev->data->dev_private; 2278 int rc; 2279 2280 rc = is_bnxt_in_error(bp); 2281 if (rc) 2282 return rc; 2283 2284 if (!BNXT_SINGLE_PF(bp)) { 2285 PMD_DRV_LOG(ERR, 2286 "Flow Control Settings cannot be modified on VF or on shared PF\n"); 2287 return -ENOTSUP; 2288 } 2289 2290 switch (fc_conf->mode) { 2291 case RTE_ETH_FC_NONE: 2292 bp->link_info->auto_pause = 0; 2293 bp->link_info->force_pause = 0; 2294 break; 2295 case RTE_ETH_FC_RX_PAUSE: 2296 if (fc_conf->autoneg) { 2297 bp->link_info->auto_pause = 2298 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2299 bp->link_info->force_pause = 0; 2300 } else { 2301 bp->link_info->auto_pause = 0; 2302 bp->link_info->force_pause = 2303 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2304 } 2305 break; 2306 case RTE_ETH_FC_TX_PAUSE: 2307 if (fc_conf->autoneg) { 2308 bp->link_info->auto_pause = 2309 
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2310 bp->link_info->force_pause = 0; 2311 } else { 2312 bp->link_info->auto_pause = 0; 2313 bp->link_info->force_pause = 2314 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2315 } 2316 break; 2317 case RTE_ETH_FC_FULL: 2318 if (fc_conf->autoneg) { 2319 bp->link_info->auto_pause = 2320 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2321 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2322 bp->link_info->force_pause = 0; 2323 } else { 2324 bp->link_info->auto_pause = 0; 2325 bp->link_info->force_pause = 2326 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2327 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2328 } 2329 break; 2330 } 2331 return bnxt_set_hwrm_link_config(bp, true); 2332 } 2333 2334 /* Add UDP tunneling port */ 2335 static int 2336 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2337 struct rte_eth_udp_tunnel *udp_tunnel) 2338 { 2339 struct bnxt *bp = eth_dev->data->dev_private; 2340 uint16_t tunnel_type = 0; 2341 int rc = 0; 2342 2343 rc = is_bnxt_in_error(bp); 2344 if (rc) 2345 return rc; 2346 2347 switch (udp_tunnel->prot_type) { 2348 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2349 if (bp->vxlan_port_cnt) { 2350 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2351 udp_tunnel->udp_port); 2352 if (bp->vxlan_port != udp_tunnel->udp_port) { 2353 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2354 return -ENOSPC; 2355 } 2356 bp->vxlan_port_cnt++; 2357 return 0; 2358 } 2359 tunnel_type = 2360 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2361 break; 2362 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2363 if (bp->geneve_port_cnt) { 2364 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2365 udp_tunnel->udp_port); 2366 if (bp->geneve_port != udp_tunnel->udp_port) { 2367 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2368 return -ENOSPC; 2369 } 2370 bp->geneve_port_cnt++; 2371 return 0; 2372 } 2373 tunnel_type = 2374 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2375 break; 2376 default: 2377 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2378 return -ENOTSUP; 2379 } 2380 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2381 tunnel_type); 2382 2383 if (rc != 0) 2384 return rc; 2385 2386 if (tunnel_type == 2387 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN) 2388 bp->vxlan_port_cnt++; 2389 2390 if (tunnel_type == 2391 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE) 2392 bp->geneve_port_cnt++; 2393 2394 return rc; 2395 } 2396 2397 static int 2398 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2399 struct rte_eth_udp_tunnel *udp_tunnel) 2400 { 2401 struct bnxt *bp = eth_dev->data->dev_private; 2402 uint16_t tunnel_type = 0; 2403 uint16_t port = 0; 2404 int rc = 0; 2405 2406 rc = is_bnxt_in_error(bp); 2407 if (rc) 2408 return rc; 2409 2410 switch (udp_tunnel->prot_type) { 2411 case RTE_ETH_TUNNEL_TYPE_VXLAN: 2412 if (!bp->vxlan_port_cnt) { 2413 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2414 return -EINVAL; 2415 } 2416 if (bp->vxlan_port != udp_tunnel->udp_port) { 2417 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2418 udp_tunnel->udp_port, bp->vxlan_port); 2419 return -EINVAL; 2420 } 2421 if (--bp->vxlan_port_cnt) 2422 return 0; 2423 2424 tunnel_type = 2425 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2426 port = bp->vxlan_fw_dst_port_id; 2427 break; 2428 case RTE_ETH_TUNNEL_TYPE_GENEVE: 2429 if (!bp->geneve_port_cnt) { 2430 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2431 return -EINVAL; 2432 } 2433 if (bp->geneve_port != udp_tunnel->udp_port) { 2434 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2435 udp_tunnel->udp_port, bp->geneve_port); 2436 return -EINVAL; 2437 } 2438 if (--bp->geneve_port_cnt) 2439 return 0; 2440 2441 tunnel_type = 2442 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2443 port = bp->geneve_fw_dst_port_id; 2444 break; 2445 default: 2446 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2447 return -ENOTSUP; 2448 } 2449 2450 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2451 return rc; 2452 } 2453 2454 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2455 { 2456 struct bnxt_filter_info *filter; 2457 struct bnxt_vnic_info *vnic; 2458 int rc = 0; 2459 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2460 2461 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2462 filter = STAILQ_FIRST(&vnic->filter); 2463 while (filter) { 2464 /* Search for this matching MAC+VLAN filter */ 2465 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2466 /* Delete the filter */ 2467 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2468 if (rc) 2469 return rc; 2470 STAILQ_REMOVE(&vnic->filter, filter, 2471 bnxt_filter_info, next); 2472 bnxt_free_filter(bp, filter); 2473 PMD_DRV_LOG(INFO, 2474 "Deleted vlan filter for %d\n", 2475 vlan_id); 2476 return 0; 2477 } 2478 filter = STAILQ_NEXT(filter, next); 2479 } 2480 return -ENOENT; 2481 } 2482 2483 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2484 { 2485 struct bnxt_filter_info *filter; 2486 struct bnxt_vnic_info *vnic; 2487 int rc = 0; 2488 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2489 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2490 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2491 2492 /* Implementation notes on the use of VNIC in this command: 2493 * 2494 * By default, these filters belong to default vnic for the function. 2495 * Once these filters are set up, only destination VNIC can be modified. 2496 * If the destination VNIC is not specified in this command, 2497 * then the HWRM shall only create an l2 context id. 2498 */ 2499 2500 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2501 filter = STAILQ_FIRST(&vnic->filter); 2502 /* Check if the VLAN has already been added */ 2503 while (filter) { 2504 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2505 return -EEXIST; 2506 2507 filter = STAILQ_NEXT(filter, next); 2508 } 2509 2510 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2511 * command to create MAC+VLAN filter with the right flags, enables set. 2512 */ 2513 filter = bnxt_alloc_filter(bp); 2514 if (!filter) { 2515 PMD_DRV_LOG(ERR, 2516 "MAC/VLAN filter alloc failed\n"); 2517 return -ENOMEM; 2518 } 2519 /* MAC + VLAN ID filter */ 2520 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2521 * untagged packets are received 2522 * 2523 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2524 * packets and only the programmed vlan's packets are received 2525 */ 2526 filter->l2_ivlan = vlan_id; 2527 filter->l2_ivlan_mask = 0x0FFF; 2528 filter->enables |= en; 2529 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2530 2531 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2532 if (rc) { 2533 /* Free the newly allocated filter as we were 2534 * not able to create the filter in hardware. 
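 * No firmware filter resource was allocated, so only the software
 * object needs to be released.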
2535 */ 2536 bnxt_free_filter(bp, filter); 2537 return rc; 2538 } 2539 2540 filter->mac_index = 0; 2541 /* Add this new filter to the list */ 2542 if (vlan_id == 0) 2543 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2544 else 2545 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2546 2547 PMD_DRV_LOG(INFO, 2548 "Added Vlan filter for %d\n", vlan_id); 2549 return rc; 2550 } 2551 2552 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2553 uint16_t vlan_id, int on) 2554 { 2555 struct bnxt *bp = eth_dev->data->dev_private; 2556 int rc; 2557 2558 rc = is_bnxt_in_error(bp); 2559 if (rc) 2560 return rc; 2561 2562 if (!eth_dev->data->dev_started) { 2563 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2564 return -EINVAL; 2565 } 2566 2567 /* These operations apply to ALL existing MAC/VLAN filters */ 2568 if (on) 2569 return bnxt_add_vlan_filter(bp, vlan_id); 2570 else 2571 return bnxt_del_vlan_filter(bp, vlan_id); 2572 } 2573 2574 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2575 struct bnxt_vnic_info *vnic) 2576 { 2577 struct bnxt_filter_info *filter; 2578 int rc; 2579 2580 filter = STAILQ_FIRST(&vnic->filter); 2581 while (filter) { 2582 if (filter->mac_index == 0 && 2583 !memcmp(filter->l2_addr, bp->mac_addr, 2584 RTE_ETHER_ADDR_LEN)) { 2585 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2586 if (!rc) { 2587 STAILQ_REMOVE(&vnic->filter, filter, 2588 bnxt_filter_info, next); 2589 bnxt_free_filter(bp, filter); 2590 } 2591 return rc; 2592 } 2593 filter = STAILQ_NEXT(filter, next); 2594 } 2595 return 0; 2596 } 2597 2598 static int 2599 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2600 { 2601 struct bnxt_vnic_info *vnic; 2602 unsigned int i; 2603 int rc; 2604 2605 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2606 if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) { 2607 /* Remove any VLAN filters programmed */ 2608 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2609 bnxt_del_vlan_filter(bp, i); 2610 2611 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2612 if (rc) 2613 return rc; 2614 } else { 2615 /* Default filter will allow packets that match the 2616 * dest mac. 
So it has to be deleted; otherwise, we 2617 * will end up receiving VLAN packets for which the 2618 * filter is not programmed, when the hw-vlan-filter 2619 * configuration is ON 2620 */ 2621 bnxt_del_dflt_mac_filter(bp, vnic); 2622 /* This filter will allow only untagged packets */ 2623 bnxt_add_vlan_filter(bp, 0); 2624 } 2625 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2626 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)); 2627 2628 return 0; 2629 } 2630 2631 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2632 { 2633 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2634 unsigned int i; 2635 int rc; 2636 2637 /* Destroy vnic filters and vnic */ 2638 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2639 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2640 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2641 bnxt_del_vlan_filter(bp, i); 2642 } 2643 bnxt_del_dflt_mac_filter(bp, vnic); 2644 2645 rc = bnxt_hwrm_vnic_ctx_free(bp, vnic); 2646 if (rc) 2647 return rc; 2648 2649 rc = bnxt_hwrm_vnic_free(bp, vnic); 2650 if (rc) 2651 return rc; 2652 2653 rte_free(vnic->fw_grp_ids); 2654 vnic->fw_grp_ids = NULL; 2655 2656 vnic->rx_queue_cnt = 0; 2657 2658 return 0; 2659 } 2660 2661 static int 2662 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2663 { 2664 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2665 int rc; 2666 2667 /* Destroy, recreate and reconfigure the default vnic */ 2668 rc = bnxt_free_one_vnic(bp, 0); 2669 if (rc) 2670 return rc; 2671 2672 /* default vnic 0 */ 2673 rc = bnxt_setup_one_vnic(bp, 0); 2674 if (rc) 2675 return rc; 2676 2677 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2678 RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2679 rc = bnxt_add_vlan_filter(bp, 0); 2680 if (rc) 2681 return rc; 2682 rc = bnxt_restore_vlan_filters(bp); 2683 if (rc) 2684 return rc; 2685 } else { 2686 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2687 if (rc) 2688 return rc; 2689 } 2690 2691 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2692 if (rc) 2693 return rc; 2694 2695 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2696 !!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)); 2697 2698 return rc; 2699 } 2700 2701 static int 2702 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2703 { 2704 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2705 struct bnxt *bp = dev->data->dev_private; 2706 int rc; 2707 2708 rc = is_bnxt_in_error(bp); 2709 if (rc) 2710 return rc; 2711 2712 /* Filter settings will get applied when port is started */ 2713 if (!dev->data->dev_started) 2714 return 0; 2715 2716 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 2717 /* Enable or disable VLAN filtering */ 2718 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2719 if (rc) 2720 return rc; 2721 } 2722 2723 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 2724 /* Enable or disable VLAN stripping */ 2725 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2726 if (rc) 2727 return rc; 2728 } 2729 2730 if (mask & RTE_ETH_VLAN_EXTEND_MASK) { 2731 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) 2732 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2733 else 2734 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2735 } 2736 2737 return 0; 2738 } 2739 2740 static int 2741 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2742 uint16_t tpid) 2743 { 2744 struct bnxt *bp = dev->data->dev_private; 2745 int qinq = dev->data->dev_conf.rxmode.offloads & 2746 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; 2747 2748 if (vlan_type != RTE_ETH_VLAN_TYPE_INNER && 2749 vlan_type != RTE_ETH_VLAN_TYPE_OUTER)
{ 2750 PMD_DRV_LOG(ERR, 2751 "Unsupported vlan type."); 2752 return -EINVAL; 2753 } 2754 if (!qinq) { 2755 PMD_DRV_LOG(ERR, 2756 "QinQ not enabled. Needs to be ON as we can " 2757 "accelerate only outer vlan\n"); 2758 return -EINVAL; 2759 } 2760 2761 if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) { 2762 switch (tpid) { 2763 case RTE_ETHER_TYPE_QINQ: 2764 bp->outer_tpid_bd = 2765 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2766 break; 2767 case RTE_ETHER_TYPE_VLAN: 2768 bp->outer_tpid_bd = 2769 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2770 break; 2771 case RTE_ETHER_TYPE_QINQ1: 2772 bp->outer_tpid_bd = 2773 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2774 break; 2775 case RTE_ETHER_TYPE_QINQ2: 2776 bp->outer_tpid_bd = 2777 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2778 break; 2779 case RTE_ETHER_TYPE_QINQ3: 2780 bp->outer_tpid_bd = 2781 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2782 break; 2783 default: 2784 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2785 return -EINVAL; 2786 } 2787 bp->outer_tpid_bd |= tpid; 2788 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2789 } else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) { 2790 PMD_DRV_LOG(ERR, 2791 "Can accelerate only outer vlan in QinQ\n"); 2792 return -EINVAL; 2793 } 2794 2795 return 0; 2796 } 2797 2798 static int 2799 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2800 struct rte_ether_addr *addr) 2801 { 2802 struct bnxt *bp = dev->data->dev_private; 2803 /* Default Filter is tied to VNIC 0 */ 2804 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2805 int rc; 2806 2807 rc = is_bnxt_in_error(bp); 2808 if (rc) 2809 return rc; 2810 2811 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2812 return -EPERM; 2813 2814 if (rte_is_zero_ether_addr(addr)) 2815 return -EINVAL; 2816 2817 /* Filter settings will get applied when port is started */ 2818 if (!dev->data->dev_started) 2819 return 0; 2820 2821 /* Check if the requested MAC is already added */ 2822 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2823 return 0; 2824 2825 /* Destroy filter and re-create it */ 2826 bnxt_del_dflt_mac_filter(bp, vnic); 2827 2828 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2829 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 2830 /* This filter will allow only untagged packets */ 2831 rc = bnxt_add_vlan_filter(bp, 0); 2832 } else { 2833 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2834 } 2835 2836 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2837 return rc; 2838 } 2839 2840 static int 2841 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2842 struct rte_ether_addr *mc_addr_set, 2843 uint32_t nb_mc_addr) 2844 { 2845 struct bnxt *bp = eth_dev->data->dev_private; 2846 char *mc_addr_list = (char *)mc_addr_set; 2847 struct bnxt_vnic_info *vnic; 2848 uint32_t off = 0, i = 0; 2849 int rc; 2850 2851 rc = is_bnxt_in_error(bp); 2852 if (rc) 2853 return rc; 2854 2855 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2856 2857 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2858 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2859 goto allmulti; 2860 } 2861 2862 /* TODO Check for Duplicate mcast addresses */ 2863 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2864 for (i = 0; i < nb_mc_addr; i++) { 2865 memcpy(vnic->mc_list + off, &mc_addr_list[i], 2866 RTE_ETHER_ADDR_LEN); 2867 off += RTE_ETHER_ADDR_LEN; 2868 } 2869 2870 vnic->mc_addr_cnt = i; 2871 if (vnic->mc_addr_cnt) 2872 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2873 else 2874 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2875 2876 allmulti: 2877 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2878 } 2879 2880 
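/*
 * Report the firmware version unpacked from the 32-bit bp->fw_ver word as
 * "major.minor.update.reserved". Per the ethdev fw_version_get contract,
 * 0 means success and a positive return is the buffer size (including the
 * terminating NUL) the caller must provide. A minimal caller sketch using
 * the generic ethdev API (port_id is assumed to be a valid port):
 *
 *   char ver[32];
 *   int ret = rte_eth_dev_fw_version_get(port_id, ver, sizeof(ver));
 *   if (ret == 0)
 *       printf("firmware: %s\n", ver);
 *   else if (ret > 0)
 *       printf("buffer too small, need %d bytes\n", ret);
 */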
static int 2881 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2882 { 2883 struct bnxt *bp = dev->data->dev_private; 2884 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2885 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2886 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2887 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2888 int ret; 2889 2890 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2891 fw_major, fw_minor, fw_updt, fw_rsvd); 2892 if (ret < 0) 2893 return -EINVAL; 2894 2895 ret += 1; /* add the size of '\0' */ 2896 if (fw_size < (size_t)ret) 2897 return ret; 2898 else 2899 return 0; 2900 } 2901 2902 static void 2903 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2904 struct rte_eth_rxq_info *qinfo) 2905 { 2906 struct bnxt *bp = dev->data->dev_private; 2907 struct bnxt_rx_queue *rxq; 2908 2909 if (is_bnxt_in_error(bp)) 2910 return; 2911 2912 rxq = dev->data->rx_queues[queue_id]; 2913 2914 qinfo->mp = rxq->mb_pool; 2915 qinfo->scattered_rx = dev->data->scattered_rx; 2916 qinfo->nb_desc = rxq->nb_rx_desc; 2917 2918 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2919 qinfo->conf.rx_drop_en = rxq->drop_en; 2920 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2921 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2922 } 2923 2924 static void 2925 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2926 struct rte_eth_txq_info *qinfo) 2927 { 2928 struct bnxt *bp = dev->data->dev_private; 2929 struct bnxt_tx_queue *txq; 2930 2931 if (is_bnxt_in_error(bp)) 2932 return; 2933 2934 txq = dev->data->tx_queues[queue_id]; 2935 2936 qinfo->nb_desc = txq->nb_tx_desc; 2937 2938 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2939 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2940 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2941 2942 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2943 qinfo->conf.tx_rs_thresh = 0; 2944 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2945 qinfo->conf.offloads = txq->offloads; 2946 } 2947 2948 static const struct { 2949 eth_rx_burst_t pkt_burst; 2950 const char *info; 2951 } bnxt_rx_burst_info[] = { 2952 {bnxt_recv_pkts, "Scalar"}, 2953 #if defined(RTE_ARCH_X86) 2954 {bnxt_recv_pkts_vec, "Vector SSE"}, 2955 #endif 2956 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2957 {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, 2958 #endif 2959 #if defined(RTE_ARCH_ARM64) 2960 {bnxt_recv_pkts_vec, "Vector Neon"}, 2961 #endif 2962 }; 2963 2964 static int 2965 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2966 struct rte_eth_burst_mode *mode) 2967 { 2968 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2969 size_t i; 2970 2971 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 2972 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 2973 snprintf(mode->info, sizeof(mode->info), "%s", 2974 bnxt_rx_burst_info[i].info); 2975 return 0; 2976 } 2977 } 2978 2979 return -EINVAL; 2980 } 2981 2982 static const struct { 2983 eth_tx_burst_t pkt_burst; 2984 const char *info; 2985 } bnxt_tx_burst_info[] = { 2986 {bnxt_xmit_pkts, "Scalar"}, 2987 #if defined(RTE_ARCH_X86) 2988 {bnxt_xmit_pkts_vec, "Vector SSE"}, 2989 #endif 2990 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2991 {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, 2992 #endif 2993 #if defined(RTE_ARCH_ARM64) 2994 {bnxt_xmit_pkts_vec, "Vector Neon"}, 2995 #endif 2996 }; 2997 2998 static int 2999 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3000 struct rte_eth_burst_mode *mode) 3001 { 
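/* Identify the active Tx burst function by comparing the device's
 * tx_pkt_burst pointer against the table above, as the Rx path does. */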
3002 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 3003 size_t i; 3004 3005 for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) { 3006 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 3007 snprintf(mode->info, sizeof(mode->info), "%s", 3008 bnxt_tx_burst_info[i].info); 3009 return 0; 3010 } 3011 } 3012 3013 return -EINVAL; 3014 } 3015 3016 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 3017 { 3018 uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU; 3019 struct bnxt *bp = eth_dev->data->dev_private; 3020 uint32_t new_pkt_size; 3021 int rc; 3022 uint32_t i; 3023 3024 rc = is_bnxt_in_error(bp); 3025 if (rc) 3026 return rc; 3027 3028 /* Exit if receive queues are not configured yet */ 3029 if (!eth_dev->data->nb_rx_queues) 3030 return rc; 3031 3032 new_pkt_size = new_mtu + overhead; 3033 3034 /* 3035 * Disallow any MTU change that would require scattered receive support 3036 * if it is not already enabled. 3037 */ 3038 if (eth_dev->data->dev_started && 3039 !eth_dev->data->scattered_rx && 3040 (new_pkt_size > 3041 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 3042 PMD_DRV_LOG(ERR, 3043 "MTU change would require scattered rx support.\n"); 3044 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 3045 return -EINVAL; 3046 } 3047 3048 if (new_mtu > RTE_ETHER_MTU) 3049 bp->flags |= BNXT_FLAG_JUMBO; 3050 else 3051 bp->flags &= ~BNXT_FLAG_JUMBO; 3052 3053 /* Is there a change in mtu setting? */ 3054 if (eth_dev->data->mtu == new_mtu) 3055 return rc; 3056 3057 for (i = 0; i < bp->nr_vnics; i++) { 3058 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3059 uint16_t size = 0; 3060 3061 vnic->mru = BNXT_VNIC_MRU(new_mtu); 3062 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 3063 if (rc) 3064 break; 3065 3066 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 3067 size -= RTE_PKTMBUF_HEADROOM; 3068 3069 if (size < new_mtu) { 3070 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 3071 if (rc) 3072 return rc; 3073 } 3074 } 3075 3076 if (bnxt_hwrm_config_host_mtu(bp)) 3077 PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n"); 3078 3079 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 3080 3081 return rc; 3082 } 3083 3084 static int 3085 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 3086 { 3087 struct bnxt *bp = dev->data->dev_private; 3088 uint16_t vlan = bp->vlan; 3089 int rc; 3090 3091 rc = is_bnxt_in_error(bp); 3092 if (rc) 3093 return rc; 3094 3095 if (!BNXT_SINGLE_PF(bp)) { 3096 PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n"); 3097 return -ENOTSUP; 3098 } 3099 bp->vlan = on ?
pvid : 0; 3100 3101 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 3102 if (rc) 3103 bp->vlan = vlan; 3104 return rc; 3105 } 3106 3107 static int 3108 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 3109 { 3110 struct bnxt *bp = dev->data->dev_private; 3111 int rc; 3112 3113 rc = is_bnxt_in_error(bp); 3114 if (rc) 3115 return rc; 3116 3117 return bnxt_hwrm_port_led_cfg(bp, true); 3118 } 3119 3120 static int 3121 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 3122 { 3123 struct bnxt *bp = dev->data->dev_private; 3124 int rc; 3125 3126 rc = is_bnxt_in_error(bp); 3127 if (rc) 3128 return rc; 3129 3130 return bnxt_hwrm_port_led_cfg(bp, false); 3131 } 3132 3133 static uint32_t 3134 bnxt_rx_queue_count_op(void *rx_queue) 3135 { 3136 struct bnxt *bp; 3137 struct bnxt_cp_ring_info *cpr; 3138 uint32_t desc = 0, raw_cons, cp_ring_size; 3139 struct bnxt_rx_queue *rxq; 3140 struct rx_pkt_cmpl *rxcmp; 3141 int rc; 3142 3143 rxq = rx_queue; 3144 bp = rxq->bp; 3145 3146 rc = is_bnxt_in_error(bp); 3147 if (rc) 3148 return rc; 3149 3150 cpr = rxq->cp_ring; 3151 raw_cons = cpr->cp_raw_cons; 3152 cp_ring_size = cpr->cp_ring_struct->ring_size; 3153 3154 while (1) { 3155 uint32_t agg_cnt, cons, cmpl_type; 3156 3157 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3158 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3159 3160 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3161 break; 3162 3163 cmpl_type = CMP_TYPE(rxcmp); 3164 3165 switch (cmpl_type) { 3166 case CMPL_BASE_TYPE_RX_L2: 3167 case CMPL_BASE_TYPE_RX_L2_V2: 3168 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3169 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3170 desc++; 3171 break; 3172 3173 case CMPL_BASE_TYPE_RX_TPA_END: 3174 if (BNXT_CHIP_P5(rxq->bp)) { 3175 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3176 3177 p5_tpa_end = (void *)rxcmp; 3178 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3179 } else { 3180 struct rx_tpa_end_cmpl *tpa_end; 3181 3182 tpa_end = (void *)rxcmp; 3183 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3184 } 3185 3186 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3187 desc++; 3188 break; 3189 3190 default: 3191 raw_cons += CMP_LEN(cmpl_type); 3192 } 3193 } 3194 3195 return desc; 3196 } 3197 3198 static int 3199 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 3200 { 3201 struct bnxt_rx_queue *rxq = rx_queue; 3202 struct bnxt_cp_ring_info *cpr; 3203 struct bnxt_rx_ring_info *rxr; 3204 uint32_t desc, raw_cons, cp_ring_size; 3205 struct bnxt *bp = rxq->bp; 3206 struct rx_pkt_cmpl *rxcmp; 3207 int rc; 3208 3209 rc = is_bnxt_in_error(bp); 3210 if (rc) 3211 return rc; 3212 3213 if (offset >= rxq->nb_rx_desc) 3214 return -EINVAL; 3215 3216 rxr = rxq->rx_ring; 3217 cpr = rxq->cp_ring; 3218 cp_ring_size = cpr->cp_ring_struct->ring_size; 3219 3220 /* 3221 * For the vector receive case, the completion at the requested 3222 * offset can be indexed directly. 3223 */ 3224 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 3225 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 3226 struct rx_pkt_cmpl *rxcmp; 3227 uint32_t cons; 3228 3229 /* Check status of completion descriptor. */ 3230 raw_cons = cpr->cp_raw_cons + 3231 offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2); 3232 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3233 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3234 3235 if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3236 return RTE_ETH_RX_DESC_DONE; 3237 3238 /* Check whether rx desc has an mbuf attached. 
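 * Slots in the rxrearm window have had their mbufs handed to the stack
 * and not yet been refilled, so those descriptors are unavailable.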
*/ 3239 cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2); 3240 if (cons >= rxq->rxrearm_start && 3241 cons < rxq->rxrearm_start + rxq->rxrearm_nb) { 3242 return RTE_ETH_RX_DESC_UNAVAIL; 3243 } 3244 3245 return RTE_ETH_RX_DESC_AVAIL; 3246 } 3247 #endif 3248 3249 /* 3250 * For the non-vector receive case, scan the completion ring to 3251 * locate the completion descriptor for the requested offset. 3252 */ 3253 raw_cons = cpr->cp_raw_cons; 3254 desc = 0; 3255 while (1) { 3256 uint32_t agg_cnt, cons, cmpl_type; 3257 3258 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3259 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3260 3261 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3262 break; 3263 3264 cmpl_type = CMP_TYPE(rxcmp); 3265 3266 switch (cmpl_type) { 3267 case CMPL_BASE_TYPE_RX_L2: 3268 case CMPL_BASE_TYPE_RX_L2_V2: 3269 if (desc == offset) { 3270 cons = rxcmp->opaque; 3271 if (rxr->rx_buf_ring[cons]) 3272 return RTE_ETH_RX_DESC_DONE; 3273 else 3274 return RTE_ETH_RX_DESC_UNAVAIL; 3275 } 3276 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3277 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3278 desc++; 3279 break; 3280 3281 case CMPL_BASE_TYPE_RX_TPA_END: 3282 if (desc == offset) 3283 return RTE_ETH_RX_DESC_DONE; 3284 3285 if (BNXT_CHIP_P5(rxq->bp)) { 3286 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3287 3288 p5_tpa_end = (void *)rxcmp; 3289 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3290 } else { 3291 struct rx_tpa_end_cmpl *tpa_end; 3292 3293 tpa_end = (void *)rxcmp; 3294 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3295 } 3296 3297 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3298 desc++; 3299 break; 3300 3301 default: 3302 raw_cons += CMP_LEN(cmpl_type); 3303 } 3304 } 3305 3306 return RTE_ETH_RX_DESC_AVAIL; 3307 } 3308 3309 static int 3310 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 3311 { 3312 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 3313 struct bnxt_cp_ring_info *cpr = txq->cp_ring; 3314 uint32_t ring_mask, raw_cons, nb_tx_pkts = 0; 3315 struct cmpl_base *cp_desc_ring; 3316 int rc; 3317 3318 rc = is_bnxt_in_error(txq->bp); 3319 if (rc) 3320 return rc; 3321 3322 if (offset >= txq->nb_tx_desc) 3323 return -EINVAL; 3324 3325 /* Return "desc done" if descriptor is available for use. */ 3326 if (bnxt_tx_bds_in_hw(txq) <= offset) 3327 return RTE_ETH_TX_DESC_DONE; 3328 3329 raw_cons = cpr->cp_raw_cons; 3330 cp_desc_ring = cpr->cp_desc_ring; 3331 ring_mask = cpr->cp_ring_struct->ring_mask; 3332 3333 /* Check to see if hw has posted a completion for the descriptor. */ 3334 while (1) { 3335 struct tx_cmpl *txcmp; 3336 uint32_t cons; 3337 3338 cons = RING_CMPL(ring_mask, raw_cons); 3339 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; 3340 3341 if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1)) 3342 break; 3343 3344 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) 3345 nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque); 3346 3347 if (nb_tx_pkts > offset) 3348 return RTE_ETH_TX_DESC_DONE; 3349 3350 raw_cons = NEXT_RAW_CMP(raw_cons); 3351 } 3352 3353 /* Descriptor is pending transmit, not yet completed by hardware. 
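 * It still belongs to the NIC, so report FULL rather than DONE.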
*/ 3354 return RTE_ETH_TX_DESC_FULL; 3355 } 3356 3357 int 3358 bnxt_flow_ops_get_op(struct rte_eth_dev *dev, 3359 const struct rte_flow_ops **ops) 3360 { 3361 struct bnxt *bp = dev->data->dev_private; 3362 int ret = 0; 3363 3364 if (!bp) 3365 return -EIO; 3366 3367 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3368 struct bnxt_representor *vfr = dev->data->dev_private; 3369 bp = vfr->parent_dev->data->dev_private; 3370 /* parent is deleted while children are still valid */ 3371 if (!bp) { 3372 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n", 3373 dev->data->port_id); 3374 return -EIO; 3375 } 3376 } 3377 3378 ret = is_bnxt_in_error(bp); 3379 if (ret) 3380 return ret; 3381 3382 /* PMD supports thread-safe flow operations. rte_flow API 3383 * functions can avoid mutex for multi-thread safety. 3384 */ 3385 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3386 3387 if (BNXT_TRUFLOW_EN(bp)) 3388 *ops = &bnxt_ulp_rte_flow_ops; 3389 else 3390 *ops = &bnxt_flow_ops; 3391 3392 return ret; 3393 } 3394 3395 static const uint32_t * 3396 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3397 { 3398 static const uint32_t ptypes[] = { 3399 RTE_PTYPE_L2_ETHER_VLAN, 3400 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3401 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3402 RTE_PTYPE_L4_ICMP, 3403 RTE_PTYPE_L4_TCP, 3404 RTE_PTYPE_L4_UDP, 3405 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3406 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3407 RTE_PTYPE_INNER_L4_ICMP, 3408 RTE_PTYPE_INNER_L4_TCP, 3409 RTE_PTYPE_INNER_L4_UDP, 3410 RTE_PTYPE_UNKNOWN 3411 }; 3412 3413 if (!dev->rx_pkt_burst) 3414 return NULL; 3415 3416 return ptypes; 3417 } 3418 3419 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3420 int reg_win) 3421 { 3422 uint32_t reg_base = *reg_arr & 0xfffff000; 3423 uint32_t win_off; 3424 int i; 3425 3426 for (i = 0; i < count; i++) { 3427 if ((reg_arr[i] & 0xfffff000) != reg_base) 3428 return -ERANGE; 3429 } 3430 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3431 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3432 return 0; 3433 } 3434 3435 static int bnxt_map_ptp_regs(struct bnxt *bp) 3436 { 3437 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3438 uint32_t *reg_arr; 3439 int rc, i; 3440 3441 reg_arr = ptp->rx_regs; 3442 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3443 if (rc) 3444 return rc; 3445 3446 reg_arr = ptp->tx_regs; 3447 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3448 if (rc) 3449 return rc; 3450 3451 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3452 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3453 3454 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3455 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3456 3457 return 0; 3458 } 3459 3460 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3461 { 3462 rte_write32(0, (uint8_t *)bp->bar0 + 3463 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3464 rte_write32(0, (uint8_t *)bp->bar0 + 3465 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3466 } 3467 3468 static uint64_t bnxt_cc_read(struct bnxt *bp) 3469 { 3470 uint64_t ns; 3471 3472 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3473 BNXT_GRCPF_REG_SYNC_TIME)); 3474 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3475 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3476 return ns; 3477 } 3478 3479 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3480 { 3481 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3482 uint32_t fifo; 3483 3484 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3485 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3486 if (fifo & 
BNXT_PTP_TX_FIFO_EMPTY) 3487 return -EAGAIN; 3488 3489 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3490 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3491 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3492 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3493 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3494 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3495 rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); 3496 3497 return 0; 3498 } 3499 3500 static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts) 3501 { 3502 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3503 struct bnxt_pf_info *pf = bp->pf; 3504 uint16_t port_id; 3505 int i = 0; 3506 uint32_t fifo; 3507 3508 if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) 3509 return -EINVAL; 3510 3511 port_id = pf->port_id; 3512 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3513 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3514 while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) { 3515 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3516 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3517 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3518 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3519 *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3520 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3521 *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3522 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3523 i++; 3524 } 3525 3526 if (i >= BNXT_PTP_RX_PND_CNT) 3527 return -EBUSY; 3528 3529 return 0; 3530 } 3531 3532 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3533 { 3534 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3535 struct bnxt_pf_info *pf = bp->pf; 3536 uint16_t port_id; 3537 uint32_t fifo; 3538 3539 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3540 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3541 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3542 return -EAGAIN; 3543 3544 port_id = pf->port_id; 3545 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3546 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3547 3548 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3549 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3550 if (fifo & BNXT_PTP_RX_FIFO_PENDING) 3551 return bnxt_clr_rx_ts(bp, ts); 3552 3553 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3554 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3555 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3556 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3557 3558 return 0; 3559 } 3560 3561 static int 3562 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3563 { 3564 uint64_t ns; 3565 struct bnxt *bp = dev->data->dev_private; 3566 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3567 3568 if (!ptp) 3569 return -ENOTSUP; 3570 3571 ns = rte_timespec_to_ns(ts); 3572 /* Set the timecounters to a new value. 
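 * Only the software timecounter offsets are rewritten; the hardware
 * cycle counters themselves keep running unmodified.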
*/ 3573 ptp->tc.nsec = ns; 3574 ptp->tx_tstamp_tc.nsec = ns; 3575 ptp->rx_tstamp_tc.nsec = ns; 3576 3577 return 0; 3578 } 3579 3580 static int 3581 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3582 { 3583 struct bnxt *bp = dev->data->dev_private; 3584 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3585 uint64_t ns, systime_cycles = 0; 3586 int rc = 0; 3587 3588 if (!ptp) 3589 return -ENOTSUP; 3590 3591 if (BNXT_CHIP_P5(bp)) 3592 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3593 &systime_cycles); 3594 else 3595 systime_cycles = bnxt_cc_read(bp); 3596 3597 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3598 *ts = rte_ns_to_timespec(ns); 3599 3600 return rc; 3601 } 3602 static int 3603 bnxt_timesync_enable(struct rte_eth_dev *dev) 3604 { 3605 struct bnxt *bp = dev->data->dev_private; 3606 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3607 uint32_t shift = 0; 3608 int rc; 3609 3610 if (!ptp) 3611 return -ENOTSUP; 3612 3613 ptp->rx_filter = 1; 3614 ptp->tx_tstamp_en = 1; 3615 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3616 3617 rc = bnxt_hwrm_ptp_cfg(bp); 3618 if (rc) 3619 return rc; 3620 3621 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3622 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3623 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3624 3625 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3626 ptp->tc.cc_shift = shift; 3627 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3628 3629 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3630 ptp->rx_tstamp_tc.cc_shift = shift; 3631 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3632 3633 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3634 ptp->tx_tstamp_tc.cc_shift = shift; 3635 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3636 3637 if (!BNXT_CHIP_P5(bp)) 3638 bnxt_map_ptp_regs(bp); 3639 else 3640 rc = bnxt_ptp_start(bp); 3641 3642 return rc; 3643 } 3644 3645 static int 3646 bnxt_timesync_disable(struct rte_eth_dev *dev) 3647 { 3648 struct bnxt *bp = dev->data->dev_private; 3649 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3650 3651 if (!ptp) 3652 return -ENOTSUP; 3653 3654 ptp->rx_filter = 0; 3655 ptp->tx_tstamp_en = 0; 3656 ptp->rxctl = 0; 3657 3658 bnxt_hwrm_ptp_cfg(bp); 3659 3660 if (!BNXT_CHIP_P5(bp)) 3661 bnxt_unmap_ptp_regs(bp); 3662 else 3663 bnxt_ptp_stop(bp); 3664 3665 return 0; 3666 } 3667 3668 static int 3669 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3670 struct timespec *timestamp, 3671 uint32_t flags __rte_unused) 3672 { 3673 struct bnxt *bp = dev->data->dev_private; 3674 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3675 uint64_t rx_tstamp_cycles = 0; 3676 uint64_t ns; 3677 3678 if (!ptp) 3679 return -ENOTSUP; 3680 3681 if (BNXT_CHIP_P5(bp)) 3682 rx_tstamp_cycles = ptp->rx_timestamp; 3683 else 3684 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3685 3686 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3687 *timestamp = rte_ns_to_timespec(ns); 3688 return 0; 3689 } 3690 3691 static int 3692 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3693 struct timespec *timestamp) 3694 { 3695 struct bnxt *bp = dev->data->dev_private; 3696 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3697 uint64_t tx_tstamp_cycles = 0; 3698 uint64_t ns; 3699 int rc = 0; 3700 3701 if (!ptp) 3702 return -ENOTSUP; 3703 3704 if (BNXT_CHIP_P5(bp)) 3705 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3706 &tx_tstamp_cycles); 3707 else 3708 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3709 3710 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3711 
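/* Expose the timecounter-adjusted nanosecond value as a timespec. */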
*timestamp = rte_ns_to_timespec(ns); 3712 3713 return rc; 3714 } 3715 3716 static int 3717 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3718 { 3719 struct bnxt *bp = dev->data->dev_private; 3720 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3721 3722 if (!ptp) 3723 return -ENOTSUP; 3724 3725 ptp->tc.nsec += delta; 3726 ptp->tx_tstamp_tc.nsec += delta; 3727 ptp->rx_tstamp_tc.nsec += delta; 3728 3729 return 0; 3730 } 3731 3732 static int 3733 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3734 { 3735 struct bnxt *bp = dev->data->dev_private; 3736 int rc; 3737 uint32_t dir_entries; 3738 uint32_t entry_length; 3739 3740 rc = is_bnxt_in_error(bp); 3741 if (rc) 3742 return rc; 3743 3744 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3745 bp->pdev->addr.domain, bp->pdev->addr.bus, 3746 bp->pdev->addr.devid, bp->pdev->addr.function); 3747 3748 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3749 if (rc != 0) 3750 return rc; 3751 3752 return dir_entries * entry_length; 3753 } 3754 3755 static int 3756 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3757 struct rte_dev_eeprom_info *in_eeprom) 3758 { 3759 struct bnxt *bp = dev->data->dev_private; 3760 uint32_t index; 3761 uint32_t offset; 3762 int rc; 3763 3764 rc = is_bnxt_in_error(bp); 3765 if (rc) 3766 return rc; 3767 3768 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3769 bp->pdev->addr.domain, bp->pdev->addr.bus, 3770 bp->pdev->addr.devid, bp->pdev->addr.function, 3771 in_eeprom->offset, in_eeprom->length); 3772 3773 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3774 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3775 in_eeprom->data); 3776 3777 index = in_eeprom->offset >> 24; 3778 offset = in_eeprom->offset & 0xffffff; 3779 3780 if (index != 0) 3781 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3782 in_eeprom->length, in_eeprom->data); 3783 3784 return 0; 3785 } 3786 3787 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3788 { 3789 switch (dir_type) { 3790 case BNX_DIR_TYPE_CHIMP_PATCH: 3791 case BNX_DIR_TYPE_BOOTCODE: 3792 case BNX_DIR_TYPE_BOOTCODE_2: 3793 case BNX_DIR_TYPE_APE_FW: 3794 case BNX_DIR_TYPE_APE_PATCH: 3795 case BNX_DIR_TYPE_KONG_FW: 3796 case BNX_DIR_TYPE_KONG_PATCH: 3797 case BNX_DIR_TYPE_BONO_FW: 3798 case BNX_DIR_TYPE_BONO_PATCH: 3799 /* FALLTHROUGH */ 3800 return true; 3801 } 3802 3803 return false; 3804 } 3805 3806 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3807 { 3808 switch (dir_type) { 3809 case BNX_DIR_TYPE_AVS: 3810 case BNX_DIR_TYPE_EXP_ROM_MBA: 3811 case BNX_DIR_TYPE_PCIE: 3812 case BNX_DIR_TYPE_TSCF_UCODE: 3813 case BNX_DIR_TYPE_EXT_PHY: 3814 case BNX_DIR_TYPE_CCM: 3815 case BNX_DIR_TYPE_ISCSI_BOOT: 3816 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3817 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3818 /* FALLTHROUGH */ 3819 return true; 3820 } 3821 3822 return false; 3823 } 3824 3825 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3826 { 3827 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3828 bnxt_dir_type_is_other_exec_format(dir_type); 3829 } 3830 3831 static int 3832 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3833 struct rte_dev_eeprom_info *in_eeprom) 3834 { 3835 struct bnxt *bp = dev->data->dev_private; 3836 uint8_t index, dir_op; 3837 uint16_t type, ext, ordinal, attr; 3838 int rc; 3839 3840 rc = is_bnxt_in_error(bp); 3841 if (rc) 3842 return rc; 3843 3844 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3845 bp->pdev->addr.domain, bp->pdev->addr.bus, 3846 
bp->pdev->addr.devid, bp->pdev->addr.function, 3847 in_eeprom->offset, in_eeprom->length); 3848 3849 if (!BNXT_PF(bp)) { 3850 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3851 return -EINVAL; 3852 } 3853 3854 type = in_eeprom->magic >> 16; 3855 3856 if (type == 0xffff) { /* special value for directory operations */ 3857 index = in_eeprom->magic & 0xff; 3858 dir_op = in_eeprom->magic >> 8; 3859 if (index == 0) 3860 return -EINVAL; 3861 switch (dir_op) { 3862 case 0x0e: /* erase */ 3863 if (in_eeprom->offset != ~in_eeprom->magic) 3864 return -EINVAL; 3865 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3866 default: 3867 return -EINVAL; 3868 } 3869 } 3870 3871 /* Create or re-write an NVM item: */ 3872 if (bnxt_dir_type_is_executable(type) == true) 3873 return -EOPNOTSUPP; 3874 ext = in_eeprom->magic & 0xffff; 3875 ordinal = in_eeprom->offset >> 16; 3876 attr = in_eeprom->offset & 0xffff; 3877 3878 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3879 in_eeprom->data, in_eeprom->length); 3880 } 3881 3882 static int bnxt_get_module_info(struct rte_eth_dev *dev, 3883 struct rte_eth_dev_module_info *modinfo) 3884 { 3885 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3886 struct bnxt *bp = dev->data->dev_private; 3887 int rc; 3888 3889 /* No point in going further if phy status indicates 3890 * module is not inserted or if it is powered down or 3891 * if it is of type 10GBase-T 3892 */ 3893 if (bp->link_info->module_status > 3894 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) { 3895 PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n", 3896 dev->data->port_id); 3897 return -ENOTSUP; 3898 } 3899 3900 /* This feature is not supported in older firmware versions */ 3901 if (bp->hwrm_spec_code < 0x10202) { 3902 PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n", 3903 dev->data->port_id); 3904 return -ENOTSUP; 3905 } 3906 3907 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3908 SFF_DIAG_SUPPORT_OFFSET + 1, 3909 module_info); 3910 3911 if (rc) 3912 return rc; 3913 3914 switch (module_info[0]) { 3915 case SFF_MODULE_ID_SFP: 3916 modinfo->type = RTE_ETH_MODULE_SFF_8472; 3917 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 3918 if (module_info[SFF_DIAG_SUPPORT_OFFSET] == 0) 3919 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3920 break; 3921 case SFF_MODULE_ID_QSFP: 3922 case SFF_MODULE_ID_QSFP_PLUS: 3923 modinfo->type = RTE_ETH_MODULE_SFF_8436; 3924 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3925 break; 3926 case SFF_MODULE_ID_QSFP28: 3927 modinfo->type = RTE_ETH_MODULE_SFF_8636; 3928 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 3929 if (module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) 3930 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN; 3931 break; 3932 default: 3933 PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id); 3934 return -ENOTSUP; 3935 } 3936 3937 PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n", 3938 dev->data->port_id, modinfo->type, modinfo->eeprom_len); 3939 3940 return 0; 3941 } 3942 3943 static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, 3944 struct rte_dev_eeprom_info *info) 3945 { 3946 uint8_t pg_addr[5] = { I2C_DEV_ADDR_A0, I2C_DEV_ADDR_A0 }; 3947 uint32_t offset = info->offset, length = info->length; 3948 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3949 struct bnxt *bp = dev->data->dev_private; 3950 uint8_t *data = info->data; 3951 uint8_t page = offset >> 7; 3952 uint8_t 
max_pages = 2; 3953 uint8_t opt_pages; 3954 int rc; 3955 3956 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3957 SFF_DIAG_SUPPORT_OFFSET + 1, 3958 module_info); 3959 if (rc) 3960 return rc; 3961 3962 switch (module_info[0]) { 3963 case SFF_MODULE_ID_SFP: 3965 if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { 3966 pg_addr[2] = I2C_DEV_ADDR_A2; 3967 pg_addr[3] = I2C_DEV_ADDR_A2; 3968 max_pages = 4; 3969 } 3970 break; 3971 case SFF_MODULE_ID_QSFP28: 3972 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 3973 SFF8636_OPT_PAGES_OFFSET, 3974 1, &opt_pages); 3975 if (rc) 3976 return rc; 3977 3978 if (opt_pages & SFF8636_PAGE1_MASK) { 3979 pg_addr[2] = I2C_DEV_ADDR_A0; 3980 max_pages = 3; 3981 } 3982 if (opt_pages & SFF8636_PAGE2_MASK) { 3983 pg_addr[3] = I2C_DEV_ADDR_A0; 3984 max_pages = 4; 3985 } 3986 if (~module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) { 3987 pg_addr[4] = I2C_DEV_ADDR_A0; 3988 max_pages = 5; 3989 } 3990 break; 3991 default: 3992 break; 3993 } 3994 3995 memset(data, 0, length); 3996 3997 offset &= 0xff; 3998 while (length && page < max_pages) { 3999 uint8_t raw_page = page ? page - 1 : 0; 4000 uint16_t chunk; 4001 4002 if (pg_addr[page] == I2C_DEV_ADDR_A2) 4003 raw_page = 0; 4004 else if (page) 4005 offset |= 0x80; 4006 chunk = RTE_MIN(length, 256 - offset); 4007 4008 if (pg_addr[page]) { 4009 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, pg_addr[page], 4010 raw_page, offset, 4011 chunk, data); 4012 if (rc) 4013 return rc; 4014 } 4015 4016 data += chunk; 4017 length -= chunk; 4018 offset = 0; 4019 page += 1 + (chunk > 128); 4020 } 4021 4022 return length ? -EINVAL : 0; 4023 } 4024 4025 /* 4026 * Initialization 4027 */ 4028 4029 static const struct eth_dev_ops bnxt_dev_ops = { 4030 .dev_infos_get = bnxt_dev_info_get_op, 4031 .dev_close = bnxt_dev_close_op, 4032 .dev_configure = bnxt_dev_configure_op, 4033 .dev_start = bnxt_dev_start_op, 4034 .dev_stop = bnxt_dev_stop_op, 4035 .dev_set_link_up = bnxt_dev_set_link_up_op, 4036 .dev_set_link_down = bnxt_dev_set_link_down_op, 4037 .stats_get = bnxt_stats_get_op, 4038 .stats_reset = bnxt_stats_reset_op, 4039 .rx_queue_setup = bnxt_rx_queue_setup_op, 4040 .rx_queue_release = bnxt_rx_queue_release_op, 4041 .tx_queue_setup = bnxt_tx_queue_setup_op, 4042 .tx_queue_release = bnxt_tx_queue_release_op, 4043 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4044 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4045 .reta_update = bnxt_reta_update_op, 4046 .reta_query = bnxt_reta_query_op, 4047 .rss_hash_update = bnxt_rss_hash_update_op, 4048 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4049 .link_update = bnxt_link_update_op, 4050 .promiscuous_enable = bnxt_promiscuous_enable_op, 4051 .promiscuous_disable = bnxt_promiscuous_disable_op, 4052 .allmulticast_enable = bnxt_allmulticast_enable_op, 4053 .allmulticast_disable = bnxt_allmulticast_disable_op, 4054 .mac_addr_add = bnxt_mac_addr_add_op, 4055 .mac_addr_remove = bnxt_mac_addr_remove_op, 4056 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4057 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4058 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4059 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4060 .vlan_filter_set = bnxt_vlan_filter_set_op, 4061 .vlan_offload_set = bnxt_vlan_offload_set_op, 4062 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4063 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4064 .mtu_set = bnxt_mtu_set_op, 4065 .mac_addr_set = bnxt_set_default_mac_addr_op, 4066 .xstats_get =
bnxt_dev_xstats_get_op, 4067 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4068 .xstats_reset = bnxt_dev_xstats_reset_op, 4069 .fw_version_get = bnxt_fw_version_get, 4070 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4071 .rxq_info_get = bnxt_rxq_info_get_op, 4072 .txq_info_get = bnxt_txq_info_get_op, 4073 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4074 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4075 .dev_led_on = bnxt_dev_led_on_op, 4076 .dev_led_off = bnxt_dev_led_off_op, 4077 .rx_queue_start = bnxt_rx_queue_start, 4078 .rx_queue_stop = bnxt_rx_queue_stop, 4079 .tx_queue_start = bnxt_tx_queue_start, 4080 .tx_queue_stop = bnxt_tx_queue_stop, 4081 .flow_ops_get = bnxt_flow_ops_get_op, 4082 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4083 .get_eeprom_length = bnxt_get_eeprom_length_op, 4084 .get_eeprom = bnxt_get_eeprom_op, 4085 .set_eeprom = bnxt_set_eeprom_op, 4086 .get_module_info = bnxt_get_module_info, 4087 .get_module_eeprom = bnxt_get_module_eeprom, 4088 .timesync_enable = bnxt_timesync_enable, 4089 .timesync_disable = bnxt_timesync_disable, 4090 .timesync_read_time = bnxt_timesync_read_time, 4091 .timesync_write_time = bnxt_timesync_write_time, 4092 .timesync_adjust_time = bnxt_timesync_adjust_time, 4093 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4094 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4095 }; 4096 4097 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4098 { 4099 uint32_t offset; 4100 4101 /* Only pre-map the reset GRC registers using window 3 */ 4102 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4103 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4104 4105 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4106 4107 return offset; 4108 } 4109 4110 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4111 { 4112 struct bnxt_error_recovery_info *info = bp->recovery_info; 4113 uint32_t reg_base = 0xffffffff; 4114 int i; 4115 4116 /* Only pre-map the monitoring GRC registers using window 2 */ 4117 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4118 uint32_t reg = info->status_regs[i]; 4119 4120 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4121 continue; 4122 4123 if (reg_base == 0xffffffff) 4124 reg_base = reg & 0xfffff000; 4125 if ((reg & 0xfffff000) != reg_base) 4126 return -ERANGE; 4127 4128 /* Use mask 0xffc as the Lower 2 bits indicates 4129 * address space location 4130 */ 4131 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4132 (reg & 0xffc); 4133 } 4134 4135 if (reg_base == 0xffffffff) 4136 return 0; 4137 4138 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4139 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4140 4141 return 0; 4142 } 4143 4144 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4145 { 4146 struct bnxt_error_recovery_info *info = bp->recovery_info; 4147 uint32_t delay = info->delay_after_reset[index]; 4148 uint32_t val = info->reset_reg_val[index]; 4149 uint32_t reg = info->reset_reg[index]; 4150 uint32_t type, offset; 4151 int ret; 4152 4153 type = BNXT_FW_STATUS_REG_TYPE(reg); 4154 offset = BNXT_FW_STATUS_REG_OFF(reg); 4155 4156 switch (type) { 4157 case BNXT_FW_STATUS_REG_TYPE_CFG: 4158 ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4159 if (ret < 0) { 4160 PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x", 4161 val, offset); 4162 return; 4163 } 4164 break; 4165 case BNXT_FW_STATUS_REG_TYPE_GRC: 4166 offset = bnxt_map_reset_regs(bp, offset); 4167 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4168 break; 4169 
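		/* BAR0-type status/reset registers need no window remapping;
		 * the offset is used directly against BAR0.
		 */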
case BNXT_FW_STATUS_REG_TYPE_BAR0: 4170 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4171 break; 4172 } 4173 /* wait on a specific interval of time until core reset is complete */ 4174 if (delay) 4175 rte_delay_ms(delay); 4176 } 4177 4178 static void bnxt_dev_cleanup(struct bnxt *bp) 4179 { 4180 bp->eth_dev->data->dev_link.link_status = 0; 4181 bp->link_info->link_up = 0; 4182 if (bp->eth_dev->data->dev_started) 4183 bnxt_dev_stop(bp->eth_dev); 4184 4185 bnxt_uninit_resources(bp, true); 4186 } 4187 4188 static int 4189 bnxt_check_fw_reset_done(struct bnxt *bp) 4190 { 4191 int timeout = bp->fw_reset_max_msecs; 4192 uint16_t val = 0; 4193 int rc; 4194 4195 do { 4196 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4197 if (rc < 0) { 4198 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4199 return rc; 4200 } 4201 if (val != 0xffff) 4202 break; 4203 rte_delay_ms(1); 4204 } while (timeout--); 4205 4206 if (val == 0xffff) { 4207 PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n"); 4208 return -1; 4209 } 4210 4211 return 0; 4212 } 4213 4214 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4215 { 4216 struct rte_eth_dev *dev = bp->eth_dev; 4217 struct rte_vlan_filter_conf *vfc; 4218 int vidx, vbit, rc; 4219 uint16_t vlan_id; 4220 4221 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4222 vfc = &dev->data->vlan_filter_conf; 4223 vidx = vlan_id / 64; 4224 vbit = vlan_id % 64; 4225 4226 /* Each bit corresponds to a VLAN id */ 4227 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4228 rc = bnxt_add_vlan_filter(bp, vlan_id); 4229 if (rc) 4230 return rc; 4231 } 4232 } 4233 4234 return 0; 4235 } 4236 4237 static int bnxt_restore_mac_filters(struct bnxt *bp) 4238 { 4239 struct rte_eth_dev *dev = bp->eth_dev; 4240 struct rte_eth_dev_info dev_info; 4241 struct rte_ether_addr *addr; 4242 uint64_t pool_mask; 4243 uint32_t pool = 0; 4244 uint32_t i; 4245 int rc; 4246 4247 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4248 return 0; 4249 4250 rc = bnxt_dev_info_get_op(dev, &dev_info); 4251 if (rc) 4252 return rc; 4253 4254 /* replay MAC address configuration */ 4255 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4256 addr = &dev->data->mac_addrs[i]; 4257 4258 /* skip zero address */ 4259 if (rte_is_zero_ether_addr(addr)) 4260 continue; 4261 4262 pool = 0; 4263 pool_mask = dev->data->mac_pool_sel[i]; 4264 4265 do { 4266 if (pool_mask & 1ULL) { 4267 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4268 if (rc) 4269 return rc; 4270 } 4271 pool_mask >>= 1; 4272 pool++; 4273 } while (pool_mask); 4274 } 4275 4276 return 0; 4277 } 4278 4279 static int bnxt_restore_filters(struct bnxt *bp) 4280 { 4281 struct rte_eth_dev *dev = bp->eth_dev; 4282 int ret = 0; 4283 4284 if (dev->data->all_multicast) { 4285 ret = bnxt_allmulticast_enable_op(dev); 4286 if (ret) 4287 return ret; 4288 } 4289 if (dev->data->promiscuous) { 4290 ret = bnxt_promiscuous_enable_op(dev); 4291 if (ret) 4292 return ret; 4293 } 4294 4295 ret = bnxt_restore_mac_filters(bp); 4296 if (ret) 4297 return ret; 4298 4299 ret = bnxt_restore_vlan_filters(bp); 4300 /* TODO restore other filters as well */ 4301 return ret; 4302 } 4303 4304 static int bnxt_check_fw_ready(struct bnxt *bp) 4305 { 4306 int timeout = bp->fw_reset_max_msecs; 4307 int rc = 0; 4308 4309 do { 4310 rc = bnxt_hwrm_poll_ver_get(bp); 4311 if (rc == 0) 4312 break; 4313 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4314 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4315 } while (rc && timeout > 0); 4316 4317 
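	/* rc is still non-zero here only if FW did not answer HWRM VER_GET
	 * within fw_reset_max_msecs.
	 */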
if (rc) 4318 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4319 4320 return rc; 4321 } 4322 4323 static void bnxt_dev_recover(void *arg) 4324 { 4325 struct bnxt *bp = arg; 4326 int rc = 0; 4327 4328 pthread_mutex_lock(&bp->err_recovery_lock); 4329 4330 if (!bp->fw_reset_min_msecs) { 4331 rc = bnxt_check_fw_reset_done(bp); 4332 if (rc) 4333 goto err; 4334 } 4335 4336 /* Clear Error flag so that device re-init should happen */ 4337 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4338 4339 rc = bnxt_check_fw_ready(bp); 4340 if (rc) 4341 goto err; 4342 4343 rc = bnxt_init_resources(bp, true); 4344 if (rc) { 4345 PMD_DRV_LOG(ERR, 4346 "Failed to initialize resources after reset\n"); 4347 goto err; 4348 } 4349 /* clear reset flag as the device is initialized now */ 4350 bp->flags &= ~BNXT_FLAG_FW_RESET; 4351 4352 rc = bnxt_dev_start_op(bp->eth_dev); 4353 if (rc) { 4354 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4355 goto err_start; 4356 } 4357 4358 rc = bnxt_restore_filters(bp); 4359 if (rc) 4360 goto err_start; 4361 4362 PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); 4363 pthread_mutex_unlock(&bp->err_recovery_lock); 4364 4365 return; 4366 err_start: 4367 bnxt_dev_stop(bp->eth_dev); 4368 err: 4369 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4370 bnxt_uninit_resources(bp, false); 4371 if (bp->eth_dev->data->dev_conf.intr_conf.rmv) 4372 rte_eth_dev_callback_process(bp->eth_dev, 4373 RTE_ETH_EVENT_INTR_RMV, 4374 NULL); 4375 pthread_mutex_unlock(&bp->err_recovery_lock); 4376 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); 4377 } 4378 4379 void bnxt_dev_reset_and_resume(void *arg) 4380 { 4381 struct bnxt *bp = arg; 4382 uint32_t us = US_PER_MS * bp->fw_reset_min_msecs; 4383 uint16_t val = 0; 4384 int rc; 4385 4386 bnxt_dev_cleanup(bp); 4387 4388 bnxt_wait_for_device_shutdown(bp); 4389 4390 /* During some fatal firmware error conditions, the PCI config space 4391 * register 0x2e which normally contains the subsystem ID will become 4392 * 0xffff. This register will revert back to the normal value after 4393 * the chip has completed core reset. If we detect this condition, 4394 * we can poll this config register immediately for the value to revert. 
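 * (PCI_SUBSYSTEM_ID_OFFSET used below is that 0x2e config space offset.)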
4395 */ 4396 if (bp->flags & BNXT_FLAG_FATAL_ERROR) { 4397 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4398 if (rc < 0) { 4399 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4400 return; 4401 } 4402 if (val == 0xffff) { 4403 bp->fw_reset_min_msecs = 0; 4404 us = 1; 4405 } 4406 } 4407 4408 rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); 4409 if (rc) 4410 PMD_DRV_LOG(ERR, "Error setting recovery alarm"); 4411 } 4412 4413 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4414 { 4415 struct bnxt_error_recovery_info *info = bp->recovery_info; 4416 uint32_t reg = info->status_regs[index]; 4417 uint32_t type, offset, val = 0; 4418 int ret = 0; 4419 4420 type = BNXT_FW_STATUS_REG_TYPE(reg); 4421 offset = BNXT_FW_STATUS_REG_OFF(reg); 4422 4423 switch (type) { 4424 case BNXT_FW_STATUS_REG_TYPE_CFG: 4425 ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4426 if (ret < 0) 4427 PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x", 4428 offset); 4429 break; 4430 case BNXT_FW_STATUS_REG_TYPE_GRC: 4431 offset = info->mapped_status_regs[index]; 4432 /* FALLTHROUGH */ 4433 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4434 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4435 offset)); 4436 break; 4437 } 4438 4439 return val; 4440 } 4441 4442 static int bnxt_fw_reset_all(struct bnxt *bp) 4443 { 4444 struct bnxt_error_recovery_info *info = bp->recovery_info; 4445 uint32_t i; 4446 int rc = 0; 4447 4448 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4449 /* Reset through primary function driver */ 4450 for (i = 0; i < info->reg_array_cnt; i++) 4451 bnxt_write_fw_reset_reg(bp, i); 4452 /* Wait for time specified by FW after triggering reset */ 4453 rte_delay_ms(info->primary_func_wait_period_after_reset); 4454 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4455 /* Reset with the help of Kong processor */ 4456 rc = bnxt_hwrm_fw_reset(bp); 4457 if (rc) 4458 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4459 } 4460 4461 return rc; 4462 } 4463 4464 static void bnxt_fw_reset_cb(void *arg) 4465 { 4466 struct bnxt *bp = arg; 4467 struct bnxt_error_recovery_info *info = bp->recovery_info; 4468 int rc = 0; 4469 4470 /* Only Primary function can do FW reset */ 4471 if (bnxt_is_primary_func(bp) && 4472 bnxt_is_recovery_enabled(bp)) { 4473 rc = bnxt_fw_reset_all(bp); 4474 if (rc) { 4475 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4476 return; 4477 } 4478 } 4479 4480 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4481 * EXCEPTION_FATAL_ASYNC event to all the functions 4482 * (including MASTER FUNC). After receiving this Async, all the active 4483 * drivers should treat this case as FW initiated recovery 4484 */ 4485 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4486 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4487 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4488 4489 /* To recover from error */ 4490 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4491 (void *)bp); 4492 } 4493 } 4494 4495 /* Driver should poll FW heartbeat, reset_counter with the frequency 4496 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4497 * When the driver detects heartbeat stop or change in reset_counter, 4498 * it has to trigger a reset to recover from the error condition. 4499 * A “primary function” is the function who will have the privilege to 4500 * initiate the chimp reset. The primary function will be elected by the 4501 * firmware and will be notified through async message. 
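 * Non-primary functions simply wait out their configured period and rely on
 * the primary function (or FW, for CO_CPU recovery) to perform the reset.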
4502 */ 4503 static void bnxt_check_fw_health(void *arg) 4504 { 4505 struct bnxt *bp = arg; 4506 struct bnxt_error_recovery_info *info = bp->recovery_info; 4507 uint32_t val = 0, wait_msec; 4508 4509 if (!info || !bnxt_is_recovery_enabled(bp) || 4510 is_bnxt_in_error(bp)) 4511 return; 4512 4513 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4514 if (val == info->last_heart_beat) 4515 goto reset; 4516 4517 info->last_heart_beat = val; 4518 4519 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4520 if (val != info->last_reset_counter) 4521 goto reset; 4522 4523 info->last_reset_counter = val; 4524 4525 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4526 bnxt_check_fw_health, (void *)bp); 4527 4528 return; 4529 reset: 4530 /* Stop DMA to/from device */ 4531 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4532 bp->flags |= BNXT_FLAG_FW_RESET; 4533 4534 bnxt_stop_rxtx(bp); 4535 4536 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4537 4538 if (bnxt_is_primary_func(bp)) 4539 wait_msec = info->primary_func_wait_period; 4540 else 4541 wait_msec = info->normal_func_wait_period; 4542 4543 rte_eal_alarm_set(US_PER_MS * wait_msec, 4544 bnxt_fw_reset_cb, (void *)bp); 4545 } 4546 4547 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4548 { 4549 uint32_t polling_freq; 4550 4551 pthread_mutex_lock(&bp->health_check_lock); 4552 4553 if (!bnxt_is_recovery_enabled(bp)) 4554 goto done; 4555 4556 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4557 goto done; 4558 4559 polling_freq = bp->recovery_info->driver_polling_freq; 4560 4561 rte_eal_alarm_set(US_PER_MS * polling_freq, 4562 bnxt_check_fw_health, (void *)bp); 4563 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4564 4565 done: 4566 pthread_mutex_unlock(&bp->health_check_lock); 4567 } 4568 4569 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4570 { 4571 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4572 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4573 } 4574 4575 static bool bnxt_vf_pciid(uint16_t device_id) 4576 { 4577 switch (device_id) { 4578 case BROADCOM_DEV_ID_57304_VF: 4579 case BROADCOM_DEV_ID_57406_VF: 4580 case BROADCOM_DEV_ID_5731X_VF: 4581 case BROADCOM_DEV_ID_5741X_VF: 4582 case BROADCOM_DEV_ID_57414_VF: 4583 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4584 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4585 case BROADCOM_DEV_ID_58802_VF: 4586 case BROADCOM_DEV_ID_57500_VF1: 4587 case BROADCOM_DEV_ID_57500_VF2: 4588 case BROADCOM_DEV_ID_58818_VF: 4589 /* FALLTHROUGH */ 4590 return true; 4591 default: 4592 return false; 4593 } 4594 } 4595 4596 /* Phase 5 device */ 4597 static bool bnxt_p5_device(uint16_t device_id) 4598 { 4599 switch (device_id) { 4600 case BROADCOM_DEV_ID_57508: 4601 case BROADCOM_DEV_ID_57504: 4602 case BROADCOM_DEV_ID_57502: 4603 case BROADCOM_DEV_ID_57508_MF1: 4604 case BROADCOM_DEV_ID_57504_MF1: 4605 case BROADCOM_DEV_ID_57502_MF1: 4606 case BROADCOM_DEV_ID_57508_MF2: 4607 case BROADCOM_DEV_ID_57504_MF2: 4608 case BROADCOM_DEV_ID_57502_MF2: 4609 case BROADCOM_DEV_ID_57500_VF1: 4610 case BROADCOM_DEV_ID_57500_VF2: 4611 case BROADCOM_DEV_ID_58812: 4612 case BROADCOM_DEV_ID_58814: 4613 case BROADCOM_DEV_ID_58818: 4614 case BROADCOM_DEV_ID_58818_VF: 4615 /* FALLTHROUGH */ 4616 return true; 4617 default: 4618 return false; 4619 } 4620 } 4621 4622 bool bnxt_stratus_device(struct bnxt *bp) 4623 { 4624 uint16_t device_id = bp->pdev->id.device_id; 4625 4626 switch (device_id) { 4627 case BROADCOM_DEV_ID_STRATUS_NIC: 4628 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4629 case 
BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4630 /* FALLTHROUGH */ 4631 return true; 4632 default: 4633 return false; 4634 } 4635 } 4636 4637 static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) 4638 { 4639 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4640 struct bnxt *bp = eth_dev->data->dev_private; 4641 4642 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4643 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4644 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4645 if (!bp->bar0 || !bp->doorbell_base) { 4646 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4647 return -ENODEV; 4648 } 4649 4650 bp->eth_dev = eth_dev; 4651 bp->pdev = pci_dev; 4652 4653 return 0; 4654 } 4655 4656 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4657 struct bnxt_ctx_pg_info *ctx_pg, 4658 uint32_t mem_size, 4659 const char *suffix, 4660 uint16_t idx) 4661 { 4662 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4663 const struct rte_memzone *mz = NULL; 4664 char mz_name[RTE_MEMZONE_NAMESIZE]; 4665 rte_iova_t mz_phys_addr; 4666 uint64_t valid_bits = 0; 4667 uint32_t sz; 4668 int i; 4669 4670 if (!mem_size) 4671 return 0; 4672 4673 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4674 BNXT_PAGE_SIZE; 4675 rmem->page_size = BNXT_PAGE_SIZE; 4676 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4677 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4678 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4679 4680 valid_bits = PTU_PTE_VALID; 4681 4682 if (rmem->nr_pages > 1) { 4683 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4684 "bnxt_ctx_pg_tbl%s_%x_%d", 4685 suffix, idx, bp->eth_dev->data->port_id); 4686 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4687 mz = rte_memzone_lookup(mz_name); 4688 if (!mz) { 4689 mz = rte_memzone_reserve_aligned(mz_name, 4690 rmem->nr_pages * 8, 4691 bp->eth_dev->device->numa_node, 4692 RTE_MEMZONE_2MB | 4693 RTE_MEMZONE_SIZE_HINT_ONLY | 4694 RTE_MEMZONE_IOVA_CONTIG, 4695 BNXT_PAGE_SIZE); 4696 if (mz == NULL) 4697 return -ENOMEM; 4698 } 4699 4700 memset(mz->addr, 0, mz->len); 4701 mz_phys_addr = mz->iova; 4702 4703 rmem->pg_tbl = mz->addr; 4704 rmem->pg_tbl_map = mz_phys_addr; 4705 rmem->pg_tbl_mz = mz; 4706 } 4707 4708 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4709 suffix, idx, bp->eth_dev->data->port_id); 4710 mz = rte_memzone_lookup(mz_name); 4711 if (!mz) { 4712 mz = rte_memzone_reserve_aligned(mz_name, 4713 mem_size, 4714 bp->eth_dev->device->numa_node, 4715 RTE_MEMZONE_1GB | 4716 RTE_MEMZONE_SIZE_HINT_ONLY | 4717 RTE_MEMZONE_IOVA_CONTIG, 4718 BNXT_PAGE_SIZE); 4719 if (mz == NULL) 4720 return -ENOMEM; 4721 } 4722 4723 memset(mz->addr, 0, mz->len); 4724 mz_phys_addr = mz->iova; 4725 4726 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4727 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4728 rmem->dma_arr[i] = mz_phys_addr + sz; 4729 4730 if (rmem->nr_pages > 1) { 4731 if (i == rmem->nr_pages - 2 && 4732 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4733 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4734 else if (i == rmem->nr_pages - 1 && 4735 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4736 valid_bits |= PTU_PTE_LAST; 4737 4738 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4739 valid_bits); 4740 } 4741 } 4742 4743 rmem->mz = mz; 4744 if (rmem->vmem_size) 4745 rmem->vmem = (void **)mz->addr; 4746 rmem->dma_arr[0] = mz_phys_addr; 4747 return 0; 4748 } 4749 4750 static void bnxt_free_ctx_mem(struct bnxt *bp) 4751 { 4752 int i; 4753 4754 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4755 return; 4756 4757 bp->ctx->flags &= 
~BNXT_CTX_FLAG_INITED; 4758 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4759 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4760 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4761 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4762 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4763 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4764 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4765 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4766 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4767 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4768 4769 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4770 if (bp->ctx->tqm_mem[i]) 4771 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4772 } 4773 4774 rte_free(bp->ctx); 4775 bp->ctx = NULL; 4776 } 4777 4778 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4779 4780 #define min_t(type, x, y) ({ \ 4781 type __min1 = (x); \ 4782 type __min2 = (y); \ 4783 __min1 < __min2 ? __min1 : __min2; }) 4784 4785 #define max_t(type, x, y) ({ \ 4786 type __max1 = (x); \ 4787 type __max2 = (y); \ 4788 __max1 > __max2 ? __max1 : __max2; }) 4789 4790 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4791 4792 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4793 { 4794 struct bnxt_ctx_pg_info *ctx_pg; 4795 struct bnxt_ctx_mem_info *ctx; 4796 uint32_t mem_size, ena, entries; 4797 uint32_t entries_sp, min; 4798 int i, rc; 4799 4800 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4801 if (rc) { 4802 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4803 return rc; 4804 } 4805 ctx = bp->ctx; 4806 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4807 return 0; 4808 4809 ctx_pg = &ctx->qp_mem; 4810 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4811 if (ctx->qp_entry_size) { 4812 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4813 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4814 if (rc) 4815 return rc; 4816 } 4817 4818 ctx_pg = &ctx->srq_mem; 4819 ctx_pg->entries = ctx->srq_max_l2_entries; 4820 if (ctx->srq_entry_size) { 4821 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4822 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4823 if (rc) 4824 return rc; 4825 } 4826 4827 ctx_pg = &ctx->cq_mem; 4828 ctx_pg->entries = ctx->cq_max_l2_entries; 4829 if (ctx->cq_entry_size) { 4830 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4831 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4832 if (rc) 4833 return rc; 4834 } 4835 4836 ctx_pg = &ctx->vnic_mem; 4837 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4838 ctx->vnic_max_ring_table_entries; 4839 if (ctx->vnic_entry_size) { 4840 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4841 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4842 if (rc) 4843 return rc; 4844 } 4845 4846 ctx_pg = &ctx->stat_mem; 4847 ctx_pg->entries = ctx->stat_max_entries; 4848 if (ctx->stat_entry_size) { 4849 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4850 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4851 if (rc) 4852 return rc; 4853 } 4854 4855 min = ctx->tqm_min_entries_per_ring; 4856 4857 entries_sp = ctx->qp_max_l2_entries + 4858 ctx->vnic_max_vnic_entries + 4859 2 * ctx->qp_min_qp1_entries + min; 4860 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4861 4862 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4863 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4864 entries = clamp_t(uint32_t, entries, 
min, 4865 ctx->tqm_max_entries_per_ring); 4866 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 4867 /* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7. 4868 * i > 8 is other ext rings. 4869 */ 4870 ctx_pg = ctx->tqm_mem[i]; 4871 ctx_pg->entries = i ? entries : entries_sp; 4872 if (ctx->tqm_entry_size) { 4873 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4874 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, 4875 "tqm_mem", i); 4876 if (rc) 4877 return rc; 4878 } 4879 if (i < BNXT_MAX_TQM_LEGACY_RINGS) 4880 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4881 else 4882 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8; 4883 } 4884 4885 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4886 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4887 if (rc) 4888 PMD_DRV_LOG(ERR, 4889 "Failed to configure context mem: rc = %d\n", rc); 4890 else 4891 ctx->flags |= BNXT_CTX_FLAG_INITED; 4892 4893 return rc; 4894 } 4895 4896 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4897 { 4898 struct rte_pci_device *pci_dev = bp->pdev; 4899 char mz_name[RTE_MEMZONE_NAMESIZE]; 4900 const struct rte_memzone *mz = NULL; 4901 uint32_t total_alloc_len; 4902 rte_iova_t mz_phys_addr; 4903 4904 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4905 return 0; 4906 4907 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4908 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4909 pci_dev->addr.bus, pci_dev->addr.devid, 4910 pci_dev->addr.function, "rx_port_stats"); 4911 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4912 mz = rte_memzone_lookup(mz_name); 4913 total_alloc_len = 4914 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4915 sizeof(struct rx_port_stats_ext) + 512); 4916 if (!mz) { 4917 mz = rte_memzone_reserve(mz_name, total_alloc_len, 4918 SOCKET_ID_ANY, 4919 RTE_MEMZONE_2MB | 4920 RTE_MEMZONE_SIZE_HINT_ONLY | 4921 RTE_MEMZONE_IOVA_CONTIG); 4922 if (mz == NULL) 4923 return -ENOMEM; 4924 } 4925 memset(mz->addr, 0, mz->len); 4926 mz_phys_addr = mz->iova; 4927 4928 bp->rx_mem_zone = (const void *)mz; 4929 bp->hw_rx_port_stats = mz->addr; 4930 bp->hw_rx_port_stats_map = mz_phys_addr; 4931 4932 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4933 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4934 pci_dev->addr.bus, pci_dev->addr.devid, 4935 pci_dev->addr.function, "tx_port_stats"); 4936 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4937 mz = rte_memzone_lookup(mz_name); 4938 total_alloc_len = 4939 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 4940 sizeof(struct tx_port_stats_ext) + 512); 4941 if (!mz) { 4942 mz = rte_memzone_reserve(mz_name, 4943 total_alloc_len, 4944 SOCKET_ID_ANY, 4945 RTE_MEMZONE_2MB | 4946 RTE_MEMZONE_SIZE_HINT_ONLY | 4947 RTE_MEMZONE_IOVA_CONTIG); 4948 if (mz == NULL) 4949 return -ENOMEM; 4950 } 4951 memset(mz->addr, 0, mz->len); 4952 mz_phys_addr = mz->iova; 4953 4954 bp->tx_mem_zone = (const void *)mz; 4955 bp->hw_tx_port_stats = mz->addr; 4956 bp->hw_tx_port_stats_map = mz_phys_addr; 4957 bp->flags |= BNXT_FLAG_PORT_STATS; 4958 4959 /* Display extended statistics if FW supports it */ 4960 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 4961 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 4962 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 4963 return 0; 4964 4965 bp->hw_rx_port_stats_ext = (void *) 4966 ((uint8_t *)bp->hw_rx_port_stats + 4967 sizeof(struct rx_port_stats)); 4968 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 4969 sizeof(struct rx_port_stats); 4970 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 4971 4972 if (bp->hwrm_spec_code < 
HWRM_SPEC_CODE_1_9_2 ||
	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
		bp->hw_tx_port_stats_ext = (void *)
			((uint8_t *)bp->hw_tx_port_stats +
			 sizeof(struct tx_port_stats));
		bp->hw_tx_port_stats_ext_map =
			bp->hw_tx_port_stats_map +
			sizeof(struct tx_port_stats);
		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
	}

	return 0;
}

static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					       RTE_ETHER_ADDR_LEN *
					       bp->max_l2_ctx,
					       0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
		return -ENOMEM;
	}

	if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
		if (BNXT_PF(bp))
			return -EINVAL;

		/* Generate a random MAC address, if none was assigned by PF */
		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
		bnxt_eth_hw_addr_random(bp->mac_addr);
		PMD_DRV_LOG(INFO,
			    "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n",
			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);

		rc = bnxt_hwrm_set_mac(bp);
		if (rc)
			return rc;
	}

	/* Copy the permanent MAC from the FUNC_QCAPS response */
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);

	return rc;
}

static int bnxt_restore_dflt_mac(struct bnxt *bp)
{
	int rc = 0;

	/* MAC is already configured in FW */
	if (BNXT_HAS_DFLT_MAC_SET(bp))
		return 0;

	/* Restore the old MAC configured */
	rc = bnxt_hwrm_set_mac(bp);
	if (rc)
		PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");

	return rc;
}

static void bnxt_config_vf_req_fwd(struct bnxt *bp)
{
	if (!BNXT_PF(bp))
		return;

	memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));

	if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
		BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
}

struct bnxt *
bnxt_get_bp(uint16_t port)
{
	struct bnxt *bp;
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port)) {
		PMD_DRV_LOG(ERR, "Invalid port %d\n", port);
		return NULL;
	}

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev)) {
		PMD_DRV_LOG(ERR, "Device %d not supported\n", port);
		return NULL;
	}

	bp = (struct bnxt *)dev->data->dev_private;
	if (!BNXT_TRUFLOW_EN(bp)) {
		PMD_DRV_LOG(ERR, "TRUFLOW not enabled\n");
		return NULL;
	}

	return bp;
}

uint16_t
bnxt_get_svif(uint16_t port_id, bool func_svif,
	      enum bnxt_ulp_intf_type type)
{
	struct rte_eth_dev *eth_dev;
	struct bnxt *bp;

	eth_dev = &rte_eth_devices[port_id];
	if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
		struct bnxt_representor *vfr = eth_dev->data->dev_private;
		if (!vfr)
			return 0;

		if (type == BNXT_ULP_INTF_TYPE_VF_REP)
			return vfr->svif;

		eth_dev = vfr->parent_dev;
	}

	bp = eth_dev->data->dev_private;

	return func_svif ?
bp->func_svif : bp->port_svif; 5102 } 5103 5104 void 5105 bnxt_get_iface_mac(uint16_t port, enum bnxt_ulp_intf_type type, 5106 uint8_t *mac, uint8_t *parent_mac) 5107 { 5108 struct rte_eth_dev *eth_dev; 5109 struct bnxt *bp; 5110 5111 if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF && 5112 type != BNXT_ULP_INTF_TYPE_PF) 5113 return; 5114 5115 eth_dev = &rte_eth_devices[port]; 5116 bp = eth_dev->data->dev_private; 5117 memcpy(mac, bp->mac_addr, RTE_ETHER_ADDR_LEN); 5118 5119 if (type == BNXT_ULP_INTF_TYPE_TRUSTED_VF) 5120 memcpy(parent_mac, bp->parent->mac_addr, RTE_ETHER_ADDR_LEN); 5121 } 5122 5123 uint16_t 5124 bnxt_get_parent_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 5125 { 5126 struct rte_eth_dev *eth_dev; 5127 struct bnxt *bp; 5128 5129 if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) 5130 return 0; 5131 5132 eth_dev = &rte_eth_devices[port]; 5133 bp = eth_dev->data->dev_private; 5134 5135 return bp->parent->vnic; 5136 } 5137 uint16_t 5138 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 5139 { 5140 struct rte_eth_dev *eth_dev; 5141 struct bnxt_vnic_info *vnic; 5142 struct bnxt *bp; 5143 5144 eth_dev = &rte_eth_devices[port]; 5145 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5146 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5147 if (!vfr) 5148 return 0; 5149 5150 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5151 return vfr->dflt_vnic_id; 5152 5153 eth_dev = vfr->parent_dev; 5154 } 5155 5156 bp = eth_dev->data->dev_private; 5157 5158 vnic = BNXT_GET_DEFAULT_VNIC(bp); 5159 5160 return vnic->fw_vnic_id; 5161 } 5162 5163 uint16_t 5164 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type) 5165 { 5166 struct rte_eth_dev *eth_dev; 5167 struct bnxt *bp; 5168 5169 eth_dev = &rte_eth_devices[port]; 5170 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5171 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5172 if (!vfr) 5173 return 0; 5174 5175 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5176 return vfr->fw_fid; 5177 5178 eth_dev = vfr->parent_dev; 5179 } 5180 5181 bp = eth_dev->data->dev_private; 5182 5183 return bp->fw_fid; 5184 } 5185 5186 enum bnxt_ulp_intf_type 5187 bnxt_get_interface_type(uint16_t port) 5188 { 5189 struct rte_eth_dev *eth_dev; 5190 struct bnxt *bp; 5191 5192 eth_dev = &rte_eth_devices[port]; 5193 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) 5194 return BNXT_ULP_INTF_TYPE_VF_REP; 5195 5196 bp = eth_dev->data->dev_private; 5197 if (BNXT_PF(bp)) 5198 return BNXT_ULP_INTF_TYPE_PF; 5199 else if (BNXT_VF_IS_TRUSTED(bp)) 5200 return BNXT_ULP_INTF_TYPE_TRUSTED_VF; 5201 else if (BNXT_VF(bp)) 5202 return BNXT_ULP_INTF_TYPE_VF; 5203 5204 return BNXT_ULP_INTF_TYPE_INVALID; 5205 } 5206 5207 uint16_t 5208 bnxt_get_phy_port_id(uint16_t port_id) 5209 { 5210 struct bnxt_representor *vfr; 5211 struct rte_eth_dev *eth_dev; 5212 struct bnxt *bp; 5213 5214 eth_dev = &rte_eth_devices[port_id]; 5215 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5216 vfr = eth_dev->data->dev_private; 5217 if (!vfr) 5218 return 0; 5219 5220 eth_dev = vfr->parent_dev; 5221 } 5222 5223 bp = eth_dev->data->dev_private; 5224 5225 return BNXT_PF(bp) ? 
bp->pf->port_id : bp->parent->port_id; 5226 } 5227 5228 uint16_t 5229 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type) 5230 { 5231 struct rte_eth_dev *eth_dev; 5232 struct bnxt *bp; 5233 5234 eth_dev = &rte_eth_devices[port_id]; 5235 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5236 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5237 if (!vfr) 5238 return 0; 5239 5240 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5241 return vfr->fw_fid - 1; 5242 5243 eth_dev = vfr->parent_dev; 5244 } 5245 5246 bp = eth_dev->data->dev_private; 5247 5248 return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1; 5249 } 5250 5251 uint16_t 5252 bnxt_get_vport(uint16_t port_id) 5253 { 5254 return (1 << bnxt_get_phy_port_id(port_id)); 5255 } 5256 5257 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 5258 { 5259 struct bnxt_error_recovery_info *info = bp->recovery_info; 5260 5261 if (info) { 5262 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 5263 memset(info, 0, sizeof(*info)); 5264 return; 5265 } 5266 5267 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5268 return; 5269 5270 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5271 sizeof(*info), 0); 5272 if (!info) 5273 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5274 5275 bp->recovery_info = info; 5276 } 5277 5278 static void bnxt_check_fw_status(struct bnxt *bp) 5279 { 5280 uint32_t fw_status; 5281 5282 if (!(bp->recovery_info && 5283 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5284 return; 5285 5286 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5287 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5288 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5289 fw_status); 5290 } 5291 5292 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5293 { 5294 struct bnxt_error_recovery_info *info = bp->recovery_info; 5295 uint32_t status_loc; 5296 uint32_t sig_ver; 5297 5298 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5299 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5300 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5301 BNXT_GRCP_WINDOW_2_BASE + 5302 offsetof(struct hcomm_status, 5303 sig_ver))); 5304 /* If the signature is absent, then FW does not support this feature */ 5305 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5306 HCOMM_STATUS_SIGNATURE_VAL) 5307 return 0; 5308 5309 if (!info) { 5310 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5311 sizeof(*info), 0); 5312 if (!info) 5313 return -ENOMEM; 5314 bp->recovery_info = info; 5315 } else { 5316 memset(info, 0, sizeof(*info)); 5317 } 5318 5319 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5320 BNXT_GRCP_WINDOW_2_BASE + 5321 offsetof(struct hcomm_status, 5322 fw_status_loc))); 5323 5324 /* Only pre-map the FW health status GRC register */ 5325 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5326 return 0; 5327 5328 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5329 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5330 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5331 5332 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5333 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5334 5335 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5336 5337 return 0; 5338 } 5339 5340 /* This function gets the FW version along with the 5341 * capabilities(MAX and current) of the function, vnic, 5342 * error recovery, phy and other chip related info 5343 */ 5344 static int bnxt_get_config(struct bnxt *bp) 5345 { 5346 uint16_t mtu; 5347 int rc = 0; 5348 5349 bp->fw_cap = 0; 5350 5351 rc 
= bnxt_map_hcomm_fw_status_reg(bp); 5352 if (rc) 5353 return rc; 5354 5355 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5356 if (rc) { 5357 bnxt_check_fw_status(bp); 5358 return rc; 5359 } 5360 5361 rc = bnxt_hwrm_func_reset(bp); 5362 if (rc) 5363 return -EIO; 5364 5365 rc = bnxt_hwrm_vnic_qcaps(bp); 5366 if (rc) 5367 return rc; 5368 5369 rc = bnxt_hwrm_queue_qportcfg(bp); 5370 if (rc) 5371 return rc; 5372 5373 /* Get the MAX capabilities for this function. 5374 * This function also allocates context memory for TQM rings and 5375 * informs the firmware about this allocated backing store memory. 5376 */ 5377 rc = bnxt_hwrm_func_qcaps(bp); 5378 if (rc) 5379 return rc; 5380 5381 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5382 if (rc) 5383 return rc; 5384 5385 rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp); 5386 if (rc) 5387 return rc; 5388 5389 bnxt_hwrm_port_mac_qcfg(bp); 5390 5391 bnxt_hwrm_parent_pf_qcfg(bp); 5392 5393 bnxt_hwrm_port_phy_qcaps(bp); 5394 5395 bnxt_alloc_error_recovery_info(bp); 5396 /* Get the adapter error recovery support info */ 5397 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5398 if (rc) 5399 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5400 5401 bnxt_hwrm_port_led_qcaps(bp); 5402 5403 return 0; 5404 } 5405 5406 static int 5407 bnxt_init_locks(struct bnxt *bp) 5408 { 5409 int err; 5410 5411 err = pthread_mutex_init(&bp->flow_lock, NULL); 5412 if (err) { 5413 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5414 return err; 5415 } 5416 5417 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5418 if (err) { 5419 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5420 return err; 5421 } 5422 5423 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5424 if (err) { 5425 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5426 return err; 5427 } 5428 5429 err = pthread_mutex_init(&bp->err_recovery_lock, NULL); 5430 if (err) 5431 PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); 5432 5433 return err; 5434 } 5435 5436 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5437 { 5438 int rc = 0; 5439 5440 rc = bnxt_get_config(bp); 5441 if (rc) 5442 return rc; 5443 5444 if (!reconfig_dev) { 5445 rc = bnxt_setup_mac_addr(bp->eth_dev); 5446 if (rc) 5447 return rc; 5448 } else { 5449 rc = bnxt_restore_dflt_mac(bp); 5450 if (rc) 5451 return rc; 5452 } 5453 5454 bnxt_config_vf_req_fwd(bp); 5455 5456 rc = bnxt_hwrm_func_driver_register(bp); 5457 if (rc) { 5458 PMD_DRV_LOG(ERR, "Failed to register driver"); 5459 return -EBUSY; 5460 } 5461 5462 if (BNXT_PF(bp)) { 5463 if (bp->pdev->max_vfs) { 5464 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5465 if (rc) { 5466 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5467 return rc; 5468 } 5469 } else { 5470 rc = bnxt_hwrm_allocate_pf_only(bp); 5471 if (rc) { 5472 PMD_DRV_LOG(ERR, 5473 "Failed to allocate PF resources"); 5474 return rc; 5475 } 5476 } 5477 } 5478 5479 rc = bnxt_alloc_mem(bp, reconfig_dev); 5480 if (rc) 5481 return rc; 5482 5483 rc = bnxt_setup_int(bp); 5484 if (rc) 5485 return rc; 5486 5487 rc = bnxt_request_int(bp); 5488 if (rc) 5489 return rc; 5490 5491 rc = bnxt_init_ctx_mem(bp); 5492 if (rc) { 5493 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5494 return rc; 5495 } 5496 5497 return 0; 5498 } 5499 5500 static int 5501 bnxt_parse_devarg_accum_stats(__rte_unused const char *key, 5502 const char *value, void *opaque_arg) 5503 { 5504 struct bnxt *bp = opaque_arg; 5505 unsigned long accum_stats; 5506 char *end = NULL; 5507 5508 if (!value || !opaque_arg) { 5509 PMD_DRV_LOG(ERR, 
5510 "Invalid parameter passed to accum-stats devargs.\n"); 5511 return -EINVAL; 5512 } 5513 5514 accum_stats = strtoul(value, &end, 10); 5515 if (end == NULL || *end != '\0' || 5516 (accum_stats == ULONG_MAX && errno == ERANGE)) { 5517 PMD_DRV_LOG(ERR, 5518 "Invalid parameter passed to accum-stats devargs.\n"); 5519 return -EINVAL; 5520 } 5521 5522 if (BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)) { 5523 PMD_DRV_LOG(ERR, 5524 "Invalid value passed to accum-stats devargs.\n"); 5525 return -EINVAL; 5526 } 5527 5528 if (accum_stats) { 5529 bp->flags2 |= BNXT_FLAGS2_ACCUM_STATS_EN; 5530 PMD_DRV_LOG(INFO, "Host-based accum-stats feature enabled.\n"); 5531 } else { 5532 bp->flags2 &= ~BNXT_FLAGS2_ACCUM_STATS_EN; 5533 PMD_DRV_LOG(INFO, "Host-based accum-stats feature disabled.\n"); 5534 } 5535 5536 return 0; 5537 } 5538 5539 static int 5540 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 5541 const char *value, void *opaque_arg) 5542 { 5543 struct bnxt *bp = opaque_arg; 5544 unsigned long flow_xstat; 5545 char *end = NULL; 5546 5547 if (!value || !opaque_arg) { 5548 PMD_DRV_LOG(ERR, 5549 "Invalid parameter passed to flow_xstat devarg.\n"); 5550 return -EINVAL; 5551 } 5552 5553 flow_xstat = strtoul(value, &end, 10); 5554 if (end == NULL || *end != '\0' || 5555 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5556 PMD_DRV_LOG(ERR, 5557 "Invalid parameter passed to flow_xstat devarg.\n"); 5558 return -EINVAL; 5559 } 5560 5561 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5562 PMD_DRV_LOG(ERR, 5563 "Invalid value passed to flow_xstat devarg.\n"); 5564 return -EINVAL; 5565 } 5566 5567 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5568 if (BNXT_FLOW_XSTATS_EN(bp)) 5569 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5570 5571 return 0; 5572 } 5573 5574 static int 5575 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5576 const char *value, void *opaque_arg) 5577 { 5578 struct bnxt *bp = opaque_arg; 5579 unsigned long max_num_kflows; 5580 char *end = NULL; 5581 5582 if (!value || !opaque_arg) { 5583 PMD_DRV_LOG(ERR, 5584 "Invalid parameter passed to max_num_kflows devarg.\n"); 5585 return -EINVAL; 5586 } 5587 5588 max_num_kflows = strtoul(value, &end, 10); 5589 if (end == NULL || *end != '\0' || 5590 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5591 PMD_DRV_LOG(ERR, 5592 "Invalid parameter passed to max_num_kflows devarg.\n"); 5593 return -EINVAL; 5594 } 5595 5596 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5597 PMD_DRV_LOG(ERR, 5598 "Invalid value passed to max_num_kflows devarg.\n"); 5599 return -EINVAL; 5600 } 5601 5602 bp->max_num_kflows = max_num_kflows; 5603 if (bp->max_num_kflows) 5604 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5605 max_num_kflows); 5606 5607 return 0; 5608 } 5609 5610 static int 5611 bnxt_parse_devarg_app_id(__rte_unused const char *key, 5612 const char *value, void *opaque_arg) 5613 { 5614 struct bnxt *bp = opaque_arg; 5615 unsigned long app_id; 5616 char *end = NULL; 5617 5618 if (!value || !opaque_arg) { 5619 PMD_DRV_LOG(ERR, 5620 "Invalid parameter passed to app-id " 5621 "devargs.\n"); 5622 return -EINVAL; 5623 } 5624 5625 app_id = strtoul(value, &end, 10); 5626 if (end == NULL || *end != '\0' || 5627 (app_id == ULONG_MAX && errno == ERANGE)) { 5628 PMD_DRV_LOG(ERR, 5629 "Invalid parameter passed to app_id " 5630 "devargs.\n"); 5631 return -EINVAL; 5632 } 5633 5634 if (BNXT_DEVARG_APP_ID_INVALID(app_id)) { 5635 PMD_DRV_LOG(ERR, "Invalid app-id(%d) devargs.\n", 5636 (uint16_t)app_id); 5637 return -EINVAL; 5638 } 5639 5640 
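	/* app_id was range-checked above (0-255), so the narrowing
	 * assignment below cannot truncate a valid value.
	 */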
bp->app_id = app_id; 5641 PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id); 5642 5643 return 0; 5644 } 5645 5646 static int 5647 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5648 const char *value, void *opaque_arg) 5649 { 5650 struct bnxt_representor *vfr_bp = opaque_arg; 5651 unsigned long rep_is_pf; 5652 char *end = NULL; 5653 5654 if (!value || !opaque_arg) { 5655 PMD_DRV_LOG(ERR, 5656 "Invalid parameter passed to rep_is_pf devargs.\n"); 5657 return -EINVAL; 5658 } 5659 5660 rep_is_pf = strtoul(value, &end, 10); 5661 if (end == NULL || *end != '\0' || 5662 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5663 PMD_DRV_LOG(ERR, 5664 "Invalid parameter passed to rep_is_pf devargs.\n"); 5665 return -EINVAL; 5666 } 5667 5668 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5669 PMD_DRV_LOG(ERR, 5670 "Invalid value passed to rep_is_pf devargs.\n"); 5671 return -EINVAL; 5672 } 5673 5674 vfr_bp->flags |= rep_is_pf; 5675 if (BNXT_REP_PF(vfr_bp)) 5676 PMD_DRV_LOG(INFO, "PF representor\n"); 5677 else 5678 PMD_DRV_LOG(INFO, "VF representor\n"); 5679 5680 return 0; 5681 } 5682 5683 static int 5684 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5685 const char *value, void *opaque_arg) 5686 { 5687 struct bnxt_representor *vfr_bp = opaque_arg; 5688 unsigned long rep_based_pf; 5689 char *end = NULL; 5690 5691 if (!value || !opaque_arg) { 5692 PMD_DRV_LOG(ERR, 5693 "Invalid parameter passed to rep_based_pf " 5694 "devargs.\n"); 5695 return -EINVAL; 5696 } 5697 5698 rep_based_pf = strtoul(value, &end, 10); 5699 if (end == NULL || *end != '\0' || 5700 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5701 PMD_DRV_LOG(ERR, 5702 "Invalid parameter passed to rep_based_pf " 5703 "devargs.\n"); 5704 return -EINVAL; 5705 } 5706 5707 if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5708 PMD_DRV_LOG(ERR, 5709 "Invalid value passed to rep_based_pf devargs.\n"); 5710 return -EINVAL; 5711 } 5712 5713 vfr_bp->rep_based_pf = rep_based_pf; 5714 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5715 5716 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5717 5718 return 0; 5719 } 5720 5721 static int 5722 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 5723 const char *value, void *opaque_arg) 5724 { 5725 struct bnxt_representor *vfr_bp = opaque_arg; 5726 unsigned long rep_q_r2f; 5727 char *end = NULL; 5728 5729 if (!value || !opaque_arg) { 5730 PMD_DRV_LOG(ERR, 5731 "Invalid parameter passed to rep_q_r2f " 5732 "devargs.\n"); 5733 return -EINVAL; 5734 } 5735 5736 rep_q_r2f = strtoul(value, &end, 10); 5737 if (end == NULL || *end != '\0' || 5738 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5739 PMD_DRV_LOG(ERR, 5740 "Invalid parameter passed to rep_q_r2f " 5741 "devargs.\n"); 5742 return -EINVAL; 5743 } 5744 5745 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5746 PMD_DRV_LOG(ERR, 5747 "Invalid value passed to rep_q_r2f devargs.\n"); 5748 return -EINVAL; 5749 } 5750 5751 vfr_bp->rep_q_r2f = rep_q_r2f; 5752 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5753 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5754 5755 return 0; 5756 } 5757 5758 static int 5759 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5760 const char *value, void *opaque_arg) 5761 { 5762 struct bnxt_representor *vfr_bp = opaque_arg; 5763 unsigned long rep_q_f2r; 5764 char *end = NULL; 5765 5766 if (!value || !opaque_arg) { 5767 PMD_DRV_LOG(ERR, 5768 "Invalid parameter passed to rep_q_f2r " 5769 "devargs.\n"); 5770 return -EINVAL; 5771 } 5772 5773 rep_q_f2r = strtoul(value, 
&end, 10); 5774 if (end == NULL || *end != '\0' || 5775 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5776 PMD_DRV_LOG(ERR, 5777 "Invalid parameter passed to rep_q_f2r " 5778 "devargs.\n"); 5779 return -EINVAL; 5780 } 5781 5782 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5783 PMD_DRV_LOG(ERR, 5784 "Invalid value passed to rep_q_f2r devargs.\n"); 5785 return -EINVAL; 5786 } 5787 5788 vfr_bp->rep_q_f2r = rep_q_f2r; 5789 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5790 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5791 5792 return 0; 5793 } 5794 5795 static int 5796 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5797 const char *value, void *opaque_arg) 5798 { 5799 struct bnxt_representor *vfr_bp = opaque_arg; 5800 unsigned long rep_fc_r2f; 5801 char *end = NULL; 5802 5803 if (!value || !opaque_arg) { 5804 PMD_DRV_LOG(ERR, 5805 "Invalid parameter passed to rep_fc_r2f " 5806 "devargs.\n"); 5807 return -EINVAL; 5808 } 5809 5810 rep_fc_r2f = strtoul(value, &end, 10); 5811 if (end == NULL || *end != '\0' || 5812 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5813 PMD_DRV_LOG(ERR, 5814 "Invalid parameter passed to rep_fc_r2f " 5815 "devargs.\n"); 5816 return -EINVAL; 5817 } 5818 5819 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5820 PMD_DRV_LOG(ERR, 5821 "Invalid value passed to rep_fc_r2f devargs.\n"); 5822 return -EINVAL; 5823 } 5824 5825 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5826 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5827 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5828 5829 return 0; 5830 } 5831 5832 static int 5833 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5834 const char *value, void *opaque_arg) 5835 { 5836 struct bnxt_representor *vfr_bp = opaque_arg; 5837 unsigned long rep_fc_f2r; 5838 char *end = NULL; 5839 5840 if (!value || !opaque_arg) { 5841 PMD_DRV_LOG(ERR, 5842 "Invalid parameter passed to rep_fc_f2r " 5843 "devargs.\n"); 5844 return -EINVAL; 5845 } 5846 5847 rep_fc_f2r = strtoul(value, &end, 10); 5848 if (end == NULL || *end != '\0' || 5849 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5850 PMD_DRV_LOG(ERR, 5851 "Invalid parameter passed to rep_fc_f2r " 5852 "devargs.\n"); 5853 return -EINVAL; 5854 } 5855 5856 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5857 PMD_DRV_LOG(ERR, 5858 "Invalid value passed to rep_fc_f2r devargs.\n"); 5859 return -EINVAL; 5860 } 5861 5862 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5863 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5864 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5865 5866 return 0; 5867 } 5868 5869 static int 5870 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5871 { 5872 struct rte_kvargs *kvlist; 5873 int ret; 5874 5875 if (devargs == NULL) 5876 return 0; 5877 5878 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5879 if (kvlist == NULL) 5880 return -EINVAL; 5881 5882 /* 5883 * Handler for "flow_xstat" devarg. 5884 * Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1" 5885 */ 5886 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5887 bnxt_parse_devarg_flow_xstat, bp); 5888 if (ret) 5889 goto err; 5890 5891 /* 5892 * Handler for "accum-stats" devarg. 5893 * Invoked as for ex: "-a 0000:00:0d.0,accum-stats=1" 5894 */ 5895 rte_kvargs_process(kvlist, BNXT_DEVARG_ACCUM_STATS, 5896 bnxt_parse_devarg_accum_stats, bp); 5897 /* 5898 * Handler for "max_num_kflows" devarg. 
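	 * The value is in units of 1024 (K) flows; it must be a power of 2
	 * and at least 32.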
 * Invoked as for ex: "-a 0000:00:0d.0,max_num_kflows=32"
	 */
	ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS,
				 bnxt_parse_devarg_max_num_kflows, bp);
	if (ret)
		goto err;

	/*
	 * Handler for "app-id" devarg.
	 * Invoked as for ex: "-a 0000:00:0d.0,app-id=1"
	 */
	rte_kvargs_process(kvlist, BNXT_DEVARG_APP_ID,
			   bnxt_parse_devarg_app_id, bp);

err:
	rte_kvargs_free(kvlist);
	return ret;
}

static int bnxt_alloc_switch_domain(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
		rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
		if (rc)
			PMD_DRV_LOG(ERR,
				    "Failed to alloc switch domain: %d\n", rc);
		else
			PMD_DRV_LOG(INFO,
				    "Switch domain allocated %d\n",
				    bp->switch_domain_id);
	}

	return rc;
}

/* Allocate and initialize various fields in bnxt struct that
 * need to be allocated/destroyed only once in the lifetime of the driver
 */
static int bnxt_drv_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	if (bnxt_p5_device(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_CHIP_P5;

	if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
	    pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
		bp->flags |= BNXT_FLAG_STINGRAY;

	if (BNXT_TRUFLOW_EN(bp)) {
		/* extra mbuf field is required to store CFA code from mark */
		static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = {
			.name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME,
			.size = sizeof(bnxt_cfa_code_dynfield_t),
			.align = __alignof__(bnxt_cfa_code_dynfield_t),
		};
		bnxt_cfa_code_dynfield_offset =
			rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc);
		if (bnxt_cfa_code_dynfield_offset < 0) {
			PMD_DRV_LOG(ERR,
				    "Failed to register mbuf field for TruFlow mark\n");
			return -rte_errno;
		}
	}

	rc = bnxt_map_pci_bars(eth_dev);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to initialize board rc: %x\n", rc);
		return rc;
	}

	rc = bnxt_alloc_pf_info(bp);
	if (rc)
		return rc;

	rc = bnxt_alloc_link_info(bp);
	if (rc)
		return rc;

	rc = bnxt_alloc_parent_info(bp);
	if (rc)
		return rc;

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Failed to allocate response buffer rc: %x\n", rc);
		return rc;
	}
	rc = bnxt_alloc_leds_info(bp);
	if (rc)
		return rc;

	rc = bnxt_alloc_cos_queues(bp);
	if (rc)
		return rc;

	rc = bnxt_init_locks(bp);
	if (rc)
		return rc;

	rc = bnxt_alloc_switch_domain(bp);
	if (rc)
		return rc;

	return rc;
}

static int
bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	static int version_printed;
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		PMD_DRV_LOG(INFO, "%s\n", bnxt_version);

	eth_dev->dev_ops = &bnxt_dev_ops;
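	/* Fast-path and descriptor-status callbacks below must be valid in
	 * every process, so they are set before the secondary-process
	 * early return.
	 */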
eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 6032 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 6033 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 6034 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 6035 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 6036 6037 /* 6038 * For secondary processes, we don't initialise any further 6039 * as primary has already done this work. 6040 */ 6041 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 6042 return 0; 6043 6044 rte_eth_copy_pci_info(eth_dev, pci_dev); 6045 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 6046 6047 bp = eth_dev->data->dev_private; 6048 6049 /* Parse dev arguments passed on when starting the DPDK application. */ 6050 rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs); 6051 if (rc) 6052 goto error_free; 6053 6054 rc = bnxt_drv_init(eth_dev); 6055 if (rc) 6056 goto error_free; 6057 6058 rc = bnxt_init_resources(bp, false); 6059 if (rc) 6060 goto error_free; 6061 6062 rc = bnxt_alloc_stats_mem(bp); 6063 if (rc) 6064 goto error_free; 6065 6066 PMD_DRV_LOG(INFO, 6067 "Found %s device at mem %" PRIX64 ", node addr %pM\n", 6068 DRV_MODULE_NAME, 6069 pci_dev->mem_resource[0].phys_addr, 6070 pci_dev->mem_resource[0].addr); 6071 6072 return 0; 6073 6074 error_free: 6075 bnxt_dev_uninit(eth_dev); 6076 return rc; 6077 } 6078 6079 6080 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 6081 { 6082 if (!ctx) 6083 return; 6084 6085 if (ctx->va) 6086 rte_free(ctx->va); 6087 6088 ctx->va = NULL; 6089 ctx->dma = RTE_BAD_IOVA; 6090 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 6091 } 6092 6093 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 6094 { 6095 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 6096 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 6097 bp->flow_stat->rx_fc_out_tbl.ctx_id, 6098 bp->flow_stat->max_fc, 6099 false); 6100 6101 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 6102 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 6103 bp->flow_stat->tx_fc_out_tbl.ctx_id, 6104 bp->flow_stat->max_fc, 6105 false); 6106 6107 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6108 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 6109 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6110 6111 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6112 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 6113 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6114 6115 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6116 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 6117 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6118 6119 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6120 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 6121 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6122 } 6123 6124 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 6125 { 6126 bnxt_unregister_fc_ctx_mem(bp); 6127 6128 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 6129 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 6130 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 6131 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 6132 } 6133 6134 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 6135 { 6136 if (BNXT_FLOW_XSTATS_EN(bp)) 6137 bnxt_uninit_fc_ctx_mem(bp); 6138 } 6139 6140 static void 6141 bnxt_free_error_recovery_info(struct bnxt *bp) 6142 { 6143 rte_free(bp->recovery_info); 6144 bp->recovery_info = NULL; 6145 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 6146 } 6147 6148 static int 
static int
bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
{
	int rc;

	bnxt_free_int(bp);
	bnxt_free_mem(bp, reconfig_dev);

	bnxt_hwrm_func_buf_unrgtr(bp);
	if (bp->pf != NULL) {
		rte_free(bp->pf->vf_req_buf);
		bp->pf->vf_req_buf = NULL;
	}

	rc = bnxt_hwrm_func_driver_unregister(bp);
	bp->flags &= ~BNXT_FLAG_REGISTERED;
	bnxt_free_ctx_mem(bp);
	if (!reconfig_dev) {
		bnxt_free_hwrm_resources(bp);
		bnxt_free_error_recovery_info(bp);
	}

	bnxt_uninit_ctx_mem(bp);

	bnxt_free_flow_stats_info(bp);
	if (bp->rep_info != NULL)
		bnxt_free_switch_domain(bp);
	bnxt_free_rep_info(bp);
	rte_free(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		bnxt_dev_close_op(eth_dev);

	return 0;
}

static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev *vf_rep_eth_dev;
	int ret = 0, i;

	if (!bp)
		return -EINVAL;

	for (i = 0; i < bp->num_reps; i++) {
		vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
		if (!vf_rep_eth_dev)
			continue;
		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n",
			    vf_rep_eth_dev->data->port_id);
		rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n",
		    eth_dev->data->port_id);
	ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);

	return ret;
}

static void bnxt_free_rep_info(struct bnxt *bp)
{
	rte_free(bp->rep_info);
	bp->rep_info = NULL;
	rte_free(bp->cfa_code_map);
	bp->cfa_code_map = NULL;
}

static int bnxt_init_rep_info(struct bnxt *bp)
{
	int i = 0, rc;

	if (bp->rep_info)
		return 0;

	bp->rep_info = rte_zmalloc("bnxt_rep_info",
				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
				   0);
	if (!bp->rep_info) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
		return -ENOMEM;
	}
	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
				       sizeof(*bp->cfa_code_map) *
				       BNXT_MAX_CFA_CODE, 0);
	if (!bp->cfa_code_map) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
		bnxt_free_rep_info(bp);
		return -ENOMEM;
	}

	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;

	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	return rc;
}
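
/*
 * Data-structure sketch (assumed from the initialisation above, not a
 * quote of the Rx code): cfa_code_map[] translates a CFA code from a Rx
 * completion into a VF representor index, with BNXT_VF_IDX_INVALID
 * meaning "no representor":
 *
 *	uint16_t vf_idx = bp->cfa_code_map[cfa_code];
 *
 *	if (vf_idx != BNXT_VF_IDX_INVALID)
 *		vfr_eth_dev = bp->rep_info[vf_idx].vfr_eth_dev;
 */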
static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
			       struct rte_eth_devargs *eth_da,
			       struct rte_eth_dev *backing_eth_dev,
			       const char *dev_args)
{
	struct rte_eth_dev *vf_rep_eth_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct bnxt *backing_bp;
	uint16_t num_rep;
	int i, ret = 0;
	struct rte_kvargs *kvlist = NULL;

	if (eth_da->type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (eth_da->type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type %d\n",
			    eth_da->type);
		return -ENOTSUP;
	}
	num_rep = eth_da->nb_representor_ports;
	if (num_rep > BNXT_MAX_VF_REPS) {
		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
			    num_rep, BNXT_MAX_VF_REPS);
		return -EINVAL;
	}

	if (num_rep >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR,
			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
			    num_rep, RTE_MAX_ETHPORTS);
		return -EINVAL;
	}

	backing_bp = backing_eth_dev->data->dev_private;

	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
		PMD_DRV_LOG(ERR,
			    "Not a PF or trusted VF. No Representor support\n");
		/* Returning an error is not an option.
		 * Applications are not handling this correctly
		 */
		return 0;
	}

	if (bnxt_init_rep_info(backing_bp))
		return 0;

	for (i = 0; i < num_rep; i++) {
		struct bnxt_representor representor = {
			.vf_id = eth_da->representor_ports[i],
			.switch_domain_id = backing_bp->switch_domain_id,
			.parent_dev = backing_eth_dev
		};

		if (representor.vf_id >= BNXT_MAX_VF_REPS) {
			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
				    representor.vf_id, BNXT_MAX_VF_REPS);
			continue;
		}

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da->representor_ports[i]);

		kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
		if (kvlist) {
			/*
			 * Handler for "rep-is-pf" devarg.
			 * Invoked as, for example: "-a 0000:00:0d.0,
			 * rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
						 bnxt_parse_devarg_rep_is_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep-based-pf" devarg.
			 * Invoked as, for example: "-a 0000:00:0d.0,
			 * rep-based-pf=<pf index>,rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist,
						 BNXT_DEVARG_REP_BASED_PF,
						 bnxt_parse_devarg_rep_based_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep-q-r2f" devarg.
			 * Invoked as, for example:
			 * "-a 0000:00:0d.0,rep-q-r2f=<0..3>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
						 bnxt_parse_devarg_rep_q_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep-q-f2r" devarg.
			 * Invoked as, for example:
			 * "-a 0000:00:0d.0,rep-q-f2r=<0..3>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
						 bnxt_parse_devarg_rep_q_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep-fc-r2f" devarg.
			 * Invoked as, for example:
			 * "-a 0000:00:0d.0,rep-fc-r2f=<0|1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
						 bnxt_parse_devarg_rep_fc_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep-fc-f2r" devarg.
			 * Invoked as, for example:
			 * "-a 0000:00:0d.0,rep-fc-f2r=<0|1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
						 bnxt_parse_devarg_rep_fc_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
		}

		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_representor),
					 NULL, NULL,
					 bnxt_representor_init,
					 &representor);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create bnxt vf "
				    "representor %s.", name);
			goto err;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
				    " for VF-Rep: %s.", name);
			ret = -ENODEV;
			goto err;
		}

		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
			    backing_eth_dev->data->port_id);
		backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
			vf_rep_eth_dev;
		backing_bp->num_reps++;

		/* Free before the next iteration re-parses dev_args,
		 * otherwise the previous kvlist would leak.
		 */
		rte_kvargs_free(kvlist);
		kvlist = NULL;
	}

	return 0;

err:
	/* If num_rep > 1, then rollback already created
	 * ports, since we'll be failing the probe anyway
	 */
	if (num_rep > 1)
		bnxt_pci_remove_dev_with_reps(backing_eth_dev);
	rte_errno = -ret;
	rte_kvargs_free(kvlist);

	return ret;
}
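
/*
 * Usage example (hypothetical command line): two VF representors on a
 * backing PF, combined with the per-representor devargs handled above:
 *
 *	dpdk-testpmd -a 0000:00:0d.0,representor=[0-1],rep-based-pf=0,\
 *		rep-is-pf=0,rep-q-r2f=0,rep-q-f2r=1,rep-fc-r2f=0,rep-fc-f2r=1
 *
 * Each representor is created as an ethdev named
 * "net_<pci address>_representor_<vf id>", matching the snprintf() above.
 */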
static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *backing_eth_dev;
	uint16_t num_rep;
	int ret = 0;

	if (pci_dev->device.devargs) {
		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					    &eth_da);
		if (ret)
			return ret;
	}

	num_rep = eth_da.nb_representor_ports;
	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n", num_rep);

	/* We could come here after first level of probe is already invoked
	 * as part of an application bringup (OVS-DPDK vswitchd), so first
	 * check for an already allocated eth_dev for the backing device
	 * (PF/Trusted VF).
	 */
	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (backing_eth_dev == NULL) {
		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					 sizeof(struct bnxt),
					 eth_dev_pci_specific_init, pci_dev,
					 bnxt_dev_init, NULL);

		if (ret || !num_rep)
			return ret;

		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
		    backing_eth_dev->data->port_id);

	if (!num_rep)
		return ret;

	/* probe representor ports now */
	ret = bnxt_rep_port_probe(pci_dev, &eth_da, backing_eth_dev,
				  pci_dev->device.devargs->args);

	return ret;
}
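
/*
 * Hotplug sketch (assumed OVS-DPDK style usage, not driver code):
 * RTE_PCI_DRV_PROBE_AGAIN in bnxt_rte_pmd below lets an application
 * re-probe an already-attached device to add representors:
 *
 *	rte_dev_probe("0000:00:0d.0,representor=[0-3]");
 *
 * On the second pass, rte_eth_dev_allocated() above finds the existing
 * backing ethdev and only the representor ports are created.
 */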
static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0; /* Invoked typically only by OVS-DPDK; by the
			   * time it comes here the eth_dev has already
			   * been deleted by rte_eth_dev_close(), so
			   * returning success here still allows the
			   * removal to complete its cleanup.
			   */

	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_representor_uninit);
		else
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_dev_uninit);
	} else {
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
	}
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_INTR_RMV |
		     RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
					       * and OVS-DPDK
					       */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	return strcmp(dev->device->driver->name, drv->driver.name) == 0;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
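
/*
 * Usage sketch (hypothetical caller, e.g. the rte_pmd_bnxt.h entry
 * points): PMD-specific APIs are expected to verify that a port really
 * is driven by this PMD before touching bnxt-private state:
 *
 *	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 *
 *	if (!is_bnxt_supported(dev))
 *		return -ENOTSUP;
 */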