/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_dev.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_stats.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"
#include "ulp_flow_db.h"
#include "rte_pmd_bnxt.h"

#define DRV_MODULE_NAME		"bnxt"
static const char bnxt_version[] =
	"Broadcom NetXtreme driver " DRV_MODULE_NAME;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id bnxt_pci_id_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58812) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58814) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58818_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

#define BNXT_DEVARG_ACCUM_STATS		"accum-stats"
#define BNXT_DEVARG_FLOW_XSTAT		"flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS	"max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR		"representor"
#define BNXT_DEVARG_REP_BASED_PF	"rep-based-pf"
#define BNXT_DEVARG_REP_IS_PF		"rep-is-pf"
#define BNXT_DEVARG_REP_Q_R2F		"rep-q-r2f"
#define BNXT_DEVARG_REP_Q_F2R		"rep-q-f2r"
#define BNXT_DEVARG_REP_FC_R2F		"rep-fc-r2f"
#define BNXT_DEVARG_REP_FC_F2R		"rep-fc-f2r"
#define BNXT_DEVARG_APP_ID		"app-id"

static const char *const bnxt_dev_args[] = {
	BNXT_DEVARG_REPRESENTOR,
	BNXT_DEVARG_ACCUM_STATS,
	BNXT_DEVARG_FLOW_XSTAT,
	BNXT_DEVARG_MAX_NUM_KFLOWS,
	BNXT_DEVARG_REP_BASED_PF,
	BNXT_DEVARG_REP_IS_PF,
	BNXT_DEVARG_REP_Q_R2F,
	BNXT_DEVARG_REP_Q_F2R,
	BNXT_DEVARG_REP_FC_R2F,
	BNXT_DEVARG_REP_FC_F2R,
	BNXT_DEVARG_APP_ID,
	NULL
};

/*
 * accum-stats == false to disable flow counter accumulation
 * accum-stats == true to enable flow counter accumulation
 */
#define BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)	((accum_stats) > 1)

/*
 * app-id = a non-negative 8-bit number
 */
#define BNXT_DEVARG_APP_ID_INVALID(val)			((val) > 255)

/*
 * flow_xstat == false to disable the feature
 * flow_xstat == true to enable the feature
 */
#define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)	((flow_xstat) > 1)

/*
 * rep_is_pf == false to indicate VF representor
 * rep_is_pf == true to indicate PF representor
 */
#define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)	((rep_is_pf) > 1)

/*
 * rep_based_pf == Physical index of the PF
 */
#define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)	((rep_based_pf) > 15)
/*
 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction
 */
#define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)	((rep_q_r2f) > 3)

/*
 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction
 */
#define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)	((rep_q_f2r) > 3)

/*
 * rep_fc_r2f == Flow control for the representor to endpoint direction
 */
#define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)	((rep_fc_r2f) > 1)

/*
 * rep_fc_f2r == Flow control for the endpoint to representor direction
 */
#define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)	((rep_fc_f2r) > 1)

int bnxt_cfa_code_dynfield_offset = -1;

/*
 * max_num_kflows must be >= 32
 * and must be a power-of-2 supported value
 * return: 1 -> invalid
 *	   0 -> valid
 */
static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
{
	if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows))
		return 1;
	return 0;
}
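
/*
 * Example (hypothetical PCI address) of passing these devargs on the EAL
 * command line; values must satisfy the BNXT_DEVARG_*_INVALID checks above,
 * e.g. max-num-kflows must be a power of two that is >= 32:
 *
 *   -a 0000:0d:00.0,flow-xstat=1,max-num-kflows=64,app-id=0
 */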

static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
static void bnxt_cancel_fw_health_check(struct bnxt *bp);
static int bnxt_restore_vlan_filters(struct bnxt *bp);
static void bnxt_dev_recover(void *arg);
static void bnxt_free_error_recovery_info(struct bnxt *bp);
static void bnxt_free_rep_info(struct bnxt *bp);

int is_bnxt_in_error(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
		return -EIO;
	if (bp->flags & BNXT_FLAG_FW_RESET)
		return -EBUSY;

	return 0;
}

/***********************/

/*
 * High level utility functions
 */

static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
{
	unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings,
					     BNXT_RSS_TBL_SIZE_P5);

	if (!BNXT_CHIP_P5(bp))
		return 1;

	return RTE_ALIGN_MUL_CEIL(num_rss_rings,
				  BNXT_RSS_ENTRIES_PER_CTX_P5) /
		BNXT_RSS_ENTRIES_PER_CTX_P5;
}

uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
	if (!BNXT_CHIP_P5(bp))
		return HW_HASH_INDEX_SIZE;

	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5;
}
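
/*
 * Worked example (assuming BNXT_RSS_ENTRIES_PER_CTX_P5 is 64 and
 * BNXT_RSS_TBL_SIZE_P5 is 512): a P5 port configured with 130 Rx rings
 * needs RTE_ALIGN_MUL_CEIL(130, 64) / 64 = 3 RSS contexts, and
 * bnxt_rss_hash_tbl_size() then reports 3 * 64 = 192 RETA entries.
 */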

static void bnxt_free_parent_info(struct bnxt *bp)
{
	rte_free(bp->parent);
	bp->parent = NULL;
}

static void bnxt_free_pf_info(struct bnxt *bp)
{
	rte_free(bp->pf);
	bp->pf = NULL;
}

static void bnxt_free_link_info(struct bnxt *bp)
{
	rte_free(bp->link_info);
	bp->link_info = NULL;
}

static void bnxt_free_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return;

	rte_free(bp->leds);
	bp->leds = NULL;
}

static void bnxt_free_flow_stats_info(struct bnxt *bp)
{
	rte_free(bp->flow_stat);
	bp->flow_stat = NULL;
}

static void bnxt_free_cos_queues(struct bnxt *bp)
{
	rte_free(bp->rx_cos_queue);
	bp->rx_cos_queue = NULL;
	rte_free(bp->tx_cos_queue);
	bp->tx_cos_queue = NULL;
}

static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
{
	bnxt_free_filter_mem(bp);
	bnxt_free_vnic_attributes(bp);
	bnxt_free_vnic_mem(bp);

	/* tx/rx rings are configured as part of *_queue_setup callbacks.
	 * If the number of rings changes across a fw update,
	 * we don't have much choice except to warn the user.
	 */
	if (!reconfig) {
		bnxt_free_stats(bp);
		bnxt_free_tx_rings(bp);
		bnxt_free_rx_rings(bp);
	}
	bnxt_free_async_cp_ring(bp);
	bnxt_free_rxtx_nq_ring(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_alloc_parent_info(struct bnxt *bp)
{
	bp->parent = rte_zmalloc("bnxt_parent_info",
				 sizeof(struct bnxt_parent_info), 0);
	if (bp->parent == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_pf_info(struct bnxt *bp)
{
	bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0);
	if (bp->pf == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_link_info(struct bnxt *bp)
{
	bp->link_info =
		rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0);
	if (bp->link_info == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_leds_info(struct bnxt *bp)
{
	if (BNXT_VF(bp))
		return 0;

	bp->leds = rte_zmalloc("bnxt_leds",
			       BNXT_MAX_LED * sizeof(struct bnxt_led_info),
			       0);
	if (bp->leds == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_cos_queues(struct bnxt *bp)
{
	bp->rx_cos_queue =
		rte_zmalloc("bnxt_rx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->rx_cos_queue == NULL)
		return -ENOMEM;

	bp->tx_cos_queue =
		rte_zmalloc("bnxt_tx_cosq",
			    BNXT_COS_QUEUE_COUNT *
			    sizeof(struct bnxt_cos_queue_info),
			    0);
	if (bp->tx_cos_queue == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
{
	bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
				    sizeof(struct bnxt_flow_stat_info), 0);
	if (bp->flow_stat == NULL)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
{
	int rc;

	rc = bnxt_alloc_ring_grps(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_ring_struct(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_filter_mem(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_async_cp_ring(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_rxtx_nq_ring(bp);
	if (rc)
		goto alloc_mem_err;

	if (BNXT_FLOW_XSTATS_EN(bp)) {
		rc = bnxt_alloc_flow_stats_info(bp);
		if (rc)
			goto alloc_mem_err;
	}

	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, reconfig);
	return rc;
}
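
/*
 * Per-VNIC bring-up: allocate the ring group and the VNIC itself, set up
 * RSS contexts when RSS is enabled, program VLAN strip and L2 filters,
 * attach the Rx queues, and finally configure RSS, placement mode and TPA
 * for the VNIC.
 */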

static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	struct bnxt_rx_queue *rxq;
	unsigned int j;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto err_out;

	PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
		    vnic_id, vnic, vnic->fw_grp_ids);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc)
		goto err_out;

	/* Alloc RSS context only if RSS mode is enabled */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
		int j, nr_ctxs = bnxt_rss_ctxts(bp);

		/* RSS table size in Thor is 512.
		 * Cap max Rx rings to same value
		 */
		if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) {
			PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n",
				    bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5);
			goto err_out;
		}

		rc = 0;
		for (j = 0; j < nr_ctxs; j++) {
			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
			if (rc)
				break;
		}
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
				    vnic_id, j, rc);
			goto err_out;
		}
		vnic->num_lb_ctxts = nr_ctxs;
	}

	/*
	 * Firmware sets pf pair in default vnic cfg. If the VLAN strip
	 * setting is not available at this time, it will not be
	 * configured correctly in the CFA.
	 */
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto err_out;

	rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
	if (rc)
		goto err_out;

	for (j = 0; j < bp->rx_num_qs_per_vnic; j++) {
		rxq = bp->eth_dev->data->rx_queues[j];

		PMD_DRV_LOG(DEBUG,
			    "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n",
			    j, rxq->vnic, rxq->vnic->fw_grp_ids);

		if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start)
			rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		else
			vnic->rx_queue_cnt++;

		if (!rxq->rx_deferred_start) {
			bp->eth_dev->data->rx_queue_state[j] =
				RTE_ETH_QUEUE_STATE_STARTED;
			rxq->rx_started = true;
		}
	}

	PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt);

	rc = bnxt_vnic_rss_configure(bp, vnic);
	if (rc)
		goto err_out;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

	rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
				    (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
				    true : false);
	if (rc)
		goto err_out;

	return 0;
err_out:
	PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
		    vnic_id, rc);
	return rc;
}
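
/*
 * Flow counters use four DMA tables shared with firmware: the "in" tables
 * carry the counter IDs the driver asks about, and the "out" tables receive
 * the packet/byte counts that firmware writes back.
 */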

static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
				&bp->flow_stat->rx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
		    " rx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
		    bp->flow_stat->rx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
				&bp->flow_stat->rx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
		    " rx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->rx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
		    bp->flow_stat->rx_fc_out_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
				&bp->flow_stat->tx_fc_in_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
		    " tx_fc_in_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_in_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
		    bp->flow_stat->tx_fc_in_tbl.ctx_id);

	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
				&bp->flow_stat->tx_fc_out_tbl.ctx_id);
	if (rc)
		return rc;

	PMD_DRV_LOG(DEBUG,
		    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
		    " tx_fc_out_tbl.ctx_id = %d\n",
		    bp->flow_stat->tx_fc_out_tbl.va,
		    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
		    bp->flow_stat->tx_fc_out_tbl.ctx_id);

	memset(bp->flow_stat->rx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->rx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->rx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);
	if (rc)
		return rc;

	memset(bp->flow_stat->tx_fc_out_tbl.va,
	       0,
	       bp->flow_stat->tx_fc_out_tbl.size);
	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
				       bp->flow_stat->tx_fc_out_tbl.ctx_id,
				       bp->flow_stat->max_fc,
				       true);

	return rc;
}

static int bnxt_alloc_ctx_mem_buf(struct bnxt *bp, char *type, size_t size,
				  struct bnxt_ctx_mem_buf_info *ctx)
{
	if (!ctx)
		return -EINVAL;

	ctx->va = rte_zmalloc_socket(type, size, 0,
				     bp->eth_dev->device->numa_node);
	if (ctx->va == NULL)
		return -ENOMEM;
	rte_mem_lock_page(ctx->va);
	ctx->size = size;
	ctx->dma = rte_mem_virt2iova(ctx->va);
	if (ctx->dma == RTE_BAD_IOVA)
		return -ENOMEM;

	return 0;
}

static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];
	uint16_t max_fc;
	int rc = 0;

	max_fc = bp->flow_stat->max_fc;

	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 4,
				    &bp->flow_stat->rx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 16,
				    &bp->flow_stat->rx_fc_out_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 4 bytes for each counter-id */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 4,
				    &bp->flow_stat->tx_fc_in_tbl);
	if (rc)
		return rc;

	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
	rc = bnxt_alloc_ctx_mem_buf(bp, type,
				    max_fc * 16,
				    &bp->flow_stat->tx_fc_out_tbl);
	if (rc)
		return rc;

	rc = bnxt_register_fc_ctx_mem(bp);

	return rc;
}

static int bnxt_init_ctx_mem(struct bnxt *bp)
{
	int rc = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
	    !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
	    !BNXT_FLOW_XSTATS_EN(bp))
		return 0;

	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
	if (rc)
		return rc;

	rc = bnxt_init_fc_ctx_mem(bp);

	return rc;
}

static int bnxt_update_phy_setting(struct bnxt *bp)
{
	struct rte_eth_link new;
	int rc;

	rc = bnxt_get_hwrm_link_config(bp, &new);
	if (rc) {
		PMD_DRV_LOG(ERR, "Failed to get link settings\n");
		return rc;
	}

	/*
	 * On BCM957508-N2100 adapters, FW will not allow any user other
	 * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call
	 * always returns link up. Force phy update always in that case.
	 */
	if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) {
		rc = bnxt_set_hwrm_link_config(bp, true);
		if (rc) {
			PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
			return rc;
		}
	}

	return rc;
}

static void bnxt_free_prev_ring_stats(struct bnxt *bp)
{
	rte_free(bp->prev_rx_ring_stats);
	rte_free(bp->prev_tx_ring_stats);

	bp->prev_rx_ring_stats = NULL;
	bp->prev_tx_ring_stats = NULL;
}

static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
{
	bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats",
					     sizeof(struct bnxt_ring_stats) *
					     bp->rx_cp_nr_rings,
					     0);
	if (bp->prev_rx_ring_stats == NULL)
		return -ENOMEM;

	bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats",
					     sizeof(struct bnxt_ring_stats) *
					     bp->tx_cp_nr_rings,
					     0);
	if (bp->prev_tx_ring_stats == NULL)
		goto error;

	return 0;

error:
	bnxt_free_prev_ring_stats(bp);
	return -ENOMEM;
}
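
/*
 * Datapath bring-up: allocate stat contexts, rings and ring groups in FW,
 * map COS queues when supported, configure the VNIC(s), start non-deferred
 * Tx queues, program the Rx mask, wire up the Rx queue interrupt vectors,
 * and finally update the PHY and allocate the flow-mark table.
 */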
"HWRM cfa l2 rx mask failure rc: %x\n", rc); 831 goto err_out; 832 } 833 834 /* check and configure queue intr-vector mapping */ 835 if ((rte_intr_cap_multiple(intr_handle) || 836 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 837 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 838 intr_vector = bp->eth_dev->data->nb_rx_queues; 839 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 840 if (intr_vector > bp->rx_cp_nr_rings) { 841 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 842 bp->rx_cp_nr_rings); 843 return -ENOTSUP; 844 } 845 rc = rte_intr_efd_enable(intr_handle, intr_vector); 846 if (rc) 847 return rc; 848 } 849 850 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 851 intr_handle->intr_vec = 852 rte_zmalloc("intr_vec", 853 bp->eth_dev->data->nb_rx_queues * 854 sizeof(int), 0); 855 if (intr_handle->intr_vec == NULL) { 856 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 857 " intr_vec", bp->eth_dev->data->nb_rx_queues); 858 rc = -ENOMEM; 859 goto err_out; 860 } 861 PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " 862 "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", 863 intr_handle->intr_vec, intr_handle->nb_efd, 864 intr_handle->max_intr); 865 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 866 queue_id++) { 867 intr_handle->intr_vec[queue_id] = 868 vec + BNXT_RX_VEC_START; 869 if (vec < base + intr_handle->nb_efd - 1) 870 vec++; 871 } 872 } 873 874 /* enable uio/vfio intr/eventfd mapping */ 875 rc = rte_intr_enable(intr_handle); 876 #ifndef RTE_EXEC_ENV_FREEBSD 877 /* In FreeBSD OS, nic_uio driver does not support interrupts */ 878 if (rc) 879 goto err_out; 880 #endif 881 882 rc = bnxt_update_phy_setting(bp); 883 if (rc) 884 goto err_out; 885 886 bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); 887 if (!bp->mark_table) 888 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); 889 890 return 0; 891 892 err_out: 893 /* Some of the error status returned by FW may not be from errno.h */ 894 if (rc > 0) 895 rc = -EIO; 896 897 return rc; 898 } 899 900 static int bnxt_shutdown_nic(struct bnxt *bp) 901 { 902 bnxt_free_all_hwrm_resources(bp); 903 bnxt_free_all_filters(bp); 904 bnxt_free_all_vnics(bp); 905 return 0; 906 } 907 908 /* 909 * Device configuration and status function 910 */ 911 912 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) 913 { 914 uint32_t link_speed = 0; 915 uint32_t speed_capa = 0; 916 917 if (bp->link_info == NULL) 918 return 0; 919 920 link_speed = bp->link_info->support_speeds; 921 922 /* If PAM4 is configured, use PAM4 supported speed */ 923 if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0) 924 link_speed = bp->link_info->support_pam4_speeds; 925 926 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) 927 speed_capa |= ETH_LINK_SPEED_100M; 928 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) 929 speed_capa |= ETH_LINK_SPEED_100M_HD; 930 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) 931 speed_capa |= ETH_LINK_SPEED_1G; 932 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) 933 speed_capa |= ETH_LINK_SPEED_2_5G; 934 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) 935 speed_capa |= ETH_LINK_SPEED_10G; 936 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) 937 speed_capa |= ETH_LINK_SPEED_20G; 938 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) 939 speed_capa |= ETH_LINK_SPEED_25G; 940 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) 941 speed_capa |= 

/*
 * Device configuration and status function
 */

uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
	uint32_t link_speed = 0;
	uint32_t speed_capa = 0;

	if (bp->link_info == NULL)
		return 0;

	link_speed = bp->link_info->support_speeds;

	/* If PAM4 is configured, use PAM4 supported speed */
	if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0)
		link_speed = bp->link_info->support_pam4_speeds;

	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
		speed_capa |= ETH_LINK_SPEED_100M;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
		speed_capa |= ETH_LINK_SPEED_100M_HD;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
		speed_capa |= ETH_LINK_SPEED_1G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
		speed_capa |= ETH_LINK_SPEED_2_5G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
		speed_capa |= ETH_LINK_SPEED_10G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
		speed_capa |= ETH_LINK_SPEED_20G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
		speed_capa |= ETH_LINK_SPEED_25G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
		speed_capa |= ETH_LINK_SPEED_40G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
		speed_capa |= ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
		speed_capa |= ETH_LINK_SPEED_100G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
		speed_capa |= ETH_LINK_SPEED_50G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
		speed_capa |= ETH_LINK_SPEED_100G;
	if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
		speed_capa |= ETH_LINK_SPEED_200G;

	if (bp->link_info->auto_mode ==
	    HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
		speed_capa |= ETH_LINK_SPEED_FIXED;

	return speed_capa;
}

static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
				struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
	struct bnxt *bp = eth_dev->data->dev_private;
	uint16_t max_vnics, i, j, vpool, vrxq;
	unsigned int max_rx_rings;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* MAC Specifics */
	dev_info->max_mac_addrs = bp->max_l2_ctx;
	dev_info->max_hash_mac_addrs = 0;

	/* PF/VF specifics */
	if (BNXT_PF(bp))
		dev_info->max_vfs = pdev->max_vfs;

	max_rx_rings = bnxt_max_rings(bp);
	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
	dev_info->max_rx_queues = max_rx_rings;
	dev_info->max_tx_queues = max_rx_rings;
	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
	dev_info->hash_key_size = HW_HASH_KEY_SIZE;
	max_vnics = bp->max_vnics;

	/* MTU specifics */
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = BNXT_MAX_MTU;

	/* Fast path specifics */
	dev_info->min_rx_bufsize = 1;
	dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;

	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
	dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
				    dev_info->tx_queue_offload_capa;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

	dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = 8,
			.hthresh = 8,
			.wthresh = 0,
		},
		.rx_free_thresh = 32,
		.rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = 32,
			.hthresh = 0,
			.wthresh = 0,
		},
		.tx_free_thresh = 32,
		.tx_rs_thresh = 32,
	};
	eth_dev->data->dev_conf.intr_conf.lsc = 1;

	dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
	dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
	dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;

	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
		dev_info->switch_info.name = eth_dev->device->name;
		dev_info->switch_info.domain_id = bp->switch_domain_id;
		dev_info->switch_info.port_id =
				BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF :
					BNXT_SWITCH_PORT_ID_TRUSTED_VF;
	}

	/*
	 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
	 *	 need further investigation.
	 */

	/* VMDq resources */
	vpool = 64; /* ETH_64_POOLS */
	vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (dev_info->max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					goto found;
				}
			}
			/* Not enough resources to support VMDq */
			break;
		}
	}
	/* Not enough resources to support VMDq */
	vpool = 0;
	vrxq = 0;

found:
	dev_info->max_vmdq_pools = vpool;
	dev_info->vmdq_queue_num = vrxq;

	dev_info->vmdq_pool_base = 0;
	dev_info->vmdq_queue_base = 0;

	return 0;
}
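
/*
 * dev_configure validates the requested queue counts against the resources
 * reported by firmware (rings, completion rings, stat contexts, ring groups
 * and VNICs); on VFs using the new resource manager it first re-reserves
 * resources via HWRM before committing to the configuration.
 */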
/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int rc;

	bp->rx_queues = (void *)eth_dev->data->rx_queues;
	bp->tx_queues = (void *)eth_dev->data->tx_queues;
	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
		rc = bnxt_hwrm_check_vf_rings(bp);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
			return -ENOSPC;
		}

		/* If a resource has already been allocated - in this case
		 * it is the async completion ring, free it. Reallocate it after
		 * resource reservation. This will ensure the resource counts
		 * are calculated correctly.
		 */

		pthread_mutex_lock(&bp->def_cp_lock);

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			bnxt_disable_int(bp);
			bnxt_free_cp_ring(bp, bp->async_cp_ring);
		}

		rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
		if (rc) {
			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
			pthread_mutex_unlock(&bp->def_cp_lock);
			return -ENOSPC;
		}

		if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) {
			rc = bnxt_alloc_async_cp_ring(bp);
			if (rc) {
				pthread_mutex_unlock(&bp->def_cp_lock);
				return rc;
			}
			bnxt_enable_int(bp);
		}

		pthread_mutex_unlock(&bp->def_cp_lock);
	}

	/* Inherit new configurations */
	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
	    + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
	    bp->max_stat_ctx)
		goto resource_error;

	if (BNXT_HAS_RING_GRPS(bp) &&
	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
		goto resource_error;

	if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
	    bp->max_vnics < eth_dev->data->nb_rx_queues)
		goto resource_error;

	bp->rx_cp_nr_rings = bp->rx_nr_rings;
	bp->tx_cp_nr_rings = bp->tx_nr_rings;

	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
	eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
			BNXT_NUM_VLANS;
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
	}
	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}

void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			    eth_dev->data->port_id,
			    (uint32_t)link->link_speed,
			    (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			    ("full-duplex") : ("half-duplex"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			    eth_dev->data->port_id);
}
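
/*
 * Example (assuming the default 128-byte RTE_PKTMBUF_HEADROOM): a mempool
 * with a 2176-byte data room leaves 2048 bytes of payload per mbuf, so any
 * max_rx_pkt_len above 2048 forces scattered receive below.
 */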
/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
	uint16_t buf_size;
	int i;

	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
		return 1;

	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
		return 1;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				      RTE_PKTMBUF_HEADROOM);
		if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
			return 1;
	}
	return 0;
}

static eth_rx_burst_t
bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	/* Disable vector mode RX for Stingray2 for now */
	if (BNXT_CHIP_SR2(bp)) {
		bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts;
	}

#if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \
	!defined(RTE_LIBRTE_IEEE1588)

	/* Vector mode receive cannot be enabled if scattered rx is in use. */
	if (eth_dev->data->scattered_rx)
		goto use_scalar_rx;

	/*
	 * Vector mode receive cannot be enabled if Truflow is enabled or if
	 * asynchronous completions and receive completions can be placed in
	 * the same completion ring.
	 */
	if (BNXT_TRUFLOW_EN(bp) || !BNXT_NUM_ASYNC_CPR(bp))
		goto use_scalar_rx;

	/*
	 * Vector mode receive cannot be enabled if any receive offloads outside
	 * a limited subset have been enabled.
	 */
	if (eth_dev->data->dev_conf.rxmode.offloads &
		~(DEV_RX_OFFLOAD_VLAN_STRIP |
		  DEV_RX_OFFLOAD_KEEP_CRC |
		  DEV_RX_OFFLOAD_JUMBO_FRAME |
		  DEV_RX_OFFLOAD_IPV4_CKSUM |
		  DEV_RX_OFFLOAD_UDP_CKSUM |
		  DEV_RX_OFFLOAD_TCP_CKSUM |
		  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
		  DEV_RX_OFFLOAD_RSS_HASH |
		  DEV_RX_OFFLOAD_VLAN_FILTER))
		goto use_scalar_rx;

#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
	    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
		PMD_DRV_LOG(INFO,
			    "Using AVX2 vector mode receive for port %d\n",
			    eth_dev->data->port_id);
		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts_vec_avx2;
	}
#endif
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		PMD_DRV_LOG(INFO,
			    "Using SSE vector mode receive for port %d\n",
			    eth_dev->data->port_id);
		bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
		return bnxt_recv_pkts_vec;
	}

use_scalar_rx:
	PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d rx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    eth_dev->data->dev_conf.rxmode.offloads);
#endif
	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	return bnxt_recv_pkts;
}

static eth_tx_burst_t
bnxt_transmit_function(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	/* Disable vector mode TX for Stingray2 for now */
	if (BNXT_CHIP_SR2(bp))
		return bnxt_xmit_pkts;

#if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \
	!defined(RTE_LIBRTE_IEEE1588)
	uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;

	/*
	 * Vector mode transmit can be enabled only if not using scatter rx
	 * or tx offloads.
	 */
	if (eth_dev->data->scattered_rx ||
	    (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
	    BNXT_TRUFLOW_EN(bp))
		goto use_scalar_tx;

#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
	    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
		PMD_DRV_LOG(INFO,
			    "Using AVX2 vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec_avx2;
	}
#endif
	if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		PMD_DRV_LOG(INFO,
			    "Using SSE vector mode transmit for port %d\n",
			    eth_dev->data->port_id);
		return bnxt_xmit_pkts_vec;
	}

use_scalar_tx:
	PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
		    eth_dev->data->port_id);
	PMD_DRV_LOG(INFO,
		    "Port %d scatter: %d tx offload: %" PRIX64 "\n",
		    eth_dev->data->port_id,
		    eth_dev->data->scattered_rx,
		    offloads);
#endif
	return bnxt_xmit_pkts;
}

static int bnxt_handle_if_change_status(struct bnxt *bp)
{
	int rc;

	/* Since fw has undergone a reset and lost all contexts,
	 * set fatal flag to not issue hwrm during cleanup
	 */
	bp->flags |= BNXT_FLAG_FATAL_ERROR;
	bnxt_uninit_resources(bp, true);

	/* clear fatal flag so that re-init happens */
	bp->flags &= ~BNXT_FLAG_FATAL_ERROR;
	rc = bnxt_init_resources(bp, true);

	bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;

	return rc;
}

static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -ENOTSUP;

	if (!bp->link_info->link_up)
		rc = bnxt_set_hwrm_link_config(bp, true);
	if (!rc)
		eth_dev->data->dev_link.link_status = 1;

	bnxt_print_link_info(eth_dev);
	return rc;
}

static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	if (!BNXT_SINGLE_PF(bp))
		return -ENOTSUP;

	eth_dev->data->dev_link.link_status = 0;
	bnxt_set_hwrm_link_config(bp, false);
	bp->link_info->link_up = 0;

	return 0;
}

static void bnxt_free_switch_domain(struct bnxt *bp)
{
	int rc = 0;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)))
		return;

	rc = rte_eth_switch_domain_free(bp->switch_domain_id);
	if (rc)
		PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n",
			    bp->switch_domain_id, rc);
}
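
/*
 * PTP current-time sampling is driven by an EAL alarm that re-arms itself
 * every second (US_PER_S); scheduling and cancelling are tracked with the
 * BNXT_FLAGS2_PTP_ALARM_SCHEDULED flag so that stop/start stay balanced.
 */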
static void bnxt_ptp_get_current_time(void *arg)
{
	struct bnxt *bp = arg;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return;

	if (!ptp)
		return;

	bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
				&ptp->current_time);

	rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
	if (rc != 0) {
		PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n");
		bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
	}
}

static int bnxt_schedule_ptp_alarm(struct bnxt *bp)
{
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	int rc;

	if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED)
		return 0;

	bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
				&ptp->current_time);

	rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
	return rc;
}

static void bnxt_cancel_ptp_alarm(struct bnxt *bp)
{
	if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED) {
		rte_eal_alarm_cancel(bnxt_ptp_get_current_time, (void *)bp);
		bp->flags2 &= ~BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
	}
}

static void bnxt_ptp_stop(struct bnxt *bp)
{
	bnxt_cancel_ptp_alarm(bp);
	bp->flags2 &= ~BNXT_FLAGS2_PTP_TIMESYNC_ENABLED;
}

static int bnxt_ptp_start(struct bnxt *bp)
{
	int rc;

	rc = bnxt_schedule_ptp_alarm(bp);
	if (rc != 0) {
		PMD_DRV_LOG(ERR, "Failed to schedule PTP alarm\n");
	} else {
		bp->flags2 |= BNXT_FLAGS2_PTP_TIMESYNC_ENABLED;
		bp->flags2 |= BNXT_FLAGS2_PTP_ALARM_SCHEDULED;
	}

	return rc;
}
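
/*
 * Port stop: swap in dummy burst handlers first so in-flight callers cannot
 * touch freed rings, then tear down in roughly the reverse order of start:
 * interrupts, representors, ULP state, health/PTP alarms, link, queue
 * interrupt vectors, mbufs, HWRM resources and finally the mark table.
 */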
static int bnxt_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct rte_eth_link link;
	int ret;

	eth_dev->data->dev_started = 0;
	eth_dev->data->scattered_rx = 0;

	/* Prevent crashes when queues are still in use */
	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

	bnxt_disable_int(bp);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Stop the child representors for this device */
	ret = bnxt_rep_stop_all(bp);
	if (ret != 0)
		return ret;

	/* delete the bnxt ULP port details */
	bnxt_ulp_port_deinit(bp);

	bnxt_cancel_fw_health_check(bp);

	if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp))
		bnxt_cancel_ptp_alarm(bp);

	/* Do not bring link down during reset recovery */
	if (!is_bnxt_in_error(bp)) {
		bnxt_dev_set_link_down_op(eth_dev);
		/* Wait for link to be reset */
		if (BNXT_SINGLE_PF(bp))
			rte_delay_ms(500);
		/* clear the recorded link status */
		memset(&link, 0, sizeof(link));
		rte_eth_linkstatus_set(eth_dev, &link);
	}

	/* Clean queue intr-vector mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	bnxt_hwrm_port_clr_stats(bp);
	bnxt_free_tx_mbufs(bp);
	bnxt_free_rx_mbufs(bp);
	/* Process any remaining notifications in default completion queue */
	bnxt_int_handler(eth_dev);
	bnxt_shutdown_nic(bp);
	bnxt_hwrm_if_change(bp, false);

	bnxt_free_prev_ring_stats(bp);
	rte_free(bp->mark_table);
	bp->mark_table = NULL;

	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
	bp->rx_cosq_cnt = 0;
	/* All filters are deleted on a port stop. */
	if (BNXT_FLOW_XSTATS_EN(bp))
		bp->flow_stat->flow_count = 0;

	return 0;
}

/* Unload the driver, release resources */
static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

	pthread_mutex_lock(&bp->err_recovery_lock);
	if (bp->flags & BNXT_FLAG_FW_RESET) {
		PMD_DRV_LOG(ERR,
			    "Adapter recovering from error..Please retry\n");
		pthread_mutex_unlock(&bp->err_recovery_lock);
		return -EAGAIN;
	}
	pthread_mutex_unlock(&bp->err_recovery_lock);

	return bnxt_dev_stop(eth_dev);
}

static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
	int vlan_mask = 0;
	int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT;

	if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Queues are not configured yet!\n");
		return -EINVAL;
	}

	if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS)
		PMD_DRV_LOG(ERR,
			    "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
			    bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	do {
		rc = bnxt_hwrm_if_change(bp, true);
		if (rc == 0 || rc != -EAGAIN)
			break;

		rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL);
	} while (retry_cnt--);

	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) {
		rc = bnxt_handle_if_change_status(bp);
		if (rc)
			return rc;
	}

	bnxt_enable_int(bp);

	eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);

	rc = bnxt_start_nic(bp);
	if (rc)
		goto error;

	rc = bnxt_alloc_prev_ring_stats(bp);
	if (rc)
		goto error;

	eth_dev->data->dev_started = 1;

	bnxt_link_update_op(eth_dev, 1);

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		vlan_mask |= ETH_VLAN_FILTER_MASK;
	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vlan_mask |= ETH_VLAN_STRIP_MASK;
	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
	if (rc)
		goto error;

	/* Initialize bnxt ULP port details */
	rc = bnxt_ulp_port_init(bp);
	if (rc)
		goto error;

	eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
	eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);

	bnxt_schedule_fw_health_check(bp);

	if (BNXT_P5_PTP_TIMESYNC_ENABLED(bp))
		bnxt_schedule_ptp_alarm(bp);

	return 0;

error:
	bnxt_dev_stop(eth_dev);
	return rc;
}

static void
bnxt_uninit_locks(struct bnxt *bp)
{
	pthread_mutex_destroy(&bp->flow_lock);
	pthread_mutex_destroy(&bp->def_cp_lock);
	pthread_mutex_destroy(&bp->health_check_lock);
	pthread_mutex_destroy(&bp->err_recovery_lock);
	if (bp->rep_info) {
		pthread_mutex_destroy(&bp->rep_info->vfr_lock);
		pthread_mutex_destroy(&bp->rep_info->vfr_start_lock);
	}
}

static void bnxt_drv_uninit(struct bnxt *bp)
{
	bnxt_free_leds_info(bp);
	bnxt_free_cos_queues(bp);
	bnxt_free_link_info(bp);
	bnxt_free_parent_info(bp);
	bnxt_uninit_locks(bp);

	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
	bp->tx_mem_zone = NULL;
	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
	bp->rx_mem_zone = NULL;

	bnxt_free_vf_info(bp);
	bnxt_free_pf_info(bp);

	rte_free(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pthread_mutex_lock(&bp->err_recovery_lock);
	if (bp->flags & BNXT_FLAG_FW_RESET) {
		PMD_DRV_LOG(ERR,
			    "Adapter recovering from error...Please retry\n");
		pthread_mutex_unlock(&bp->err_recovery_lock);
		return -EAGAIN;
	}
	pthread_mutex_unlock(&bp->err_recovery_lock);

	/* cancel the recovery handler before remove dev */
	rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
	rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
	bnxt_cancel_fc_thread(bp);

	if (eth_dev->data->dev_started)
		ret = bnxt_dev_stop(eth_dev);

	bnxt_uninit_resources(bp, false);

	bnxt_drv_uninit(bp);

	return ret;
}

static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
				    uint32_t index)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	uint32_t i;

	if (is_bnxt_in_error(bp))
		return;

	/*
	 * Loop through all VNICs from the specified filter flow pools to
	 * remove the corresponding MAC addr filter
	 */
	for (i = 0; i < bp->nr_vnics; i++) {
		if (!(pool_mask & (1ULL << i)))
			continue;

		vnic = &bp->vnic_info[i];
		filter = STAILQ_FIRST(&vnic->filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);
			if (filter->mac_index == index) {
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				bnxt_hwrm_clear_l2_filter(bp, filter);
				bnxt_free_filter(bp, filter);
			}
			filter = temp_filter;
		}
	}
}

static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			       struct rte_ether_addr *mac_addr, uint32_t index,
			       uint32_t pool)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	/* Attach requested MAC address to the new l2_filter */
	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->mac_index == index) {
			PMD_DRV_LOG(DEBUG,
				    "MAC addr already existed for pool %d\n",
				    pool);
			return 0;
		}
	}

	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
		return -ENODEV;
	}

	/* bnxt_alloc_filter copies default MAC to filter->l2_addr. So,
	 * if the MAC that's been programmed now is a different one, then,
	 * copy that addr to filter->l2_addr
	 */
	if (mac_addr)
		memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
	if (!rc) {
		filter->mac_index = index;
		if (filter->mac_index == 0)
			STAILQ_INSERT_HEAD(&vnic->filter, filter, next);
		else
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	} else {
		bnxt_free_filter(bp, filter);
	}

	return rc;
}

static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
		return -ENOTSUP;
	}

	if (!vnic) {
		PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
		return -EINVAL;
	}

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool);

	return rc;
}

int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
	int rc = 0;
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_link new;
	int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT :
			BNXT_MIN_LINK_WAIT_CNT;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	memset(&new, 0, sizeof(new));

	if (bp->link_info == NULL)
		goto out;

	do {
		/* Retrieve link info from hardware */
		rc = bnxt_get_hwrm_link_config(bp, &new);
		if (rc) {
			new.link_speed = ETH_LINK_SPEED_100M;
			new.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR,
				    "Failed to retrieve link rc = 0x%x!\n", rc);
			goto out;
		}

		if (!wait_to_complete || new.link_status)
			break;

		rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
	} while (cnt--);

	/* Only single function PF can bring phy down.
	 * When port is stopped, report link down for VF/MH/NPAR functions.
	 */
	if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started)
		memset(&new, 0, sizeof(new));

out:
	/* Timed out or success */
	if (new.link_status != eth_dev->data->dev_link.link_status ||
	    new.link_speed != eth_dev->data->dev_link.link_speed) {
		rte_eth_linkstatus_set(eth_dev, &new);
		bnxt_print_link_info(eth_dev);
	}

	return rc;
}

static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}

static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	uint32_t old_flags;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Filter settings will get applied when port is started */
	if (!eth_dev->data->dev_started)
		return 0;

	if (bp->vnic_info == NULL)
		return 0;

	vnic = BNXT_GET_DEFAULT_VNIC(bp);

	old_flags = vnic->flags;
	vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
	if (rc != 0)
		vnic->flags = old_flags;

	return rc;
}
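
/*
 * On P5 (Thor) chips the VNIC RSS table holds a pair of firmware IDs per
 * entry - the Rx ring ID followed by its completion ring ID - which is why
 * the RETA update/query code below indexes rss_table[i * 2]. Older chips
 * store one ring-group ID per entry instead.
 */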
*/ 1989 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1990 { 1991 if (qid >= bp->rx_nr_rings) 1992 return NULL; 1993 1994 return bp->eth_dev->data->rx_queues[qid]; 1995 } 1996 1997 /* Return rxq corresponding to a given rss table ring/group ID. */ 1998 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 1999 { 2000 struct bnxt_rx_queue *rxq; 2001 unsigned int i; 2002 2003 if (!BNXT_HAS_RING_GRPS(bp)) { 2004 for (i = 0; i < bp->rx_nr_rings; i++) { 2005 rxq = bp->eth_dev->data->rx_queues[i]; 2006 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 2007 return rxq->index; 2008 } 2009 } else { 2010 for (i = 0; i < bp->rx_nr_rings; i++) { 2011 if (bp->grp_info[i].fw_grp_id == fwr) 2012 return i; 2013 } 2014 } 2015 2016 return INVALID_HW_RING_ID; 2017 } 2018 2019 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 2020 struct rte_eth_rss_reta_entry64 *reta_conf, 2021 uint16_t reta_size) 2022 { 2023 struct bnxt *bp = eth_dev->data->dev_private; 2024 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2025 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2026 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2027 uint16_t idx, sft; 2028 int i, rc; 2029 2030 rc = is_bnxt_in_error(bp); 2031 if (rc) 2032 return rc; 2033 2034 if (!vnic->rss_table) 2035 return -EINVAL; 2036 2037 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) 2038 return -EINVAL; 2039 2040 if (reta_size != tbl_size) { 2041 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2042 "(%d) must equal the size supported by the hardware " 2043 "(%d)\n", reta_size, tbl_size); 2044 return -EINVAL; 2045 } 2046 2047 for (i = 0; i < reta_size; i++) { 2048 struct bnxt_rx_queue *rxq; 2049 2050 idx = i / RTE_RETA_GROUP_SIZE; 2051 sft = i % RTE_RETA_GROUP_SIZE; 2052 2053 if (!(reta_conf[idx].mask & (1ULL << sft))) 2054 continue; 2055 2056 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 2057 if (!rxq) { 2058 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 2059 return -EINVAL; 2060 } 2061 2062 if (BNXT_CHIP_P5(bp)) { 2063 vnic->rss_table[i * 2] = 2064 rxq->rx_ring->rx_ring_struct->fw_ring_id; 2065 vnic->rss_table[i * 2 + 1] = 2066 rxq->cp_ring->cp_ring_struct->fw_ring_id; 2067 } else { 2068 vnic->rss_table[i] = 2069 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 2070 } 2071 } 2072 2073 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2074 return rc; 2075 } 2076 2077 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 2078 struct rte_eth_rss_reta_entry64 *reta_conf, 2079 uint16_t reta_size) 2080 { 2081 struct bnxt *bp = eth_dev->data->dev_private; 2082 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2083 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 2084 uint16_t idx, sft, i; 2085 int rc; 2086 2087 rc = is_bnxt_in_error(bp); 2088 if (rc) 2089 return rc; 2090 2091 if (!vnic) 2092 return -EINVAL; 2093 if (!vnic->rss_table) 2094 return -EINVAL; 2095 2096 if (reta_size != tbl_size) { 2097 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 2098 "(%d) must equal the size supported by the hardware " 2099 "(%d)\n", reta_size, tbl_size); 2100 return -EINVAL; 2101 } 2102 2103 for (idx = 0, i = 0; i < reta_size; i++) { 2104 idx = i / RTE_RETA_GROUP_SIZE; 2105 sft = i % RTE_RETA_GROUP_SIZE; 2106 2107 if (reta_conf[idx].mask & (1ULL << sft)) { 2108 uint16_t qid; 2109 2110 if (BNXT_CHIP_P5(bp)) 2111 qid = bnxt_rss_to_qid(bp, 2112 vnic->rss_table[i * 2]); 2113 else 2114 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 2115 2116 if (qid == INVALID_HW_RING_ID) { 2117 
PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n"); 2118 return -EINVAL; 2119 } 2120 reta_conf[idx].reta[sft] = qid; 2121 } 2122 } 2123 2124 return 0; 2125 } 2126 2127 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 2128 struct rte_eth_rss_conf *rss_conf) 2129 { 2130 struct bnxt *bp = eth_dev->data->dev_private; 2131 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 2132 struct bnxt_vnic_info *vnic; 2133 int rc; 2134 2135 rc = is_bnxt_in_error(bp); 2136 if (rc) 2137 return rc; 2138 2139 /* 2140 * If RSS enablement were different than dev_configure, 2141 * then return -EINVAL 2142 */ 2143 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { 2144 if (!rss_conf->rss_hf) 2145 PMD_DRV_LOG(ERR, "Hash type NONE\n"); 2146 } else { 2147 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 2148 return -EINVAL; 2149 } 2150 2151 bp->flags |= BNXT_FLAG_UPDATE_HASH; 2152 memcpy(ð_dev->data->dev_conf.rx_adv_conf.rss_conf, 2153 rss_conf, 2154 sizeof(*rss_conf)); 2155 2156 /* Update the default RSS VNIC(s) */ 2157 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2158 vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); 2159 vnic->hash_mode = 2160 bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, 2161 ETH_RSS_LEVEL(rss_conf->rss_hf)); 2162 2163 /* 2164 * If hashkey is not specified, use the previously configured 2165 * hashkey 2166 */ 2167 if (!rss_conf->rss_key) 2168 goto rss_config; 2169 2170 if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) { 2171 PMD_DRV_LOG(ERR, 2172 "Invalid hashkey length, should be %d bytes\n", 2173 HW_HASH_KEY_SIZE); 2174 return -EINVAL; 2175 } 2176 memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); 2177 2178 rss_config: 2179 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 2180 return rc; 2181 } 2182 2183 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 2184 struct rte_eth_rss_conf *rss_conf) 2185 { 2186 struct bnxt *bp = eth_dev->data->dev_private; 2187 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2188 int len, rc; 2189 uint32_t hash_types; 2190 2191 rc = is_bnxt_in_error(bp); 2192 if (rc) 2193 return rc; 2194 2195 /* RSS configuration is the same for all VNICs */ 2196 if (vnic && vnic->rss_hash_key) { 2197 if (rss_conf->rss_key) { 2198 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 
2199 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 2200 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 2201 } 2202 2203 hash_types = vnic->hash_type; 2204 rss_conf->rss_hf = 0; 2205 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 2206 rss_conf->rss_hf |= ETH_RSS_IPV4; 2207 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 2208 } 2209 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 2210 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 2211 hash_types &= 2212 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 2213 } 2214 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 2215 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; 2216 hash_types &= 2217 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 2218 } 2219 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 2220 rss_conf->rss_hf |= ETH_RSS_IPV6; 2221 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 2222 } 2223 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 2224 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 2225 hash_types &= 2226 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 2227 } 2228 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 2229 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; 2230 hash_types &= 2231 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 2232 } 2233 2234 rss_conf->rss_hf |= 2235 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 2236 2237 if (hash_types) { 2238 PMD_DRV_LOG(ERR, 2239 "Unknown RSS config from firmware (%08x), RSS disabled", 2240 vnic->hash_type); 2241 return -ENOTSUP; 2242 } 2243 } else { 2244 rss_conf->rss_hf = 0; 2245 } 2246 return 0; 2247 } 2248 2249 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2250 struct rte_eth_fc_conf *fc_conf) 2251 { 2252 struct bnxt *bp = dev->data->dev_private; 2253 struct rte_eth_link link_info; 2254 int rc; 2255 2256 rc = is_bnxt_in_error(bp); 2257 if (rc) 2258 return rc; 2259 2260 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2261 if (rc) 2262 return rc; 2263 2264 memset(fc_conf, 0, sizeof(*fc_conf)); 2265 if (bp->link_info->auto_pause) 2266 fc_conf->autoneg = 1; 2267 switch (bp->link_info->pause) { 2268 case 0: 2269 fc_conf->mode = RTE_FC_NONE; 2270 break; 2271 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2272 fc_conf->mode = RTE_FC_TX_PAUSE; 2273 break; 2274 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2275 fc_conf->mode = RTE_FC_RX_PAUSE; 2276 break; 2277 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2278 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2279 fc_conf->mode = RTE_FC_FULL; 2280 break; 2281 } 2282 return 0; 2283 } 2284 2285 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2286 struct rte_eth_fc_conf *fc_conf) 2287 { 2288 struct bnxt *bp = dev->data->dev_private; 2289 int rc; 2290 2291 rc = is_bnxt_in_error(bp); 2292 if (rc) 2293 return rc; 2294 2295 if (!BNXT_SINGLE_PF(bp)) { 2296 PMD_DRV_LOG(ERR, 2297 "Flow Control Settings cannot be modified on VF or on shared PF\n"); 2298 return -ENOTSUP; 2299 } 2300 2301 switch (fc_conf->mode) { 2302 case RTE_FC_NONE: 2303 bp->link_info->auto_pause = 0; 2304 bp->link_info->force_pause = 0; 2305 break; 2306 case RTE_FC_RX_PAUSE: 2307 if (fc_conf->autoneg) { 2308 bp->link_info->auto_pause = 2309 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2310 bp->link_info->force_pause = 0; 2311 } else { 2312 bp->link_info->auto_pause = 0; 2313 bp->link_info->force_pause = 2314 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2315 } 2316 break; 2317 case RTE_FC_TX_PAUSE: 2318 if (fc_conf->autoneg) { 2319 bp->link_info->auto_pause = 2320 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2321 
bp->link_info->force_pause = 0; 2322 } else { 2323 bp->link_info->auto_pause = 0; 2324 bp->link_info->force_pause = 2325 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2326 } 2327 break; 2328 case RTE_FC_FULL: 2329 if (fc_conf->autoneg) { 2330 bp->link_info->auto_pause = 2331 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2332 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2333 bp->link_info->force_pause = 0; 2334 } else { 2335 bp->link_info->auto_pause = 0; 2336 bp->link_info->force_pause = 2337 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2338 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2339 } 2340 break; 2341 } 2342 return bnxt_set_hwrm_link_config(bp, true); 2343 } 2344 2345 /* Add UDP tunneling port */ 2346 static int 2347 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2348 struct rte_eth_udp_tunnel *udp_tunnel) 2349 { 2350 struct bnxt *bp = eth_dev->data->dev_private; 2351 uint16_t tunnel_type = 0; 2352 int rc = 0; 2353 2354 rc = is_bnxt_in_error(bp); 2355 if (rc) 2356 return rc; 2357 2358 switch (udp_tunnel->prot_type) { 2359 case RTE_TUNNEL_TYPE_VXLAN: 2360 if (bp->vxlan_port_cnt) { 2361 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2362 udp_tunnel->udp_port); 2363 if (bp->vxlan_port != udp_tunnel->udp_port) { 2364 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2365 return -ENOSPC; 2366 } 2367 bp->vxlan_port_cnt++; 2368 return 0; 2369 } 2370 tunnel_type = 2371 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2372 break; 2373 case RTE_TUNNEL_TYPE_GENEVE: 2374 if (bp->geneve_port_cnt) { 2375 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2376 udp_tunnel->udp_port); 2377 if (bp->geneve_port != udp_tunnel->udp_port) { 2378 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2379 return -ENOSPC; 2380 } 2381 bp->geneve_port_cnt++; 2382 return 0; 2383 } 2384 tunnel_type = 2385 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2386 break; 2387 default: 2388 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2389 return -ENOTSUP; 2390 } 2391 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2392 tunnel_type); 2393 2394 if (rc != 0) 2395 return rc; 2396 2397 if (tunnel_type == 2398 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN) 2399 bp->vxlan_port_cnt++; 2400 2401 if (tunnel_type == 2402 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE) 2403 bp->geneve_port_cnt++; 2404 2405 return rc; 2406 } 2407 2408 static int 2409 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2410 struct rte_eth_udp_tunnel *udp_tunnel) 2411 { 2412 struct bnxt *bp = eth_dev->data->dev_private; 2413 uint16_t tunnel_type = 0; 2414 uint16_t port = 0; 2415 int rc = 0; 2416 2417 rc = is_bnxt_in_error(bp); 2418 if (rc) 2419 return rc; 2420 2421 switch (udp_tunnel->prot_type) { 2422 case RTE_TUNNEL_TYPE_VXLAN: 2423 if (!bp->vxlan_port_cnt) { 2424 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2425 return -EINVAL; 2426 } 2427 if (bp->vxlan_port != udp_tunnel->udp_port) { 2428 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2429 udp_tunnel->udp_port, bp->vxlan_port); 2430 return -EINVAL; 2431 } 2432 if (--bp->vxlan_port_cnt) 2433 return 0; 2434 2435 tunnel_type = 2436 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2437 port = bp->vxlan_fw_dst_port_id; 2438 break; 2439 case RTE_TUNNEL_TYPE_GENEVE: 2440 if (!bp->geneve_port_cnt) { 2441 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2442 return -EINVAL; 2443 } 2444 if (bp->geneve_port != udp_tunnel->udp_port) { 2445 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2446 udp_tunnel->udp_port, bp->geneve_port); 2447 return -EINVAL; 2448 } 2449 if (--bp->geneve_port_cnt) 2450 return 0; 2451 2452 tunnel_type = 2453 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2454 port = bp->geneve_fw_dst_port_id; 2455 break; 2456 default: 2457 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2458 return -ENOTSUP; 2459 } 2460 2461 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2462 return rc; 2463 } 2464 2465 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2466 { 2467 struct bnxt_filter_info *filter; 2468 struct bnxt_vnic_info *vnic; 2469 int rc = 0; 2470 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2471 2472 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2473 filter = STAILQ_FIRST(&vnic->filter); 2474 while (filter) { 2475 /* Search for this matching MAC+VLAN filter */ 2476 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2477 /* Delete the filter */ 2478 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2479 if (rc) 2480 return rc; 2481 STAILQ_REMOVE(&vnic->filter, filter, 2482 bnxt_filter_info, next); 2483 bnxt_free_filter(bp, filter); 2484 PMD_DRV_LOG(INFO, 2485 "Deleted vlan filter for %d\n", 2486 vlan_id); 2487 return 0; 2488 } 2489 filter = STAILQ_NEXT(filter, next); 2490 } 2491 return -ENOENT; 2492 } 2493 2494 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2495 { 2496 struct bnxt_filter_info *filter; 2497 struct bnxt_vnic_info *vnic; 2498 int rc = 0; 2499 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2500 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2501 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2502 2503 /* Implementation notes on the use of VNIC in this command: 2504 * 2505 * By default, these filters belong to default vnic for the function. 2506 * Once these filters are set up, only destination VNIC can be modified. 2507 * If the destination VNIC is not specified in this command, 2508 * then the HWRM shall only create an l2 context id. 2509 */ 2510 2511 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2512 filter = STAILQ_FIRST(&vnic->filter); 2513 /* Check if the VLAN has already been added */ 2514 while (filter) { 2515 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2516 return -EEXIST; 2517 2518 filter = STAILQ_NEXT(filter, next); 2519 } 2520 2521 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2522 * command to create MAC+VLAN filter with the right flags, enables set. 2523 */ 2524 filter = bnxt_alloc_filter(bp); 2525 if (!filter) { 2526 PMD_DRV_LOG(ERR, 2527 "MAC/VLAN filter alloc failed\n"); 2528 return -ENOMEM; 2529 } 2530 /* MAC + VLAN ID filter */ 2531 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2532 * untagged packets are received 2533 * 2534 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2535 * packets and only the programmed vlan's packets are received 2536 */ 2537 filter->l2_ivlan = vlan_id; 2538 filter->l2_ivlan_mask = 0x0FFF; 2539 filter->enables |= en; 2540 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2541 2542 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2543 if (rc) { 2544 /* Free the newly allocated filter as we were 2545 * not able to create the filter in hardware. 
2546 */ 2547 bnxt_free_filter(bp, filter); 2548 return rc; 2549 } 2550 2551 filter->mac_index = 0; 2552 /* Add this new filter to the list */ 2553 if (vlan_id == 0) 2554 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2555 else 2556 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2557 2558 PMD_DRV_LOG(INFO, 2559 "Added Vlan filter for %d\n", vlan_id); 2560 return rc; 2561 } 2562 2563 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2564 uint16_t vlan_id, int on) 2565 { 2566 struct bnxt *bp = eth_dev->data->dev_private; 2567 int rc; 2568 2569 rc = is_bnxt_in_error(bp); 2570 if (rc) 2571 return rc; 2572 2573 if (!eth_dev->data->dev_started) { 2574 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2575 return -EINVAL; 2576 } 2577 2578 /* These operations apply to ALL existing MAC/VLAN filters */ 2579 if (on) 2580 return bnxt_add_vlan_filter(bp, vlan_id); 2581 else 2582 return bnxt_del_vlan_filter(bp, vlan_id); 2583 } 2584 2585 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2586 struct bnxt_vnic_info *vnic) 2587 { 2588 struct bnxt_filter_info *filter; 2589 int rc; 2590 2591 filter = STAILQ_FIRST(&vnic->filter); 2592 while (filter) { 2593 if (filter->mac_index == 0 && 2594 !memcmp(filter->l2_addr, bp->mac_addr, 2595 RTE_ETHER_ADDR_LEN)) { 2596 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2597 if (!rc) { 2598 STAILQ_REMOVE(&vnic->filter, filter, 2599 bnxt_filter_info, next); 2600 bnxt_free_filter(bp, filter); 2601 } 2602 return rc; 2603 } 2604 filter = STAILQ_NEXT(filter, next); 2605 } 2606 return 0; 2607 } 2608 2609 static int 2610 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2611 { 2612 struct bnxt_vnic_info *vnic; 2613 unsigned int i; 2614 int rc; 2615 2616 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2617 if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { 2618 /* Remove any VLAN filters programmed */ 2619 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2620 bnxt_del_vlan_filter(bp, i); 2621 2622 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2623 if (rc) 2624 return rc; 2625 } else { 2626 /* Default filter will allow packets that match the 2627 * dest mac. 
So, it has to be deleted, otherwise, we 2628 * will endup receiving vlan packets for which the 2629 * filter is not programmed, when hw-vlan-filter 2630 * configuration is ON 2631 */ 2632 bnxt_del_dflt_mac_filter(bp, vnic); 2633 /* This filter will allow only untagged packets */ 2634 bnxt_add_vlan_filter(bp, 0); 2635 } 2636 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2637 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); 2638 2639 return 0; 2640 } 2641 2642 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2643 { 2644 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2645 unsigned int i; 2646 int rc; 2647 2648 /* Destroy vnic filters and vnic */ 2649 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2650 DEV_RX_OFFLOAD_VLAN_FILTER) { 2651 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2652 bnxt_del_vlan_filter(bp, i); 2653 } 2654 bnxt_del_dflt_mac_filter(bp, vnic); 2655 2656 rc = bnxt_hwrm_vnic_ctx_free(bp, vnic); 2657 if (rc) 2658 return rc; 2659 2660 rc = bnxt_hwrm_vnic_free(bp, vnic); 2661 if (rc) 2662 return rc; 2663 2664 rte_free(vnic->fw_grp_ids); 2665 vnic->fw_grp_ids = NULL; 2666 2667 vnic->rx_queue_cnt = 0; 2668 2669 return 0; 2670 } 2671 2672 static int 2673 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2674 { 2675 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2676 int rc; 2677 2678 /* Destroy, recreate and reconfigure the default vnic */ 2679 rc = bnxt_free_one_vnic(bp, 0); 2680 if (rc) 2681 return rc; 2682 2683 /* default vnic 0 */ 2684 rc = bnxt_setup_one_vnic(bp, 0); 2685 if (rc) 2686 return rc; 2687 2688 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2689 DEV_RX_OFFLOAD_VLAN_FILTER) { 2690 rc = bnxt_add_vlan_filter(bp, 0); 2691 if (rc) 2692 return rc; 2693 rc = bnxt_restore_vlan_filters(bp); 2694 if (rc) 2695 return rc; 2696 } else { 2697 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2698 if (rc) 2699 return rc; 2700 } 2701 2702 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2703 if (rc) 2704 return rc; 2705 2706 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2707 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); 2708 2709 return rc; 2710 } 2711 2712 static int 2713 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2714 { 2715 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2716 struct bnxt *bp = dev->data->dev_private; 2717 int rc; 2718 2719 rc = is_bnxt_in_error(bp); 2720 if (rc) 2721 return rc; 2722 2723 /* Filter settings will get applied when port is started */ 2724 if (!dev->data->dev_started) 2725 return 0; 2726 2727 if (mask & ETH_VLAN_FILTER_MASK) { 2728 /* Enable or disable VLAN filtering */ 2729 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2730 if (rc) 2731 return rc; 2732 } 2733 2734 if (mask & ETH_VLAN_STRIP_MASK) { 2735 /* Enable or disable VLAN stripping */ 2736 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2737 if (rc) 2738 return rc; 2739 } 2740 2741 if (mask & ETH_VLAN_EXTEND_MASK) { 2742 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2743 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2744 else 2745 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2746 } 2747 2748 return 0; 2749 } 2750 2751 static int 2752 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2753 uint16_t tpid) 2754 { 2755 struct bnxt *bp = dev->data->dev_private; 2756 int qinq = dev->data->dev_conf.rxmode.offloads & 2757 DEV_RX_OFFLOAD_VLAN_EXTEND; 2758 2759 if (vlan_type != ETH_VLAN_TYPE_INNER && 2760 vlan_type != ETH_VLAN_TYPE_OUTER) { 2761 PMD_DRV_LOG(ERR, 2762 "Unsupported 
vlan type."); 2763 return -EINVAL; 2764 } 2765 if (!qinq) { 2766 PMD_DRV_LOG(ERR, 2767 "QinQ not enabled. Needs to be ON as we can " 2768 "accelerate only outer vlan\n"); 2769 return -EINVAL; 2770 } 2771 2772 if (vlan_type == ETH_VLAN_TYPE_OUTER) { 2773 switch (tpid) { 2774 case RTE_ETHER_TYPE_QINQ: 2775 bp->outer_tpid_bd = 2776 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2777 break; 2778 case RTE_ETHER_TYPE_VLAN: 2779 bp->outer_tpid_bd = 2780 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2781 break; 2782 case RTE_ETHER_TYPE_QINQ1: 2783 bp->outer_tpid_bd = 2784 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2785 break; 2786 case RTE_ETHER_TYPE_QINQ2: 2787 bp->outer_tpid_bd = 2788 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2789 break; 2790 case RTE_ETHER_TYPE_QINQ3: 2791 bp->outer_tpid_bd = 2792 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2793 break; 2794 default: 2795 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2796 return -EINVAL; 2797 } 2798 bp->outer_tpid_bd |= tpid; 2799 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2800 } else if (vlan_type == ETH_VLAN_TYPE_INNER) { 2801 PMD_DRV_LOG(ERR, 2802 "Can accelerate only outer vlan in QinQ\n"); 2803 return -EINVAL; 2804 } 2805 2806 return 0; 2807 } 2808 2809 static int 2810 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2811 struct rte_ether_addr *addr) 2812 { 2813 struct bnxt *bp = dev->data->dev_private; 2814 /* Default Filter is tied to VNIC 0 */ 2815 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2816 int rc; 2817 2818 rc = is_bnxt_in_error(bp); 2819 if (rc) 2820 return rc; 2821 2822 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2823 return -EPERM; 2824 2825 if (rte_is_zero_ether_addr(addr)) 2826 return -EINVAL; 2827 2828 /* Filter settings will get applied when port is started */ 2829 if (!dev->data->dev_started) 2830 return 0; 2831 2832 /* Check if the requested MAC is already added */ 2833 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2834 return 0; 2835 2836 /* Destroy filter and re-create it */ 2837 bnxt_del_dflt_mac_filter(bp, vnic); 2838 2839 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2840 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 2841 /* This filter will allow only untagged packets */ 2842 rc = bnxt_add_vlan_filter(bp, 0); 2843 } else { 2844 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2845 } 2846 2847 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2848 return rc; 2849 } 2850 2851 static int 2852 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2853 struct rte_ether_addr *mc_addr_set, 2854 uint32_t nb_mc_addr) 2855 { 2856 struct bnxt *bp = eth_dev->data->dev_private; 2857 char *mc_addr_list = (char *)mc_addr_set; 2858 struct bnxt_vnic_info *vnic; 2859 uint32_t off = 0, i = 0; 2860 int rc; 2861 2862 rc = is_bnxt_in_error(bp); 2863 if (rc) 2864 return rc; 2865 2866 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2867 2868 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2869 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2870 goto allmulti; 2871 } 2872 2873 /* TODO Check for Duplicate mcast addresses */ 2874 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2875 for (i = 0; i < nb_mc_addr; i++) { 2876 memcpy(vnic->mc_list + off, &mc_addr_list[i], 2877 RTE_ETHER_ADDR_LEN); 2878 off += RTE_ETHER_ADDR_LEN; 2879 } 2880 2881 vnic->mc_addr_cnt = i; 2882 if (vnic->mc_addr_cnt) 2883 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2884 else 2885 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2886 2887 allmulti: 2888 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2889 } 2890 2891 static int 2892 bnxt_fw_version_get(struct rte_eth_dev 
*dev, char *fw_version, size_t fw_size) 2893 { 2894 struct bnxt *bp = dev->data->dev_private; 2895 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2896 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2897 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2898 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2899 int ret; 2900 2901 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2902 fw_major, fw_minor, fw_updt, fw_rsvd); 2903 if (ret < 0) 2904 return -EINVAL; 2905 2906 ret += 1; /* add the size of '\0' */ 2907 if (fw_size < (size_t)ret) 2908 return ret; 2909 else 2910 return 0; 2911 } 2912 2913 static void 2914 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2915 struct rte_eth_rxq_info *qinfo) 2916 { 2917 struct bnxt *bp = dev->data->dev_private; 2918 struct bnxt_rx_queue *rxq; 2919 2920 if (is_bnxt_in_error(bp)) 2921 return; 2922 2923 rxq = dev->data->rx_queues[queue_id]; 2924 2925 qinfo->mp = rxq->mb_pool; 2926 qinfo->scattered_rx = dev->data->scattered_rx; 2927 qinfo->nb_desc = rxq->nb_rx_desc; 2928 2929 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2930 qinfo->conf.rx_drop_en = rxq->drop_en; 2931 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2932 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2933 } 2934 2935 static void 2936 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2937 struct rte_eth_txq_info *qinfo) 2938 { 2939 struct bnxt *bp = dev->data->dev_private; 2940 struct bnxt_tx_queue *txq; 2941 2942 if (is_bnxt_in_error(bp)) 2943 return; 2944 2945 txq = dev->data->tx_queues[queue_id]; 2946 2947 qinfo->nb_desc = txq->nb_tx_desc; 2948 2949 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2950 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2951 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2952 2953 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2954 qinfo->conf.tx_rs_thresh = 0; 2955 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2956 qinfo->conf.offloads = txq->offloads; 2957 } 2958 2959 static const struct { 2960 eth_rx_burst_t pkt_burst; 2961 const char *info; 2962 } bnxt_rx_burst_info[] = { 2963 {bnxt_recv_pkts, "Scalar"}, 2964 #if defined(RTE_ARCH_X86) 2965 {bnxt_recv_pkts_vec, "Vector SSE"}, 2966 #endif 2967 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 2968 {bnxt_recv_pkts_vec_avx2, "Vector AVX2"}, 2969 #endif 2970 #if defined(RTE_ARCH_ARM64) 2971 {bnxt_recv_pkts_vec, "Vector Neon"}, 2972 #endif 2973 }; 2974 2975 static int 2976 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2977 struct rte_eth_burst_mode *mode) 2978 { 2979 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2980 size_t i; 2981 2982 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 2983 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 2984 snprintf(mode->info, sizeof(mode->info), "%s", 2985 bnxt_rx_burst_info[i].info); 2986 return 0; 2987 } 2988 } 2989 2990 return -EINVAL; 2991 } 2992 2993 static const struct { 2994 eth_tx_burst_t pkt_burst; 2995 const char *info; 2996 } bnxt_tx_burst_info[] = { 2997 {bnxt_xmit_pkts, "Scalar"}, 2998 #if defined(RTE_ARCH_X86) 2999 {bnxt_xmit_pkts_vec, "Vector SSE"}, 3000 #endif 3001 #if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT) 3002 {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"}, 3003 #endif 3004 #if defined(RTE_ARCH_ARM64) 3005 {bnxt_xmit_pkts_vec, "Vector Neon"}, 3006 #endif 3007 }; 3008 3009 static int 3010 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3011 struct rte_eth_burst_mode *mode) 3012 { 3013 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 
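/* Report the Tx burst mode by matching the active burst function against the bnxt_tx_burst_info[] table above, mirroring the Rx burst mode lookup. */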
3014 size_t i; 3015 3016 for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) { 3017 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 3018 snprintf(mode->info, sizeof(mode->info), "%s", 3019 bnxt_tx_burst_info[i].info); 3020 return 0; 3021 } 3022 } 3023 3024 return -EINVAL; 3025 } 3026 3027 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 3028 { 3029 struct bnxt *bp = eth_dev->data->dev_private; 3030 uint32_t new_pkt_size; 3031 uint32_t rc = 0; 3032 uint32_t i; 3033 3034 rc = is_bnxt_in_error(bp); 3035 if (rc) 3036 return rc; 3037 3038 /* Exit if receive queues are not configured yet */ 3039 if (!eth_dev->data->nb_rx_queues) 3040 return rc; 3041 3042 new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 3043 VLAN_TAG_SIZE * BNXT_NUM_VLANS; 3044 3045 /* 3046 * Disallow any MTU change that would require scattered receive support 3047 * if it is not already enabled. 3048 */ 3049 if (eth_dev->data->dev_started && 3050 !eth_dev->data->scattered_rx && 3051 (new_pkt_size > 3052 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 3053 PMD_DRV_LOG(ERR, 3054 "MTU change would require scattered rx support. "); 3055 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 3056 return -EINVAL; 3057 } 3058 3059 if (new_mtu > RTE_ETHER_MTU) { 3060 bp->flags |= BNXT_FLAG_JUMBO; 3061 bp->eth_dev->data->dev_conf.rxmode.offloads |= 3062 DEV_RX_OFFLOAD_JUMBO_FRAME; 3063 } else { 3064 bp->eth_dev->data->dev_conf.rxmode.offloads &= 3065 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 3066 bp->flags &= ~BNXT_FLAG_JUMBO; 3067 } 3068 3069 /* Is there a change in mtu setting? */ 3070 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size) 3071 return rc; 3072 3073 for (i = 0; i < bp->nr_vnics; i++) { 3074 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 3075 uint16_t size = 0; 3076 3077 vnic->mru = BNXT_VNIC_MRU(new_mtu); 3078 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 3079 if (rc) 3080 break; 3081 3082 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 3083 size -= RTE_PKTMBUF_HEADROOM; 3084 3085 if (size < new_mtu) { 3086 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 3087 if (rc) 3088 return rc; 3089 } 3090 } 3091 3092 if (!rc) 3093 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; 3094 3095 if (bnxt_hwrm_config_host_mtu(bp)) 3096 PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n"); 3097 3098 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 3099 3100 return rc; 3101 } 3102 3103 static int 3104 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 3105 { 3106 struct bnxt *bp = dev->data->dev_private; 3107 uint16_t vlan = bp->vlan; 3108 int rc; 3109 3110 rc = is_bnxt_in_error(bp); 3111 if (rc) 3112 return rc; 3113 3114 if (!BNXT_SINGLE_PF(bp)) { 3115 PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n"); 3116 return -ENOTSUP; 3117 } 3118 bp->vlan = on ? 
pvid : 0; 3119 3120 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 3121 if (rc) 3122 bp->vlan = vlan; 3123 return rc; 3124 } 3125 3126 static int 3127 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 3128 { 3129 struct bnxt *bp = dev->data->dev_private; 3130 int rc; 3131 3132 rc = is_bnxt_in_error(bp); 3133 if (rc) 3134 return rc; 3135 3136 return bnxt_hwrm_port_led_cfg(bp, true); 3137 } 3138 3139 static int 3140 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 3141 { 3142 struct bnxt *bp = dev->data->dev_private; 3143 int rc; 3144 3145 rc = is_bnxt_in_error(bp); 3146 if (rc) 3147 return rc; 3148 3149 return bnxt_hwrm_port_led_cfg(bp, false); 3150 } 3151 3152 static uint32_t 3153 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) 3154 { 3155 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 3156 struct bnxt_cp_ring_info *cpr; 3157 uint32_t desc = 0, raw_cons, cp_ring_size; 3158 struct bnxt_rx_queue *rxq; 3159 struct rx_pkt_cmpl *rxcmp; 3160 int rc; 3161 3162 rc = is_bnxt_in_error(bp); 3163 if (rc) 3164 return rc; 3165 3166 rxq = dev->data->rx_queues[rx_queue_id]; 3167 cpr = rxq->cp_ring; 3168 raw_cons = cpr->cp_raw_cons; 3169 cp_ring_size = cpr->cp_ring_struct->ring_size; 3170 3171 while (1) { 3172 uint32_t agg_cnt, cons, cmpl_type; 3173 3174 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3175 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3176 3177 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3178 break; 3179 3180 cmpl_type = CMP_TYPE(rxcmp); 3181 3182 switch (cmpl_type) { 3183 case CMPL_BASE_TYPE_RX_L2: 3184 case CMPL_BASE_TYPE_RX_L2_V2: 3185 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3186 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3187 desc++; 3188 break; 3189 3190 case CMPL_BASE_TYPE_RX_TPA_END: 3191 if (BNXT_CHIP_P5(rxq->bp)) { 3192 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3193 3194 p5_tpa_end = (void *)rxcmp; 3195 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3196 } else { 3197 struct rx_tpa_end_cmpl *tpa_end; 3198 3199 tpa_end = (void *)rxcmp; 3200 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3201 } 3202 3203 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3204 desc++; 3205 break; 3206 3207 default: 3208 raw_cons += CMP_LEN(cmpl_type); 3209 } 3210 } 3211 3212 return desc; 3213 } 3214 3215 static int 3216 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 3217 { 3218 struct bnxt_rx_queue *rxq = rx_queue; 3219 struct bnxt_cp_ring_info *cpr; 3220 struct bnxt_rx_ring_info *rxr; 3221 uint32_t desc, raw_cons, cp_ring_size; 3222 struct bnxt *bp = rxq->bp; 3223 struct rx_pkt_cmpl *rxcmp; 3224 int rc; 3225 3226 rc = is_bnxt_in_error(bp); 3227 if (rc) 3228 return rc; 3229 3230 if (offset >= rxq->nb_rx_desc) 3231 return -EINVAL; 3232 3233 rxr = rxq->rx_ring; 3234 cpr = rxq->cp_ring; 3235 cp_ring_size = cpr->cp_ring_struct->ring_size; 3236 3237 /* 3238 * For the vector receive case, the completion at the requested 3239 * offset can be indexed directly. 3240 */ 3241 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 3242 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) { 3243 struct rx_pkt_cmpl *rxcmp; 3244 uint32_t cons; 3245 3246 /* Check status of completion descriptor. */ 3247 raw_cons = cpr->cp_raw_cons + 3248 offset * CMP_LEN(CMPL_BASE_TYPE_RX_L2); 3249 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3250 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3251 3252 if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3253 return RTE_ETH_RX_DESC_DONE; 3254 3255 /* Check whether rx desc has an mbuf attached. 
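 * In vector mode, descriptors in the window [rxq->rxrearm_start, rxq->rxrearm_start + rxq->rxrearm_nb) have been consumed but not yet re-armed with fresh mbufs, so they are reported as unavailable below.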
*/ 3256 cons = RING_CMP(rxr->rx_ring_struct, raw_cons / 2); 3257 if (cons >= rxq->rxrearm_start && 3258 cons < rxq->rxrearm_start + rxq->rxrearm_nb) { 3259 return RTE_ETH_RX_DESC_UNAVAIL; 3260 } 3261 3262 return RTE_ETH_RX_DESC_AVAIL; 3263 } 3264 #endif 3265 3266 /* 3267 * For the non-vector receive case, scan the completion ring to 3268 * locate the completion descriptor for the requested offset. 3269 */ 3270 raw_cons = cpr->cp_raw_cons; 3271 desc = 0; 3272 while (1) { 3273 uint32_t agg_cnt, cons, cmpl_type; 3274 3275 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 3276 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 3277 3278 if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size)) 3279 break; 3280 3281 cmpl_type = CMP_TYPE(rxcmp); 3282 3283 switch (cmpl_type) { 3284 case CMPL_BASE_TYPE_RX_L2: 3285 case CMPL_BASE_TYPE_RX_L2_V2: 3286 if (desc == offset) { 3287 cons = rxcmp->opaque; 3288 if (rxr->rx_buf_ring[cons]) 3289 return RTE_ETH_RX_DESC_DONE; 3290 else 3291 return RTE_ETH_RX_DESC_UNAVAIL; 3292 } 3293 agg_cnt = BNXT_RX_L2_AGG_BUFS(rxcmp); 3294 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3295 desc++; 3296 break; 3297 3298 case CMPL_BASE_TYPE_RX_TPA_END: 3299 if (desc == offset) 3300 return RTE_ETH_RX_DESC_DONE; 3301 3302 if (BNXT_CHIP_P5(rxq->bp)) { 3303 struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end; 3304 3305 p5_tpa_end = (void *)rxcmp; 3306 agg_cnt = BNXT_TPA_END_AGG_BUFS_TH(p5_tpa_end); 3307 } else { 3308 struct rx_tpa_end_cmpl *tpa_end; 3309 3310 tpa_end = (void *)rxcmp; 3311 agg_cnt = BNXT_TPA_END_AGG_BUFS(tpa_end); 3312 } 3313 3314 raw_cons = raw_cons + CMP_LEN(cmpl_type) + agg_cnt; 3315 desc++; 3316 break; 3317 3318 default: 3319 raw_cons += CMP_LEN(cmpl_type); 3320 } 3321 } 3322 3323 return RTE_ETH_RX_DESC_AVAIL; 3324 } 3325 3326 static int 3327 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 3328 { 3329 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 3330 struct bnxt_cp_ring_info *cpr = txq->cp_ring; 3331 uint32_t ring_mask, raw_cons, nb_tx_pkts = 0; 3332 struct cmpl_base *cp_desc_ring; 3333 int rc; 3334 3335 rc = is_bnxt_in_error(txq->bp); 3336 if (rc) 3337 return rc; 3338 3339 if (offset >= txq->nb_tx_desc) 3340 return -EINVAL; 3341 3342 /* Return "desc done" if descriptor is available for use. */ 3343 if (bnxt_tx_bds_in_hw(txq) <= offset) 3344 return RTE_ETH_TX_DESC_DONE; 3345 3346 raw_cons = cpr->cp_raw_cons; 3347 cp_desc_ring = cpr->cp_desc_ring; 3348 ring_mask = cpr->cp_ring_struct->ring_mask; 3349 3350 /* Check to see if hw has posted a completion for the descriptor. */ 3351 while (1) { 3352 struct tx_cmpl *txcmp; 3353 uint32_t cons; 3354 3355 cons = RING_CMPL(ring_mask, raw_cons); 3356 txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; 3357 3358 if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1)) 3359 break; 3360 3361 if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) 3362 nb_tx_pkts += rte_le_to_cpu_32(txcmp->opaque); 3363 3364 if (nb_tx_pkts > offset) 3365 return RTE_ETH_TX_DESC_DONE; 3366 3367 raw_cons = NEXT_RAW_CMP(raw_cons); 3368 } 3369 3370 /* Descriptor is pending transmit, not yet completed by hardware. 
*/ 3371 return RTE_ETH_TX_DESC_FULL; 3372 } 3373 3374 int 3375 bnxt_flow_ops_get_op(struct rte_eth_dev *dev, 3376 const struct rte_flow_ops **ops) 3377 { 3378 struct bnxt *bp = dev->data->dev_private; 3379 int ret = 0; 3380 3381 if (!bp) 3382 return -EIO; 3383 3384 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3385 struct bnxt_representor *vfr = dev->data->dev_private; 3386 bp = vfr->parent_dev->data->dev_private; 3387 /* parent is deleted while children are still valid */ 3388 if (!bp) { 3389 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error\n", 3390 dev->data->port_id); 3391 return -EIO; 3392 } 3393 } 3394 3395 ret = is_bnxt_in_error(bp); 3396 if (ret) 3397 return ret; 3398 3399 /* PMD supports thread-safe flow operations. rte_flow API 3400 * functions can avoid mutex for multi-thread safety. 3401 */ 3402 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3403 3404 if (BNXT_TRUFLOW_EN(bp)) 3405 *ops = &bnxt_ulp_rte_flow_ops; 3406 else 3407 *ops = &bnxt_flow_ops; 3408 3409 return ret; 3410 } 3411 3412 static const uint32_t * 3413 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3414 { 3415 static const uint32_t ptypes[] = { 3416 RTE_PTYPE_L2_ETHER_VLAN, 3417 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3418 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3419 RTE_PTYPE_L4_ICMP, 3420 RTE_PTYPE_L4_TCP, 3421 RTE_PTYPE_L4_UDP, 3422 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3423 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3424 RTE_PTYPE_INNER_L4_ICMP, 3425 RTE_PTYPE_INNER_L4_TCP, 3426 RTE_PTYPE_INNER_L4_UDP, 3427 RTE_PTYPE_UNKNOWN 3428 }; 3429 3430 if (!dev->rx_pkt_burst) 3431 return NULL; 3432 3433 return ptypes; 3434 } 3435 3436 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3437 int reg_win) 3438 { 3439 uint32_t reg_base = *reg_arr & 0xfffff000; 3440 uint32_t win_off; 3441 int i; 3442 3443 for (i = 0; i < count; i++) { 3444 if ((reg_arr[i] & 0xfffff000) != reg_base) 3445 return -ERANGE; 3446 } 3447 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3448 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3449 return 0; 3450 } 3451 3452 static int bnxt_map_ptp_regs(struct bnxt *bp) 3453 { 3454 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3455 uint32_t *reg_arr; 3456 int rc, i; 3457 3458 reg_arr = ptp->rx_regs; 3459 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3460 if (rc) 3461 return rc; 3462 3463 reg_arr = ptp->tx_regs; 3464 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3465 if (rc) 3466 return rc; 3467 3468 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3469 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3470 3471 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3472 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3473 3474 return 0; 3475 } 3476 3477 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3478 { 3479 rte_write32(0, (uint8_t *)bp->bar0 + 3480 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3481 rte_write32(0, (uint8_t *)bp->bar0 + 3482 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3483 } 3484 3485 static uint64_t bnxt_cc_read(struct bnxt *bp) 3486 { 3487 uint64_t ns; 3488 3489 ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3490 BNXT_GRCPF_REG_SYNC_TIME)); 3491 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3492 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3493 return ns; 3494 } 3495 3496 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3497 { 3498 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3499 uint32_t fifo; 3500 3501 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3502 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3503 if (fifo & 
BNXT_PTP_TX_FIFO_EMPTY) 3504 return -EAGAIN; 3505 3506 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3507 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3508 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3509 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3510 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3511 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3512 rte_read32((uint8_t *)bp->bar0 + ptp->tx_mapped_regs[BNXT_PTP_TX_SEQ]); 3513 3514 return 0; 3515 } 3516 3517 static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts) 3518 { 3519 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3520 struct bnxt_pf_info *pf = bp->pf; 3521 uint16_t port_id; 3522 int i = 0; 3523 uint32_t fifo; 3524 3525 if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5)) 3526 return -EINVAL; 3527 3528 port_id = pf->port_id; 3529 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3530 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3531 while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) { 3532 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3533 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3534 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3535 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3536 *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3537 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3538 *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3539 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3540 i++; 3541 } 3542 3543 if (i >= BNXT_PTP_RX_PND_CNT) 3544 return -EBUSY; 3545 3546 return 0; 3547 } 3548 3549 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3550 { 3551 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3552 struct bnxt_pf_info *pf = bp->pf; 3553 uint16_t port_id; 3554 uint32_t fifo; 3555 3556 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3557 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3558 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3559 return -EAGAIN; 3560 3561 port_id = pf->port_id; 3562 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3563 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3564 3565 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3566 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3567 if (fifo & BNXT_PTP_RX_FIFO_PENDING) 3568 return bnxt_clr_rx_ts(bp, ts); 3569 3570 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3571 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3572 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3573 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3574 3575 return 0; 3576 } 3577 3578 static int 3579 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3580 { 3581 uint64_t ns; 3582 struct bnxt *bp = dev->data->dev_private; 3583 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3584 3585 if (!ptp) 3586 return -ENOTSUP; 3587 3588 ns = rte_timespec_to_ns(ts); 3589 /* Set the timecounters to a new value. 
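 * Only the nanosecond counts of the free-running, Rx and Tx timestamp timecounters are replaced; the underlying cycle counter state is not modified.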
*/ 3590 ptp->tc.nsec = ns; 3591 ptp->tx_tstamp_tc.nsec = ns; 3592 ptp->rx_tstamp_tc.nsec = ns; 3593 3594 return 0; 3595 } 3596 3597 static int 3598 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3599 { 3600 struct bnxt *bp = dev->data->dev_private; 3601 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3602 uint64_t ns, systime_cycles = 0; 3603 int rc = 0; 3604 3605 if (!ptp) 3606 return -ENOTSUP; 3607 3608 if (BNXT_CHIP_P5(bp)) 3609 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3610 &systime_cycles); 3611 else 3612 systime_cycles = bnxt_cc_read(bp); 3613 3614 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3615 *ts = rte_ns_to_timespec(ns); 3616 3617 return rc; 3618 } 3619 static int 3620 bnxt_timesync_enable(struct rte_eth_dev *dev) 3621 { 3622 struct bnxt *bp = dev->data->dev_private; 3623 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3624 uint32_t shift = 0; 3625 int rc; 3626 3627 if (!ptp) 3628 return -ENOTSUP; 3629 3630 ptp->rx_filter = 1; 3631 ptp->tx_tstamp_en = 1; 3632 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3633 3634 rc = bnxt_hwrm_ptp_cfg(bp); 3635 if (rc) 3636 return rc; 3637 3638 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3639 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3640 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3641 3642 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3643 ptp->tc.cc_shift = shift; 3644 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3645 3646 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3647 ptp->rx_tstamp_tc.cc_shift = shift; 3648 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3649 3650 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3651 ptp->tx_tstamp_tc.cc_shift = shift; 3652 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3653 3654 if (!BNXT_CHIP_P5(bp)) 3655 bnxt_map_ptp_regs(bp); 3656 else 3657 rc = bnxt_ptp_start(bp); 3658 3659 return rc; 3660 } 3661 3662 static int 3663 bnxt_timesync_disable(struct rte_eth_dev *dev) 3664 { 3665 struct bnxt *bp = dev->data->dev_private; 3666 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3667 3668 if (!ptp) 3669 return -ENOTSUP; 3670 3671 ptp->rx_filter = 0; 3672 ptp->tx_tstamp_en = 0; 3673 ptp->rxctl = 0; 3674 3675 bnxt_hwrm_ptp_cfg(bp); 3676 3677 if (!BNXT_CHIP_P5(bp)) 3678 bnxt_unmap_ptp_regs(bp); 3679 else 3680 bnxt_ptp_stop(bp); 3681 3682 return 0; 3683 } 3684 3685 static int 3686 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3687 struct timespec *timestamp, 3688 uint32_t flags __rte_unused) 3689 { 3690 struct bnxt *bp = dev->data->dev_private; 3691 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3692 uint64_t rx_tstamp_cycles = 0; 3693 uint64_t ns; 3694 3695 if (!ptp) 3696 return -ENOTSUP; 3697 3698 if (BNXT_CHIP_P5(bp)) 3699 rx_tstamp_cycles = ptp->rx_timestamp; 3700 else 3701 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3702 3703 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3704 *timestamp = rte_ns_to_timespec(ns); 3705 return 0; 3706 } 3707 3708 static int 3709 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3710 struct timespec *timestamp) 3711 { 3712 struct bnxt *bp = dev->data->dev_private; 3713 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3714 uint64_t tx_tstamp_cycles = 0; 3715 uint64_t ns; 3716 int rc = 0; 3717 3718 if (!ptp) 3719 return -ENOTSUP; 3720 3721 if (BNXT_CHIP_P5(bp)) 3722 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3723 &tx_tstamp_cycles); 3724 else 3725 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3726 3727 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3728 
*timestamp = rte_ns_to_timespec(ns); 3729 3730 return rc; 3731 } 3732 3733 static int 3734 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3735 { 3736 struct bnxt *bp = dev->data->dev_private; 3737 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3738 3739 if (!ptp) 3740 return -ENOTSUP; 3741 3742 ptp->tc.nsec += delta; 3743 ptp->tx_tstamp_tc.nsec += delta; 3744 ptp->rx_tstamp_tc.nsec += delta; 3745 3746 return 0; 3747 } 3748 3749 static int 3750 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3751 { 3752 struct bnxt *bp = dev->data->dev_private; 3753 int rc; 3754 uint32_t dir_entries; 3755 uint32_t entry_length; 3756 3757 rc = is_bnxt_in_error(bp); 3758 if (rc) 3759 return rc; 3760 3761 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3762 bp->pdev->addr.domain, bp->pdev->addr.bus, 3763 bp->pdev->addr.devid, bp->pdev->addr.function); 3764 3765 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3766 if (rc != 0) 3767 return rc; 3768 3769 return dir_entries * entry_length; 3770 } 3771 3772 static int 3773 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3774 struct rte_dev_eeprom_info *in_eeprom) 3775 { 3776 struct bnxt *bp = dev->data->dev_private; 3777 uint32_t index; 3778 uint32_t offset; 3779 int rc; 3780 3781 rc = is_bnxt_in_error(bp); 3782 if (rc) 3783 return rc; 3784 3785 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3786 bp->pdev->addr.domain, bp->pdev->addr.bus, 3787 bp->pdev->addr.devid, bp->pdev->addr.function, 3788 in_eeprom->offset, in_eeprom->length); 3789 3790 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3791 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3792 in_eeprom->data); 3793 3794 index = in_eeprom->offset >> 24; 3795 offset = in_eeprom->offset & 0xffffff; 3796 3797 if (index != 0) 3798 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3799 in_eeprom->length, in_eeprom->data); 3800 3801 return 0; 3802 } 3803 3804 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3805 { 3806 switch (dir_type) { 3807 case BNX_DIR_TYPE_CHIMP_PATCH: 3808 case BNX_DIR_TYPE_BOOTCODE: 3809 case BNX_DIR_TYPE_BOOTCODE_2: 3810 case BNX_DIR_TYPE_APE_FW: 3811 case BNX_DIR_TYPE_APE_PATCH: 3812 case BNX_DIR_TYPE_KONG_FW: 3813 case BNX_DIR_TYPE_KONG_PATCH: 3814 case BNX_DIR_TYPE_BONO_FW: 3815 case BNX_DIR_TYPE_BONO_PATCH: 3816 /* FALLTHROUGH */ 3817 return true; 3818 } 3819 3820 return false; 3821 } 3822 3823 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3824 { 3825 switch (dir_type) { 3826 case BNX_DIR_TYPE_AVS: 3827 case BNX_DIR_TYPE_EXP_ROM_MBA: 3828 case BNX_DIR_TYPE_PCIE: 3829 case BNX_DIR_TYPE_TSCF_UCODE: 3830 case BNX_DIR_TYPE_EXT_PHY: 3831 case BNX_DIR_TYPE_CCM: 3832 case BNX_DIR_TYPE_ISCSI_BOOT: 3833 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3834 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3835 /* FALLTHROUGH */ 3836 return true; 3837 } 3838 3839 return false; 3840 } 3841 3842 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3843 { 3844 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3845 bnxt_dir_type_is_other_exec_format(dir_type); 3846 } 3847 3848 static int 3849 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3850 struct rte_dev_eeprom_info *in_eeprom) 3851 { 3852 struct bnxt *bp = dev->data->dev_private; 3853 uint8_t index, dir_op; 3854 uint16_t type, ext, ordinal, attr; 3855 int rc; 3856 3857 rc = is_bnxt_in_error(bp); 3858 if (rc) 3859 return rc; 3860 3861 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3862 bp->pdev->addr.domain, bp->pdev->addr.bus, 3863 
bp->pdev->addr.devid, bp->pdev->addr.function, 3864 in_eeprom->offset, in_eeprom->length); 3865 3866 if (!BNXT_PF(bp)) { 3867 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3868 return -EINVAL; 3869 } 3870 3871 type = in_eeprom->magic >> 16; 3872 3873 if (type == 0xffff) { /* special value for directory operations */ 3874 index = in_eeprom->magic & 0xff; 3875 dir_op = in_eeprom->magic >> 8; 3876 if (index == 0) 3877 return -EINVAL; 3878 switch (dir_op) { 3879 case 0x0e: /* erase */ 3880 if (in_eeprom->offset != ~in_eeprom->magic) 3881 return -EINVAL; 3882 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3883 default: 3884 return -EINVAL; 3885 } 3886 } 3887 3888 /* Create or re-write an NVM item: */ 3889 if (bnxt_dir_type_is_executable(type) == true) 3890 return -EOPNOTSUPP; 3891 ext = in_eeprom->magic & 0xffff; 3892 ordinal = in_eeprom->offset >> 16; 3893 attr = in_eeprom->offset & 0xffff; 3894 3895 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3896 in_eeprom->data, in_eeprom->length); 3897 } 3898 3899 static int bnxt_get_module_info(struct rte_eth_dev *dev, 3900 struct rte_eth_dev_module_info *modinfo) 3901 { 3902 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3903 struct bnxt *bp = dev->data->dev_private; 3904 int rc; 3905 3906 /* No point in going further if phy status indicates 3907 * module is not inserted or if it is powered down or 3908 * if it is of type 10GBase-T 3909 */ 3910 if (bp->link_info->module_status > 3911 HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG) { 3912 PMD_DRV_LOG(NOTICE, "Port %u : Module is not inserted or is powered down\n", 3913 dev->data->port_id); 3914 return -ENOTSUP; 3915 } 3916 3917 /* This feature is not supported in older firmware versions */ 3918 if (bp->hwrm_spec_code < 0x10202) { 3919 PMD_DRV_LOG(NOTICE, "Port %u : Feature is not supported in older firmware\n", 3920 dev->data->port_id); 3921 return -ENOTSUP; 3922 } 3923 3924 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3925 SFF_DIAG_SUPPORT_OFFSET + 1, 3926 module_info); 3927 3928 if (rc) 3929 return rc; 3930 3931 switch (module_info[0]) { 3932 case SFF_MODULE_ID_SFP: 3933 modinfo->type = RTE_ETH_MODULE_SFF_8472; 3934 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 3935 if (module_info[SFF_DIAG_SUPPORT_OFFSET] == 0) 3936 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3937 break; 3938 case SFF_MODULE_ID_QSFP: 3939 case SFF_MODULE_ID_QSFP_PLUS: 3940 modinfo->type = RTE_ETH_MODULE_SFF_8436; 3941 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8436_LEN; 3942 break; 3943 case SFF_MODULE_ID_QSFP28: 3944 modinfo->type = RTE_ETH_MODULE_SFF_8636; 3945 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; 3946 if (module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) 3947 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_LEN; 3948 break; 3949 default: 3950 PMD_DRV_LOG(NOTICE, "Port %u : Unsupported module\n", dev->data->port_id); 3951 return -ENOTSUP; 3952 } 3953 3954 PMD_DRV_LOG(INFO, "Port %u : modinfo->type = %d modinfo->eeprom_len = %d\n", 3955 dev->data->port_id, modinfo->type, modinfo->eeprom_len); 3956 3957 return 0; 3958 } 3959 3960 static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, 3961 struct rte_dev_eeprom_info *info) 3962 { 3963 uint8_t pg_addr[5] = { I2C_DEV_ADDR_A0, I2C_DEV_ADDR_A0 }; 3964 uint32_t offset = info->offset, length = info->length; 3965 uint8_t module_info[SFF_DIAG_SUPPORT_OFFSET + 1]; 3966 struct bnxt *bp = dev->data->dev_private; 3967 uint8_t *data = info->data; 3968 uint8_t page = offset >> 7; 3969 uint8_t 
max_pages = 2; 3970 uint8_t opt_pages; 3971 int rc; 3972 3973 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 3974 SFF_DIAG_SUPPORT_OFFSET + 1, 3975 module_info); 3976 if (rc) 3977 return rc; 3978 3979 switch (module_info[0]) { 3980 case SFF_MODULE_ID_SFP: 3981 module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; 3982 if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { 3983 pg_addr[2] = I2C_DEV_ADDR_A2; 3984 pg_addr[3] = I2C_DEV_ADDR_A2; 3985 max_pages = 4; 3986 } 3987 break; 3988 case SFF_MODULE_ID_QSFP28: 3989 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 3990 SFF8636_OPT_PAGES_OFFSET, 3991 1, &opt_pages); 3992 if (rc) 3993 return rc; 3994 3995 if (opt_pages & SFF8636_PAGE1_MASK) { 3996 pg_addr[2] = I2C_DEV_ADDR_A0; 3997 max_pages = 3; 3998 } 3999 if (opt_pages & SFF8636_PAGE2_MASK) { 4000 pg_addr[3] = I2C_DEV_ADDR_A0; 4001 max_pages = 4; 4002 } 4003 if (~module_info[SFF8636_FLATMEM_OFFSET] & SFF8636_FLATMEM_MASK) { 4004 pg_addr[4] = I2C_DEV_ADDR_A0; 4005 max_pages = 5; 4006 } 4007 break; 4008 default: 4009 break; 4010 } 4011 4012 memset(data, 0, length); 4013 4014 offset &= 0xff; 4015 while (length && page < max_pages) { 4016 uint8_t raw_page = page ? page - 1 : 0; 4017 uint16_t chunk; 4018 4019 if (pg_addr[page] == I2C_DEV_ADDR_A2) 4020 raw_page = 0; 4021 else if (page) 4022 offset |= 0x80; 4023 chunk = RTE_MIN(length, 256 - offset); 4024 4025 if (pg_addr[page]) { 4026 rc = bnxt_hwrm_read_sfp_module_eeprom_info(bp, pg_addr[page], 4027 raw_page, offset, 4028 chunk, data); 4029 if (rc) 4030 return rc; 4031 } 4032 4033 data += chunk; 4034 length -= chunk; 4035 offset = 0; 4036 page += 1 + (chunk > 128); 4037 } 4038 4039 return length ? -EINVAL : 0; 4040 } 4041 4042 /* 4043 * Initialization 4044 */ 4045 4046 static const struct eth_dev_ops bnxt_dev_ops = { 4047 .dev_infos_get = bnxt_dev_info_get_op, 4048 .dev_close = bnxt_dev_close_op, 4049 .dev_configure = bnxt_dev_configure_op, 4050 .dev_start = bnxt_dev_start_op, 4051 .dev_stop = bnxt_dev_stop_op, 4052 .dev_set_link_up = bnxt_dev_set_link_up_op, 4053 .dev_set_link_down = bnxt_dev_set_link_down_op, 4054 .stats_get = bnxt_stats_get_op, 4055 .stats_reset = bnxt_stats_reset_op, 4056 .rx_queue_setup = bnxt_rx_queue_setup_op, 4057 .rx_queue_release = bnxt_rx_queue_release_op, 4058 .tx_queue_setup = bnxt_tx_queue_setup_op, 4059 .tx_queue_release = bnxt_tx_queue_release_op, 4060 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 4061 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 4062 .reta_update = bnxt_reta_update_op, 4063 .reta_query = bnxt_reta_query_op, 4064 .rss_hash_update = bnxt_rss_hash_update_op, 4065 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 4066 .link_update = bnxt_link_update_op, 4067 .promiscuous_enable = bnxt_promiscuous_enable_op, 4068 .promiscuous_disable = bnxt_promiscuous_disable_op, 4069 .allmulticast_enable = bnxt_allmulticast_enable_op, 4070 .allmulticast_disable = bnxt_allmulticast_disable_op, 4071 .mac_addr_add = bnxt_mac_addr_add_op, 4072 .mac_addr_remove = bnxt_mac_addr_remove_op, 4073 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 4074 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 4075 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 4076 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 4077 .vlan_filter_set = bnxt_vlan_filter_set_op, 4078 .vlan_offload_set = bnxt_vlan_offload_set_op, 4079 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 4080 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 4081 .mtu_set = bnxt_mtu_set_op, 4082 .mac_addr_set = bnxt_set_default_mac_addr_op, 4083 .xstats_get = 
bnxt_dev_xstats_get_op, 4084 .xstats_get_names = bnxt_dev_xstats_get_names_op, 4085 .xstats_reset = bnxt_dev_xstats_reset_op, 4086 .fw_version_get = bnxt_fw_version_get, 4087 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 4088 .rxq_info_get = bnxt_rxq_info_get_op, 4089 .txq_info_get = bnxt_txq_info_get_op, 4090 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 4091 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 4092 .dev_led_on = bnxt_dev_led_on_op, 4093 .dev_led_off = bnxt_dev_led_off_op, 4094 .rx_queue_start = bnxt_rx_queue_start, 4095 .rx_queue_stop = bnxt_rx_queue_stop, 4096 .tx_queue_start = bnxt_tx_queue_start, 4097 .tx_queue_stop = bnxt_tx_queue_stop, 4098 .flow_ops_get = bnxt_flow_ops_get_op, 4099 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 4100 .get_eeprom_length = bnxt_get_eeprom_length_op, 4101 .get_eeprom = bnxt_get_eeprom_op, 4102 .set_eeprom = bnxt_set_eeprom_op, 4103 .get_module_info = bnxt_get_module_info, 4104 .get_module_eeprom = bnxt_get_module_eeprom, 4105 .timesync_enable = bnxt_timesync_enable, 4106 .timesync_disable = bnxt_timesync_disable, 4107 .timesync_read_time = bnxt_timesync_read_time, 4108 .timesync_write_time = bnxt_timesync_write_time, 4109 .timesync_adjust_time = bnxt_timesync_adjust_time, 4110 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 4111 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 4112 }; 4113 4114 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 4115 { 4116 uint32_t offset; 4117 4118 /* Only pre-map the reset GRC registers using window 3 */ 4119 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 4120 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 4121 4122 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 4123 4124 return offset; 4125 } 4126 4127 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 4128 { 4129 struct bnxt_error_recovery_info *info = bp->recovery_info; 4130 uint32_t reg_base = 0xffffffff; 4131 int i; 4132 4133 /* Only pre-map the monitoring GRC registers using window 2 */ 4134 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 4135 uint32_t reg = info->status_regs[i]; 4136 4137 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 4138 continue; 4139 4140 if (reg_base == 0xffffffff) 4141 reg_base = reg & 0xfffff000; 4142 if ((reg & 0xfffff000) != reg_base) 4143 return -ERANGE; 4144 4145 /* Use mask 0xffc as the Lower 2 bits indicates 4146 * address space location 4147 */ 4148 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 4149 (reg & 0xffc); 4150 } 4151 4152 if (reg_base == 0xffffffff) 4153 return 0; 4154 4155 rte_write32(reg_base, (uint8_t *)bp->bar0 + 4156 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4157 4158 return 0; 4159 } 4160 4161 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 4162 { 4163 struct bnxt_error_recovery_info *info = bp->recovery_info; 4164 uint32_t delay = info->delay_after_reset[index]; 4165 uint32_t val = info->reset_reg_val[index]; 4166 uint32_t reg = info->reset_reg[index]; 4167 uint32_t type, offset; 4168 int ret; 4169 4170 type = BNXT_FW_STATUS_REG_TYPE(reg); 4171 offset = BNXT_FW_STATUS_REG_OFF(reg); 4172 4173 switch (type) { 4174 case BNXT_FW_STATUS_REG_TYPE_CFG: 4175 ret = rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 4176 if (ret < 0) { 4177 PMD_DRV_LOG(ERR, "Failed to write %#x at PCI offset %#x", 4178 val, offset); 4179 return; 4180 } 4181 break; 4182 case BNXT_FW_STATUS_REG_TYPE_GRC: 4183 offset = bnxt_map_reset_regs(bp, offset); 4184 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4185 break; 4186 
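	/* BAR0-type reset registers are written directly at the supplied
	 * offset; GRC-type registers above are first remapped into GRC
	 * window 3 by bnxt_map_reset_regs().
	 */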
case BNXT_FW_STATUS_REG_TYPE_BAR0: 4187 rte_write32(val, (uint8_t *)bp->bar0 + offset); 4188 break; 4189 } 4190 /* wait on a specific interval of time until core reset is complete */ 4191 if (delay) 4192 rte_delay_ms(delay); 4193 } 4194 4195 static void bnxt_dev_cleanup(struct bnxt *bp) 4196 { 4197 bp->eth_dev->data->dev_link.link_status = 0; 4198 bp->link_info->link_up = 0; 4199 if (bp->eth_dev->data->dev_started) 4200 bnxt_dev_stop(bp->eth_dev); 4201 4202 bnxt_uninit_resources(bp, true); 4203 } 4204 4205 static int 4206 bnxt_check_fw_reset_done(struct bnxt *bp) 4207 { 4208 int timeout = bp->fw_reset_max_msecs; 4209 uint16_t val = 0; 4210 int rc; 4211 4212 do { 4213 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4214 if (rc < 0) { 4215 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4216 return rc; 4217 } 4218 if (val != 0xffff) 4219 break; 4220 rte_delay_ms(1); 4221 } while (timeout--); 4222 4223 if (val == 0xffff) { 4224 PMD_DRV_LOG(ERR, "Firmware reset aborted, PCI config space invalid\n"); 4225 return -1; 4226 } 4227 4228 return 0; 4229 } 4230 4231 static int bnxt_restore_vlan_filters(struct bnxt *bp) 4232 { 4233 struct rte_eth_dev *dev = bp->eth_dev; 4234 struct rte_vlan_filter_conf *vfc; 4235 int vidx, vbit, rc; 4236 uint16_t vlan_id; 4237 4238 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 4239 vfc = &dev->data->vlan_filter_conf; 4240 vidx = vlan_id / 64; 4241 vbit = vlan_id % 64; 4242 4243 /* Each bit corresponds to a VLAN id */ 4244 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 4245 rc = bnxt_add_vlan_filter(bp, vlan_id); 4246 if (rc) 4247 return rc; 4248 } 4249 } 4250 4251 return 0; 4252 } 4253 4254 static int bnxt_restore_mac_filters(struct bnxt *bp) 4255 { 4256 struct rte_eth_dev *dev = bp->eth_dev; 4257 struct rte_eth_dev_info dev_info; 4258 struct rte_ether_addr *addr; 4259 uint64_t pool_mask; 4260 uint32_t pool = 0; 4261 uint32_t i; 4262 int rc; 4263 4264 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 4265 return 0; 4266 4267 rc = bnxt_dev_info_get_op(dev, &dev_info); 4268 if (rc) 4269 return rc; 4270 4271 /* replay MAC address configuration */ 4272 for (i = 1; i < dev_info.max_mac_addrs; i++) { 4273 addr = &dev->data->mac_addrs[i]; 4274 4275 /* skip zero address */ 4276 if (rte_is_zero_ether_addr(addr)) 4277 continue; 4278 4279 pool = 0; 4280 pool_mask = dev->data->mac_pool_sel[i]; 4281 4282 do { 4283 if (pool_mask & 1ULL) { 4284 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 4285 if (rc) 4286 return rc; 4287 } 4288 pool_mask >>= 1; 4289 pool++; 4290 } while (pool_mask); 4291 } 4292 4293 return 0; 4294 } 4295 4296 static int bnxt_restore_filters(struct bnxt *bp) 4297 { 4298 struct rte_eth_dev *dev = bp->eth_dev; 4299 int ret = 0; 4300 4301 if (dev->data->all_multicast) { 4302 ret = bnxt_allmulticast_enable_op(dev); 4303 if (ret) 4304 return ret; 4305 } 4306 if (dev->data->promiscuous) { 4307 ret = bnxt_promiscuous_enable_op(dev); 4308 if (ret) 4309 return ret; 4310 } 4311 4312 ret = bnxt_restore_mac_filters(bp); 4313 if (ret) 4314 return ret; 4315 4316 ret = bnxt_restore_vlan_filters(bp); 4317 /* TODO restore other filters as well */ 4318 return ret; 4319 } 4320 4321 static int bnxt_check_fw_ready(struct bnxt *bp) 4322 { 4323 int timeout = bp->fw_reset_max_msecs; 4324 int rc = 0; 4325 4326 do { 4327 rc = bnxt_hwrm_poll_ver_get(bp); 4328 if (rc == 0) 4329 break; 4330 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 4331 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 4332 } while (rc && timeout > 0); 4333 4334 
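	/* A non-zero rc here means the firmware never answered
	 * bnxt_hwrm_poll_ver_get() within bp->fw_reset_max_msecs.
	 */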
if (rc) 4335 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 4336 4337 return rc; 4338 } 4339 4340 static void bnxt_dev_recover(void *arg) 4341 { 4342 struct bnxt *bp = arg; 4343 int rc = 0; 4344 4345 pthread_mutex_lock(&bp->err_recovery_lock); 4346 4347 if (!bp->fw_reset_min_msecs) { 4348 rc = bnxt_check_fw_reset_done(bp); 4349 if (rc) 4350 goto err; 4351 } 4352 4353 /* Clear Error flag so that device re-init should happen */ 4354 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 4355 4356 rc = bnxt_check_fw_ready(bp); 4357 if (rc) 4358 goto err; 4359 4360 rc = bnxt_init_resources(bp, true); 4361 if (rc) { 4362 PMD_DRV_LOG(ERR, 4363 "Failed to initialize resources after reset\n"); 4364 goto err; 4365 } 4366 /* clear reset flag as the device is initialized now */ 4367 bp->flags &= ~BNXT_FLAG_FW_RESET; 4368 4369 rc = bnxt_dev_start_op(bp->eth_dev); 4370 if (rc) { 4371 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 4372 goto err_start; 4373 } 4374 4375 rc = bnxt_restore_filters(bp); 4376 if (rc) 4377 goto err_start; 4378 4379 PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); 4380 pthread_mutex_unlock(&bp->err_recovery_lock); 4381 4382 return; 4383 err_start: 4384 bnxt_dev_stop(bp->eth_dev); 4385 err: 4386 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4387 bnxt_uninit_resources(bp, false); 4388 if (bp->eth_dev->data->dev_conf.intr_conf.rmv) 4389 rte_eth_dev_callback_process(bp->eth_dev, 4390 RTE_ETH_EVENT_INTR_RMV, 4391 NULL); 4392 pthread_mutex_unlock(&bp->err_recovery_lock); 4393 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); 4394 } 4395 4396 void bnxt_dev_reset_and_resume(void *arg) 4397 { 4398 struct bnxt *bp = arg; 4399 uint32_t us = US_PER_MS * bp->fw_reset_min_msecs; 4400 uint16_t val = 0; 4401 int rc; 4402 4403 bnxt_dev_cleanup(bp); 4404 4405 bnxt_wait_for_device_shutdown(bp); 4406 4407 /* During some fatal firmware error conditions, the PCI config space 4408 * register 0x2e which normally contains the subsystem ID will become 4409 * 0xffff. This register will revert back to the normal value after 4410 * the chip has completed core reset. If we detect this condition, 4411 * we can poll this config register immediately for the value to revert. 
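	 * The polling itself is done in bnxt_check_fw_reset_done(), which
	 * reads PCI_SUBSYSTEM_ID_OFFSET once per millisecond until the value
	 * is no longer 0xffff or bp->fw_reset_max_msecs expires.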
4412 */ 4413 if (bp->flags & BNXT_FLAG_FATAL_ERROR) { 4414 rc = rte_pci_read_config(bp->pdev, &val, sizeof(val), PCI_SUBSYSTEM_ID_OFFSET); 4415 if (rc < 0) { 4416 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", PCI_SUBSYSTEM_ID_OFFSET); 4417 return; 4418 } 4419 if (val == 0xffff) { 4420 bp->fw_reset_min_msecs = 0; 4421 us = 1; 4422 } 4423 } 4424 4425 rc = rte_eal_alarm_set(us, bnxt_dev_recover, (void *)bp); 4426 if (rc) 4427 PMD_DRV_LOG(ERR, "Error setting recovery alarm"); 4428 } 4429 4430 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 4431 { 4432 struct bnxt_error_recovery_info *info = bp->recovery_info; 4433 uint32_t reg = info->status_regs[index]; 4434 uint32_t type, offset, val = 0; 4435 int ret = 0; 4436 4437 type = BNXT_FW_STATUS_REG_TYPE(reg); 4438 offset = BNXT_FW_STATUS_REG_OFF(reg); 4439 4440 switch (type) { 4441 case BNXT_FW_STATUS_REG_TYPE_CFG: 4442 ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset); 4443 if (ret < 0) 4444 PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x", 4445 offset); 4446 break; 4447 case BNXT_FW_STATUS_REG_TYPE_GRC: 4448 offset = info->mapped_status_regs[index]; 4449 /* FALLTHROUGH */ 4450 case BNXT_FW_STATUS_REG_TYPE_BAR0: 4451 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4452 offset)); 4453 break; 4454 } 4455 4456 return val; 4457 } 4458 4459 static int bnxt_fw_reset_all(struct bnxt *bp) 4460 { 4461 struct bnxt_error_recovery_info *info = bp->recovery_info; 4462 uint32_t i; 4463 int rc = 0; 4464 4465 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4466 /* Reset through primary function driver */ 4467 for (i = 0; i < info->reg_array_cnt; i++) 4468 bnxt_write_fw_reset_reg(bp, i); 4469 /* Wait for time specified by FW after triggering reset */ 4470 rte_delay_ms(info->primary_func_wait_period_after_reset); 4471 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 4472 /* Reset with the help of Kong processor */ 4473 rc = bnxt_hwrm_fw_reset(bp); 4474 if (rc) 4475 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 4476 } 4477 4478 return rc; 4479 } 4480 4481 static void bnxt_fw_reset_cb(void *arg) 4482 { 4483 struct bnxt *bp = arg; 4484 struct bnxt_error_recovery_info *info = bp->recovery_info; 4485 int rc = 0; 4486 4487 /* Only Primary function can do FW reset */ 4488 if (bnxt_is_primary_func(bp) && 4489 bnxt_is_recovery_enabled(bp)) { 4490 rc = bnxt_fw_reset_all(bp); 4491 if (rc) { 4492 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 4493 return; 4494 } 4495 } 4496 4497 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 4498 * EXCEPTION_FATAL_ASYNC event to all the functions 4499 * (including MASTER FUNC). After receiving this Async, all the active 4500 * drivers should treat this case as FW initiated recovery 4501 */ 4502 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 4503 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 4504 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 4505 4506 /* To recover from error */ 4507 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 4508 (void *)bp); 4509 } 4510 } 4511 4512 /* Driver should poll FW heartbeat, reset_counter with the frequency 4513 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 4514 * When the driver detects heartbeat stop or change in reset_counter, 4515 * it has to trigger a reset to recover from the error condition. 4516 * A “primary function” is the function who will have the privilege to 4517 * initiate the chimp reset. The primary function will be elected by the 4518 * firmware and will be notified through async message. 
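 * When bnxt_check_fw_health() detects a missed heartbeat or a changed
 * reset counter, it marks the device fatal (BNXT_FLAG_FATAL_ERROR and
 * BNXT_FLAG_FW_RESET), stops RX/TX, and schedules bnxt_fw_reset_cb()
 * after the wait period advertised by the firmware for this function.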
4519 */ 4520 static void bnxt_check_fw_health(void *arg) 4521 { 4522 struct bnxt *bp = arg; 4523 struct bnxt_error_recovery_info *info = bp->recovery_info; 4524 uint32_t val = 0, wait_msec; 4525 4526 if (!info || !bnxt_is_recovery_enabled(bp) || 4527 is_bnxt_in_error(bp)) 4528 return; 4529 4530 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 4531 if (val == info->last_heart_beat) 4532 goto reset; 4533 4534 info->last_heart_beat = val; 4535 4536 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 4537 if (val != info->last_reset_counter) 4538 goto reset; 4539 4540 info->last_reset_counter = val; 4541 4542 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 4543 bnxt_check_fw_health, (void *)bp); 4544 4545 return; 4546 reset: 4547 /* Stop DMA to/from device */ 4548 bp->flags |= BNXT_FLAG_FATAL_ERROR; 4549 bp->flags |= BNXT_FLAG_FW_RESET; 4550 4551 bnxt_stop_rxtx(bp); 4552 4553 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 4554 4555 if (bnxt_is_primary_func(bp)) 4556 wait_msec = info->primary_func_wait_period; 4557 else 4558 wait_msec = info->normal_func_wait_period; 4559 4560 rte_eal_alarm_set(US_PER_MS * wait_msec, 4561 bnxt_fw_reset_cb, (void *)bp); 4562 } 4563 4564 void bnxt_schedule_fw_health_check(struct bnxt *bp) 4565 { 4566 uint32_t polling_freq; 4567 4568 pthread_mutex_lock(&bp->health_check_lock); 4569 4570 if (!bnxt_is_recovery_enabled(bp)) 4571 goto done; 4572 4573 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 4574 goto done; 4575 4576 polling_freq = bp->recovery_info->driver_polling_freq; 4577 4578 rte_eal_alarm_set(US_PER_MS * polling_freq, 4579 bnxt_check_fw_health, (void *)bp); 4580 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4581 4582 done: 4583 pthread_mutex_unlock(&bp->health_check_lock); 4584 } 4585 4586 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 4587 { 4588 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 4589 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 4590 } 4591 4592 static bool bnxt_vf_pciid(uint16_t device_id) 4593 { 4594 switch (device_id) { 4595 case BROADCOM_DEV_ID_57304_VF: 4596 case BROADCOM_DEV_ID_57406_VF: 4597 case BROADCOM_DEV_ID_5731X_VF: 4598 case BROADCOM_DEV_ID_5741X_VF: 4599 case BROADCOM_DEV_ID_57414_VF: 4600 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4601 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4602 case BROADCOM_DEV_ID_58802_VF: 4603 case BROADCOM_DEV_ID_57500_VF1: 4604 case BROADCOM_DEV_ID_57500_VF2: 4605 case BROADCOM_DEV_ID_58818_VF: 4606 /* FALLTHROUGH */ 4607 return true; 4608 default: 4609 return false; 4610 } 4611 } 4612 4613 /* Phase 5 device */ 4614 static bool bnxt_p5_device(uint16_t device_id) 4615 { 4616 switch (device_id) { 4617 case BROADCOM_DEV_ID_57508: 4618 case BROADCOM_DEV_ID_57504: 4619 case BROADCOM_DEV_ID_57502: 4620 case BROADCOM_DEV_ID_57508_MF1: 4621 case BROADCOM_DEV_ID_57504_MF1: 4622 case BROADCOM_DEV_ID_57502_MF1: 4623 case BROADCOM_DEV_ID_57508_MF2: 4624 case BROADCOM_DEV_ID_57504_MF2: 4625 case BROADCOM_DEV_ID_57502_MF2: 4626 case BROADCOM_DEV_ID_57500_VF1: 4627 case BROADCOM_DEV_ID_57500_VF2: 4628 case BROADCOM_DEV_ID_58812: 4629 case BROADCOM_DEV_ID_58814: 4630 case BROADCOM_DEV_ID_58818: 4631 case BROADCOM_DEV_ID_58818_VF: 4632 /* FALLTHROUGH */ 4633 return true; 4634 default: 4635 return false; 4636 } 4637 } 4638 4639 bool bnxt_stratus_device(struct bnxt *bp) 4640 { 4641 uint16_t device_id = bp->pdev->id.device_id; 4642 4643 switch (device_id) { 4644 case BROADCOM_DEV_ID_STRATUS_NIC: 4645 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4646 case 
BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4647 /* FALLTHROUGH */ 4648 return true; 4649 default: 4650 return false; 4651 } 4652 } 4653 4654 static int bnxt_map_pci_bars(struct rte_eth_dev *eth_dev) 4655 { 4656 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4657 struct bnxt *bp = eth_dev->data->dev_private; 4658 4659 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4660 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4661 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4662 if (!bp->bar0 || !bp->doorbell_base) { 4663 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4664 return -ENODEV; 4665 } 4666 4667 bp->eth_dev = eth_dev; 4668 bp->pdev = pci_dev; 4669 4670 return 0; 4671 } 4672 4673 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4674 struct bnxt_ctx_pg_info *ctx_pg, 4675 uint32_t mem_size, 4676 const char *suffix, 4677 uint16_t idx) 4678 { 4679 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4680 const struct rte_memzone *mz = NULL; 4681 char mz_name[RTE_MEMZONE_NAMESIZE]; 4682 rte_iova_t mz_phys_addr; 4683 uint64_t valid_bits = 0; 4684 uint32_t sz; 4685 int i; 4686 4687 if (!mem_size) 4688 return 0; 4689 4690 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4691 BNXT_PAGE_SIZE; 4692 rmem->page_size = BNXT_PAGE_SIZE; 4693 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4694 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4695 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4696 4697 valid_bits = PTU_PTE_VALID; 4698 4699 if (rmem->nr_pages > 1) { 4700 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4701 "bnxt_ctx_pg_tbl%s_%x_%d", 4702 suffix, idx, bp->eth_dev->data->port_id); 4703 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4704 mz = rte_memzone_lookup(mz_name); 4705 if (!mz) { 4706 mz = rte_memzone_reserve_aligned(mz_name, 4707 rmem->nr_pages * 8, 4708 bp->eth_dev->device->numa_node, 4709 RTE_MEMZONE_2MB | 4710 RTE_MEMZONE_SIZE_HINT_ONLY | 4711 RTE_MEMZONE_IOVA_CONTIG, 4712 BNXT_PAGE_SIZE); 4713 if (mz == NULL) 4714 return -ENOMEM; 4715 } 4716 4717 memset(mz->addr, 0, mz->len); 4718 mz_phys_addr = mz->iova; 4719 4720 rmem->pg_tbl = mz->addr; 4721 rmem->pg_tbl_map = mz_phys_addr; 4722 rmem->pg_tbl_mz = mz; 4723 } 4724 4725 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4726 suffix, idx, bp->eth_dev->data->port_id); 4727 mz = rte_memzone_lookup(mz_name); 4728 if (!mz) { 4729 mz = rte_memzone_reserve_aligned(mz_name, 4730 mem_size, 4731 bp->eth_dev->device->numa_node, 4732 RTE_MEMZONE_1GB | 4733 RTE_MEMZONE_SIZE_HINT_ONLY | 4734 RTE_MEMZONE_IOVA_CONTIG, 4735 BNXT_PAGE_SIZE); 4736 if (mz == NULL) 4737 return -ENOMEM; 4738 } 4739 4740 memset(mz->addr, 0, mz->len); 4741 mz_phys_addr = mz->iova; 4742 4743 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4744 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4745 rmem->dma_arr[i] = mz_phys_addr + sz; 4746 4747 if (rmem->nr_pages > 1) { 4748 if (i == rmem->nr_pages - 2 && 4749 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4750 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4751 else if (i == rmem->nr_pages - 1 && 4752 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4753 valid_bits |= PTU_PTE_LAST; 4754 4755 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4756 valid_bits); 4757 } 4758 } 4759 4760 rmem->mz = mz; 4761 if (rmem->vmem_size) 4762 rmem->vmem = (void **)mz->addr; 4763 rmem->dma_arr[0] = mz_phys_addr; 4764 return 0; 4765 } 4766 4767 static void bnxt_free_ctx_mem(struct bnxt *bp) 4768 { 4769 int i; 4770 4771 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4772 return; 4773 4774 bp->ctx->flags &= 
~BNXT_CTX_FLAG_INITED; 4775 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4776 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4777 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4778 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4779 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4780 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4781 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4782 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4783 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4784 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4785 4786 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4787 if (bp->ctx->tqm_mem[i]) 4788 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4789 } 4790 4791 rte_free(bp->ctx); 4792 bp->ctx = NULL; 4793 } 4794 4795 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4796 4797 #define min_t(type, x, y) ({ \ 4798 type __min1 = (x); \ 4799 type __min2 = (y); \ 4800 __min1 < __min2 ? __min1 : __min2; }) 4801 4802 #define max_t(type, x, y) ({ \ 4803 type __max1 = (x); \ 4804 type __max2 = (y); \ 4805 __max1 > __max2 ? __max1 : __max2; }) 4806 4807 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4808 4809 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4810 { 4811 struct bnxt_ctx_pg_info *ctx_pg; 4812 struct bnxt_ctx_mem_info *ctx; 4813 uint32_t mem_size, ena, entries; 4814 uint32_t entries_sp, min; 4815 int i, rc; 4816 4817 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4818 if (rc) { 4819 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4820 return rc; 4821 } 4822 ctx = bp->ctx; 4823 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4824 return 0; 4825 4826 ctx_pg = &ctx->qp_mem; 4827 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4828 if (ctx->qp_entry_size) { 4829 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4830 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4831 if (rc) 4832 return rc; 4833 } 4834 4835 ctx_pg = &ctx->srq_mem; 4836 ctx_pg->entries = ctx->srq_max_l2_entries; 4837 if (ctx->srq_entry_size) { 4838 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4839 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4840 if (rc) 4841 return rc; 4842 } 4843 4844 ctx_pg = &ctx->cq_mem; 4845 ctx_pg->entries = ctx->cq_max_l2_entries; 4846 if (ctx->cq_entry_size) { 4847 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4848 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4849 if (rc) 4850 return rc; 4851 } 4852 4853 ctx_pg = &ctx->vnic_mem; 4854 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4855 ctx->vnic_max_ring_table_entries; 4856 if (ctx->vnic_entry_size) { 4857 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4858 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4859 if (rc) 4860 return rc; 4861 } 4862 4863 ctx_pg = &ctx->stat_mem; 4864 ctx_pg->entries = ctx->stat_max_entries; 4865 if (ctx->stat_entry_size) { 4866 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4867 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4868 if (rc) 4869 return rc; 4870 } 4871 4872 min = ctx->tqm_min_entries_per_ring; 4873 4874 entries_sp = ctx->qp_max_l2_entries + 4875 ctx->vnic_max_vnic_entries + 4876 2 * ctx->qp_min_qp1_entries + min; 4877 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4878 4879 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4880 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4881 entries = clamp_t(uint32_t, entries, 
min, 4882 ctx->tqm_max_entries_per_ring); 4883 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 4884 /* i=0 is for TQM_SP. i=1 to i=8 applies to RING0 to RING7. 4885 * i > 8 is other ext rings. 4886 */ 4887 ctx_pg = ctx->tqm_mem[i]; 4888 ctx_pg->entries = i ? entries : entries_sp; 4889 if (ctx->tqm_entry_size) { 4890 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4891 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, 4892 "tqm_mem", i); 4893 if (rc) 4894 return rc; 4895 } 4896 if (i < BNXT_MAX_TQM_LEGACY_RINGS) 4897 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4898 else 4899 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8; 4900 } 4901 4902 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4903 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4904 if (rc) 4905 PMD_DRV_LOG(ERR, 4906 "Failed to configure context mem: rc = %d\n", rc); 4907 else 4908 ctx->flags |= BNXT_CTX_FLAG_INITED; 4909 4910 return rc; 4911 } 4912 4913 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4914 { 4915 struct rte_pci_device *pci_dev = bp->pdev; 4916 char mz_name[RTE_MEMZONE_NAMESIZE]; 4917 const struct rte_memzone *mz = NULL; 4918 uint32_t total_alloc_len; 4919 rte_iova_t mz_phys_addr; 4920 4921 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4922 return 0; 4923 4924 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4925 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4926 pci_dev->addr.bus, pci_dev->addr.devid, 4927 pci_dev->addr.function, "rx_port_stats"); 4928 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4929 mz = rte_memzone_lookup(mz_name); 4930 total_alloc_len = 4931 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4932 sizeof(struct rx_port_stats_ext) + 512); 4933 if (!mz) { 4934 mz = rte_memzone_reserve(mz_name, total_alloc_len, 4935 SOCKET_ID_ANY, 4936 RTE_MEMZONE_2MB | 4937 RTE_MEMZONE_SIZE_HINT_ONLY | 4938 RTE_MEMZONE_IOVA_CONTIG); 4939 if (mz == NULL) 4940 return -ENOMEM; 4941 } 4942 memset(mz->addr, 0, mz->len); 4943 mz_phys_addr = mz->iova; 4944 4945 bp->rx_mem_zone = (const void *)mz; 4946 bp->hw_rx_port_stats = mz->addr; 4947 bp->hw_rx_port_stats_map = mz_phys_addr; 4948 4949 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4950 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4951 pci_dev->addr.bus, pci_dev->addr.devid, 4952 pci_dev->addr.function, "tx_port_stats"); 4953 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4954 mz = rte_memzone_lookup(mz_name); 4955 total_alloc_len = 4956 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 4957 sizeof(struct tx_port_stats_ext) + 512); 4958 if (!mz) { 4959 mz = rte_memzone_reserve(mz_name, 4960 total_alloc_len, 4961 SOCKET_ID_ANY, 4962 RTE_MEMZONE_2MB | 4963 RTE_MEMZONE_SIZE_HINT_ONLY | 4964 RTE_MEMZONE_IOVA_CONTIG); 4965 if (mz == NULL) 4966 return -ENOMEM; 4967 } 4968 memset(mz->addr, 0, mz->len); 4969 mz_phys_addr = mz->iova; 4970 4971 bp->tx_mem_zone = (const void *)mz; 4972 bp->hw_tx_port_stats = mz->addr; 4973 bp->hw_tx_port_stats_map = mz_phys_addr; 4974 bp->flags |= BNXT_FLAG_PORT_STATS; 4975 4976 /* Display extended statistics if FW supports it */ 4977 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 4978 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 4979 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 4980 return 0; 4981 4982 bp->hw_rx_port_stats_ext = (void *) 4983 ((uint8_t *)bp->hw_rx_port_stats + 4984 sizeof(struct rx_port_stats)); 4985 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 4986 sizeof(struct rx_port_stats); 4987 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 4988 4989 if (bp->hwrm_spec_code < 
HWRM_SPEC_CODE_1_9_2 ||
	    bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) {
		bp->hw_tx_port_stats_ext = (void *)
			((uint8_t *)bp->hw_tx_port_stats +
			 sizeof(struct tx_port_stats));
		bp->hw_tx_port_stats_ext_map =
			bp->hw_tx_port_stats_map +
			sizeof(struct tx_port_stats);
		bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS;
	}

	return 0;
}

static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc = 0;

	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					       RTE_ETHER_ADDR_LEN *
					       bp->max_l2_ctx,
					       0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n");
		return -ENOMEM;
	}

	if (!BNXT_HAS_DFLT_MAC_SET(bp)) {
		if (BNXT_PF(bp))
			return -EINVAL;

		/* Generate a random MAC address, if none was assigned by PF */
		PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n");
		bnxt_eth_hw_addr_random(bp->mac_addr);
		PMD_DRV_LOG(INFO,
			    "Assign random MAC:" RTE_ETHER_ADDR_PRT_FMT "\n",
			    bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2],
			    bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]);

		rc = bnxt_hwrm_set_mac(bp);
		if (rc)
			return rc;
	}

	/* Copy the permanent MAC from the FUNC_QCAPS response */
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);

	return rc;
}

static int bnxt_restore_dflt_mac(struct bnxt *bp)
{
	int rc = 0;

	/* MAC is already configured in FW */
	if (BNXT_HAS_DFLT_MAC_SET(bp))
		return 0;

	/* Restore the old MAC configured */
	rc = bnxt_hwrm_set_mac(bp);
	if (rc)
		PMD_DRV_LOG(ERR, "Failed to restore MAC address\n");

	return rc;
}

static void bnxt_config_vf_req_fwd(struct bnxt *bp)
{
	if (!BNXT_PF(bp))
		return;

	memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));

	if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
		BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
	BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
}

struct bnxt *
bnxt_get_bp(uint16_t port)
{
	struct bnxt *bp;
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port)) {
		PMD_DRV_LOG(ERR, "Invalid port %d\n", port);
		return NULL;
	}

	dev = &rte_eth_devices[port];
	if (!is_bnxt_supported(dev)) {
		PMD_DRV_LOG(ERR, "Device %d not supported\n", port);
		return NULL;
	}

	bp = (struct bnxt *)dev->data->dev_private;
	if (!BNXT_TRUFLOW_EN(bp)) {
		PMD_DRV_LOG(ERR, "TRUFLOW not enabled\n");
		return NULL;
	}

	return bp;
}

uint16_t
bnxt_get_svif(uint16_t port_id, bool func_svif,
	      enum bnxt_ulp_intf_type type)
{
	struct rte_eth_dev *eth_dev;
	struct bnxt *bp;

	eth_dev = &rte_eth_devices[port_id];
	if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
		struct bnxt_representor *vfr = eth_dev->data->dev_private;
		if (!vfr)
			return 0;

		if (type == BNXT_ULP_INTF_TYPE_VF_REP)
			return vfr->svif;

		eth_dev = vfr->parent_dev;
	}

	bp = eth_dev->data->dev_private;

	return func_svif ?
bp->func_svif : bp->port_svif; 5119 } 5120 5121 void 5122 bnxt_get_iface_mac(uint16_t port, enum bnxt_ulp_intf_type type, 5123 uint8_t *mac, uint8_t *parent_mac) 5124 { 5125 struct rte_eth_dev *eth_dev; 5126 struct bnxt *bp; 5127 5128 if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF && 5129 type != BNXT_ULP_INTF_TYPE_PF) 5130 return; 5131 5132 eth_dev = &rte_eth_devices[port]; 5133 bp = eth_dev->data->dev_private; 5134 memcpy(mac, bp->mac_addr, RTE_ETHER_ADDR_LEN); 5135 5136 if (type == BNXT_ULP_INTF_TYPE_TRUSTED_VF) 5137 memcpy(parent_mac, bp->parent->mac_addr, RTE_ETHER_ADDR_LEN); 5138 } 5139 5140 uint16_t 5141 bnxt_get_parent_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 5142 { 5143 struct rte_eth_dev *eth_dev; 5144 struct bnxt *bp; 5145 5146 if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) 5147 return 0; 5148 5149 eth_dev = &rte_eth_devices[port]; 5150 bp = eth_dev->data->dev_private; 5151 5152 return bp->parent->vnic; 5153 } 5154 uint16_t 5155 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 5156 { 5157 struct rte_eth_dev *eth_dev; 5158 struct bnxt_vnic_info *vnic; 5159 struct bnxt *bp; 5160 5161 eth_dev = &rte_eth_devices[port]; 5162 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5163 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5164 if (!vfr) 5165 return 0; 5166 5167 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5168 return vfr->dflt_vnic_id; 5169 5170 eth_dev = vfr->parent_dev; 5171 } 5172 5173 bp = eth_dev->data->dev_private; 5174 5175 vnic = BNXT_GET_DEFAULT_VNIC(bp); 5176 5177 return vnic->fw_vnic_id; 5178 } 5179 5180 uint16_t 5181 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type) 5182 { 5183 struct rte_eth_dev *eth_dev; 5184 struct bnxt *bp; 5185 5186 eth_dev = &rte_eth_devices[port]; 5187 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5188 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5189 if (!vfr) 5190 return 0; 5191 5192 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5193 return vfr->fw_fid; 5194 5195 eth_dev = vfr->parent_dev; 5196 } 5197 5198 bp = eth_dev->data->dev_private; 5199 5200 return bp->fw_fid; 5201 } 5202 5203 enum bnxt_ulp_intf_type 5204 bnxt_get_interface_type(uint16_t port) 5205 { 5206 struct rte_eth_dev *eth_dev; 5207 struct bnxt *bp; 5208 5209 eth_dev = &rte_eth_devices[port]; 5210 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) 5211 return BNXT_ULP_INTF_TYPE_VF_REP; 5212 5213 bp = eth_dev->data->dev_private; 5214 if (BNXT_PF(bp)) 5215 return BNXT_ULP_INTF_TYPE_PF; 5216 else if (BNXT_VF_IS_TRUSTED(bp)) 5217 return BNXT_ULP_INTF_TYPE_TRUSTED_VF; 5218 else if (BNXT_VF(bp)) 5219 return BNXT_ULP_INTF_TYPE_VF; 5220 5221 return BNXT_ULP_INTF_TYPE_INVALID; 5222 } 5223 5224 uint16_t 5225 bnxt_get_phy_port_id(uint16_t port_id) 5226 { 5227 struct bnxt_representor *vfr; 5228 struct rte_eth_dev *eth_dev; 5229 struct bnxt *bp; 5230 5231 eth_dev = &rte_eth_devices[port_id]; 5232 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5233 vfr = eth_dev->data->dev_private; 5234 if (!vfr) 5235 return 0; 5236 5237 eth_dev = vfr->parent_dev; 5238 } 5239 5240 bp = eth_dev->data->dev_private; 5241 5242 return BNXT_PF(bp) ? 
bp->pf->port_id : bp->parent->port_id; 5243 } 5244 5245 uint16_t 5246 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type) 5247 { 5248 struct rte_eth_dev *eth_dev; 5249 struct bnxt *bp; 5250 5251 eth_dev = &rte_eth_devices[port_id]; 5252 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 5253 struct bnxt_representor *vfr = eth_dev->data->dev_private; 5254 if (!vfr) 5255 return 0; 5256 5257 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 5258 return vfr->fw_fid - 1; 5259 5260 eth_dev = vfr->parent_dev; 5261 } 5262 5263 bp = eth_dev->data->dev_private; 5264 5265 return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1; 5266 } 5267 5268 uint16_t 5269 bnxt_get_vport(uint16_t port_id) 5270 { 5271 return (1 << bnxt_get_phy_port_id(port_id)); 5272 } 5273 5274 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 5275 { 5276 struct bnxt_error_recovery_info *info = bp->recovery_info; 5277 5278 if (info) { 5279 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 5280 memset(info, 0, sizeof(*info)); 5281 return; 5282 } 5283 5284 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5285 return; 5286 5287 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5288 sizeof(*info), 0); 5289 if (!info) 5290 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5291 5292 bp->recovery_info = info; 5293 } 5294 5295 static void bnxt_check_fw_status(struct bnxt *bp) 5296 { 5297 uint32_t fw_status; 5298 5299 if (!(bp->recovery_info && 5300 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 5301 return; 5302 5303 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 5304 if (fw_status != BNXT_FW_STATUS_HEALTHY) 5305 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 5306 fw_status); 5307 } 5308 5309 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 5310 { 5311 struct bnxt_error_recovery_info *info = bp->recovery_info; 5312 uint32_t status_loc; 5313 uint32_t sig_ver; 5314 5315 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 5316 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5317 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5318 BNXT_GRCP_WINDOW_2_BASE + 5319 offsetof(struct hcomm_status, 5320 sig_ver))); 5321 /* If the signature is absent, then FW does not support this feature */ 5322 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 5323 HCOMM_STATUS_SIGNATURE_VAL) 5324 return 0; 5325 5326 if (!info) { 5327 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 5328 sizeof(*info), 0); 5329 if (!info) 5330 return -ENOMEM; 5331 bp->recovery_info = info; 5332 } else { 5333 memset(info, 0, sizeof(*info)); 5334 } 5335 5336 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 5337 BNXT_GRCP_WINDOW_2_BASE + 5338 offsetof(struct hcomm_status, 5339 fw_status_loc))); 5340 5341 /* Only pre-map the FW health status GRC register */ 5342 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 5343 return 0; 5344 5345 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 5346 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 5347 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 5348 5349 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 5350 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 5351 5352 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 5353 5354 return 0; 5355 } 5356 5357 /* This function gets the FW version along with the 5358 * capabilities(MAX and current) of the function, vnic, 5359 * error recovery, phy and other chip related info 5360 */ 5361 static int bnxt_get_config(struct bnxt *bp) 5362 { 5363 uint16_t mtu; 5364 int rc = 0; 5365 5366 bp->fw_cap = 0; 5367 5368 rc 
= bnxt_map_hcomm_fw_status_reg(bp); 5369 if (rc) 5370 return rc; 5371 5372 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 5373 if (rc) { 5374 bnxt_check_fw_status(bp); 5375 return rc; 5376 } 5377 5378 rc = bnxt_hwrm_func_reset(bp); 5379 if (rc) 5380 return -EIO; 5381 5382 rc = bnxt_hwrm_vnic_qcaps(bp); 5383 if (rc) 5384 return rc; 5385 5386 rc = bnxt_hwrm_queue_qportcfg(bp); 5387 if (rc) 5388 return rc; 5389 5390 /* Get the MAX capabilities for this function. 5391 * This function also allocates context memory for TQM rings and 5392 * informs the firmware about this allocated backing store memory. 5393 */ 5394 rc = bnxt_hwrm_func_qcaps(bp); 5395 if (rc) 5396 return rc; 5397 5398 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 5399 if (rc) 5400 return rc; 5401 5402 rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp); 5403 if (rc) 5404 return rc; 5405 5406 bnxt_hwrm_port_mac_qcfg(bp); 5407 5408 bnxt_hwrm_parent_pf_qcfg(bp); 5409 5410 bnxt_hwrm_port_phy_qcaps(bp); 5411 5412 bnxt_alloc_error_recovery_info(bp); 5413 /* Get the adapter error recovery support info */ 5414 rc = bnxt_hwrm_error_recovery_qcfg(bp); 5415 if (rc) 5416 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5417 5418 bnxt_hwrm_port_led_qcaps(bp); 5419 5420 return 0; 5421 } 5422 5423 static int 5424 bnxt_init_locks(struct bnxt *bp) 5425 { 5426 int err; 5427 5428 err = pthread_mutex_init(&bp->flow_lock, NULL); 5429 if (err) { 5430 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 5431 return err; 5432 } 5433 5434 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 5435 if (err) { 5436 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 5437 return err; 5438 } 5439 5440 err = pthread_mutex_init(&bp->health_check_lock, NULL); 5441 if (err) { 5442 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 5443 return err; 5444 } 5445 5446 err = pthread_mutex_init(&bp->err_recovery_lock, NULL); 5447 if (err) 5448 PMD_DRV_LOG(ERR, "Unable to initialize err_recovery_lock\n"); 5449 5450 return err; 5451 } 5452 5453 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 5454 { 5455 int rc = 0; 5456 5457 rc = bnxt_get_config(bp); 5458 if (rc) 5459 return rc; 5460 5461 if (!reconfig_dev) { 5462 rc = bnxt_setup_mac_addr(bp->eth_dev); 5463 if (rc) 5464 return rc; 5465 } else { 5466 rc = bnxt_restore_dflt_mac(bp); 5467 if (rc) 5468 return rc; 5469 } 5470 5471 bnxt_config_vf_req_fwd(bp); 5472 5473 rc = bnxt_hwrm_func_driver_register(bp); 5474 if (rc) { 5475 PMD_DRV_LOG(ERR, "Failed to register driver"); 5476 return -EBUSY; 5477 } 5478 5479 if (BNXT_PF(bp)) { 5480 if (bp->pdev->max_vfs) { 5481 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 5482 if (rc) { 5483 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 5484 return rc; 5485 } 5486 } else { 5487 rc = bnxt_hwrm_allocate_pf_only(bp); 5488 if (rc) { 5489 PMD_DRV_LOG(ERR, 5490 "Failed to allocate PF resources"); 5491 return rc; 5492 } 5493 } 5494 } 5495 5496 rc = bnxt_alloc_mem(bp, reconfig_dev); 5497 if (rc) 5498 return rc; 5499 5500 rc = bnxt_setup_int(bp); 5501 if (rc) 5502 return rc; 5503 5504 rc = bnxt_request_int(bp); 5505 if (rc) 5506 return rc; 5507 5508 rc = bnxt_init_ctx_mem(bp); 5509 if (rc) { 5510 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 5511 return rc; 5512 } 5513 5514 return 0; 5515 } 5516 5517 static int 5518 bnxt_parse_devarg_accum_stats(__rte_unused const char *key, 5519 const char *value, void *opaque_arg) 5520 { 5521 struct bnxt *bp = opaque_arg; 5522 unsigned long accum_stats; 5523 char *end = NULL; 5524 5525 if (!value || !opaque_arg) { 5526 PMD_DRV_LOG(ERR, 
5527 "Invalid parameter passed to accum-stats devargs.\n"); 5528 return -EINVAL; 5529 } 5530 5531 accum_stats = strtoul(value, &end, 10); 5532 if (end == NULL || *end != '\0' || 5533 (accum_stats == ULONG_MAX && errno == ERANGE)) { 5534 PMD_DRV_LOG(ERR, 5535 "Invalid parameter passed to accum-stats devargs.\n"); 5536 return -EINVAL; 5537 } 5538 5539 if (BNXT_DEVARG_ACCUM_STATS_INVALID(accum_stats)) { 5540 PMD_DRV_LOG(ERR, 5541 "Invalid value passed to accum-stats devargs.\n"); 5542 return -EINVAL; 5543 } 5544 5545 if (accum_stats) { 5546 bp->flags2 |= BNXT_FLAGS2_ACCUM_STATS_EN; 5547 PMD_DRV_LOG(INFO, "Host-based accum-stats feature enabled.\n"); 5548 } else { 5549 bp->flags2 &= ~BNXT_FLAGS2_ACCUM_STATS_EN; 5550 PMD_DRV_LOG(INFO, "Host-based accum-stats feature disabled.\n"); 5551 } 5552 5553 return 0; 5554 } 5555 5556 static int 5557 bnxt_parse_devarg_flow_xstat(__rte_unused const char *key, 5558 const char *value, void *opaque_arg) 5559 { 5560 struct bnxt *bp = opaque_arg; 5561 unsigned long flow_xstat; 5562 char *end = NULL; 5563 5564 if (!value || !opaque_arg) { 5565 PMD_DRV_LOG(ERR, 5566 "Invalid parameter passed to flow_xstat devarg.\n"); 5567 return -EINVAL; 5568 } 5569 5570 flow_xstat = strtoul(value, &end, 10); 5571 if (end == NULL || *end != '\0' || 5572 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 5573 PMD_DRV_LOG(ERR, 5574 "Invalid parameter passed to flow_xstat devarg.\n"); 5575 return -EINVAL; 5576 } 5577 5578 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 5579 PMD_DRV_LOG(ERR, 5580 "Invalid value passed to flow_xstat devarg.\n"); 5581 return -EINVAL; 5582 } 5583 5584 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 5585 if (BNXT_FLOW_XSTATS_EN(bp)) 5586 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 5587 5588 return 0; 5589 } 5590 5591 static int 5592 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 5593 const char *value, void *opaque_arg) 5594 { 5595 struct bnxt *bp = opaque_arg; 5596 unsigned long max_num_kflows; 5597 char *end = NULL; 5598 5599 if (!value || !opaque_arg) { 5600 PMD_DRV_LOG(ERR, 5601 "Invalid parameter passed to max_num_kflows devarg.\n"); 5602 return -EINVAL; 5603 } 5604 5605 max_num_kflows = strtoul(value, &end, 10); 5606 if (end == NULL || *end != '\0' || 5607 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 5608 PMD_DRV_LOG(ERR, 5609 "Invalid parameter passed to max_num_kflows devarg.\n"); 5610 return -EINVAL; 5611 } 5612 5613 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 5614 PMD_DRV_LOG(ERR, 5615 "Invalid value passed to max_num_kflows devarg.\n"); 5616 return -EINVAL; 5617 } 5618 5619 bp->max_num_kflows = max_num_kflows; 5620 if (bp->max_num_kflows) 5621 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 5622 max_num_kflows); 5623 5624 return 0; 5625 } 5626 5627 static int 5628 bnxt_parse_devarg_app_id(__rte_unused const char *key, 5629 const char *value, void *opaque_arg) 5630 { 5631 struct bnxt *bp = opaque_arg; 5632 unsigned long app_id; 5633 char *end = NULL; 5634 5635 if (!value || !opaque_arg) { 5636 PMD_DRV_LOG(ERR, 5637 "Invalid parameter passed to app-id " 5638 "devargs.\n"); 5639 return -EINVAL; 5640 } 5641 5642 app_id = strtoul(value, &end, 10); 5643 if (end == NULL || *end != '\0' || 5644 (app_id == ULONG_MAX && errno == ERANGE)) { 5645 PMD_DRV_LOG(ERR, 5646 "Invalid parameter passed to app_id " 5647 "devargs.\n"); 5648 return -EINVAL; 5649 } 5650 5651 if (BNXT_DEVARG_APP_ID_INVALID(app_id)) { 5652 PMD_DRV_LOG(ERR, "Invalid app-id(%d) devargs.\n", 5653 (uint16_t)app_id); 5654 return -EINVAL; 5655 } 5656 5657 
bp->app_id = app_id; 5658 PMD_DRV_LOG(INFO, "app-id=%d feature enabled.\n", (uint16_t)app_id); 5659 5660 return 0; 5661 } 5662 5663 static int 5664 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 5665 const char *value, void *opaque_arg) 5666 { 5667 struct bnxt_representor *vfr_bp = opaque_arg; 5668 unsigned long rep_is_pf; 5669 char *end = NULL; 5670 5671 if (!value || !opaque_arg) { 5672 PMD_DRV_LOG(ERR, 5673 "Invalid parameter passed to rep_is_pf devargs.\n"); 5674 return -EINVAL; 5675 } 5676 5677 rep_is_pf = strtoul(value, &end, 10); 5678 if (end == NULL || *end != '\0' || 5679 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 5680 PMD_DRV_LOG(ERR, 5681 "Invalid parameter passed to rep_is_pf devargs.\n"); 5682 return -EINVAL; 5683 } 5684 5685 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 5686 PMD_DRV_LOG(ERR, 5687 "Invalid value passed to rep_is_pf devargs.\n"); 5688 return -EINVAL; 5689 } 5690 5691 vfr_bp->flags |= rep_is_pf; 5692 if (BNXT_REP_PF(vfr_bp)) 5693 PMD_DRV_LOG(INFO, "PF representor\n"); 5694 else 5695 PMD_DRV_LOG(INFO, "VF representor\n"); 5696 5697 return 0; 5698 } 5699 5700 static int 5701 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 5702 const char *value, void *opaque_arg) 5703 { 5704 struct bnxt_representor *vfr_bp = opaque_arg; 5705 unsigned long rep_based_pf; 5706 char *end = NULL; 5707 5708 if (!value || !opaque_arg) { 5709 PMD_DRV_LOG(ERR, 5710 "Invalid parameter passed to rep_based_pf " 5711 "devargs.\n"); 5712 return -EINVAL; 5713 } 5714 5715 rep_based_pf = strtoul(value, &end, 10); 5716 if (end == NULL || *end != '\0' || 5717 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 5718 PMD_DRV_LOG(ERR, 5719 "Invalid parameter passed to rep_based_pf " 5720 "devargs.\n"); 5721 return -EINVAL; 5722 } 5723 5724 if (BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 5725 PMD_DRV_LOG(ERR, 5726 "Invalid value passed to rep_based_pf devargs.\n"); 5727 return -EINVAL; 5728 } 5729 5730 vfr_bp->rep_based_pf = rep_based_pf; 5731 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 5732 5733 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 5734 5735 return 0; 5736 } 5737 5738 static int 5739 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 5740 const char *value, void *opaque_arg) 5741 { 5742 struct bnxt_representor *vfr_bp = opaque_arg; 5743 unsigned long rep_q_r2f; 5744 char *end = NULL; 5745 5746 if (!value || !opaque_arg) { 5747 PMD_DRV_LOG(ERR, 5748 "Invalid parameter passed to rep_q_r2f " 5749 "devargs.\n"); 5750 return -EINVAL; 5751 } 5752 5753 rep_q_r2f = strtoul(value, &end, 10); 5754 if (end == NULL || *end != '\0' || 5755 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5756 PMD_DRV_LOG(ERR, 5757 "Invalid parameter passed to rep_q_r2f " 5758 "devargs.\n"); 5759 return -EINVAL; 5760 } 5761 5762 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5763 PMD_DRV_LOG(ERR, 5764 "Invalid value passed to rep_q_r2f devargs.\n"); 5765 return -EINVAL; 5766 } 5767 5768 vfr_bp->rep_q_r2f = rep_q_r2f; 5769 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5770 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5771 5772 return 0; 5773 } 5774 5775 static int 5776 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5777 const char *value, void *opaque_arg) 5778 { 5779 struct bnxt_representor *vfr_bp = opaque_arg; 5780 unsigned long rep_q_f2r; 5781 char *end = NULL; 5782 5783 if (!value || !opaque_arg) { 5784 PMD_DRV_LOG(ERR, 5785 "Invalid parameter passed to rep_q_f2r " 5786 "devargs.\n"); 5787 return -EINVAL; 5788 } 5789 5790 rep_q_f2r = strtoul(value, 
&end, 10); 5791 if (end == NULL || *end != '\0' || 5792 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5793 PMD_DRV_LOG(ERR, 5794 "Invalid parameter passed to rep_q_f2r " 5795 "devargs.\n"); 5796 return -EINVAL; 5797 } 5798 5799 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5800 PMD_DRV_LOG(ERR, 5801 "Invalid value passed to rep_q_f2r devargs.\n"); 5802 return -EINVAL; 5803 } 5804 5805 vfr_bp->rep_q_f2r = rep_q_f2r; 5806 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5807 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5808 5809 return 0; 5810 } 5811 5812 static int 5813 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5814 const char *value, void *opaque_arg) 5815 { 5816 struct bnxt_representor *vfr_bp = opaque_arg; 5817 unsigned long rep_fc_r2f; 5818 char *end = NULL; 5819 5820 if (!value || !opaque_arg) { 5821 PMD_DRV_LOG(ERR, 5822 "Invalid parameter passed to rep_fc_r2f " 5823 "devargs.\n"); 5824 return -EINVAL; 5825 } 5826 5827 rep_fc_r2f = strtoul(value, &end, 10); 5828 if (end == NULL || *end != '\0' || 5829 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5830 PMD_DRV_LOG(ERR, 5831 "Invalid parameter passed to rep_fc_r2f " 5832 "devargs.\n"); 5833 return -EINVAL; 5834 } 5835 5836 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5837 PMD_DRV_LOG(ERR, 5838 "Invalid value passed to rep_fc_r2f devargs.\n"); 5839 return -EINVAL; 5840 } 5841 5842 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5843 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5844 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5845 5846 return 0; 5847 } 5848 5849 static int 5850 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5851 const char *value, void *opaque_arg) 5852 { 5853 struct bnxt_representor *vfr_bp = opaque_arg; 5854 unsigned long rep_fc_f2r; 5855 char *end = NULL; 5856 5857 if (!value || !opaque_arg) { 5858 PMD_DRV_LOG(ERR, 5859 "Invalid parameter passed to rep_fc_f2r " 5860 "devargs.\n"); 5861 return -EINVAL; 5862 } 5863 5864 rep_fc_f2r = strtoul(value, &end, 10); 5865 if (end == NULL || *end != '\0' || 5866 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5867 PMD_DRV_LOG(ERR, 5868 "Invalid parameter passed to rep_fc_f2r " 5869 "devargs.\n"); 5870 return -EINVAL; 5871 } 5872 5873 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5874 PMD_DRV_LOG(ERR, 5875 "Invalid value passed to rep_fc_f2r devargs.\n"); 5876 return -EINVAL; 5877 } 5878 5879 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5880 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5881 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5882 5883 return 0; 5884 } 5885 5886 static int 5887 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5888 { 5889 struct rte_kvargs *kvlist; 5890 int ret; 5891 5892 if (devargs == NULL) 5893 return 0; 5894 5895 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5896 if (kvlist == NULL) 5897 return -EINVAL; 5898 5899 /* 5900 * Handler for "flow_xstat" devarg. 5901 * Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1" 5902 */ 5903 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5904 bnxt_parse_devarg_flow_xstat, bp); 5905 if (ret) 5906 goto err; 5907 5908 /* 5909 * Handler for "accum-stats" devarg. 5910 * Invoked as for ex: "-a 0000:00:0d.0,accum-stats=1" 5911 */ 5912 rte_kvargs_process(kvlist, BNXT_DEVARG_ACCUM_STATS, 5913 bnxt_parse_devarg_accum_stats, bp); 5914 /* 5915 * Handler for "max_num_kflows" devarg. 
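	 * The value is specified in units of 1K flows.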
5916 * Invoked as for ex: "-a 000:00:0d.0,max_num_kflows=32" 5917 */ 5918 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5919 bnxt_parse_devarg_max_num_kflows, bp); 5920 if (ret) 5921 goto err; 5922 5923 err: 5924 /* 5925 * Handler for "app-id" devarg. 5926 * Invoked as for ex: "-a 000:00:0d.0,app-id=1" 5927 */ 5928 rte_kvargs_process(kvlist, BNXT_DEVARG_APP_ID, 5929 bnxt_parse_devarg_app_id, bp); 5930 5931 rte_kvargs_free(kvlist); 5932 return ret; 5933 } 5934 5935 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5936 { 5937 int rc = 0; 5938 5939 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5940 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5941 if (rc) 5942 PMD_DRV_LOG(ERR, 5943 "Failed to alloc switch domain: %d\n", rc); 5944 else 5945 PMD_DRV_LOG(INFO, 5946 "Switch domain allocated %d\n", 5947 bp->switch_domain_id); 5948 } 5949 5950 return rc; 5951 } 5952 5953 /* Allocate and initialize various fields in bnxt struct that 5954 * need to be allocated/destroyed only once in the lifetime of the driver 5955 */ 5956 static int bnxt_drv_init(struct rte_eth_dev *eth_dev) 5957 { 5958 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5959 struct bnxt *bp = eth_dev->data->dev_private; 5960 int rc = 0; 5961 5962 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5963 5964 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5965 bp->flags |= BNXT_FLAG_VF; 5966 5967 if (bnxt_p5_device(pci_dev->id.device_id)) 5968 bp->flags |= BNXT_FLAG_CHIP_P5; 5969 5970 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5971 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5972 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5973 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5974 bp->flags |= BNXT_FLAG_STINGRAY; 5975 5976 if (BNXT_TRUFLOW_EN(bp)) { 5977 /* extra mbuf field is required to store CFA code from mark */ 5978 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = { 5979 .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, 5980 .size = sizeof(bnxt_cfa_code_dynfield_t), 5981 .align = __alignof__(bnxt_cfa_code_dynfield_t), 5982 }; 5983 bnxt_cfa_code_dynfield_offset = 5984 rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc); 5985 if (bnxt_cfa_code_dynfield_offset < 0) { 5986 PMD_DRV_LOG(ERR, 5987 "Failed to register mbuf field for TruFlow mark\n"); 5988 return -rte_errno; 5989 } 5990 } 5991 5992 rc = bnxt_map_pci_bars(eth_dev); 5993 if (rc) { 5994 PMD_DRV_LOG(ERR, 5995 "Failed to initialize board rc: %x\n", rc); 5996 return rc; 5997 } 5998 5999 rc = bnxt_alloc_pf_info(bp); 6000 if (rc) 6001 return rc; 6002 6003 rc = bnxt_alloc_link_info(bp); 6004 if (rc) 6005 return rc; 6006 6007 rc = bnxt_alloc_parent_info(bp); 6008 if (rc) 6009 return rc; 6010 6011 rc = bnxt_alloc_hwrm_resources(bp); 6012 if (rc) { 6013 PMD_DRV_LOG(ERR, 6014 "Failed to allocate response buffer rc: %x\n", rc); 6015 return rc; 6016 } 6017 rc = bnxt_alloc_leds_info(bp); 6018 if (rc) 6019 return rc; 6020 6021 rc = bnxt_alloc_cos_queues(bp); 6022 if (rc) 6023 return rc; 6024 6025 rc = bnxt_init_locks(bp); 6026 if (rc) 6027 return rc; 6028 6029 rc = bnxt_alloc_switch_domain(bp); 6030 if (rc) 6031 return rc; 6032 6033 return rc; 6034 } 6035 6036 static int 6037 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 6038 { 6039 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 6040 static int version_printed; 6041 struct bnxt *bp; 6042 int rc; 6043 6044 if (version_printed++ == 0) 6045 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 6046 6047 eth_dev->dev_ops = &bnxt_dev_ops; 6048 
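	/* The burst handlers and queue-state callbacks below are set for
	 * every process; the remaining slow-path initialization is performed
	 * only by the primary process.
	 */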
eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 6049 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 6050 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 6051 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 6052 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 6053 6054 /* 6055 * For secondary processes, we don't initialise any further 6056 * as primary has already done this work. 6057 */ 6058 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 6059 return 0; 6060 6061 rte_eth_copy_pci_info(eth_dev, pci_dev); 6062 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 6063 6064 bp = eth_dev->data->dev_private; 6065 6066 /* Parse dev arguments passed on when starting the DPDK application. */ 6067 rc = bnxt_parse_dev_args(bp, pci_dev->device.devargs); 6068 if (rc) 6069 goto error_free; 6070 6071 rc = bnxt_drv_init(eth_dev); 6072 if (rc) 6073 goto error_free; 6074 6075 rc = bnxt_init_resources(bp, false); 6076 if (rc) 6077 goto error_free; 6078 6079 rc = bnxt_alloc_stats_mem(bp); 6080 if (rc) 6081 goto error_free; 6082 6083 PMD_DRV_LOG(INFO, 6084 "Found %s device at mem %" PRIX64 ", node addr %pM\n", 6085 DRV_MODULE_NAME, 6086 pci_dev->mem_resource[0].phys_addr, 6087 pci_dev->mem_resource[0].addr); 6088 6089 return 0; 6090 6091 error_free: 6092 bnxt_dev_uninit(eth_dev); 6093 return rc; 6094 } 6095 6096 6097 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 6098 { 6099 if (!ctx) 6100 return; 6101 6102 if (ctx->va) 6103 rte_free(ctx->va); 6104 6105 ctx->va = NULL; 6106 ctx->dma = RTE_BAD_IOVA; 6107 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 6108 } 6109 6110 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 6111 { 6112 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 6113 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 6114 bp->flow_stat->rx_fc_out_tbl.ctx_id, 6115 bp->flow_stat->max_fc, 6116 false); 6117 6118 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 6119 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 6120 bp->flow_stat->tx_fc_out_tbl.ctx_id, 6121 bp->flow_stat->max_fc, 6122 false); 6123 6124 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6125 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 6126 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6127 6128 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6129 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 6130 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6131 6132 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6133 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 6134 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6135 6136 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 6137 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 6138 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 6139 } 6140 6141 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 6142 { 6143 bnxt_unregister_fc_ctx_mem(bp); 6144 6145 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 6146 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 6147 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 6148 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 6149 } 6150 6151 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 6152 { 6153 if (BNXT_FLOW_XSTATS_EN(bp)) 6154 bnxt_uninit_fc_ctx_mem(bp); 6155 } 6156 6157 static void 6158 bnxt_free_error_recovery_info(struct bnxt *bp) 6159 { 6160 rte_free(bp->recovery_info); 6161 bp->recovery_info = NULL; 6162 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 6163 } 6164 6165 static int 
static int
bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
{
	int rc;

	bnxt_free_int(bp);
	bnxt_free_mem(bp, reconfig_dev);

	bnxt_hwrm_func_buf_unrgtr(bp);
	if (bp->pf != NULL) {
		rte_free(bp->pf->vf_req_buf);
		bp->pf->vf_req_buf = NULL;
	}

	rc = bnxt_hwrm_func_driver_unregister(bp);
	bp->flags &= ~BNXT_FLAG_REGISTERED;
	bnxt_free_ctx_mem(bp);
	if (!reconfig_dev) {
		bnxt_free_hwrm_resources(bp);
		bnxt_free_error_recovery_info(bp);
	}

	bnxt_uninit_ctx_mem(bp);

	bnxt_free_flow_stats_info(bp);
	if (bp->rep_info != NULL)
		bnxt_free_switch_domain(bp);
	bnxt_free_rep_info(bp);
	rte_free(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		bnxt_dev_close_op(eth_dev);

	return 0;
}

static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_dev *vf_rep_eth_dev;
	int ret = 0, i;

	if (!bp)
		return -EINVAL;

	for (i = 0; i < bp->num_reps; i++) {
		vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
		if (!vf_rep_eth_dev)
			continue;
		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n",
			    vf_rep_eth_dev->data->port_id);
		rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n",
		    eth_dev->data->port_id);
	ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);

	return ret;
}

static void bnxt_free_rep_info(struct bnxt *bp)
{
	rte_free(bp->rep_info);
	bp->rep_info = NULL;
	rte_free(bp->cfa_code_map);
	bp->cfa_code_map = NULL;
}

static int bnxt_init_rep_info(struct bnxt *bp)
{
	int i = 0, rc;

	if (bp->rep_info)
		return 0;

	bp->rep_info = rte_zmalloc("bnxt_rep_info",
				   sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS,
				   0);
	if (!bp->rep_info) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for rep info\n");
		return -ENOMEM;
	}
	bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map",
				       sizeof(*bp->cfa_code_map) *
				       BNXT_MAX_CFA_CODE, 0);
	if (!bp->cfa_code_map) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n");
		bnxt_free_rep_info(bp);
		return -ENOMEM;
	}

	for (i = 0; i < BNXT_MAX_CFA_CODE; i++)
		bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID;

	rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL);
	if (rc) {
		PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n");
		bnxt_free_rep_info(bp);
		return rc;
	}

	return rc;
}
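/*
 * Create the VF representor ports requested via the "representor" devarg.
 * Each representor is a separate ethdev backed by the PF/trusted VF port
 * (backing_eth_dev); the per-representor devargs (rep-is-pf, rep-based-pf,
 * rep-q-r2f/f2r, rep-fc-r2f/f2r) are parsed here for each port created.
 */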
static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev,
			       struct rte_eth_devargs *eth_da,
			       struct rte_eth_dev *backing_eth_dev,
			       const char *dev_args)
{
	struct rte_eth_dev *vf_rep_eth_dev;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct bnxt *backing_bp;
	uint16_t num_rep;
	int i, ret = 0;
	struct rte_kvargs *kvlist = NULL;

	if (eth_da->type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (eth_da->type != RTE_ETH_REPRESENTOR_VF) {
		PMD_DRV_LOG(ERR, "unsupported representor type %d\n",
			    eth_da->type);
		return -ENOTSUP;
	}
	num_rep = eth_da->nb_representor_ports;
	if (num_rep > BNXT_MAX_VF_REPS) {
		PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
			    num_rep, BNXT_MAX_VF_REPS);
		return -EINVAL;
	}

	if (num_rep >= RTE_MAX_ETHPORTS) {
		PMD_DRV_LOG(ERR,
			    "nb_representor_ports = %d > %d MAX ETHPORTS\n",
			    num_rep, RTE_MAX_ETHPORTS);
		return -EINVAL;
	}

	backing_bp = backing_eth_dev->data->dev_private;

	if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
		PMD_DRV_LOG(ERR,
			    "Not a PF or trusted VF. No Representor support\n");
		/* Returning an error is not an option.
		 * Applications are not handling this correctly
		 */
		return 0;
	}

	if (bnxt_init_rep_info(backing_bp))
		return 0;

	for (i = 0; i < num_rep; i++) {
		struct bnxt_representor representor = {
			.vf_id = eth_da->representor_ports[i],
			.switch_domain_id = backing_bp->switch_domain_id,
			.parent_dev = backing_eth_dev
		};

		if (representor.vf_id >= BNXT_MAX_VF_REPS) {
			PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
				    representor.vf_id, BNXT_MAX_VF_REPS);
			continue;
		}

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da->representor_ports[i]);

		kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args);
		if (kvlist) {
			/*
			 * Handler for "rep_is_pf" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
						 bnxt_parse_devarg_rep_is_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_based_pf" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist,
						 BNXT_DEVARG_REP_BASED_PF,
						 bnxt_parse_devarg_rep_based_pf,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_q_r2f" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
						 bnxt_parse_devarg_rep_q_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_q_f2r" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
						 bnxt_parse_devarg_rep_q_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_fc_r2f" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
						 bnxt_parse_devarg_rep_fc_r2f,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
			/*
			 * Handler for "rep_fc_f2r" devarg.
			 * Invoked as for ex: "-a 000:00:0d.0,
			 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
			 */
			ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
						 bnxt_parse_devarg_rep_fc_f2r,
						 (void *)&representor);
			if (ret) {
				ret = -EINVAL;
				goto err;
			}
		}

		ret = rte_eth_dev_create(&pci_dev->device, name,
					 sizeof(struct bnxt_representor),
					 NULL, NULL,
					 bnxt_representor_init,
					 &representor);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to create bnxt vf "
				    "representor %s.", name);
			goto err;
		}

		vf_rep_eth_dev = rte_eth_dev_allocated(name);
		if (!vf_rep_eth_dev) {
			PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
				    " for VF-Rep: %s.", name);
			ret = -ENODEV;
			goto err;
		}

		PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n",
			    backing_eth_dev->data->port_id);
		backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
			vf_rep_eth_dev;
		backing_bp->num_reps++;
	}

	rte_kvargs_free(kvlist);
	return 0;

err:
	/* If num_rep > 1, then rollback already created
	 * ports, since we'll be failing the probe anyway
	 */
	if (num_rep > 1)
		bnxt_pci_remove_dev_with_reps(backing_eth_dev);
	rte_errno = -ret;
	rte_kvargs_free(kvlist);

	return ret;
}

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			  struct rte_pci_device *pci_dev)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *backing_eth_dev;
	uint16_t num_rep;
	int ret = 0;

	if (pci_dev->device.devargs) {
		ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					    &eth_da);
		if (ret)
			return ret;
	}

	num_rep = eth_da.nb_representor_ports;
	PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
		    num_rep);

	/* We could come here after first level of probe is already invoked
	 * as part of an application bringup (OVS-DPDK vswitchd), so first check
	 * for already allocated eth_dev for the backing device (PF/Trusted VF)
	 */
	backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (backing_eth_dev == NULL) {
		ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					 sizeof(struct bnxt),
					 eth_dev_pci_specific_init, pci_dev,
					 bnxt_dev_init, NULL);

		if (ret || !num_rep)
			return ret;

		backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	}
	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
		    backing_eth_dev->data->port_id);

	if (!num_rep)
		return ret;

	/* probe representor ports now */
	ret = bnxt_rep_port_probe(pci_dev, &eth_da, backing_eth_dev,
				  pci_dev->device.devargs->args);

	return ret;
}
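/*
 * PCI remove callback: destroys the ethdev associated with the PCI device.
 * Representor ports use the representor-specific uninit path; secondary
 * processes fall back to the generic PCI remove helper.
 */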
static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!eth_dev)
		return 0; /* Invoked typically only by OVS-DPDK, by the
			   * time it comes here the eth_dev is already
			   * deleted by rte_eth_dev_close(), so returning
			   * success here at least allows cleanup to finish
			   */

	PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_representor_uninit);
		else
			return rte_eth_dev_destroy(eth_dev,
						   bnxt_dev_uninit);
	} else {
		return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
	}
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_INTR_RMV |
		     RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
					       * and OVS-DPDK
					       */
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
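/*
 * Example devargs usage (illustrative only; the PCI address and option
 * values below are placeholders, not taken from this file). Per-port and
 * per-representor options parsed in bnxt_parse_dev_args() and
 * bnxt_rep_port_probe() are supplied on the EAL allow list, e.g.:
 *
 *   dpdk-testpmd -a 0000:82:00.0,representor=[0-1],rep-based-pf=0,\
 *       rep-is-pf=0,rep-q-r2f=1,rep-q-f2r=1,rep-fc-r2f=0,rep-fc-f2r=1 -- -i
 *
 *   dpdk-testpmd -a 0000:82:00.0,flow-xstat=1,max-num-kflows=32,app-id=1 -- -i
 *
 * Out-of-range values are rejected by the BNXT_DEVARG_*_INVALID checks
 * applied in the corresponding devarg parse handlers.
 */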