1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2014-2018 Broadcom 3 * All rights reserved. 4 */ 5 6 #include <inttypes.h> 7 #include <stdbool.h> 8 9 #include <rte_dev.h> 10 #include <rte_ethdev_driver.h> 11 #include <rte_ethdev_pci.h> 12 #include <rte_malloc.h> 13 #include <rte_cycles.h> 14 #include <rte_alarm.h> 15 #include <rte_kvargs.h> 16 #include <rte_vect.h> 17 18 #include "bnxt.h" 19 #include "bnxt_filter.h" 20 #include "bnxt_hwrm.h" 21 #include "bnxt_irq.h" 22 #include "bnxt_reps.h" 23 #include "bnxt_ring.h" 24 #include "bnxt_rxq.h" 25 #include "bnxt_rxr.h" 26 #include "bnxt_stats.h" 27 #include "bnxt_txq.h" 28 #include "bnxt_txr.h" 29 #include "bnxt_vnic.h" 30 #include "hsi_struct_def_dpdk.h" 31 #include "bnxt_nvm_defs.h" 32 #include "bnxt_tf_common.h" 33 #include "ulp_flow_db.h" 34 #include "rte_pmd_bnxt.h" 35 36 #define DRV_MODULE_NAME "bnxt" 37 static const char bnxt_version[] = 38 "Broadcom NetXtreme driver " DRV_MODULE_NAME; 39 40 /* 41 * The set of PCI devices this driver supports 42 */ 43 static const struct rte_pci_id bnxt_pci_id_map[] = { 44 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 45 BROADCOM_DEV_ID_STRATUS_NIC_VF1) }, 46 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 47 BROADCOM_DEV_ID_STRATUS_NIC_VF2) }, 48 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, 49 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, 50 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, 51 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) }, 52 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) }, 53 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, 54 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, 55 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) }, 56 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) }, 57 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) }, 58 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 59 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) }, 60 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) }, 61 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) }, 62 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) }, 63 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) }, 64 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 65 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 66 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 67 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) }, 68 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 69 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) }, 70 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) }, 71 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 72 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 73 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 74 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 75 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 76 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 77 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 78 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 79 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
BROADCOM_DEV_ID_57317_SFP) }, 80 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 81 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 82 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, 83 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, 84 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, 85 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, 86 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508) }, 87 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504) }, 88 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502) }, 89 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF1) }, 90 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57500_VF2) }, 91 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF1) }, 92 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF1) }, 93 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF1) }, 94 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57508_MF2) }, 95 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57504_MF2) }, 96 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57502_MF2) }, 97 { .vendor_id = 0, /* sentinel */ }, 98 }; 99 100 #define BNXT_DEVARG_TRUFLOW "host-based-truflow" 101 #define BNXT_DEVARG_FLOW_XSTAT "flow-xstat" 102 #define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows" 103 #define BNXT_DEVARG_REPRESENTOR "representor" 104 #define BNXT_DEVARG_REP_BASED_PF "rep-based-pf" 105 #define BNXT_DEVARG_REP_IS_PF "rep-is-pf" 106 #define BNXT_DEVARG_REP_Q_R2F "rep-q-r2f" 107 #define BNXT_DEVARG_REP_Q_F2R "rep-q-f2r" 108 #define BNXT_DEVARG_REP_FC_R2F "rep-fc-r2f" 109 #define BNXT_DEVARG_REP_FC_F2R "rep-fc-f2r" 110 111 static const char *const bnxt_dev_args[] = { 112 BNXT_DEVARG_REPRESENTOR, 113 BNXT_DEVARG_TRUFLOW, 114 BNXT_DEVARG_FLOW_XSTAT, 115 BNXT_DEVARG_MAX_NUM_KFLOWS, 116 BNXT_DEVARG_REP_BASED_PF, 117 BNXT_DEVARG_REP_IS_PF, 118 BNXT_DEVARG_REP_Q_R2F, 119 BNXT_DEVARG_REP_Q_F2R, 120 BNXT_DEVARG_REP_FC_R2F, 121 BNXT_DEVARG_REP_FC_F2R, 122 NULL 123 }; 124 125 /* 126 * truflow == false to disable the feature 127 * truflow == true to enable the feature 128 */ 129 #define BNXT_DEVARG_TRUFLOW_INVALID(truflow) ((truflow) > 1) 130 131 /* 132 * flow_xstat == false to disable the feature 133 * flow_xstat == true to enable the feature 134 */ 135 #define BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat) ((flow_xstat) > 1) 136 137 /* 138 * rep_is_pf == false to indicate VF representor 139 * rep_is_pf == true to indicate PF representor 140 */ 141 #define BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf) ((rep_is_pf) > 1) 142 143 /* 144 * rep_based_pf == Physical index of the PF 145 */ 146 #define BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf) ((rep_based_pf) > 15) 147 /* 148 * rep_q_r2f == Logical COS Queue index for the rep to endpoint direction 149 */ 150 #define BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f) ((rep_q_r2f) > 3) 151 152 /* 153 * rep_q_f2r == Logical COS Queue index for the endpoint to rep direction 154 */ 155 #define BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r) ((rep_q_f2r) > 3) 156 157 /* 158 * rep_fc_r2f == Flow control for the representor to endpoint direction 159 */ 160 #define BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f) ((rep_fc_r2f) > 1) 161 162 /* 163 * rep_fc_f2r == Flow control for the endpoint to representor direction 164 */ 165 #define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r) ((rep_fc_f2r) > 1) 166 167 int bnxt_cfa_code_dynfield_offset 
= -1; 168 169 /* 170 * max_num_kflows must be >= 32 171 * and must be a power-of-2 supported value 172 * return: 1 -> invalid 173 * 0 -> valid 174 */ 175 static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows) 176 { 177 if (max_num_kflows < 32 || !rte_is_power_of_2(max_num_kflows)) 178 return 1; 179 return 0; 180 } 181 182 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 183 static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); 184 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev); 185 static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev); 186 static void bnxt_cancel_fw_health_check(struct bnxt *bp); 187 static int bnxt_restore_vlan_filters(struct bnxt *bp); 188 static void bnxt_dev_recover(void *arg); 189 static void bnxt_free_error_recovery_info(struct bnxt *bp); 190 static void bnxt_free_rep_info(struct bnxt *bp); 191 192 int is_bnxt_in_error(struct bnxt *bp) 193 { 194 if (bp->flags & BNXT_FLAG_FATAL_ERROR) 195 return -EIO; 196 if (bp->flags & BNXT_FLAG_FW_RESET) 197 return -EBUSY; 198 199 return 0; 200 } 201 202 /***********************/ 203 204 /* 205 * High level utility functions 206 */ 207 208 static uint16_t bnxt_rss_ctxts(const struct bnxt *bp) 209 { 210 if (!BNXT_CHIP_THOR(bp)) 211 return 1; 212 213 return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings, 214 BNXT_RSS_ENTRIES_PER_CTX_THOR) / 215 BNXT_RSS_ENTRIES_PER_CTX_THOR; 216 } 217 218 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp) 219 { 220 if (!BNXT_CHIP_THOR(bp)) 221 return HW_HASH_INDEX_SIZE; 222 223 return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR; 224 } 225 226 static void bnxt_free_parent_info(struct bnxt *bp) 227 { 228 rte_free(bp->parent); 229 } 230 231 static void bnxt_free_pf_info(struct bnxt *bp) 232 { 233 rte_free(bp->pf); 234 } 235 236 static void bnxt_free_link_info(struct bnxt *bp) 237 { 238 rte_free(bp->link_info); 239 } 240 241 static void bnxt_free_leds_info(struct bnxt *bp) 242 { 243 if (BNXT_VF(bp)) 244 return; 245 246 rte_free(bp->leds); 247 bp->leds = NULL; 248 } 249 250 static void bnxt_free_flow_stats_info(struct bnxt *bp) 251 { 252 rte_free(bp->flow_stat); 253 bp->flow_stat = NULL; 254 } 255 256 static void bnxt_free_cos_queues(struct bnxt *bp) 257 { 258 rte_free(bp->rx_cos_queue); 259 rte_free(bp->tx_cos_queue); 260 } 261 262 static void bnxt_free_mem(struct bnxt *bp, bool reconfig) 263 { 264 bnxt_free_filter_mem(bp); 265 bnxt_free_vnic_attributes(bp); 266 bnxt_free_vnic_mem(bp); 267 268 /* tx/rx rings are configured as part of *_queue_setup callbacks. 269 * If the number of rings change across fw update, 270 * we don't have much choice except to warn the user. 
271 */ 272 if (!reconfig) { 273 bnxt_free_stats(bp); 274 bnxt_free_tx_rings(bp); 275 bnxt_free_rx_rings(bp); 276 } 277 bnxt_free_async_cp_ring(bp); 278 bnxt_free_rxtx_nq_ring(bp); 279 280 rte_free(bp->grp_info); 281 bp->grp_info = NULL; 282 } 283 284 static int bnxt_alloc_parent_info(struct bnxt *bp) 285 { 286 bp->parent = rte_zmalloc("bnxt_parent_info", 287 sizeof(struct bnxt_parent_info), 0); 288 if (bp->parent == NULL) 289 return -ENOMEM; 290 291 return 0; 292 } 293 294 static int bnxt_alloc_pf_info(struct bnxt *bp) 295 { 296 bp->pf = rte_zmalloc("bnxt_pf_info", sizeof(struct bnxt_pf_info), 0); 297 if (bp->pf == NULL) 298 return -ENOMEM; 299 300 return 0; 301 } 302 303 static int bnxt_alloc_link_info(struct bnxt *bp) 304 { 305 bp->link_info = 306 rte_zmalloc("bnxt_link_info", sizeof(struct bnxt_link_info), 0); 307 if (bp->link_info == NULL) 308 return -ENOMEM; 309 310 return 0; 311 } 312 313 static int bnxt_alloc_leds_info(struct bnxt *bp) 314 { 315 if (BNXT_VF(bp)) 316 return 0; 317 318 bp->leds = rte_zmalloc("bnxt_leds", 319 BNXT_MAX_LED * sizeof(struct bnxt_led_info), 320 0); 321 if (bp->leds == NULL) 322 return -ENOMEM; 323 324 return 0; 325 } 326 327 static int bnxt_alloc_cos_queues(struct bnxt *bp) 328 { 329 bp->rx_cos_queue = 330 rte_zmalloc("bnxt_rx_cosq", 331 BNXT_COS_QUEUE_COUNT * 332 sizeof(struct bnxt_cos_queue_info), 333 0); 334 if (bp->rx_cos_queue == NULL) 335 return -ENOMEM; 336 337 bp->tx_cos_queue = 338 rte_zmalloc("bnxt_tx_cosq", 339 BNXT_COS_QUEUE_COUNT * 340 sizeof(struct bnxt_cos_queue_info), 341 0); 342 if (bp->tx_cos_queue == NULL) 343 return -ENOMEM; 344 345 return 0; 346 } 347 348 static int bnxt_alloc_flow_stats_info(struct bnxt *bp) 349 { 350 bp->flow_stat = rte_zmalloc("bnxt_flow_xstat", 351 sizeof(struct bnxt_flow_stat_info), 0); 352 if (bp->flow_stat == NULL) 353 return -ENOMEM; 354 355 return 0; 356 } 357 358 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig) 359 { 360 int rc; 361 362 rc = bnxt_alloc_ring_grps(bp); 363 if (rc) 364 goto alloc_mem_err; 365 366 rc = bnxt_alloc_async_ring_struct(bp); 367 if (rc) 368 goto alloc_mem_err; 369 370 rc = bnxt_alloc_vnic_mem(bp); 371 if (rc) 372 goto alloc_mem_err; 373 374 rc = bnxt_alloc_vnic_attributes(bp); 375 if (rc) 376 goto alloc_mem_err; 377 378 rc = bnxt_alloc_filter_mem(bp); 379 if (rc) 380 goto alloc_mem_err; 381 382 rc = bnxt_alloc_async_cp_ring(bp); 383 if (rc) 384 goto alloc_mem_err; 385 386 rc = bnxt_alloc_rxtx_nq_ring(bp); 387 if (rc) 388 goto alloc_mem_err; 389 390 if (BNXT_FLOW_XSTATS_EN(bp)) { 391 rc = bnxt_alloc_flow_stats_info(bp); 392 if (rc) 393 goto alloc_mem_err; 394 } 395 396 return 0; 397 398 alloc_mem_err: 399 bnxt_free_mem(bp, reconfig); 400 return rc; 401 } 402 403 static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id) 404 { 405 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 406 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 407 uint64_t rx_offloads = dev_conf->rxmode.offloads; 408 struct bnxt_rx_queue *rxq; 409 unsigned int j; 410 int rc; 411 412 rc = bnxt_vnic_grp_alloc(bp, vnic); 413 if (rc) 414 goto err_out; 415 416 PMD_DRV_LOG(DEBUG, "vnic[%d] = %p vnic->fw_grp_ids = %p\n", 417 vnic_id, vnic, vnic->fw_grp_ids); 418 419 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 420 if (rc) 421 goto err_out; 422 423 /* Alloc RSS context only if RSS mode is enabled */ 424 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { 425 int j, nr_ctxs = bnxt_rss_ctxts(bp); 426 427 rc = 0; 428 for (j = 0; j < nr_ctxs; j++) { 429 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j); 430 if 
(rc) 431 break; 432 } 433 if (rc) { 434 PMD_DRV_LOG(ERR, 435 "HWRM vnic %d ctx %d alloc failure rc: %x\n", 436 vnic_id, j, rc); 437 goto err_out; 438 } 439 vnic->num_lb_ctxts = nr_ctxs; 440 } 441 442 /* 443 * Firmware sets pf pair in default vnic cfg. If the VLAN strip 444 * setting is not available at this time, it will not be 445 * configured correctly in the CFA. 446 */ 447 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 448 vnic->vlan_strip = true; 449 else 450 vnic->vlan_strip = false; 451 452 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 453 if (rc) 454 goto err_out; 455 456 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 457 if (rc) 458 goto err_out; 459 460 for (j = 0; j < bp->rx_num_qs_per_vnic; j++) { 461 rxq = bp->eth_dev->data->rx_queues[j]; 462 463 PMD_DRV_LOG(DEBUG, 464 "rxq[%d]->vnic=%p vnic->fw_grp_ids=%p\n", 465 j, rxq->vnic, rxq->vnic->fw_grp_ids); 466 467 if (BNXT_HAS_RING_GRPS(bp) && rxq->rx_deferred_start) 468 rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; 469 else 470 vnic->rx_queue_cnt++; 471 } 472 473 PMD_DRV_LOG(DEBUG, "vnic->rx_queue_cnt = %d\n", vnic->rx_queue_cnt); 474 475 rc = bnxt_vnic_rss_configure(bp, vnic); 476 if (rc) 477 goto err_out; 478 479 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 480 481 if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) 482 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); 483 else 484 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); 485 486 return 0; 487 err_out: 488 PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n", 489 vnic_id, rc); 490 return rc; 491 } 492 493 static int bnxt_register_fc_ctx_mem(struct bnxt *bp) 494 { 495 int rc = 0; 496 497 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma, 498 &bp->flow_stat->rx_fc_in_tbl.ctx_id); 499 if (rc) 500 return rc; 501 502 PMD_DRV_LOG(DEBUG, 503 "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p" 504 " rx_fc_in_tbl.ctx_id = %d\n", 505 bp->flow_stat->rx_fc_in_tbl.va, 506 (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma), 507 bp->flow_stat->rx_fc_in_tbl.ctx_id); 508 509 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma, 510 &bp->flow_stat->rx_fc_out_tbl.ctx_id); 511 if (rc) 512 return rc; 513 514 PMD_DRV_LOG(DEBUG, 515 "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p" 516 " rx_fc_out_tbl.ctx_id = %d\n", 517 bp->flow_stat->rx_fc_out_tbl.va, 518 (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma), 519 bp->flow_stat->rx_fc_out_tbl.ctx_id); 520 521 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma, 522 &bp->flow_stat->tx_fc_in_tbl.ctx_id); 523 if (rc) 524 return rc; 525 526 PMD_DRV_LOG(DEBUG, 527 "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p" 528 " tx_fc_in_tbl.ctx_id = %d\n", 529 bp->flow_stat->tx_fc_in_tbl.va, 530 (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma), 531 bp->flow_stat->tx_fc_in_tbl.ctx_id); 532 533 rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma, 534 &bp->flow_stat->tx_fc_out_tbl.ctx_id); 535 if (rc) 536 return rc; 537 538 PMD_DRV_LOG(DEBUG, 539 "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p" 540 " tx_fc_out_tbl.ctx_id = %d\n", 541 bp->flow_stat->tx_fc_out_tbl.va, 542 (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma), 543 bp->flow_stat->tx_fc_out_tbl.ctx_id); 544 545 memset(bp->flow_stat->rx_fc_out_tbl.va, 546 0, 547 bp->flow_stat->rx_fc_out_tbl.size); 548 rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 549 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 550 bp->flow_stat->rx_fc_out_tbl.ctx_id, 551 bp->flow_stat->max_fc, 552 true); 553 if (rc) 554 return rc; 555 556 memset(bp->flow_stat->tx_fc_out_tbl.va, 557 0, 558 bp->flow_stat->tx_fc_out_tbl.size); 559 rc = bnxt_hwrm_cfa_counter_cfg(bp, 
BNXT_DIR_TX, 560 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 561 bp->flow_stat->tx_fc_out_tbl.ctx_id, 562 bp->flow_stat->max_fc, 563 true); 564 565 return rc; 566 } 567 568 static int bnxt_alloc_ctx_mem_buf(char *type, size_t size, 569 struct bnxt_ctx_mem_buf_info *ctx) 570 { 571 if (!ctx) 572 return -EINVAL; 573 574 ctx->va = rte_zmalloc(type, size, 0); 575 if (ctx->va == NULL) 576 return -ENOMEM; 577 rte_mem_lock_page(ctx->va); 578 ctx->size = size; 579 ctx->dma = rte_mem_virt2iova(ctx->va); 580 if (ctx->dma == RTE_BAD_IOVA) 581 return -ENOMEM; 582 583 return 0; 584 } 585 586 static int bnxt_init_fc_ctx_mem(struct bnxt *bp) 587 { 588 struct rte_pci_device *pdev = bp->pdev; 589 char type[RTE_MEMZONE_NAMESIZE]; 590 uint16_t max_fc; 591 int rc = 0; 592 593 max_fc = bp->flow_stat->max_fc; 594 595 sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 596 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 597 /* 4 bytes for each counter-id */ 598 rc = bnxt_alloc_ctx_mem_buf(type, 599 max_fc * 4, 600 &bp->flow_stat->rx_fc_in_tbl); 601 if (rc) 602 return rc; 603 604 sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 605 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 606 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 607 rc = bnxt_alloc_ctx_mem_buf(type, 608 max_fc * 16, 609 &bp->flow_stat->rx_fc_out_tbl); 610 if (rc) 611 return rc; 612 613 sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain, 614 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 615 /* 4 bytes for each counter-id */ 616 rc = bnxt_alloc_ctx_mem_buf(type, 617 max_fc * 4, 618 &bp->flow_stat->tx_fc_in_tbl); 619 if (rc) 620 return rc; 621 622 sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain, 623 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 624 /* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */ 625 rc = bnxt_alloc_ctx_mem_buf(type, 626 max_fc * 16, 627 &bp->flow_stat->tx_fc_out_tbl); 628 if (rc) 629 return rc; 630 631 rc = bnxt_register_fc_ctx_mem(bp); 632 633 return rc; 634 } 635 636 static int bnxt_init_ctx_mem(struct bnxt *bp) 637 { 638 int rc = 0; 639 640 if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) || 641 !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) || 642 !BNXT_FLOW_XSTATS_EN(bp)) 643 return 0; 644 645 rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc); 646 if (rc) 647 return rc; 648 649 rc = bnxt_init_fc_ctx_mem(bp); 650 651 return rc; 652 } 653 654 static int bnxt_update_phy_setting(struct bnxt *bp) 655 { 656 struct rte_eth_link new; 657 int rc; 658 659 rc = bnxt_get_hwrm_link_config(bp, &new); 660 if (rc) { 661 PMD_DRV_LOG(ERR, "Failed to get link settings\n"); 662 return rc; 663 } 664 665 /* 666 * On BCM957508-N2100 adapters, FW will not allow any user other 667 * than BMC to shutdown the port. bnxt_get_hwrm_link_config() call 668 * always returns link up. Force phy update always in that case. 
669 */ 670 if (!new.link_status || IS_BNXT_DEV_957508_N2100(bp)) { 671 rc = bnxt_set_hwrm_link_config(bp, true); 672 if (rc) { 673 PMD_DRV_LOG(ERR, "Failed to update PHY settings\n"); 674 return rc; 675 } 676 } 677 678 return rc; 679 } 680 681 static int bnxt_init_chip(struct bnxt *bp) 682 { 683 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 684 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 685 uint32_t intr_vector = 0; 686 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 687 uint32_t vec = BNXT_MISC_VEC_ID; 688 unsigned int i, j; 689 int rc; 690 691 if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) { 692 bp->eth_dev->data->dev_conf.rxmode.offloads |= 693 DEV_RX_OFFLOAD_JUMBO_FRAME; 694 bp->flags |= BNXT_FLAG_JUMBO; 695 } else { 696 bp->eth_dev->data->dev_conf.rxmode.offloads &= 697 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 698 bp->flags &= ~BNXT_FLAG_JUMBO; 699 } 700 701 /* THOR does not support ring groups. 702 * But we will use the array to save RSS context IDs. 703 */ 704 if (BNXT_CHIP_THOR(bp)) 705 bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR; 706 707 rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); 708 if (rc) { 709 PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc); 710 goto err_out; 711 } 712 713 rc = bnxt_alloc_hwrm_rings(bp); 714 if (rc) { 715 PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc); 716 goto err_out; 717 } 718 719 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 720 if (rc) { 721 PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc); 722 goto err_out; 723 } 724 725 if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)) 726 goto skip_cosq_cfg; 727 728 for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { 729 if (bp->rx_cos_queue[i].id != 0xff) { 730 struct bnxt_vnic_info *vnic = &bp->vnic_info[j++]; 731 732 if (!vnic) { 733 PMD_DRV_LOG(ERR, 734 "Num pools more than FW profile\n"); 735 rc = -EINVAL; 736 goto err_out; 737 } 738 vnic->cos_queue_id = bp->rx_cos_queue[i].id; 739 bp->rx_cosq_cnt++; 740 } 741 } 742 743 skip_cosq_cfg: 744 rc = bnxt_mq_rx_configure(bp); 745 if (rc) { 746 PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc); 747 goto err_out; 748 } 749 750 /* VNIC configuration */ 751 for (i = 0; i < bp->nr_vnics; i++) { 752 rc = bnxt_setup_one_vnic(bp, i); 753 if (rc) 754 goto err_out; 755 } 756 757 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 758 if (rc) { 759 PMD_DRV_LOG(ERR, 760 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 761 goto err_out; 762 } 763 764 /* check and configure queue intr-vector mapping */ 765 if ((rte_intr_cap_multiple(intr_handle) || 766 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 767 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 768 intr_vector = bp->eth_dev->data->nb_rx_queues; 769 PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector); 770 if (intr_vector > bp->rx_cp_nr_rings) { 771 PMD_DRV_LOG(ERR, "At most %d intr queues supported", 772 bp->rx_cp_nr_rings); 773 return -ENOTSUP; 774 } 775 rc = rte_intr_efd_enable(intr_handle, intr_vector); 776 if (rc) 777 return rc; 778 } 779 780 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 781 intr_handle->intr_vec = 782 rte_zmalloc("intr_vec", 783 bp->eth_dev->data->nb_rx_queues * 784 sizeof(int), 0); 785 if (intr_handle->intr_vec == NULL) { 786 PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" 787 " intr_vec", bp->eth_dev->data->nb_rx_queues); 788 rc = -ENOMEM; 789 goto err_disable; 790 } 791 PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " 792 "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", 793 intr_handle->intr_vec, 
intr_handle->nb_efd, 794 intr_handle->max_intr); 795 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 796 queue_id++) { 797 intr_handle->intr_vec[queue_id] = 798 vec + BNXT_RX_VEC_START; 799 if (vec < base + intr_handle->nb_efd - 1) 800 vec++; 801 } 802 } 803 804 /* enable uio/vfio intr/eventfd mapping */ 805 rc = rte_intr_enable(intr_handle); 806 #ifndef RTE_EXEC_ENV_FREEBSD 807 /* In FreeBSD OS, nic_uio driver does not support interrupts */ 808 if (rc) 809 goto err_free; 810 #endif 811 812 rc = bnxt_update_phy_setting(bp); 813 if (rc) 814 goto err_free; 815 816 bp->mark_table = rte_zmalloc("bnxt_mark_table", BNXT_MARK_TABLE_SZ, 0); 817 if (!bp->mark_table) 818 PMD_DRV_LOG(ERR, "Allocation of mark table failed\n"); 819 820 return 0; 821 822 err_free: 823 rte_free(intr_handle->intr_vec); 824 err_disable: 825 rte_intr_efd_disable(intr_handle); 826 err_out: 827 /* Some of the error status returned by FW may not be from errno.h */ 828 if (rc > 0) 829 rc = -EIO; 830 831 return rc; 832 } 833 834 static int bnxt_shutdown_nic(struct bnxt *bp) 835 { 836 bnxt_free_all_hwrm_resources(bp); 837 bnxt_free_all_filters(bp); 838 bnxt_free_all_vnics(bp); 839 return 0; 840 } 841 842 /* 843 * Device configuration and status function 844 */ 845 846 uint32_t bnxt_get_speed_capabilities(struct bnxt *bp) 847 { 848 uint32_t link_speed = bp->link_info->support_speeds; 849 uint32_t speed_capa = 0; 850 851 /* If PAM4 is configured, use PAM4 supported speed */ 852 if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0) 853 link_speed = bp->link_info->support_pam4_speeds; 854 855 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB) 856 speed_capa |= ETH_LINK_SPEED_100M; 857 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD) 858 speed_capa |= ETH_LINK_SPEED_100M_HD; 859 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) 860 speed_capa |= ETH_LINK_SPEED_1G; 861 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) 862 speed_capa |= ETH_LINK_SPEED_2_5G; 863 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) 864 speed_capa |= ETH_LINK_SPEED_10G; 865 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) 866 speed_capa |= ETH_LINK_SPEED_20G; 867 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) 868 speed_capa |= ETH_LINK_SPEED_25G; 869 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) 870 speed_capa |= ETH_LINK_SPEED_40G; 871 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) 872 speed_capa |= ETH_LINK_SPEED_50G; 873 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) 874 speed_capa |= ETH_LINK_SPEED_100G; 875 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) 876 speed_capa |= ETH_LINK_SPEED_50G; 877 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) 878 speed_capa |= ETH_LINK_SPEED_100G; 879 if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) 880 speed_capa |= ETH_LINK_SPEED_200G; 881 882 if (bp->link_info->auto_mode == 883 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) 884 speed_capa |= ETH_LINK_SPEED_FIXED; 885 else 886 speed_capa |= ETH_LINK_SPEED_AUTONEG; 887 888 return speed_capa; 889 } 890 891 static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 892 struct rte_eth_dev_info *dev_info) 893 { 894 struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device); 895 struct bnxt *bp = eth_dev->data->dev_private; 896 uint16_t max_vnics, i, j, vpool, vrxq; 897 unsigned int max_rx_rings; 898 int rc; 899 900 rc = 
is_bnxt_in_error(bp); 901 if (rc) 902 return rc; 903 904 /* MAC Specifics */ 905 dev_info->max_mac_addrs = bp->max_l2_ctx; 906 dev_info->max_hash_mac_addrs = 0; 907 908 /* PF/VF specifics */ 909 if (BNXT_PF(bp)) 910 dev_info->max_vfs = pdev->max_vfs; 911 912 max_rx_rings = BNXT_MAX_RINGS(bp); 913 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 914 dev_info->max_rx_queues = max_rx_rings; 915 dev_info->max_tx_queues = max_rx_rings; 916 dev_info->reta_size = bnxt_rss_hash_tbl_size(bp); 917 dev_info->hash_key_size = 40; 918 max_vnics = bp->max_vnics; 919 920 /* MTU specifics */ 921 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 922 dev_info->max_mtu = BNXT_MAX_MTU; 923 924 /* Fast path specifics */ 925 dev_info->min_rx_bufsize = 1; 926 dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN; 927 928 dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT; 929 if (bp->flags & BNXT_FLAG_PTP_SUPPORTED) 930 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP; 931 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; 932 dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT | 933 dev_info->tx_queue_offload_capa; 934 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; 935 936 dev_info->speed_capa = bnxt_get_speed_capabilities(bp); 937 938 /* *INDENT-OFF* */ 939 dev_info->default_rxconf = (struct rte_eth_rxconf) { 940 .rx_thresh = { 941 .pthresh = 8, 942 .hthresh = 8, 943 .wthresh = 0, 944 }, 945 .rx_free_thresh = 32, 946 .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN, 947 }; 948 949 dev_info->default_txconf = (struct rte_eth_txconf) { 950 .tx_thresh = { 951 .pthresh = 32, 952 .hthresh = 0, 953 .wthresh = 0, 954 }, 955 .tx_free_thresh = 32, 956 .tx_rs_thresh = 32, 957 }; 958 eth_dev->data->dev_conf.intr_conf.lsc = 1; 959 960 eth_dev->data->dev_conf.intr_conf.rxq = 1; 961 dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 962 dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; 963 dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; 964 dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; 965 966 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 967 dev_info->switch_info.name = eth_dev->device->name; 968 dev_info->switch_info.domain_id = bp->switch_domain_id; 969 dev_info->switch_info.port_id = 970 BNXT_PF(bp) ? BNXT_SWITCH_PORT_ID_PF : 971 BNXT_SWITCH_PORT_ID_TRUSTED_VF; 972 } 973 974 /* *INDENT-ON* */ 975 976 /* 977 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 978 * need further investigation. 
979 */ 980 981 /* VMDq resources */ 982 vpool = 64; /* ETH_64_POOLS */ 983 vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */ 984 for (i = 0; i < 4; vpool >>= 1, i++) { 985 if (max_vnics > vpool) { 986 for (j = 0; j < 5; vrxq >>= 1, j++) { 987 if (dev_info->max_rx_queues > vrxq) { 988 if (vpool > vrxq) 989 vpool = vrxq; 990 goto found; 991 } 992 } 993 /* Not enough resources to support VMDq */ 994 break; 995 } 996 } 997 /* Not enough resources to support VMDq */ 998 vpool = 0; 999 vrxq = 0; 1000 found: 1001 dev_info->max_vmdq_pools = vpool; 1002 dev_info->vmdq_queue_num = vrxq; 1003 1004 dev_info->vmdq_pool_base = 0; 1005 dev_info->vmdq_queue_base = 0; 1006 1007 return 0; 1008 } 1009 1010 /* Configure the device based on the configuration provided */ 1011 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) 1012 { 1013 struct bnxt *bp = eth_dev->data->dev_private; 1014 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1015 int rc; 1016 1017 bp->rx_queues = (void *)eth_dev->data->rx_queues; 1018 bp->tx_queues = (void *)eth_dev->data->tx_queues; 1019 bp->tx_nr_rings = eth_dev->data->nb_tx_queues; 1020 bp->rx_nr_rings = eth_dev->data->nb_rx_queues; 1021 1022 rc = is_bnxt_in_error(bp); 1023 if (rc) 1024 return rc; 1025 1026 if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { 1027 rc = bnxt_hwrm_check_vf_rings(bp); 1028 if (rc) { 1029 PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); 1030 return -ENOSPC; 1031 } 1032 1033 /* If a resource has already been allocated - in this case 1034 * it is the async completion ring, free it. Reallocate it after 1035 * resource reservation. This will ensure the resource counts 1036 * are calculated correctly. 1037 */ 1038 1039 pthread_mutex_lock(&bp->def_cp_lock); 1040 1041 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1042 bnxt_disable_int(bp); 1043 bnxt_free_cp_ring(bp, bp->async_cp_ring); 1044 } 1045 1046 rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); 1047 if (rc) { 1048 PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); 1049 pthread_mutex_unlock(&bp->def_cp_lock); 1050 return -ENOSPC; 1051 } 1052 1053 if (!BNXT_HAS_NQ(bp) && bp->async_cp_ring) { 1054 rc = bnxt_alloc_async_cp_ring(bp); 1055 if (rc) { 1056 pthread_mutex_unlock(&bp->def_cp_lock); 1057 return rc; 1058 } 1059 bnxt_enable_int(bp); 1060 } 1061 1062 pthread_mutex_unlock(&bp->def_cp_lock); 1063 } else { 1064 /* legacy driver needs to get updated values */ 1065 rc = bnxt_hwrm_func_qcaps(bp); 1066 if (rc) { 1067 PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc); 1068 return rc; 1069 } 1070 } 1071 1072 /* Inherit new configurations */ 1073 if (eth_dev->data->nb_rx_queues > bp->max_rx_rings || 1074 eth_dev->data->nb_tx_queues > bp->max_tx_rings || 1075 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues 1076 + BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings || 1077 eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > 1078 bp->max_stat_ctx) 1079 goto resource_error; 1080 1081 if (BNXT_HAS_RING_GRPS(bp) && 1082 (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) 1083 goto resource_error; 1084 1085 if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) && 1086 bp->max_vnics < eth_dev->data->nb_rx_queues) 1087 goto resource_error; 1088 1089 bp->rx_cp_nr_rings = bp->rx_nr_rings; 1090 bp->tx_cp_nr_rings = bp->tx_nr_rings; 1091 1092 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 1093 rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH; 1094 eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; 1095 1096 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 1097 
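		/*
		 * Derive the MTU from the configured maximum Rx frame length:
		 * max_rx_pkt_len minus the Ethernet header, CRC and room for
		 * two VLAN tags (BNXT_NUM_VLANS). For example, assuming the
		 * usual 14-byte header, 4-byte CRC and 4-byte VLAN tags, a
		 * max_rx_pkt_len of 9600 maps to an MTU of 9574.
		 */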
eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
			BNXT_NUM_VLANS;
		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
	}
	return 0;

resource_error:
	PMD_DRV_LOG(ERR,
		    "Insufficient resources to support requested config\n");
	PMD_DRV_LOG(ERR,
		    "Num Queues Requested: Tx %d, Rx %d\n",
		    eth_dev->data->nb_tx_queues,
		    eth_dev->data->nb_rx_queues);
	PMD_DRV_LOG(ERR,
		    "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
		    bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
		    bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
	return -ENOSPC;
}

void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	if (link->link_status)
		PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
			    eth_dev->data->port_id,
			    (uint32_t)link->link_speed,
			    (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
			    ("full-duplex") : ("half-duplex"));
	else
		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
			    eth_dev->data->port_id);
}

/*
 * Determine whether the current configuration requires support for scattered
 * receive; return 1 if scattered receive is required and 0 if not.
 */
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
	uint16_t buf_size;
	int i;

	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
		return 1;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];

		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
				      RTE_PKTMBUF_HEADROOM);
		if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
			return 1;
	}
	return 0;
}

static eth_rx_burst_t
bnxt_receive_function(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
	/*
	 * Vector mode receive can be enabled only if scatter rx is not
	 * in use and rx offloads are limited to VLAN stripping and
	 * CRC stripping.
1168 */ 1169 if (!eth_dev->data->scattered_rx && 1170 !(eth_dev->data->dev_conf.rxmode.offloads & 1171 ~(DEV_RX_OFFLOAD_VLAN_STRIP | 1172 DEV_RX_OFFLOAD_KEEP_CRC | 1173 DEV_RX_OFFLOAD_JUMBO_FRAME | 1174 DEV_RX_OFFLOAD_IPV4_CKSUM | 1175 DEV_RX_OFFLOAD_UDP_CKSUM | 1176 DEV_RX_OFFLOAD_TCP_CKSUM | 1177 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1178 DEV_RX_OFFLOAD_RSS_HASH | 1179 DEV_RX_OFFLOAD_VLAN_FILTER)) && 1180 !BNXT_TRUFLOW_EN(bp) && BNXT_NUM_ASYNC_CPR(bp) && 1181 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1182 PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n", 1183 eth_dev->data->port_id); 1184 bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE; 1185 return bnxt_recv_pkts_vec; 1186 } 1187 PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n", 1188 eth_dev->data->port_id); 1189 PMD_DRV_LOG(INFO, 1190 "Port %d scatter: %d rx offload: %" PRIX64 "\n", 1191 eth_dev->data->port_id, 1192 eth_dev->data->scattered_rx, 1193 eth_dev->data->dev_conf.rxmode.offloads); 1194 #endif 1195 #endif 1196 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1197 return bnxt_recv_pkts; 1198 } 1199 1200 static eth_tx_burst_t 1201 bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev) 1202 { 1203 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) 1204 #ifndef RTE_LIBRTE_IEEE1588 1205 uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads; 1206 struct bnxt *bp = eth_dev->data->dev_private; 1207 1208 /* 1209 * Vector mode transmit can be enabled only if not using scatter rx 1210 * or tx offloads. 1211 */ 1212 if (!eth_dev->data->scattered_rx && 1213 !(offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) && 1214 !BNXT_TRUFLOW_EN(bp) && 1215 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 1216 PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n", 1217 eth_dev->data->port_id); 1218 return bnxt_xmit_pkts_vec; 1219 } 1220 PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n", 1221 eth_dev->data->port_id); 1222 PMD_DRV_LOG(INFO, 1223 "Port %d scatter: %d tx offload: %" PRIX64 "\n", 1224 eth_dev->data->port_id, 1225 eth_dev->data->scattered_rx, 1226 offloads); 1227 #endif 1228 #endif 1229 return bnxt_xmit_pkts; 1230 } 1231 1232 static int bnxt_handle_if_change_status(struct bnxt *bp) 1233 { 1234 int rc; 1235 1236 /* Since fw has undergone a reset and lost all contexts, 1237 * set fatal flag to not issue hwrm during cleanup 1238 */ 1239 bp->flags |= BNXT_FLAG_FATAL_ERROR; 1240 bnxt_uninit_resources(bp, true); 1241 1242 /* clear fatal flag so that re-init happens */ 1243 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 1244 rc = bnxt_init_resources(bp, true); 1245 1246 bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; 1247 1248 return rc; 1249 } 1250 1251 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 1252 { 1253 struct bnxt *bp = eth_dev->data->dev_private; 1254 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1255 int vlan_mask = 0; 1256 int rc, retry_cnt = BNXT_IF_CHANGE_RETRY_COUNT; 1257 1258 if (!eth_dev->data->nb_tx_queues || !eth_dev->data->nb_rx_queues) { 1259 PMD_DRV_LOG(ERR, "Queues are not configured yet!\n"); 1260 return -EINVAL; 1261 } 1262 1263 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) { 1264 PMD_DRV_LOG(ERR, 1265 "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 1266 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 1267 } 1268 1269 do { 1270 rc = bnxt_hwrm_if_change(bp, true); 1271 if (rc == 0 || rc != -EAGAIN) 1272 break; 1273 1274 rte_delay_ms(BNXT_IF_CHANGE_RETRY_INTERVAL); 1275 } while (retry_cnt--); 1276 1277 if (rc) 1278 return rc; 
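	/*
	 * If bnxt_hwrm_if_change() reported that firmware underwent a hot
	 * reset while the port was down, the previously created contexts are
	 * stale: bnxt_handle_if_change_status() tears down and re-creates the
	 * driver resources before the port is brought up.
	 */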
1279 1280 if (bp->flags & BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE) { 1281 rc = bnxt_handle_if_change_status(bp); 1282 if (rc) 1283 return rc; 1284 } 1285 1286 bnxt_enable_int(bp); 1287 1288 rc = bnxt_init_chip(bp); 1289 if (rc) 1290 goto error; 1291 1292 eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev); 1293 eth_dev->data->dev_started = 1; 1294 1295 bnxt_link_update_op(eth_dev, 1); 1296 1297 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 1298 vlan_mask |= ETH_VLAN_FILTER_MASK; 1299 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 1300 vlan_mask |= ETH_VLAN_STRIP_MASK; 1301 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 1302 if (rc) 1303 goto error; 1304 1305 /* Initialize bnxt ULP port details */ 1306 rc = bnxt_ulp_port_init(bp); 1307 if (rc) 1308 goto error; 1309 1310 eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev); 1311 eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev); 1312 1313 bnxt_schedule_fw_health_check(bp); 1314 1315 return 0; 1316 1317 error: 1318 bnxt_shutdown_nic(bp); 1319 bnxt_free_tx_mbufs(bp); 1320 bnxt_free_rx_mbufs(bp); 1321 bnxt_hwrm_if_change(bp, false); 1322 eth_dev->data->dev_started = 0; 1323 return rc; 1324 } 1325 1326 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 1327 { 1328 struct bnxt *bp = eth_dev->data->dev_private; 1329 int rc = 0; 1330 1331 if (!bp->link_info->link_up) 1332 rc = bnxt_set_hwrm_link_config(bp, true); 1333 if (!rc) 1334 eth_dev->data->dev_link.link_status = 1; 1335 1336 bnxt_print_link_info(eth_dev); 1337 return rc; 1338 } 1339 1340 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 1341 { 1342 struct bnxt *bp = eth_dev->data->dev_private; 1343 1344 eth_dev->data->dev_link.link_status = 0; 1345 bnxt_set_hwrm_link_config(bp, false); 1346 bp->link_info->link_up = 0; 1347 1348 return 0; 1349 } 1350 1351 static void bnxt_free_switch_domain(struct bnxt *bp) 1352 { 1353 int rc = 0; 1354 1355 if (bp->switch_domain_id) { 1356 rc = rte_eth_switch_domain_free(bp->switch_domain_id); 1357 if (rc) 1358 PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n", 1359 bp->switch_domain_id, rc); 1360 } 1361 } 1362 1363 /* Unload the driver, release resources */ 1364 static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 1365 { 1366 struct bnxt *bp = eth_dev->data->dev_private; 1367 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1368 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1369 struct rte_eth_link link; 1370 int ret; 1371 1372 eth_dev->data->dev_started = 0; 1373 eth_dev->data->scattered_rx = 0; 1374 1375 /* Prevent crashes when queues are still in use */ 1376 eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts; 1377 eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts; 1378 1379 bnxt_disable_int(bp); 1380 1381 /* disable uio/vfio intr/eventfd mapping */ 1382 rte_intr_disable(intr_handle); 1383 1384 /* Stop the child representors for this device */ 1385 ret = bnxt_rep_stop_all(bp); 1386 if (ret != 0) 1387 return ret; 1388 1389 /* delete the bnxt ULP port details */ 1390 bnxt_ulp_port_deinit(bp); 1391 1392 bnxt_cancel_fw_health_check(bp); 1393 1394 /* Do not bring link down during reset recovery */ 1395 if (!is_bnxt_in_error(bp)) { 1396 bnxt_dev_set_link_down_op(eth_dev); 1397 /* Wait for link to be reset */ 1398 if (BNXT_SINGLE_PF(bp)) 1399 rte_delay_ms(500); 1400 /* clear the recorded link status */ 1401 memset(&link, 0, sizeof(link)); 1402 rte_eth_linkstatus_set(eth_dev, &link); 1403 } 1404 1405 /* Clean queue intr-vector mapping */ 1406 rte_intr_efd_disable(intr_handle); 1407 if 
(intr_handle->intr_vec != NULL) { 1408 rte_free(intr_handle->intr_vec); 1409 intr_handle->intr_vec = NULL; 1410 } 1411 1412 bnxt_hwrm_port_clr_stats(bp); 1413 bnxt_free_tx_mbufs(bp); 1414 bnxt_free_rx_mbufs(bp); 1415 /* Process any remaining notifications in default completion queue */ 1416 bnxt_int_handler(eth_dev); 1417 bnxt_shutdown_nic(bp); 1418 bnxt_hwrm_if_change(bp, false); 1419 1420 rte_free(bp->mark_table); 1421 bp->mark_table = NULL; 1422 1423 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 1424 bp->rx_cosq_cnt = 0; 1425 /* All filters are deleted on a port stop. */ 1426 if (BNXT_FLOW_XSTATS_EN(bp)) 1427 bp->flow_stat->flow_count = 0; 1428 1429 return 0; 1430 } 1431 1432 static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 1433 { 1434 struct bnxt *bp = eth_dev->data->dev_private; 1435 int ret = 0; 1436 1437 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1438 return 0; 1439 1440 /* cancel the recovery handler before remove dev */ 1441 rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); 1442 rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); 1443 bnxt_cancel_fc_thread(bp); 1444 1445 if (eth_dev->data->dev_started) 1446 ret = bnxt_dev_stop_op(eth_dev); 1447 1448 bnxt_free_switch_domain(bp); 1449 1450 bnxt_uninit_resources(bp, false); 1451 1452 bnxt_free_leds_info(bp); 1453 bnxt_free_cos_queues(bp); 1454 bnxt_free_link_info(bp); 1455 bnxt_free_pf_info(bp); 1456 bnxt_free_parent_info(bp); 1457 1458 rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); 1459 bp->tx_mem_zone = NULL; 1460 rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); 1461 bp->rx_mem_zone = NULL; 1462 1463 bnxt_hwrm_free_vf_info(bp); 1464 1465 rte_free(bp->grp_info); 1466 bp->grp_info = NULL; 1467 1468 return ret; 1469 } 1470 1471 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 1472 uint32_t index) 1473 { 1474 struct bnxt *bp = eth_dev->data->dev_private; 1475 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 1476 struct bnxt_vnic_info *vnic; 1477 struct bnxt_filter_info *filter, *temp_filter; 1478 uint32_t i; 1479 1480 if (is_bnxt_in_error(bp)) 1481 return; 1482 1483 /* 1484 * Loop through all VNICs from the specified filter flow pools to 1485 * remove the corresponding MAC addr filter 1486 */ 1487 for (i = 0; i < bp->nr_vnics; i++) { 1488 if (!(pool_mask & (1ULL << i))) 1489 continue; 1490 1491 vnic = &bp->vnic_info[i]; 1492 filter = STAILQ_FIRST(&vnic->filter); 1493 while (filter) { 1494 temp_filter = STAILQ_NEXT(filter, next); 1495 if (filter->mac_index == index) { 1496 STAILQ_REMOVE(&vnic->filter, filter, 1497 bnxt_filter_info, next); 1498 bnxt_hwrm_clear_l2_filter(bp, filter); 1499 bnxt_free_filter(bp, filter); 1500 } 1501 filter = temp_filter; 1502 } 1503 } 1504 } 1505 1506 static int bnxt_add_mac_filter(struct bnxt *bp, struct bnxt_vnic_info *vnic, 1507 struct rte_ether_addr *mac_addr, uint32_t index, 1508 uint32_t pool) 1509 { 1510 struct bnxt_filter_info *filter; 1511 int rc = 0; 1512 1513 /* Attach requested MAC address to the new l2_filter */ 1514 STAILQ_FOREACH(filter, &vnic->filter, next) { 1515 if (filter->mac_index == index) { 1516 PMD_DRV_LOG(DEBUG, 1517 "MAC addr already existed for pool %d\n", 1518 pool); 1519 return 0; 1520 } 1521 } 1522 1523 filter = bnxt_alloc_filter(bp); 1524 if (!filter) { 1525 PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); 1526 return -ENODEV; 1527 } 1528 1529 /* bnxt_alloc_filter copies default MAC to filter->l2_addr. 
So, 1530 * if the MAC that's been programmed now is a different one, then, 1531 * copy that addr to filter->l2_addr 1532 */ 1533 if (mac_addr) 1534 memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN); 1535 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 1536 1537 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1538 if (!rc) { 1539 filter->mac_index = index; 1540 if (filter->mac_index == 0) 1541 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 1542 else 1543 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 1544 } else { 1545 bnxt_free_filter(bp, filter); 1546 } 1547 1548 return rc; 1549 } 1550 1551 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, 1552 struct rte_ether_addr *mac_addr, 1553 uint32_t index, uint32_t pool) 1554 { 1555 struct bnxt *bp = eth_dev->data->dev_private; 1556 struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; 1557 int rc = 0; 1558 1559 rc = is_bnxt_in_error(bp); 1560 if (rc) 1561 return rc; 1562 1563 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { 1564 PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); 1565 return -ENOTSUP; 1566 } 1567 1568 if (!vnic) { 1569 PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool); 1570 return -EINVAL; 1571 } 1572 1573 /* Filter settings will get applied when port is started */ 1574 if (!eth_dev->data->dev_started) 1575 return 0; 1576 1577 rc = bnxt_add_mac_filter(bp, vnic, mac_addr, index, pool); 1578 1579 return rc; 1580 } 1581 1582 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) 1583 { 1584 int rc = 0; 1585 struct bnxt *bp = eth_dev->data->dev_private; 1586 struct rte_eth_link new; 1587 int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT : 1588 BNXT_MIN_LINK_WAIT_CNT; 1589 1590 rc = is_bnxt_in_error(bp); 1591 if (rc) 1592 return rc; 1593 1594 memset(&new, 0, sizeof(new)); 1595 do { 1596 /* Retrieve link info from hardware */ 1597 rc = bnxt_get_hwrm_link_config(bp, &new); 1598 if (rc) { 1599 new.link_speed = ETH_LINK_SPEED_100M; 1600 new.link_duplex = ETH_LINK_FULL_DUPLEX; 1601 PMD_DRV_LOG(ERR, 1602 "Failed to retrieve link rc = 0x%x!\n", rc); 1603 goto out; 1604 } 1605 1606 if (!wait_to_complete || new.link_status) 1607 break; 1608 1609 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 1610 } while (cnt--); 1611 1612 /* Only single function PF can bring phy down. 1613 * When port is stopped, report link down for VF/MH/NPAR functions. 
1614 */ 1615 if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started) 1616 memset(&new, 0, sizeof(new)); 1617 1618 out: 1619 /* Timed out or success */ 1620 if (new.link_status != eth_dev->data->dev_link.link_status || 1621 new.link_speed != eth_dev->data->dev_link.link_speed) { 1622 rte_eth_linkstatus_set(eth_dev, &new); 1623 1624 rte_eth_dev_callback_process(eth_dev, 1625 RTE_ETH_EVENT_INTR_LSC, 1626 NULL); 1627 1628 bnxt_print_link_info(eth_dev); 1629 } 1630 1631 return rc; 1632 } 1633 1634 static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 1635 { 1636 struct bnxt *bp = eth_dev->data->dev_private; 1637 struct bnxt_vnic_info *vnic; 1638 uint32_t old_flags; 1639 int rc; 1640 1641 rc = is_bnxt_in_error(bp); 1642 if (rc) 1643 return rc; 1644 1645 /* Filter settings will get applied when port is started */ 1646 if (!eth_dev->data->dev_started) 1647 return 0; 1648 1649 if (bp->vnic_info == NULL) 1650 return 0; 1651 1652 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1653 1654 old_flags = vnic->flags; 1655 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 1656 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1657 if (rc != 0) 1658 vnic->flags = old_flags; 1659 1660 return rc; 1661 } 1662 1663 static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 1664 { 1665 struct bnxt *bp = eth_dev->data->dev_private; 1666 struct bnxt_vnic_info *vnic; 1667 uint32_t old_flags; 1668 int rc; 1669 1670 rc = is_bnxt_in_error(bp); 1671 if (rc) 1672 return rc; 1673 1674 /* Filter settings will get applied when port is started */ 1675 if (!eth_dev->data->dev_started) 1676 return 0; 1677 1678 if (bp->vnic_info == NULL) 1679 return 0; 1680 1681 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1682 1683 old_flags = vnic->flags; 1684 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 1685 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1686 if (rc != 0) 1687 vnic->flags = old_flags; 1688 1689 return rc; 1690 } 1691 1692 static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 1693 { 1694 struct bnxt *bp = eth_dev->data->dev_private; 1695 struct bnxt_vnic_info *vnic; 1696 uint32_t old_flags; 1697 int rc; 1698 1699 rc = is_bnxt_in_error(bp); 1700 if (rc) 1701 return rc; 1702 1703 /* Filter settings will get applied when port is started */ 1704 if (!eth_dev->data->dev_started) 1705 return 0; 1706 1707 if (bp->vnic_info == NULL) 1708 return 0; 1709 1710 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1711 1712 old_flags = vnic->flags; 1713 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1714 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1715 if (rc != 0) 1716 vnic->flags = old_flags; 1717 1718 return rc; 1719 } 1720 1721 static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 1722 { 1723 struct bnxt *bp = eth_dev->data->dev_private; 1724 struct bnxt_vnic_info *vnic; 1725 uint32_t old_flags; 1726 int rc; 1727 1728 rc = is_bnxt_in_error(bp); 1729 if (rc) 1730 return rc; 1731 1732 /* Filter settings will get applied when port is started */ 1733 if (!eth_dev->data->dev_started) 1734 return 0; 1735 1736 if (bp->vnic_info == NULL) 1737 return 0; 1738 1739 vnic = BNXT_GET_DEFAULT_VNIC(bp); 1740 1741 old_flags = vnic->flags; 1742 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1743 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1744 if (rc != 0) 1745 vnic->flags = old_flags; 1746 1747 return rc; 1748 } 1749 1750 /* Return bnxt_rx_queue pointer corresponding to a given rxq. 
*/ 1751 static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) 1752 { 1753 if (qid >= bp->rx_nr_rings) 1754 return NULL; 1755 1756 return bp->eth_dev->data->rx_queues[qid]; 1757 } 1758 1759 /* Return rxq corresponding to a given rss table ring/group ID. */ 1760 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) 1761 { 1762 struct bnxt_rx_queue *rxq; 1763 unsigned int i; 1764 1765 if (!BNXT_HAS_RING_GRPS(bp)) { 1766 for (i = 0; i < bp->rx_nr_rings; i++) { 1767 rxq = bp->eth_dev->data->rx_queues[i]; 1768 if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr) 1769 return rxq->index; 1770 } 1771 } else { 1772 for (i = 0; i < bp->rx_nr_rings; i++) { 1773 if (bp->grp_info[i].fw_grp_id == fwr) 1774 return i; 1775 } 1776 } 1777 1778 return INVALID_HW_RING_ID; 1779 } 1780 1781 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 1782 struct rte_eth_rss_reta_entry64 *reta_conf, 1783 uint16_t reta_size) 1784 { 1785 struct bnxt *bp = eth_dev->data->dev_private; 1786 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 1787 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 1788 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 1789 uint16_t idx, sft; 1790 int i, rc; 1791 1792 rc = is_bnxt_in_error(bp); 1793 if (rc) 1794 return rc; 1795 1796 if (!vnic->rss_table) 1797 return -EINVAL; 1798 1799 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) 1800 return -EINVAL; 1801 1802 if (reta_size != tbl_size) { 1803 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 1804 "(%d) must equal the size supported by the hardware " 1805 "(%d)\n", reta_size, tbl_size); 1806 return -EINVAL; 1807 } 1808 1809 for (i = 0; i < reta_size; i++) { 1810 struct bnxt_rx_queue *rxq; 1811 1812 idx = i / RTE_RETA_GROUP_SIZE; 1813 sft = i % RTE_RETA_GROUP_SIZE; 1814 1815 if (!(reta_conf[idx].mask & (1ULL << sft))) 1816 continue; 1817 1818 rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); 1819 if (!rxq) { 1820 PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); 1821 return -EINVAL; 1822 } 1823 1824 if (BNXT_CHIP_THOR(bp)) { 1825 vnic->rss_table[i * 2] = 1826 rxq->rx_ring->rx_ring_struct->fw_ring_id; 1827 vnic->rss_table[i * 2 + 1] = 1828 rxq->cp_ring->cp_ring_struct->fw_ring_id; 1829 } else { 1830 vnic->rss_table[i] = 1831 vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; 1832 } 1833 } 1834 1835 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 1836 return 0; 1837 } 1838 1839 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 1840 struct rte_eth_rss_reta_entry64 *reta_conf, 1841 uint16_t reta_size) 1842 { 1843 struct bnxt *bp = eth_dev->data->dev_private; 1844 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 1845 uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp); 1846 uint16_t idx, sft, i; 1847 int rc; 1848 1849 rc = is_bnxt_in_error(bp); 1850 if (rc) 1851 return rc; 1852 1853 /* Retrieve from the default VNIC */ 1854 if (!vnic) 1855 return -EINVAL; 1856 if (!vnic->rss_table) 1857 return -EINVAL; 1858 1859 if (reta_size != tbl_size) { 1860 PMD_DRV_LOG(ERR, "The configured hash table lookup size " 1861 "(%d) must equal the size supported by the hardware " 1862 "(%d)\n", reta_size, tbl_size); 1863 return -EINVAL; 1864 } 1865 1866 for (idx = 0, i = 0; i < reta_size; i++) { 1867 idx = i / RTE_RETA_GROUP_SIZE; 1868 sft = i % RTE_RETA_GROUP_SIZE; 1869 1870 if (reta_conf[idx].mask & (1ULL << sft)) { 1871 uint16_t qid; 1872 1873 if (BNXT_CHIP_THOR(bp)) 1874 qid = bnxt_rss_to_qid(bp, 1875 vnic->rss_table[i * 2]); 1876 else 1877 qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); 1878 1879 if (qid 
== INVALID_HW_RING_ID) {
				PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
				return -EINVAL;
			}
			reta_conf[idx].reta[sft] = qid;
		}
	}

	return 0;
}

static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
				   struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic;
	int rc;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/*
	 * If the RSS enablement requested here differs from what was set in
	 * dev_configure, return -EINVAL.
	 */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		if (!rss_conf->rss_hf)
			PMD_DRV_LOG(ERR, "Hash type NONE\n");
	} else {
		if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
			return -EINVAL;
	}

	bp->flags |= BNXT_FLAG_UPDATE_HASH;
	memcpy(&eth_dev->data->dev_conf.rx_adv_conf.rss_conf,
	       rss_conf,
	       sizeof(*rss_conf));

	/* Update the default RSS VNIC(s) */
	vnic = BNXT_GET_DEFAULT_VNIC(bp);
	vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
	vnic->hash_mode =
		bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
					    ETH_RSS_LEVEL(rss_conf->rss_hf));

	/*
	 * If a hash key is not specified, keep the previously configured
	 * hash key.
	 */
	if (!rss_conf->rss_key)
		goto rss_config;

	if (rss_conf->rss_key_len != HW_HASH_KEY_SIZE) {
		PMD_DRV_LOG(ERR,
			    "Invalid hash key length, should be %d bytes\n",
			    HW_HASH_KEY_SIZE);
		return -EINVAL;
	}
	memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len);

rss_config:
	bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	return 0;
}

static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
				     struct rte_eth_rss_conf *rss_conf)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp);
	int len, rc;
	uint32_t hash_types;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* RSS configuration is the same for all VNICs */
	if (vnic && vnic->rss_hash_key) {
		if (rss_conf->rss_key) {
			len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
1961 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 1962 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 1963 } 1964 1965 hash_types = vnic->hash_type; 1966 rss_conf->rss_hf = 0; 1967 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 1968 rss_conf->rss_hf |= ETH_RSS_IPV4; 1969 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 1970 } 1971 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 1972 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 1973 hash_types &= 1974 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 1975 } 1976 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 1977 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; 1978 hash_types &= 1979 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 1980 } 1981 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 1982 rss_conf->rss_hf |= ETH_RSS_IPV6; 1983 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 1984 } 1985 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 1986 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 1987 hash_types &= 1988 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 1989 } 1990 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 1991 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; 1992 hash_types &= 1993 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 1994 } 1995 1996 rss_conf->rss_hf |= 1997 bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode); 1998 1999 if (hash_types) { 2000 PMD_DRV_LOG(ERR, 2001 "Unknown RSS config from firmware (%08x), RSS disabled", 2002 vnic->hash_type); 2003 return -ENOTSUP; 2004 } 2005 } else { 2006 rss_conf->rss_hf = 0; 2007 } 2008 return 0; 2009 } 2010 2011 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 2012 struct rte_eth_fc_conf *fc_conf) 2013 { 2014 struct bnxt *bp = dev->data->dev_private; 2015 struct rte_eth_link link_info; 2016 int rc; 2017 2018 rc = is_bnxt_in_error(bp); 2019 if (rc) 2020 return rc; 2021 2022 rc = bnxt_get_hwrm_link_config(bp, &link_info); 2023 if (rc) 2024 return rc; 2025 2026 memset(fc_conf, 0, sizeof(*fc_conf)); 2027 if (bp->link_info->auto_pause) 2028 fc_conf->autoneg = 1; 2029 switch (bp->link_info->pause) { 2030 case 0: 2031 fc_conf->mode = RTE_FC_NONE; 2032 break; 2033 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 2034 fc_conf->mode = RTE_FC_TX_PAUSE; 2035 break; 2036 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 2037 fc_conf->mode = RTE_FC_RX_PAUSE; 2038 break; 2039 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 2040 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 2041 fc_conf->mode = RTE_FC_FULL; 2042 break; 2043 } 2044 return 0; 2045 } 2046 2047 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 2048 struct rte_eth_fc_conf *fc_conf) 2049 { 2050 struct bnxt *bp = dev->data->dev_private; 2051 int rc; 2052 2053 rc = is_bnxt_in_error(bp); 2054 if (rc) 2055 return rc; 2056 2057 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 2058 PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n"); 2059 return -ENOTSUP; 2060 } 2061 2062 switch (fc_conf->mode) { 2063 case RTE_FC_NONE: 2064 bp->link_info->auto_pause = 0; 2065 bp->link_info->force_pause = 0; 2066 break; 2067 case RTE_FC_RX_PAUSE: 2068 if (fc_conf->autoneg) { 2069 bp->link_info->auto_pause = 2070 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2071 bp->link_info->force_pause = 0; 2072 } else { 2073 bp->link_info->auto_pause = 0; 2074 bp->link_info->force_pause = 2075 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2076 } 2077 break; 2078 case RTE_FC_TX_PAUSE: 2079 if (fc_conf->autoneg) { 2080 bp->link_info->auto_pause = 2081 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 2082 
bp->link_info->force_pause = 0; 2083 } else { 2084 bp->link_info->auto_pause = 0; 2085 bp->link_info->force_pause = 2086 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 2087 } 2088 break; 2089 case RTE_FC_FULL: 2090 if (fc_conf->autoneg) { 2091 bp->link_info->auto_pause = 2092 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 2093 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 2094 bp->link_info->force_pause = 0; 2095 } else { 2096 bp->link_info->auto_pause = 0; 2097 bp->link_info->force_pause = 2098 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 2099 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 2100 } 2101 break; 2102 } 2103 return bnxt_set_hwrm_link_config(bp, true); 2104 } 2105 2106 /* Add UDP tunneling port */ 2107 static int 2108 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 2109 struct rte_eth_udp_tunnel *udp_tunnel) 2110 { 2111 struct bnxt *bp = eth_dev->data->dev_private; 2112 uint16_t tunnel_type = 0; 2113 int rc = 0; 2114 2115 rc = is_bnxt_in_error(bp); 2116 if (rc) 2117 return rc; 2118 2119 switch (udp_tunnel->prot_type) { 2120 case RTE_TUNNEL_TYPE_VXLAN: 2121 if (bp->vxlan_port_cnt) { 2122 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2123 udp_tunnel->udp_port); 2124 if (bp->vxlan_port != udp_tunnel->udp_port) { 2125 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2126 return -ENOSPC; 2127 } 2128 bp->vxlan_port_cnt++; 2129 return 0; 2130 } 2131 tunnel_type = 2132 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 2133 bp->vxlan_port_cnt++; 2134 break; 2135 case RTE_TUNNEL_TYPE_GENEVE: 2136 if (bp->geneve_port_cnt) { 2137 PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n", 2138 udp_tunnel->udp_port); 2139 if (bp->geneve_port != udp_tunnel->udp_port) { 2140 PMD_DRV_LOG(ERR, "Only one port allowed\n"); 2141 return -ENOSPC; 2142 } 2143 bp->geneve_port_cnt++; 2144 return 0; 2145 } 2146 tunnel_type = 2147 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 2148 bp->geneve_port_cnt++; 2149 break; 2150 default: 2151 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2152 return -ENOTSUP; 2153 } 2154 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 2155 tunnel_type); 2156 return rc; 2157 } 2158 2159 static int 2160 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 2161 struct rte_eth_udp_tunnel *udp_tunnel) 2162 { 2163 struct bnxt *bp = eth_dev->data->dev_private; 2164 uint16_t tunnel_type = 0; 2165 uint16_t port = 0; 2166 int rc = 0; 2167 2168 rc = is_bnxt_in_error(bp); 2169 if (rc) 2170 return rc; 2171 2172 switch (udp_tunnel->prot_type) { 2173 case RTE_TUNNEL_TYPE_VXLAN: 2174 if (!bp->vxlan_port_cnt) { 2175 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2176 return -EINVAL; 2177 } 2178 if (bp->vxlan_port != udp_tunnel->udp_port) { 2179 PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n", 2180 udp_tunnel->udp_port, bp->vxlan_port); 2181 return -EINVAL; 2182 } 2183 if (--bp->vxlan_port_cnt) 2184 return 0; 2185 2186 tunnel_type = 2187 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 2188 port = bp->vxlan_fw_dst_port_id; 2189 break; 2190 case RTE_TUNNEL_TYPE_GENEVE: 2191 if (!bp->geneve_port_cnt) { 2192 PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n"); 2193 return -EINVAL; 2194 } 2195 if (bp->geneve_port != udp_tunnel->udp_port) { 2196 PMD_DRV_LOG(ERR, "Req Port: %d. 
Configured port: %d\n", 2197 udp_tunnel->udp_port, bp->geneve_port); 2198 return -EINVAL; 2199 } 2200 if (--bp->geneve_port_cnt) 2201 return 0; 2202 2203 tunnel_type = 2204 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 2205 port = bp->geneve_fw_dst_port_id; 2206 break; 2207 default: 2208 PMD_DRV_LOG(ERR, "Tunnel type is not supported\n"); 2209 return -ENOTSUP; 2210 } 2211 2212 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 2213 return rc; 2214 } 2215 2216 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2217 { 2218 struct bnxt_filter_info *filter; 2219 struct bnxt_vnic_info *vnic; 2220 int rc = 0; 2221 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2222 2223 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2224 filter = STAILQ_FIRST(&vnic->filter); 2225 while (filter) { 2226 /* Search for this matching MAC+VLAN filter */ 2227 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) { 2228 /* Delete the filter */ 2229 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2230 if (rc) 2231 return rc; 2232 STAILQ_REMOVE(&vnic->filter, filter, 2233 bnxt_filter_info, next); 2234 bnxt_free_filter(bp, filter); 2235 PMD_DRV_LOG(INFO, 2236 "Deleted vlan filter for %d\n", 2237 vlan_id); 2238 return 0; 2239 } 2240 filter = STAILQ_NEXT(filter, next); 2241 } 2242 return -ENOENT; 2243 } 2244 2245 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 2246 { 2247 struct bnxt_filter_info *filter; 2248 struct bnxt_vnic_info *vnic; 2249 int rc = 0; 2250 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 2251 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 2252 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 2253 2254 /* Implementation notes on the use of VNIC in this command: 2255 * 2256 * By default, these filters belong to default vnic for the function. 2257 * Once these filters are set up, only destination VNIC can be modified. 2258 * If the destination VNIC is not specified in this command, 2259 * then the HWRM shall only create an l2 context id. 2260 */ 2261 2262 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2263 filter = STAILQ_FIRST(&vnic->filter); 2264 /* Check if the VLAN has already been added */ 2265 while (filter) { 2266 if (bnxt_vlan_filter_exists(bp, filter, chk, vlan_id)) 2267 return -EEXIST; 2268 2269 filter = STAILQ_NEXT(filter, next); 2270 } 2271 2272 /* No match found. Alloc a fresh filter and issue the L2_FILTER_ALLOC 2273 * command to create MAC+VLAN filter with the right flags, enables set. 2274 */ 2275 filter = bnxt_alloc_filter(bp); 2276 if (!filter) { 2277 PMD_DRV_LOG(ERR, 2278 "MAC/VLAN filter alloc failed\n"); 2279 return -ENOMEM; 2280 } 2281 /* MAC + VLAN ID filter */ 2282 /* If l2_ivlan == 0 and l2_ivlan_mask != 0, only 2283 * untagged packets are received 2284 * 2285 * If l2_ivlan != 0 and l2_ivlan_mask != 0, untagged 2286 * packets and only the programmed vlan's packets are received 2287 */ 2288 filter->l2_ivlan = vlan_id; 2289 filter->l2_ivlan_mask = 0x0FFF; 2290 filter->enables |= en; 2291 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST; 2292 2293 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 2294 if (rc) { 2295 /* Free the newly allocated filter as we were 2296 * not able to create the filter in hardware. 
2297 */ 2298 bnxt_free_filter(bp, filter); 2299 return rc; 2300 } 2301 2302 filter->mac_index = 0; 2303 /* Add this new filter to the list */ 2304 if (vlan_id == 0) 2305 STAILQ_INSERT_HEAD(&vnic->filter, filter, next); 2306 else 2307 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 2308 2309 PMD_DRV_LOG(INFO, 2310 "Added Vlan filter for %d\n", vlan_id); 2311 return rc; 2312 } 2313 2314 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 2315 uint16_t vlan_id, int on) 2316 { 2317 struct bnxt *bp = eth_dev->data->dev_private; 2318 int rc; 2319 2320 rc = is_bnxt_in_error(bp); 2321 if (rc) 2322 return rc; 2323 2324 if (!eth_dev->data->dev_started) { 2325 PMD_DRV_LOG(ERR, "port must be started before setting vlan\n"); 2326 return -EINVAL; 2327 } 2328 2329 /* These operations apply to ALL existing MAC/VLAN filters */ 2330 if (on) 2331 return bnxt_add_vlan_filter(bp, vlan_id); 2332 else 2333 return bnxt_del_vlan_filter(bp, vlan_id); 2334 } 2335 2336 static int bnxt_del_dflt_mac_filter(struct bnxt *bp, 2337 struct bnxt_vnic_info *vnic) 2338 { 2339 struct bnxt_filter_info *filter; 2340 int rc; 2341 2342 filter = STAILQ_FIRST(&vnic->filter); 2343 while (filter) { 2344 if (filter->mac_index == 0 && 2345 !memcmp(filter->l2_addr, bp->mac_addr, 2346 RTE_ETHER_ADDR_LEN)) { 2347 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 2348 if (!rc) { 2349 STAILQ_REMOVE(&vnic->filter, filter, 2350 bnxt_filter_info, next); 2351 bnxt_free_filter(bp, filter); 2352 } 2353 return rc; 2354 } 2355 filter = STAILQ_NEXT(filter, next); 2356 } 2357 return 0; 2358 } 2359 2360 static int 2361 bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads) 2362 { 2363 struct bnxt_vnic_info *vnic; 2364 unsigned int i; 2365 int rc; 2366 2367 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2368 if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { 2369 /* Remove any VLAN filters programmed */ 2370 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2371 bnxt_del_vlan_filter(bp, i); 2372 2373 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2374 if (rc) 2375 return rc; 2376 } else { 2377 /* Default filter will allow packets that match the 2378 * dest mac. 
So, it has to be deleted, otherwise, we 2379 * will endup receiving vlan packets for which the 2380 * filter is not programmed, when hw-vlan-filter 2381 * configuration is ON 2382 */ 2383 bnxt_del_dflt_mac_filter(bp, vnic); 2384 /* This filter will allow only untagged packets */ 2385 bnxt_add_vlan_filter(bp, 0); 2386 } 2387 PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n", 2388 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)); 2389 2390 return 0; 2391 } 2392 2393 static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id) 2394 { 2395 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 2396 unsigned int i; 2397 int rc; 2398 2399 /* Destroy vnic filters and vnic */ 2400 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2401 DEV_RX_OFFLOAD_VLAN_FILTER) { 2402 for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++) 2403 bnxt_del_vlan_filter(bp, i); 2404 } 2405 bnxt_del_dflt_mac_filter(bp, vnic); 2406 2407 rc = bnxt_hwrm_vnic_free(bp, vnic); 2408 if (rc) 2409 return rc; 2410 2411 rte_free(vnic->fw_grp_ids); 2412 vnic->fw_grp_ids = NULL; 2413 2414 vnic->rx_queue_cnt = 0; 2415 2416 return 0; 2417 } 2418 2419 static int 2420 bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads) 2421 { 2422 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2423 int rc; 2424 2425 /* Destroy, recreate and reconfigure the default vnic */ 2426 rc = bnxt_free_one_vnic(bp, 0); 2427 if (rc) 2428 return rc; 2429 2430 /* default vnic 0 */ 2431 rc = bnxt_setup_one_vnic(bp, 0); 2432 if (rc) 2433 return rc; 2434 2435 if (bp->eth_dev->data->dev_conf.rxmode.offloads & 2436 DEV_RX_OFFLOAD_VLAN_FILTER) { 2437 rc = bnxt_add_vlan_filter(bp, 0); 2438 if (rc) 2439 return rc; 2440 rc = bnxt_restore_vlan_filters(bp); 2441 if (rc) 2442 return rc; 2443 } else { 2444 rc = bnxt_add_mac_filter(bp, vnic, NULL, 0, 0); 2445 if (rc) 2446 return rc; 2447 } 2448 2449 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2450 if (rc) 2451 return rc; 2452 2453 PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n", 2454 !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)); 2455 2456 return rc; 2457 } 2458 2459 static int 2460 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 2461 { 2462 uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; 2463 struct bnxt *bp = dev->data->dev_private; 2464 int rc; 2465 2466 rc = is_bnxt_in_error(bp); 2467 if (rc) 2468 return rc; 2469 2470 /* Filter settings will get applied when port is started */ 2471 if (!dev->data->dev_started) 2472 return 0; 2473 2474 if (mask & ETH_VLAN_FILTER_MASK) { 2475 /* Enable or disable VLAN filtering */ 2476 rc = bnxt_config_vlan_hw_filter(bp, rx_offloads); 2477 if (rc) 2478 return rc; 2479 } 2480 2481 if (mask & ETH_VLAN_STRIP_MASK) { 2482 /* Enable or disable VLAN stripping */ 2483 rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads); 2484 if (rc) 2485 return rc; 2486 } 2487 2488 if (mask & ETH_VLAN_EXTEND_MASK) { 2489 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2490 PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n"); 2491 else 2492 PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n"); 2493 } 2494 2495 return 0; 2496 } 2497 2498 static int 2499 bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type, 2500 uint16_t tpid) 2501 { 2502 struct bnxt *bp = dev->data->dev_private; 2503 int qinq = dev->data->dev_conf.rxmode.offloads & 2504 DEV_RX_OFFLOAD_VLAN_EXTEND; 2505 2506 if (vlan_type != ETH_VLAN_TYPE_INNER && 2507 vlan_type != ETH_VLAN_TYPE_OUTER) { 2508 PMD_DRV_LOG(ERR, 2509 "Unsupported vlan type."); 2510 return -EINVAL; 2511 } 2512 if (!qinq) { 2513 PMD_DRV_LOG(ERR, 
2514 "QinQ not enabled. Needs to be ON as we can " 2515 "accelerate only outer vlan\n"); 2516 return -EINVAL; 2517 } 2518 2519 if (vlan_type == ETH_VLAN_TYPE_OUTER) { 2520 switch (tpid) { 2521 case RTE_ETHER_TYPE_QINQ: 2522 bp->outer_tpid_bd = 2523 TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8; 2524 break; 2525 case RTE_ETHER_TYPE_VLAN: 2526 bp->outer_tpid_bd = 2527 TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; 2528 break; 2529 case RTE_ETHER_TYPE_QINQ1: 2530 bp->outer_tpid_bd = 2531 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100; 2532 break; 2533 case RTE_ETHER_TYPE_QINQ2: 2534 bp->outer_tpid_bd = 2535 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200; 2536 break; 2537 case RTE_ETHER_TYPE_QINQ3: 2538 bp->outer_tpid_bd = 2539 TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300; 2540 break; 2541 default: 2542 PMD_DRV_LOG(ERR, "Invalid TPID: %x\n", tpid); 2543 return -EINVAL; 2544 } 2545 bp->outer_tpid_bd |= tpid; 2546 PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd); 2547 } else if (vlan_type == ETH_VLAN_TYPE_INNER) { 2548 PMD_DRV_LOG(ERR, 2549 "Can accelerate only outer vlan in QinQ\n"); 2550 return -EINVAL; 2551 } 2552 2553 return 0; 2554 } 2555 2556 static int 2557 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, 2558 struct rte_ether_addr *addr) 2559 { 2560 struct bnxt *bp = dev->data->dev_private; 2561 /* Default Filter is tied to VNIC 0 */ 2562 struct bnxt_vnic_info *vnic = BNXT_GET_DEFAULT_VNIC(bp); 2563 int rc; 2564 2565 rc = is_bnxt_in_error(bp); 2566 if (rc) 2567 return rc; 2568 2569 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 2570 return -EPERM; 2571 2572 if (rte_is_zero_ether_addr(addr)) 2573 return -EINVAL; 2574 2575 /* Filter settings will get applied when port is started */ 2576 if (!dev->data->dev_started) 2577 return 0; 2578 2579 /* Check if the requested MAC is already added */ 2580 if (memcmp(addr, bp->mac_addr, RTE_ETHER_ADDR_LEN) == 0) 2581 return 0; 2582 2583 /* Destroy filter and re-create it */ 2584 bnxt_del_dflt_mac_filter(bp, vnic); 2585 2586 memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN); 2587 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 2588 /* This filter will allow only untagged packets */ 2589 rc = bnxt_add_vlan_filter(bp, 0); 2590 } else { 2591 rc = bnxt_add_mac_filter(bp, vnic, addr, 0, 0); 2592 } 2593 2594 PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); 2595 return rc; 2596 } 2597 2598 static int 2599 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 2600 struct rte_ether_addr *mc_addr_set, 2601 uint32_t nb_mc_addr) 2602 { 2603 struct bnxt *bp = eth_dev->data->dev_private; 2604 char *mc_addr_list = (char *)mc_addr_set; 2605 struct bnxt_vnic_info *vnic; 2606 uint32_t off = 0, i = 0; 2607 int rc; 2608 2609 rc = is_bnxt_in_error(bp); 2610 if (rc) 2611 return rc; 2612 2613 vnic = BNXT_GET_DEFAULT_VNIC(bp); 2614 2615 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 2616 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 2617 goto allmulti; 2618 } 2619 2620 /* TODO Check for Duplicate mcast addresses */ 2621 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 2622 for (i = 0; i < nb_mc_addr; i++) { 2623 memcpy(vnic->mc_list + off, &mc_addr_list[i], 2624 RTE_ETHER_ADDR_LEN); 2625 off += RTE_ETHER_ADDR_LEN; 2626 } 2627 2628 vnic->mc_addr_cnt = i; 2629 if (vnic->mc_addr_cnt) 2630 vnic->flags |= BNXT_VNIC_INFO_MCAST; 2631 else 2632 vnic->flags &= ~BNXT_VNIC_INFO_MCAST; 2633 2634 allmulti: 2635 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 2636 } 2637 2638 static int 2639 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 2640 { 2641 struct bnxt *bp = 
dev->data->dev_private; 2642 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 2643 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 2644 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 2645 uint8_t fw_rsvd = bp->fw_ver & 0xff; 2646 int ret; 2647 2648 ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d", 2649 fw_major, fw_minor, fw_updt, fw_rsvd); 2650 2651 ret += 1; /* add the size of '\0' */ 2652 if (fw_size < (uint32_t)ret) 2653 return ret; 2654 else 2655 return 0; 2656 } 2657 2658 static void 2659 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2660 struct rte_eth_rxq_info *qinfo) 2661 { 2662 struct bnxt *bp = dev->data->dev_private; 2663 struct bnxt_rx_queue *rxq; 2664 2665 if (is_bnxt_in_error(bp)) 2666 return; 2667 2668 rxq = dev->data->rx_queues[queue_id]; 2669 2670 qinfo->mp = rxq->mb_pool; 2671 qinfo->scattered_rx = dev->data->scattered_rx; 2672 qinfo->nb_desc = rxq->nb_rx_desc; 2673 2674 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 2675 qinfo->conf.rx_drop_en = rxq->drop_en; 2676 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 2677 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 2678 } 2679 2680 static void 2681 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 2682 struct rte_eth_txq_info *qinfo) 2683 { 2684 struct bnxt *bp = dev->data->dev_private; 2685 struct bnxt_tx_queue *txq; 2686 2687 if (is_bnxt_in_error(bp)) 2688 return; 2689 2690 txq = dev->data->tx_queues[queue_id]; 2691 2692 qinfo->nb_desc = txq->nb_tx_desc; 2693 2694 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 2695 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 2696 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 2697 2698 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 2699 qinfo->conf.tx_rs_thresh = 0; 2700 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 2701 qinfo->conf.offloads = txq->offloads; 2702 } 2703 2704 static const struct { 2705 eth_rx_burst_t pkt_burst; 2706 const char *info; 2707 } bnxt_rx_burst_info[] = { 2708 {bnxt_recv_pkts, "Scalar"}, 2709 #if defined(RTE_ARCH_X86) 2710 {bnxt_recv_pkts_vec, "Vector SSE"}, 2711 #elif defined(RTE_ARCH_ARM64) 2712 {bnxt_recv_pkts_vec, "Vector Neon"}, 2713 #endif 2714 }; 2715 2716 static int 2717 bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2718 struct rte_eth_burst_mode *mode) 2719 { 2720 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 2721 size_t i; 2722 2723 for (i = 0; i < RTE_DIM(bnxt_rx_burst_info); i++) { 2724 if (pkt_burst == bnxt_rx_burst_info[i].pkt_burst) { 2725 snprintf(mode->info, sizeof(mode->info), "%s", 2726 bnxt_rx_burst_info[i].info); 2727 return 0; 2728 } 2729 } 2730 2731 return -EINVAL; 2732 } 2733 2734 static const struct { 2735 eth_tx_burst_t pkt_burst; 2736 const char *info; 2737 } bnxt_tx_burst_info[] = { 2738 {bnxt_xmit_pkts, "Scalar"}, 2739 #if defined(RTE_ARCH_X86) 2740 {bnxt_xmit_pkts_vec, "Vector SSE"}, 2741 #elif defined(RTE_ARCH_ARM64) 2742 {bnxt_xmit_pkts_vec, "Vector Neon"}, 2743 #endif 2744 }; 2745 2746 static int 2747 bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 2748 struct rte_eth_burst_mode *mode) 2749 { 2750 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 2751 size_t i; 2752 2753 for (i = 0; i < RTE_DIM(bnxt_tx_burst_info); i++) { 2754 if (pkt_burst == bnxt_tx_burst_info[i].pkt_burst) { 2755 snprintf(mode->info, sizeof(mode->info), "%s", 2756 bnxt_tx_burst_info[i].info); 2757 return 0; 2758 } 2759 } 2760 2761 return -EINVAL; 2762 } 2763 2764 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 2765 { 
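	/*
	 * MTU update path: recompute the maximum Rx frame size from the new
	 * MTU (plus Ethernet header, CRC and VLAN tags) and refresh the MRU
	 * of every VNIC. The change is rejected if the new frame size would
	 * require scattered Rx while the port is running without it.
	 */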
2766 struct bnxt *bp = eth_dev->data->dev_private; 2767 uint32_t new_pkt_size; 2768 uint32_t rc = 0; 2769 uint32_t i; 2770 2771 rc = is_bnxt_in_error(bp); 2772 if (rc) 2773 return rc; 2774 2775 /* Exit if receive queues are not configured yet */ 2776 if (!eth_dev->data->nb_rx_queues) 2777 return rc; 2778 2779 new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 2780 VLAN_TAG_SIZE * BNXT_NUM_VLANS; 2781 2782 /* 2783 * Disallow any MTU change that would require scattered receive support 2784 * if it is not already enabled. 2785 */ 2786 if (eth_dev->data->dev_started && 2787 !eth_dev->data->scattered_rx && 2788 (new_pkt_size > 2789 eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 2790 PMD_DRV_LOG(ERR, 2791 "MTU change would require scattered rx support. "); 2792 PMD_DRV_LOG(ERR, "Stop port before changing MTU.\n"); 2793 return -EINVAL; 2794 } 2795 2796 if (new_mtu > RTE_ETHER_MTU) { 2797 bp->flags |= BNXT_FLAG_JUMBO; 2798 bp->eth_dev->data->dev_conf.rxmode.offloads |= 2799 DEV_RX_OFFLOAD_JUMBO_FRAME; 2800 } else { 2801 bp->eth_dev->data->dev_conf.rxmode.offloads &= 2802 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2803 bp->flags &= ~BNXT_FLAG_JUMBO; 2804 } 2805 2806 /* Is there a change in mtu setting? */ 2807 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size) 2808 return rc; 2809 2810 for (i = 0; i < bp->nr_vnics; i++) { 2811 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 2812 uint16_t size = 0; 2813 2814 vnic->mru = BNXT_VNIC_MRU(new_mtu); 2815 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 2816 if (rc) 2817 break; 2818 2819 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 2820 size -= RTE_PKTMBUF_HEADROOM; 2821 2822 if (size < new_mtu) { 2823 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 2824 if (rc) 2825 return rc; 2826 } 2827 } 2828 2829 if (!rc) 2830 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size; 2831 2832 PMD_DRV_LOG(INFO, "New MTU is %d\n", new_mtu); 2833 2834 return rc; 2835 } 2836 2837 static int 2838 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 2839 { 2840 struct bnxt *bp = dev->data->dev_private; 2841 uint16_t vlan = bp->vlan; 2842 int rc; 2843 2844 rc = is_bnxt_in_error(bp); 2845 if (rc) 2846 return rc; 2847 2848 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { 2849 PMD_DRV_LOG(ERR, 2850 "PVID cannot be modified for this function\n"); 2851 return -ENOTSUP; 2852 } 2853 bp->vlan = on ? 
pvid : 0; 2854 2855 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 2856 if (rc) 2857 bp->vlan = vlan; 2858 return rc; 2859 } 2860 2861 static int 2862 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 2863 { 2864 struct bnxt *bp = dev->data->dev_private; 2865 int rc; 2866 2867 rc = is_bnxt_in_error(bp); 2868 if (rc) 2869 return rc; 2870 2871 return bnxt_hwrm_port_led_cfg(bp, true); 2872 } 2873 2874 static int 2875 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 2876 { 2877 struct bnxt *bp = dev->data->dev_private; 2878 int rc; 2879 2880 rc = is_bnxt_in_error(bp); 2881 if (rc) 2882 return rc; 2883 2884 return bnxt_hwrm_port_led_cfg(bp, false); 2885 } 2886 2887 static uint32_t 2888 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) 2889 { 2890 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 2891 uint32_t desc = 0, raw_cons = 0, cons; 2892 struct bnxt_cp_ring_info *cpr; 2893 struct bnxt_rx_queue *rxq; 2894 struct rx_pkt_cmpl *rxcmp; 2895 int rc; 2896 2897 rc = is_bnxt_in_error(bp); 2898 if (rc) 2899 return rc; 2900 2901 rxq = dev->data->rx_queues[rx_queue_id]; 2902 cpr = rxq->cp_ring; 2903 raw_cons = cpr->cp_raw_cons; 2904 2905 while (1) { 2906 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 2907 rte_prefetch0(&cpr->cp_desc_ring[cons]); 2908 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2909 2910 if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) { 2911 break; 2912 } else { 2913 raw_cons++; 2914 desc++; 2915 } 2916 } 2917 2918 return desc; 2919 } 2920 2921 static int 2922 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 2923 { 2924 struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; 2925 struct bnxt_rx_ring_info *rxr; 2926 struct bnxt_cp_ring_info *cpr; 2927 struct rte_mbuf *rx_buf; 2928 struct rx_pkt_cmpl *rxcmp; 2929 uint32_t cons, cp_cons; 2930 int rc; 2931 2932 if (!rxq) 2933 return -EINVAL; 2934 2935 rc = is_bnxt_in_error(rxq->bp); 2936 if (rc) 2937 return rc; 2938 2939 cpr = rxq->cp_ring; 2940 rxr = rxq->rx_ring; 2941 2942 if (offset >= rxq->nb_rx_desc) 2943 return -EINVAL; 2944 2945 cons = RING_CMP(cpr->cp_ring_struct, offset); 2946 cp_cons = cpr->cp_raw_cons; 2947 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2948 2949 if (cons > cp_cons) { 2950 if (CMPL_VALID(rxcmp, cpr->valid)) 2951 return RTE_ETH_RX_DESC_DONE; 2952 } else { 2953 if (CMPL_VALID(rxcmp, !cpr->valid)) 2954 return RTE_ETH_RX_DESC_DONE; 2955 } 2956 rx_buf = rxr->rx_buf_ring[cons]; 2957 if (rx_buf == NULL || rx_buf == &rxq->fake_mbuf) 2958 return RTE_ETH_RX_DESC_UNAVAIL; 2959 2960 2961 return RTE_ETH_RX_DESC_AVAIL; 2962 } 2963 2964 static int 2965 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 2966 { 2967 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 2968 struct bnxt_tx_ring_info *txr; 2969 struct bnxt_cp_ring_info *cpr; 2970 struct bnxt_sw_tx_bd *tx_buf; 2971 struct tx_pkt_cmpl *txcmp; 2972 uint32_t cons, cp_cons; 2973 int rc; 2974 2975 if (!txq) 2976 return -EINVAL; 2977 2978 rc = is_bnxt_in_error(txq->bp); 2979 if (rc) 2980 return rc; 2981 2982 cpr = txq->cp_ring; 2983 txr = txq->tx_ring; 2984 2985 if (offset >= txq->nb_tx_desc) 2986 return -EINVAL; 2987 2988 cons = RING_CMP(cpr->cp_ring_struct, offset); 2989 txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 2990 cp_cons = cpr->cp_raw_cons; 2991 2992 if (cons > cp_cons) { 2993 if (CMPL_VALID(txcmp, cpr->valid)) 2994 return RTE_ETH_TX_DESC_UNAVAIL; 2995 } else { 2996 if (CMPL_VALID(txcmp, !cpr->valid)) 2997 return RTE_ETH_TX_DESC_UNAVAIL; 2998 } 2999 tx_buf = 
&txr->tx_buf_ring[cons]; 3000 if (tx_buf->mbuf == NULL) 3001 return RTE_ETH_TX_DESC_DONE; 3002 3003 return RTE_ETH_TX_DESC_FULL; 3004 } 3005 3006 int 3007 bnxt_filter_ctrl_op(struct rte_eth_dev *dev, 3008 enum rte_filter_type filter_type, 3009 enum rte_filter_op filter_op, void *arg) 3010 { 3011 struct bnxt *bp = dev->data->dev_private; 3012 int ret = 0; 3013 3014 if (!bp) 3015 return -EIO; 3016 3017 if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) { 3018 struct bnxt_representor *vfr = dev->data->dev_private; 3019 bp = vfr->parent_dev->data->dev_private; 3020 /* parent is deleted while children are still valid */ 3021 if (!bp) { 3022 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR Error %d:%d\n", 3023 dev->data->port_id, 3024 filter_type, 3025 filter_op); 3026 return -EIO; 3027 } 3028 } 3029 3030 ret = is_bnxt_in_error(bp); 3031 if (ret) 3032 return ret; 3033 3034 switch (filter_type) { 3035 case RTE_ETH_FILTER_GENERIC: 3036 if (filter_op != RTE_ETH_FILTER_GET) 3037 return -EINVAL; 3038 3039 /* PMD supports thread-safe flow operations. rte_flow API 3040 * functions can avoid mutex for multi-thread safety. 3041 */ 3042 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; 3043 3044 if (BNXT_TRUFLOW_EN(bp)) 3045 *(const void **)arg = &bnxt_ulp_rte_flow_ops; 3046 else 3047 *(const void **)arg = &bnxt_flow_ops; 3048 break; 3049 default: 3050 PMD_DRV_LOG(ERR, 3051 "Filter type (%d) not supported", filter_type); 3052 ret = -EINVAL; 3053 break; 3054 } 3055 return ret; 3056 } 3057 3058 static const uint32_t * 3059 bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev) 3060 { 3061 static const uint32_t ptypes[] = { 3062 RTE_PTYPE_L2_ETHER_VLAN, 3063 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 3064 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 3065 RTE_PTYPE_L4_ICMP, 3066 RTE_PTYPE_L4_TCP, 3067 RTE_PTYPE_L4_UDP, 3068 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 3069 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 3070 RTE_PTYPE_INNER_L4_ICMP, 3071 RTE_PTYPE_INNER_L4_TCP, 3072 RTE_PTYPE_INNER_L4_UDP, 3073 RTE_PTYPE_UNKNOWN 3074 }; 3075 3076 if (!dev->rx_pkt_burst) 3077 return NULL; 3078 3079 return ptypes; 3080 } 3081 3082 static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count, 3083 int reg_win) 3084 { 3085 uint32_t reg_base = *reg_arr & 0xfffff000; 3086 uint32_t win_off; 3087 int i; 3088 3089 for (i = 0; i < count; i++) { 3090 if ((reg_arr[i] & 0xfffff000) != reg_base) 3091 return -ERANGE; 3092 } 3093 win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; 3094 rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off); 3095 return 0; 3096 } 3097 3098 static int bnxt_map_ptp_regs(struct bnxt *bp) 3099 { 3100 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3101 uint32_t *reg_arr; 3102 int rc, i; 3103 3104 reg_arr = ptp->rx_regs; 3105 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5); 3106 if (rc) 3107 return rc; 3108 3109 reg_arr = ptp->tx_regs; 3110 rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6); 3111 if (rc) 3112 return rc; 3113 3114 for (i = 0; i < BNXT_PTP_RX_REGS; i++) 3115 ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff); 3116 3117 for (i = 0; i < BNXT_PTP_TX_REGS; i++) 3118 ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff); 3119 3120 return 0; 3121 } 3122 3123 static void bnxt_unmap_ptp_regs(struct bnxt *bp) 3124 { 3125 rte_write32(0, (uint8_t *)bp->bar0 + 3126 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); 3127 rte_write32(0, (uint8_t *)bp->bar0 + 3128 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20); 3129 } 3130 3131 static uint64_t bnxt_cc_read(struct bnxt *bp) 3132 { 3133 uint64_t ns; 3134 3135 ns = 
rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3136 BNXT_GRCPF_REG_SYNC_TIME)); 3137 ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3138 BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32; 3139 return ns; 3140 } 3141 3142 static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts) 3143 { 3144 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3145 uint32_t fifo; 3146 3147 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3148 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3149 if (fifo & BNXT_PTP_TX_FIFO_EMPTY) 3150 return -EAGAIN; 3151 3152 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3153 ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO])); 3154 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3155 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L])); 3156 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3157 ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32; 3158 3159 return 0; 3160 } 3161 3162 static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts) 3163 { 3164 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3165 struct bnxt_pf_info *pf = bp->pf; 3166 uint16_t port_id; 3167 uint32_t fifo; 3168 3169 if (!ptp) 3170 return -ENODEV; 3171 3172 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3173 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3174 if (!(fifo & BNXT_PTP_RX_FIFO_PENDING)) 3175 return -EAGAIN; 3176 3177 port_id = pf->port_id; 3178 rte_write32(1 << port_id, (uint8_t *)bp->bar0 + 3179 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]); 3180 3181 fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3182 ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO])); 3183 if (fifo & BNXT_PTP_RX_FIFO_PENDING) { 3184 /* bnxt_clr_rx_ts(bp); TBD */ 3185 return -EBUSY; 3186 } 3187 3188 *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3189 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L])); 3190 *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3191 ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32; 3192 3193 return 0; 3194 } 3195 3196 static int 3197 bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 3198 { 3199 uint64_t ns; 3200 struct bnxt *bp = dev->data->dev_private; 3201 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3202 3203 if (!ptp) 3204 return 0; 3205 3206 ns = rte_timespec_to_ns(ts); 3207 /* Set the timecounters to a new value. 
*/ 3208 ptp->tc.nsec = ns; 3209 3210 return 0; 3211 } 3212 3213 static int 3214 bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 3215 { 3216 struct bnxt *bp = dev->data->dev_private; 3217 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3218 uint64_t ns, systime_cycles = 0; 3219 int rc = 0; 3220 3221 if (!ptp) 3222 return 0; 3223 3224 if (BNXT_CHIP_THOR(bp)) 3225 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME, 3226 &systime_cycles); 3227 else 3228 systime_cycles = bnxt_cc_read(bp); 3229 3230 ns = rte_timecounter_update(&ptp->tc, systime_cycles); 3231 *ts = rte_ns_to_timespec(ns); 3232 3233 return rc; 3234 } 3235 static int 3236 bnxt_timesync_enable(struct rte_eth_dev *dev) 3237 { 3238 struct bnxt *bp = dev->data->dev_private; 3239 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3240 uint32_t shift = 0; 3241 int rc; 3242 3243 if (!ptp) 3244 return 0; 3245 3246 ptp->rx_filter = 1; 3247 ptp->tx_tstamp_en = 1; 3248 ptp->rxctl = BNXT_PTP_MSG_EVENTS; 3249 3250 rc = bnxt_hwrm_ptp_cfg(bp); 3251 if (rc) 3252 return rc; 3253 3254 memset(&ptp->tc, 0, sizeof(struct rte_timecounter)); 3255 memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3256 memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 3257 3258 ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3259 ptp->tc.cc_shift = shift; 3260 ptp->tc.nsec_mask = (1ULL << shift) - 1; 3261 3262 ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3263 ptp->rx_tstamp_tc.cc_shift = shift; 3264 ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3265 3266 ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK; 3267 ptp->tx_tstamp_tc.cc_shift = shift; 3268 ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 3269 3270 if (!BNXT_CHIP_THOR(bp)) 3271 bnxt_map_ptp_regs(bp); 3272 3273 return 0; 3274 } 3275 3276 static int 3277 bnxt_timesync_disable(struct rte_eth_dev *dev) 3278 { 3279 struct bnxt *bp = dev->data->dev_private; 3280 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3281 3282 if (!ptp) 3283 return 0; 3284 3285 ptp->rx_filter = 0; 3286 ptp->tx_tstamp_en = 0; 3287 ptp->rxctl = 0; 3288 3289 bnxt_hwrm_ptp_cfg(bp); 3290 3291 if (!BNXT_CHIP_THOR(bp)) 3292 bnxt_unmap_ptp_regs(bp); 3293 3294 return 0; 3295 } 3296 3297 static int 3298 bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 3299 struct timespec *timestamp, 3300 uint32_t flags __rte_unused) 3301 { 3302 struct bnxt *bp = dev->data->dev_private; 3303 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3304 uint64_t rx_tstamp_cycles = 0; 3305 uint64_t ns; 3306 3307 if (!ptp) 3308 return 0; 3309 3310 if (BNXT_CHIP_THOR(bp)) 3311 rx_tstamp_cycles = ptp->rx_timestamp; 3312 else 3313 bnxt_get_rx_ts(bp, &rx_tstamp_cycles); 3314 3315 ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles); 3316 *timestamp = rte_ns_to_timespec(ns); 3317 return 0; 3318 } 3319 3320 static int 3321 bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 3322 struct timespec *timestamp) 3323 { 3324 struct bnxt *bp = dev->data->dev_private; 3325 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3326 uint64_t tx_tstamp_cycles = 0; 3327 uint64_t ns; 3328 int rc = 0; 3329 3330 if (!ptp) 3331 return 0; 3332 3333 if (BNXT_CHIP_THOR(bp)) 3334 rc = bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_PATH_TX, 3335 &tx_tstamp_cycles); 3336 else 3337 rc = bnxt_get_tx_ts(bp, &tx_tstamp_cycles); 3338 3339 ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles); 3340 *timestamp = rte_ns_to_timespec(ns); 3341 3342 return rc; 3343 } 3344 3345 static int 3346 bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 3347 { 3348 
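	/*
	 * Adjust PTP time by applying the signed nanosecond delta directly
	 * to the software timecounter; no device register is written here.
	 */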
struct bnxt *bp = dev->data->dev_private; 3349 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 3350 3351 if (!ptp) 3352 return 0; 3353 3354 ptp->tc.nsec += delta; 3355 3356 return 0; 3357 } 3358 3359 static int 3360 bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) 3361 { 3362 struct bnxt *bp = dev->data->dev_private; 3363 int rc; 3364 uint32_t dir_entries; 3365 uint32_t entry_length; 3366 3367 rc = is_bnxt_in_error(bp); 3368 if (rc) 3369 return rc; 3370 3371 PMD_DRV_LOG(INFO, PCI_PRI_FMT "\n", 3372 bp->pdev->addr.domain, bp->pdev->addr.bus, 3373 bp->pdev->addr.devid, bp->pdev->addr.function); 3374 3375 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); 3376 if (rc != 0) 3377 return rc; 3378 3379 return dir_entries * entry_length; 3380 } 3381 3382 static int 3383 bnxt_get_eeprom_op(struct rte_eth_dev *dev, 3384 struct rte_dev_eeprom_info *in_eeprom) 3385 { 3386 struct bnxt *bp = dev->data->dev_private; 3387 uint32_t index; 3388 uint32_t offset; 3389 int rc; 3390 3391 rc = is_bnxt_in_error(bp); 3392 if (rc) 3393 return rc; 3394 3395 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3396 bp->pdev->addr.domain, bp->pdev->addr.bus, 3397 bp->pdev->addr.devid, bp->pdev->addr.function, 3398 in_eeprom->offset, in_eeprom->length); 3399 3400 if (in_eeprom->offset == 0) /* special offset value to get directory */ 3401 return bnxt_get_nvram_directory(bp, in_eeprom->length, 3402 in_eeprom->data); 3403 3404 index = in_eeprom->offset >> 24; 3405 offset = in_eeprom->offset & 0xffffff; 3406 3407 if (index != 0) 3408 return bnxt_hwrm_get_nvram_item(bp, index - 1, offset, 3409 in_eeprom->length, in_eeprom->data); 3410 3411 return 0; 3412 } 3413 3414 static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type) 3415 { 3416 switch (dir_type) { 3417 case BNX_DIR_TYPE_CHIMP_PATCH: 3418 case BNX_DIR_TYPE_BOOTCODE: 3419 case BNX_DIR_TYPE_BOOTCODE_2: 3420 case BNX_DIR_TYPE_APE_FW: 3421 case BNX_DIR_TYPE_APE_PATCH: 3422 case BNX_DIR_TYPE_KONG_FW: 3423 case BNX_DIR_TYPE_KONG_PATCH: 3424 case BNX_DIR_TYPE_BONO_FW: 3425 case BNX_DIR_TYPE_BONO_PATCH: 3426 /* FALLTHROUGH */ 3427 return true; 3428 } 3429 3430 return false; 3431 } 3432 3433 static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type) 3434 { 3435 switch (dir_type) { 3436 case BNX_DIR_TYPE_AVS: 3437 case BNX_DIR_TYPE_EXP_ROM_MBA: 3438 case BNX_DIR_TYPE_PCIE: 3439 case BNX_DIR_TYPE_TSCF_UCODE: 3440 case BNX_DIR_TYPE_EXT_PHY: 3441 case BNX_DIR_TYPE_CCM: 3442 case BNX_DIR_TYPE_ISCSI_BOOT: 3443 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: 3444 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: 3445 /* FALLTHROUGH */ 3446 return true; 3447 } 3448 3449 return false; 3450 } 3451 3452 static bool bnxt_dir_type_is_executable(uint16_t dir_type) 3453 { 3454 return bnxt_dir_type_is_ape_bin_format(dir_type) || 3455 bnxt_dir_type_is_other_exec_format(dir_type); 3456 } 3457 3458 static int 3459 bnxt_set_eeprom_op(struct rte_eth_dev *dev, 3460 struct rte_dev_eeprom_info *in_eeprom) 3461 { 3462 struct bnxt *bp = dev->data->dev_private; 3463 uint8_t index, dir_op; 3464 uint16_t type, ext, ordinal, attr; 3465 int rc; 3466 3467 rc = is_bnxt_in_error(bp); 3468 if (rc) 3469 return rc; 3470 3471 PMD_DRV_LOG(INFO, PCI_PRI_FMT " in_eeprom->offset = %d len = %d\n", 3472 bp->pdev->addr.domain, bp->pdev->addr.bus, 3473 bp->pdev->addr.devid, bp->pdev->addr.function, 3474 in_eeprom->offset, in_eeprom->length); 3475 3476 if (!BNXT_PF(bp)) { 3477 PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n"); 3478 return -EINVAL; 3479 } 3480 3481 type = in_eeprom->magic >> 16; 3482 3483 
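	/*
	 * in_eeprom->magic carries the NVM item type in its upper 16 bits.
	 * The reserved type 0xffff selects a directory operation, where the
	 * low byte of magic is the 1-based directory index and the next
	 * byte is the operation code (0x0e == erase entry).
	 */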
if (type == 0xffff) { /* special value for directory operations */ 3484 index = in_eeprom->magic & 0xff; 3485 dir_op = in_eeprom->magic >> 8; 3486 if (index == 0) 3487 return -EINVAL; 3488 switch (dir_op) { 3489 case 0x0e: /* erase */ 3490 if (in_eeprom->offset != ~in_eeprom->magic) 3491 return -EINVAL; 3492 return bnxt_hwrm_erase_nvram_directory(bp, index - 1); 3493 default: 3494 return -EINVAL; 3495 } 3496 } 3497 3498 /* Create or re-write an NVM item: */ 3499 if (bnxt_dir_type_is_executable(type) == true) 3500 return -EOPNOTSUPP; 3501 ext = in_eeprom->magic & 0xffff; 3502 ordinal = in_eeprom->offset >> 16; 3503 attr = in_eeprom->offset & 0xffff; 3504 3505 return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, 3506 in_eeprom->data, in_eeprom->length); 3507 } 3508 3509 /* 3510 * Initialization 3511 */ 3512 3513 static const struct eth_dev_ops bnxt_dev_ops = { 3514 .dev_infos_get = bnxt_dev_info_get_op, 3515 .dev_close = bnxt_dev_close_op, 3516 .dev_configure = bnxt_dev_configure_op, 3517 .dev_start = bnxt_dev_start_op, 3518 .dev_stop = bnxt_dev_stop_op, 3519 .dev_set_link_up = bnxt_dev_set_link_up_op, 3520 .dev_set_link_down = bnxt_dev_set_link_down_op, 3521 .stats_get = bnxt_stats_get_op, 3522 .stats_reset = bnxt_stats_reset_op, 3523 .rx_queue_setup = bnxt_rx_queue_setup_op, 3524 .rx_queue_release = bnxt_rx_queue_release_op, 3525 .tx_queue_setup = bnxt_tx_queue_setup_op, 3526 .tx_queue_release = bnxt_tx_queue_release_op, 3527 .rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op, 3528 .rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op, 3529 .reta_update = bnxt_reta_update_op, 3530 .reta_query = bnxt_reta_query_op, 3531 .rss_hash_update = bnxt_rss_hash_update_op, 3532 .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, 3533 .link_update = bnxt_link_update_op, 3534 .promiscuous_enable = bnxt_promiscuous_enable_op, 3535 .promiscuous_disable = bnxt_promiscuous_disable_op, 3536 .allmulticast_enable = bnxt_allmulticast_enable_op, 3537 .allmulticast_disable = bnxt_allmulticast_disable_op, 3538 .mac_addr_add = bnxt_mac_addr_add_op, 3539 .mac_addr_remove = bnxt_mac_addr_remove_op, 3540 .flow_ctrl_get = bnxt_flow_ctrl_get_op, 3541 .flow_ctrl_set = bnxt_flow_ctrl_set_op, 3542 .udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op, 3543 .udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op, 3544 .vlan_filter_set = bnxt_vlan_filter_set_op, 3545 .vlan_offload_set = bnxt_vlan_offload_set_op, 3546 .vlan_tpid_set = bnxt_vlan_tpid_set_op, 3547 .vlan_pvid_set = bnxt_vlan_pvid_set_op, 3548 .mtu_set = bnxt_mtu_set_op, 3549 .mac_addr_set = bnxt_set_default_mac_addr_op, 3550 .xstats_get = bnxt_dev_xstats_get_op, 3551 .xstats_get_names = bnxt_dev_xstats_get_names_op, 3552 .xstats_reset = bnxt_dev_xstats_reset_op, 3553 .fw_version_get = bnxt_fw_version_get, 3554 .set_mc_addr_list = bnxt_dev_set_mc_addr_list_op, 3555 .rxq_info_get = bnxt_rxq_info_get_op, 3556 .txq_info_get = bnxt_txq_info_get_op, 3557 .rx_burst_mode_get = bnxt_rx_burst_mode_get, 3558 .tx_burst_mode_get = bnxt_tx_burst_mode_get, 3559 .dev_led_on = bnxt_dev_led_on_op, 3560 .dev_led_off = bnxt_dev_led_off_op, 3561 .rx_queue_start = bnxt_rx_queue_start, 3562 .rx_queue_stop = bnxt_rx_queue_stop, 3563 .tx_queue_start = bnxt_tx_queue_start, 3564 .tx_queue_stop = bnxt_tx_queue_stop, 3565 .filter_ctrl = bnxt_filter_ctrl_op, 3566 .dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op, 3567 .get_eeprom_length = bnxt_get_eeprom_length_op, 3568 .get_eeprom = bnxt_get_eeprom_op, 3569 .set_eeprom = bnxt_set_eeprom_op, 3570 .timesync_enable = 
bnxt_timesync_enable, 3571 .timesync_disable = bnxt_timesync_disable, 3572 .timesync_read_time = bnxt_timesync_read_time, 3573 .timesync_write_time = bnxt_timesync_write_time, 3574 .timesync_adjust_time = bnxt_timesync_adjust_time, 3575 .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp, 3576 .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp, 3577 }; 3578 3579 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg) 3580 { 3581 uint32_t offset; 3582 3583 /* Only pre-map the reset GRC registers using window 3 */ 3584 rte_write32(reg & 0xfffff000, (uint8_t *)bp->bar0 + 3585 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 8); 3586 3587 offset = BNXT_GRCP_WINDOW_3_BASE + (reg & 0xffc); 3588 3589 return offset; 3590 } 3591 3592 int bnxt_map_fw_health_status_regs(struct bnxt *bp) 3593 { 3594 struct bnxt_error_recovery_info *info = bp->recovery_info; 3595 uint32_t reg_base = 0xffffffff; 3596 int i; 3597 3598 /* Only pre-map the monitoring GRC registers using window 2 */ 3599 for (i = 0; i < BNXT_FW_STATUS_REG_CNT; i++) { 3600 uint32_t reg = info->status_regs[i]; 3601 3602 if (BNXT_FW_STATUS_REG_TYPE(reg) != BNXT_FW_STATUS_REG_TYPE_GRC) 3603 continue; 3604 3605 if (reg_base == 0xffffffff) 3606 reg_base = reg & 0xfffff000; 3607 if ((reg & 0xfffff000) != reg_base) 3608 return -ERANGE; 3609 3610 /* Use mask 0xffc as the Lower 2 bits indicates 3611 * address space location 3612 */ 3613 info->mapped_status_regs[i] = BNXT_GRCP_WINDOW_2_BASE + 3614 (reg & 0xffc); 3615 } 3616 3617 if (reg_base == 0xffffffff) 3618 return 0; 3619 3620 rte_write32(reg_base, (uint8_t *)bp->bar0 + 3621 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 3622 3623 return 0; 3624 } 3625 3626 static void bnxt_write_fw_reset_reg(struct bnxt *bp, uint32_t index) 3627 { 3628 struct bnxt_error_recovery_info *info = bp->recovery_info; 3629 uint32_t delay = info->delay_after_reset[index]; 3630 uint32_t val = info->reset_reg_val[index]; 3631 uint32_t reg = info->reset_reg[index]; 3632 uint32_t type, offset; 3633 3634 type = BNXT_FW_STATUS_REG_TYPE(reg); 3635 offset = BNXT_FW_STATUS_REG_OFF(reg); 3636 3637 switch (type) { 3638 case BNXT_FW_STATUS_REG_TYPE_CFG: 3639 rte_pci_write_config(bp->pdev, &val, sizeof(val), offset); 3640 break; 3641 case BNXT_FW_STATUS_REG_TYPE_GRC: 3642 offset = bnxt_map_reset_regs(bp, offset); 3643 rte_write32(val, (uint8_t *)bp->bar0 + offset); 3644 break; 3645 case BNXT_FW_STATUS_REG_TYPE_BAR0: 3646 rte_write32(val, (uint8_t *)bp->bar0 + offset); 3647 break; 3648 } 3649 /* wait on a specific interval of time until core reset is complete */ 3650 if (delay) 3651 rte_delay_ms(delay); 3652 } 3653 3654 static void bnxt_dev_cleanup(struct bnxt *bp) 3655 { 3656 bp->eth_dev->data->dev_link.link_status = 0; 3657 bp->link_info->link_up = 0; 3658 if (bp->eth_dev->data->dev_started) 3659 bnxt_dev_stop_op(bp->eth_dev); 3660 3661 bnxt_uninit_resources(bp, true); 3662 } 3663 3664 static int bnxt_restore_vlan_filters(struct bnxt *bp) 3665 { 3666 struct rte_eth_dev *dev = bp->eth_dev; 3667 struct rte_vlan_filter_conf *vfc; 3668 int vidx, vbit, rc; 3669 uint16_t vlan_id; 3670 3671 for (vlan_id = 1; vlan_id <= RTE_ETHER_MAX_VLAN_ID; vlan_id++) { 3672 vfc = &dev->data->vlan_filter_conf; 3673 vidx = vlan_id / 64; 3674 vbit = vlan_id % 64; 3675 3676 /* Each bit corresponds to a VLAN id */ 3677 if (vfc->ids[vidx] & (UINT64_C(1) << vbit)) { 3678 rc = bnxt_add_vlan_filter(bp, vlan_id); 3679 if (rc) 3680 return rc; 3681 } 3682 } 3683 3684 return 0; 3685 } 3686 3687 static int bnxt_restore_mac_filters(struct bnxt *bp) 3688 { 3689 
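	/*
	 * Replay the secondary unicast MAC addresses after a firmware reset:
	 * walk entries 1..max_mac_addrs-1, skip empty slots, and re-add each
	 * address once for every pool selected in mac_pool_sel. Untrusted
	 * VFs skip this step.
	 */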
struct rte_eth_dev *dev = bp->eth_dev; 3690 struct rte_eth_dev_info dev_info; 3691 struct rte_ether_addr *addr; 3692 uint64_t pool_mask; 3693 uint32_t pool = 0; 3694 uint16_t i; 3695 int rc; 3696 3697 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) 3698 return 0; 3699 3700 rc = bnxt_dev_info_get_op(dev, &dev_info); 3701 if (rc) 3702 return rc; 3703 3704 /* replay MAC address configuration */ 3705 for (i = 1; i < dev_info.max_mac_addrs; i++) { 3706 addr = &dev->data->mac_addrs[i]; 3707 3708 /* skip zero address */ 3709 if (rte_is_zero_ether_addr(addr)) 3710 continue; 3711 3712 pool = 0; 3713 pool_mask = dev->data->mac_pool_sel[i]; 3714 3715 do { 3716 if (pool_mask & 1ULL) { 3717 rc = bnxt_mac_addr_add_op(dev, addr, i, pool); 3718 if (rc) 3719 return rc; 3720 } 3721 pool_mask >>= 1; 3722 pool++; 3723 } while (pool_mask); 3724 } 3725 3726 return 0; 3727 } 3728 3729 static int bnxt_restore_filters(struct bnxt *bp) 3730 { 3731 struct rte_eth_dev *dev = bp->eth_dev; 3732 int ret = 0; 3733 3734 if (dev->data->all_multicast) { 3735 ret = bnxt_allmulticast_enable_op(dev); 3736 if (ret) 3737 return ret; 3738 } 3739 if (dev->data->promiscuous) { 3740 ret = bnxt_promiscuous_enable_op(dev); 3741 if (ret) 3742 return ret; 3743 } 3744 3745 ret = bnxt_restore_mac_filters(bp); 3746 if (ret) 3747 return ret; 3748 3749 ret = bnxt_restore_vlan_filters(bp); 3750 /* TODO restore other filters as well */ 3751 return ret; 3752 } 3753 3754 static void bnxt_dev_recover(void *arg) 3755 { 3756 struct bnxt *bp = arg; 3757 int timeout = bp->fw_reset_max_msecs; 3758 int rc = 0; 3759 3760 /* Clear Error flag so that device re-init should happen */ 3761 bp->flags &= ~BNXT_FLAG_FATAL_ERROR; 3762 3763 do { 3764 rc = bnxt_hwrm_ver_get(bp, SHORT_HWRM_CMD_TIMEOUT); 3765 if (rc == 0) 3766 break; 3767 rte_delay_ms(BNXT_FW_READY_WAIT_INTERVAL); 3768 timeout -= BNXT_FW_READY_WAIT_INTERVAL; 3769 } while (rc && timeout); 3770 3771 if (rc) { 3772 PMD_DRV_LOG(ERR, "FW is not Ready after reset\n"); 3773 goto err; 3774 } 3775 3776 rc = bnxt_init_resources(bp, true); 3777 if (rc) { 3778 PMD_DRV_LOG(ERR, 3779 "Failed to initialize resources after reset\n"); 3780 goto err; 3781 } 3782 /* clear reset flag as the device is initialized now */ 3783 bp->flags &= ~BNXT_FLAG_FW_RESET; 3784 3785 rc = bnxt_dev_start_op(bp->eth_dev); 3786 if (rc) { 3787 PMD_DRV_LOG(ERR, "Failed to start port after reset\n"); 3788 goto err_start; 3789 } 3790 3791 rc = bnxt_restore_filters(bp); 3792 if (rc) 3793 goto err_start; 3794 3795 PMD_DRV_LOG(INFO, "Recovered from FW reset\n"); 3796 return; 3797 err_start: 3798 bnxt_dev_stop_op(bp->eth_dev); 3799 err: 3800 bp->flags |= BNXT_FLAG_FATAL_ERROR; 3801 bnxt_uninit_resources(bp, false); 3802 PMD_DRV_LOG(ERR, "Failed to recover from FW reset\n"); 3803 } 3804 3805 void bnxt_dev_reset_and_resume(void *arg) 3806 { 3807 struct bnxt *bp = arg; 3808 int rc; 3809 3810 bnxt_dev_cleanup(bp); 3811 3812 bnxt_wait_for_device_shutdown(bp); 3813 3814 rc = rte_eal_alarm_set(US_PER_MS * bp->fw_reset_min_msecs, 3815 bnxt_dev_recover, (void *)bp); 3816 if (rc) 3817 PMD_DRV_LOG(ERR, "Error setting recovery alarm"); 3818 } 3819 3820 uint32_t bnxt_read_fw_status_reg(struct bnxt *bp, uint32_t index) 3821 { 3822 struct bnxt_error_recovery_info *info = bp->recovery_info; 3823 uint32_t reg = info->status_regs[index]; 3824 uint32_t type, offset, val = 0; 3825 3826 type = BNXT_FW_STATUS_REG_TYPE(reg); 3827 offset = BNXT_FW_STATUS_REG_OFF(reg); 3828 3829 switch (type) { 3830 case BNXT_FW_STATUS_REG_TYPE_CFG: 3831 rte_pci_read_config(bp->pdev, &val, 
sizeof(val), offset); 3832 break; 3833 case BNXT_FW_STATUS_REG_TYPE_GRC: 3834 offset = info->mapped_status_regs[index]; 3835 /* FALLTHROUGH */ 3836 case BNXT_FW_STATUS_REG_TYPE_BAR0: 3837 val = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 3838 offset)); 3839 break; 3840 } 3841 3842 return val; 3843 } 3844 3845 static int bnxt_fw_reset_all(struct bnxt *bp) 3846 { 3847 struct bnxt_error_recovery_info *info = bp->recovery_info; 3848 uint32_t i; 3849 int rc = 0; 3850 3851 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 3852 /* Reset through master function driver */ 3853 for (i = 0; i < info->reg_array_cnt; i++) 3854 bnxt_write_fw_reset_reg(bp, i); 3855 /* Wait for time specified by FW after triggering reset */ 3856 rte_delay_ms(info->master_func_wait_period_after_reset); 3857 } else if (info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) { 3858 /* Reset with the help of Kong processor */ 3859 rc = bnxt_hwrm_fw_reset(bp); 3860 if (rc) 3861 PMD_DRV_LOG(ERR, "Failed to reset FW\n"); 3862 } 3863 3864 return rc; 3865 } 3866 3867 static void bnxt_fw_reset_cb(void *arg) 3868 { 3869 struct bnxt *bp = arg; 3870 struct bnxt_error_recovery_info *info = bp->recovery_info; 3871 int rc = 0; 3872 3873 /* Only Master function can do FW reset */ 3874 if (bnxt_is_master_func(bp) && 3875 bnxt_is_recovery_enabled(bp)) { 3876 rc = bnxt_fw_reset_all(bp); 3877 if (rc) { 3878 PMD_DRV_LOG(ERR, "Adapter recovery failed\n"); 3879 return; 3880 } 3881 } 3882 3883 /* if recovery method is ERROR_RECOVERY_CO_CPU, KONG will send 3884 * EXCEPTION_FATAL_ASYNC event to all the functions 3885 * (including MASTER FUNC). After receiving this Async, all the active 3886 * drivers should treat this case as FW initiated recovery 3887 */ 3888 if (info->flags & BNXT_FLAG_ERROR_RECOVERY_HOST) { 3889 bp->fw_reset_min_msecs = BNXT_MIN_FW_READY_TIMEOUT; 3890 bp->fw_reset_max_msecs = BNXT_MAX_FW_RESET_TIMEOUT; 3891 3892 /* To recover from error */ 3893 rte_eal_alarm_set(US_PER_MS, bnxt_dev_reset_and_resume, 3894 (void *)bp); 3895 } 3896 } 3897 3898 /* Driver should poll FW heartbeat, reset_counter with the frequency 3899 * advertised by FW in HWRM_ERROR_RECOVERY_QCFG. 3900 * When the driver detects heartbeat stop or change in reset_counter, 3901 * it has to trigger a reset to recover from the error condition. 3902 * A “master PF” is the function who will have the privilege to 3903 * initiate the chimp reset. The master PF will be elected by the 3904 * firmware and will be notified through async message. 
3905 */ 3906 static void bnxt_check_fw_health(void *arg) 3907 { 3908 struct bnxt *bp = arg; 3909 struct bnxt_error_recovery_info *info = bp->recovery_info; 3910 uint32_t val = 0, wait_msec; 3911 3912 if (!info || !bnxt_is_recovery_enabled(bp) || 3913 is_bnxt_in_error(bp)) 3914 return; 3915 3916 val = bnxt_read_fw_status_reg(bp, BNXT_FW_HEARTBEAT_CNT_REG); 3917 if (val == info->last_heart_beat) 3918 goto reset; 3919 3920 info->last_heart_beat = val; 3921 3922 val = bnxt_read_fw_status_reg(bp, BNXT_FW_RECOVERY_CNT_REG); 3923 if (val != info->last_reset_counter) 3924 goto reset; 3925 3926 info->last_reset_counter = val; 3927 3928 rte_eal_alarm_set(US_PER_MS * info->driver_polling_freq, 3929 bnxt_check_fw_health, (void *)bp); 3930 3931 return; 3932 reset: 3933 /* Stop DMA to/from device */ 3934 bp->flags |= BNXT_FLAG_FATAL_ERROR; 3935 bp->flags |= BNXT_FLAG_FW_RESET; 3936 3937 PMD_DRV_LOG(ERR, "Detected FW dead condition\n"); 3938 3939 if (bnxt_is_master_func(bp)) 3940 wait_msec = info->master_func_wait_period; 3941 else 3942 wait_msec = info->normal_func_wait_period; 3943 3944 rte_eal_alarm_set(US_PER_MS * wait_msec, 3945 bnxt_fw_reset_cb, (void *)bp); 3946 } 3947 3948 void bnxt_schedule_fw_health_check(struct bnxt *bp) 3949 { 3950 uint32_t polling_freq; 3951 3952 pthread_mutex_lock(&bp->health_check_lock); 3953 3954 if (!bnxt_is_recovery_enabled(bp)) 3955 goto done; 3956 3957 if (bp->flags & BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED) 3958 goto done; 3959 3960 polling_freq = bp->recovery_info->driver_polling_freq; 3961 3962 rte_eal_alarm_set(US_PER_MS * polling_freq, 3963 bnxt_check_fw_health, (void *)bp); 3964 bp->flags |= BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 3965 3966 done: 3967 pthread_mutex_unlock(&bp->health_check_lock); 3968 } 3969 3970 static void bnxt_cancel_fw_health_check(struct bnxt *bp) 3971 { 3972 if (!bnxt_is_recovery_enabled(bp)) 3973 return; 3974 3975 rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp); 3976 bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED; 3977 } 3978 3979 static bool bnxt_vf_pciid(uint16_t device_id) 3980 { 3981 switch (device_id) { 3982 case BROADCOM_DEV_ID_57304_VF: 3983 case BROADCOM_DEV_ID_57406_VF: 3984 case BROADCOM_DEV_ID_5731X_VF: 3985 case BROADCOM_DEV_ID_5741X_VF: 3986 case BROADCOM_DEV_ID_57414_VF: 3987 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 3988 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 3989 case BROADCOM_DEV_ID_58802_VF: 3990 case BROADCOM_DEV_ID_57500_VF1: 3991 case BROADCOM_DEV_ID_57500_VF2: 3992 /* FALLTHROUGH */ 3993 return true; 3994 default: 3995 return false; 3996 } 3997 } 3998 3999 static bool bnxt_thor_device(uint16_t device_id) 4000 { 4001 switch (device_id) { 4002 case BROADCOM_DEV_ID_57508: 4003 case BROADCOM_DEV_ID_57504: 4004 case BROADCOM_DEV_ID_57502: 4005 case BROADCOM_DEV_ID_57508_MF1: 4006 case BROADCOM_DEV_ID_57504_MF1: 4007 case BROADCOM_DEV_ID_57502_MF1: 4008 case BROADCOM_DEV_ID_57508_MF2: 4009 case BROADCOM_DEV_ID_57504_MF2: 4010 case BROADCOM_DEV_ID_57502_MF2: 4011 case BROADCOM_DEV_ID_57500_VF1: 4012 case BROADCOM_DEV_ID_57500_VF2: 4013 /* FALLTHROUGH */ 4014 return true; 4015 default: 4016 return false; 4017 } 4018 } 4019 4020 bool bnxt_stratus_device(struct bnxt *bp) 4021 { 4022 uint16_t device_id = bp->pdev->id.device_id; 4023 4024 switch (device_id) { 4025 case BROADCOM_DEV_ID_STRATUS_NIC: 4026 case BROADCOM_DEV_ID_STRATUS_NIC_VF1: 4027 case BROADCOM_DEV_ID_STRATUS_NIC_VF2: 4028 /* FALLTHROUGH */ 4029 return true; 4030 default: 4031 return false; 4032 } 4033 } 4034 4035 static int bnxt_init_board(struct rte_eth_dev 
*eth_dev) 4036 { 4037 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 4038 struct bnxt *bp = eth_dev->data->dev_private; 4039 4040 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 4041 bp->bar0 = (void *)pci_dev->mem_resource[0].addr; 4042 bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; 4043 if (!bp->bar0 || !bp->doorbell_base) { 4044 PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); 4045 return -ENODEV; 4046 } 4047 4048 bp->eth_dev = eth_dev; 4049 bp->pdev = pci_dev; 4050 4051 return 0; 4052 } 4053 4054 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 4055 struct bnxt_ctx_pg_info *ctx_pg, 4056 uint32_t mem_size, 4057 const char *suffix, 4058 uint16_t idx) 4059 { 4060 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 4061 const struct rte_memzone *mz = NULL; 4062 char mz_name[RTE_MEMZONE_NAMESIZE]; 4063 rte_iova_t mz_phys_addr; 4064 uint64_t valid_bits = 0; 4065 uint32_t sz; 4066 int i; 4067 4068 if (!mem_size) 4069 return 0; 4070 4071 rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / 4072 BNXT_PAGE_SIZE; 4073 rmem->page_size = BNXT_PAGE_SIZE; 4074 rmem->pg_arr = ctx_pg->ctx_pg_arr; 4075 rmem->dma_arr = ctx_pg->ctx_dma_arr; 4076 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; 4077 4078 valid_bits = PTU_PTE_VALID; 4079 4080 if (rmem->nr_pages > 1) { 4081 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4082 "bnxt_ctx_pg_tbl%s_%x_%d", 4083 suffix, idx, bp->eth_dev->data->port_id); 4084 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4085 mz = rte_memzone_lookup(mz_name); 4086 if (!mz) { 4087 mz = rte_memzone_reserve_aligned(mz_name, 4088 rmem->nr_pages * 8, 4089 SOCKET_ID_ANY, 4090 RTE_MEMZONE_2MB | 4091 RTE_MEMZONE_SIZE_HINT_ONLY | 4092 RTE_MEMZONE_IOVA_CONTIG, 4093 BNXT_PAGE_SIZE); 4094 if (mz == NULL) 4095 return -ENOMEM; 4096 } 4097 4098 memset(mz->addr, 0, mz->len); 4099 mz_phys_addr = mz->iova; 4100 4101 rmem->pg_tbl = mz->addr; 4102 rmem->pg_tbl_map = mz_phys_addr; 4103 rmem->pg_tbl_mz = mz; 4104 } 4105 4106 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", 4107 suffix, idx, bp->eth_dev->data->port_id); 4108 mz = rte_memzone_lookup(mz_name); 4109 if (!mz) { 4110 mz = rte_memzone_reserve_aligned(mz_name, 4111 mem_size, 4112 SOCKET_ID_ANY, 4113 RTE_MEMZONE_1GB | 4114 RTE_MEMZONE_SIZE_HINT_ONLY | 4115 RTE_MEMZONE_IOVA_CONTIG, 4116 BNXT_PAGE_SIZE); 4117 if (mz == NULL) 4118 return -ENOMEM; 4119 } 4120 4121 memset(mz->addr, 0, mz->len); 4122 mz_phys_addr = mz->iova; 4123 4124 for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) { 4125 rmem->pg_arr[i] = ((char *)mz->addr) + sz; 4126 rmem->dma_arr[i] = mz_phys_addr + sz; 4127 4128 if (rmem->nr_pages > 1) { 4129 if (i == rmem->nr_pages - 2 && 4130 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4131 valid_bits |= PTU_PTE_NEXT_TO_LAST; 4132 else if (i == rmem->nr_pages - 1 && 4133 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 4134 valid_bits |= PTU_PTE_LAST; 4135 4136 rmem->pg_tbl[i] = rte_cpu_to_le_64(rmem->dma_arr[i] | 4137 valid_bits); 4138 } 4139 } 4140 4141 rmem->mz = mz; 4142 if (rmem->vmem_size) 4143 rmem->vmem = (void **)mz->addr; 4144 rmem->dma_arr[0] = mz_phys_addr; 4145 return 0; 4146 } 4147 4148 static void bnxt_free_ctx_mem(struct bnxt *bp) 4149 { 4150 int i; 4151 4152 if (!bp->ctx || !(bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 4153 return; 4154 4155 bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; 4156 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); 4157 rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); 4158 rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); 4159 
rte_memzone_free(bp->ctx->vnic_mem.ring_mem.mz); 4160 rte_memzone_free(bp->ctx->stat_mem.ring_mem.mz); 4161 rte_memzone_free(bp->ctx->qp_mem.ring_mem.pg_tbl_mz); 4162 rte_memzone_free(bp->ctx->srq_mem.ring_mem.pg_tbl_mz); 4163 rte_memzone_free(bp->ctx->cq_mem.ring_mem.pg_tbl_mz); 4164 rte_memzone_free(bp->ctx->vnic_mem.ring_mem.pg_tbl_mz); 4165 rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); 4166 4167 for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { 4168 if (bp->ctx->tqm_mem[i]) 4169 rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); 4170 } 4171 4172 rte_free(bp->ctx); 4173 bp->ctx = NULL; 4174 } 4175 4176 #define bnxt_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) 4177 4178 #define min_t(type, x, y) ({ \ 4179 type __min1 = (x); \ 4180 type __min2 = (y); \ 4181 __min1 < __min2 ? __min1 : __min2; }) 4182 4183 #define max_t(type, x, y) ({ \ 4184 type __max1 = (x); \ 4185 type __max2 = (y); \ 4186 __max1 > __max2 ? __max1 : __max2; }) 4187 4188 #define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max) 4189 4190 int bnxt_alloc_ctx_mem(struct bnxt *bp) 4191 { 4192 struct bnxt_ctx_pg_info *ctx_pg; 4193 struct bnxt_ctx_mem_info *ctx; 4194 uint32_t mem_size, ena, entries; 4195 uint32_t entries_sp, min; 4196 int i, rc; 4197 4198 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 4199 if (rc) { 4200 PMD_DRV_LOG(ERR, "Query context mem capability failed\n"); 4201 return rc; 4202 } 4203 ctx = bp->ctx; 4204 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 4205 return 0; 4206 4207 ctx_pg = &ctx->qp_mem; 4208 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; 4209 mem_size = ctx->qp_entry_size * ctx_pg->entries; 4210 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0); 4211 if (rc) 4212 return rc; 4213 4214 ctx_pg = &ctx->srq_mem; 4215 ctx_pg->entries = ctx->srq_max_l2_entries; 4216 mem_size = ctx->srq_entry_size * ctx_pg->entries; 4217 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0); 4218 if (rc) 4219 return rc; 4220 4221 ctx_pg = &ctx->cq_mem; 4222 ctx_pg->entries = ctx->cq_max_l2_entries; 4223 mem_size = ctx->cq_entry_size * ctx_pg->entries; 4224 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0); 4225 if (rc) 4226 return rc; 4227 4228 ctx_pg = &ctx->vnic_mem; 4229 ctx_pg->entries = ctx->vnic_max_vnic_entries + 4230 ctx->vnic_max_ring_table_entries; 4231 mem_size = ctx->vnic_entry_size * ctx_pg->entries; 4232 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0); 4233 if (rc) 4234 return rc; 4235 4236 ctx_pg = &ctx->stat_mem; 4237 ctx_pg->entries = ctx->stat_max_entries; 4238 mem_size = ctx->stat_entry_size * ctx_pg->entries; 4239 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0); 4240 if (rc) 4241 return rc; 4242 4243 min = ctx->tqm_min_entries_per_ring; 4244 4245 entries_sp = ctx->qp_max_l2_entries + 4246 ctx->vnic_max_vnic_entries + 4247 2 * ctx->qp_min_qp1_entries + min; 4248 entries_sp = bnxt_roundup(entries_sp, ctx->tqm_entries_multiple); 4249 4250 entries = ctx->qp_max_l2_entries + ctx->qp_min_qp1_entries; 4251 entries = bnxt_roundup(entries, ctx->tqm_entries_multiple); 4252 entries = clamp_t(uint32_t, entries, min, 4253 ctx->tqm_max_entries_per_ring); 4254 for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) { 4255 ctx_pg = ctx->tqm_mem[i]; 4256 ctx_pg->entries = i ? 
entries : entries_sp; 4257 mem_size = ctx->tqm_entry_size * ctx_pg->entries; 4258 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i); 4259 if (rc) 4260 return rc; 4261 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i; 4262 } 4263 4264 ena |= FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES; 4265 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 4266 if (rc) 4267 PMD_DRV_LOG(ERR, 4268 "Failed to configure context mem: rc = %d\n", rc); 4269 else 4270 ctx->flags |= BNXT_CTX_FLAG_INITED; 4271 4272 return rc; 4273 } 4274 4275 static int bnxt_alloc_stats_mem(struct bnxt *bp) 4276 { 4277 struct rte_pci_device *pci_dev = bp->pdev; 4278 char mz_name[RTE_MEMZONE_NAMESIZE]; 4279 const struct rte_memzone *mz = NULL; 4280 uint32_t total_alloc_len; 4281 rte_iova_t mz_phys_addr; 4282 4283 if (pci_dev->id.device_id == BROADCOM_DEV_ID_NS2) 4284 return 0; 4285 4286 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4287 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4288 pci_dev->addr.bus, pci_dev->addr.devid, 4289 pci_dev->addr.function, "rx_port_stats"); 4290 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4291 mz = rte_memzone_lookup(mz_name); 4292 total_alloc_len = 4293 RTE_CACHE_LINE_ROUNDUP(sizeof(struct rx_port_stats) + 4294 sizeof(struct rx_port_stats_ext) + 512); 4295 if (!mz) { 4296 mz = rte_memzone_reserve(mz_name, total_alloc_len, 4297 SOCKET_ID_ANY, 4298 RTE_MEMZONE_2MB | 4299 RTE_MEMZONE_SIZE_HINT_ONLY | 4300 RTE_MEMZONE_IOVA_CONTIG); 4301 if (mz == NULL) 4302 return -ENOMEM; 4303 } 4304 memset(mz->addr, 0, mz->len); 4305 mz_phys_addr = mz->iova; 4306 4307 bp->rx_mem_zone = (const void *)mz; 4308 bp->hw_rx_port_stats = mz->addr; 4309 bp->hw_rx_port_stats_map = mz_phys_addr; 4310 4311 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, 4312 "bnxt_" PCI_PRI_FMT "-%s", pci_dev->addr.domain, 4313 pci_dev->addr.bus, pci_dev->addr.devid, 4314 pci_dev->addr.function, "tx_port_stats"); 4315 mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; 4316 mz = rte_memzone_lookup(mz_name); 4317 total_alloc_len = 4318 RTE_CACHE_LINE_ROUNDUP(sizeof(struct tx_port_stats) + 4319 sizeof(struct tx_port_stats_ext) + 512); 4320 if (!mz) { 4321 mz = rte_memzone_reserve(mz_name, 4322 total_alloc_len, 4323 SOCKET_ID_ANY, 4324 RTE_MEMZONE_2MB | 4325 RTE_MEMZONE_SIZE_HINT_ONLY | 4326 RTE_MEMZONE_IOVA_CONTIG); 4327 if (mz == NULL) 4328 return -ENOMEM; 4329 } 4330 memset(mz->addr, 0, mz->len); 4331 mz_phys_addr = mz->iova; 4332 4333 bp->tx_mem_zone = (const void *)mz; 4334 bp->hw_tx_port_stats = mz->addr; 4335 bp->hw_tx_port_stats_map = mz_phys_addr; 4336 bp->flags |= BNXT_FLAG_PORT_STATS; 4337 4338 /* Display extended statistics if FW supports it */ 4339 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_8_4 || 4340 bp->hwrm_spec_code == HWRM_SPEC_CODE_1_9_0 || 4341 !(bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED)) 4342 return 0; 4343 4344 bp->hw_rx_port_stats_ext = (void *) 4345 ((uint8_t *)bp->hw_rx_port_stats + 4346 sizeof(struct rx_port_stats)); 4347 bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + 4348 sizeof(struct rx_port_stats); 4349 bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; 4350 4351 if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2 || 4352 bp->flags & BNXT_FLAG_EXT_STATS_SUPPORTED) { 4353 bp->hw_tx_port_stats_ext = (void *) 4354 ((uint8_t *)bp->hw_tx_port_stats + 4355 sizeof(struct tx_port_stats)); 4356 bp->hw_tx_port_stats_ext_map = 4357 bp->hw_tx_port_stats_map + 4358 sizeof(struct tx_port_stats); 4359 bp->flags |= BNXT_FLAG_EXT_TX_PORT_STATS; 4360 } 4361 4362 return 0; 4363 } 4364 4365 static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) 
4366 { 4367 struct bnxt *bp = eth_dev->data->dev_private; 4368 int rc = 0; 4369 4370 eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", 4371 RTE_ETHER_ADDR_LEN * 4372 bp->max_l2_ctx, 4373 0); 4374 if (eth_dev->data->mac_addrs == NULL) { 4375 PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); 4376 return -ENOMEM; 4377 } 4378 4379 if (!BNXT_HAS_DFLT_MAC_SET(bp)) { 4380 if (BNXT_PF(bp)) 4381 return -EINVAL; 4382 4383 /* Generate a random MAC address, if none was assigned by PF */ 4384 PMD_DRV_LOG(INFO, "VF MAC address not assigned by Host PF\n"); 4385 bnxt_eth_hw_addr_random(bp->mac_addr); 4386 PMD_DRV_LOG(INFO, 4387 "Assign random MAC:%02X:%02X:%02X:%02X:%02X:%02X\n", 4388 bp->mac_addr[0], bp->mac_addr[1], bp->mac_addr[2], 4389 bp->mac_addr[3], bp->mac_addr[4], bp->mac_addr[5]); 4390 4391 rc = bnxt_hwrm_set_mac(bp); 4392 if (rc) 4393 return rc; 4394 } 4395 4396 /* Copy the permanent MAC from the FUNC_QCAPS response */ 4397 memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); 4398 4399 return rc; 4400 } 4401 4402 static int bnxt_restore_dflt_mac(struct bnxt *bp) 4403 { 4404 int rc = 0; 4405 4406 /* MAC is already configured in FW */ 4407 if (BNXT_HAS_DFLT_MAC_SET(bp)) 4408 return 0; 4409 4410 /* Restore the old MAC configured */ 4411 rc = bnxt_hwrm_set_mac(bp); 4412 if (rc) 4413 PMD_DRV_LOG(ERR, "Failed to restore MAC address\n"); 4414 4415 return rc; 4416 } 4417 4418 static void bnxt_config_vf_req_fwd(struct bnxt *bp) 4419 { 4420 if (!BNXT_PF(bp)) 4421 return; 4422 4423 memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd)); 4424 4425 if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) 4426 BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG); 4427 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG); 4428 BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG); 4429 BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC); 4430 BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD); 4431 } 4432 4433 uint16_t 4434 bnxt_get_svif(uint16_t port_id, bool func_svif, 4435 enum bnxt_ulp_intf_type type) 4436 { 4437 struct rte_eth_dev *eth_dev; 4438 struct bnxt *bp; 4439 4440 eth_dev = &rte_eth_devices[port_id]; 4441 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 4442 struct bnxt_representor *vfr = eth_dev->data->dev_private; 4443 if (!vfr) 4444 return 0; 4445 4446 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 4447 return vfr->svif; 4448 4449 eth_dev = vfr->parent_dev; 4450 } 4451 4452 bp = eth_dev->data->dev_private; 4453 4454 return func_svif ?
bp->func_svif : bp->port_svif; 4455 } 4456 4457 uint16_t 4458 bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type) 4459 { 4460 struct rte_eth_dev *eth_dev; 4461 struct bnxt_vnic_info *vnic; 4462 struct bnxt *bp; 4463 4464 eth_dev = &rte_eth_devices[port]; 4465 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 4466 struct bnxt_representor *vfr = eth_dev->data->dev_private; 4467 if (!vfr) 4468 return 0; 4469 4470 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 4471 return vfr->dflt_vnic_id; 4472 4473 eth_dev = vfr->parent_dev; 4474 } 4475 4476 bp = eth_dev->data->dev_private; 4477 4478 vnic = BNXT_GET_DEFAULT_VNIC(bp); 4479 4480 return vnic->fw_vnic_id; 4481 } 4482 4483 uint16_t 4484 bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type) 4485 { 4486 struct rte_eth_dev *eth_dev; 4487 struct bnxt *bp; 4488 4489 eth_dev = &rte_eth_devices[port]; 4490 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 4491 struct bnxt_representor *vfr = eth_dev->data->dev_private; 4492 if (!vfr) 4493 return 0; 4494 4495 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 4496 return vfr->fw_fid; 4497 4498 eth_dev = vfr->parent_dev; 4499 } 4500 4501 bp = eth_dev->data->dev_private; 4502 4503 return bp->fw_fid; 4504 } 4505 4506 enum bnxt_ulp_intf_type 4507 bnxt_get_interface_type(uint16_t port) 4508 { 4509 struct rte_eth_dev *eth_dev; 4510 struct bnxt *bp; 4511 4512 eth_dev = &rte_eth_devices[port]; 4513 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) 4514 return BNXT_ULP_INTF_TYPE_VF_REP; 4515 4516 bp = eth_dev->data->dev_private; 4517 if (BNXT_PF(bp)) 4518 return BNXT_ULP_INTF_TYPE_PF; 4519 else if (BNXT_VF_IS_TRUSTED(bp)) 4520 return BNXT_ULP_INTF_TYPE_TRUSTED_VF; 4521 else if (BNXT_VF(bp)) 4522 return BNXT_ULP_INTF_TYPE_VF; 4523 4524 return BNXT_ULP_INTF_TYPE_INVALID; 4525 } 4526 4527 uint16_t 4528 bnxt_get_phy_port_id(uint16_t port_id) 4529 { 4530 struct bnxt_representor *vfr; 4531 struct rte_eth_dev *eth_dev; 4532 struct bnxt *bp; 4533 4534 eth_dev = &rte_eth_devices[port_id]; 4535 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 4536 vfr = eth_dev->data->dev_private; 4537 if (!vfr) 4538 return 0; 4539 4540 eth_dev = vfr->parent_dev; 4541 } 4542 4543 bp = eth_dev->data->dev_private; 4544 4545 return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id; 4546 } 4547 4548 uint16_t 4549 bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type) 4550 { 4551 struct rte_eth_dev *eth_dev; 4552 struct bnxt *bp; 4553 4554 eth_dev = &rte_eth_devices[port_id]; 4555 if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) { 4556 struct bnxt_representor *vfr = eth_dev->data->dev_private; 4557 if (!vfr) 4558 return 0; 4559 4560 if (type == BNXT_ULP_INTF_TYPE_VF_REP) 4561 return vfr->fw_fid - 1; 4562 4563 eth_dev = vfr->parent_dev; 4564 } 4565 4566 bp = eth_dev->data->dev_private; 4567 4568 return BNXT_PF(bp) ? 
bp->fw_fid - 1 : bp->parent->fid - 1; 4569 } 4570 4571 uint16_t 4572 bnxt_get_vport(uint16_t port_id) 4573 { 4574 return (1 << bnxt_get_phy_port_id(port_id)); 4575 } 4576 4577 static void bnxt_alloc_error_recovery_info(struct bnxt *bp) 4578 { 4579 struct bnxt_error_recovery_info *info = bp->recovery_info; 4580 4581 if (info) { 4582 if (!(bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS)) 4583 memset(info, 0, sizeof(*info)); 4584 return; 4585 } 4586 4587 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 4588 return; 4589 4590 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 4591 sizeof(*info), 0); 4592 if (!info) 4593 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 4594 4595 bp->recovery_info = info; 4596 } 4597 4598 static void bnxt_check_fw_status(struct bnxt *bp) 4599 { 4600 uint32_t fw_status; 4601 4602 if (!(bp->recovery_info && 4603 (bp->fw_cap & BNXT_FW_CAP_HCOMM_FW_STATUS))) 4604 return; 4605 4606 fw_status = bnxt_read_fw_status_reg(bp, BNXT_FW_STATUS_REG); 4607 if (fw_status != BNXT_FW_STATUS_HEALTHY) 4608 PMD_DRV_LOG(ERR, "Firmware not responding, status: %#x\n", 4609 fw_status); 4610 } 4611 4612 static int bnxt_map_hcomm_fw_status_reg(struct bnxt *bp) 4613 { 4614 struct bnxt_error_recovery_info *info = bp->recovery_info; 4615 uint32_t status_loc; 4616 uint32_t sig_ver; 4617 4618 rte_write32(HCOMM_STATUS_STRUCT_LOC, (uint8_t *)bp->bar0 + 4619 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4620 sig_ver = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4621 BNXT_GRCP_WINDOW_2_BASE + 4622 offsetof(struct hcomm_status, 4623 sig_ver))); 4624 /* If the signature is absent, then FW does not support this feature */ 4625 if ((sig_ver & HCOMM_STATUS_SIGNATURE_MASK) != 4626 HCOMM_STATUS_SIGNATURE_VAL) 4627 return 0; 4628 4629 if (!info) { 4630 info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", 4631 sizeof(*info), 0); 4632 if (!info) 4633 return -ENOMEM; 4634 bp->recovery_info = info; 4635 } else { 4636 memset(info, 0, sizeof(*info)); 4637 } 4638 4639 status_loc = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + 4640 BNXT_GRCP_WINDOW_2_BASE + 4641 offsetof(struct hcomm_status, 4642 fw_status_loc))); 4643 4644 /* Only pre-map the FW health status GRC register */ 4645 if (BNXT_FW_STATUS_REG_TYPE(status_loc) != BNXT_FW_STATUS_REG_TYPE_GRC) 4646 return 0; 4647 4648 info->status_regs[BNXT_FW_STATUS_REG] = status_loc; 4649 info->mapped_status_regs[BNXT_FW_STATUS_REG] = 4650 BNXT_GRCP_WINDOW_2_BASE + (status_loc & BNXT_GRCP_OFFSET_MASK); 4651 4652 rte_write32((status_loc & BNXT_GRCP_BASE_MASK), (uint8_t *)bp->bar0 + 4653 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 4654 4655 bp->fw_cap |= BNXT_FW_CAP_HCOMM_FW_STATUS; 4656 4657 return 0; 4658 } 4659 4660 static int bnxt_init_fw(struct bnxt *bp) 4661 { 4662 uint16_t mtu; 4663 int rc = 0; 4664 4665 bp->fw_cap = 0; 4666 4667 rc = bnxt_map_hcomm_fw_status_reg(bp); 4668 if (rc) 4669 return rc; 4670 4671 rc = bnxt_hwrm_ver_get(bp, DFLT_HWRM_CMD_TIMEOUT); 4672 if (rc) { 4673 bnxt_check_fw_status(bp); 4674 return rc; 4675 } 4676 4677 rc = bnxt_hwrm_func_reset(bp); 4678 if (rc) 4679 return -EIO; 4680 4681 rc = bnxt_hwrm_vnic_qcaps(bp); 4682 if (rc) 4683 return rc; 4684 4685 rc = bnxt_hwrm_queue_qportcfg(bp); 4686 if (rc) 4687 return rc; 4688 4689 /* Get the MAX capabilities for this function. 4690 * This function also allocates context memory for TQM rings and 4691 * informs the firmware about this allocated backing store memory. 
4692 */ 4693 rc = bnxt_hwrm_func_qcaps(bp); 4694 if (rc) 4695 return rc; 4696 4697 rc = bnxt_hwrm_func_qcfg(bp, &mtu); 4698 if (rc) 4699 return rc; 4700 4701 bnxt_hwrm_port_mac_qcfg(bp); 4702 4703 bnxt_hwrm_parent_pf_qcfg(bp); 4704 4705 bnxt_hwrm_port_phy_qcaps(bp); 4706 4707 bnxt_alloc_error_recovery_info(bp); 4708 /* Get the adapter error recovery support info */ 4709 rc = bnxt_hwrm_error_recovery_qcfg(bp); 4710 if (rc) 4711 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 4712 4713 bnxt_hwrm_port_led_qcaps(bp); 4714 4715 return 0; 4716 } 4717 4718 static int 4719 bnxt_init_locks(struct bnxt *bp) 4720 { 4721 int err; 4722 4723 err = pthread_mutex_init(&bp->flow_lock, NULL); 4724 if (err) { 4725 PMD_DRV_LOG(ERR, "Unable to initialize flow_lock\n"); 4726 return err; 4727 } 4728 4729 err = pthread_mutex_init(&bp->def_cp_lock, NULL); 4730 if (err) 4731 PMD_DRV_LOG(ERR, "Unable to initialize def_cp_lock\n"); 4732 4733 err = pthread_mutex_init(&bp->health_check_lock, NULL); 4734 if (err) 4735 PMD_DRV_LOG(ERR, "Unable to initialize health_check_lock\n"); 4736 return err; 4737 } 4738 4739 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) 4740 { 4741 int rc = 0; 4742 4743 rc = bnxt_init_fw(bp); 4744 if (rc) 4745 return rc; 4746 4747 if (!reconfig_dev) { 4748 rc = bnxt_setup_mac_addr(bp->eth_dev); 4749 if (rc) 4750 return rc; 4751 } else { 4752 rc = bnxt_restore_dflt_mac(bp); 4753 if (rc) 4754 return rc; 4755 } 4756 4757 bnxt_config_vf_req_fwd(bp); 4758 4759 rc = bnxt_hwrm_func_driver_register(bp); 4760 if (rc) { 4761 PMD_DRV_LOG(ERR, "Failed to register driver"); 4762 return -EBUSY; 4763 } 4764 4765 if (BNXT_PF(bp)) { 4766 if (bp->pdev->max_vfs) { 4767 rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs); 4768 if (rc) { 4769 PMD_DRV_LOG(ERR, "Failed to allocate VFs\n"); 4770 return rc; 4771 } 4772 } else { 4773 rc = bnxt_hwrm_allocate_pf_only(bp); 4774 if (rc) { 4775 PMD_DRV_LOG(ERR, 4776 "Failed to allocate PF resources"); 4777 return rc; 4778 } 4779 } 4780 } 4781 4782 rc = bnxt_alloc_mem(bp, reconfig_dev); 4783 if (rc) 4784 return rc; 4785 4786 rc = bnxt_setup_int(bp); 4787 if (rc) 4788 return rc; 4789 4790 rc = bnxt_request_int(bp); 4791 if (rc) 4792 return rc; 4793 4794 rc = bnxt_init_ctx_mem(bp); 4795 if (rc) { 4796 PMD_DRV_LOG(ERR, "Failed to init adv_flow_counters\n"); 4797 return rc; 4798 } 4799 4800 rc = bnxt_init_locks(bp); 4801 if (rc) 4802 return rc; 4803 4804 return 0; 4805 } 4806 4807 static int 4808 bnxt_parse_devarg_truflow(__rte_unused const char *key, 4809 const char *value, void *opaque_arg) 4810 { 4811 struct bnxt *bp = opaque_arg; 4812 unsigned long truflow; 4813 char *end = NULL; 4814 4815 if (!value || !opaque_arg) { 4816 PMD_DRV_LOG(ERR, 4817 "Invalid parameter passed to truflow devargs.\n"); 4818 return -EINVAL; 4819 } 4820 4821 truflow = strtoul(value, &end, 10); 4822 if (end == NULL || *end != '\0' || 4823 (truflow == ULONG_MAX && errno == ERANGE)) { 4824 PMD_DRV_LOG(ERR, 4825 "Invalid parameter passed to truflow devargs.\n"); 4826 return -EINVAL; 4827 } 4828 4829 if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) { 4830 PMD_DRV_LOG(ERR, 4831 "Invalid value passed to truflow devargs.\n"); 4832 return -EINVAL; 4833 } 4834 4835 if (truflow) { 4836 bp->flags |= BNXT_FLAG_TRUFLOW_EN; 4837 PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n"); 4838 } else { 4839 bp->flags &= ~BNXT_FLAG_TRUFLOW_EN; 4840 PMD_DRV_LOG(INFO, "Host-based truflow feature disabled.\n"); 4841 } 4842 4843 return 0; 4844 } 4845 4846 static int 4847 bnxt_parse_devarg_flow_xstat(__rte_unused const 
char *key, 4848 const char *value, void *opaque_arg) 4849 { 4850 struct bnxt *bp = opaque_arg; 4851 unsigned long flow_xstat; 4852 char *end = NULL; 4853 4854 if (!value || !opaque_arg) { 4855 PMD_DRV_LOG(ERR, 4856 "Invalid parameter passed to flow_xstat devarg.\n"); 4857 return -EINVAL; 4858 } 4859 4860 flow_xstat = strtoul(value, &end, 10); 4861 if (end == NULL || *end != '\0' || 4862 (flow_xstat == ULONG_MAX && errno == ERANGE)) { 4863 PMD_DRV_LOG(ERR, 4864 "Invalid parameter passed to flow_xstat devarg.\n"); 4865 return -EINVAL; 4866 } 4867 4868 if (BNXT_DEVARG_FLOW_XSTAT_INVALID(flow_xstat)) { 4869 PMD_DRV_LOG(ERR, 4870 "Invalid value passed to flow_xstat devarg.\n"); 4871 return -EINVAL; 4872 } 4873 4874 bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN; 4875 if (BNXT_FLOW_XSTATS_EN(bp)) 4876 PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n"); 4877 4878 return 0; 4879 } 4880 4881 static int 4882 bnxt_parse_devarg_max_num_kflows(__rte_unused const char *key, 4883 const char *value, void *opaque_arg) 4884 { 4885 struct bnxt *bp = opaque_arg; 4886 unsigned long max_num_kflows; 4887 char *end = NULL; 4888 4889 if (!value || !opaque_arg) { 4890 PMD_DRV_LOG(ERR, 4891 "Invalid parameter passed to max_num_kflows devarg.\n"); 4892 return -EINVAL; 4893 } 4894 4895 max_num_kflows = strtoul(value, &end, 10); 4896 if (end == NULL || *end != '\0' || 4897 (max_num_kflows == ULONG_MAX && errno == ERANGE)) { 4898 PMD_DRV_LOG(ERR, 4899 "Invalid parameter passed to max_num_kflows devarg.\n"); 4900 return -EINVAL; 4901 } 4902 4903 if (bnxt_devarg_max_num_kflow_invalid(max_num_kflows)) { 4904 PMD_DRV_LOG(ERR, 4905 "Invalid value passed to max_num_kflows devarg.\n"); 4906 return -EINVAL; 4907 } 4908 4909 bp->max_num_kflows = max_num_kflows; 4910 if (bp->max_num_kflows) 4911 PMD_DRV_LOG(INFO, "max_num_kflows set as %ldK.\n", 4912 max_num_kflows); 4913 4914 return 0; 4915 } 4916 4917 static int 4918 bnxt_parse_devarg_rep_is_pf(__rte_unused const char *key, 4919 const char *value, void *opaque_arg) 4920 { 4921 struct bnxt_representor *vfr_bp = opaque_arg; 4922 unsigned long rep_is_pf; 4923 char *end = NULL; 4924 4925 if (!value || !opaque_arg) { 4926 PMD_DRV_LOG(ERR, 4927 "Invalid parameter passed to rep_is_pf devargs.\n"); 4928 return -EINVAL; 4929 } 4930 4931 rep_is_pf = strtoul(value, &end, 10); 4932 if (end == NULL || *end != '\0' || 4933 (rep_is_pf == ULONG_MAX && errno == ERANGE)) { 4934 PMD_DRV_LOG(ERR, 4935 "Invalid parameter passed to rep_is_pf devargs.\n"); 4936 return -EINVAL; 4937 } 4938 4939 if (BNXT_DEVARG_REP_IS_PF_INVALID(rep_is_pf)) { 4940 PMD_DRV_LOG(ERR, 4941 "Invalid value passed to rep_is_pf devargs.\n"); 4942 return -EINVAL; 4943 } 4944 4945 vfr_bp->flags |= rep_is_pf; 4946 if (BNXT_REP_PF(vfr_bp)) 4947 PMD_DRV_LOG(INFO, "PF representor\n"); 4948 else 4949 PMD_DRV_LOG(INFO, "VF representor\n"); 4950 4951 return 0; 4952 } 4953 4954 static int 4955 bnxt_parse_devarg_rep_based_pf(__rte_unused const char *key, 4956 const char *value, void *opaque_arg) 4957 { 4958 struct bnxt_representor *vfr_bp = opaque_arg; 4959 unsigned long rep_based_pf; 4960 char *end = NULL; 4961 4962 if (!value || !opaque_arg) { 4963 PMD_DRV_LOG(ERR, 4964 "Invalid parameter passed to rep_based_pf " 4965 "devargs.\n"); 4966 return -EINVAL; 4967 } 4968 4969 rep_based_pf = strtoul(value, &end, 10); 4970 if (end == NULL || *end != '\0' || 4971 (rep_based_pf == ULONG_MAX && errno == ERANGE)) { 4972 PMD_DRV_LOG(ERR, 4973 "Invalid parameter passed to rep_based_pf " 4974 "devargs.\n"); 4975 return -EINVAL; 4976 } 4977 4978 if 
(BNXT_DEVARG_REP_BASED_PF_INVALID(rep_based_pf)) { 4979 PMD_DRV_LOG(ERR, 4980 "Invalid value passed to rep_based_pf devargs.\n"); 4981 return -EINVAL; 4982 } 4983 4984 vfr_bp->rep_based_pf = rep_based_pf; 4985 vfr_bp->flags |= BNXT_REP_BASED_PF_VALID; 4986 4987 PMD_DRV_LOG(INFO, "rep-based-pf = %d\n", vfr_bp->rep_based_pf); 4988 4989 return 0; 4990 } 4991 4992 static int 4993 bnxt_parse_devarg_rep_q_r2f(__rte_unused const char *key, 4994 const char *value, void *opaque_arg) 4995 { 4996 struct bnxt_representor *vfr_bp = opaque_arg; 4997 unsigned long rep_q_r2f; 4998 char *end = NULL; 4999 5000 if (!value || !opaque_arg) { 5001 PMD_DRV_LOG(ERR, 5002 "Invalid parameter passed to rep_q_r2f " 5003 "devargs.\n"); 5004 return -EINVAL; 5005 } 5006 5007 rep_q_r2f = strtoul(value, &end, 10); 5008 if (end == NULL || *end != '\0' || 5009 (rep_q_r2f == ULONG_MAX && errno == ERANGE)) { 5010 PMD_DRV_LOG(ERR, 5011 "Invalid parameter passed to rep_q_r2f " 5012 "devargs.\n"); 5013 return -EINVAL; 5014 } 5015 5016 if (BNXT_DEVARG_REP_Q_R2F_INVALID(rep_q_r2f)) { 5017 PMD_DRV_LOG(ERR, 5018 "Invalid value passed to rep_q_r2f devargs.\n"); 5019 return -EINVAL; 5020 } 5021 5022 vfr_bp->rep_q_r2f = rep_q_r2f; 5023 vfr_bp->flags |= BNXT_REP_Q_R2F_VALID; 5024 PMD_DRV_LOG(INFO, "rep-q-r2f = %d\n", vfr_bp->rep_q_r2f); 5025 5026 return 0; 5027 } 5028 5029 static int 5030 bnxt_parse_devarg_rep_q_f2r(__rte_unused const char *key, 5031 const char *value, void *opaque_arg) 5032 { 5033 struct bnxt_representor *vfr_bp = opaque_arg; 5034 unsigned long rep_q_f2r; 5035 char *end = NULL; 5036 5037 if (!value || !opaque_arg) { 5038 PMD_DRV_LOG(ERR, 5039 "Invalid parameter passed to rep_q_f2r " 5040 "devargs.\n"); 5041 return -EINVAL; 5042 } 5043 5044 rep_q_f2r = strtoul(value, &end, 10); 5045 if (end == NULL || *end != '\0' || 5046 (rep_q_f2r == ULONG_MAX && errno == ERANGE)) { 5047 PMD_DRV_LOG(ERR, 5048 "Invalid parameter passed to rep_q_f2r " 5049 "devargs.\n"); 5050 return -EINVAL; 5051 } 5052 5053 if (BNXT_DEVARG_REP_Q_F2R_INVALID(rep_q_f2r)) { 5054 PMD_DRV_LOG(ERR, 5055 "Invalid value passed to rep_q_f2r devargs.\n"); 5056 return -EINVAL; 5057 } 5058 5059 vfr_bp->rep_q_f2r = rep_q_f2r; 5060 vfr_bp->flags |= BNXT_REP_Q_F2R_VALID; 5061 PMD_DRV_LOG(INFO, "rep-q-f2r = %d\n", vfr_bp->rep_q_f2r); 5062 5063 return 0; 5064 } 5065 5066 static int 5067 bnxt_parse_devarg_rep_fc_r2f(__rte_unused const char *key, 5068 const char *value, void *opaque_arg) 5069 { 5070 struct bnxt_representor *vfr_bp = opaque_arg; 5071 unsigned long rep_fc_r2f; 5072 char *end = NULL; 5073 5074 if (!value || !opaque_arg) { 5075 PMD_DRV_LOG(ERR, 5076 "Invalid parameter passed to rep_fc_r2f " 5077 "devargs.\n"); 5078 return -EINVAL; 5079 } 5080 5081 rep_fc_r2f = strtoul(value, &end, 10); 5082 if (end == NULL || *end != '\0' || 5083 (rep_fc_r2f == ULONG_MAX && errno == ERANGE)) { 5084 PMD_DRV_LOG(ERR, 5085 "Invalid parameter passed to rep_fc_r2f " 5086 "devargs.\n"); 5087 return -EINVAL; 5088 } 5089 5090 if (BNXT_DEVARG_REP_FC_R2F_INVALID(rep_fc_r2f)) { 5091 PMD_DRV_LOG(ERR, 5092 "Invalid value passed to rep_fc_r2f devargs.\n"); 5093 return -EINVAL; 5094 } 5095 5096 vfr_bp->flags |= BNXT_REP_FC_R2F_VALID; 5097 vfr_bp->rep_fc_r2f = rep_fc_r2f; 5098 PMD_DRV_LOG(INFO, "rep-fc-r2f = %lu\n", rep_fc_r2f); 5099 5100 return 0; 5101 } 5102 5103 static int 5104 bnxt_parse_devarg_rep_fc_f2r(__rte_unused const char *key, 5105 const char *value, void *opaque_arg) 5106 { 5107 struct bnxt_representor *vfr_bp = opaque_arg; 5108 unsigned long rep_fc_f2r; 5109 char *end = NULL; 
5110 5111 if (!value || !opaque_arg) { 5112 PMD_DRV_LOG(ERR, 5113 "Invalid parameter passed to rep_fc_f2r " 5114 "devargs.\n"); 5115 return -EINVAL; 5116 } 5117 5118 rep_fc_f2r = strtoul(value, &end, 10); 5119 if (end == NULL || *end != '\0' || 5120 (rep_fc_f2r == ULONG_MAX && errno == ERANGE)) { 5121 PMD_DRV_LOG(ERR, 5122 "Invalid parameter passed to rep_fc_f2r " 5123 "devargs.\n"); 5124 return -EINVAL; 5125 } 5126 5127 if (BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r)) { 5128 PMD_DRV_LOG(ERR, 5129 "Invalid value passed to rep_fc_f2r devargs.\n"); 5130 return -EINVAL; 5131 } 5132 5133 vfr_bp->flags |= BNXT_REP_FC_F2R_VALID; 5134 vfr_bp->rep_fc_f2r = rep_fc_f2r; 5135 PMD_DRV_LOG(INFO, "rep-fc-f2r = %lu\n", rep_fc_f2r); 5136 5137 return 0; 5138 } 5139 5140 static void 5141 bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) 5142 { 5143 struct rte_kvargs *kvlist; 5144 5145 if (devargs == NULL) 5146 return; 5147 5148 kvlist = rte_kvargs_parse(devargs->args, bnxt_dev_args); 5149 if (kvlist == NULL) 5150 return; 5151 5152 /* 5153 * Handler for "truflow" devarg. 5154 * Invoked as for ex: "-a 0000:00:0d.0,host-based-truflow=1" 5155 */ 5156 rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW, 5157 bnxt_parse_devarg_truflow, bp); 5158 5159 /* 5160 * Handler for "flow_xstat" devarg. 5161 * Invoked as for ex: "-a 0000:00:0d.0,flow-xstat=1" 5162 */ 5163 rte_kvargs_process(kvlist, BNXT_DEVARG_FLOW_XSTAT, 5164 bnxt_parse_devarg_flow_xstat, bp); 5165 5166 /* 5167 * Handler for "max_num_kflows" devarg. 5168 * Invoked as for ex: "-a 0000:00:0d.0,max-num-kflows=32" 5169 */ 5170 rte_kvargs_process(kvlist, BNXT_DEVARG_MAX_NUM_KFLOWS, 5171 bnxt_parse_devarg_max_num_kflows, bp); 5172 5173 rte_kvargs_free(kvlist); 5174 } 5175 5176 static int bnxt_alloc_switch_domain(struct bnxt *bp) 5177 { 5178 int rc = 0; 5179 5180 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) { 5181 rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id); 5182 if (rc) 5183 PMD_DRV_LOG(ERR, 5184 "Failed to alloc switch domain: %d\n", rc); 5185 else 5186 PMD_DRV_LOG(INFO, 5187 "Switch domain allocated %d\n", 5188 bp->switch_domain_id); 5189 } 5190 5191 return rc; 5192 } 5193 5194 static int 5195 bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) 5196 { 5197 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 5198 static int version_printed; 5199 struct bnxt *bp; 5200 int rc; 5201 5202 if (version_printed++ == 0) 5203 PMD_DRV_LOG(INFO, "%s\n", bnxt_version); 5204 5205 eth_dev->dev_ops = &bnxt_dev_ops; 5206 eth_dev->rx_queue_count = bnxt_rx_queue_count_op; 5207 eth_dev->rx_descriptor_status = bnxt_rx_descriptor_status_op; 5208 eth_dev->tx_descriptor_status = bnxt_tx_descriptor_status_op; 5209 eth_dev->rx_pkt_burst = &bnxt_recv_pkts; 5210 eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; 5211 5212 /* 5213 * For secondary processes, we don't initialise any further 5214 * as primary has already done this work. 5215 */ 5216 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5217 return 0; 5218 5219 rte_eth_copy_pci_info(eth_dev, pci_dev); 5220 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 5221 5222 bp = eth_dev->data->dev_private; 5223 5224 /* Parse dev arguments passed on when starting the DPDK application.
*/ 5225 bnxt_parse_dev_args(bp, pci_dev->device.devargs); 5226 5227 bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE; 5228 5229 if (bnxt_vf_pciid(pci_dev->id.device_id)) 5230 bp->flags |= BNXT_FLAG_VF; 5231 5232 if (bnxt_thor_device(pci_dev->id.device_id)) 5233 bp->flags |= BNXT_FLAG_THOR_CHIP; 5234 5235 if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 || 5236 pci_dev->id.device_id == BROADCOM_DEV_ID_58804 || 5237 pci_dev->id.device_id == BROADCOM_DEV_ID_58808 || 5238 pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF) 5239 bp->flags |= BNXT_FLAG_STINGRAY; 5240 5241 if (BNXT_TRUFLOW_EN(bp)) { 5242 /* extra mbuf field is required to store CFA code from mark */ 5243 static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = { 5244 .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME, 5245 .size = sizeof(bnxt_cfa_code_dynfield_t), 5246 .align = __alignof__(bnxt_cfa_code_dynfield_t), 5247 }; 5248 bnxt_cfa_code_dynfield_offset = 5249 rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc); 5250 if (bnxt_cfa_code_dynfield_offset < 0) { 5251 PMD_DRV_LOG(ERR, 5252 "Failed to register mbuf field for TruFlow mark\n"); 5253 return -rte_errno; 5254 } 5255 } 5256 5257 rc = bnxt_init_board(eth_dev); 5258 if (rc) { 5259 PMD_DRV_LOG(ERR, 5260 "Failed to initialize board rc: %x\n", rc); 5261 return rc; 5262 } 5263 5264 rc = bnxt_alloc_pf_info(bp); 5265 if (rc) 5266 goto error_free; 5267 5268 rc = bnxt_alloc_link_info(bp); 5269 if (rc) 5270 goto error_free; 5271 5272 rc = bnxt_alloc_parent_info(bp); 5273 if (rc) 5274 goto error_free; 5275 5276 rc = bnxt_alloc_hwrm_resources(bp); 5277 if (rc) { 5278 PMD_DRV_LOG(ERR, 5279 "Failed to allocate hwrm resource rc: %x\n", rc); 5280 goto error_free; 5281 } 5282 rc = bnxt_alloc_leds_info(bp); 5283 if (rc) 5284 goto error_free; 5285 5286 rc = bnxt_alloc_cos_queues(bp); 5287 if (rc) 5288 goto error_free; 5289 5290 rc = bnxt_init_resources(bp, false); 5291 if (rc) 5292 goto error_free; 5293 5294 rc = bnxt_alloc_stats_mem(bp); 5295 if (rc) 5296 goto error_free; 5297 5298 bnxt_alloc_switch_domain(bp); 5299 5300 PMD_DRV_LOG(INFO, 5301 DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n", 5302 pci_dev->mem_resource[0].phys_addr, 5303 pci_dev->mem_resource[0].addr); 5304 5305 return 0; 5306 5307 error_free: 5308 bnxt_dev_uninit(eth_dev); 5309 return rc; 5310 } 5311 5312 5313 static void bnxt_free_ctx_mem_buf(struct bnxt_ctx_mem_buf_info *ctx) 5314 { 5315 if (!ctx) 5316 return; 5317 5318 if (ctx->va) 5319 rte_free(ctx->va); 5320 5321 ctx->va = NULL; 5322 ctx->dma = RTE_BAD_IOVA; 5323 ctx->ctx_id = BNXT_CTX_VAL_INVAL; 5324 } 5325 5326 static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp) 5327 { 5328 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, 5329 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5330 bp->flow_stat->rx_fc_out_tbl.ctx_id, 5331 bp->flow_stat->max_fc, 5332 false); 5333 5334 bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX, 5335 CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC, 5336 bp->flow_stat->tx_fc_out_tbl.ctx_id, 5337 bp->flow_stat->max_fc, 5338 false); 5339 5340 if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5341 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id); 5342 bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5343 5344 if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5345 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id); 5346 bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5347 5348 if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5349 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id); 
5350 bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5351 5352 if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL) 5353 bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id); 5354 bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL; 5355 } 5356 5357 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp) 5358 { 5359 bnxt_unregister_fc_ctx_mem(bp); 5360 5361 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl); 5362 bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl); 5363 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl); 5364 bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl); 5365 } 5366 5367 static void bnxt_uninit_ctx_mem(struct bnxt *bp) 5368 { 5369 if (BNXT_FLOW_XSTATS_EN(bp)) 5370 bnxt_uninit_fc_ctx_mem(bp); 5371 } 5372 5373 static void 5374 bnxt_free_error_recovery_info(struct bnxt *bp) 5375 { 5376 rte_free(bp->recovery_info); 5377 bp->recovery_info = NULL; 5378 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 5379 } 5380 5381 static void 5382 bnxt_uninit_locks(struct bnxt *bp) 5383 { 5384 pthread_mutex_destroy(&bp->flow_lock); 5385 pthread_mutex_destroy(&bp->def_cp_lock); 5386 pthread_mutex_destroy(&bp->health_check_lock); 5387 if (bp->rep_info) { 5388 pthread_mutex_destroy(&bp->rep_info->vfr_lock); 5389 pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); 5390 } 5391 } 5392 5393 static int 5394 bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) 5395 { 5396 int rc; 5397 5398 bnxt_free_int(bp); 5399 bnxt_free_mem(bp, reconfig_dev); 5400 5401 bnxt_hwrm_func_buf_unrgtr(bp); 5402 rte_free(bp->pf->vf_req_buf); 5403 5404 rc = bnxt_hwrm_func_driver_unregister(bp, 0); 5405 bp->flags &= ~BNXT_FLAG_REGISTERED; 5406 bnxt_free_ctx_mem(bp); 5407 if (!reconfig_dev) { 5408 bnxt_free_hwrm_resources(bp); 5409 bnxt_free_error_recovery_info(bp); 5410 } 5411 5412 bnxt_uninit_ctx_mem(bp); 5413 5414 bnxt_uninit_locks(bp); 5415 bnxt_free_flow_stats_info(bp); 5416 bnxt_free_rep_info(bp); 5417 rte_free(bp->ptp_cfg); 5418 bp->ptp_cfg = NULL; 5419 return rc; 5420 } 5421 5422 static int 5423 bnxt_dev_uninit(struct rte_eth_dev *eth_dev) 5424 { 5425 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5426 return -EPERM; 5427 5428 PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); 5429 5430 if (eth_dev->state != RTE_ETH_DEV_UNUSED) 5431 bnxt_dev_close_op(eth_dev); 5432 5433 return 0; 5434 } 5435 5436 static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev) 5437 { 5438 struct bnxt *bp = eth_dev->data->dev_private; 5439 struct rte_eth_dev *vf_rep_eth_dev; 5440 int ret = 0, i; 5441 5442 if (!bp) 5443 return -EINVAL; 5444 5445 for (i = 0; i < bp->num_reps; i++) { 5446 vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev; 5447 if (!vf_rep_eth_dev) 5448 continue; 5449 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci remove\n", 5450 vf_rep_eth_dev->data->port_id); 5451 rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_representor_uninit); 5452 } 5453 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", 5454 eth_dev->data->port_id); 5455 ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit); 5456 5457 return ret; 5458 } 5459 5460 static void bnxt_free_rep_info(struct bnxt *bp) 5461 { 5462 rte_free(bp->rep_info); 5463 bp->rep_info = NULL; 5464 rte_free(bp->cfa_code_map); 5465 bp->cfa_code_map = NULL; 5466 } 5467 5468 static int bnxt_init_rep_info(struct bnxt *bp) 5469 { 5470 int i = 0, rc; 5471 5472 if (bp->rep_info) 5473 return 0; 5474 5475 bp->rep_info = rte_zmalloc("bnxt_rep_info", 5476 sizeof(bp->rep_info[0]) * BNXT_MAX_VF_REPS, 5477 0); 5478 if (!bp->rep_info) { 5479 PMD_DRV_LOG(ERR, "Failed to alloc memory 
for rep info\n"); 5480 return -ENOMEM; 5481 } 5482 bp->cfa_code_map = rte_zmalloc("bnxt_cfa_code_map", 5483 sizeof(*bp->cfa_code_map) * 5484 BNXT_MAX_CFA_CODE, 0); 5485 if (!bp->cfa_code_map) { 5486 PMD_DRV_LOG(ERR, "Failed to alloc memory for cfa_code_map\n"); 5487 bnxt_free_rep_info(bp); 5488 return -ENOMEM; 5489 } 5490 5491 for (i = 0; i < BNXT_MAX_CFA_CODE; i++) 5492 bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; 5493 5494 rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); 5495 if (rc) { 5496 PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); 5497 bnxt_free_rep_info(bp); 5498 return rc; 5499 } 5500 5501 rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); 5502 if (rc) { 5503 PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); 5504 bnxt_free_rep_info(bp); 5505 return rc; 5506 } 5507 5508 return rc; 5509 } 5510 5511 static int bnxt_rep_port_probe(struct rte_pci_device *pci_dev, 5512 struct rte_eth_devargs *eth_da, 5513 struct rte_eth_dev *backing_eth_dev, 5514 const char *dev_args) 5515 { 5516 struct rte_eth_dev *vf_rep_eth_dev; 5517 char name[RTE_ETH_NAME_MAX_LEN]; 5518 struct bnxt *backing_bp; 5519 uint16_t num_rep; 5520 int i, ret = 0; 5521 struct rte_kvargs *kvlist = NULL; 5522 5523 num_rep = eth_da->nb_representor_ports; 5524 if (num_rep > BNXT_MAX_VF_REPS) { 5525 PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n", 5526 num_rep, BNXT_MAX_VF_REPS); 5527 return -EINVAL; 5528 } 5529 5530 if (num_rep >= RTE_MAX_ETHPORTS) { 5531 PMD_DRV_LOG(ERR, 5532 "nb_representor_ports = %d > %d MAX ETHPORTS\n", 5533 num_rep, RTE_MAX_ETHPORTS); 5534 return -EINVAL; 5535 } 5536 5537 backing_bp = backing_eth_dev->data->dev_private; 5538 5539 if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) { 5540 PMD_DRV_LOG(ERR, 5541 "Not a PF or trusted VF. No Representor support\n"); 5542 /* Returning an error is not an option. 5543 * Applications are not handling this correctly 5544 */ 5545 return 0; 5546 } 5547 5548 if (bnxt_init_rep_info(backing_bp)) 5549 return 0; 5550 5551 for (i = 0; i < num_rep; i++) { 5552 struct bnxt_representor representor = { 5553 .vf_id = eth_da->representor_ports[i], 5554 .switch_domain_id = backing_bp->switch_domain_id, 5555 .parent_dev = backing_eth_dev 5556 }; 5557 5558 if (representor.vf_id >= BNXT_MAX_VF_REPS) { 5559 PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n", 5560 representor.vf_id, BNXT_MAX_VF_REPS); 5561 continue; 5562 } 5563 5564 /* representor port net_bdf_port */ 5565 snprintf(name, sizeof(name), "net_%s_representor_%d", 5566 pci_dev->device.name, eth_da->representor_ports[i]); 5567 5568 kvlist = rte_kvargs_parse(dev_args, bnxt_dev_args); 5569 if (kvlist) { 5570 /* 5571 * Handler for "rep_is_pf" devarg. 5572 * Invoked as for ex: "-a 0000:00:0d.0, 5573 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 5574 */ 5575 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF, 5576 bnxt_parse_devarg_rep_is_pf, 5577 (void *)&representor); 5578 if (ret) { 5579 ret = -EINVAL; 5580 goto err; 5581 } 5582 /* 5583 * Handler for "rep_based_pf" devarg. 5584 * Invoked as for ex: "-a 0000:00:0d.0, 5585 * rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>" 5586 */ 5587 ret = rte_kvargs_process(kvlist, 5588 BNXT_DEVARG_REP_BASED_PF, 5589 bnxt_parse_devarg_rep_based_pf, 5590 (void *)&representor); 5591 if (ret) { 5592 ret = -EINVAL; 5593 goto err; 5594 } 5595 /* 5596 * Handler for "rep_q_r2f" devarg.
5597 * Invoked as for ex: "-a 0000:00:0d.0, 5598 * rep-based-pf=<pf index> rep-q-r2f=<queue index>" 5599 */ 5600 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F, 5601 bnxt_parse_devarg_rep_q_r2f, 5602 (void *)&representor); 5603 if (ret) { 5604 ret = -EINVAL; 5605 goto err; 5606 } 5607 /* 5608 * Handler for "rep_q_f2r" devarg. 5609 * Invoked as for ex: "-a 0000:00:0d.0, 5610 * rep-based-pf=<pf index> rep-q-f2r=<queue index>" 5611 */ 5612 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R, 5613 bnxt_parse_devarg_rep_q_f2r, 5614 (void *)&representor); 5615 if (ret) { 5616 ret = -EINVAL; 5617 goto err; 5618 } 5619 /* 5620 * Handler for "rep_fc_r2f" devarg. 5621 * Invoked as for ex: "-a 0000:00:0d.0, 5622 * rep-based-pf=<pf index> rep-fc-r2f=<0 or 1>" 5623 */ 5624 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F, 5625 bnxt_parse_devarg_rep_fc_r2f, 5626 (void *)&representor); 5627 if (ret) { 5628 ret = -EINVAL; 5629 goto err; 5630 } 5631 /* 5632 * Handler for "rep_fc_f2r" devarg. 5633 * Invoked as for ex: "-a 0000:00:0d.0, 5634 * rep-based-pf=<pf index> rep-fc-f2r=<0 or 1>" 5635 */ 5636 ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R, 5637 bnxt_parse_devarg_rep_fc_f2r, 5638 (void *)&representor); 5639 if (ret) { 5640 ret = -EINVAL; 5641 goto err; 5642 } 5643 } 5644 5645 ret = rte_eth_dev_create(&pci_dev->device, name, 5646 sizeof(struct bnxt_representor), 5647 NULL, NULL, 5648 bnxt_representor_init, 5649 &representor); 5650 if (ret) { 5651 PMD_DRV_LOG(ERR, "failed to create bnxt vf " 5652 "representor %s.", name); 5653 goto err; 5654 } 5655 5656 vf_rep_eth_dev = rte_eth_dev_allocated(name); 5657 if (!vf_rep_eth_dev) { 5658 PMD_DRV_LOG(ERR, "Failed to find the eth_dev" 5659 " for VF-Rep: %s.", name); 5660 ret = -ENODEV; 5661 goto err; 5662 } 5663 5664 PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR pci probe\n", 5665 backing_eth_dev->data->port_id); 5666 backing_bp->rep_info[representor.vf_id].vfr_eth_dev = 5667 vf_rep_eth_dev; 5668 backing_bp->num_reps++; 5669 5670 } 5671 5672 rte_kvargs_free(kvlist); 5673 return 0; 5674 5675 err: 5676 /* If num_rep > 1, then rollback already created 5677 * ports, since we'll be failing the probe anyway 5678 */ 5679 if (num_rep > 1) 5680 bnxt_pci_remove_dev_with_reps(backing_eth_dev); 5681 rte_errno = -ret; 5682 rte_kvargs_free(kvlist); 5683 5684 return ret; 5685 } 5686 5687 static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 5688 struct rte_pci_device *pci_dev) 5689 { 5690 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 }; 5691 struct rte_eth_dev *backing_eth_dev; 5692 uint16_t num_rep; 5693 int ret = 0; 5694 5695 if (pci_dev->device.devargs) { 5696 ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, 5697 &eth_da); 5698 if (ret) 5699 return ret; 5700 } 5701 5702 num_rep = eth_da.nb_representor_ports; 5703 PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n", 5704 num_rep); 5705 5706 /* We could come here after first level of probe is already invoked 5707 * as part of an application bringup(OVS-DPDK vswitchd), so first check 5708 * for already allocated eth_dev for the backing device (PF/Trusted VF) 5709 */ 5710 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 5711 if (backing_eth_dev == NULL) { 5712 ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 5713 sizeof(struct bnxt), 5714 eth_dev_pci_specific_init, pci_dev, 5715 bnxt_dev_init, NULL); 5716 5717 if (ret || !num_rep) 5718 return ret; 5719 5720 backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 5721 } 5722
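/* At this point the backing PF/trusted-VF ethdev exists (either found
 * above or just created). Representor ports, if any were requested via
 * the "representor" devarg, are probed against it below.
 */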
PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n", 5723 backing_eth_dev->data->port_id); 5724 5725 if (!num_rep) 5726 return ret; 5727 5728 /* probe representor ports now */ 5729 ret = bnxt_rep_port_probe(pci_dev, &eth_da, backing_eth_dev, 5730 pci_dev->device.devargs->args); 5731 5732 return ret; 5733 } 5734 5735 static int bnxt_pci_remove(struct rte_pci_device *pci_dev) 5736 { 5737 struct rte_eth_dev *eth_dev; 5738 5739 eth_dev = rte_eth_dev_allocated(pci_dev->device.name); 5740 if (!eth_dev) 5741 return 0; /* Invoked typically only by OVS-DPDK, by the 5742 * time it comes here the eth_dev is already 5743 * deleted by rte_eth_dev_close(), so returning 5744 * +ve value will at least help in proper cleanup 5745 */ 5746 5747 PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci remove\n", eth_dev->data->port_id); 5748 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 5749 if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 5750 return rte_eth_dev_destroy(eth_dev, 5751 bnxt_representor_uninit); 5752 else 5753 return rte_eth_dev_destroy(eth_dev, 5754 bnxt_dev_uninit); 5755 } else { 5756 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 5757 } 5758 } 5759 5760 static struct rte_pci_driver bnxt_rte_pmd = { 5761 .id_table = bnxt_pci_id_map, 5762 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 5763 RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs 5764 * and OVS-DPDK 5765 */ 5766 .probe = bnxt_pci_probe, 5767 .remove = bnxt_pci_remove, 5768 }; 5769 5770 static bool 5771 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 5772 { 5773 if (strcmp(dev->device->driver->name, drv->driver.name)) 5774 return false; 5775 5776 return true; 5777 } 5778 5779 bool is_bnxt_supported(struct rte_eth_dev *dev) 5780 { 5781 return is_device_supported(dev, &bnxt_rte_pmd); 5782 } 5783 5784 RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE); 5785 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); 5786 RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); 5787 RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci"); 5788
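/* Usage sketch (illustrative only; the PCI address and values below are
 * placeholders, chosen within the ranges the devarg validation macros accept).
 * The devargs handled above are supplied on the EAL command line when the
 * device is allow-listed, for example:
 *
 *   -a 0000:00:0d.0,host-based-truflow=1,flow-xstat=1,max-num-kflows=32
 *
 * and, when representor ports are requested on a PF or trusted VF:
 *
 *   -a 0000:00:0d.0,representor=[0],rep-based-pf=0,rep-is-pf=0,
 *      rep-q-r2f=0,rep-q-f2r=0,rep-fc-r2f=1,rep-fc-f2r=1
 */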