1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) Broadcom Limited. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Broadcom Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <inttypes.h> 35 #include <stdbool.h> 36 37 #include <rte_dev.h> 38 #include <rte_ethdev.h> 39 #include <rte_ethdev_pci.h> 40 #include <rte_malloc.h> 41 #include <rte_cycles.h> 42 43 #include "bnxt.h" 44 #include "bnxt_cpr.h" 45 #include "bnxt_filter.h" 46 #include "bnxt_hwrm.h" 47 #include "bnxt_irq.h" 48 #include "bnxt_ring.h" 49 #include "bnxt_rxq.h" 50 #include "bnxt_rxr.h" 51 #include "bnxt_stats.h" 52 #include "bnxt_txq.h" 53 #include "bnxt_txr.h" 54 #include "bnxt_vnic.h" 55 #include "hsi_struct_def_dpdk.h" 56 #include "bnxt_nvm_defs.h" 57 58 #define DRV_MODULE_NAME "bnxt" 59 static const char bnxt_version[] = 60 "Broadcom Cumulus driver " DRV_MODULE_NAME "\n"; 61 62 #define PCI_VENDOR_ID_BROADCOM 0x14E4 63 64 #define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609 65 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614 66 #define BROADCOM_DEV_ID_57414_VF 0x16c1 67 #define BROADCOM_DEV_ID_57301 0x16c8 68 #define BROADCOM_DEV_ID_57302 0x16c9 69 #define BROADCOM_DEV_ID_57304_PF 0x16ca 70 #define BROADCOM_DEV_ID_57304_VF 0x16cb 71 #define BROADCOM_DEV_ID_57417_MF 0x16cc 72 #define BROADCOM_DEV_ID_NS2 0x16cd 73 #define BROADCOM_DEV_ID_57311 0x16ce 74 #define BROADCOM_DEV_ID_57312 0x16cf 75 #define BROADCOM_DEV_ID_57402 0x16d0 76 #define BROADCOM_DEV_ID_57404 0x16d1 77 #define BROADCOM_DEV_ID_57406_PF 0x16d2 78 #define BROADCOM_DEV_ID_57406_VF 0x16d3 79 #define BROADCOM_DEV_ID_57402_MF 0x16d4 80 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5 81 #define BROADCOM_DEV_ID_57412 0x16d6 82 #define BROADCOM_DEV_ID_57414 0x16d7 83 #define BROADCOM_DEV_ID_57416_RJ45 0x16d8 84 #define BROADCOM_DEV_ID_57417_RJ45 0x16d9 85 #define BROADCOM_DEV_ID_5741X_VF 0x16dc 86 #define BROADCOM_DEV_ID_57412_MF 0x16de 87 #define BROADCOM_DEV_ID_57314 0x16df 88 #define BROADCOM_DEV_ID_57317_RJ45 0x16e0 89 #define BROADCOM_DEV_ID_5731X_VF 0x16e1 90 #define BROADCOM_DEV_ID_57417_SFP 0x16e2 91 #define 
BROADCOM_DEV_ID_57416_SFP 0x16e3 92 #define BROADCOM_DEV_ID_57317_SFP 0x16e4 93 #define BROADCOM_DEV_ID_57404_MF 0x16e7 94 #define BROADCOM_DEV_ID_57406_MF 0x16e8 95 #define BROADCOM_DEV_ID_57407_SFP 0x16e9 96 #define BROADCOM_DEV_ID_57407_MF 0x16ea 97 #define BROADCOM_DEV_ID_57414_MF 0x16ec 98 #define BROADCOM_DEV_ID_57416_MF 0x16ee 99 100 static const struct rte_pci_id bnxt_pci_id_map[] = { 101 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 102 BROADCOM_DEV_ID_STRATUS_NIC_VF) }, 103 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) }, 104 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) }, 105 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, 106 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) }, 107 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) }, 108 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, 109 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) }, 110 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) }, 111 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) }, 112 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) }, 113 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, 114 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) }, 115 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) }, 116 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) }, 117 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) }, 118 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) }, 119 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) }, 120 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) }, 121 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) }, 122 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) }, 123 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) }, 124 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) }, 125 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) }, 126 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) }, 127 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) }, 128 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) }, 129 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) }, 130 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) }, 131 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) }, 132 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) }, 133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) }, 134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) }, 135 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) }, 136 { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) }, 137 { .vendor_id = 0, /* sentinel */ }, 138 }; 139 140 #define BNXT_ETH_RSS_SUPPORT ( \ 141 ETH_RSS_IPV4 | \ 142 ETH_RSS_NONFRAG_IPV4_TCP | \ 143 ETH_RSS_NONFRAG_IPV4_UDP | \ 144 ETH_RSS_IPV6 | \ 145 ETH_RSS_NONFRAG_IPV6_TCP | \ 146 ETH_RSS_NONFRAG_IPV6_UDP) 147 148 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask); 149 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev); 150 151 /***********************/ 152 153 /* 154 * High level utility functions 155 */ 156 157 static void bnxt_free_mem(struct bnxt *bp) 158 { 159 
bnxt_free_filter_mem(bp); 160 bnxt_free_vnic_attributes(bp); 161 bnxt_free_vnic_mem(bp); 162 163 bnxt_free_stats(bp); 164 bnxt_free_tx_rings(bp); 165 bnxt_free_rx_rings(bp); 166 bnxt_free_def_cp_ring(bp); 167 } 168 169 static int bnxt_alloc_mem(struct bnxt *bp) 170 { 171 int rc; 172 173 /* Default completion ring */ 174 rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY); 175 if (rc) 176 goto alloc_mem_err; 177 178 rc = bnxt_alloc_rings(bp, 0, NULL, NULL, 179 bp->def_cp_ring, "def_cp"); 180 if (rc) 181 goto alloc_mem_err; 182 183 rc = bnxt_alloc_vnic_mem(bp); 184 if (rc) 185 goto alloc_mem_err; 186 187 rc = bnxt_alloc_vnic_attributes(bp); 188 if (rc) 189 goto alloc_mem_err; 190 191 rc = bnxt_alloc_filter_mem(bp); 192 if (rc) 193 goto alloc_mem_err; 194 195 return 0; 196 197 alloc_mem_err: 198 bnxt_free_mem(bp); 199 return rc; 200 } 201 202 static int bnxt_init_chip(struct bnxt *bp) 203 { 204 unsigned int i, rss_idx, fw_idx; 205 struct rte_eth_link new; 206 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); 207 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 208 uint32_t intr_vector = 0; 209 uint32_t queue_id, base = BNXT_MISC_VEC_ID; 210 uint32_t vec = BNXT_MISC_VEC_ID; 211 int rc; 212 213 /* disable uio/vfio intr/eventfd mapping */ 214 rte_intr_disable(intr_handle); 215 216 if (bp->eth_dev->data->mtu > ETHER_MTU) { 217 bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; 218 bp->flags |= BNXT_FLAG_JUMBO; 219 } else { 220 bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; 221 bp->flags &= ~BNXT_FLAG_JUMBO; 222 } 223 224 rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); 225 if (rc) { 226 RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc); 227 goto err_out; 228 } 229 230 rc = bnxt_alloc_hwrm_rings(bp); 231 if (rc) { 232 RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc); 233 goto err_out; 234 } 235 236 rc = bnxt_alloc_all_hwrm_ring_grps(bp); 237 if (rc) { 238 RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc); 239 goto err_out; 240 } 241 242 rc = bnxt_mq_rx_configure(bp); 243 if (rc) { 244 RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc); 245 goto err_out; 246 } 247 248 /* VNIC configuration */ 249 for (i = 0; i < bp->nr_vnics; i++) { 250 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 251 uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps; 252 253 vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0); 254 if (!vnic->fw_grp_ids) { 255 RTE_LOG(ERR, PMD, 256 "Failed to alloc %d bytes for group ids\n", 257 size); 258 rc = -ENOMEM; 259 goto err_out; 260 } 261 memset(vnic->fw_grp_ids, -1, size); 262 263 rc = bnxt_hwrm_vnic_alloc(bp, vnic); 264 if (rc) { 265 RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n", 266 i, rc); 267 goto err_out; 268 } 269 270 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic); 271 if (rc) { 272 RTE_LOG(ERR, PMD, 273 "HWRM vnic %d ctx alloc failure rc: %x\n", 274 i, rc); 275 goto err_out; 276 } 277 278 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 279 if (rc) { 280 RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n", 281 i, rc); 282 goto err_out; 283 } 284 285 rc = bnxt_set_hwrm_vnic_filters(bp, vnic); 286 if (rc) { 287 RTE_LOG(ERR, PMD, 288 "HWRM vnic %d filter failure rc: %x\n", 289 i, rc); 290 goto err_out; 291 } 292 if (vnic->rss_table && vnic->hash_type) { 293 /* 294 * Fill the RSS hash & redirection table with 295 * ring group ids for all VNICs 296 */ 297 for (rss_idx = 0, fw_idx = 0; 298 rss_idx < HW_HASH_INDEX_SIZE; 299 rss_idx++, fw_idx++) { 300 if (vnic->fw_grp_ids[fw_idx] == 301 INVALID_HW_RING_ID) 302 
fw_idx = 0; 303 vnic->rss_table[rss_idx] = 304 vnic->fw_grp_ids[fw_idx]; 305 } 306 rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); 307 if (rc) { 308 RTE_LOG(ERR, PMD, 309 "HWRM vnic %d set RSS failure rc: %x\n", 310 i, rc); 311 goto err_out; 312 } 313 } 314 315 bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 316 317 if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) 318 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1); 319 else 320 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0); 321 } 322 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL); 323 if (rc) { 324 RTE_LOG(ERR, PMD, 325 "HWRM cfa l2 rx mask failure rc: %x\n", rc); 326 goto err_out; 327 } 328 329 /* check and configure queue intr-vector mapping */ 330 if ((rte_intr_cap_multiple(intr_handle) || 331 !RTE_ETH_DEV_SRIOV(bp->eth_dev).active) && 332 bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) { 333 intr_vector = bp->eth_dev->data->nb_rx_queues; 334 RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__, 335 intr_vector); 336 if (intr_vector > bp->rx_cp_nr_rings) { 337 RTE_LOG(ERR, PMD, "At most %d intr queues supported", 338 bp->rx_cp_nr_rings); 339 return -ENOTSUP; 340 } 341 if (rte_intr_efd_enable(intr_handle, intr_vector)) 342 return -1; 343 } 344 345 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 346 intr_handle->intr_vec = 347 rte_zmalloc("intr_vec", 348 bp->eth_dev->data->nb_rx_queues * 349 sizeof(int), 0); 350 if (intr_handle->intr_vec == NULL) { 351 RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues" 352 " intr_vec", bp->eth_dev->data->nb_rx_queues); 353 return -ENOMEM; 354 } 355 RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p " 356 "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", 357 __func__, intr_handle->intr_vec, intr_handle->nb_efd, 358 intr_handle->max_intr); 359 } 360 361 for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; 362 queue_id++) { 363 intr_handle->intr_vec[queue_id] = vec; 364 if (vec < base + intr_handle->nb_efd - 1) 365 vec++; 366 } 367 368 /* enable uio/vfio intr/eventfd mapping */ 369 rte_intr_enable(intr_handle); 370 371 rc = bnxt_get_hwrm_link_config(bp, &new); 372 if (rc) { 373 RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc); 374 goto err_out; 375 } 376 377 if (!bp->link_info.link_up) { 378 rc = bnxt_set_hwrm_link_config(bp, true); 379 if (rc) { 380 RTE_LOG(ERR, PMD, 381 "HWRM link config failure rc: %x\n", rc); 382 goto err_out; 383 } 384 } 385 bnxt_print_link_info(bp->eth_dev); 386 387 return 0; 388 389 err_out: 390 bnxt_free_all_hwrm_resources(bp); 391 392 return rc; 393 } 394 395 static int bnxt_shutdown_nic(struct bnxt *bp) 396 { 397 bnxt_free_all_hwrm_resources(bp); 398 bnxt_free_all_filters(bp); 399 bnxt_free_all_vnics(bp); 400 return 0; 401 } 402 403 static int bnxt_init_nic(struct bnxt *bp) 404 { 405 int rc; 406 407 rc = bnxt_init_ring_grps(bp); 408 if (rc) 409 return rc; 410 411 bnxt_init_vnics(bp); 412 bnxt_init_filters(bp); 413 414 return 0; 415 } 416 417 /* 418 * Device configuration and status function 419 */ 420 421 static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, 422 struct rte_eth_dev_info *dev_info) 423 { 424 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 425 uint16_t max_vnics, i, j, vpool, vrxq; 426 unsigned int max_rx_rings; 427 428 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 429 430 /* MAC Specifics */ 431 dev_info->max_mac_addrs = bp->max_l2_ctx; 432 dev_info->max_hash_mac_addrs = 0; 433 434 /* PF/VF specifics */ 435 if (BNXT_PF(bp)) 436 dev_info->max_vfs = bp->pdev->max_vfs; 437 max_rx_rings = RTE_MIN(bp->max_vnics, 
RTE_MIN(bp->max_l2_ctx, 438 RTE_MIN(bp->max_rsscos_ctx, 439 bp->max_stat_ctx))); 440 /* For the sake of symmetry, max_rx_queues = max_tx_queues */ 441 dev_info->max_rx_queues = max_rx_rings; 442 dev_info->max_tx_queues = max_rx_rings; 443 dev_info->reta_size = HW_HASH_INDEX_SIZE; 444 dev_info->hash_key_size = 40; 445 max_vnics = bp->max_vnics; 446 447 /* Fast path specifics */ 448 dev_info->min_rx_bufsize = 1; 449 dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN 450 + VLAN_TAG_SIZE; 451 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | 452 DEV_RX_OFFLOAD_IPV4_CKSUM | 453 DEV_RX_OFFLOAD_UDP_CKSUM | 454 DEV_RX_OFFLOAD_TCP_CKSUM | 455 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; 456 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | 457 DEV_TX_OFFLOAD_IPV4_CKSUM | 458 DEV_TX_OFFLOAD_TCP_CKSUM | 459 DEV_TX_OFFLOAD_UDP_CKSUM | 460 DEV_TX_OFFLOAD_TCP_TSO | 461 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 462 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 463 DEV_TX_OFFLOAD_GRE_TNL_TSO | 464 DEV_TX_OFFLOAD_IPIP_TNL_TSO | 465 DEV_TX_OFFLOAD_GENEVE_TNL_TSO; 466 467 /* *INDENT-OFF* */ 468 dev_info->default_rxconf = (struct rte_eth_rxconf) { 469 .rx_thresh = { 470 .pthresh = 8, 471 .hthresh = 8, 472 .wthresh = 0, 473 }, 474 .rx_free_thresh = 32, 475 /* If no descriptors available, pkts are dropped by default */ 476 .rx_drop_en = 1, 477 }; 478 479 dev_info->default_txconf = (struct rte_eth_txconf) { 480 .tx_thresh = { 481 .pthresh = 32, 482 .hthresh = 0, 483 .wthresh = 0, 484 }, 485 .tx_free_thresh = 32, 486 .tx_rs_thresh = 32, 487 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | 488 ETH_TXQ_FLAGS_NOOFFLOADS, 489 }; 490 eth_dev->data->dev_conf.intr_conf.lsc = 1; 491 492 eth_dev->data->dev_conf.intr_conf.rxq = 1; 493 494 /* *INDENT-ON* */ 495 496 /* 497 * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim 498 * need further investigation. 
499 */ 500 501 /* VMDq resources */ 502 vpool = 64; /* ETH_64_POOLS */ 503 vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */ 504 for (i = 0; i < 4; vpool >>= 1, i++) { 505 if (max_vnics > vpool) { 506 for (j = 0; j < 5; vrxq >>= 1, j++) { 507 if (dev_info->max_rx_queues > vrxq) { 508 if (vpool > vrxq) 509 vpool = vrxq; 510 goto found; 511 } 512 } 513 /* Not enough resources to support VMDq */ 514 break; 515 } 516 } 517 /* Not enough resources to support VMDq */ 518 vpool = 0; 519 vrxq = 0; 520 found: 521 dev_info->max_vmdq_pools = vpool; 522 dev_info->vmdq_queue_num = vrxq; 523 524 dev_info->vmdq_pool_base = 0; 525 dev_info->vmdq_queue_base = 0; 526 } 527 528 /* Configure the device based on the configuration provided */ 529 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) 530 { 531 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 532 533 bp->rx_queues = (void *)eth_dev->data->rx_queues; 534 bp->tx_queues = (void *)eth_dev->data->tx_queues; 535 536 /* Inherit new configurations */ 537 bp->rx_nr_rings = eth_dev->data->nb_rx_queues; 538 bp->tx_nr_rings = eth_dev->data->nb_tx_queues; 539 bp->rx_cp_nr_rings = bp->rx_nr_rings; 540 bp->tx_cp_nr_rings = bp->tx_nr_rings; 541 542 if (eth_dev->data->dev_conf.rxmode.jumbo_frame) 543 eth_dev->data->mtu = 544 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - 545 ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE; 546 return 0; 547 } 548 549 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev) 550 { 551 struct rte_eth_link *link = ð_dev->data->dev_link; 552 553 if (link->link_status) 554 RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n", 555 eth_dev->data->port_id, 556 (uint32_t)link->link_speed, 557 (link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 558 ("full-duplex") : ("half-duplex\n")); 559 else 560 RTE_LOG(INFO, PMD, "Port %d Link Down\n", 561 eth_dev->data->port_id); 562 } 563 564 static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev) 565 { 566 bnxt_print_link_info(eth_dev); 567 return 0; 568 } 569 570 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) 571 { 572 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 573 int vlan_mask = 0; 574 int rc; 575 576 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) { 577 RTE_LOG(ERR, PMD, 578 "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", 579 bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); 580 } 581 bp->dev_stopped = 0; 582 583 rc = bnxt_init_chip(bp); 584 if (rc) 585 goto error; 586 587 bnxt_link_update_op(eth_dev, 1); 588 589 if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter) 590 vlan_mask |= ETH_VLAN_FILTER_MASK; 591 if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) 592 vlan_mask |= ETH_VLAN_STRIP_MASK; 593 rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); 594 if (rc) 595 goto error; 596 597 return 0; 598 599 error: 600 bnxt_shutdown_nic(bp); 601 bnxt_free_tx_mbufs(bp); 602 bnxt_free_rx_mbufs(bp); 603 return rc; 604 } 605 606 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) 607 { 608 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 609 int rc = 0; 610 611 if (!bp->link_info.link_up) 612 rc = bnxt_set_hwrm_link_config(bp, true); 613 if (!rc) 614 eth_dev->data->dev_link.link_status = 1; 615 616 bnxt_print_link_info(eth_dev); 617 return 0; 618 } 619 620 static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) 621 { 622 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 623 624 eth_dev->data->dev_link.link_status = 0; 625 bnxt_set_hwrm_link_config(bp, false); 626 bp->link_info.link_up = 0; 627 628 
return 0; 629 } 630 631 /* Unload the driver, release resources */ 632 static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 633 { 634 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 635 636 if (bp->eth_dev->data->dev_started) { 637 /* TBD: STOP HW queues DMA */ 638 eth_dev->data->dev_link.link_status = 0; 639 } 640 bnxt_set_hwrm_link_config(bp, false); 641 bnxt_hwrm_port_clr_stats(bp); 642 bnxt_free_tx_mbufs(bp); 643 bnxt_free_rx_mbufs(bp); 644 bnxt_shutdown_nic(bp); 645 bp->dev_stopped = 1; 646 } 647 648 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) 649 { 650 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 651 652 if (bp->dev_stopped == 0) 653 bnxt_dev_stop_op(eth_dev); 654 655 bnxt_free_mem(bp); 656 if (eth_dev->data->mac_addrs != NULL) { 657 rte_free(eth_dev->data->mac_addrs); 658 eth_dev->data->mac_addrs = NULL; 659 } 660 if (bp->grp_info != NULL) { 661 rte_free(bp->grp_info); 662 bp->grp_info = NULL; 663 } 664 } 665 666 static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, 667 uint32_t index) 668 { 669 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 670 uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; 671 struct bnxt_vnic_info *vnic; 672 struct bnxt_filter_info *filter, *temp_filter; 673 uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS); 674 uint32_t i; 675 676 /* 677 * Loop through all VNICs from the specified filter flow pools to 678 * remove the corresponding MAC addr filter 679 */ 680 for (i = 0; i < pool; i++) { 681 if (!(pool_mask & (1ULL << i))) 682 continue; 683 684 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { 685 filter = STAILQ_FIRST(&vnic->filter); 686 while (filter) { 687 temp_filter = STAILQ_NEXT(filter, next); 688 if (filter->mac_index == index) { 689 STAILQ_REMOVE(&vnic->filter, filter, 690 bnxt_filter_info, next); 691 bnxt_hwrm_clear_l2_filter(bp, filter); 692 filter->mac_index = INVALID_MAC_INDEX; 693 memset(&filter->l2_addr, 0, 694 ETHER_ADDR_LEN); 695 STAILQ_INSERT_TAIL( 696 &bp->free_filter_list, 697 filter, next); 698 } 699 filter = temp_filter; 700 } 701 } 702 } 703 } 704 705 static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, 706 struct ether_addr *mac_addr, 707 uint32_t index, uint32_t pool) 708 { 709 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 710 struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]); 711 struct bnxt_filter_info *filter; 712 713 if (BNXT_VF(bp)) { 714 RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n"); 715 return -ENOTSUP; 716 } 717 718 if (!vnic) { 719 RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool); 720 return -EINVAL; 721 } 722 /* Attach requested MAC address to the new l2_filter */ 723 STAILQ_FOREACH(filter, &vnic->filter, next) { 724 if (filter->mac_index == index) { 725 RTE_LOG(ERR, PMD, 726 "MAC addr already existed for pool %d\n", pool); 727 return 0; 728 } 729 } 730 filter = bnxt_alloc_filter(bp); 731 if (!filter) { 732 RTE_LOG(ERR, PMD, "L2 filter alloc failed\n"); 733 return -ENODEV; 734 } 735 STAILQ_INSERT_TAIL(&vnic->filter, filter, next); 736 filter->mac_index = index; 737 memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN); 738 return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 739 } 740 741 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) 742 { 743 int rc = 0; 744 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 745 struct rte_eth_link new; 746 unsigned int cnt = BNXT_LINK_WAIT_CNT; 747 748 memset(&new, 0, sizeof(new)); 749 do { 750 /* Retrieve 
link info from hardware */ 751 rc = bnxt_get_hwrm_link_config(bp, &new); 752 if (rc) { 753 new.link_speed = ETH_LINK_SPEED_100M; 754 new.link_duplex = ETH_LINK_FULL_DUPLEX; 755 RTE_LOG(ERR, PMD, 756 "Failed to retrieve link rc = 0x%x!\n", rc); 757 goto out; 758 } 759 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); 760 761 if (!wait_to_complete) 762 break; 763 } while (!new.link_status && cnt--); 764 765 out: 766 /* Timed out or success */ 767 if (new.link_status != eth_dev->data->dev_link.link_status || 768 new.link_speed != eth_dev->data->dev_link.link_speed) { 769 memcpy(ð_dev->data->dev_link, &new, 770 sizeof(struct rte_eth_link)); 771 bnxt_print_link_info(eth_dev); 772 } 773 774 return rc; 775 } 776 777 static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) 778 { 779 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 780 struct bnxt_vnic_info *vnic; 781 782 if (bp->vnic_info == NULL) 783 return; 784 785 vnic = &bp->vnic_info[0]; 786 787 vnic->flags |= BNXT_VNIC_INFO_PROMISC; 788 bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 789 } 790 791 static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) 792 { 793 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 794 struct bnxt_vnic_info *vnic; 795 796 if (bp->vnic_info == NULL) 797 return; 798 799 vnic = &bp->vnic_info[0]; 800 801 vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; 802 bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 803 } 804 805 static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) 806 { 807 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 808 struct bnxt_vnic_info *vnic; 809 810 if (bp->vnic_info == NULL) 811 return; 812 813 vnic = &bp->vnic_info[0]; 814 815 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 816 bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 817 } 818 819 static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) 820 { 821 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 822 struct bnxt_vnic_info *vnic; 823 824 if (bp->vnic_info == NULL) 825 return; 826 827 vnic = &bp->vnic_info[0]; 828 829 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 830 bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 831 } 832 833 static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, 834 struct rte_eth_rss_reta_entry64 *reta_conf, 835 uint16_t reta_size) 836 { 837 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 838 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 839 struct bnxt_vnic_info *vnic; 840 int i; 841 842 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) 843 return -EINVAL; 844 845 if (reta_size != HW_HASH_INDEX_SIZE) { 846 RTE_LOG(ERR, PMD, "The configured hash table lookup size " 847 "(%d) must equal the size supported by the hardware " 848 "(%d)\n", reta_size, HW_HASH_INDEX_SIZE); 849 return -EINVAL; 850 } 851 /* Update the RSS VNIC(s) */ 852 for (i = 0; i < MAX_FF_POOLS; i++) { 853 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { 854 memcpy(vnic->rss_table, reta_conf, reta_size); 855 856 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 857 } 858 } 859 return 0; 860 } 861 862 static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, 863 struct rte_eth_rss_reta_entry64 *reta_conf, 864 uint16_t reta_size) 865 { 866 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 867 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 868 struct rte_intr_handle *intr_handle 869 = &bp->pdev->intr_handle; 870 871 /* Retrieve from the default VNIC */ 872 if (!vnic) 873 return -EINVAL; 874 if (!vnic->rss_table) 875 return -EINVAL; 876 877 if (reta_size != 
HW_HASH_INDEX_SIZE) { 878 RTE_LOG(ERR, PMD, "The configured hash table lookup size " 879 "(%d) must equal the size supported by the hardware " 880 "(%d)\n", reta_size, HW_HASH_INDEX_SIZE); 881 return -EINVAL; 882 } 883 /* EW - need to revisit here copying from u64 to u16 */ 884 memcpy(reta_conf, vnic->rss_table, reta_size); 885 886 if (rte_intr_allow_others(intr_handle)) { 887 if (eth_dev->data->dev_conf.intr_conf.lsc != 0) 888 bnxt_dev_lsc_intr_setup(eth_dev); 889 } 890 891 return 0; 892 } 893 894 static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, 895 struct rte_eth_rss_conf *rss_conf) 896 { 897 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 898 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; 899 struct bnxt_vnic_info *vnic; 900 uint16_t hash_type = 0; 901 int i; 902 903 /* 904 * If RSS enablement were different than dev_configure, 905 * then return -EINVAL 906 */ 907 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { 908 if (!rss_conf->rss_hf) 909 RTE_LOG(ERR, PMD, "Hash type NONE\n"); 910 } else { 911 if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) 912 return -EINVAL; 913 } 914 915 bp->flags |= BNXT_FLAG_UPDATE_HASH; 916 memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf)); 917 918 if (rss_conf->rss_hf & ETH_RSS_IPV4) 919 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 920 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) 921 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 922 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) 923 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 924 if (rss_conf->rss_hf & ETH_RSS_IPV6) 925 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 926 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) 927 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 928 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) 929 hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 930 931 /* Update the RSS VNIC(s) */ 932 for (i = 0; i < MAX_FF_POOLS; i++) { 933 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { 934 vnic->hash_type = hash_type; 935 936 /* 937 * Use the supplied key if the key length is 938 * acceptable and the rss_key is not NULL 939 */ 940 if (rss_conf->rss_key && 941 rss_conf->rss_key_len <= HW_HASH_KEY_SIZE) 942 memcpy(vnic->rss_hash_key, rss_conf->rss_key, 943 rss_conf->rss_key_len); 944 945 bnxt_hwrm_vnic_rss_cfg(bp, vnic); 946 } 947 } 948 return 0; 949 } 950 951 static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, 952 struct rte_eth_rss_conf *rss_conf) 953 { 954 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 955 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 956 int len; 957 uint32_t hash_types; 958 959 /* RSS configuration is the same for all VNICs */ 960 if (vnic && vnic->rss_hash_key) { 961 if (rss_conf->rss_key) { 962 len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? 
963 rss_conf->rss_key_len : HW_HASH_KEY_SIZE; 964 memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); 965 } 966 967 hash_types = vnic->hash_type; 968 rss_conf->rss_hf = 0; 969 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { 970 rss_conf->rss_hf |= ETH_RSS_IPV4; 971 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; 972 } 973 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { 974 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 975 hash_types &= 976 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; 977 } 978 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { 979 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; 980 hash_types &= 981 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; 982 } 983 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { 984 rss_conf->rss_hf |= ETH_RSS_IPV6; 985 hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; 986 } 987 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { 988 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 989 hash_types &= 990 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; 991 } 992 if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { 993 rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; 994 hash_types &= 995 ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; 996 } 997 if (hash_types) { 998 RTE_LOG(ERR, PMD, 999 "Unknwon RSS config from firmware (%08x), RSS disabled", 1000 vnic->hash_type); 1001 return -ENOTSUP; 1002 } 1003 } else { 1004 rss_conf->rss_hf = 0; 1005 } 1006 return 0; 1007 } 1008 1009 static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, 1010 struct rte_eth_fc_conf *fc_conf) 1011 { 1012 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1013 struct rte_eth_link link_info; 1014 int rc; 1015 1016 rc = bnxt_get_hwrm_link_config(bp, &link_info); 1017 if (rc) 1018 return rc; 1019 1020 memset(fc_conf, 0, sizeof(*fc_conf)); 1021 if (bp->link_info.auto_pause) 1022 fc_conf->autoneg = 1; 1023 switch (bp->link_info.pause) { 1024 case 0: 1025 fc_conf->mode = RTE_FC_NONE; 1026 break; 1027 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: 1028 fc_conf->mode = RTE_FC_TX_PAUSE; 1029 break; 1030 case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: 1031 fc_conf->mode = RTE_FC_RX_PAUSE; 1032 break; 1033 case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | 1034 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): 1035 fc_conf->mode = RTE_FC_FULL; 1036 break; 1037 } 1038 return 0; 1039 } 1040 1041 static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, 1042 struct rte_eth_fc_conf *fc_conf) 1043 { 1044 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1045 1046 if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) { 1047 RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n"); 1048 return -ENOTSUP; 1049 } 1050 1051 switch (fc_conf->mode) { 1052 case RTE_FC_NONE: 1053 bp->link_info.auto_pause = 0; 1054 bp->link_info.force_pause = 0; 1055 break; 1056 case RTE_FC_RX_PAUSE: 1057 if (fc_conf->autoneg) { 1058 bp->link_info.auto_pause = 1059 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 1060 bp->link_info.force_pause = 0; 1061 } else { 1062 bp->link_info.auto_pause = 0; 1063 bp->link_info.force_pause = 1064 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 1065 } 1066 break; 1067 case RTE_FC_TX_PAUSE: 1068 if (fc_conf->autoneg) { 1069 bp->link_info.auto_pause = 1070 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; 1071 bp->link_info.force_pause = 0; 1072 } else { 1073 bp->link_info.auto_pause = 0; 1074 bp->link_info.force_pause = 1075 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; 1076 } 1077 break; 1078 case RTE_FC_FULL: 1079 if (fc_conf->autoneg) { 1080 bp->link_info.auto_pause = 1081 
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | 1082 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; 1083 bp->link_info.force_pause = 0; 1084 } else { 1085 bp->link_info.auto_pause = 0; 1086 bp->link_info.force_pause = 1087 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | 1088 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; 1089 } 1090 break; 1091 } 1092 return bnxt_set_hwrm_link_config(bp, true); 1093 } 1094 1095 /* Add UDP tunneling port */ 1096 static int 1097 bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, 1098 struct rte_eth_udp_tunnel *udp_tunnel) 1099 { 1100 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 1101 uint16_t tunnel_type = 0; 1102 int rc = 0; 1103 1104 switch (udp_tunnel->prot_type) { 1105 case RTE_TUNNEL_TYPE_VXLAN: 1106 if (bp->vxlan_port_cnt) { 1107 RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n", 1108 udp_tunnel->udp_port); 1109 if (bp->vxlan_port != udp_tunnel->udp_port) { 1110 RTE_LOG(ERR, PMD, "Only one port allowed\n"); 1111 return -ENOSPC; 1112 } 1113 bp->vxlan_port_cnt++; 1114 return 0; 1115 } 1116 tunnel_type = 1117 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN; 1118 bp->vxlan_port_cnt++; 1119 break; 1120 case RTE_TUNNEL_TYPE_GENEVE: 1121 if (bp->geneve_port_cnt) { 1122 RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n", 1123 udp_tunnel->udp_port); 1124 if (bp->geneve_port != udp_tunnel->udp_port) { 1125 RTE_LOG(ERR, PMD, "Only one port allowed\n"); 1126 return -ENOSPC; 1127 } 1128 bp->geneve_port_cnt++; 1129 return 0; 1130 } 1131 tunnel_type = 1132 HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE; 1133 bp->geneve_port_cnt++; 1134 break; 1135 default: 1136 RTE_LOG(ERR, PMD, "Tunnel type is not supported\n"); 1137 return -ENOTSUP; 1138 } 1139 rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port, 1140 tunnel_type); 1141 return rc; 1142 } 1143 1144 static int 1145 bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, 1146 struct rte_eth_udp_tunnel *udp_tunnel) 1147 { 1148 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 1149 uint16_t tunnel_type = 0; 1150 uint16_t port = 0; 1151 int rc = 0; 1152 1153 switch (udp_tunnel->prot_type) { 1154 case RTE_TUNNEL_TYPE_VXLAN: 1155 if (!bp->vxlan_port_cnt) { 1156 RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n"); 1157 return -EINVAL; 1158 } 1159 if (bp->vxlan_port != udp_tunnel->udp_port) { 1160 RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n", 1161 udp_tunnel->udp_port, bp->vxlan_port); 1162 return -EINVAL; 1163 } 1164 if (--bp->vxlan_port_cnt) 1165 return 0; 1166 1167 tunnel_type = 1168 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN; 1169 port = bp->vxlan_fw_dst_port_id; 1170 break; 1171 case RTE_TUNNEL_TYPE_GENEVE: 1172 if (!bp->geneve_port_cnt) { 1173 RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n"); 1174 return -EINVAL; 1175 } 1176 if (bp->geneve_port != udp_tunnel->udp_port) { 1177 RTE_LOG(ERR, PMD, "Req Port: %d. 
Configured port: %d\n", 1178 udp_tunnel->udp_port, bp->geneve_port); 1179 return -EINVAL; 1180 } 1181 if (--bp->geneve_port_cnt) 1182 return 0; 1183 1184 tunnel_type = 1185 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE; 1186 port = bp->geneve_fw_dst_port_id; 1187 break; 1188 default: 1189 RTE_LOG(ERR, PMD, "Tunnel type is not supported\n"); 1190 return -ENOTSUP; 1191 } 1192 1193 rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type); 1194 if (!rc) { 1195 if (tunnel_type == 1196 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) 1197 bp->vxlan_port = 0; 1198 if (tunnel_type == 1199 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) 1200 bp->geneve_port = 0; 1201 } 1202 return rc; 1203 } 1204 1205 static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 1206 { 1207 struct bnxt_filter_info *filter, *temp_filter, *new_filter; 1208 struct bnxt_vnic_info *vnic; 1209 unsigned int i; 1210 int rc = 0; 1211 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN; 1212 1213 /* Cycle through all VNICs */ 1214 for (i = 0; i < bp->nr_vnics; i++) { 1215 /* 1216 * For each VNIC and each associated filter(s) 1217 * if VLAN exists && VLAN matches vlan_id 1218 * remove the MAC+VLAN filter 1219 * add a new MAC only filter 1220 * else 1221 * VLAN filter doesn't exist, just skip and continue 1222 */ 1223 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { 1224 filter = STAILQ_FIRST(&vnic->filter); 1225 while (filter) { 1226 temp_filter = STAILQ_NEXT(filter, next); 1227 1228 if (filter->enables & chk && 1229 filter->l2_ovlan == vlan_id) { 1230 /* Must delete the filter */ 1231 STAILQ_REMOVE(&vnic->filter, filter, 1232 bnxt_filter_info, next); 1233 bnxt_hwrm_clear_l2_filter(bp, filter); 1234 STAILQ_INSERT_TAIL( 1235 &bp->free_filter_list, 1236 filter, next); 1237 1238 /* 1239 * Need to examine to see if the MAC 1240 * filter already existed or not before 1241 * allocating a new one 1242 */ 1243 1244 new_filter = bnxt_alloc_filter(bp); 1245 if (!new_filter) { 1246 RTE_LOG(ERR, PMD, 1247 "MAC/VLAN filter alloc failed\n"); 1248 rc = -ENOMEM; 1249 goto exit; 1250 } 1251 STAILQ_INSERT_TAIL(&vnic->filter, 1252 new_filter, next); 1253 /* Inherit MAC from previous filter */ 1254 new_filter->mac_index = 1255 filter->mac_index; 1256 memcpy(new_filter->l2_addr, 1257 filter->l2_addr, ETHER_ADDR_LEN); 1258 /* MAC only filter */ 1259 rc = bnxt_hwrm_set_l2_filter(bp, 1260 vnic->fw_vnic_id, 1261 new_filter); 1262 if (rc) 1263 goto exit; 1264 RTE_LOG(INFO, PMD, 1265 "Del Vlan filter for %d\n", 1266 vlan_id); 1267 } 1268 filter = temp_filter; 1269 } 1270 } 1271 } 1272 exit: 1273 return rc; 1274 } 1275 1276 static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) 1277 { 1278 struct bnxt_filter_info *filter, *temp_filter, *new_filter; 1279 struct bnxt_vnic_info *vnic; 1280 unsigned int i; 1281 int rc = 0; 1282 uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | 1283 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; 1284 uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; 1285 1286 /* Cycle through all VNICs */ 1287 for (i = 0; i < bp->nr_vnics; i++) { 1288 /* 1289 * For each VNIC and each associated filter(s) 1290 * if VLAN exists: 1291 * if VLAN matches vlan_id 1292 * VLAN filter already exists, just skip and continue 1293 * else 1294 * add a new MAC+VLAN filter 1295 * else 1296 * Remove the old MAC only filter 1297 * Add a new MAC+VLAN filter 1298 */ 1299 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { 1300 filter = STAILQ_FIRST(&vnic->filter); 1301 while (filter) { 1302 
temp_filter = STAILQ_NEXT(filter, next); 1303 1304 if (filter->enables & chk) { 1305 if (filter->l2_ovlan == vlan_id) 1306 goto cont; 1307 } else { 1308 /* Must delete the MAC filter */ 1309 STAILQ_REMOVE(&vnic->filter, filter, 1310 bnxt_filter_info, next); 1311 bnxt_hwrm_clear_l2_filter(bp, filter); 1312 filter->l2_ovlan = 0; 1313 STAILQ_INSERT_TAIL( 1314 &bp->free_filter_list, 1315 filter, next); 1316 } 1317 new_filter = bnxt_alloc_filter(bp); 1318 if (!new_filter) { 1319 RTE_LOG(ERR, PMD, 1320 "MAC/VLAN filter alloc failed\n"); 1321 rc = -ENOMEM; 1322 goto exit; 1323 } 1324 STAILQ_INSERT_TAIL(&vnic->filter, new_filter, 1325 next); 1326 /* Inherit MAC from the previous filter */ 1327 new_filter->mac_index = filter->mac_index; 1328 memcpy(new_filter->l2_addr, filter->l2_addr, 1329 ETHER_ADDR_LEN); 1330 /* MAC + VLAN ID filter */ 1331 new_filter->l2_ivlan = vlan_id; 1332 new_filter->l2_ivlan_mask = 0xF000; 1333 new_filter->enables |= en; 1334 rc = bnxt_hwrm_set_l2_filter(bp, 1335 vnic->fw_vnic_id, 1336 new_filter); 1337 if (rc) 1338 goto exit; 1339 RTE_LOG(INFO, PMD, 1340 "Added Vlan filter for %d\n", vlan_id); 1341 cont: 1342 filter = temp_filter; 1343 } 1344 } 1345 } 1346 exit: 1347 return rc; 1348 } 1349 1350 static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, 1351 uint16_t vlan_id, int on) 1352 { 1353 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 1354 1355 /* These operations apply to ALL existing MAC/VLAN filters */ 1356 if (on) 1357 return bnxt_add_vlan_filter(bp, vlan_id); 1358 else 1359 return bnxt_del_vlan_filter(bp, vlan_id); 1360 } 1361 1362 static int 1363 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) 1364 { 1365 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1366 unsigned int i; 1367 1368 if (mask & ETH_VLAN_FILTER_MASK) { 1369 if (!dev->data->dev_conf.rxmode.hw_vlan_filter) { 1370 /* Remove any VLAN filters programmed */ 1371 for (i = 0; i < 4095; i++) 1372 bnxt_del_vlan_filter(bp, i); 1373 } 1374 RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n", 1375 dev->data->dev_conf.rxmode.hw_vlan_filter); 1376 } 1377 1378 if (mask & ETH_VLAN_STRIP_MASK) { 1379 /* Enable or disable VLAN stripping */ 1380 for (i = 0; i < bp->nr_vnics; i++) { 1381 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1382 if (dev->data->dev_conf.rxmode.hw_vlan_strip) 1383 vnic->vlan_strip = true; 1384 else 1385 vnic->vlan_strip = false; 1386 bnxt_hwrm_vnic_cfg(bp, vnic); 1387 } 1388 RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n", 1389 dev->data->dev_conf.rxmode.hw_vlan_strip); 1390 } 1391 1392 if (mask & ETH_VLAN_EXTEND_MASK) 1393 RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n"); 1394 1395 return 0; 1396 } 1397 1398 static void 1399 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) 1400 { 1401 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1402 /* Default Filter is tied to VNIC 0 */ 1403 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 1404 struct bnxt_filter_info *filter; 1405 int rc; 1406 1407 if (BNXT_VF(bp)) 1408 return; 1409 1410 memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr)); 1411 memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN); 1412 1413 STAILQ_FOREACH(filter, &vnic->filter, next) { 1414 /* Default Filter is at Index 0 */ 1415 if (filter->mac_index != 0) 1416 continue; 1417 rc = bnxt_hwrm_clear_l2_filter(bp, filter); 1418 if (rc) 1419 break; 1420 memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN); 1421 memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN); 1422 filter->flags |= 
HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; 1423 filter->enables |= 1424 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | 1425 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; 1426 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); 1427 if (rc) 1428 break; 1429 filter->mac_index = 0; 1430 RTE_LOG(DEBUG, PMD, "Set MAC addr\n"); 1431 } 1432 } 1433 1434 static int 1435 bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, 1436 struct ether_addr *mc_addr_set, 1437 uint32_t nb_mc_addr) 1438 { 1439 struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; 1440 char *mc_addr_list = (char *)mc_addr_set; 1441 struct bnxt_vnic_info *vnic; 1442 uint32_t off = 0, i = 0; 1443 1444 vnic = &bp->vnic_info[0]; 1445 1446 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { 1447 vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; 1448 goto allmulti; 1449 } 1450 1451 /* TODO Check for Duplicate mcast addresses */ 1452 vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; 1453 for (i = 0; i < nb_mc_addr; i++) { 1454 memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN); 1455 off += ETHER_ADDR_LEN; 1456 } 1457 1458 vnic->mc_addr_cnt = i; 1459 1460 allmulti: 1461 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); 1462 } 1463 1464 static int 1465 bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 1466 { 1467 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1468 uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; 1469 uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; 1470 uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; 1471 int ret; 1472 1473 ret = snprintf(fw_version, fw_size, "%d.%d.%d", 1474 fw_major, fw_minor, fw_updt); 1475 1476 ret += 1; /* add the size of '\0' */ 1477 if (fw_size < (uint32_t)ret) 1478 return ret; 1479 else 1480 return 0; 1481 } 1482 1483 static void 1484 bnxt_rxq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 1485 struct rte_eth_rxq_info *qinfo) 1486 { 1487 struct bnxt_rx_queue *rxq; 1488 1489 rxq = dev->data->rx_queues[queue_id]; 1490 1491 qinfo->mp = rxq->mb_pool; 1492 qinfo->scattered_rx = dev->data->scattered_rx; 1493 qinfo->nb_desc = rxq->nb_rx_desc; 1494 1495 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 1496 qinfo->conf.rx_drop_en = 0; 1497 qinfo->conf.rx_deferred_start = 0; 1498 } 1499 1500 static void 1501 bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id, 1502 struct rte_eth_txq_info *qinfo) 1503 { 1504 struct bnxt_tx_queue *txq; 1505 1506 txq = dev->data->tx_queues[queue_id]; 1507 1508 qinfo->nb_desc = txq->nb_tx_desc; 1509 1510 qinfo->conf.tx_thresh.pthresh = txq->pthresh; 1511 qinfo->conf.tx_thresh.hthresh = txq->hthresh; 1512 qinfo->conf.tx_thresh.wthresh = txq->wthresh; 1513 1514 qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 1515 qinfo->conf.tx_rs_thresh = 0; 1516 qinfo->conf.txq_flags = txq->txq_flags; 1517 qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1518 } 1519 1520 static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) 1521 { 1522 struct bnxt *bp = eth_dev->data->dev_private; 1523 struct rte_eth_dev_info dev_info; 1524 uint32_t max_dev_mtu; 1525 uint32_t rc = 0; 1526 uint32_t i; 1527 1528 bnxt_dev_info_get_op(eth_dev, &dev_info); 1529 max_dev_mtu = dev_info.max_rx_pktlen - 1530 ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2; 1531 1532 if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) { 1533 RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n", 1534 ETHER_MIN_MTU, max_dev_mtu); 1535 return -EINVAL; 1536 } 1537 1538 1539 if (new_mtu > ETHER_MTU) { 1540 bp->flags |= BNXT_FLAG_JUMBO; 
1541 eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; 1542 } else { 1543 eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; 1544 bp->flags &= ~BNXT_FLAG_JUMBO; 1545 } 1546 1547 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = 1548 new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; 1549 1550 eth_dev->data->mtu = new_mtu; 1551 RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu); 1552 1553 for (i = 0; i < bp->nr_vnics; i++) { 1554 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 1555 uint16_t size = 0; 1556 1557 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN + 1558 ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; 1559 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 1560 if (rc) 1561 break; 1562 1563 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); 1564 size -= RTE_PKTMBUF_HEADROOM; 1565 1566 if (size < new_mtu) { 1567 rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); 1568 if (rc) 1569 return rc; 1570 } 1571 } 1572 1573 return rc; 1574 } 1575 1576 static int 1577 bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) 1578 { 1579 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1580 uint16_t vlan = bp->vlan; 1581 int rc; 1582 1583 if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) { 1584 RTE_LOG(ERR, PMD, 1585 "PVID cannot be modified for this function\n"); 1586 return -ENOTSUP; 1587 } 1588 bp->vlan = on ? pvid : 0; 1589 1590 rc = bnxt_hwrm_set_default_vlan(bp, 0, 0); 1591 if (rc) 1592 bp->vlan = vlan; 1593 return rc; 1594 } 1595 1596 static int 1597 bnxt_dev_led_on_op(struct rte_eth_dev *dev) 1598 { 1599 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1600 1601 return bnxt_hwrm_port_led_cfg(bp, true); 1602 } 1603 1604 static int 1605 bnxt_dev_led_off_op(struct rte_eth_dev *dev) 1606 { 1607 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1608 1609 return bnxt_hwrm_port_led_cfg(bp, false); 1610 } 1611 1612 static uint32_t 1613 bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1614 { 1615 uint32_t desc = 0, raw_cons = 0, cons; 1616 struct bnxt_cp_ring_info *cpr; 1617 struct bnxt_rx_queue *rxq; 1618 struct rx_pkt_cmpl *rxcmp; 1619 uint16_t cmp_type; 1620 uint8_t cmp = 1; 1621 bool valid; 1622 1623 rxq = dev->data->rx_queues[rx_queue_id]; 1624 cpr = rxq->cp_ring; 1625 valid = cpr->valid; 1626 1627 while (raw_cons < rxq->nb_rx_desc) { 1628 cons = RING_CMP(cpr->cp_ring_struct, raw_cons); 1629 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1630 1631 if (!CMPL_VALID(rxcmp, valid)) 1632 goto nothing_to_do; 1633 valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid); 1634 cmp_type = CMP_TYPE(rxcmp); 1635 if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) { 1636 cmp = (rte_le_to_cpu_32( 1637 ((struct rx_tpa_end_cmpl *) 1638 (rxcmp))->agg_bufs_v1) & 1639 RX_TPA_END_CMPL_AGG_BUFS_MASK) >> 1640 RX_TPA_END_CMPL_AGG_BUFS_SFT; 1641 desc++; 1642 } else if (cmp_type == 0x11) { 1643 desc++; 1644 cmp = (rxcmp->agg_bufs_v1 & 1645 RX_PKT_CMPL_AGG_BUFS_MASK) >> 1646 RX_PKT_CMPL_AGG_BUFS_SFT; 1647 } else { 1648 cmp = 1; 1649 } 1650 nothing_to_do: 1651 raw_cons += cmp ? 
cmp : 2; 1652 } 1653 1654 return desc; 1655 } 1656 1657 static int 1658 bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset) 1659 { 1660 struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; 1661 struct bnxt_rx_ring_info *rxr; 1662 struct bnxt_cp_ring_info *cpr; 1663 struct bnxt_sw_rx_bd *rx_buf; 1664 struct rx_pkt_cmpl *rxcmp; 1665 uint32_t cons, cp_cons; 1666 1667 if (!rxq) 1668 return -EINVAL; 1669 1670 cpr = rxq->cp_ring; 1671 rxr = rxq->rx_ring; 1672 1673 if (offset >= rxq->nb_rx_desc) 1674 return -EINVAL; 1675 1676 cons = RING_CMP(cpr->cp_ring_struct, offset); 1677 cp_cons = cpr->cp_raw_cons; 1678 rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1679 1680 if (cons > cp_cons) { 1681 if (CMPL_VALID(rxcmp, cpr->valid)) 1682 return RTE_ETH_RX_DESC_DONE; 1683 } else { 1684 if (CMPL_VALID(rxcmp, !cpr->valid)) 1685 return RTE_ETH_RX_DESC_DONE; 1686 } 1687 rx_buf = &rxr->rx_buf_ring[cons]; 1688 if (rx_buf->mbuf == NULL) 1689 return RTE_ETH_RX_DESC_UNAVAIL; 1690 1691 1692 return RTE_ETH_RX_DESC_AVAIL; 1693 } 1694 1695 static int 1696 bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset) 1697 { 1698 struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; 1699 struct bnxt_tx_ring_info *txr; 1700 struct bnxt_cp_ring_info *cpr; 1701 struct bnxt_sw_tx_bd *tx_buf; 1702 struct tx_pkt_cmpl *txcmp; 1703 uint32_t cons, cp_cons; 1704 1705 if (!txq) 1706 return -EINVAL; 1707 1708 cpr = txq->cp_ring; 1709 txr = txq->tx_ring; 1710 1711 if (offset >= txq->nb_tx_desc) 1712 return -EINVAL; 1713 1714 cons = RING_CMP(cpr->cp_ring_struct, offset); 1715 txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; 1716 cp_cons = cpr->cp_raw_cons; 1717 1718 if (cons > cp_cons) { 1719 if (CMPL_VALID(txcmp, cpr->valid)) 1720 return RTE_ETH_TX_DESC_UNAVAIL; 1721 } else { 1722 if (CMPL_VALID(txcmp, !cpr->valid)) 1723 return RTE_ETH_TX_DESC_UNAVAIL; 1724 } 1725 tx_buf = &txr->tx_buf_ring[cons]; 1726 if (tx_buf->mbuf == NULL) 1727 return RTE_ETH_TX_DESC_DONE; 1728 1729 return RTE_ETH_TX_DESC_FULL; 1730 } 1731 1732 static struct bnxt_filter_info * 1733 bnxt_match_and_validate_ether_filter(struct bnxt *bp, 1734 struct rte_eth_ethertype_filter *efilter, 1735 struct bnxt_vnic_info *vnic0, 1736 struct bnxt_vnic_info *vnic, 1737 int *ret) 1738 { 1739 struct bnxt_filter_info *mfilter = NULL; 1740 int match = 0; 1741 *ret = 0; 1742 1743 if (efilter->ether_type == ETHER_TYPE_IPv4 || 1744 efilter->ether_type == ETHER_TYPE_IPv6) { 1745 RTE_LOG(ERR, PMD, "invalid ether_type(0x%04x) in" 1746 " ethertype filter.", efilter->ether_type); 1747 *ret = -EINVAL; 1748 goto exit; 1749 } 1750 if (efilter->queue >= bp->rx_nr_rings) { 1751 RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue); 1752 *ret = -EINVAL; 1753 goto exit; 1754 } 1755 1756 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); 1757 vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]); 1758 if (vnic == NULL) { 1759 RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue); 1760 *ret = -EINVAL; 1761 goto exit; 1762 } 1763 1764 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 1765 STAILQ_FOREACH(mfilter, &vnic0->filter, next) { 1766 if ((!memcmp(efilter->mac_addr.addr_bytes, 1767 mfilter->l2_addr, ETHER_ADDR_LEN) && 1768 mfilter->flags == 1769 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP && 1770 mfilter->ethertype == efilter->ether_type)) { 1771 match = 1; 1772 break; 1773 } 1774 } 1775 } else { 1776 STAILQ_FOREACH(mfilter, &vnic->filter, next) 1777 if ((!memcmp(efilter->mac_addr.addr_bytes, 1778 mfilter->l2_addr, ETHER_ADDR_LEN) && 1779 mfilter->ethertype == 
efilter->ether_type && 1780 mfilter->flags == 1781 HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) { 1782 match = 1; 1783 break; 1784 } 1785 } 1786 1787 if (match) 1788 *ret = -EEXIST; 1789 1790 exit: 1791 return mfilter; 1792 } 1793 1794 static int 1795 bnxt_ethertype_filter(struct rte_eth_dev *dev, 1796 enum rte_filter_op filter_op, 1797 void *arg) 1798 { 1799 struct bnxt *bp = (struct bnxt *)dev->data->dev_private; 1800 struct rte_eth_ethertype_filter *efilter = 1801 (struct rte_eth_ethertype_filter *)arg; 1802 struct bnxt_filter_info *bfilter, *filter1; 1803 struct bnxt_vnic_info *vnic, *vnic0; 1804 int ret; 1805 1806 if (filter_op == RTE_ETH_FILTER_NOP) 1807 return 0; 1808 1809 if (arg == NULL) { 1810 RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.", 1811 filter_op); 1812 return -EINVAL; 1813 } 1814 1815 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); 1816 vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]); 1817 1818 switch (filter_op) { 1819 case RTE_ETH_FILTER_ADD: 1820 bnxt_match_and_validate_ether_filter(bp, efilter, 1821 vnic0, vnic, &ret); 1822 if (ret < 0) 1823 return ret; 1824 1825 bfilter = bnxt_get_unused_filter(bp); 1826 if (bfilter == NULL) { 1827 RTE_LOG(ERR, PMD, 1828 "Not enough resources for a new filter.\n"); 1829 return -ENOMEM; 1830 } 1831 bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER; 1832 memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes, 1833 ETHER_ADDR_LEN); 1834 memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes, 1835 ETHER_ADDR_LEN); 1836 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR; 1837 bfilter->ethertype = efilter->ether_type; 1838 bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE; 1839 1840 filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); 1841 if (filter1 == NULL) { 1842 ret = -1; 1843 goto cleanup; 1844 } 1845 bfilter->enables |= 1846 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; 1847 bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id; 1848 1849 bfilter->dst_id = vnic->fw_vnic_id; 1850 1851 if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) { 1852 bfilter->flags = 1853 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; 1854 } 1855 1856 ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter); 1857 if (ret) 1858 goto cleanup; 1859 STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next); 1860 break; 1861 case RTE_ETH_FILTER_DELETE: 1862 filter1 = bnxt_match_and_validate_ether_filter(bp, efilter, 1863 vnic0, vnic, &ret); 1864 if (ret == -EEXIST) { 1865 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1); 1866 1867 STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info, 1868 next); 1869 bnxt_free_filter(bp, filter1); 1870 } else if (ret == 0) { 1871 RTE_LOG(ERR, PMD, "No matching filter found\n"); 1872 } 1873 break; 1874 default: 1875 RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op); 1876 ret = -EINVAL; 1877 goto error; 1878 } 1879 return ret; 1880 cleanup: 1881 bnxt_free_filter(bp, bfilter); 1882 error: 1883 return ret; 1884 } 1885 1886 static inline int 1887 parse_ntuple_filter(struct bnxt *bp, 1888 struct rte_eth_ntuple_filter *nfilter, 1889 struct bnxt_filter_info *bfilter) 1890 { 1891 uint32_t en = 0; 1892 1893 if (nfilter->queue >= bp->rx_nr_rings) { 1894 RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue); 1895 return -EINVAL; 1896 } 1897 1898 switch (nfilter->dst_port_mask) { 1899 case UINT16_MAX: 1900 bfilter->dst_port_mask = -1; 1901 bfilter->dst_port = nfilter->dst_port; 1902 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT | 1903 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; 1904 break; 1905 default: 1906 
		RTE_LOG(ERR, PMD, "invalid dst_port mask.");
		return -EINVAL;
	}

	bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;

	switch (nfilter->proto_mask) {
	case UINT8_MAX:
		if (nfilter->proto == 17) /* IPPROTO_UDP */
			bfilter->ip_protocol = 17;
		else if (nfilter->proto == 6) /* IPPROTO_TCP */
			bfilter->ip_protocol = 6;
		else
			return -EINVAL;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		break;
	default:
		RTE_LOG(ERR, PMD, "invalid protocol mask.");
		return -EINVAL;
	}

	switch (nfilter->dst_ip_mask) {
	case UINT32_MAX:
		bfilter->dst_ipaddr_mask[0] = -1;
		bfilter->dst_ipaddr[0] = nfilter->dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		break;
	default:
		RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
		return -EINVAL;
	}

	switch (nfilter->src_ip_mask) {
	case UINT32_MAX:
		bfilter->src_ipaddr_mask[0] = -1;
		bfilter->src_ipaddr[0] = nfilter->src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		break;
	default:
		RTE_LOG(ERR, PMD, "invalid src_ip mask.");
		return -EINVAL;
	}

	switch (nfilter->src_port_mask) {
	case UINT16_MAX:
		bfilter->src_port_mask = -1;
		bfilter->src_port = nfilter->src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		break;
	default:
		RTE_LOG(ERR, PMD, "invalid src_port mask.");
		return -EINVAL;
	}

	//TODO Priority
	//nfilter->priority = (uint8_t)filter->priority;

	bfilter->enables = en;
	return 0;
}

static struct bnxt_filter_info*
bnxt_match_ntuple_filter(struct bnxt *bp,
			 struct bnxt_filter_info *bfilter,
			 struct bnxt_vnic_info **mvnic)
{
	struct bnxt_filter_info *mfilter = NULL;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
		STAILQ_FOREACH(mfilter, &vnic->filter, next) {
			if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
			    bfilter->src_ipaddr_mask[0] ==
			    mfilter->src_ipaddr_mask[0] &&
			    bfilter->src_port == mfilter->src_port &&
			    bfilter->src_port_mask == mfilter->src_port_mask &&
			    bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
			    bfilter->dst_ipaddr_mask[0] ==
			    mfilter->dst_ipaddr_mask[0] &&
			    bfilter->dst_port == mfilter->dst_port &&
			    bfilter->dst_port_mask == mfilter->dst_port_mask &&
			    bfilter->flags == mfilter->flags &&
			    bfilter->enables == mfilter->enables) {
				if (mvnic)
					*mvnic = vnic;
				return mfilter;
			}
		}
	}
	return NULL;
}

static int
bnxt_cfg_ntuple_filter(struct bnxt *bp,
		       struct rte_eth_ntuple_filter *nfilter,
		       enum rte_filter_op filter_op)
{
	struct bnxt_filter_info *bfilter, *mfilter, *filter1;
	struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
	int ret;

	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
		RTE_LOG(ERR, PMD, "only 5tuple is supported.");
		return -EINVAL;
	}

	if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
		return -EINVAL;
	}

	bfilter = bnxt_get_unused_filter(bp);
	if (bfilter == NULL) {
		RTE_LOG(ERR, PMD,
			"Not enough resources for a new filter.\n");
		return -ENOMEM;
	}
	ret = parse_ntuple_filter(bp, nfilter, bfilter);
	if (ret < 0)
		goto free_filter;

	vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	filter1 = STAILQ_FIRST(&vnic0->filter);
	if (filter1 == NULL) {
		ret = -1;
		goto free_filter;
	}

	bfilter->dst_id = vnic->fw_vnic_id;
	bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
	bfilter->enables |=
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
	bfilter->ethertype = 0x800;
	bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;

	mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);

	if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
	    bfilter->dst_id == mfilter->dst_id) {
		RTE_LOG(ERR, PMD, "filter exists.\n");
		ret = -EEXIST;
		goto free_filter;
	} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
		   bfilter->dst_id != mfilter->dst_id) {
		mfilter->dst_id = vnic->fw_vnic_id;
		ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
		STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
		STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
		RTE_LOG(ERR, PMD, "filter with matching pattern exists.\n");
		RTE_LOG(ERR, PMD, " Updated it to the new destination queue\n");
		goto free_filter;
	}
	if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
		RTE_LOG(ERR, PMD, "filter doesn't exist.");
		ret = -ENOENT;
		goto free_filter;
	}

	if (filter_op == RTE_ETH_FILTER_ADD) {
		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
		if (ret)
			goto free_filter;
		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
	} else {
		if (mfilter == NULL) {
			/* This should not happen. But for Coverity! */
			ret = -ENOENT;
			goto free_filter;
		}
		ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);

		STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
		bnxt_free_filter(bp, mfilter);
		mfilter->fw_l2_filter_id = -1;
		bnxt_free_filter(bp, bfilter);
		bfilter->fw_l2_filter_id = -1;
	}

	return 0;
free_filter:
	bfilter->fw_l2_filter_id = -1;
	bnxt_free_filter(bp, bfilter);
	return ret;
}

static int
bnxt_ntuple_filter(struct rte_eth_dev *dev,
		   enum rte_filter_op filter_op,
		   void *arg)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	int ret;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = bnxt_cfg_ntuple_filter(bp,
			(struct rte_eth_ntuple_filter *)arg,
			filter_op);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = bnxt_cfg_ntuple_filter(bp,
			(struct rte_eth_ntuple_filter *)arg,
			filter_op);
		break;
	default:
		RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
bnxt_parse_fdir_filter(struct bnxt *bp,
		       struct rte_eth_fdir_filter *fdir,
		       struct bnxt_filter_info *filter)
{
	enum rte_fdir_mode fdir_mode =
		bp->eth_dev->data->dev_conf.fdir_conf.mode;
	struct bnxt_vnic_info *vnic0, *vnic;
	struct bnxt_filter_info *filter1;
	uint32_t en = 0;
	int i;

	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		return -EINVAL;

	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;

	switch (fdir->input.flow_type) {
	case RTE_ETH_FLOW_IPV4:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		/* FALLTHROUGH */
		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
		filter->src_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->dst_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		filter->ethertype = 0x800;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		filter->ip_protocol = 6;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
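		/* Flow Director perfect match: program all-ones masks for the
		 * IPv4 addresses below, matching the full port masks set above.
		 */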
		filter->src_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->dst_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		filter->ethertype = 0x800;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		filter->src_port = fdir->input.flow.udp4_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		filter->ip_protocol = 17;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
		filter->src_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->dst_ipaddr_mask[0] = 0xffffffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		filter->ethertype = 0x800;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_IPV6:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		/* FALLTHROUGH */
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		rte_memcpy(filter->src_ipaddr,
			   fdir->input.flow.ipv6_flow.src_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		rte_memcpy(filter->dst_ipaddr,
			   fdir->input.flow.ipv6_flow.dst_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		memset(filter->dst_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		memset(filter->src_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->ethertype = 0x86dd;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		rte_memcpy(filter->src_ipaddr,
			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		rte_memcpy(filter->dst_ipaddr,
			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		memset(filter->dst_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		memset(filter->src_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->ethertype = 0x86dd;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		filter->src_port = fdir->input.flow.udp6_flow.src_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
		filter->dst_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
		filter->src_port_mask = 0xffff;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
		filter->ip_addr_type =
			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
		rte_memcpy(filter->src_ipaddr,
			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
		rte_memcpy(filter->dst_ipaddr,
			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
		memset(filter->dst_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
		memset(filter->src_ipaddr_mask, 0xff, 16);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
		filter->ethertype = 0x86dd;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_L2_PAYLOAD:
		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
		break;
	case RTE_ETH_FLOW_VXLAN:
		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
			return -EINVAL;
		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
		filter->tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
		break;
	case RTE_ETH_FLOW_NVGRE:
		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
			return -EINVAL;
		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
		filter->tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
		break;
	case RTE_ETH_FLOW_UNKNOWN:
	case RTE_ETH_FLOW_RAW:
	case RTE_ETH_FLOW_FRAG_IPV4:
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_FRAG_IPV6:
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_IPV6_EX:
	case RTE_ETH_FLOW_IPV6_TCP_EX:
	case RTE_ETH_FLOW_IPV6_UDP_EX:
	case RTE_ETH_FLOW_GENEVE:
		/* FALLTHROUGH */
	default:
		return -EINVAL;
	}

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
	if (vnic == NULL) {
		RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
		return -EINVAL;
	}

	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(filter->dst_macaddr,
			fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
	}

	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		filter1 = STAILQ_FIRST(&vnic0->filter);
		//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
	} else {
		filter->dst_id = vnic->fw_vnic_id;
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			if (filter->dst_macaddr[i] == 0x00)
				filter1 = STAILQ_FIRST(&vnic0->filter);
			else
				filter1 = bnxt_get_l2_filter(bp, filter, vnic);
	}

	if (filter1 == NULL)
		return -EINVAL;

	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;

	filter->enables = en;

	return 0;
}
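
/*
 * Walk every vnic's filter list looking for an entry whose match fields are
 * identical to @nf; used by bnxt_fdir_filter() to detect duplicate adds and
 * to locate the filter to remove on delete.
 */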
static struct bnxt_filter_info *
bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf = NULL;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(mf, &vnic->filter, next) {
			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask)))
				return mf;
		}
	}
	return NULL;
}

static int
bnxt_fdir_filter(struct rte_eth_dev *dev,
		 enum rte_filter_op filter_op,
		 void *arg)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
	struct bnxt_filter_info *filter, *match;
	struct bnxt_vnic_info *vnic;
	int ret = 0, i;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_DELETE:
		/* FALLTHROUGH */
		filter = bnxt_get_unused_filter(bp);
		if (filter == NULL) {
			RTE_LOG(ERR, PMD,
				"Not enough resources for a new flow.\n");
			return -ENOMEM;
		}

		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
		if (ret != 0)
			goto free_filter;
		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;

		match = bnxt_match_fdir(bp, filter);
		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
			RTE_LOG(ERR, PMD, "Flow already exists.\n");
			ret = -EEXIST;
			goto free_filter;
		}
		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
			RTE_LOG(ERR, PMD, "Flow does not exist.\n");
			ret = -ENOENT;
			goto free_filter;
		}

		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
			vnic = STAILQ_FIRST(&bp->ff_pool[0]);
		else
			vnic =
			STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);

		if (filter_op == RTE_ETH_FILTER_ADD) {
			ret = bnxt_hwrm_set_ntuple_filter(bp,
							  filter->dst_id,
							  filter);
			if (ret)
				goto free_filter;
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		} else {
			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
			STAILQ_REMOVE(&vnic->filter, match,
				      bnxt_filter_info, next);
			bnxt_free_filter(bp, match);
			filter->fw_l2_filter_id = -1;
			bnxt_free_filter(bp, filter);
		}
		break;
	case RTE_ETH_FILTER_FLUSH:
		for (i = bp->nr_vnics - 1; i >= 0; i--) {
			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

			STAILQ_FOREACH(filter, &vnic->filter, next) {
				if (filter->filter_type ==
				    HWRM_CFA_NTUPLE_FILTER) {
					ret =
					bnxt_hwrm_clear_ntuple_filter(bp,
								      filter);
					STAILQ_REMOVE(&vnic->filter, filter,
						      bnxt_filter_info, next);
				}
			}
		}
		return ret;
	case RTE_ETH_FILTER_UPDATE:
	case RTE_ETH_FILTER_STATS:
	case RTE_ETH_FILTER_INFO:
		/* FALLTHROUGH */
		RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
		break;
	default:
		RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;

free_filter:
	filter->fw_l2_filter_id = -1;
	bnxt_free_filter(bp, filter);
	return ret;
}

static int
bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op, void *arg)
{
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		RTE_LOG(ERR, PMD,
			"filter type: %d: To be implemented\n", filter_type);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = bnxt_fdir_filter(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_NTUPLE:
		ret = bnxt_ntuple_filter(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = bnxt_ethertype_filter(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &bnxt_flow_ops;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Filter type (%d) not supported", filter_type);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static const uint32_t *
bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == bnxt_recv_pkts)
		return ptypes;
	return NULL;
}

static int
bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	int rc;
	uint32_t dir_entries;
	uint32_t entry_length;

	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
		__func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
		bp->pdev->addr.devid, bp->pdev->addr.function);

	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	return dir_entries * entry_length;
}

static int
bnxt_get_eeprom_op(struct rte_eth_dev *dev,
		   struct rte_dev_eeprom_info *in_eeprom)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint32_t index;
	uint32_t offset;

	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
		"len = %d\n", __func__, bp->pdev->addr.domain,
		bp->pdev->addr.bus, bp->pdev->addr.devid,
		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

	if (in_eeprom->offset == 0) /* special offset value to get directory */
		return bnxt_get_nvram_directory(bp, in_eeprom->length,
						in_eeprom->data);

	index = in_eeprom->offset >> 24;
	offset = in_eeprom->offset & 0xffffff;

	if (index != 0)
		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
						in_eeprom->length,
						in_eeprom->data);

	return 0;
}

static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_executable(uint16_t dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_other_exec_format(dir_type);
}

static int
bnxt_set_eeprom_op(struct rte_eth_dev *dev,
		   struct rte_dev_eeprom_info *in_eeprom)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	uint8_t index, dir_op;
	uint16_t type, ext, ordinal, attr;

	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
		"len = %d\n", __func__, bp->pdev->addr.domain,
		bp->pdev->addr.bus, bp->pdev->addr.devid,
		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
		return -EINVAL;
	}

	type = in_eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = in_eeprom->magic & 0xff;
		dir_op = in_eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			if (in_eeprom->offset != ~in_eeprom->magic)
				return -EINVAL;
			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type) == true)
		return -EOPNOTSUPP;
	ext = in_eeprom->magic & 0xffff;
	ordinal = in_eeprom->offset >> 16;
	attr = in_eeprom->offset & 0xffff;

	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
				     in_eeprom->data, in_eeprom->length);
	return 0;
}

/*
 * Initialization
 */

static const struct eth_dev_ops bnxt_dev_ops = {
	.dev_infos_get = bnxt_dev_info_get_op,
	.dev_close = bnxt_dev_close_op,
	.dev_configure = bnxt_dev_configure_op,
	.dev_start = bnxt_dev_start_op,
	.dev_stop = bnxt_dev_stop_op,
	.dev_set_link_up = bnxt_dev_set_link_up_op,
	.dev_set_link_down = bnxt_dev_set_link_down_op,
	.stats_get = bnxt_stats_get_op,
	.stats_reset = bnxt_stats_reset_op,
	.rx_queue_setup = bnxt_rx_queue_setup_op,
	.rx_queue_release = bnxt_rx_queue_release_op,
	.tx_queue_setup = bnxt_tx_queue_setup_op,
	.tx_queue_release = bnxt_tx_queue_release_op,
	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
	.reta_update = bnxt_reta_update_op,
	.reta_query = bnxt_reta_query_op,
	.rss_hash_update = bnxt_rss_hash_update_op,
	.rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
	.link_update = bnxt_link_update_op,
	.promiscuous_enable = bnxt_promiscuous_enable_op,
	.promiscuous_disable = bnxt_promiscuous_disable_op,
	.allmulticast_enable = bnxt_allmulticast_enable_op,
	.allmulticast_disable = bnxt_allmulticast_disable_op,
	.mac_addr_add = bnxt_mac_addr_add_op,
	.mac_addr_remove = bnxt_mac_addr_remove_op,
	.flow_ctrl_get = bnxt_flow_ctrl_get_op,
	.flow_ctrl_set = bnxt_flow_ctrl_set_op,
	.udp_tunnel_port_add = bnxt_udp_tunnel_port_add_op,
	.udp_tunnel_port_del = bnxt_udp_tunnel_port_del_op,
	.vlan_filter_set = bnxt_vlan_filter_set_op,
	.vlan_offload_set = bnxt_vlan_offload_set_op,
	.vlan_pvid_set = bnxt_vlan_pvid_set_op,
	.mtu_set = bnxt_mtu_set_op,
	.mac_addr_set = bnxt_set_default_mac_addr_op,
	.xstats_get = bnxt_dev_xstats_get_op,
	.xstats_get_names = bnxt_dev_xstats_get_names_op,
	.xstats_reset = bnxt_dev_xstats_reset_op,
	.fw_version_get = bnxt_fw_version_get,
	.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
	.rxq_info_get = bnxt_rxq_info_get_op,
	.txq_info_get = bnxt_txq_info_get_op,
	.dev_led_on = bnxt_dev_led_on_op,
	.dev_led_off = bnxt_dev_led_off_op,
	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
	.rx_queue_count = bnxt_rx_queue_count_op,
	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
	.filter_ctrl = bnxt_filter_ctrl_op,
	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
	.get_eeprom_length = bnxt_get_eeprom_length_op,
	.get_eeprom = bnxt_get_eeprom_op,
	.set_eeprom = bnxt_set_eeprom_op,
};

static bool bnxt_vf_pciid(uint16_t id)
{
	if (id == BROADCOM_DEV_ID_57304_VF ||
	    id == BROADCOM_DEV_ID_57406_VF ||
	    id == BROADCOM_DEV_ID_5731X_VF ||
	    id == BROADCOM_DEV_ID_5741X_VF ||
	    id == BROADCOM_DEV_ID_57414_VF ||
	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
		return true;
	return false;
}

static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int rc;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	if (!pci_dev->mem_resource[0].addr) {
		RTE_LOG(ERR, PMD,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	bp->eth_dev = eth_dev;
	bp->pdev = pci_dev;

	bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
	if (!bp->bar0) {
		RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}
	return 0;

init_err_release:
	if (bp->bar0)
		bp->bar0 = NULL;

init_err_disable:

	return rc;
}

static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);

#define ALLOW_FUNC(x)	\
	{ \
		typeof(x) arg = (x); \
		bp->pf.vf_req_fwd[((arg) >> 5)] &= \
		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
	}
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	static int version_printed;
	uint32_t total_alloc_len;
	rte_iova_t mz_phys_addr;
	struct bnxt *bp;
	int rc;

	if (version_printed++ == 0)
		RTE_LOG(INFO, PMD, "%s\n", bnxt_version);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	bp = eth_dev->data->dev_private;

	rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
	bp->dev_stopped = 1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto skip_init;

	if (bnxt_vf_pciid(pci_dev->id.device_id))
		bp->flags |= BNXT_FLAG_VF;

	rc = bnxt_init_board(eth_dev);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Board initialization failed rc: %x\n", rc);
		goto error;
	}
skip_init:
	eth_dev->dev_ops = &bnxt_dev_ops;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;

	if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
			 pci_dev->addr.bus, pci_dev->addr.devid,
			 pci_dev->addr.function, "rx_port_stats");
		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
		mz = rte_memzone_lookup(mz_name);
		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
				sizeof(struct rx_port_stats) + 512);
		if (!mz) {
			mz = rte_memzone_reserve(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL)
				return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		mz_phys_addr = mz->iova;
		if ((unsigned long)mz->addr == mz_phys_addr) {
			RTE_LOG(WARNING, PMD,
				"Memzone physical address same as virtual.\n");
			RTE_LOG(WARNING, PMD,
				"Using rte_mem_virt2iova()\n");
			mz_phys_addr = rte_mem_virt2iova(mz->addr);
			if (mz_phys_addr == 0) {
				RTE_LOG(ERR, PMD,
					"unable to map address to physical memory\n");
				return -ENOMEM;
			}
		}

		bp->rx_mem_zone = (const void *)mz;
		bp->hw_rx_port_stats = mz->addr;
		bp->hw_rx_port_stats_map = mz_phys_addr;

		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
			 pci_dev->addr.bus, pci_dev->addr.devid,
			 pci_dev->addr.function, "tx_port_stats");
		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
		mz = rte_memzone_lookup(mz_name);
		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
				sizeof(struct tx_port_stats) + 512);
		if (!mz) {
			mz = rte_memzone_reserve(mz_name, total_alloc_len,
						 SOCKET_ID_ANY,
						 RTE_MEMZONE_2MB |
						 RTE_MEMZONE_SIZE_HINT_ONLY);
			if (mz == NULL)
				return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		mz_phys_addr = mz->iova;
		if ((unsigned long)mz->addr == mz_phys_addr) {
			RTE_LOG(WARNING, PMD,
				"Memzone physical address same as virtual.\n");
			RTE_LOG(WARNING, PMD,
				"Using rte_mem_virt2iova()\n");
			mz_phys_addr = rte_mem_virt2iova(mz->addr);
			if (mz_phys_addr == 0) {
				RTE_LOG(ERR, PMD,
					"unable to map address to physical memory\n");
				return -ENOMEM;
			}
		}

		bp->tx_mem_zone = (const void *)mz;
		bp->hw_tx_port_stats = mz->addr;
		bp->hw_tx_port_stats_map = mz_phys_addr;

		bp->flags |= BNXT_FLAG_PORT_STATS;
	}

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"hwrm resource allocation failure rc: %x\n", rc);
		goto error_free;
	}
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto error_free;
	bnxt_hwrm_queue_qportcfg(bp);

	bnxt_hwrm_func_qcfg(bp);

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
		goto error_free;
	}
	if (bp->max_tx_rings == 0) {
		RTE_LOG(ERR, PMD, "No TX rings available!\n");
		rc = -EBUSY;
		goto error_free;
	}
	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
					       ETHER_ADDR_LEN * bp->max_l2_ctx,
					       0);
	if (eth_dev->data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %u bytes needed to store MAC addr tbl",
			ETHER_ADDR_LEN * bp->max_l2_ctx);
		rc = -ENOMEM;
		goto error_free;
	}
	/* Copy the permanent MAC from the qcap response address now. */
	memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);

	if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
		/* 1 ring is for default completion ring */
		RTE_LOG(ERR, PMD, "Insufficient resource: Ring Group\n");
		rc = -ENOSPC;
		goto error_free;
	}

	bp->grp_info = rte_zmalloc("bnxt_grp_info",
				   sizeof(*bp->grp_info) * bp->max_ring_grps,
				   0);
	if (!bp->grp_info) {
		RTE_LOG(ERR, PMD,
			"Failed to alloc %zu bytes to store group info table\n",
			sizeof(*bp->grp_info) * bp->max_ring_grps);
		rc = -ENOMEM;
		goto error_free;
	}

	/* Forward all requests if firmware is new enough */
	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
	    (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
		memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
	} else {
		RTE_LOG(WARNING, PMD,
			"Firmware too old for VF mailbox functionality\n");
		memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
	}

	/*
	 * The following are used for driver cleanup. If we disallow these,
	 * VF drivers can't clean up cleanly.
	 */
	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
	ALLOW_FUNC(HWRM_VNIC_FREE);
	ALLOW_FUNC(HWRM_RING_FREE);
	ALLOW_FUNC(HWRM_RING_GRP_FREE);
	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
	rc = bnxt_hwrm_func_driver_register(bp);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to register driver");
		rc = -EBUSY;
		goto error_free;
	}

	RTE_LOG(INFO, PMD,
		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
		pci_dev->mem_resource[0].phys_addr,
		pci_dev->mem_resource[0].addr);

	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
		rc = -1;
		goto error_free;
	}

	if (BNXT_PF(bp)) {
		//if (bp->pf.active_vfs) {
		// TODO: Deallocate VF resources?
		//}
		if (bp->pdev->max_vfs) {
			rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
			if (rc) {
				RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
				goto error_free;
			}
		} else {
			rc = bnxt_hwrm_allocate_pf_only(bp);
			if (rc) {
				RTE_LOG(ERR, PMD,
					"Failed to allocate PF resources\n");
				goto error_free;
			}
		}
	}

	bnxt_hwrm_port_led_qcaps(bp);

	rc = bnxt_setup_int(bp);
	if (rc)
		goto error_free;

	rc = bnxt_alloc_mem(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_request_int(bp);
	if (rc)
		goto error_free_int;

	rc = bnxt_alloc_def_cp_ring(bp);
	if (rc)
		goto error_free_int;

	bnxt_enable_int(bp);
	bnxt_init_nic(bp);

	return 0;

error_free_int:
	bnxt_disable_int(bp);
	bnxt_free_def_cp_ring(bp);
	bnxt_hwrm_func_buf_unrgtr(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
error_free:
	bnxt_dev_uninit(eth_dev);
error:
	return rc;
}

static int
bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
	struct bnxt *bp = eth_dev->data->dev_private;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	bnxt_disable_int(bp);
	bnxt_free_int(bp);
	bnxt_free_mem(bp);
	if (eth_dev->data->mac_addrs != NULL) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}
	if (bp->grp_info != NULL) {
		rte_free(bp->grp_info);
		bp->grp_info = NULL;
	}
	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
	bnxt_free_hwrm_resources(bp);
	rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
	rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
	if (bp->dev_stopped == 0)
		bnxt_dev_close_op(eth_dev);
	if (bp->pf.vf_info)
		rte_free(bp->pf.vf_info);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return rc;
}

static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
		bnxt_dev_init);
}

static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
}

static struct rte_pci_driver bnxt_rte_pmd = {
	.id_table = bnxt_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		RTE_PCI_DRV_INTR_LSC,
	.probe = bnxt_pci_probe,
	.remove = bnxt_pci_remove,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool is_bnxt_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &bnxt_rte_pmd);
}

RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");