/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_log.h>
#include <ethdev_pci.h>
#include <rte_alarm.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
#include "txgbe_regs_group.h"

static const struct reg_info txgbevf_regs_general[] = {
	{TXGBE_VFRST, 1, 1, "TXGBE_VFRST"},
	{TXGBE_VFSTATUS, 1, 1, "TXGBE_VFSTATUS"},
	{TXGBE_VFMBCTL, 1, 1, "TXGBE_VFMAILBOX"},
	{TXGBE_VFMBX, 16, 4, "TXGBE_VFMBX"},
	{TXGBE_VFPBWRAP, 1, 1, "TXGBE_VFPBWRAP"},
	{0, 0, 0, ""}
};

static const struct reg_info txgbevf_regs_interrupt[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbevf_regs_rxdma[] = {
	{0, 0, 0, ""}
};

static const struct reg_info txgbevf_regs_tx[] = {
	{0, 0, 0, ""}
};

/* VF registers */
static const struct reg_info *txgbevf_regs[] = {
	txgbevf_regs_general,
	txgbevf_regs_interrupt,
	txgbevf_regs_rxdma,
	txgbevf_regs_tx,
	NULL};

static int txgbevf_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned int n);
static int txgbevf_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int txgbevf_dev_configure(struct rte_eth_dev *dev);
static int txgbevf_dev_start(struct rte_eth_dev *dev);
static int txgbevf_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int txgbevf_dev_stop(struct rte_eth_dev *dev);
static int txgbevf_dev_close(struct rte_eth_dev *dev);
static void txgbevf_intr_disable(struct rte_eth_dev *dev);
static void txgbevf_intr_enable(struct rte_eth_dev *dev);
static int txgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static void txgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static void txgbevf_configure_msix(struct rte_eth_dev *dev);
static int txgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int txgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void txgbevf_dev_interrupt_handler(void *param);

/*
 * The set of PCI devices this driver supports (for VF)
 */
static const struct rte_pci_id pci_id_txgbevf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_SP1000_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbevf_eth_dev_ops;

static const struct rte_txgbe_xstats_name_off rte_txgbevf_stats_strings[] = {
	{"rx_multicast_packets_0",
			offsetof(struct txgbevf_hw_stats, qp[0].vfmprc)},
	{"rx_multicast_packets_1",
			offsetof(struct txgbevf_hw_stats, qp[1].vfmprc)},
	{"rx_multicast_packets_2",
			offsetof(struct txgbevf_hw_stats, qp[2].vfmprc)},
	{"rx_multicast_packets_3",
			offsetof(struct txgbevf_hw_stats, qp[3].vfmprc)},
	{"rx_multicast_packets_4",
			offsetof(struct txgbevf_hw_stats, qp[4].vfmprc)},
	{"rx_multicast_packets_5",
			offsetof(struct txgbevf_hw_stats, qp[5].vfmprc)},
	{"rx_multicast_packets_6",
			offsetof(struct txgbevf_hw_stats, qp[6].vfmprc)},
	{"rx_multicast_packets_7",
			offsetof(struct txgbevf_hw_stats, qp[7].vfmprc)}
};

#define TXGBEVF_NB_XSTATS (sizeof(rte_txgbevf_stats_strings) / \
		sizeof(rte_txgbevf_stats_strings[0]))
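/*
 * Note: each entry above pairs an xstats display name with a byte offset
 * into struct txgbevf_hw_stats; txgbevf_dev_xstats_get() below reads each
 * counter generically as
 *
 *	value = *(uint64_t *)((char *)hw_stats + strings[i].offset);
 */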
/*
 * Negotiate mailbox API version with the PF.
 * After reset API version is always set to the basic one (txgbe_mbox_api_10).
 * Then we try to negotiate starting with the most recent one.
 * If all negotiation attempts fail, then we will proceed with
 * the default one (txgbe_mbox_api_10).
 */
static void
txgbevf_negotiate_api(struct txgbe_hw *hw)
{
	int32_t i;

	/* start with highest supported, proceed down */
	static const int sup_ver[] = {
		txgbe_mbox_api_13,
		txgbe_mbox_api_12,
		txgbe_mbox_api_11,
		txgbe_mbox_api_10,
	};

	for (i = 0; i < ARRAY_SIZE(sup_ver); i++) {
		if (txgbevf_negotiate_api_version(hw, sup_ver[i]) == 0)
			break;
	}
}

static void
generate_random_mac_addr(struct rte_ether_addr *mac_addr)
{
	uint64_t random;

	/* Set Organizationally Unique Identifier (OUI) prefix. */
	mac_addr->addr_bytes[0] = 0x00;
	mac_addr->addr_bytes[1] = 0x09;
	mac_addr->addr_bytes[2] = 0xC0;
	/* Force indication of locally assigned MAC address. */
	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
	/* Generate the last 3 bytes of the MAC address with a random number. */
	random = rte_rand();
	memcpy(&mac_addr->addr_bytes[3], &random, 3);
}
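/*
 * Note: with RTE_ETHER_LOCAL_ADMIN_ADDR (0x02) OR'ed into byte 0, the
 * generated address takes the form 02:09:C0:xx:xx:xx, i.e. a locally
 * administered address that cannot clash with a vendor-assigned one.
 */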
/*
 * Virtual Function device init
 */
static int
eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
	int err;
	uint32_t tc, tcs;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
	struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
	struct rte_ether_addr *perm_addr =
		(struct rte_ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbevf_eth_dev_ops;
	eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct txgbe_tx_queue *txq;
		uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not have been initialized by the primary process
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
			txgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE,
				"No TX queues configured yet. Using default TX function.");
		}

		txgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR,
			"Shared code init failed for txgbevf: %d", err);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.init_params(hw);

	/* Reset the hw statistics */
	txgbevf_dev_stats_reset(eth_dev);

	/* Disable the interrupts for VF */
	txgbevf_intr_disable(eth_dev);

	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
	err = hw->mac.reset_hw(hw);

	/*
	 * The VF reset operation returns TXGBE_ERR_INVALID_MAC_ADDR when
	 * the underlying PF driver has not assigned a MAC address to the VF.
	 * In this case, assign a random MAC address.
	 */
	if (err != 0 && err != TXGBE_ERR_INVALID_MAC_ADDR) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
		/*
		 * This error code will be propagated to the app by
		 * rte_eth_dev_reset, so use a public error code rather than
		 * the internal-only TXGBE_ERR_RESET_FAILED
		 */
		return -EAGAIN;
	}

	/* negotiate mailbox API version to use with the PF. */
	txgbevf_negotiate_api(hw);

	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
	txgbevf_get_queues(hw, &tcs, &tc);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbevf", RTE_ETHER_ADDR_LEN *
					hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %u bytes needed to store MAC addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (rte_is_zero_ether_addr(perm_addr)) {
		generate_random_mac_addr(perm_addr);
		err = txgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
		if (err) {
			rte_free(eth_dev->data->mac_addrs);
			eth_dev->data->mac_addrs = NULL;
			return err;
		}
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     RTE_ETHER_ADDR_PRT_FMT,
			     RTE_ETHER_ADDR_BYTES(perm_addr));
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);

	/* reset the hardware with the new settings */
	err = hw->mac.start_hw(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err);
		return -EIO;
	}

	/* enter promiscuous mode */
	txgbevf_dev_promiscuous_enable(eth_dev);

	rte_intr_callback_register(intr_handle,
				   txgbevf_dev_interrupt_handler, eth_dev);
	rte_intr_enable(intr_handle);
	txgbevf_intr_enable(eth_dev);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "txgbe_mac_raptor_vf");

	return 0;
}

/* Virtual Function device uninit */
static int
eth_txgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbevf_dev_close(eth_dev);

	return 0;
}

static int eth_txgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct txgbe_adapter), eth_txgbevf_dev_init);
}

static int eth_txgbevf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_txgbevf_dev_uninit);
}

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_txgbevf_pmd = {
	.id_table = pci_id_txgbevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_txgbevf_pci_probe,
	.remove = eth_txgbevf_pci_remove,
};

static int txgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int limit)
{
	unsigned int i;

	if (limit < TXGBEVF_NB_XSTATS && xstats_names != NULL)
		return -ENOMEM;

	if (xstats_names != NULL)
		for (i = 0; i < TXGBEVF_NB_XSTATS; i++)
			snprintf(xstats_names[i].name,
				sizeof(xstats_names[i].name),
				"%s", rte_txgbevf_stats_strings[i].name);
	return TXGBEVF_NB_XSTATS;
}
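/*
 * Usage note (illustrative, not part of the driver): applications typically
 * make the two-phase query that the handler above supports:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);   // count only
 *	names = calloc(n, sizeof(*names));
 *	rte_eth_xstats_get_names(port_id, names, n);          // fill names
 */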
static void
txgbevf_update_stats(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
			TXGBE_DEV_STATS(dev);
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		/* Good Rx packets, include VF loopback */
		TXGBE_UPDCNT32(TXGBE_QPRXPKT(i),
			hw_stats->qp[i].last_vfgprc, hw_stats->qp[i].vfgprc);

		/* Good Rx octets, include VF loopback */
		TXGBE_UPDCNT36(TXGBE_QPRXOCTL(i),
			hw_stats->qp[i].last_vfgorc, hw_stats->qp[i].vfgorc);

		/* Rx Multicast packets */
		TXGBE_UPDCNT32(TXGBE_QPRXMPKT(i),
			hw_stats->qp[i].last_vfmprc, hw_stats->qp[i].vfmprc);
	}
	hw->rx_loaded = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		/* Good Tx packets, include VF loopback */
		TXGBE_UPDCNT32(TXGBE_QPTXPKT(i),
			hw_stats->qp[i].last_vfgptc, hw_stats->qp[i].vfgptc);

		/* Good Tx octets, include VF loopback */
		TXGBE_UPDCNT36(TXGBE_QPTXOCTL(i),
			hw_stats->qp[i].last_vfgotc, hw_stats->qp[i].vfgotc);
	}
	hw->offset_loaded = 0;
}
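/*
 * Note on the accumulation macros above: the hardware queue counters are
 * narrower than 64 bits (32-bit packet counters and 36-bit octet counters,
 * going by the TXGBE_UPDCNT32/TXGBE_UPDCNT36 names), so each macro is
 * expected to add the delta since the saved last_* snapshot to the 64-bit
 * software counter, accounting for register wrap-around.
 */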
static int
txgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       unsigned int n)
{
	struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
			TXGBE_DEV_STATS(dev);
	unsigned int i;

	if (n < TXGBEVF_NB_XSTATS)
		return TXGBEVF_NB_XSTATS;

	txgbevf_update_stats(dev);

	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i < TXGBEVF_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_txgbevf_stats_strings[i].offset);
	}

	return TXGBEVF_NB_XSTATS;
}

static int
txgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
			TXGBE_DEV_STATS(dev);
	uint32_t i;

	txgbevf_update_stats(dev);

	if (stats == NULL)
		return -EINVAL;

	stats->ipackets = 0;
	stats->ibytes = 0;
	stats->opackets = 0;
	stats->obytes = 0;

	for (i = 0; i < 8; i++) {
		stats->ipackets += hw_stats->qp[i].vfgprc;
		stats->ibytes += hw_stats->qp[i].vfgorc;
		stats->opackets += hw_stats->qp[i].vfgptc;
		stats->obytes += hw_stats->qp[i].vfgotc;
	}

	return 0;
}

static int
txgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct txgbevf_hw_stats *hw_stats = (struct txgbevf_hw_stats *)
			TXGBE_DEV_STATS(dev);
	uint32_t i;

	/* Sync HW register to the last stats */
	txgbevf_dev_stats_get(dev, NULL);

	/* reset HW current stats */
	for (i = 0; i < 8; i++) {
		hw_stats->qp[i].vfgprc = 0;
		hw_stats->qp[i].vfgorc = 0;
		hw_stats->qp[i].vfgptc = 0;
		hw_stats->qp[i].vfgotc = 0;
	}

	return 0;
}
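/*
 * Note: the two loops above iterate over a fixed 8 queue pairs, matching the
 * qp[0]..qp[7] entries exposed in rte_txgbevf_stats_strings[], rather than
 * the currently configured queue counts.
 */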
static int
txgbevf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = TXGBE_FRAME_SIZE_MAX;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	return 0;
}

static int
txgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return txgbe_dev_link_update_share(dev, wait_to_complete);
}

/*
 * Virtual Function operations
 */
static void
txgbevf_intr_disable(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	/* Clear interrupt mask to stop interrupts from being generated */
	wr32(hw, TXGBE_VFIMS, TXGBE_VFIMS_MASK);

	txgbe_flush(hw);

	/* Clear mask value. */
	intr->mask_misc = TXGBE_VFIMS_MASK;
}

static void
txgbevf_intr_enable(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	PMD_INIT_FUNC_TRACE();

	/* VF enable interrupt autoclean */
	wr32(hw, TXGBE_VFIMC, TXGBE_VFIMC_MASK);

	txgbe_flush(hw);

	intr->mask_misc = 0;
}
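/*
 * Note on the register pair used above (inferred from the mask-set/clear
 * convention the names suggest): writing 1s to TXGBE_VFIMS sets mask bits
 * and thus disables the corresponding interrupts, while writing 1s to
 * TXGBE_VFIMC clears those mask bits and re-enables them; intr->mask_misc
 * mirrors the current mask in software.
 */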
static int
txgbevf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);

	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
		     dev->data->port_id);

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	/*
	 * VF has no ability to enable/disable HW CRC
	 * Keep the persistent behavior the same as Host PF
	 */
#ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
	if (conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
		conf->rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
	}
#else
	if (!(conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)) {
		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
	}
#endif

	/*
	 * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
	 * allocation or vector Rx preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;

	return 0;
}

static int
txgbevf_dev_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t intr_vector = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	int err, mask = 0;

	PMD_INIT_FUNC_TRACE();

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	err = hw->mac.reset_hw(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
		return err;
	}
	hw->mac.get_link_status = true;
	hw->dev_start = true;

	/* negotiate mailbox API version to use with the PF. */
	txgbevf_negotiate_api(hw);

	txgbevf_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = txgbevf_dev_rx_init(dev);

	/*
	 * In that case, reuse the MAC address assigned by VF
	 * initialization.
	 */
	if (err != 0 && err != TXGBE_ERR_INVALID_MAC_ADDR) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
		txgbe_dev_clear_queues(dev);
		return err;
	}

	/* Set vfta */
	txgbevf_set_vfta_all(dev, 1);

	/* Set HW strip */
	mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
	       RTE_ETH_VLAN_EXTEND_MASK;
	err = txgbevf_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
		txgbe_dev_clear_queues(dev);
		return err;
	}

	txgbevf_dev_rxtx_start(dev);

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		/* According to datasheet, only vector 0/1/2 can be used,
		 * now only one vector is used for Rx queue
		 */
		intr_vector = 1;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
					    dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}
	txgbevf_configure_msix(dev);

	/* When a VF port is bound to VFIO-PCI, only the miscellaneous
	 * interrupt is mapped to VFIO vector 0 in eth_txgbevf_dev_init().
	 * If the VFIO interrupt mapping set in eth_txgbevf_dev_init() is not
	 * cleared, the following rte_intr_enable() will fail when it tries
	 * to map Rx queue interrupts to other VFIO vectors.
	 * So clear the uio/vfio intr/eventfd first to avoid failure.
	 */
	rte_intr_disable(intr_handle);

	rte_intr_enable(intr_handle);

	/* Re-enable interrupt for VF */
	txgbevf_intr_enable(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	txgbevf_dev_link_update(dev, 0);

	hw->adapter_stopped = false;

	return 0;
}
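/*
 * Note: dev_start deliberately repeats the reset_hw()/txgbevf_negotiate_api()
 * pair performed at init time, since a VF reset drops the mailbox API back to
 * the basic txgbe_mbox_api_10 version (see txgbevf_negotiate_api() above).
 */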
static int
txgbevf_dev_stop(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);

	txgbevf_intr_disable(dev);

	hw->adapter_stopped = 1;
	hw->mac.stop_hw(hw);

	/*
	 * Clear what we set, but we still keep shadow_vfta to
	 * restore after device starts
	 */
	txgbevf_set_vfta_all(dev, 0);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	txgbe_dev_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	adapter->rss_reta_updated = 0;
	hw->dev_start = false;

	return 0;
}

static int
txgbevf_dev_close(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw->mac.reset_hw(hw);

	ret = txgbevf_dev_stop(dev);

	txgbe_dev_free_queues(dev);

	/*
	 * Remove the VF MAC address to ensure
	 * that the VF traffic goes to the PF
	 * after stop, close and detach of the VF
	 */
	txgbevf_remove_mac_addr(dev, 0);

	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* Disable the interrupts for VF */
	txgbevf_intr_disable(dev);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     txgbevf_dev_interrupt_handler, dev);

	return ret;
}

/*
 * Reset VF device
 */
static int
txgbevf_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_txgbevf_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_txgbevf_dev_init(dev);

	return ret;
}

static void txgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	int i = 0, j = 0, vfta = 0, mask = 1;

	for (i = 0; i < TXGBE_VFTA_SIZE; i++) {
		vfta = shadow_vfta->vfta[i];
		if (vfta) {
			mask = 1;
			for (j = 0; j < 32; j++) {
				if (vfta & mask)
					hw->mac.set_vfta(hw, (i << 5) + j, 0,
							 on, false);
				mask <<= 1;
			}
		}
	}
}
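/*
 * Note: the shadow VFTA stores one bit per VLAN ID in 32-bit words, so
 * (i << 5) + j above reconstructs the VLAN ID from word index i and bit
 * index j; txgbevf_vlan_filter_set() below performs the inverse mapping
 * (vlan_id >> 5 selects the word, vlan_id & 0x1F selects the bit).
 */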
static int
txgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
	uint32_t vid_idx = 0;
	uint32_t vid_bit = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* vind is not used in VF driver, set to 0, check txgbe_set_vfta_vf */
	ret = hw->mac.set_vfta(hw, vlan_id, 0, !!on, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
		return ret;
	}
	vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));

	/* Save what we set and restore it after device reset */
	if (on)
		shadow_vfta->vfta[vid_idx] |= vid_bit;
	else
		shadow_vfta->vfta[vid_idx] &= ~vid_bit;

	return 0;
}

static void
txgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	if (queue >= hw->mac.max_rx_queues)
		return;

	ctrl = rd32(hw, TXGBE_RXCFG(queue));
	txgbe_dev_save_rx_queue(hw, queue);
	if (on)
		ctrl |= TXGBE_RXCFG_VLAN;
	else
		ctrl &= ~TXGBE_RXCFG_VLAN;
	wr32(hw, TXGBE_RXCFG(queue), 0);
	msec_delay(100);
	txgbe_dev_store_rx_queue(hw, queue);
	wr32m(hw, TXGBE_RXCFG(queue),
	      TXGBE_RXCFG_VLAN | TXGBE_RXCFG_ENA, ctrl);

	txgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}

static int
txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct txgbe_rx_queue *rxq;
	uint16_t i;
	int on = 0;

	/* The VF only supports the HW strip feature; others are not supported */
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			on = !!(rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
			txgbevf_vlan_strip_queue_set(dev, i, on);
		}
	}

	return 0;
}

static int
txgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	txgbe_config_vlan_strip_on_all_queues(dev, mask);

	txgbevf_vlan_offload_config(dev, mask);

	return 0;
}

static int
txgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t vec = TXGBE_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = TXGBE_RX_VEC_START;
	intr->mask_misc &= ~(1 << vec);
	RTE_SET_USED(queue_id);
	wr32(hw, TXGBE_VFIMC, ~intr->mask_misc);

	rte_intr_enable(intr_handle);

	return 0;
}

static int
txgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t vec = TXGBE_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = TXGBE_RX_VEC_START;
	intr->mask_misc |= (1 << vec);
	RTE_SET_USED(queue_id);
	wr32(hw, TXGBE_VFIMS, intr->mask_misc);

	return 0;
}

static void
txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
		     uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= TXGBE_VFIVAR_VLD;
		tmp = rd32(hw, TXGBE_VFIVARMISC);
		tmp &= ~0xFF;
		tmp |= msix_vector;
		wr32(hw, TXGBE_VFIVARMISC, tmp);
	} else {
		/* rx or tx cause */
		/* Workaround for lost ICR */
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		wr32(hw, TXGBE_VFIVAR(queue >> 1), tmp);
	}
}
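/*
 * Note on the IVAR layout implied by the index math above: each 32-bit
 * TXGBE_VFIVAR register appears to pack four 8-bit vector entries covering
 * two queues (queue >> 1 selects the register), with Rx/Tx selected by
 * 8 * direction and the odd/even queue by 16 * (queue & 1).
 */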
static void
txgbevf_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t q_idx;
	uint32_t vector_idx = TXGBE_MISC_VEC_ID;
	uint32_t base = TXGBE_MISC_VEC_ID;

	/* Configure VF other cause ivar */
	txgbevf_set_ivar_map(hw, -1, 1, vector_idx);

	/* Don't configure the MSI-X register if no mapping is done
	 * between intr vector and event fd.
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = TXGBE_RX_VEC_START;
		vector_idx = TXGBE_RX_VEC_START;
	}

	/* Configure all RX queues of VF */
	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
		/* Force all queues to use vector 0,
		 * as TXGBE_VF_MAXMSIVECOTR = 1
		 */
		txgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
		rte_intr_vec_list_index_set(intr_handle, q_idx,
					    vector_idx);
		if (vector_idx < base + rte_intr_nb_efd_get(intr_handle) - 1)
			vector_idx++;
	}

	/* As the Rx queue settings above show, all queues use vector 0.
	 * Set only the ITR value of TXGBE_MISC_VEC_ID.
	 */
	wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
	     TXGBE_ITR_IVAL(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
	     | TXGBE_ITR_WRDSA);
}

static int
txgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		     __rte_unused uint32_t index,
		     __rte_unused uint32_t pool)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int err;

	/*
	 * On a VF, adding again the same MAC addr is not an idempotent
	 * operation. Trap this case to avoid exhausting the [very limited]
	 * set of PF resources used to store VF MAC addresses.
	 */
	if (memcmp(hw->mac.perm_addr, mac_addr,
		   sizeof(struct rte_ether_addr)) == 0)
		return -1;
	err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
	if (err != 0)
		PMD_DRV_LOG(ERR, "Unable to add MAC address "
			    RTE_ETHER_ADDR_PRT_FMT " - err=%d",
			    RTE_ETHER_ADDR_BYTES(mac_addr), err);
	return err;
}

static void
txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_ether_addr *perm_addr =
		(struct rte_ether_addr *)hw->mac.perm_addr;
	struct rte_ether_addr *mac_addr;
	uint32_t i;
	int err;

	/*
	 * The TXGBE_VF_SET_MACVLAN command of the txgbe-pf driver does
	 * not support the deletion of a given MAC address.
	 * Instead, it requires deleting all MAC addresses, then re-adding
	 * every MAC address with the exception of the one to be deleted.
	 */
	(void)txgbevf_set_uc_addr_vf(hw, 0, NULL);

	/*
	 * Re-add all MAC addresses, with the exception of the deleted one
	 * and of the permanent MAC address.
	 */
	for (i = 0, mac_addr = dev->data->mac_addrs;
	     i < hw->mac.num_rar_entries; i++, mac_addr++) {
		/* Skip the deleted MAC address */
		if (i == index)
			continue;
		/* Skip NULL MAC addresses */
		if (rte_is_zero_ether_addr(mac_addr))
			continue;
		/* Skip the permanent MAC address */
		if (memcmp(perm_addr, mac_addr,
			   sizeof(struct rte_ether_addr)) == 0)
			continue;
		err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
		if (err != 0)
			PMD_DRV_LOG(ERR, "Failed to re-add MAC address "
				    RTE_ETHER_ADDR_PRT_FMT " err=%d",
				    RTE_ETHER_ADDR_BYTES(mac_addr), err);
	}
}

static int
txgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
			     struct rte_ether_addr *addr)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	hw->mac.set_rar(hw, 0, (void *)addr, 0, 0);

	return 0;
}
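/*
 * Frame size bookkeeping for the MTU handler below: max_frame adds the
 * Ethernet header (14 bytes) and CRC (4 bytes) to the requested MTU, so a
 * standard 1500-byte MTU corresponds to a 1518-byte frame.
 */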
static int
txgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct txgbe_hw *hw;
	uint32_t max_frame = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	struct rte_eth_dev_data *dev_data = dev->data;

	hw = TXGBE_DEV_HW(dev);

	if (mtu < RTE_ETHER_MIN_MTU ||
	    max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
		return -EINVAL;

	/* If the device is started, refuse an MTU that requires the support
	 * of scattered packets when this feature has not been enabled before.
	 */
	if (dev_data->dev_started && !dev_data->scattered_rx &&
	    (max_frame + 2 * TXGBE_VLAN_TAG_SIZE >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	/*
	 * When supported by the underlying PF driver, use the TXGBE_VF_SET_MTU
	 * request of the version 2.0 of the mailbox API.
	 * For now, use the TXGBE_VF_SET_LPE request of the version 1.0
	 * of the mailbox API.
	 */
	if (txgbevf_rlpml_set_vf(hw, max_frame))
		return -EINVAL;

	return 0;
}

static int
txgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;

	while ((reg_group = txgbevf_regs[g_ind++]))
		count += txgbe_regs_group_count(reg_group);

	return count;
}

static int
txgbevf_get_regs(struct rte_eth_dev *dev,
		 struct rte_dev_reg_info *regs)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;

	if (data == NULL) {
		regs->length = txgbevf_get_reg_length(dev);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Support only full register dump */
	if (regs->length == 0 ||
	    regs->length == (uint32_t)txgbevf_get_reg_length(dev)) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
				hw->device_id;
		while ((reg_group = txgbevf_regs[g_ind++]))
			count += txgbe_read_regs_group(dev, &data[count],
						       reg_group);
		return 0;
	}

	return -ENOTSUP;
}
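/*
 * Note: txgbevf_get_regs() above follows the usual ethdev two-phase pattern:
 * a first call with regs->data == NULL only reports the dump length and word
 * width, after which the caller allocates the buffer and calls again for the
 * full register dump.
 */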
static int
txgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_PROMISC)) {
	case 0:
		ret = 0;
		break;
	case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

static int
txgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_NONE)) {
	case 0:
		ret = 0;
		break;
	case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

static int
txgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_ALLMULTI)) {
	case 0:
		ret = 0;
		break;
	case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

static int
txgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	int ret;

	switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_MULTI)) {
	case 0:
		ret = 0;
		break;
	case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EAGAIN;
		break;
	}

	return ret;
}

static void txgbevf_mbx_process(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	u32 in_msg = 0;

	/* peek the message first */
	in_msg = rd32(hw, TXGBE_VFMBX);

	/* PF reset VF event */
	if (in_msg == TXGBE_PF_CONTROL_MSG) {
		/* dummy mbx read to ack pf */
		if (txgbe_read_mbx(hw, &in_msg, 1, 0))
			return;
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					     NULL);
	}
}
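/*
 * Interrupt flow for the handlers below: get_status masks VF interrupts and
 * latches the pending causes from the read-on-clear TXGBE_VFICR register,
 * then action services the mailbox (PF reset notifications) and re-enables
 * interrupts via txgbevf_intr_enable().
 */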
static int
txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	txgbevf_intr_disable(dev);

	/* read-on-clear nic registers here */
	eicr = rd32(hw, TXGBE_VFICR);
	intr->flags = 0;

	/* only one misc vector supported - mailbox */
	eicr &= TXGBE_VFICR_MASK;
	/* Workaround for lost ICR */
	intr->flags |= TXGBE_FLAG_MAILBOX;

	return 0;
}

static int
txgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);

	if (intr->flags & TXGBE_FLAG_MAILBOX) {
		txgbevf_mbx_process(dev);
		intr->flags &= ~TXGBE_FLAG_MAILBOX;
	}

	txgbevf_intr_enable(dev);

	return 0;
}

static void
txgbevf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	txgbevf_dev_interrupt_get_status(dev);
	txgbevf_dev_interrupt_action(dev);
}

/*
 * dev_ops for virtual function, only the bare necessities for basic VF
 * operation have been implemented
 */
static const struct eth_dev_ops txgbevf_eth_dev_ops = {
	.dev_configure        = txgbevf_dev_configure,
	.dev_start            = txgbevf_dev_start,
	.dev_stop             = txgbevf_dev_stop,
	.link_update          = txgbevf_dev_link_update,
	.stats_get            = txgbevf_dev_stats_get,
	.xstats_get           = txgbevf_dev_xstats_get,
	.stats_reset          = txgbevf_dev_stats_reset,
	.xstats_reset         = txgbevf_dev_stats_reset,
	.xstats_get_names     = txgbevf_dev_xstats_get_names,
	.dev_close            = txgbevf_dev_close,
	.dev_reset            = txgbevf_dev_reset,
	.promiscuous_enable   = txgbevf_dev_promiscuous_enable,
	.promiscuous_disable  = txgbevf_dev_promiscuous_disable,
	.allmulticast_enable  = txgbevf_dev_allmulticast_enable,
	.allmulticast_disable = txgbevf_dev_allmulticast_disable,
	.dev_infos_get        = txgbevf_dev_info_get,
	.dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
	.mtu_set              = txgbevf_dev_set_mtu,
	.vlan_filter_set      = txgbevf_vlan_filter_set,
	.vlan_strip_queue_set = txgbevf_vlan_strip_queue_set,
	.vlan_offload_set     = txgbevf_vlan_offload_set,
	.rx_queue_setup       = txgbe_dev_rx_queue_setup,
	.rx_queue_release     = txgbe_dev_rx_queue_release,
	.tx_queue_setup       = txgbe_dev_tx_queue_setup,
	.tx_queue_release     = txgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = txgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = txgbevf_dev_rx_queue_intr_disable,
	.mac_addr_add         = txgbevf_add_mac_addr,
	.mac_addr_remove      = txgbevf_remove_mac_addr,
	.set_mc_addr_list     = txgbe_dev_set_mc_addr_list,
	.rxq_info_get         = txgbe_rxq_info_get,
	.txq_info_get         = txgbe_txq_info_get,
	.mac_addr_set         = txgbevf_set_default_mac_addr,
	.get_reg              = txgbevf_get_regs,
	.reta_update          = txgbe_dev_rss_reta_update,
	.reta_query           = txgbe_dev_rss_reta_query,
	.rss_hash_update      = txgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = txgbe_dev_rss_hash_conf_get,
	.tx_done_cleanup      = txgbe_dev_tx_done_cleanup,
};

RTE_PMD_REGISTER_PCI(net_txgbe_vf, rte_txgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe_vf, pci_id_txgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe_vf, "* igb_uio | vfio-pci");