1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) 2 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved. 3 */ 4 5 #include <rte_malloc.h> 6 #include <ethdev_driver.h> 7 8 #include "ionic.h" 9 #include "ionic_logs.h" 10 #include "ionic_lif.h" 11 #include "ionic_ethdev.h" 12 #include "ionic_rx_filter.h" 13 #include "ionic_rxtx.h" 14 15 /* queuetype support level */ 16 static const uint8_t ionic_qtype_vers[IONIC_QTYPE_MAX] = { 17 [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */ 18 [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */ 19 [IONIC_QTYPE_RXQ] = 2, /* 0 = Base version with CQ+SG support 20 * 1 = ... with EQ 21 * 2 = ... with CMB 22 */ 23 [IONIC_QTYPE_TXQ] = 3, /* 0 = Base version with CQ+SG support 24 * 1 = ... with Tx SG version 1 25 * 2 = ... with EQ 26 * 3 = ... with CMB 27 */ 28 }; 29 30 static int ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr); 31 static int ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr); 32 33 int 34 ionic_qcq_enable(struct ionic_qcq *qcq) 35 { 36 struct ionic_queue *q = &qcq->q; 37 struct ionic_lif *lif = q->lif; 38 struct ionic_admin_ctx ctx = { 39 .pending_work = true, 40 .cmd.q_control = { 41 .opcode = IONIC_CMD_Q_CONTROL, 42 .type = q->type, 43 .index = rte_cpu_to_le_32(q->index), 44 .oper = IONIC_Q_ENABLE, 45 }, 46 }; 47 48 return ionic_adminq_post_wait(lif, &ctx); 49 } 50 51 int 52 ionic_qcq_disable(struct ionic_qcq *qcq) 53 { 54 struct ionic_queue *q = &qcq->q; 55 struct ionic_lif *lif = q->lif; 56 struct ionic_admin_ctx ctx = { 57 .pending_work = true, 58 .cmd.q_control = { 59 .opcode = IONIC_CMD_Q_CONTROL, 60 .type = q->type, 61 .index = rte_cpu_to_le_32(q->index), 62 .oper = IONIC_Q_DISABLE, 63 }, 64 }; 65 66 return ionic_adminq_post_wait(lif, &ctx); 67 } 68 69 void 70 ionic_lif_stop(struct ionic_lif *lif) 71 { 72 uint32_t i; 73 74 IONIC_PRINT_CALL(); 75 76 lif->state &= ~IONIC_LIF_F_UP; 77 78 for (i = 0; i < lif->nrxqcqs; i++) { 79 struct ionic_qcq *rxq = lif->rxqcqs[i]; 80 if (rxq->flags & IONIC_QCQ_F_INITED) 81 (void)ionic_dev_rx_queue_stop(lif->eth_dev, i); 82 } 83 84 for (i = 0; i < lif->ntxqcqs; i++) { 85 struct ionic_qcq *txq = lif->txqcqs[i]; 86 if (txq->flags & IONIC_QCQ_F_INITED) 87 (void)ionic_dev_tx_queue_stop(lif->eth_dev, i); 88 } 89 } 90 91 void 92 ionic_lif_reset(struct ionic_lif *lif) 93 { 94 struct ionic_dev *idev = &lif->adapter->idev; 95 int err; 96 97 IONIC_PRINT_CALL(); 98 99 ionic_dev_cmd_lif_reset(idev); 100 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); 101 if (err) 102 IONIC_PRINT(WARNING, "Failed to reset %s", lif->name); 103 } 104 105 static void 106 ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats) 107 { 108 struct ionic_lif_stats *ls = &lif->info->stats; 109 uint32_t i; 110 uint32_t num_rx_q_counters = RTE_MIN(lif->nrxqcqs, (uint32_t) 111 RTE_ETHDEV_QUEUE_STAT_CNTRS); 112 uint32_t num_tx_q_counters = RTE_MIN(lif->ntxqcqs, (uint32_t) 113 RTE_ETHDEV_QUEUE_STAT_CNTRS); 114 115 memset(stats, 0, sizeof(*stats)); 116 117 if (ls == NULL) { 118 IONIC_PRINT(DEBUG, "Stats on port %u not yet initialized", 119 lif->port_id); 120 return; 121 } 122 123 /* RX */ 124 125 stats->ipackets = ls->rx_ucast_packets + 126 ls->rx_mcast_packets + 127 ls->rx_bcast_packets; 128 129 stats->ibytes = ls->rx_ucast_bytes + 130 ls->rx_mcast_bytes + 131 ls->rx_bcast_bytes; 132 133 for (i = 0; i < lif->nrxqcqs; i++) { 134 struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats.rx; 135 stats->imissed += 136 rx_stats->no_cb_arg + 137 rx_stats->bad_cq_status + 138 rx_stats->no_room + 139 rx_stats->bad_len; 140 } 141 142 stats->imissed += 143 ls->rx_ucast_drop_packets + 144 ls->rx_mcast_drop_packets + 145 ls->rx_bcast_drop_packets; 146 147 stats->imissed += 148 ls->rx_queue_empty + 149 ls->rx_dma_error + 150 ls->rx_queue_disabled + 151 ls->rx_desc_fetch_error + 152 ls->rx_desc_data_error; 153 154 for (i = 0; i < num_rx_q_counters; i++) { 155 struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats.rx; 156 stats->q_ipackets[i] = rx_stats->packets; 157 stats->q_ibytes[i] = rx_stats->bytes; 158 stats->q_errors[i] = 159 rx_stats->no_cb_arg + 160 rx_stats->bad_cq_status + 161 rx_stats->no_room + 162 rx_stats->bad_len; 163 } 164 165 /* TX */ 166 167 stats->opackets = ls->tx_ucast_packets + 168 ls->tx_mcast_packets + 169 ls->tx_bcast_packets; 170 171 stats->obytes = ls->tx_ucast_bytes + 172 ls->tx_mcast_bytes + 173 ls->tx_bcast_bytes; 174 175 for (i = 0; i < lif->ntxqcqs; i++) { 176 struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats.tx; 177 stats->oerrors += tx_stats->drop; 178 } 179 180 stats->oerrors += 181 ls->tx_ucast_drop_packets + 182 ls->tx_mcast_drop_packets + 183 ls->tx_bcast_drop_packets; 184 185 stats->oerrors += 186 ls->tx_dma_error + 187 ls->tx_queue_disabled + 188 ls->tx_desc_fetch_error + 189 ls->tx_desc_data_error; 190 191 for (i = 0; i < num_tx_q_counters; i++) { 192 struct ionic_tx_stats *tx_stats = &lif->txqcqs[i]->stats.tx; 193 stats->q_opackets[i] = tx_stats->packets; 194 stats->q_obytes[i] = tx_stats->bytes; 195 } 196 } 197 198 void 199 ionic_lif_get_stats(const struct ionic_lif *lif, 200 struct rte_eth_stats *stats) 201 { 202 ionic_lif_get_abs_stats(lif, stats); 203 204 stats->ipackets -= lif->stats_base.ipackets; 205 stats->opackets -= lif->stats_base.opackets; 206 stats->ibytes -= lif->stats_base.ibytes; 207 stats->obytes -= lif->stats_base.obytes; 208 stats->imissed -= lif->stats_base.imissed; 209 stats->ierrors -= lif->stats_base.ierrors; 210 stats->oerrors -= lif->stats_base.oerrors; 211 stats->rx_nombuf -= lif->stats_base.rx_nombuf; 212 } 213 214 void 215 ionic_lif_reset_stats(struct ionic_lif *lif) 216 { 217 uint32_t i; 218 219 for (i = 0; i < lif->nrxqcqs; i++) { 220 memset(&lif->rxqcqs[i]->stats.rx, 0, 221 sizeof(struct ionic_rx_stats)); 222 memset(&lif->txqcqs[i]->stats.tx, 0, 223 sizeof(struct ionic_tx_stats)); 224 } 225 226 ionic_lif_get_abs_stats(lif, &lif->stats_base); 227 } 228 229 void 230 ionic_lif_get_hw_stats(struct ionic_lif *lif, struct ionic_lif_stats *stats) 231 { 232 uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t); 233 uint64_t *stats64 = (uint64_t *)stats; 234 uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats; 235 uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base; 236 237 for (i = 0; i < count; i++) 238 stats64[i] = lif_stats64[i] - lif_stats64_base[i]; 239 } 240 241 void 242 ionic_lif_reset_hw_stats(struct ionic_lif *lif) 243 { 244 uint16_t i, count = sizeof(struct ionic_lif_stats) / sizeof(uint64_t); 245 uint64_t *lif_stats64 = (uint64_t *)&lif->info->stats; 246 uint64_t *lif_stats64_base = (uint64_t *)&lif->lif_stats_base; 247 248 for (i = 0; i < count; i++) 249 lif_stats64_base[i] = lif_stats64[i]; 250 } 251 252 static int 253 ionic_lif_addr_add(struct ionic_lif *lif, const uint8_t *addr) 254 { 255 struct ionic_admin_ctx ctx = { 256 .pending_work = true, 257 .cmd.rx_filter_add = { 258 .opcode = IONIC_CMD_RX_FILTER_ADD, 259 .match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_MAC), 260 }, 261 }; 262 int err; 263 264 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, RTE_ETHER_ADDR_LEN); 265 266 err = ionic_adminq_post_wait(lif, &ctx); 267 if (err) 268 return err; 269 270 IONIC_PRINT(INFO, "rx_filter add (id %d)", 271 rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id)); 272 273 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx); 274 } 275 276 static int 277 ionic_lif_addr_del(struct ionic_lif *lif, const uint8_t *addr) 278 { 279 struct ionic_admin_ctx ctx = { 280 .pending_work = true, 281 .cmd.rx_filter_del = { 282 .opcode = IONIC_CMD_RX_FILTER_DEL, 283 }, 284 }; 285 struct ionic_rx_filter *f; 286 int err; 287 288 IONIC_PRINT_CALL(); 289 290 rte_spinlock_lock(&lif->rx_filters.lock); 291 292 f = ionic_rx_filter_by_addr(lif, addr); 293 if (!f) { 294 rte_spinlock_unlock(&lif->rx_filters.lock); 295 return -ENOENT; 296 } 297 298 ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id); 299 ionic_rx_filter_free(f); 300 301 rte_spinlock_unlock(&lif->rx_filters.lock); 302 303 err = ionic_adminq_post_wait(lif, &ctx); 304 if (err) 305 return err; 306 307 IONIC_PRINT(INFO, "rx_filter del (id %d)", 308 rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id)); 309 310 return 0; 311 } 312 313 int 314 ionic_dev_add_mac(struct rte_eth_dev *eth_dev, 315 struct rte_ether_addr *mac_addr, 316 uint32_t index __rte_unused, uint32_t pool __rte_unused) 317 { 318 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 319 320 IONIC_PRINT_CALL(); 321 322 return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr); 323 } 324 325 void 326 ionic_dev_remove_mac(struct rte_eth_dev *eth_dev, uint32_t index) 327 { 328 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 329 struct ionic_adapter *adapter = lif->adapter; 330 struct rte_ether_addr *mac_addr; 331 332 IONIC_PRINT_CALL(); 333 334 if (index >= adapter->max_mac_addrs) { 335 IONIC_PRINT(WARNING, 336 "Index %u is above MAC filter limit %u", 337 index, adapter->max_mac_addrs); 338 return; 339 } 340 341 mac_addr = ð_dev->data->mac_addrs[index]; 342 343 if (!rte_is_valid_assigned_ether_addr(mac_addr)) 344 return; 345 346 ionic_lif_addr_del(lif, (const uint8_t *)mac_addr); 347 } 348 349 int 350 ionic_dev_set_mac(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr) 351 { 352 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 353 354 IONIC_PRINT_CALL(); 355 356 if (mac_addr == NULL) { 357 IONIC_PRINT(NOTICE, "New mac is null"); 358 return -1; 359 } 360 361 if (!rte_is_zero_ether_addr((struct rte_ether_addr *)lif->mac_addr)) { 362 IONIC_PRINT(INFO, "Deleting mac addr %pM", 363 lif->mac_addr); 364 ionic_lif_addr_del(lif, lif->mac_addr); 365 memset(lif->mac_addr, 0, RTE_ETHER_ADDR_LEN); 366 } 367 368 IONIC_PRINT(INFO, "Updating mac addr"); 369 370 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)lif->mac_addr); 371 372 return ionic_lif_addr_add(lif, (const uint8_t *)mac_addr); 373 } 374 375 static int 376 ionic_vlan_rx_add_vid(struct ionic_lif *lif, uint16_t vid) 377 { 378 struct ionic_admin_ctx ctx = { 379 .pending_work = true, 380 .cmd.rx_filter_add = { 381 .opcode = IONIC_CMD_RX_FILTER_ADD, 382 .match = rte_cpu_to_le_16(IONIC_RX_FILTER_MATCH_VLAN), 383 .vlan.vlan = rte_cpu_to_le_16(vid), 384 }, 385 }; 386 int err; 387 388 err = ionic_adminq_post_wait(lif, &ctx); 389 if (err) 390 return err; 391 392 IONIC_PRINT(INFO, "rx_filter add VLAN %d (id %d)", vid, 393 rte_le_to_cpu_32(ctx.comp.rx_filter_add.filter_id)); 394 395 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, &ctx); 396 } 397 398 static int 399 ionic_vlan_rx_kill_vid(struct ionic_lif *lif, uint16_t vid) 400 { 401 struct ionic_admin_ctx ctx = { 402 .pending_work = true, 403 .cmd.rx_filter_del = { 404 .opcode = IONIC_CMD_RX_FILTER_DEL, 405 }, 406 }; 407 struct ionic_rx_filter *f; 408 int err; 409 410 IONIC_PRINT_CALL(); 411 412 rte_spinlock_lock(&lif->rx_filters.lock); 413 414 f = ionic_rx_filter_by_vlan(lif, vid); 415 if (!f) { 416 rte_spinlock_unlock(&lif->rx_filters.lock); 417 return -ENOENT; 418 } 419 420 ctx.cmd.rx_filter_del.filter_id = rte_cpu_to_le_32(f->filter_id); 421 ionic_rx_filter_free(f); 422 rte_spinlock_unlock(&lif->rx_filters.lock); 423 424 err = ionic_adminq_post_wait(lif, &ctx); 425 if (err) 426 return err; 427 428 IONIC_PRINT(INFO, "rx_filter del VLAN %d (id %d)", vid, 429 rte_le_to_cpu_32(ctx.cmd.rx_filter_del.filter_id)); 430 431 return 0; 432 } 433 434 int 435 ionic_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, 436 int on) 437 { 438 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 439 int err; 440 441 if (on) 442 err = ionic_vlan_rx_add_vid(lif, vlan_id); 443 else 444 err = ionic_vlan_rx_kill_vid(lif, vlan_id); 445 446 return err; 447 } 448 449 static void 450 ionic_lif_rx_mode(struct ionic_lif *lif, uint32_t rx_mode) 451 { 452 struct ionic_admin_ctx ctx = { 453 .pending_work = true, 454 .cmd.rx_mode_set = { 455 .opcode = IONIC_CMD_RX_MODE_SET, 456 .rx_mode = rte_cpu_to_le_16(rx_mode), 457 }, 458 }; 459 int err; 460 461 if (rx_mode & IONIC_RX_MODE_F_UNICAST) 462 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_UNICAST"); 463 if (rx_mode & IONIC_RX_MODE_F_MULTICAST) 464 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_MULTICAST"); 465 if (rx_mode & IONIC_RX_MODE_F_BROADCAST) 466 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_BROADCAST"); 467 if (rx_mode & IONIC_RX_MODE_F_PROMISC) 468 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_PROMISC"); 469 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) 470 IONIC_PRINT(DEBUG, "rx_mode IONIC_RX_MODE_F_ALLMULTI"); 471 472 err = ionic_adminq_post_wait(lif, &ctx); 473 if (err) 474 IONIC_PRINT(ERR, "Failure setting RX mode"); 475 } 476 477 static void 478 ionic_set_rx_mode(struct ionic_lif *lif, uint32_t rx_mode) 479 { 480 if (lif->rx_mode != rx_mode) { 481 lif->rx_mode = rx_mode; 482 ionic_lif_rx_mode(lif, rx_mode); 483 } 484 } 485 486 int 487 ionic_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) 488 { 489 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 490 uint32_t rx_mode = lif->rx_mode; 491 492 IONIC_PRINT_CALL(); 493 494 rx_mode |= IONIC_RX_MODE_F_PROMISC; 495 496 ionic_set_rx_mode(lif, rx_mode); 497 498 return 0; 499 } 500 501 int 502 ionic_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) 503 { 504 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 505 uint32_t rx_mode = lif->rx_mode; 506 507 rx_mode &= ~IONIC_RX_MODE_F_PROMISC; 508 509 ionic_set_rx_mode(lif, rx_mode); 510 511 return 0; 512 } 513 514 int 515 ionic_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) 516 { 517 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 518 uint32_t rx_mode = lif->rx_mode; 519 520 rx_mode |= IONIC_RX_MODE_F_ALLMULTI; 521 522 ionic_set_rx_mode(lif, rx_mode); 523 524 return 0; 525 } 526 527 int 528 ionic_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) 529 { 530 struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 531 uint32_t rx_mode = lif->rx_mode; 532 533 rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; 534 535 ionic_set_rx_mode(lif, rx_mode); 536 537 return 0; 538 } 539 540 int 541 ionic_lif_change_mtu(struct ionic_lif *lif, int new_mtu) 542 { 543 struct ionic_admin_ctx ctx = { 544 .pending_work = true, 545 .cmd.lif_setattr = { 546 .opcode = IONIC_CMD_LIF_SETATTR, 547 .attr = IONIC_LIF_ATTR_MTU, 548 .mtu = rte_cpu_to_le_32(new_mtu), 549 }, 550 }; 551 int err; 552 553 err = ionic_adminq_post_wait(lif, &ctx); 554 if (err) 555 return err; 556 557 return 0; 558 } 559 560 int 561 ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) 562 { 563 struct ionic_adapter *adapter = lif->adapter; 564 struct ionic_dev *idev = &adapter->idev; 565 unsigned long index; 566 567 /* 568 * Note: interrupt handler is called for index = 0 only 569 * (we use interrupts for the notifyq only anyway, 570 * which has index = 0) 571 */ 572 573 for (index = 0; index < adapter->nintrs; index++) 574 if (!adapter->intrs[index]) 575 break; 576 577 if (index == adapter->nintrs) 578 return -ENOSPC; 579 580 adapter->intrs[index] = true; 581 582 ionic_intr_init(idev, intr, index); 583 584 return 0; 585 } 586 587 void 588 ionic_intr_free(struct ionic_lif *lif, struct ionic_intr_info *intr) 589 { 590 if (intr->index != IONIC_INTR_NONE) 591 lif->adapter->intrs[intr->index] = false; 592 } 593 594 static int 595 ionic_qcq_alloc(struct ionic_lif *lif, uint8_t type, 596 uint32_t index, 597 const char *base, uint32_t flags, 598 uint32_t num_descs, 599 uint32_t desc_size, 600 uint32_t cq_desc_size, 601 uint32_t sg_desc_size, 602 struct ionic_qcq **qcq) 603 { 604 struct ionic_dev *idev = &lif->adapter->idev; 605 struct ionic_qcq *new; 606 uint32_t q_size, cq_size, sg_size, total_size; 607 void *q_base, *cq_base, *sg_base; 608 rte_iova_t q_base_pa = 0; 609 rte_iova_t cq_base_pa = 0; 610 rte_iova_t sg_base_pa = 0; 611 uint32_t socket_id = rte_socket_id(); 612 int err; 613 614 *qcq = NULL; 615 616 q_size = num_descs * desc_size; 617 cq_size = num_descs * cq_desc_size; 618 sg_size = num_descs * sg_desc_size; 619 620 total_size = RTE_ALIGN(q_size, PAGE_SIZE) + 621 RTE_ALIGN(cq_size, PAGE_SIZE); 622 /* 623 * Note: aligning q_size/cq_size is not enough due to cq_base address 624 * aligning as q_base could be not aligned to the page. 625 * Adding PAGE_SIZE. 626 */ 627 total_size += PAGE_SIZE; 628 629 if (flags & IONIC_QCQ_F_SG) { 630 total_size += RTE_ALIGN(sg_size, PAGE_SIZE); 631 total_size += PAGE_SIZE; 632 } 633 634 new = rte_zmalloc("ionic", sizeof(*new), 0); 635 if (!new) { 636 IONIC_PRINT(ERR, "Cannot allocate queue structure"); 637 return -ENOMEM; 638 } 639 640 new->lif = lif; 641 new->flags = flags; 642 643 new->q.info = rte_zmalloc("ionic", sizeof(*new->q.info) * num_descs, 0); 644 if (!new->q.info) { 645 IONIC_PRINT(ERR, "Cannot allocate queue info"); 646 err = -ENOMEM; 647 goto err_out_free_qcq; 648 } 649 650 new->q.type = type; 651 652 err = ionic_q_init(lif, idev, &new->q, index, num_descs, 653 desc_size, sg_desc_size); 654 if (err) { 655 IONIC_PRINT(ERR, "Queue initialization failed"); 656 goto err_out_free_info; 657 } 658 659 err = ionic_cq_init(&new->cq, num_descs); 660 if (err) { 661 IONIC_PRINT(ERR, "Completion queue initialization failed"); 662 goto err_out_free_info; 663 } 664 665 new->base_z = rte_eth_dma_zone_reserve(lif->eth_dev, 666 base /* name */, index /* queue_idx */, 667 total_size, IONIC_ALIGN, socket_id); 668 669 if (!new->base_z) { 670 IONIC_PRINT(ERR, "Cannot reserve queue DMA memory"); 671 err = -ENOMEM; 672 goto err_out_free_info; 673 } 674 675 new->base = new->base_z->addr; 676 new->base_pa = new->base_z->iova; 677 new->total_size = total_size; 678 679 q_base = new->base; 680 q_base_pa = new->base_pa; 681 682 cq_base = (void *)RTE_ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE); 683 cq_base_pa = RTE_ALIGN(q_base_pa + q_size, PAGE_SIZE); 684 685 if (flags & IONIC_QCQ_F_SG) { 686 sg_base = (void *)RTE_ALIGN((uintptr_t)cq_base + cq_size, 687 PAGE_SIZE); 688 sg_base_pa = RTE_ALIGN(cq_base_pa + cq_size, PAGE_SIZE); 689 ionic_q_sg_map(&new->q, sg_base, sg_base_pa); 690 } 691 692 IONIC_PRINT(DEBUG, "Q-Base-PA = %#jx CQ-Base-PA = %#jx " 693 "SG-base-PA = %#jx", 694 q_base_pa, cq_base_pa, sg_base_pa); 695 696 ionic_q_map(&new->q, q_base, q_base_pa); 697 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 698 ionic_cq_bind(&new->cq, &new->q); 699 700 *qcq = new; 701 702 return 0; 703 704 err_out_free_info: 705 rte_free(new->q.info); 706 err_out_free_qcq: 707 rte_free(new); 708 709 return err; 710 } 711 712 void 713 ionic_qcq_free(struct ionic_qcq *qcq) 714 { 715 if (qcq->base_z) { 716 qcq->base = NULL; 717 qcq->base_pa = 0; 718 rte_memzone_free(qcq->base_z); 719 qcq->base_z = NULL; 720 } 721 722 if (qcq->q.info) { 723 rte_free(qcq->q.info); 724 qcq->q.info = NULL; 725 } 726 727 rte_free(qcq); 728 } 729 730 int 731 ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t nrxq_descs, 732 struct ionic_qcq **qcq) 733 { 734 uint32_t flags; 735 int err = -ENOMEM; 736 737 flags = IONIC_QCQ_F_SG; 738 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, index, "rx", flags, 739 nrxq_descs, 740 sizeof(struct ionic_rxq_desc), 741 sizeof(struct ionic_rxq_comp), 742 sizeof(struct ionic_rxq_sg_desc), 743 &lif->rxqcqs[index]); 744 if (err) 745 return err; 746 747 *qcq = lif->rxqcqs[index]; 748 749 return 0; 750 } 751 752 int 753 ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t index, uint16_t ntxq_descs, 754 struct ionic_qcq **qcq) 755 { 756 uint32_t flags; 757 int err = -ENOMEM; 758 759 flags = IONIC_QCQ_F_SG; 760 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, index, "tx", flags, 761 ntxq_descs, 762 sizeof(struct ionic_txq_desc), 763 sizeof(struct ionic_txq_comp), 764 sizeof(struct ionic_txq_sg_desc_v1), 765 &lif->txqcqs[index]); 766 if (err) 767 return err; 768 769 *qcq = lif->txqcqs[index]; 770 771 return 0; 772 } 773 774 static int 775 ionic_admin_qcq_alloc(struct ionic_lif *lif) 776 { 777 uint32_t flags; 778 int err = -ENOMEM; 779 780 flags = 0; 781 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, 782 IONIC_ADMINQ_LENGTH, 783 sizeof(struct ionic_admin_cmd), 784 sizeof(struct ionic_admin_comp), 785 0, 786 &lif->adminqcq); 787 if (err) 788 return err; 789 790 return 0; 791 } 792 793 static int 794 ionic_notify_qcq_alloc(struct ionic_lif *lif) 795 { 796 struct ionic_qcq *nqcq; 797 struct ionic_dev *idev = &lif->adapter->idev; 798 uint32_t flags = 0; 799 int err = -ENOMEM; 800 801 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notify", 802 flags, 803 IONIC_NOTIFYQ_LENGTH, 804 sizeof(struct ionic_notifyq_cmd), 805 sizeof(union ionic_notifyq_comp), 806 0, 807 &nqcq); 808 if (err) 809 return err; 810 811 err = ionic_intr_alloc(lif, &nqcq->intr); 812 if (err) { 813 ionic_qcq_free(nqcq); 814 return err; 815 } 816 817 ionic_intr_mask_assert(idev->intr_ctrl, nqcq->intr.index, 818 IONIC_INTR_MASK_SET); 819 820 lif->notifyqcq = nqcq; 821 822 return 0; 823 } 824 825 static void * 826 ionic_bus_map_dbpage(struct ionic_adapter *adapter, int page_num) 827 { 828 char *vaddr = adapter->bars[IONIC_PCI_BAR_DBELL].vaddr; 829 830 if (adapter->num_bars <= IONIC_PCI_BAR_DBELL) 831 return NULL; 832 833 return (void *)&vaddr[page_num << PAGE_SHIFT]; 834 } 835 836 static void 837 ionic_lif_queue_identify(struct ionic_lif *lif) 838 { 839 struct ionic_adapter *adapter = lif->adapter; 840 struct ionic_dev *idev = &adapter->idev; 841 union ionic_q_identity *q_ident = &adapter->ident.txq; 842 uint32_t q_words = RTE_DIM(q_ident->words); 843 uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data); 844 uint32_t i, nwords, qtype; 845 int err; 846 847 for (qtype = 0; qtype < RTE_DIM(ionic_qtype_vers); qtype++) { 848 struct ionic_qtype_info *qti = &lif->qtype_info[qtype]; 849 850 /* Filter out the types this driver knows about */ 851 switch (qtype) { 852 case IONIC_QTYPE_ADMINQ: 853 case IONIC_QTYPE_NOTIFYQ: 854 case IONIC_QTYPE_RXQ: 855 case IONIC_QTYPE_TXQ: 856 break; 857 default: 858 continue; 859 } 860 861 memset(qti, 0, sizeof(*qti)); 862 863 ionic_dev_cmd_queue_identify(idev, IONIC_LIF_TYPE_CLASSIC, 864 qtype, ionic_qtype_vers[qtype]); 865 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); 866 if (err == -EINVAL) { 867 IONIC_PRINT(ERR, "qtype %d not supported\n", qtype); 868 continue; 869 } else if (err == -EIO) { 870 IONIC_PRINT(ERR, "q_ident failed, older FW\n"); 871 return; 872 } else if (err) { 873 IONIC_PRINT(ERR, "q_ident failed, qtype %d: %d\n", 874 qtype, err); 875 return; 876 } 877 878 nwords = RTE_MIN(q_words, cmd_words); 879 for (i = 0; i < nwords; i++) 880 q_ident->words[i] = ioread32(&idev->dev_cmd->data[i]); 881 882 qti->version = q_ident->version; 883 qti->supported = q_ident->supported; 884 qti->features = rte_le_to_cpu_64(q_ident->features); 885 qti->desc_sz = rte_le_to_cpu_16(q_ident->desc_sz); 886 qti->comp_sz = rte_le_to_cpu_16(q_ident->comp_sz); 887 qti->sg_desc_sz = rte_le_to_cpu_16(q_ident->sg_desc_sz); 888 qti->max_sg_elems = rte_le_to_cpu_16(q_ident->max_sg_elems); 889 qti->sg_desc_stride = 890 rte_le_to_cpu_16(q_ident->sg_desc_stride); 891 892 IONIC_PRINT(DEBUG, " qtype[%d].version = %d", 893 qtype, qti->version); 894 IONIC_PRINT(DEBUG, " qtype[%d].supported = %#x", 895 qtype, qti->supported); 896 IONIC_PRINT(DEBUG, " qtype[%d].features = %#jx", 897 qtype, qti->features); 898 IONIC_PRINT(DEBUG, " qtype[%d].desc_sz = %d", 899 qtype, qti->desc_sz); 900 IONIC_PRINT(DEBUG, " qtype[%d].comp_sz = %d", 901 qtype, qti->comp_sz); 902 IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_sz = %d", 903 qtype, qti->sg_desc_sz); 904 IONIC_PRINT(DEBUG, " qtype[%d].max_sg_elems = %d", 905 qtype, qti->max_sg_elems); 906 IONIC_PRINT(DEBUG, " qtype[%d].sg_desc_stride = %d", 907 qtype, qti->sg_desc_stride); 908 } 909 } 910 911 int 912 ionic_lif_alloc(struct ionic_lif *lif) 913 { 914 struct ionic_adapter *adapter = lif->adapter; 915 uint32_t socket_id = rte_socket_id(); 916 int err; 917 918 /* 919 * lif->name was zeroed on allocation. 920 * Copy (sizeof() - 1) bytes to ensure that it is NULL terminated. 921 */ 922 memcpy(lif->name, lif->eth_dev->data->name, sizeof(lif->name) - 1); 923 924 IONIC_PRINT(DEBUG, "LIF: %s", lif->name); 925 926 ionic_lif_queue_identify(lif); 927 928 if (lif->qtype_info[IONIC_QTYPE_TXQ].version < 1) { 929 IONIC_PRINT(ERR, "FW too old, please upgrade"); 930 return -ENXIO; 931 } 932 933 IONIC_PRINT(DEBUG, "Allocating Lif Info"); 934 935 rte_spinlock_init(&lif->adminq_lock); 936 rte_spinlock_init(&lif->adminq_service_lock); 937 938 lif->kern_dbpage = ionic_bus_map_dbpage(adapter, 0); 939 if (!lif->kern_dbpage) { 940 IONIC_PRINT(ERR, "Cannot map dbpage, aborting"); 941 return -ENOMEM; 942 } 943 944 lif->txqcqs = rte_zmalloc("ionic", sizeof(*lif->txqcqs) * 945 adapter->max_ntxqs_per_lif, 0); 946 947 if (!lif->txqcqs) { 948 IONIC_PRINT(ERR, "Cannot allocate tx queues array"); 949 return -ENOMEM; 950 } 951 952 lif->rxqcqs = rte_zmalloc("ionic", sizeof(*lif->rxqcqs) * 953 adapter->max_nrxqs_per_lif, 0); 954 955 if (!lif->rxqcqs) { 956 IONIC_PRINT(ERR, "Cannot allocate rx queues array"); 957 return -ENOMEM; 958 } 959 960 IONIC_PRINT(DEBUG, "Allocating Notify Queue"); 961 962 err = ionic_notify_qcq_alloc(lif); 963 if (err) { 964 IONIC_PRINT(ERR, "Cannot allocate notify queue"); 965 return err; 966 } 967 968 IONIC_PRINT(DEBUG, "Allocating Admin Queue"); 969 970 err = ionic_admin_qcq_alloc(lif); 971 if (err) { 972 IONIC_PRINT(ERR, "Cannot allocate admin queue"); 973 return err; 974 } 975 976 IONIC_PRINT(DEBUG, "Allocating Lif Info"); 977 978 lif->info_sz = RTE_ALIGN(sizeof(*lif->info), PAGE_SIZE); 979 980 lif->info_z = rte_eth_dma_zone_reserve(lif->eth_dev, 981 "lif_info", 0 /* queue_idx*/, 982 lif->info_sz, IONIC_ALIGN, socket_id); 983 if (!lif->info_z) { 984 IONIC_PRINT(ERR, "Cannot allocate lif info memory"); 985 return -ENOMEM; 986 } 987 988 lif->info = lif->info_z->addr; 989 lif->info_pa = lif->info_z->iova; 990 991 return 0; 992 } 993 994 void 995 ionic_lif_free(struct ionic_lif *lif) 996 { 997 if (lif->notifyqcq) { 998 ionic_qcq_free(lif->notifyqcq); 999 lif->notifyqcq = NULL; 1000 } 1001 1002 if (lif->adminqcq) { 1003 ionic_qcq_free(lif->adminqcq); 1004 lif->adminqcq = NULL; 1005 } 1006 1007 if (lif->txqcqs) { 1008 rte_free(lif->txqcqs); 1009 lif->txqcqs = NULL; 1010 } 1011 1012 if (lif->rxqcqs) { 1013 rte_free(lif->rxqcqs); 1014 lif->rxqcqs = NULL; 1015 } 1016 1017 if (lif->info) { 1018 rte_memzone_free(lif->info_z); 1019 lif->info = NULL; 1020 } 1021 } 1022 1023 void 1024 ionic_lif_free_queues(struct ionic_lif *lif) 1025 { 1026 uint32_t i; 1027 1028 for (i = 0; i < lif->ntxqcqs; i++) { 1029 ionic_dev_tx_queue_release(lif->eth_dev->data->tx_queues[i]); 1030 lif->eth_dev->data->tx_queues[i] = NULL; 1031 } 1032 for (i = 0; i < lif->nrxqcqs; i++) { 1033 ionic_dev_rx_queue_release(lif->eth_dev->data->rx_queues[i]); 1034 lif->eth_dev->data->rx_queues[i] = NULL; 1035 } 1036 } 1037 1038 int 1039 ionic_lif_rss_config(struct ionic_lif *lif, 1040 const uint16_t types, const uint8_t *key, const uint32_t *indir) 1041 { 1042 struct ionic_adapter *adapter = lif->adapter; 1043 struct ionic_admin_ctx ctx = { 1044 .pending_work = true, 1045 .cmd.lif_setattr = { 1046 .opcode = IONIC_CMD_LIF_SETATTR, 1047 .attr = IONIC_LIF_ATTR_RSS, 1048 .rss.types = rte_cpu_to_le_16(types), 1049 .rss.addr = rte_cpu_to_le_64(lif->rss_ind_tbl_pa), 1050 }, 1051 }; 1052 unsigned int i; 1053 uint16_t tbl_sz = 1054 rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz); 1055 1056 IONIC_PRINT_CALL(); 1057 1058 lif->rss_types = types; 1059 1060 if (key) 1061 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); 1062 1063 if (indir) 1064 for (i = 0; i < tbl_sz; i++) 1065 lif->rss_ind_tbl[i] = indir[i]; 1066 1067 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, 1068 IONIC_RSS_HASH_KEY_SIZE); 1069 1070 return ionic_adminq_post_wait(lif, &ctx); 1071 } 1072 1073 static int 1074 ionic_lif_rss_setup(struct ionic_lif *lif) 1075 { 1076 struct ionic_adapter *adapter = lif->adapter; 1077 static const uint8_t toeplitz_symmetric_key[] = { 1078 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 1079 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 1080 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 1081 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 1082 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 1083 }; 1084 uint32_t i; 1085 uint16_t tbl_sz = 1086 rte_le_to_cpu_16(adapter->ident.lif.eth.rss_ind_tbl_sz); 1087 1088 IONIC_PRINT_CALL(); 1089 1090 if (!lif->rss_ind_tbl_z) { 1091 lif->rss_ind_tbl_z = rte_eth_dma_zone_reserve(lif->eth_dev, 1092 "rss_ind_tbl", 0 /* queue_idx */, 1093 sizeof(*lif->rss_ind_tbl) * tbl_sz, 1094 IONIC_ALIGN, rte_socket_id()); 1095 if (!lif->rss_ind_tbl_z) { 1096 IONIC_PRINT(ERR, "OOM"); 1097 return -ENOMEM; 1098 } 1099 1100 lif->rss_ind_tbl = lif->rss_ind_tbl_z->addr; 1101 lif->rss_ind_tbl_pa = lif->rss_ind_tbl_z->iova; 1102 } 1103 1104 if (lif->rss_ind_tbl_nrxqcqs != lif->nrxqcqs) { 1105 lif->rss_ind_tbl_nrxqcqs = lif->nrxqcqs; 1106 1107 /* Fill indirection table with 'default' values */ 1108 for (i = 0; i < tbl_sz; i++) 1109 lif->rss_ind_tbl[i] = i % lif->nrxqcqs; 1110 } 1111 1112 return ionic_lif_rss_config(lif, IONIC_RSS_OFFLOAD_ALL, 1113 toeplitz_symmetric_key, NULL); 1114 } 1115 1116 static void 1117 ionic_lif_rss_teardown(struct ionic_lif *lif) 1118 { 1119 if (!lif->rss_ind_tbl) 1120 return; 1121 1122 if (lif->rss_ind_tbl_z) { 1123 /* Disable RSS on the NIC */ 1124 ionic_lif_rss_config(lif, 0x0, NULL, NULL); 1125 1126 lif->rss_ind_tbl = NULL; 1127 lif->rss_ind_tbl_pa = 0; 1128 rte_memzone_free(lif->rss_ind_tbl_z); 1129 lif->rss_ind_tbl_z = NULL; 1130 } 1131 } 1132 1133 static void 1134 ionic_lif_qcq_deinit(struct ionic_qcq *qcq) 1135 { 1136 qcq->flags &= ~IONIC_QCQ_F_INITED; 1137 } 1138 1139 void 1140 ionic_lif_txq_deinit(struct ionic_qcq *qcq) 1141 { 1142 ionic_lif_qcq_deinit(qcq); 1143 } 1144 1145 void 1146 ionic_lif_rxq_deinit(struct ionic_qcq *qcq) 1147 { 1148 ionic_lif_qcq_deinit(qcq); 1149 } 1150 1151 static void 1152 ionic_lif_notifyq_deinit(struct ionic_lif *lif) 1153 { 1154 struct ionic_qcq *nqcq = lif->notifyqcq; 1155 struct ionic_dev *idev = &lif->adapter->idev; 1156 1157 if (!(nqcq->flags & IONIC_QCQ_F_INITED)) 1158 return; 1159 1160 ionic_intr_mask(idev->intr_ctrl, nqcq->intr.index, 1161 IONIC_INTR_MASK_SET); 1162 1163 nqcq->flags &= ~IONIC_QCQ_F_INITED; 1164 } 1165 1166 bool 1167 ionic_adminq_service(struct ionic_cq *cq, uint32_t cq_desc_index, 1168 void *cb_arg __rte_unused) 1169 { 1170 struct ionic_admin_comp *cq_desc_base = cq->base; 1171 struct ionic_admin_comp *cq_desc = &cq_desc_base[cq_desc_index]; 1172 struct ionic_qcq *qcq = IONIC_CQ_TO_QCQ(cq); 1173 1174 if (!color_match(cq_desc->color, cq->done_color)) 1175 return false; 1176 1177 ionic_q_service(&qcq->q, cq_desc_index, cq_desc->comp_index, NULL); 1178 1179 return true; 1180 } 1181 1182 /* This acts like ionic_napi */ 1183 int 1184 ionic_qcq_service(struct ionic_qcq *qcq, int budget, ionic_cq_cb cb, 1185 void *cb_arg) 1186 { 1187 struct ionic_cq *cq = &qcq->cq; 1188 uint32_t work_done; 1189 1190 work_done = ionic_cq_service(cq, budget, cb, cb_arg); 1191 1192 return work_done; 1193 } 1194 1195 static void 1196 ionic_link_status_check(struct ionic_lif *lif) 1197 { 1198 struct ionic_adapter *adapter = lif->adapter; 1199 bool link_up; 1200 1201 lif->state &= ~IONIC_LIF_F_LINK_CHECK_NEEDED; 1202 1203 if (!lif->info) 1204 return; 1205 1206 link_up = (lif->info->status.link_status == IONIC_PORT_OPER_STATUS_UP); 1207 1208 if ((link_up && adapter->link_up) || 1209 (!link_up && !adapter->link_up)) 1210 return; 1211 1212 if (link_up) { 1213 adapter->link_speed = 1214 rte_le_to_cpu_32(lif->info->status.link_speed); 1215 IONIC_PRINT(DEBUG, "Link up - %d Gbps", 1216 adapter->link_speed); 1217 } else { 1218 IONIC_PRINT(DEBUG, "Link down"); 1219 } 1220 1221 adapter->link_up = link_up; 1222 ionic_dev_link_update(lif->eth_dev, 0); 1223 } 1224 1225 static void 1226 ionic_lif_handle_fw_down(struct ionic_lif *lif) 1227 { 1228 if (lif->state & IONIC_LIF_F_FW_RESET) 1229 return; 1230 1231 lif->state |= IONIC_LIF_F_FW_RESET; 1232 1233 if (lif->state & IONIC_LIF_F_UP) { 1234 IONIC_PRINT(NOTICE, 1235 "Surprise FW stop, stopping %s\n", lif->name); 1236 ionic_lif_stop(lif); 1237 } 1238 1239 IONIC_PRINT(NOTICE, "FW down, %s stopped", lif->name); 1240 } 1241 1242 static bool 1243 ionic_notifyq_cb(struct ionic_cq *cq, uint32_t cq_desc_index, void *cb_arg) 1244 { 1245 union ionic_notifyq_comp *cq_desc_base = cq->base; 1246 union ionic_notifyq_comp *cq_desc = &cq_desc_base[cq_desc_index]; 1247 struct ionic_lif *lif = cb_arg; 1248 1249 IONIC_PRINT(DEBUG, "Notifyq callback eid = %jd ecode = %d", 1250 cq_desc->event.eid, cq_desc->event.ecode); 1251 1252 /* Have we run out of new completions to process? */ 1253 if (!(cq_desc->event.eid > lif->last_eid)) 1254 return false; 1255 1256 lif->last_eid = cq_desc->event.eid; 1257 1258 switch (cq_desc->event.ecode) { 1259 case IONIC_EVENT_LINK_CHANGE: 1260 IONIC_PRINT(DEBUG, 1261 "Notifyq IONIC_EVENT_LINK_CHANGE %s " 1262 "eid=%jd link_status=%d link_speed=%d", 1263 lif->name, 1264 cq_desc->event.eid, 1265 cq_desc->link_change.link_status, 1266 cq_desc->link_change.link_speed); 1267 1268 lif->state |= IONIC_LIF_F_LINK_CHECK_NEEDED; 1269 break; 1270 1271 case IONIC_EVENT_RESET: 1272 IONIC_PRINT(NOTICE, 1273 "Notifyq IONIC_EVENT_RESET %s " 1274 "eid=%jd, reset_code=%d state=%d", 1275 lif->name, 1276 cq_desc->event.eid, 1277 cq_desc->reset.reset_code, 1278 cq_desc->reset.state); 1279 ionic_lif_handle_fw_down(lif); 1280 break; 1281 1282 default: 1283 IONIC_PRINT(WARNING, "Notifyq bad event ecode=%d eid=%jd", 1284 cq_desc->event.ecode, cq_desc->event.eid); 1285 break; 1286 } 1287 1288 return true; 1289 } 1290 1291 int 1292 ionic_notifyq_handler(struct ionic_lif *lif, int budget) 1293 { 1294 struct ionic_dev *idev = &lif->adapter->idev; 1295 struct ionic_qcq *qcq = lif->notifyqcq; 1296 uint32_t work_done; 1297 1298 if (!(qcq->flags & IONIC_QCQ_F_INITED)) { 1299 IONIC_PRINT(DEBUG, "Notifyq not yet initialized"); 1300 return -1; 1301 } 1302 1303 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 1304 IONIC_INTR_MASK_SET); 1305 1306 work_done = ionic_qcq_service(qcq, budget, ionic_notifyq_cb, lif); 1307 1308 if (lif->state & IONIC_LIF_F_LINK_CHECK_NEEDED) 1309 ionic_link_status_check(lif); 1310 1311 ionic_intr_credits(idev->intr_ctrl, qcq->intr.index, 1312 work_done, IONIC_INTR_CRED_RESET_COALESCE); 1313 1314 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 1315 IONIC_INTR_MASK_CLEAR); 1316 1317 return 0; 1318 } 1319 1320 static int 1321 ionic_lif_adminq_init(struct ionic_lif *lif) 1322 { 1323 struct ionic_dev *idev = &lif->adapter->idev; 1324 struct ionic_qcq *qcq = lif->adminqcq; 1325 struct ionic_queue *q = &qcq->q; 1326 struct ionic_q_init_comp comp; 1327 int err; 1328 1329 ionic_dev_cmd_adminq_init(idev, qcq); 1330 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); 1331 if (err) 1332 return err; 1333 1334 ionic_dev_cmd_comp(idev, &comp); 1335 1336 q->hw_type = comp.hw_type; 1337 q->hw_index = rte_le_to_cpu_32(comp.hw_index); 1338 q->db = ionic_db_map(lif, q); 1339 1340 IONIC_PRINT(DEBUG, "adminq->hw_type %d", q->hw_type); 1341 IONIC_PRINT(DEBUG, "adminq->hw_index %d", q->hw_index); 1342 IONIC_PRINT(DEBUG, "adminq->db %p", q->db); 1343 1344 qcq->flags |= IONIC_QCQ_F_INITED; 1345 1346 return 0; 1347 } 1348 1349 static int 1350 ionic_lif_notifyq_init(struct ionic_lif *lif) 1351 { 1352 struct ionic_dev *idev = &lif->adapter->idev; 1353 struct ionic_qcq *qcq = lif->notifyqcq; 1354 struct ionic_queue *q = &qcq->q; 1355 int err; 1356 1357 struct ionic_admin_ctx ctx = { 1358 .pending_work = true, 1359 .cmd.q_init = { 1360 .opcode = IONIC_CMD_Q_INIT, 1361 .type = q->type, 1362 .ver = lif->qtype_info[q->type].version, 1363 .index = rte_cpu_to_le_32(q->index), 1364 .intr_index = rte_cpu_to_le_16(qcq->intr.index), 1365 .flags = rte_cpu_to_le_16(IONIC_QINIT_F_IRQ | 1366 IONIC_QINIT_F_ENA), 1367 .ring_size = rte_log2_u32(q->num_descs), 1368 .ring_base = rte_cpu_to_le_64(q->base_pa), 1369 } 1370 }; 1371 1372 IONIC_PRINT(DEBUG, "notifyq_init.index %d", q->index); 1373 IONIC_PRINT(DEBUG, "notifyq_init.ring_base 0x%" PRIx64 "", q->base_pa); 1374 IONIC_PRINT(DEBUG, "notifyq_init.ring_size %d", 1375 ctx.cmd.q_init.ring_size); 1376 IONIC_PRINT(DEBUG, "notifyq_init.ver %u", ctx.cmd.q_init.ver); 1377 1378 err = ionic_adminq_post_wait(lif, &ctx); 1379 if (err) 1380 return err; 1381 1382 q->hw_type = ctx.comp.q_init.hw_type; 1383 q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index); 1384 q->db = NULL; 1385 1386 IONIC_PRINT(DEBUG, "notifyq->hw_type %d", q->hw_type); 1387 IONIC_PRINT(DEBUG, "notifyq->hw_index %d", q->hw_index); 1388 IONIC_PRINT(DEBUG, "notifyq->db %p", q->db); 1389 1390 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 1391 IONIC_INTR_MASK_CLEAR); 1392 1393 qcq->flags |= IONIC_QCQ_F_INITED; 1394 1395 return 0; 1396 } 1397 1398 int 1399 ionic_lif_set_features(struct ionic_lif *lif) 1400 { 1401 struct ionic_admin_ctx ctx = { 1402 .pending_work = true, 1403 .cmd.lif_setattr = { 1404 .opcode = IONIC_CMD_LIF_SETATTR, 1405 .attr = IONIC_LIF_ATTR_FEATURES, 1406 .features = rte_cpu_to_le_64(lif->features), 1407 }, 1408 }; 1409 int err; 1410 1411 err = ionic_adminq_post_wait(lif, &ctx); 1412 if (err) 1413 return err; 1414 1415 lif->hw_features = rte_le_to_cpu_64(ctx.cmd.lif_setattr.features & 1416 ctx.comp.lif_setattr.features); 1417 1418 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) 1419 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_TX_TAG"); 1420 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) 1421 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_STRIP"); 1422 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) 1423 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_VLAN_RX_FILTER"); 1424 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) 1425 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_HASH"); 1426 if (lif->hw_features & IONIC_ETH_HW_TX_SG) 1427 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_SG"); 1428 if (lif->hw_features & IONIC_ETH_HW_RX_SG) 1429 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_SG"); 1430 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) 1431 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TX_CSUM"); 1432 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) 1433 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_RX_CSUM"); 1434 if (lif->hw_features & IONIC_ETH_HW_TSO) 1435 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO"); 1436 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) 1437 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPV6"); 1438 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) 1439 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_ECN"); 1440 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) 1441 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE"); 1442 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) 1443 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_GRE_CSUM"); 1444 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) 1445 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP4"); 1446 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) 1447 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_IPXIP6"); 1448 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) 1449 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP"); 1450 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) 1451 IONIC_PRINT(DEBUG, "feature IONIC_ETH_HW_TSO_UDP_CSUM"); 1452 1453 return 0; 1454 } 1455 1456 int 1457 ionic_lif_txq_init(struct ionic_qcq *qcq) 1458 { 1459 struct ionic_queue *q = &qcq->q; 1460 struct ionic_lif *lif = qcq->lif; 1461 struct ionic_cq *cq = &qcq->cq; 1462 struct ionic_admin_ctx ctx = { 1463 .pending_work = true, 1464 .cmd.q_init = { 1465 .opcode = IONIC_CMD_Q_INIT, 1466 .type = q->type, 1467 .ver = lif->qtype_info[q->type].version, 1468 .index = rte_cpu_to_le_32(q->index), 1469 .flags = rte_cpu_to_le_16(IONIC_QINIT_F_SG | 1470 IONIC_QINIT_F_ENA), 1471 .intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE), 1472 .ring_size = rte_log2_u32(q->num_descs), 1473 .ring_base = rte_cpu_to_le_64(q->base_pa), 1474 .cq_ring_base = rte_cpu_to_le_64(cq->base_pa), 1475 .sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa), 1476 }, 1477 }; 1478 int err; 1479 1480 IONIC_PRINT(DEBUG, "txq_init.index %d", q->index); 1481 IONIC_PRINT(DEBUG, "txq_init.ring_base 0x%" PRIx64 "", q->base_pa); 1482 IONIC_PRINT(DEBUG, "txq_init.ring_size %d", 1483 ctx.cmd.q_init.ring_size); 1484 IONIC_PRINT(DEBUG, "txq_init.ver %u", ctx.cmd.q_init.ver); 1485 1486 err = ionic_adminq_post_wait(qcq->lif, &ctx); 1487 if (err) 1488 return err; 1489 1490 q->hw_type = ctx.comp.q_init.hw_type; 1491 q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index); 1492 q->db = ionic_db_map(lif, q); 1493 1494 IONIC_PRINT(DEBUG, "txq->hw_type %d", q->hw_type); 1495 IONIC_PRINT(DEBUG, "txq->hw_index %d", q->hw_index); 1496 IONIC_PRINT(DEBUG, "txq->db %p", q->db); 1497 1498 qcq->flags |= IONIC_QCQ_F_INITED; 1499 1500 return 0; 1501 } 1502 1503 int 1504 ionic_lif_rxq_init(struct ionic_qcq *qcq) 1505 { 1506 struct ionic_queue *q = &qcq->q; 1507 struct ionic_lif *lif = qcq->lif; 1508 struct ionic_cq *cq = &qcq->cq; 1509 struct ionic_admin_ctx ctx = { 1510 .pending_work = true, 1511 .cmd.q_init = { 1512 .opcode = IONIC_CMD_Q_INIT, 1513 .type = q->type, 1514 .ver = lif->qtype_info[q->type].version, 1515 .index = rte_cpu_to_le_32(q->index), 1516 .flags = rte_cpu_to_le_16(IONIC_QINIT_F_SG | 1517 IONIC_QINIT_F_ENA), 1518 .intr_index = rte_cpu_to_le_16(IONIC_INTR_NONE), 1519 .ring_size = rte_log2_u32(q->num_descs), 1520 .ring_base = rte_cpu_to_le_64(q->base_pa), 1521 .cq_ring_base = rte_cpu_to_le_64(cq->base_pa), 1522 .sg_ring_base = rte_cpu_to_le_64(q->sg_base_pa), 1523 }, 1524 }; 1525 int err; 1526 1527 IONIC_PRINT(DEBUG, "rxq_init.index %d", q->index); 1528 IONIC_PRINT(DEBUG, "rxq_init.ring_base 0x%" PRIx64 "", q->base_pa); 1529 IONIC_PRINT(DEBUG, "rxq_init.ring_size %d", 1530 ctx.cmd.q_init.ring_size); 1531 IONIC_PRINT(DEBUG, "rxq_init.ver %u", ctx.cmd.q_init.ver); 1532 1533 err = ionic_adminq_post_wait(qcq->lif, &ctx); 1534 if (err) 1535 return err; 1536 1537 q->hw_type = ctx.comp.q_init.hw_type; 1538 q->hw_index = rte_le_to_cpu_32(ctx.comp.q_init.hw_index); 1539 q->db = ionic_db_map(lif, q); 1540 1541 qcq->flags |= IONIC_QCQ_F_INITED; 1542 1543 IONIC_PRINT(DEBUG, "rxq->hw_type %d", q->hw_type); 1544 IONIC_PRINT(DEBUG, "rxq->hw_index %d", q->hw_index); 1545 IONIC_PRINT(DEBUG, "rxq->db %p", q->db); 1546 1547 return 0; 1548 } 1549 1550 static int 1551 ionic_station_set(struct ionic_lif *lif) 1552 { 1553 struct ionic_admin_ctx ctx = { 1554 .pending_work = true, 1555 .cmd.lif_getattr = { 1556 .opcode = IONIC_CMD_LIF_GETATTR, 1557 .attr = IONIC_LIF_ATTR_MAC, 1558 }, 1559 }; 1560 int err; 1561 1562 IONIC_PRINT_CALL(); 1563 1564 err = ionic_adminq_post_wait(lif, &ctx); 1565 if (err) 1566 return err; 1567 1568 memcpy(lif->mac_addr, ctx.comp.lif_getattr.mac, RTE_ETHER_ADDR_LEN); 1569 1570 return 0; 1571 } 1572 1573 static void 1574 ionic_lif_set_name(struct ionic_lif *lif) 1575 { 1576 struct ionic_admin_ctx ctx = { 1577 .pending_work = true, 1578 .cmd.lif_setattr = { 1579 .opcode = IONIC_CMD_LIF_SETATTR, 1580 .attr = IONIC_LIF_ATTR_NAME, 1581 }, 1582 }; 1583 1584 memcpy(ctx.cmd.lif_setattr.name, lif->name, 1585 sizeof(ctx.cmd.lif_setattr.name) - 1); 1586 1587 ionic_adminq_post_wait(lif, &ctx); 1588 } 1589 1590 int 1591 ionic_lif_init(struct ionic_lif *lif) 1592 { 1593 struct ionic_dev *idev = &lif->adapter->idev; 1594 struct ionic_q_init_comp comp; 1595 int err; 1596 1597 memset(&lif->stats_base, 0, sizeof(lif->stats_base)); 1598 1599 ionic_dev_cmd_lif_init(idev, lif->info_pa); 1600 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); 1601 ionic_dev_cmd_comp(idev, &comp); 1602 if (err) 1603 return err; 1604 1605 lif->hw_index = rte_cpu_to_le_16(comp.hw_index); 1606 1607 err = ionic_lif_adminq_init(lif); 1608 if (err) 1609 return err; 1610 1611 err = ionic_lif_notifyq_init(lif); 1612 if (err) 1613 goto err_out_adminq_deinit; 1614 1615 /* 1616 * Configure initial feature set 1617 * This will be updated later by the dev_configure() step 1618 */ 1619 lif->features = IONIC_ETH_HW_RX_HASH | IONIC_ETH_HW_VLAN_RX_FILTER; 1620 1621 err = ionic_lif_set_features(lif); 1622 if (err) 1623 goto err_out_notifyq_deinit; 1624 1625 err = ionic_rx_filters_init(lif); 1626 if (err) 1627 goto err_out_notifyq_deinit; 1628 1629 err = ionic_station_set(lif); 1630 if (err) 1631 goto err_out_rx_filter_deinit; 1632 1633 ionic_lif_set_name(lif); 1634 1635 lif->state |= IONIC_LIF_F_INITED; 1636 1637 return 0; 1638 1639 err_out_rx_filter_deinit: 1640 ionic_rx_filters_deinit(lif); 1641 1642 err_out_notifyq_deinit: 1643 ionic_lif_notifyq_deinit(lif); 1644 1645 err_out_adminq_deinit: 1646 ionic_lif_qcq_deinit(lif->adminqcq); 1647 1648 return err; 1649 } 1650 1651 void 1652 ionic_lif_deinit(struct ionic_lif *lif) 1653 { 1654 if (!(lif->state & IONIC_LIF_F_INITED)) 1655 return; 1656 1657 ionic_rx_filters_deinit(lif); 1658 ionic_lif_rss_teardown(lif); 1659 ionic_lif_notifyq_deinit(lif); 1660 ionic_lif_qcq_deinit(lif->adminqcq); 1661 1662 lif->state &= ~IONIC_LIF_F_INITED; 1663 } 1664 1665 void 1666 ionic_lif_configure_vlan_offload(struct ionic_lif *lif, int mask) 1667 { 1668 struct rte_eth_dev *eth_dev = lif->eth_dev; 1669 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 1670 1671 /* 1672 * IONIC_ETH_HW_VLAN_RX_FILTER cannot be turned off, so 1673 * set DEV_RX_OFFLOAD_VLAN_FILTER and ignore ETH_VLAN_FILTER_MASK 1674 */ 1675 rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; 1676 1677 if (mask & ETH_VLAN_STRIP_MASK) { 1678 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 1679 lif->features |= IONIC_ETH_HW_VLAN_RX_STRIP; 1680 else 1681 lif->features &= ~IONIC_ETH_HW_VLAN_RX_STRIP; 1682 } 1683 } 1684 1685 void 1686 ionic_lif_configure(struct ionic_lif *lif) 1687 { 1688 struct rte_eth_rxmode *rxmode = &lif->eth_dev->data->dev_conf.rxmode; 1689 struct rte_eth_txmode *txmode = &lif->eth_dev->data->dev_conf.txmode; 1690 struct ionic_identity *ident = &lif->adapter->ident; 1691 union ionic_lif_config *cfg = &ident->lif.eth.config; 1692 uint32_t ntxqs_per_lif = 1693 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]); 1694 uint32_t nrxqs_per_lif = 1695 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]); 1696 uint32_t nrxqs = lif->eth_dev->data->nb_rx_queues; 1697 uint32_t ntxqs = lif->eth_dev->data->nb_tx_queues; 1698 1699 lif->port_id = lif->eth_dev->data->port_id; 1700 1701 IONIC_PRINT(DEBUG, "Configuring LIF on port %u", 1702 lif->port_id); 1703 1704 if (nrxqs > 0) 1705 nrxqs_per_lif = RTE_MIN(nrxqs_per_lif, nrxqs); 1706 1707 if (ntxqs > 0) 1708 ntxqs_per_lif = RTE_MIN(ntxqs_per_lif, ntxqs); 1709 1710 lif->nrxqcqs = nrxqs_per_lif; 1711 lif->ntxqcqs = ntxqs_per_lif; 1712 1713 /* Update the LIF configuration based on the eth_dev */ 1714 1715 /* 1716 * NB: While it is true that RSS_HASH is always enabled on ionic, 1717 * setting this flag unconditionally causes problems in DTS. 1718 * rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH; 1719 */ 1720 1721 /* RX per-port */ 1722 1723 if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM || 1724 rxmode->offloads & DEV_RX_OFFLOAD_UDP_CKSUM || 1725 rxmode->offloads & DEV_RX_OFFLOAD_TCP_CKSUM) 1726 lif->features |= IONIC_ETH_HW_RX_CSUM; 1727 else 1728 lif->features &= ~IONIC_ETH_HW_RX_CSUM; 1729 1730 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) { 1731 lif->features |= IONIC_ETH_HW_RX_SG; 1732 lif->eth_dev->data->scattered_rx = 1; 1733 } else { 1734 lif->features &= ~IONIC_ETH_HW_RX_SG; 1735 lif->eth_dev->data->scattered_rx = 0; 1736 } 1737 1738 /* Covers VLAN_STRIP */ 1739 ionic_lif_configure_vlan_offload(lif, ETH_VLAN_STRIP_MASK); 1740 1741 /* TX per-port */ 1742 1743 if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM || 1744 txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM || 1745 txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM || 1746 txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM || 1747 txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) 1748 lif->features |= IONIC_ETH_HW_TX_CSUM; 1749 else 1750 lif->features &= ~IONIC_ETH_HW_TX_CSUM; 1751 1752 if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT) 1753 lif->features |= IONIC_ETH_HW_VLAN_TX_TAG; 1754 else 1755 lif->features &= ~IONIC_ETH_HW_VLAN_TX_TAG; 1756 1757 if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) 1758 lif->features |= IONIC_ETH_HW_TX_SG; 1759 else 1760 lif->features &= ~IONIC_ETH_HW_TX_SG; 1761 1762 if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) { 1763 lif->features |= IONIC_ETH_HW_TSO; 1764 lif->features |= IONIC_ETH_HW_TSO_IPV6; 1765 lif->features |= IONIC_ETH_HW_TSO_ECN; 1766 } else { 1767 lif->features &= ~IONIC_ETH_HW_TSO; 1768 lif->features &= ~IONIC_ETH_HW_TSO_IPV6; 1769 lif->features &= ~IONIC_ETH_HW_TSO_ECN; 1770 } 1771 } 1772 1773 int 1774 ionic_lif_start(struct ionic_lif *lif) 1775 { 1776 uint32_t rx_mode; 1777 uint32_t i; 1778 int err; 1779 1780 err = ionic_lif_rss_setup(lif); 1781 if (err) 1782 return err; 1783 1784 if (!lif->rx_mode) { 1785 IONIC_PRINT(DEBUG, "Setting RX mode on %s", 1786 lif->name); 1787 1788 rx_mode = IONIC_RX_MODE_F_UNICAST; 1789 rx_mode |= IONIC_RX_MODE_F_MULTICAST; 1790 rx_mode |= IONIC_RX_MODE_F_BROADCAST; 1791 1792 ionic_set_rx_mode(lif, rx_mode); 1793 } 1794 1795 IONIC_PRINT(DEBUG, "Starting %u RX queues and %u TX queues " 1796 "on port %u", 1797 lif->nrxqcqs, lif->ntxqcqs, lif->port_id); 1798 1799 for (i = 0; i < lif->nrxqcqs; i++) { 1800 struct ionic_qcq *rxq = lif->rxqcqs[i]; 1801 if (!(rxq->flags & IONIC_QCQ_F_DEFERRED)) { 1802 err = ionic_dev_rx_queue_start(lif->eth_dev, i); 1803 1804 if (err) 1805 return err; 1806 } 1807 } 1808 1809 for (i = 0; i < lif->ntxqcqs; i++) { 1810 struct ionic_qcq *txq = lif->txqcqs[i]; 1811 if (!(txq->flags & IONIC_QCQ_F_DEFERRED)) { 1812 err = ionic_dev_tx_queue_start(lif->eth_dev, i); 1813 1814 if (err) 1815 return err; 1816 } 1817 } 1818 1819 /* Carrier ON here */ 1820 lif->state |= IONIC_LIF_F_UP; 1821 1822 ionic_link_status_check(lif); 1823 1824 return 0; 1825 } 1826 1827 int 1828 ionic_lif_identify(struct ionic_adapter *adapter) 1829 { 1830 struct ionic_dev *idev = &adapter->idev; 1831 struct ionic_identity *ident = &adapter->ident; 1832 union ionic_lif_config *cfg = &ident->lif.eth.config; 1833 uint32_t lif_words = RTE_DIM(ident->lif.words); 1834 uint32_t cmd_words = RTE_DIM(idev->dev_cmd->data); 1835 uint32_t i, nwords; 1836 int err; 1837 1838 ionic_dev_cmd_lif_identify(idev, IONIC_LIF_TYPE_CLASSIC, 1839 IONIC_IDENTITY_VERSION_1); 1840 err = ionic_dev_cmd_wait_check(idev, IONIC_DEVCMD_TIMEOUT); 1841 if (err) 1842 return (err); 1843 1844 nwords = RTE_MIN(lif_words, cmd_words); 1845 for (i = 0; i < nwords; i++) 1846 ident->lif.words[i] = ioread32(&idev->dev_cmd->data[i]); 1847 1848 IONIC_PRINT(INFO, "capabilities 0x%" PRIx64 " ", 1849 rte_le_to_cpu_64(ident->lif.capabilities)); 1850 1851 IONIC_PRINT(INFO, "eth.max_ucast_filters 0x%" PRIx32 " ", 1852 rte_le_to_cpu_32(ident->lif.eth.max_ucast_filters)); 1853 IONIC_PRINT(INFO, "eth.max_mcast_filters 0x%" PRIx32 " ", 1854 rte_le_to_cpu_32(ident->lif.eth.max_mcast_filters)); 1855 1856 IONIC_PRINT(INFO, "eth.features 0x%" PRIx64 " ", 1857 rte_le_to_cpu_64(cfg->features)); 1858 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_ADMINQ] 0x%" PRIx32 " ", 1859 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_ADMINQ])); 1860 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] 0x%" PRIx32 " ", 1861 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_NOTIFYQ])); 1862 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_RXQ] 0x%" PRIx32 " ", 1863 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ])); 1864 IONIC_PRINT(INFO, "eth.queue_count[IONIC_QTYPE_TXQ] 0x%" PRIx32 " ", 1865 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ])); 1866 1867 return 0; 1868 } 1869 1870 int 1871 ionic_lifs_size(struct ionic_adapter *adapter) 1872 { 1873 struct ionic_identity *ident = &adapter->ident; 1874 union ionic_lif_config *cfg = &ident->lif.eth.config; 1875 uint32_t nintrs, dev_nintrs = rte_le_to_cpu_32(ident->dev.nintrs); 1876 1877 adapter->max_ntxqs_per_lif = 1878 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_TXQ]); 1879 adapter->max_nrxqs_per_lif = 1880 rte_le_to_cpu_32(cfg->queue_count[IONIC_QTYPE_RXQ]); 1881 1882 nintrs = 1 /* notifyq */; 1883 1884 if (nintrs > dev_nintrs) { 1885 IONIC_PRINT(ERR, 1886 "At most %d intr supported, minimum req'd is %u", 1887 dev_nintrs, nintrs); 1888 return -ENOSPC; 1889 } 1890 1891 adapter->nintrs = nintrs; 1892 1893 return 0; 1894 } 1895