1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2018-2019 NXP 3 */ 4 5 #include <sys/ioctl.h> 6 #include <sys/epoll.h> 7 #include <rte_kvargs.h> 8 #include <rte_ethdev_vdev.h> 9 #include <rte_bus_vdev.h> 10 #include <rte_ether.h> 11 #include <dpaa_of.h> 12 13 #include "pfe_logs.h" 14 #include "pfe_mod.h" 15 16 #define PFE_MAX_MACS 1 /* we can support up to 4 MACs per IF */ 17 #define PFE_VDEV_GEM_ID_ARG "intf" 18 19 struct pfe_vdev_init_params { 20 int8_t gem_id; 21 }; 22 static struct pfe *g_pfe; 23 /* Supported Rx offloads */ 24 static uint64_t dev_rx_offloads_sup = 25 DEV_RX_OFFLOAD_IPV4_CKSUM | 26 DEV_RX_OFFLOAD_UDP_CKSUM | 27 DEV_RX_OFFLOAD_TCP_CKSUM; 28 29 /* Supported Tx offloads */ 30 static uint64_t dev_tx_offloads_sup = 31 DEV_TX_OFFLOAD_IPV4_CKSUM | 32 DEV_TX_OFFLOAD_UDP_CKSUM | 33 DEV_TX_OFFLOAD_TCP_CKSUM; 34 35 /* TODO: make pfe_svr a runtime option. 36 * Driver should be able to get the SVR 37 * information from HW. 38 */ 39 unsigned int pfe_svr = SVR_LS1012A_REV1; 40 static void *cbus_emac_base[3]; 41 static void *cbus_gpi_base[3]; 42 43 int pfe_logtype_pmd; 44 45 /* pfe_gemac_init 46 */ 47 static int 48 pfe_gemac_init(struct pfe_eth_priv_s *priv) 49 { 50 struct gemac_cfg cfg; 51 52 cfg.speed = SPEED_1000M; 53 cfg.duplex = DUPLEX_FULL; 54 55 gemac_set_config(priv->EMAC_baseaddr, &cfg); 56 gemac_allow_broadcast(priv->EMAC_baseaddr); 57 gemac_enable_1536_rx(priv->EMAC_baseaddr); 58 gemac_enable_stacked_vlan(priv->EMAC_baseaddr); 59 gemac_enable_pause_rx(priv->EMAC_baseaddr); 60 gemac_set_bus_width(priv->EMAC_baseaddr, 64); 61 gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr); 62 63 return 0; 64 } 65 66 static void 67 pfe_soc_version_get(void) 68 { 69 FILE *svr_file = NULL; 70 unsigned int svr_ver = 0; 71 72 PMD_INIT_FUNC_TRACE(); 73 74 svr_file = fopen(PFE_SOC_ID_FILE, "r"); 75 if (!svr_file) { 76 PFE_PMD_ERR("Unable to open SoC device"); 77 return; /* Not supported on this infra */ 78 } 79 80 if (fscanf(svr_file, "svr:%x", &svr_ver) > 0) 81 pfe_svr = svr_ver; 82 else 83 PFE_PMD_ERR("Unable to read SoC device"); 84 85 fclose(svr_file); 86 } 87 88 static int pfe_eth_start(struct pfe_eth_priv_s *priv) 89 { 90 gpi_enable(priv->GPI_baseaddr); 91 gemac_enable(priv->EMAC_baseaddr); 92 93 return 0; 94 } 95 96 static void 97 pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int 98 __rte_unused from_tx, __rte_unused int n_desc) 99 { 100 struct rte_mbuf *mbuf; 101 unsigned int flags; 102 103 /* Clean HIF and client queue */ 104 while ((mbuf = hif_lib_tx_get_next_complete(&priv->client, 105 tx_q_num, &flags, 106 HIF_TX_DESC_NT))) { 107 if (mbuf) { 108 mbuf->next = NULL; 109 mbuf->nb_segs = 1; 110 rte_pktmbuf_free(mbuf); 111 } 112 } 113 } 114 115 116 static void 117 pfe_eth_flush_tx(struct pfe_eth_priv_s *priv) 118 { 119 unsigned int ii; 120 121 for (ii = 0; ii < emac_txq_cnt; ii++) 122 pfe_eth_flush_txQ(priv, ii, 0, 0); 123 } 124 125 static int 126 pfe_eth_event_handler(void *data, int event, __rte_unused int qno) 127 { 128 struct pfe_eth_priv_s *priv = data; 129 130 switch (event) { 131 case EVENT_TXDONE_IND: 132 pfe_eth_flush_tx(priv); 133 hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0); 134 break; 135 case EVENT_HIGH_RX_WM: 136 default: 137 break; 138 } 139 140 return 0; 141 } 142 143 static uint16_t 144 pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 145 { 146 struct hif_client_rx_queue *queue = rxq; 147 struct pfe_eth_priv_s *priv = queue->priv; 148 struct epoll_event epoll_ev; 149 uint64_t ticks = 1; /* 1 msec */ 150 int ret; 151 int have_something, work_done; 152 153 #define RESET_STATUS (HIF_INT | HIF_RXPKT_INT) 154 155 /*TODO can we remove this cleanup from here?*/ 156 pfe_tx_do_cleanup(priv->pfe); 157 have_something = pfe_hif_rx_process(priv->pfe, nb_pkts); 158 work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool, 159 rx_pkts, nb_pkts); 160 161 if (!have_something || !work_done) { 162 writel(RESET_STATUS, HIF_INT_SRC); 163 writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE); 164 ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks); 165 if (ret < 0 && errno != EINTR) 166 PFE_PMD_ERR("epoll_wait fails with %d\n", errno); 167 } 168 169 return work_done; 170 } 171 172 static uint16_t 173 pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 174 { 175 struct hif_client_rx_queue *queue = rxq; 176 struct pfe_eth_priv_s *priv = queue->priv; 177 struct rte_mempool *pool; 178 179 /*TODO can we remove this cleanup from here?*/ 180 pfe_tx_do_cleanup(priv->pfe); 181 pfe_hif_rx_process(priv->pfe, nb_pkts); 182 pool = priv->pfe->hif.shm->pool; 183 184 return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts); 185 } 186 187 static uint16_t 188 pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 189 { 190 struct hif_client_tx_queue *queue = tx_queue; 191 struct pfe_eth_priv_s *priv = queue->priv; 192 struct rte_eth_stats *stats = &priv->stats; 193 int i; 194 195 for (i = 0; i < nb_pkts; i++) { 196 if (tx_pkts[i]->nb_segs > 1) { 197 struct rte_mbuf *mbuf; 198 int j; 199 200 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 201 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]), 202 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off, 203 tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER, 204 tx_pkts[i]); 205 206 mbuf = tx_pkts[i]->next; 207 for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) { 208 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 209 (void *)(size_t)rte_pktmbuf_iova(mbuf), 210 mbuf->buf_addr + mbuf->data_off, 211 mbuf->data_len, 212 0x0, 0x0, mbuf); 213 mbuf = mbuf->next; 214 } 215 216 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 217 (void *)(size_t)rte_pktmbuf_iova(mbuf), 218 mbuf->buf_addr + mbuf->data_off, 219 mbuf->data_len, 220 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID, 221 mbuf); 222 } else { 223 hif_lib_xmit_pkt(&priv->client, queue->queue_id, 224 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]), 225 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off, 226 tx_pkts[i]->pkt_len, 0 /*ctrl*/, 227 HIF_FIRST_BUFFER | HIF_LAST_BUFFER | 228 HIF_DATA_VALID, 229 tx_pkts[i]); 230 } 231 stats->obytes += tx_pkts[i]->pkt_len; 232 hif_tx_dma_start(); 233 } 234 stats->opackets += nb_pkts; 235 pfe_tx_do_cleanup(priv->pfe); 236 237 return nb_pkts; 238 } 239 240 static uint16_t 241 pfe_dummy_xmit_pkts(__rte_unused void *tx_queue, 242 __rte_unused struct rte_mbuf **tx_pkts, 243 __rte_unused uint16_t nb_pkts) 244 { 245 return 0; 246 } 247 248 static uint16_t 249 pfe_dummy_recv_pkts(__rte_unused void *rxq, 250 __rte_unused struct rte_mbuf **rx_pkts, 251 __rte_unused uint16_t nb_pkts) 252 { 253 return 0; 254 } 255 256 static int 257 pfe_eth_open(struct rte_eth_dev *dev) 258 { 259 struct pfe_eth_priv_s *priv = dev->data->dev_private; 260 struct hif_client_s *client; 261 struct hif_shm *hif_shm; 262 int rc; 263 264 /* Register client driver with HIF */ 265 client = &priv->client; 266 267 if (client->pfe) { 268 hif_shm = client->pfe->hif.shm; 269 /* TODO please remove the below code of if block, once we add 270 * the proper cleanup in eth_close 271 */ 272 if (!test_bit(PFE_CL_GEM0 + priv->id, 273 &hif_shm->g_client_status[0])) { 274 /* Register client driver with HIF */ 275 memset(client, 0, sizeof(*client)); 276 client->id = PFE_CL_GEM0 + priv->id; 277 client->tx_qn = emac_txq_cnt; 278 client->rx_qn = EMAC_RXQ_CNT; 279 client->priv = priv; 280 client->pfe = priv->pfe; 281 client->port_id = dev->data->port_id; 282 client->event_handler = pfe_eth_event_handler; 283 284 client->tx_qsize = EMAC_TXQ_DEPTH; 285 client->rx_qsize = EMAC_RXQ_DEPTH; 286 287 rc = hif_lib_client_register(client); 288 if (rc) { 289 PFE_PMD_ERR("hif_lib_client_register(%d)" 290 " failed", client->id); 291 goto err0; 292 } 293 } else { 294 /* Freeing the packets if already exists */ 295 int ret = 0; 296 struct rte_mbuf *rx_pkts[32]; 297 /* TODO multiqueue support */ 298 ret = hif_lib_receive_pkt(&client->rx_q[0], 299 hif_shm->pool, rx_pkts, 32); 300 while (ret) { 301 int i; 302 for (i = 0; i < ret; i++) 303 rte_pktmbuf_free(rx_pkts[i]); 304 ret = hif_lib_receive_pkt(&client->rx_q[0], 305 hif_shm->pool, 306 rx_pkts, 32); 307 } 308 } 309 } else { 310 /* Register client driver with HIF */ 311 memset(client, 0, sizeof(*client)); 312 client->id = PFE_CL_GEM0 + priv->id; 313 client->tx_qn = emac_txq_cnt; 314 client->rx_qn = EMAC_RXQ_CNT; 315 client->priv = priv; 316 client->pfe = priv->pfe; 317 client->port_id = dev->data->port_id; 318 client->event_handler = pfe_eth_event_handler; 319 320 client->tx_qsize = EMAC_TXQ_DEPTH; 321 client->rx_qsize = EMAC_RXQ_DEPTH; 322 323 rc = hif_lib_client_register(client); 324 if (rc) { 325 PFE_PMD_ERR("hif_lib_client_register(%d) failed", 326 client->id); 327 goto err0; 328 } 329 } 330 rc = pfe_eth_start(priv); 331 dev->rx_pkt_burst = &pfe_recv_pkts; 332 dev->tx_pkt_burst = &pfe_xmit_pkts; 333 /* If no prefetch is configured. */ 334 if (getenv("PFE_INTR_SUPPORT")) { 335 dev->rx_pkt_burst = &pfe_recv_pkts_on_intr; 336 PFE_PMD_INFO("PFE INTERRUPT Mode enabled"); 337 } 338 339 340 err0: 341 return rc; 342 } 343 344 static int 345 pfe_eth_open_cdev(struct pfe_eth_priv_s *priv) 346 { 347 int pfe_cdev_fd; 348 349 if (priv == NULL) 350 return -1; 351 352 pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY); 353 if (pfe_cdev_fd < 0) { 354 PFE_PMD_WARN("Unable to open PFE device file (%s).\n", 355 PFE_CDEV_PATH); 356 PFE_PMD_WARN("Link status update will not be available.\n"); 357 priv->link_fd = PFE_CDEV_INVALID_FD; 358 return -1; 359 } 360 361 priv->link_fd = pfe_cdev_fd; 362 363 return 0; 364 } 365 366 static void 367 pfe_eth_close_cdev(struct pfe_eth_priv_s *priv) 368 { 369 if (priv == NULL) 370 return; 371 372 if (priv->link_fd != PFE_CDEV_INVALID_FD) { 373 close(priv->link_fd); 374 priv->link_fd = PFE_CDEV_INVALID_FD; 375 } 376 } 377 378 static void 379 pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/) 380 { 381 struct pfe_eth_priv_s *priv = dev->data->dev_private; 382 383 gemac_disable(priv->EMAC_baseaddr); 384 gpi_disable(priv->GPI_baseaddr); 385 386 dev->rx_pkt_burst = &pfe_dummy_recv_pkts; 387 dev->tx_pkt_burst = &pfe_dummy_xmit_pkts; 388 } 389 390 static void 391 pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe) 392 { 393 PMD_INIT_FUNC_TRACE(); 394 395 pfe_eth_stop(dev); 396 /* Close the device file for link status */ 397 pfe_eth_close_cdev(dev->data->dev_private); 398 399 rte_eth_dev_release_port(dev); 400 pfe->nb_devs--; 401 } 402 403 static void 404 pfe_eth_close(struct rte_eth_dev *dev) 405 { 406 if (!dev) 407 return; 408 409 if (!g_pfe) 410 return; 411 412 pfe_eth_exit(dev, g_pfe); 413 414 if (g_pfe->nb_devs == 0) { 415 pfe_hif_exit(g_pfe); 416 pfe_hif_lib_exit(g_pfe); 417 rte_free(g_pfe); 418 g_pfe = NULL; 419 } 420 } 421 422 static int 423 pfe_eth_configure(struct rte_eth_dev *dev __rte_unused) 424 { 425 return 0; 426 } 427 428 static int 429 pfe_eth_info(struct rte_eth_dev *dev, 430 struct rte_eth_dev_info *dev_info) 431 { 432 dev_info->max_mac_addrs = PFE_MAX_MACS; 433 dev_info->max_rx_queues = dev->data->nb_rx_queues; 434 dev_info->max_tx_queues = dev->data->nb_tx_queues; 435 dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE; 436 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 437 dev_info->rx_offload_capa = dev_rx_offloads_sup; 438 dev_info->tx_offload_capa = dev_tx_offloads_sup; 439 if (pfe_svr == SVR_LS1012A_REV1) { 440 dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD; 441 dev_info->max_mtu = MAX_MTU_ON_REV1; 442 } else { 443 dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE; 444 dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD; 445 } 446 447 return 0; 448 } 449 450 /* Only first mb_pool given on first call of this API will be used 451 * in whole system, also nb_rx_desc and rx_conf are unused params 452 */ 453 static int 454 pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 455 __rte_unused uint16_t nb_rx_desc, 456 __rte_unused unsigned int socket_id, 457 __rte_unused const struct rte_eth_rxconf *rx_conf, 458 struct rte_mempool *mb_pool) 459 { 460 int rc = 0; 461 struct pfe *pfe; 462 struct pfe_eth_priv_s *priv = dev->data->dev_private; 463 464 pfe = priv->pfe; 465 466 if (queue_idx >= EMAC_RXQ_CNT) { 467 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d", 468 queue_idx, EMAC_RXQ_CNT); 469 return -1; 470 } 471 472 if (!pfe->hif.setuped) { 473 rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool); 474 if (rc) { 475 PFE_PMD_ERR("Could not allocate buffer descriptors"); 476 return -1; 477 } 478 479 pfe->hif.shm->pool = mb_pool; 480 if (pfe_hif_init_buffers(&pfe->hif)) { 481 PFE_PMD_ERR("Could not initialize buffer descriptors"); 482 return -1; 483 } 484 hif_init(); 485 hif_rx_enable(); 486 hif_tx_enable(); 487 pfe->hif.setuped = 1; 488 } 489 dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx]; 490 priv->client.rx_q[queue_idx].queue_id = queue_idx; 491 492 return 0; 493 } 494 495 static void 496 pfe_rx_queue_release(void *q __rte_unused) 497 { 498 PMD_INIT_FUNC_TRACE(); 499 } 500 501 static void 502 pfe_tx_queue_release(void *q __rte_unused) 503 { 504 PMD_INIT_FUNC_TRACE(); 505 } 506 507 static int 508 pfe_tx_queue_setup(struct rte_eth_dev *dev, 509 uint16_t queue_idx, 510 __rte_unused uint16_t nb_desc, 511 __rte_unused unsigned int socket_id, 512 __rte_unused const struct rte_eth_txconf *tx_conf) 513 { 514 struct pfe_eth_priv_s *priv = dev->data->dev_private; 515 516 if (queue_idx >= emac_txq_cnt) { 517 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d", 518 queue_idx, emac_txq_cnt); 519 return -1; 520 } 521 dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx]; 522 priv->client.tx_q[queue_idx].queue_id = queue_idx; 523 return 0; 524 } 525 526 static const uint32_t * 527 pfe_supported_ptypes_get(struct rte_eth_dev *dev) 528 { 529 static const uint32_t ptypes[] = { 530 /*todo -= add more types */ 531 RTE_PTYPE_L2_ETHER, 532 RTE_PTYPE_L3_IPV4, 533 RTE_PTYPE_L3_IPV4_EXT, 534 RTE_PTYPE_L3_IPV6, 535 RTE_PTYPE_L3_IPV6_EXT, 536 RTE_PTYPE_L4_TCP, 537 RTE_PTYPE_L4_UDP, 538 RTE_PTYPE_L4_SCTP 539 }; 540 541 if (dev->rx_pkt_burst == pfe_recv_pkts || 542 dev->rx_pkt_burst == pfe_recv_pkts_on_intr) 543 return ptypes; 544 return NULL; 545 } 546 547 static inline int 548 pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev, 549 struct rte_eth_link *link) 550 { 551 struct rte_eth_link *dst = link; 552 struct rte_eth_link *src = &dev->data->dev_link; 553 554 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 555 *(uint64_t *)src) == 0) 556 return -1; 557 558 return 0; 559 } 560 561 static inline int 562 pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev, 563 struct rte_eth_link *link) 564 { 565 struct rte_eth_link *dst = &dev->data->dev_link; 566 struct rte_eth_link *src = link; 567 568 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 569 *(uint64_t *)src) == 0) 570 return -1; 571 572 return 0; 573 } 574 575 static int 576 pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) 577 { 578 int ret, ioctl_cmd = 0; 579 struct pfe_eth_priv_s *priv = dev->data->dev_private; 580 struct rte_eth_link link, old; 581 unsigned int lstatus = 1; 582 583 if (dev == NULL) { 584 PFE_PMD_ERR("Invalid device in link_update.\n"); 585 return 0; 586 } 587 588 memset(&old, 0, sizeof(old)); 589 memset(&link, 0, sizeof(struct rte_eth_link)); 590 591 pfe_eth_atomic_read_link_status(dev, &old); 592 593 /* Read from PFE CDEV, status of link, if file was successfully 594 * opened. 595 */ 596 if (priv->link_fd != PFE_CDEV_INVALID_FD) { 597 if (priv->id == 0) 598 ioctl_cmd = PFE_CDEV_ETH0_STATE_GET; 599 if (priv->id == 1) 600 ioctl_cmd = PFE_CDEV_ETH1_STATE_GET; 601 602 ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus); 603 if (ret != 0) { 604 PFE_PMD_ERR("Unable to fetch link status (ioctl)\n"); 605 /* use dummy link value */ 606 link.link_status = 1; 607 } 608 PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n", 609 lstatus, priv->id); 610 } 611 612 if (old.link_status == lstatus) { 613 /* no change in status */ 614 PFE_PMD_DEBUG("No change in link status; Not updating.\n"); 615 return -1; 616 } 617 618 link.link_status = lstatus; 619 link.link_speed = ETH_LINK_SPEED_1G; 620 link.link_duplex = ETH_LINK_FULL_DUPLEX; 621 link.link_autoneg = ETH_LINK_AUTONEG; 622 623 pfe_eth_atomic_write_link_status(dev, &link); 624 625 PFE_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id, 626 link.link_status ? "up" : "down"); 627 628 return 0; 629 } 630 631 static int 632 pfe_promiscuous_enable(struct rte_eth_dev *dev) 633 { 634 struct pfe_eth_priv_s *priv = dev->data->dev_private; 635 636 priv->promisc = 1; 637 dev->data->promiscuous = 1; 638 gemac_enable_copy_all(priv->EMAC_baseaddr); 639 640 return 0; 641 } 642 643 static int 644 pfe_promiscuous_disable(struct rte_eth_dev *dev) 645 { 646 struct pfe_eth_priv_s *priv = dev->data->dev_private; 647 648 priv->promisc = 0; 649 dev->data->promiscuous = 0; 650 gemac_disable_copy_all(priv->EMAC_baseaddr); 651 652 return 0; 653 } 654 655 static int 656 pfe_allmulticast_enable(struct rte_eth_dev *dev) 657 { 658 struct pfe_eth_priv_s *priv = dev->data->dev_private; 659 struct pfe_mac_addr hash_addr; /* hash register structure */ 660 661 /* Set the hash to rx all multicast frames */ 662 hash_addr.bottom = 0xFFFFFFFF; 663 hash_addr.top = 0xFFFFFFFF; 664 gemac_set_hash(priv->EMAC_baseaddr, &hash_addr); 665 dev->data->all_multicast = 1; 666 667 return 0; 668 } 669 670 static int 671 pfe_link_down(struct rte_eth_dev *dev) 672 { 673 pfe_eth_stop(dev); 674 return 0; 675 } 676 677 static int 678 pfe_link_up(struct rte_eth_dev *dev) 679 { 680 struct pfe_eth_priv_s *priv = dev->data->dev_private; 681 682 pfe_eth_start(priv); 683 return 0; 684 } 685 686 static int 687 pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 688 { 689 int ret; 690 struct pfe_eth_priv_s *priv = dev->data->dev_private; 691 uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 692 693 /*TODO Support VLAN*/ 694 ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size); 695 if (!ret) 696 dev->data->mtu = mtu; 697 698 return ret; 699 } 700 701 /* pfe_eth_enet_addr_byte_mac 702 */ 703 static int 704 pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr, 705 struct pfe_mac_addr *enet_addr) 706 { 707 if (!enet_byte_addr || !enet_addr) { 708 return -1; 709 710 } else { 711 enet_addr->bottom = enet_byte_addr[0] | 712 (enet_byte_addr[1] << 8) | 713 (enet_byte_addr[2] << 16) | 714 (enet_byte_addr[3] << 24); 715 enet_addr->top = enet_byte_addr[4] | 716 (enet_byte_addr[5] << 8); 717 return 0; 718 } 719 } 720 721 static int 722 pfe_dev_set_mac_addr(struct rte_eth_dev *dev, 723 struct rte_ether_addr *addr) 724 { 725 struct pfe_eth_priv_s *priv = dev->data->dev_private; 726 struct pfe_mac_addr spec_addr; 727 int ret; 728 729 ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr); 730 if (ret) 731 return ret; 732 733 gemac_set_laddrN(priv->EMAC_baseaddr, 734 (struct pfe_mac_addr *)&spec_addr, 1); 735 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); 736 return 0; 737 } 738 739 static int 740 pfe_stats_get(struct rte_eth_dev *dev, 741 struct rte_eth_stats *stats) 742 { 743 struct pfe_eth_priv_s *priv = dev->data->dev_private; 744 struct rte_eth_stats *eth_stats = &priv->stats; 745 746 if (stats == NULL) 747 return -1; 748 749 memset(stats, 0, sizeof(struct rte_eth_stats)); 750 751 stats->ipackets = eth_stats->ipackets; 752 stats->ibytes = eth_stats->ibytes; 753 stats->opackets = eth_stats->opackets; 754 stats->obytes = eth_stats->obytes; 755 756 return 0; 757 } 758 759 static const struct eth_dev_ops ops = { 760 .dev_start = pfe_eth_open, 761 .dev_stop = pfe_eth_stop, 762 .dev_close = pfe_eth_close, 763 .dev_configure = pfe_eth_configure, 764 .dev_infos_get = pfe_eth_info, 765 .rx_queue_setup = pfe_rx_queue_setup, 766 .rx_queue_release = pfe_rx_queue_release, 767 .tx_queue_setup = pfe_tx_queue_setup, 768 .tx_queue_release = pfe_tx_queue_release, 769 .dev_supported_ptypes_get = pfe_supported_ptypes_get, 770 .link_update = pfe_eth_link_update, 771 .promiscuous_enable = pfe_promiscuous_enable, 772 .promiscuous_disable = pfe_promiscuous_disable, 773 .allmulticast_enable = pfe_allmulticast_enable, 774 .dev_set_link_down = pfe_link_down, 775 .dev_set_link_up = pfe_link_up, 776 .mtu_set = pfe_mtu_set, 777 .mac_addr_set = pfe_dev_set_mac_addr, 778 .stats_get = pfe_stats_get, 779 }; 780 781 static int 782 pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id) 783 { 784 struct rte_eth_dev *eth_dev = NULL; 785 struct pfe_eth_priv_s *priv = NULL; 786 struct ls1012a_eth_platform_data *einfo; 787 struct ls1012a_pfe_platform_data *pfe_info; 788 struct rte_ether_addr addr; 789 int err; 790 791 eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv)); 792 if (eth_dev == NULL) 793 return -ENOMEM; 794 795 /* Extract pltform data */ 796 pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data; 797 if (!pfe_info) { 798 PFE_PMD_ERR("pfe missing additional platform data"); 799 err = -ENODEV; 800 goto err0; 801 } 802 803 einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata; 804 805 /* einfo never be NULL, but no harm in having this check */ 806 if (!einfo) { 807 PFE_PMD_ERR("pfe missing additional gemacs platform data"); 808 err = -ENODEV; 809 goto err0; 810 } 811 812 priv = eth_dev->data->dev_private; 813 priv->ndev = eth_dev; 814 priv->id = einfo[id].gem_id; 815 priv->pfe = pfe; 816 817 pfe->eth.eth_priv[id] = priv; 818 819 /* Set the info in the priv to the current info */ 820 priv->einfo = &einfo[id]; 821 priv->EMAC_baseaddr = cbus_emac_base[id]; 822 priv->PHY_baseaddr = cbus_emac_base[id]; 823 priv->GPI_baseaddr = cbus_gpi_base[id]; 824 825 #define HIF_GEMAC_TMUQ_BASE 6 826 priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2); 827 priv->high_tmu_q = priv->low_tmu_q + 1; 828 829 rte_spinlock_init(&priv->lock); 830 831 /* Copy the station address into the dev structure, */ 832 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", 833 ETHER_ADDR_LEN * PFE_MAX_MACS, 0); 834 if (eth_dev->data->mac_addrs == NULL) { 835 PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses", 836 ETHER_ADDR_LEN * PFE_MAX_MACS); 837 err = -ENOMEM; 838 goto err0; 839 } 840 841 memcpy(addr.addr_bytes, priv->einfo->mac_addr, 842 ETH_ALEN); 843 844 pfe_dev_set_mac_addr(eth_dev, &addr); 845 rte_ether_addr_copy(&addr, ð_dev->data->mac_addrs[0]); 846 847 eth_dev->data->mtu = 1500; 848 eth_dev->dev_ops = &ops; 849 pfe_eth_stop(eth_dev); 850 pfe_gemac_init(priv); 851 852 eth_dev->data->nb_rx_queues = 1; 853 eth_dev->data->nb_tx_queues = 1; 854 855 /* For link status, open the PFE CDEV; Error from this function 856 * is silently ignored; In case of error, the link status will not 857 * be available. 858 */ 859 pfe_eth_open_cdev(priv); 860 rte_eth_dev_probing_finish(eth_dev); 861 862 return 0; 863 err0: 864 rte_eth_dev_release_port(eth_dev); 865 return err; 866 } 867 868 static int 869 pfe_get_gemac_if_proprties(struct pfe *pfe, 870 __rte_unused const struct device_node *parent, 871 unsigned int port, unsigned int if_cnt, 872 struct ls1012a_pfe_platform_data *pdata) 873 { 874 const struct device_node *gem = NULL; 875 size_t size; 876 unsigned int ii = 0, phy_id = 0; 877 const u32 *addr; 878 const void *mac_addr; 879 880 for (ii = 0; ii < if_cnt; ii++) { 881 gem = of_get_next_child(parent, gem); 882 if (!gem) 883 goto err; 884 addr = of_get_property(gem, "reg", &size); 885 if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port)) 886 break; 887 } 888 889 if (ii >= if_cnt) { 890 PFE_PMD_ERR("Failed to find interface = %d", if_cnt); 891 goto err; 892 } 893 894 pdata->ls1012a_eth_pdata[port].gem_id = port; 895 896 mac_addr = of_get_mac_address(gem); 897 898 if (mac_addr) { 899 memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr, 900 ETH_ALEN); 901 } 902 903 addr = of_get_property(gem, "fsl,mdio-mux-val", &size); 904 if (!addr) { 905 PFE_PMD_ERR("Invalid mdio-mux-val...."); 906 } else { 907 phy_id = rte_be_to_cpu_32((unsigned int)*addr); 908 pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id; 909 } 910 if (pdata->ls1012a_eth_pdata[port].phy_id < 32) 911 pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] = 912 pdata->ls1012a_eth_pdata[port].mdio_muxval; 913 914 return 0; 915 916 err: 917 return -1; 918 } 919 920 /* Parse integer from integer argument */ 921 static int 922 parse_integer_arg(const char *key __rte_unused, 923 const char *value, void *extra_args) 924 { 925 int i; 926 char *end; 927 errno = 0; 928 929 i = strtol(value, &end, 10); 930 if (*end != 0 || errno != 0 || i < 0 || i > 1) { 931 PFE_PMD_ERR("Supported Port IDS are 0 and 1"); 932 return -EINVAL; 933 } 934 935 *((uint32_t *)extra_args) = i; 936 937 return 0; 938 } 939 940 static int 941 pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params, 942 struct rte_vdev_device *dev) 943 { 944 struct rte_kvargs *kvlist = NULL; 945 int ret = 0; 946 947 static const char * const pfe_vdev_valid_params[] = { 948 PFE_VDEV_GEM_ID_ARG, 949 NULL 950 }; 951 952 const char *input_args = rte_vdev_device_args(dev); 953 954 if (!input_args) 955 return -1; 956 957 kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params); 958 if (kvlist == NULL) 959 return -1; 960 961 ret = rte_kvargs_process(kvlist, 962 PFE_VDEV_GEM_ID_ARG, 963 &parse_integer_arg, 964 ¶ms->gem_id); 965 rte_kvargs_free(kvlist); 966 return ret; 967 } 968 969 static int 970 pmd_pfe_probe(struct rte_vdev_device *vdev) 971 { 972 const u32 *prop; 973 const struct device_node *np; 974 const char *name; 975 const uint32_t *addr; 976 uint64_t cbus_addr, ddr_size, cbus_size; 977 int rc = -1, fd = -1, gem_id; 978 unsigned int ii, interface_count = 0; 979 size_t size = 0; 980 struct pfe_vdev_init_params init_params = { 981 .gem_id = -1 982 }; 983 984 name = rte_vdev_device_name(vdev); 985 rc = pfe_parse_vdev_init_params(&init_params, vdev); 986 if (rc < 0) 987 return -EINVAL; 988 989 PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s Given gem-id %d", 990 name, init_params.gem_id); 991 992 if (g_pfe) { 993 if (g_pfe->nb_devs >= g_pfe->max_intf) { 994 PFE_PMD_ERR("PFE %d dev already created Max is %d", 995 g_pfe->nb_devs, g_pfe->max_intf); 996 return -EINVAL; 997 } 998 goto eth_init; 999 } 1000 1001 g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE); 1002 if (g_pfe == NULL) 1003 return -EINVAL; 1004 1005 /* Load the device-tree driver */ 1006 rc = of_init(); 1007 if (rc) { 1008 PFE_PMD_ERR("of_init failed with ret: %d", rc); 1009 goto err; 1010 } 1011 1012 np = of_find_compatible_node(NULL, NULL, "fsl,pfe"); 1013 if (!np) { 1014 PFE_PMD_ERR("Invalid device node"); 1015 rc = -EINVAL; 1016 goto err; 1017 } 1018 1019 addr = of_get_address(np, 0, &cbus_size, NULL); 1020 if (!addr) { 1021 PFE_PMD_ERR("of_get_address cannot return qman address\n"); 1022 goto err; 1023 } 1024 cbus_addr = of_translate_address(np, addr); 1025 if (!cbus_addr) { 1026 PFE_PMD_ERR("of_translate_address failed\n"); 1027 goto err; 1028 } 1029 1030 addr = of_get_address(np, 1, &ddr_size, NULL); 1031 if (!addr) { 1032 PFE_PMD_ERR("of_get_address cannot return qman address\n"); 1033 goto err; 1034 } 1035 1036 g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr); 1037 if (!g_pfe->ddr_phys_baseaddr) { 1038 PFE_PMD_ERR("of_translate_address failed\n"); 1039 goto err; 1040 } 1041 1042 g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr); 1043 g_pfe->ddr_size = ddr_size; 1044 g_pfe->cbus_size = cbus_size; 1045 1046 fd = open("/dev/mem", O_RDWR); 1047 g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE, 1048 MAP_SHARED, fd, cbus_addr); 1049 close(fd); 1050 if (g_pfe->cbus_baseaddr == MAP_FAILED) { 1051 PFE_PMD_ERR("Can not map cbus base"); 1052 rc = -EINVAL; 1053 goto err; 1054 } 1055 1056 /* Read interface count */ 1057 prop = of_get_property(np, "fsl,pfe-num-interfaces", &size); 1058 if (!prop) { 1059 PFE_PMD_ERR("Failed to read number of interfaces"); 1060 rc = -ENXIO; 1061 goto err_prop; 1062 } 1063 1064 interface_count = rte_be_to_cpu_32((unsigned int)*prop); 1065 if (interface_count <= 0) { 1066 PFE_PMD_ERR("No ethernet interface count : %d", 1067 interface_count); 1068 rc = -ENXIO; 1069 goto err_prop; 1070 } 1071 PFE_PMD_INFO("num interfaces = %d ", interface_count); 1072 1073 g_pfe->max_intf = interface_count; 1074 g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff; 1075 1076 for (ii = 0; ii < interface_count; ii++) { 1077 pfe_get_gemac_if_proprties(g_pfe, np, ii, interface_count, 1078 &g_pfe->platform_data); 1079 } 1080 1081 pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr, 1082 g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size); 1083 1084 PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION)); 1085 PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION)); 1086 1087 PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION)); 1088 PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION)); 1089 1090 PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION)); 1091 PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION)); 1092 PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION)); 1093 1094 PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION)); 1095 PFE_PMD_INFO("HIF NOPCY version: %x", readl(HIF_NOCPY_VERSION)); 1096 1097 cbus_emac_base[0] = EMAC1_BASE_ADDR; 1098 cbus_emac_base[1] = EMAC2_BASE_ADDR; 1099 1100 cbus_gpi_base[0] = EGPI1_BASE_ADDR; 1101 cbus_gpi_base[1] = EGPI2_BASE_ADDR; 1102 1103 rc = pfe_hif_lib_init(g_pfe); 1104 if (rc < 0) 1105 goto err_hif_lib; 1106 1107 rc = pfe_hif_init(g_pfe); 1108 if (rc < 0) 1109 goto err_hif; 1110 pfe_soc_version_get(); 1111 eth_init: 1112 if (init_params.gem_id < 0) 1113 gem_id = g_pfe->nb_devs; 1114 else 1115 gem_id = init_params.gem_id; 1116 1117 PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d(given =%d)", 1118 name, gem_id, init_params.gem_id); 1119 1120 rc = pfe_eth_init(vdev, g_pfe, gem_id); 1121 if (rc < 0) 1122 goto err_eth; 1123 else 1124 g_pfe->nb_devs++; 1125 1126 return 0; 1127 1128 err_eth: 1129 pfe_hif_exit(g_pfe); 1130 1131 err_hif: 1132 pfe_hif_lib_exit(g_pfe); 1133 1134 err_hif_lib: 1135 err_prop: 1136 munmap(g_pfe->cbus_baseaddr, cbus_size); 1137 err: 1138 rte_free(g_pfe); 1139 return rc; 1140 } 1141 1142 static int 1143 pmd_pfe_remove(struct rte_vdev_device *vdev) 1144 { 1145 const char *name; 1146 struct rte_eth_dev *eth_dev = NULL; 1147 1148 name = rte_vdev_device_name(vdev); 1149 if (name == NULL) 1150 return -EINVAL; 1151 1152 PFE_PMD_INFO("Closing eventdev sw device %s", name); 1153 1154 if (!g_pfe) 1155 return 0; 1156 1157 eth_dev = rte_eth_dev_allocated(name); 1158 if (eth_dev == NULL) 1159 return -ENODEV; 1160 1161 pfe_eth_exit(eth_dev, g_pfe); 1162 munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size); 1163 1164 if (g_pfe->nb_devs == 0) { 1165 pfe_hif_exit(g_pfe); 1166 pfe_hif_lib_exit(g_pfe); 1167 rte_free(g_pfe); 1168 g_pfe = NULL; 1169 } 1170 return 0; 1171 } 1172 1173 static 1174 struct rte_vdev_driver pmd_pfe_drv = { 1175 .probe = pmd_pfe_probe, 1176 .remove = pmd_pfe_remove, 1177 }; 1178 1179 RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv); 1180 RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> "); 1181 1182 RTE_INIT(pfe_pmd_init_log) 1183 { 1184 pfe_logtype_pmd = rte_log_register("pmd.net.pfe"); 1185 if (pfe_logtype_pmd >= 0) 1186 rte_log_set_level(pfe_logtype_pmd, RTE_LOG_NOTICE); 1187 } 1188