/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <rte_kvargs.h>
#include <ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_ether.h>
#include <dpaa_of.h>

#include "pfe_logs.h"
#include "pfe_mod.h"

#define PFE_MAX_MACS 1 /* HW supports up to 4 MACs per IF, only 1 is used */
#define PFE_VDEV_GEM_ID_ARG	"intf"

struct pfe_vdev_init_params {
	int8_t	gem_id;
};

static struct pfe *g_pfe;
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM;

/* TODO: make pfe_svr a runtime option.
 * Driver should be able to get the SVR
 * information from HW.
 */
unsigned int pfe_svr = SVR_LS1012A_REV1;
static void *cbus_emac_base[3];
static void *cbus_gpi_base[3];

/* pfe_gemac_init
 * Apply the default GEMAC configuration: 1 Gb/s full duplex with
 * broadcast, stacked VLAN, Rx pause and Rx checksum offload enabled.
 */
static int
pfe_gemac_init(struct pfe_eth_priv_s *priv)
{
	struct gemac_cfg cfg;

	cfg.speed = SPEED_1000M;
	cfg.duplex = DUPLEX_FULL;

	gemac_set_config(priv->EMAC_baseaddr, &cfg);
	gemac_allow_broadcast(priv->EMAC_baseaddr);
	gemac_enable_1536_rx(priv->EMAC_baseaddr);
	gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
	gemac_enable_pause_rx(priv->EMAC_baseaddr);
	gemac_set_bus_width(priv->EMAC_baseaddr, 64);
	gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);

	return 0;
}

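/* Read the SoC version register (SVR) exposed by the kernel through
 * PFE_SOC_ID_FILE and cache it in pfe_svr; on failure, the compile-time
 * default (SVR_LS1012A_REV1) is kept.
 */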
static void
pfe_soc_version_get(void)
{
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(PFE_SOC_ID_FILE, "r");
	if (!svr_file) {
		PFE_PMD_ERR("Unable to open SoC device");
		return; /* Not supported on this infra */
	}

	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		pfe_svr = svr_ver;
	else
		PFE_PMD_ERR("Unable to read SoC device");

	fclose(svr_file);
}

static int pfe_eth_start(struct pfe_eth_priv_s *priv)
{
	gpi_enable(priv->GPI_baseaddr);
	gemac_enable(priv->EMAC_baseaddr);

	return 0;
}

static void
pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num,
		  int __rte_unused from_tx, __rte_unused int n_desc)
{
	struct rte_mbuf *mbuf;
	unsigned int flags;

	/* Clean HIF and client queue */
	while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
						    tx_q_num, &flags,
						    HIF_TX_DESC_NT))) {
		mbuf->next = NULL;
		mbuf->nb_segs = 1;
		rte_pktmbuf_free(mbuf);
	}
}

static void
pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
{
	unsigned int ii;

	for (ii = 0; ii < emac_txq_cnt; ii++)
		pfe_eth_flush_txQ(priv, ii, 0, 0);
}

static int
pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
{
	struct pfe_eth_priv_s *priv = data;

	switch (event) {
	case EVENT_TXDONE_IND:
		pfe_eth_flush_tx(priv);
		hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
		break;
	case EVENT_HIGH_RX_WM:
	default:
		break;
	}

	return 0;
}

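/* Interrupt-mode Rx burst: process completed HIF descriptors first; if
 * nothing was received, re-arm the HIF Rx interrupt and block in
 * epoll_wait() for up to 1 ms so an idle poll thread yields the CPU.
 */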
static uint16_t
pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hif_client_rx_queue *queue = rxq;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct epoll_event epoll_ev;
	uint64_t ticks = 1; /* 1 msec */
	int ret;
	int have_something, work_done;

#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)

	/* TODO: can we remove this cleanup from here? */
	pfe_tx_do_cleanup(priv->pfe);
	have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
	work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
					rx_pkts, nb_pkts);

	if (!have_something || !work_done) {
		writel(RESET_STATUS, HIF_INT_SRC);
		writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
		ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
		if (ret < 0 && errno != EINTR)
			PFE_PMD_ERR("epoll_wait failed with %d", errno);
	}

	return work_done;
}

static uint16_t
pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hif_client_rx_queue *queue = rxq;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct rte_mempool *pool;

	/* TODO: can we remove this cleanup from here? */
	pfe_tx_do_cleanup(priv->pfe);
	pfe_hif_rx_process(priv->pfe, nb_pkts);
	pool = priv->pfe->hif.shm->pool;

	return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
}

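/* Tx burst: multi-segment mbufs are pushed to the HIF queue one segment at
 * a time, with HIF_FIRST_BUFFER on the head, HIF_LAST_BUFFER | HIF_DATA_VALID
 * on the tail, and the DMA kicked once per packet.
 */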
static uint16_t
pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct hif_client_tx_queue *queue = tx_queue;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct rte_eth_stats *stats = &priv->stats;
	int i;

	for (i = 0; i < nb_pkts; i++) {
		if (tx_pkts[i]->nb_segs > 1) {
			struct rte_mbuf *mbuf;
			int j;

			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
				tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
				tx_pkts[i]);

			mbuf = tx_pkts[i]->next;
			for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
				hif_lib_xmit_pkt(&priv->client,
					queue->queue_id,
					(void *)(size_t)rte_pktmbuf_iova(mbuf),
					mbuf->buf_addr + mbuf->data_off,
					mbuf->data_len,
					0x0, 0x0, mbuf);
				mbuf = mbuf->next;
			}

			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(mbuf),
				mbuf->buf_addr + mbuf->data_off,
				mbuf->data_len,
				0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
				mbuf);
		} else {
			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
				tx_pkts[i]->pkt_len, 0 /*ctrl*/,
				HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
				HIF_DATA_VALID,
				tx_pkts[i]);
		}
		stats->obytes += tx_pkts[i]->pkt_len;
		hif_tx_dma_start();
	}
	stats->opackets += nb_pkts;
	pfe_tx_do_cleanup(priv->pfe);

	return nb_pkts;
}

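/* dev_start callback: registers this port as a HIF client (once), drains any
 * stale Rx buffers left from a previous run, enables the GPI/GEMAC blocks and
 * selects the poll- or interrupt-mode Rx burst routine.
 */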
static int
pfe_eth_open(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct hif_client_s *client;
	struct hif_shm *hif_shm;
	int rc;

	/* Register client driver with HIF */
	client = &priv->client;

	if (client->pfe) {
		hif_shm = client->pfe->hif.shm;
		/* TODO: remove this re-registration block once proper
		 * cleanup is added in eth_close
		 */
		if (!test_bit(PFE_CL_GEM0 + priv->id,
			      &hif_shm->g_client_status[0])) {
			/* Register client driver with HIF */
			memset(client, 0, sizeof(*client));
			client->id = PFE_CL_GEM0 + priv->id;
			client->tx_qn = emac_txq_cnt;
			client->rx_qn = EMAC_RXQ_CNT;
			client->priv = priv;
			client->pfe = priv->pfe;
			client->port_id = dev->data->port_id;
			client->event_handler = pfe_eth_event_handler;

			client->tx_qsize = EMAC_TXQ_DEPTH;
			client->rx_qsize = EMAC_RXQ_DEPTH;

			rc = hif_lib_client_register(client);
			if (rc) {
				PFE_PMD_ERR("hif_lib_client_register(%d) failed",
					    client->id);
				goto err0;
			}
		} else {
			/* Free any packets already queued */
			int ret = 0;
			struct rte_mbuf *rx_pkts[32];
			/* TODO: multiqueue support */
			ret = hif_lib_receive_pkt(&client->rx_q[0],
						  hif_shm->pool, rx_pkts, 32);
			while (ret) {
				int i;
				for (i = 0; i < ret; i++)
					rte_pktmbuf_free(rx_pkts[i]);
				ret = hif_lib_receive_pkt(&client->rx_q[0],
							  hif_shm->pool,
							  rx_pkts, 32);
			}
		}
	} else {
		/* Register client driver with HIF */
		memset(client, 0, sizeof(*client));
		client->id = PFE_CL_GEM0 + priv->id;
		client->tx_qn = emac_txq_cnt;
		client->rx_qn = EMAC_RXQ_CNT;
		client->priv = priv;
		client->pfe = priv->pfe;
		client->port_id = dev->data->port_id;
		client->event_handler = pfe_eth_event_handler;

		client->tx_qsize = EMAC_TXQ_DEPTH;
		client->rx_qsize = EMAC_RXQ_DEPTH;

		rc = hif_lib_client_register(client);
		if (rc) {
			PFE_PMD_ERR("hif_lib_client_register(%d) failed",
				    client->id);
			goto err0;
		}
	}
	rc = pfe_eth_start(priv);
	dev->rx_pkt_burst = &pfe_recv_pkts;
	dev->tx_pkt_burst = &pfe_xmit_pkts;
	/* Switch to interrupt-driven Rx if requested via environment */
	if (getenv("PFE_INTR_SUPPORT")) {
		dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
		PFE_PMD_INFO("PFE INTERRUPT Mode enabled");
	}

err0:
	return rc;
}

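/* Open the PFE character device used only to query link state via ioctl();
 * failure is non-fatal and merely disables link status reporting.
 */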
static int
pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
{
	int pfe_cdev_fd;

	if (priv == NULL)
		return -1;

	pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY);
	if (pfe_cdev_fd < 0) {
		PFE_PMD_WARN("Unable to open PFE device file (%s).",
			     PFE_CDEV_PATH);
		PFE_PMD_WARN("Link status update will not be available.");
		priv->link_fd = PFE_CDEV_INVALID_FD;
		return -1;
	}

	priv->link_fd = pfe_cdev_fd;

	return 0;
}

static void
pfe_eth_close_cdev(struct pfe_eth_priv_s *priv)
{
	if (priv == NULL)
		return;

	if (priv->link_fd != PFE_CDEV_INVALID_FD) {
		close(priv->link_fd);
		priv->link_fd = PFE_CDEV_INVALID_FD;
	}
}

static int
pfe_eth_stop(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	dev->data->dev_started = 0;

	gemac_disable(priv->EMAC_baseaddr);
	gpi_disable(priv->GPI_baseaddr);

	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;

	return 0;
}

static int
pfe_eth_close(struct rte_eth_dev *dev)
{
	int ret;
	PMD_INIT_FUNC_TRACE();

	if (!dev)
		return -1;

	if (!g_pfe)
		return -1;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = pfe_eth_stop(dev);
	/* Close the device file for link status */
	pfe_eth_close_cdev(dev->data->dev_private);

	munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);
	g_pfe->nb_devs--;

	if (g_pfe->nb_devs == 0) {
		pfe_hif_exit(g_pfe);
		pfe_hif_lib_exit(g_pfe);
		rte_free(g_pfe);
		g_pfe = NULL;
	}

	return ret;
}

static int
pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
pfe_eth_info(struct rte_eth_dev *dev,
	     struct rte_eth_dev_info *dev_info)
{
	dev_info->max_mac_addrs = PFE_MAX_MACS;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->rx_offload_capa = dev_rx_offloads_sup;
	dev_info->tx_offload_capa = dev_tx_offloads_sup;
	if (pfe_svr == SVR_LS1012A_REV1) {
		dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
		dev_info->max_mtu = MAX_MTU_ON_REV1;
	} else {
		dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
		dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD;
	}

	return 0;
}

/* Only the first mb_pool given on the first call of this API is used
 * for the whole system; nb_rx_desc and rx_conf are unused parameters.
 */
static int
pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		   __rte_unused uint16_t nb_rx_desc,
		   __rte_unused unsigned int socket_id,
		   __rte_unused const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	int rc = 0;
	struct pfe *pfe;
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	pfe = priv->pfe;

	if (queue_idx >= EMAC_RXQ_CNT) {
		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
			    queue_idx, EMAC_RXQ_CNT);
		return -1;
	}

	if (!pfe->hif.setuped) {
		rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
		if (rc) {
			PFE_PMD_ERR("Could not allocate buffer descriptors");
			return -1;
		}

		pfe->hif.shm->pool = mb_pool;
		if (pfe_hif_init_buffers(&pfe->hif)) {
			PFE_PMD_ERR("Could not initialize buffer descriptors");
			return -1;
		}
		hif_init();
		hif_rx_enable();
		hif_tx_enable();
		pfe->hif.setuped = 1;
	}
	dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
	priv->client.rx_q[queue_idx].queue_id = queue_idx;

	return 0;
}

static int
pfe_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t queue_idx,
		   __rte_unused uint16_t nb_desc,
		   __rte_unused unsigned int socket_id,
		   __rte_unused const struct rte_eth_txconf *tx_conf)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	if (queue_idx >= emac_txq_cnt) {
		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
			    queue_idx, emac_txq_cnt);
		return -1;
	}
	dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
	priv->client.tx_q[queue_idx].queue_id = queue_idx;
	return 0;
}

static const uint32_t *
pfe_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* TODO: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP
	};

	if (dev->rx_pkt_burst == pfe_recv_pkts ||
	    dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
		return ptypes;
	return NULL;
}

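/* struct rte_eth_link is packed into 64 bits and 8-byte aligned, so a single
 * rte_atomic64_cmpset() gives an atomic read/write of the whole structure.
 */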
static inline int
pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev,
				 struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

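/* link_update callback: fetch the current link state over the cdev ioctl
 * (PFE_CDEV_ETHx_STATE_GET) and publish it atomically; without the cdev the
 * link is assumed up at 1 Gb/s full duplex.
 */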
static int
pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	int ret, ioctl_cmd = 0;
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct rte_eth_link link, old;
	unsigned int lstatus = 1;

	memset(&old, 0, sizeof(old));
	memset(&link, 0, sizeof(struct rte_eth_link));

	pfe_eth_atomic_read_link_status(dev, &old);

	/* Read the link status from the PFE CDEV, if the file was
	 * successfully opened.
	 */
	if (priv->link_fd != PFE_CDEV_INVALID_FD) {
		if (priv->id == 0)
			ioctl_cmd = PFE_CDEV_ETH0_STATE_GET;
		if (priv->id == 1)
			ioctl_cmd = PFE_CDEV_ETH1_STATE_GET;

		ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
		if (ret != 0) {
			PFE_PMD_ERR("Unable to fetch link status (ioctl)");
			return -1;
		}
		PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.",
			      lstatus, priv->id);
	}

	if (old.link_status == lstatus) {
		/* no change in status */
		PFE_PMD_DEBUG("No change in link status; not updating.");
		return -1;
	}

	link.link_status = lstatus;
	link.link_speed = RTE_ETH_LINK_SPEED_1G;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = RTE_ETH_LINK_AUTONEG;

	pfe_eth_atomic_write_link_status(dev, &link);

	PFE_PMD_INFO("Port (%d) link is %s", dev->data->port_id,
		     link.link_status ? "up" : "down");

	return 0;
}

static int
pfe_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	priv->promisc = 1;
	dev->data->promiscuous = 1;
	gemac_enable_copy_all(priv->EMAC_baseaddr);

	return 0;
}

static int
pfe_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	priv->promisc = 0;
	dev->data->promiscuous = 0;
	gemac_disable_copy_all(priv->EMAC_baseaddr);

	return 0;
}

static int
pfe_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct pfe_mac_addr hash_addr; /* hash register structure */

	/* Set the hash to rx all multicast frames */
	hash_addr.bottom = 0xFFFFFFFF;
	hash_addr.top = 0xFFFFFFFF;
	gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
	dev->data->all_multicast = 1;

	return 0;
}

static int
pfe_link_down(struct rte_eth_dev *dev)
{
	return pfe_eth_stop(dev);
}

static int
pfe_link_up(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	pfe_eth_start(priv);
	return 0;
}

static int
pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* TODO: support VLAN */
	return gemac_set_rx(priv->EMAC_baseaddr, frame_size);
}

/* pfe_eth_enet_addr_byte_mac
 * Pack a byte-array MAC address into the bottom/top word layout used by
 * the EMAC address registers.
 */
static int
pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
			   struct pfe_mac_addr *enet_addr)
{
	if (!enet_byte_addr || !enet_addr)
		return -1;

	enet_addr->bottom = enet_byte_addr[0] |
			    (enet_byte_addr[1] << 8) |
			    (enet_byte_addr[2] << 16) |
			    (enet_byte_addr[3] << 24);
	enet_addr->top = enet_byte_addr[4] |
			 (enet_byte_addr[5] << 8);
	return 0;
}

static int
pfe_dev_set_mac_addr(struct rte_eth_dev *dev,
		     struct rte_ether_addr *addr)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct pfe_mac_addr spec_addr;
	int ret;

	ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr);
	if (ret)
		return ret;

	gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr, 1);
	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
	return 0;
}

static int
pfe_stats_get(struct rte_eth_dev *dev,
	      struct rte_eth_stats *stats)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct rte_eth_stats *eth_stats = &priv->stats;

	if (stats == NULL)
		return -1;

	memset(stats, 0, sizeof(struct rte_eth_stats));

	stats->ipackets = eth_stats->ipackets;
	stats->ibytes = eth_stats->ibytes;
	stats->opackets = eth_stats->opackets;
	stats->obytes = eth_stats->obytes;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = pfe_eth_open,
	.dev_stop = pfe_eth_stop,
	.dev_close = pfe_eth_close,
	.dev_configure = pfe_eth_configure,
	.dev_infos_get = pfe_eth_info,
	.rx_queue_setup = pfe_rx_queue_setup,
	.tx_queue_setup = pfe_tx_queue_setup,
	.dev_supported_ptypes_get = pfe_supported_ptypes_get,
	.link_update = pfe_eth_link_update,
	.promiscuous_enable = pfe_promiscuous_enable,
	.promiscuous_disable = pfe_promiscuous_disable,
	.allmulticast_enable = pfe_allmulticast_enable,
	.dev_set_link_down = pfe_link_down,
	.dev_set_link_up = pfe_link_up,
	.mtu_set = pfe_mtu_set,
	.mac_addr_set = pfe_dev_set_mac_addr,
	.stats_get = pfe_stats_get,
};

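/* Allocate and initialize one ethdev per GEMAC: bind the EMAC/GPI register
 * windows and TMU queues for this gem-id, program the MAC address from the
 * device tree, and leave the port stopped until dev_start.
 */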
static int
pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pfe_eth_priv_s *priv = NULL;
	struct ls1012a_eth_platform_data *einfo;
	struct ls1012a_pfe_platform_data *pfe_info;
	struct rte_ether_addr addr;
	int err;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
	if (eth_dev == NULL)
		return -ENOMEM;

	/* Extract platform data */
	pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
	if (!pfe_info) {
		PFE_PMD_ERR("pfe missing additional platform data");
		err = -ENODEV;
		goto err0;
	}

	einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata;

	/* einfo should never be NULL, but there is no harm in checking */
	if (!einfo) {
		PFE_PMD_ERR("pfe missing additional gemacs platform data");
		err = -ENODEV;
		goto err0;
	}

	priv = eth_dev->data->dev_private;
	priv->ndev = eth_dev;
	priv->id = einfo[id].gem_id;
	priv->pfe = pfe;

	pfe->eth.eth_priv[id] = priv;

	/* Set the info in the priv to the current info */
	priv->einfo = &einfo[id];
	priv->EMAC_baseaddr = cbus_emac_base[id];
	priv->PHY_baseaddr = cbus_emac_base[id];
	priv->GPI_baseaddr = cbus_gpi_base[id];

#define HIF_GEMAC_TMUQ_BASE	6
	priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
	priv->high_tmu_q = priv->low_tmu_q + 1;

	rte_spinlock_init(&priv->lock);

	/* Copy the station address into the dev structure */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
			ETHER_ADDR_LEN * PFE_MAX_MACS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
			    ETHER_ADDR_LEN * PFE_MAX_MACS);
		err = -ENOMEM;
		goto err0;
	}

	memcpy(addr.addr_bytes, priv->einfo->mac_addr, ETH_ALEN);

	pfe_dev_set_mac_addr(eth_dev, &addr);
	rte_ether_addr_copy(&addr, &eth_dev->data->mac_addrs[0]);

	eth_dev->data->mtu = 1500;
	eth_dev->dev_ops = &ops;
	err = pfe_eth_stop(eth_dev);
	if (err != 0)
		goto err0;
	pfe_gemac_init(priv);

	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;

	/* For link status, open the PFE CDEV; errors from this call are
	 * silently ignored; without it the link status is simply not
	 * available.
	 */
	pfe_eth_open_cdev(priv);
	rte_eth_dev_probing_finish(eth_dev);

	return 0;
err0:
	rte_eth_dev_release_port(eth_dev);
	return err;
}

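/* Walk the "fsl,pfe" node's children to find the GEMAC whose "reg" property
 * matches @port and extract its MAC address and MDIO mux value into the
 * platform data.
 */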
static int
pfe_get_gemac_if_proprties(struct pfe *pfe,
		__rte_unused const struct device_node *parent,
		unsigned int port, unsigned int if_cnt,
		struct ls1012a_pfe_platform_data *pdata)
{
	const struct device_node *gem = NULL;
	size_t size;
	unsigned int ii = 0, phy_id = 0;
	const u32 *addr;
	const void *mac_addr;

	for (ii = 0; ii < if_cnt; ii++) {
		gem = of_get_next_child(parent, gem);
		if (!gem)
			goto err;
		addr = of_get_property(gem, "reg", &size);
		if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port))
			break;
	}

	if (ii >= if_cnt) {
		PFE_PMD_ERR("Failed to find interface %u", port);
		goto err;
	}

	pdata->ls1012a_eth_pdata[port].gem_id = port;

	mac_addr = of_get_mac_address(gem);

	if (mac_addr) {
		memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
		       ETH_ALEN);
	}

	addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
	if (!addr) {
		PFE_PMD_ERR("Invalid mdio-mux-val");
	} else {
		phy_id = rte_be_to_cpu_32((unsigned int)*addr);
		pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
	}
	if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
		pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
			pdata->ls1012a_eth_pdata[port].mdio_muxval;

	return 0;

err:
	return -1;
}

/* Parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int i;
	char *end;
	errno = 0;

	i = strtol(value, &end, 10);
	if (*end != 0 || errno != 0 || i < 0 || i > 1) {
		PFE_PMD_ERR("Supported Port IDs are 0 and 1");
		return -EINVAL;
	}

	/* extra_args points at pfe_vdev_init_params.gem_id, an int8_t */
	*((int8_t *)extra_args) = (int8_t)i;

	return 0;
}

static int
pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params,
			   struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	static const char * const pfe_vdev_valid_params[] = {
		PFE_VDEV_GEM_ID_ARG,
		NULL
	};

	const char *input_args = rte_vdev_device_args(dev);

	if (!input_args)
		return -1;

	kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params);
	if (kvlist == NULL)
		return -1;

	ret = rte_kvargs_process(kvlist,
				 PFE_VDEV_GEM_ID_ARG,
				 &parse_integer_arg,
				 &params->gem_id);
	rte_kvargs_free(kvlist);
	return ret;
}

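/* vdev probe: the first probe maps the CBUS register space from /dev/mem,
 * parses the device tree and brings up the shared HIF; every probe then
 * creates one ethdev for the requested (or next free) gem-id.
 */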
static int
pmd_pfe_probe(struct rte_vdev_device *vdev)
{
	const u32 *prop;
	const struct device_node *np;
	const char *name;
	const uint32_t *addr;
	uint64_t cbus_addr, ddr_size, cbus_size;
	int rc = -1, fd = -1, gem_id;
	unsigned int ii, interface_count = 0;
	size_t size = 0;
	struct pfe_vdev_init_params init_params = {
		.gem_id = -1
	};

	name = rte_vdev_device_name(vdev);
	rc = pfe_parse_vdev_init_params(&init_params, vdev);
	if (rc < 0)
		return -EINVAL;

	PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s, given gem-id %d",
		    name, init_params.gem_id);

	if (g_pfe) {
		if (g_pfe->nb_devs >= g_pfe->max_intf) {
			PFE_PMD_ERR("PFE %d dev already created, max is %d",
				    g_pfe->nb_devs, g_pfe->max_intf);
			return -EINVAL;
		}
		goto eth_init;
	}

	g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE);
	if (g_pfe == NULL)
		return -EINVAL;

	/* Load the device-tree driver */
	rc = of_init();
	if (rc) {
		PFE_PMD_ERR("of_init failed with ret: %d", rc);
		goto err;
	}

	np = of_find_compatible_node(NULL, NULL, "fsl,pfe");
	if (!np) {
		PFE_PMD_ERR("Invalid device node");
		rc = -EINVAL;
		goto err;
	}

	addr = of_get_address(np, 0, &cbus_size, NULL);
	if (!addr) {
		PFE_PMD_ERR("of_get_address cannot return cbus address");
		goto err;
	}
	cbus_addr = of_translate_address(np, addr);
	if (!cbus_addr) {
		PFE_PMD_ERR("of_translate_address failed");
		goto err;
	}

	addr = of_get_address(np, 1, &ddr_size, NULL);
	if (!addr) {
		PFE_PMD_ERR("of_get_address cannot return ddr address");
		goto err;
	}

	g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr);
	if (!g_pfe->ddr_phys_baseaddr) {
		PFE_PMD_ERR("of_translate_address failed");
		goto err;
	}

	g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr);
	g_pfe->ddr_size = ddr_size;
	g_pfe->cbus_size = cbus_size;

	fd = open("/dev/mem", O_RDWR);
	if (fd < 0) {
		PFE_PMD_ERR("Can not open /dev/mem");
		rc = -EINVAL;
		goto err;
	}
	g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE,
				    MAP_SHARED, fd, cbus_addr);
	close(fd);
	if (g_pfe->cbus_baseaddr == MAP_FAILED) {
		PFE_PMD_ERR("Can not map cbus base");
		rc = -EINVAL;
		goto err;
	}

	/* Read interface count */
	prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
	if (!prop) {
		PFE_PMD_ERR("Failed to read number of interfaces");
		rc = -ENXIO;
		goto err_prop;
	}

	interface_count = rte_be_to_cpu_32((unsigned int)*prop);
	if (interface_count == 0) {
		PFE_PMD_ERR("No ethernet interfaces found: %d",
			    interface_count);
		rc = -ENXIO;
		goto err_prop;
	}
	PFE_PMD_INFO("num interfaces = %d", interface_count);

	g_pfe->max_intf = interface_count;
	g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;

	for (ii = 0; ii < interface_count; ii++) {
		pfe_get_gemac_if_proprties(g_pfe, np, ii, interface_count,
					   &g_pfe->platform_data);
	}

	pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr,
		     g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size);

	PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION));
	PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION));

	PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION));
	PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION));

	PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION));
	PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION));
	PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION));

	PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION));
	PFE_PMD_INFO("HIF NOCPY version: %x", readl(HIF_NOCPY_VERSION));

	cbus_emac_base[0] = EMAC1_BASE_ADDR;
	cbus_emac_base[1] = EMAC2_BASE_ADDR;

	cbus_gpi_base[0] = EGPI1_BASE_ADDR;
	cbus_gpi_base[1] = EGPI2_BASE_ADDR;

	rc = pfe_hif_lib_init(g_pfe);
	if (rc < 0)
		goto err_hif_lib;

	rc = pfe_hif_init(g_pfe);
	if (rc < 0)
		goto err_hif;
	pfe_soc_version_get();
eth_init:
	if (init_params.gem_id < 0)
		gem_id = g_pfe->nb_devs;
	else
		gem_id = init_params.gem_id;

	PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d (given %d)",
		    name, gem_id, init_params.gem_id);

	rc = pfe_eth_init(vdev, g_pfe, gem_id);
	if (rc < 0)
		goto err_eth;
	else
		g_pfe->nb_devs++;

	return 0;

err_eth:
	pfe_hif_exit(g_pfe);

err_hif:
	pfe_hif_lib_exit(g_pfe);

err_hif_lib:
err_prop:
	munmap(g_pfe->cbus_baseaddr, cbus_size);
err:
	rte_free(g_pfe);
	return rc;
}

static int
pmd_pfe_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev = NULL;
	int ret = 0;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	PFE_PMD_INFO("Closing pfe vdev device %s", name);

	if (!g_pfe)
		return 0;

	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev) {
		pfe_eth_close(eth_dev);
		ret = rte_eth_dev_release_port(eth_dev);
	}

	return ret;
}

static struct rte_vdev_driver pmd_pfe_drv = {
	.probe = pmd_pfe_probe,
	.remove = pmd_pfe_remove,
};

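/* Example usage (a sketch; assumes PFE_NAME_PMD expands to "net_pfe" as
 * defined in the PMD headers):
 *   dpdk-testpmd --vdev=net_pfe0,intf=0 --vdev=net_pfe1,intf=1 -- -i
 */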
RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv);
RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int>");
RTE_LOG_REGISTER_DEFAULT(pfe_logtype_pmd, NOTICE);