/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Ruslan Bukin <[email protected]>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/tiphy.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/xilinx/if_xaereg.h>
#include <dev/xilinx/if_xaevar.h>

#include "miibus_if.h"

#define	READ4(_sc, _reg)	\
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val)	\
	bus_write_4((_sc)->res[0], _reg, _val)

#define	READ8(_sc, _reg)	\
	bus_read_8((_sc)->res[0], _reg)
#define	WRITE8(_sc, _reg, _val)	\
	bus_write_8((_sc)->res[0], _reg, _val)

#define	XAE_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	XAE_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	XAE_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	XAE_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	XAE_DEBUG
#undef	XAE_DEBUG

#ifdef	XAE_DEBUG
#define	dprintf(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define	dprintf(fmt, ...)
#endif

#define	RX_QUEUE_SIZE		64
#define	TX_QUEUE_SIZE		64
#define	NUM_RX_MBUF		16
#define	BUFRING_SIZE		8192
#define	MDIO_CLK_DIV_DEFAULT	29

#define	PHY1_RD(sc, _r)		\
	xae_miibus_read_reg(sc->dev, 1, _r)
#define	PHY1_WR(sc, _r, _v)	\
	xae_miibus_write_reg(sc->dev, 1, _r, _v)

#define	PHY_RD(sc, _r)		\
	xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
#define	PHY_WR(sc, _r, _v)	\
	xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)

/* Use this macro to access regs > 0x1f */
#define	WRITE_TI_EREG(sc, reg, data) {					\
	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK);			\
	PHY_WR(sc, MII_MMDAADR, reg);					\
	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI);	\
	PHY_WR(sc, MII_MMDAADR, data);					\
}

/* Not documented, Xilinx VCU118 workaround */
#define	CFG4_SGMII_TMR		0x160		/* bits 8:7 MUST be '10' */
#define	DP83867_SGMIICTL1	0xD3		/* not documented register */
#define	SGMIICTL1_SGMII_6W	(1 << 14)	/* no idea what it is */

static struct resource_spec xae_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

static void xae_stop_locked(struct xae_softc *sc);
static void xae_setup_rxfilter(struct xae_softc *sc);

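/*
 * Pre-fill the RX xdma channel with up to n mbuf clusters.  Each cluster
 * is enqueued for a device-to-memory transfer; the RX completion handler
 * passes finished buffers to the stack and calls back here to refill.
 */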
static int
xae_rx_enqueue(struct xae_softc *sc, uint32_t n)
{
	struct mbuf *m;
	int i;

	for (i = 0; i < n; i++) {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			device_printf(sc->dev,
			    "%s: Can't alloc rx mbuf\n", __func__);
			return (-1);
		}

		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
		xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
	}

	return (0);
}

static int
xae_get_phyaddr(phandle_t node, int *phy_addr)
{
	phandle_t phy_node;
	pcell_t phy_handle, phy_reg;

	if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
	    sizeof(phy_handle)) <= 0)
		return (ENXIO);

	phy_node = OF_node_from_xref(phy_handle);

	if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
	    sizeof(phy_reg)) <= 0)
		return (ENXIO);

	*phy_addr = phy_reg;

	return (0);
}

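/*
 * TX completion callback registered on the xdma TX channel: dequeue
 * finished requests, count output errors, free the transmitted mbufs and
 * clear IFF_DRV_OACTIVE so transmission may continue.
 */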
static int
xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
	xdma_transfer_status_t st;
	struct xae_softc *sc;
	struct ifnet *ifp;
	struct mbuf *m;
	int err;

	sc = arg;

	XAE_LOCK(sc);

	ifp = sc->ifp;

	for (;;) {
		err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
		if (err != 0) {
			break;
		}

		if (st.error != 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		}

		m_freem(m);
	}

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	XAE_UNLOCK(sc);

	return (0);
}

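/*
 * RX completion callback: hand each received frame to the stack via
 * if_input() with the softc lock dropped, then enqueue the same number of
 * fresh mbufs to keep the RX channel populated.
 */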
static int
xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
	xdma_transfer_status_t st;
	struct xae_softc *sc;
	struct ifnet *ifp;
	struct mbuf *m;
	int err;
	uint32_t cnt_processed;

	sc = arg;

	dprintf("%s\n", __func__);

	XAE_LOCK(sc);

	ifp = sc->ifp;

	cnt_processed = 0;
	for (;;) {
		err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
		if (err != 0) {
			break;
		}
		cnt_processed++;

		if (st.error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			m_freem(m);
			continue;
		}

		m->m_pkthdr.len = m->m_len = st.transferred;
		m->m_pkthdr.rcvif = ifp;
		XAE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		XAE_LOCK(sc);
	}

	xae_rx_enqueue(sc, cnt_processed);

	XAE_UNLOCK(sc);

	return (0);
}

static void
xae_qflush(struct ifnet *ifp)
{
	struct xae_softc *sc;

	sc = ifp->if_softc;
}

static int
xae_transmit_locked(struct ifnet *ifp)
{
	struct xae_softc *sc;
	struct mbuf *m;
	struct buf_ring *br;
	int error;
	int enq;

	dprintf("%s\n", __func__);

	sc = ifp->if_softc;
	br = sc->br;

	enq = 0;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		error = xdma_enqueue_mbuf(sc->xchan_tx,
		    &m, 0, 4, 4, XDMA_MEM_TO_DEV);
		if (error != 0) {
			/* No space in request queue available yet. */
			drbr_putback(ifp, br, m);
			break;
		}

		drbr_advance(ifp, br);

		enq++;

		/* If anyone is interested give them a copy. */
		ETHER_BPF_MTAP(ifp, m);
	}

	if (enq > 0)
		xdma_queue_submit(sc->xchan_tx);

	return (0);
}

static int
xae_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct xae_softc *sc;
	int error;

	dprintf("%s\n", __func__);

	sc = ifp->if_softc;

	XAE_LOCK(sc);

	error = drbr_enqueue(ifp, sc->br, m);
	if (error) {
		XAE_UNLOCK(sc);
		return (error);
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) {
		XAE_UNLOCK(sc);
		return (0);
	}

	if (!sc->link_is_up) {
		XAE_UNLOCK(sc);
		return (0);
	}

	error = xae_transmit_locked(ifp);

	XAE_UNLOCK(sc);

	return (error);
}

static void
xae_stop_locked(struct xae_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->xae_callout);

	/* Stop the transmitter */
	reg = READ4(sc, XAE_TC);
	reg &= ~TC_TX;
	WRITE4(sc, XAE_TC, reg);

	/* Stop the receiver. */
	reg = READ4(sc, XAE_RCW1);
	reg &= ~RCW1_RX;
	WRITE4(sc, XAE_RCW1, reg);
}

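/*
 * Read a hardware statistics counter and return its increment since the
 * previous call, allowing for a counter wrap; the last value seen is
 * cached in sc->counters[].
 */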
static uint64_t
xae_stat(struct xae_softc *sc, int counter_id)
{
	uint64_t new, old;
	uint64_t delta;

	KASSERT(counter_id < XAE_MAX_COUNTERS,
	    ("counter %d is out of range", counter_id));

	new = READ8(sc, XAE_STATCNT(counter_id));
	old = sc->counters[counter_id];

	if (new >= old)
		delta = new - old;
	else
		delta = UINT64_MAX - old + new;
	sc->counters[counter_id] = new;

	return (delta);
}

static void
xae_harvest_stats(struct xae_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES));
	if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS));
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) +
	    xae_stat(sc, RX_LEN_OUT_OF_RANGE) +
	    xae_stat(sc, RX_ALIGNMENT_ERRORS));

	if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES));
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES));
	if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS));
	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS));

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) +
	    xae_stat(sc, TX_MULTI_COLLISION_FRAMES) +
	    xae_stat(sc, TX_LATE_COLLISIONS) +
	    xae_stat(sc, TX_EXCESS_COLLISIONS));
}

static void
xae_tick(void *arg)
{
	struct xae_softc *sc;
	struct ifnet *ifp;
	int link_was_up;

	sc = arg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	/* Gather stats from hardware counters. */
	xae_harvest_stats(sc);

	/* Check the media status. */
	link_was_up = sc->link_is_up;
	mii_tick(sc->mii_softc);
	if (sc->link_is_up && !link_was_up)
		xae_transmit_locked(sc->ifp);

	/* Schedule another check one second from now. */
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}

static void
xae_init_locked(struct xae_softc *sc)
{
	struct ifnet *ifp;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	xae_setup_rxfilter(sc);

	/* Enable the transmitter */
	WRITE4(sc, XAE_TC, TC_TX);

	/* Enable the receiver. */
	WRITE4(sc, XAE_RCW1, RCW1_RX);

	/*
	 * Call mii_mediachg() which will call back into xae_miibus_statchg()
	 * to set up the remaining config registers based on current media.
	 */
	mii_mediachg(sc->mii_softc);
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}

static void
xae_init(void *arg)
{
	struct xae_softc *sc;

	sc = arg;

	XAE_LOCK(sc);
	xae_init_locked(sc);
	XAE_UNLOCK(sc);
}

static void
xae_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
{
	struct xae_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = sc->mii_softc;

	XAE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	XAE_UNLOCK(sc);
}

static int
xae_media_change_locked(struct xae_softc *sc)
{

	return (mii_mediachg(sc->mii_softc));
}

static int
xae_media_change(struct ifnet * ifp)
{
	struct xae_softc *sc;
	int error;

	sc = ifp->if_softc;

	XAE_LOCK(sc);
	error = xae_media_change_locked(sc);
	XAE_UNLOCK(sc);

	return (error);
}

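/*
 * Program the receive address filters.  With IFF_PROMISC or IFF_ALLMULTI
 * the frame filter is switched to promiscuous mode (FFC_PM); otherwise the
 * link-level multicast list is loaded into the frame filter table.  The
 * primary unicast address is always written to the UAW registers.
 */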
static void
xae_setup_rxfilter(struct xae_softc *sc)
{
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	uint32_t reg;
	uint8_t *ma;
	int i;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/*
	 * Set the multicast (group) filter hash.
	 */
	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		reg = READ4(sc, XAE_FFC);
		reg |= FFC_PM;
		WRITE4(sc, XAE_FFC, reg);
	} else {
		reg = READ4(sc, XAE_FFC);
		reg &= ~FFC_PM;
		WRITE4(sc, XAE_FFC, reg);

		if_maddr_rlock(ifp);

		i = 0;
		CK_STAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			if (i >= XAE_MULTICAST_TABLE_SIZE)
				break;

			ma = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);

			reg = READ4(sc, XAE_FFC) & 0xffffff00;
			reg |= i++;
			WRITE4(sc, XAE_FFC, reg);

			reg = (ma[0]);
			reg |= (ma[1] << 8);
			reg |= (ma[2] << 16);
			reg |= (ma[3] << 24);
			WRITE4(sc, XAE_FFV(0), reg);

			reg = ma[4];
			reg |= ma[5] << 8;
			WRITE4(sc, XAE_FFV(1), reg);
		}
		if_maddr_runlock(ifp);
	}

	/*
	 * Set the primary address.
	 */
	reg = sc->macaddr[0];
	reg |= (sc->macaddr[1] << 8);
	reg |= (sc->macaddr[2] << 16);
	reg |= (sc->macaddr[3] << 24);
	WRITE4(sc, XAE_UAW0, reg);

	reg = sc->macaddr[4];
	reg |= (sc->macaddr[5] << 8);
	WRITE4(sc, XAE_UAW1, reg);
}

static int
xae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		XAE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					xae_setup_rxfilter(sc);
			} else {
				if (!sc->is_detaching)
					xae_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				xae_stop_locked(sc);
		}
		sc->if_flags = ifp->if_flags;
		XAE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			XAE_LOCK(sc);
			xae_setup_rxfilter(sc);
			XAE_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = sc->mii_softc;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change took */
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
xae_intr(void *arg)
{

}

static int
xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr)
{
	phandle_t node;
	int len;

	node = ofw_bus_get_node(sc->dev);

	/* Check if there is property */
	if ((len = OF_getproplen(node, "local-mac-address")) <= 0)
		return (EINVAL);

	if (len != ETHER_ADDR_LEN)
		return (EINVAL);

	OF_getprop(node, "local-mac-address", hwaddr,
	    ETHER_ADDR_LEN);

	return (0);
}

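/*
 * Busy-wait for the MDIO controller to report MDIO_CTRL_READY, polling
 * for roughly 200 microseconds before giving up.
 */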
static int
mdio_wait(struct xae_softc *sc)
{
	uint32_t reg;
	int timeout;

	timeout = 200;

	do {
		reg = READ4(sc, XAE_MDIO_CTRL);
		if (reg & MDIO_CTRL_READY)
			break;
		DELAY(1);
	} while (timeout--);

	if (timeout <= 0) {
		printf("Failed to get MDIO ready\n");
		return (1);
	}

	return (0);
}

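/*
 * miibus register accessors.  An MDIO transaction is started by writing
 * the PHY and register addresses together with MDIO_CTRL_INITIATE into
 * XAE_MDIO_CTRL; a read result is then fetched from XAE_MDIO_READ once
 * the controller is ready again.
 */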
static int
xae_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct xae_softc *sc;
	uint32_t mii;
	int rv;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (0);

	mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	WRITE4(sc, XAE_MDIO_CTRL, mii);

	if (mdio_wait(sc))
		return (0);

	rv = READ4(sc, XAE_MDIO_READ);

	return (rv);
}

static int
xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct xae_softc *sc;
	uint32_t mii;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (1);

	mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	WRITE4(sc, XAE_MDIO_WRITE, val);
	WRITE4(sc, XAE_MDIO_CTRL, mii);

	if (mdio_wait(sc))
		return (1);

	return (0);
}

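/*
 * Workaround for the TI DP83867 PHY on the Xilinx VCU118 board: force
 * SGMII mode through the (undocumented) extended registers above, then
 * restart autonegotiation on the PHY at address 1 until it reports
 * completion.
 */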
static void
xae_phy_fixup(struct xae_softc *sc)
{
	uint32_t reg;
	device_t dev;

	dev = sc->dev;

	do {
		WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
		PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);

		reg = PHY_RD(sc, DP83867_CFG2);
		reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
		reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
		reg |= CFG2_INTERRUPT_POLARITY;
		reg |= CFG2_SPEED_OPT_ENHANCED_EN;
		reg |= CFG2_SPEED_OPT_10M_EN;
		PHY_WR(sc, DP83867_CFG2, reg);

		WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);
		PHY_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
	} while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);

	do {
		PHY1_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
		DELAY(40000);
	} while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
}

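/*
 * Bind to the "tx" and "rx" xDMA controllers described in the FDT,
 * allocate a virtual channel for each direction, install the completion
 * handlers and configure the scatter-gather parameters.
 */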
static int
setup_xdma(struct xae_softc *sc)
{
	device_t dev;
	vmem_t *vmem;
	int error;

	dev = sc->dev;

	/* Get xDMA controller */
	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
	if (sc->xdma_tx == NULL) {
		device_printf(dev, "Could not find DMA controller.\n");
		return (ENXIO);
	}

	sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
	if (sc->xdma_rx == NULL) {
		device_printf(dev, "Could not find DMA controller.\n");
		return (ENXIO);
	}

	/* Alloc xDMA TX virtual channel. */
	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
	if (sc->xchan_tx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
		return (ENXIO);
	}

	/* Setup interrupt handler. */
	error = xdma_setup_intr(sc->xchan_tx,
	    xae_xdma_tx_intr, sc, &sc->ih_tx);
	if (error) {
		device_printf(sc->dev,
		    "Can't setup xDMA TX interrupt handler.\n");
		return (ENXIO);
	}

	/* Alloc xDMA RX virtual channel. */
	sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0);
	if (sc->xchan_rx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA RX channel.\n");
		return (ENXIO);
	}

	/* Setup interrupt handler. */
	error = xdma_setup_intr(sc->xchan_rx,
	    xae_xdma_rx_intr, sc, &sc->ih_rx);
	if (error) {
		device_printf(sc->dev,
		    "Can't setup xDMA RX interrupt handler.\n");
		return (ENXIO);
	}

	/* Setup bounce buffer */
	vmem = xdma_get_memory(dev);
	if (vmem) {
		xchan_set_memory(sc->xchan_tx, vmem);
		xchan_set_memory(sc->xchan_rx, vmem);
	}

	xdma_prep_sg(sc->xchan_tx,
	    TX_QUEUE_SIZE,	/* xchan requests queue size */
	    MCLBYTES,		/* maxsegsize */
	    8,			/* maxnsegs */
	    16,			/* alignment */
	    0,			/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR);

	xdma_prep_sg(sc->xchan_rx,
	    RX_QUEUE_SIZE,	/* xchan requests queue size */
	    MCLBYTES,		/* maxsegsize */
	    1,			/* maxnsegs */
	    16,			/* alignment */
	    0,			/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR);

	return (0);
}

static int
xae_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
		return (ENXIO);

	device_set_desc(dev, "Xilinx AXI Ethernet");

	return (BUS_PROBE_DEFAULT);
}

static int
xae_attach(device_t dev)
{
	struct xae_softc *sc;
	struct ifnet *ifp;
	phandle_t node;
	uint32_t reg;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	node = ofw_bus_get_node(dev);

	if (setup_xdma(sc) != 0) {
		device_printf(dev, "Could not setup xDMA.\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
	    MTX_NETWORK_LOCK, MTX_DEF);

	sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &sc->mtx);
	if (sc->br == NULL)
		return (ENOMEM);

	if (bus_alloc_resources(dev, xae_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	device_printf(sc->dev, "Identification: %x\n",
	    READ4(sc, XAE_IDENT));

	/* Get MAC addr */
	if (xae_get_hwaddr(sc, sc->macaddr)) {
		device_printf(sc->dev, "can't get mac\n");
		return (ENXIO);
	}

	/* Enable MII clock */
	reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
	reg |= MDIO_SETUP_ENABLE;
	WRITE4(sc, XAE_MDIO_SETUP, reg);
	if (mdio_wait(sc))
		return (ENXIO);

	callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, xae_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	/* Set up the ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifp.\n");
		return (ENXIO);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_transmit = xae_transmit;
	ifp->if_qflush = xae_qflush;
	ifp->if_ioctl = xae_ioctl;
	ifp->if_init = xae_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_DESC_COUNT - 1);
	ifp->if_snd.ifq_drv_maxlen = TX_DESC_COUNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
		return (ENXIO);

	/* Attach the mii driver. */
	error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
	    xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
	    MII_OFFSET_ANY, 0);

	if (error != 0) {
		device_printf(dev, "PHY attach failed\n");
		return (ENXIO);
	}
	sc->mii_softc = device_get_softc(sc->miibus);

	/* Apply vcu118 workaround. */
	if (OF_getproplen(node, "xlnx,vcu118") >= 0)
		xae_phy_fixup(sc);

	/* All ready to run, attach the ethernet interface. */
	ether_ifattach(ifp, sc->macaddr);
	sc->is_attached = true;

	xae_rx_enqueue(sc, NUM_RX_MBUF);
	xdma_queue_submit(sc->xchan_rx);

	return (0);
}

static int
xae_detach(device_t dev)
{
	struct xae_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
	    device_get_nameunit(dev)));

	ifp = sc->ifp;

	/* Only cleanup if attach succeeded. */
	if (device_is_attached(dev)) {
		XAE_LOCK(sc);
		xae_stop_locked(sc);
		XAE_UNLOCK(sc);
		callout_drain(&sc->xae_callout);
		ether_ifdetach(ifp);
	}

	if (sc->miibus != NULL)
		device_delete_child(dev, sc->miibus);

	if (ifp != NULL)
		if_free(ifp);

	mtx_destroy(&sc->mtx);

	bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);

	bus_release_resources(dev, xae_spec, sc->res);

	xdma_channel_free(sc->xchan_tx);
	xdma_channel_free(sc->xchan_rx);
	xdma_put(sc->xdma_tx);
	xdma_put(sc->xdma_rx);

	return (0);
}

static void
xae_miibus_statchg(device_t dev)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	uint32_t reg;

	/*
	 * Called by the MII bus driver when the PHY establishes
	 * link to set the MAC interface registers.
	 */

	sc = device_get_softc(dev);

	XAE_ASSERT_LOCKED(sc);

	mii = sc->mii_softc;

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_is_up = true;
	else
		sc->link_is_up = false;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		reg = SPEED_1000;
		break;
	case IFM_100_TX:
		reg = SPEED_100;
		break;
	case IFM_10_T:
		reg = SPEED_10;
		break;
	case IFM_NONE:
		sc->link_is_up = false;
		return;
	default:
		sc->link_is_up = false;
		device_printf(dev, "Unsupported media %u\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		return;
	}

	WRITE4(sc, XAE_SPEED, reg);
}

static device_method_t xae_methods[] = {
	DEVMETHOD(device_probe,		xae_probe),
	DEVMETHOD(device_attach,	xae_attach),
	DEVMETHOD(device_detach,	xae_detach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	xae_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	xae_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	xae_miibus_statchg),

	{ 0, 0 }
};

driver_t xae_driver = {
	"xae",
	xae_methods,
	sizeof(struct xae_softc),
};

static devclass_t xae_devclass;

DRIVER_MODULE(xae, simplebus, xae_driver, xae_devclass, 0, 0);
DRIVER_MODULE(miibus, xae, miibus_driver, miibus_devclass, 0, 0);

MODULE_DEPEND(xae, ether, 1, 1, 1);
MODULE_DEPEND(xae, miibus, 1, 1, 1);