/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#define __BIT(_x)	(1 << (_x))
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#define ICMPV6_HACK	/* workaround for chip issue */
#ifdef ICMPV6_HACK
#include <netinet/icmp6.h>
#endif

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))

#define GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define RX_DESC_COUNT		GENET_DMA_DESC_COUNT

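/*
 * The ring-index macros below assume the descriptor counts are powers of
 * two, so wrapping can be done with a mask instead of a modulo.
 */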
#define TX_NEXT(n, count)	(((n) + 1) & ((count) - 1))
#define RX_NEXT(n, count)	(((n) + 1) & ((count) - 1))

#define TX_MAX_SEGS		20

/* Maximum number of mbufs to send to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);
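/*
 * The batch size above is a loader tunable; e.g. it could be set in
 * /boot/loader.conf as hw.gen.rx_batch="32".
 */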

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};

enum {
	_RES_MAC,		/* MAC register block */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct tx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			queued;		/* descriptors in use */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The GENET chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but the code
	 * is structured for multiple rings; the additional rings would be
	 * used for different priorities.
	 */
#define DEF_TXQUEUE	0
#define NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];	/* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define DEF_RXQUEUE	0
#define NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];	/* ring entries */
};

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(device_t dev);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);

static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
	    RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(dev);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (!eaddr_found)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}

/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	if (sc->miibus) {	/* can't happen */
		device_delete_child(sc->dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}

static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);
}

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

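/*
 * Checksum offload works through an optional status block (struct
 * statusblock, from if_genetreg.h; nominally 64 bytes, per the 64B_EN bit
 * name) prepended to each frame: RBUF_64B_EN makes the hardware prepend
 * one on receive, and the same bit in TBUF_CTRL makes it expect one on
 * transmit.
 */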
static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl = RD4(sc, GENET_RBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}

static void
gen_dma_disable(device_t dev)
{
	struct gen_softc *sc = device_get_softc(dev);
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}

static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
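	/*
	 * The write below packs the ring's two flow-control (XON/XOFF)
	 * thresholds into one register: 5 descriptors in the low field and
	 * RX_DESC_COUNT / 16 in the other.
	 */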
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
		break;
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for unicast and broadcast */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* one filter slot consumed */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters.  We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		ifp->if_flags |= IFF_ALLMULTI;
	else
		ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, IF_LLADDR(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
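		/*
		 * MDF_CTRL enables one filter per bit; the expression below
		 * sets bits [GENET_MAX_MDF_FILTER - 1 .. GENET_MAX_MDF_FILTER
		 * - n], i.e. the n highest bits, so slot 0 evidently maps to
		 * the most significant bit.  E.g., if GENET_MAX_MDF_FILTER is
		 * 17, n = 3 yields a mask covering bits 16..14.
		 */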
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &~
		    (__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}

static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address to the hardware */
	enaddr = IF_LLADDR(ifp);
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int cnt, err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)

static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];

	m = *mp;
#ifdef ICMPV6_HACK
	/*
	 * Reflected ICMPv6 packets, e.g. echo replies, tend to get laid
	 * out with only the Ethernet header in the first mbuf, and this
	 * doesn't seem to work.
	 */
#define ICMP6_LEN (sizeof(struct ether_header) + sizeof(struct ip6_hdr) + \
		    sizeof(struct icmp6_hdr))
	if (m->m_len == sizeof(struct ether_header)) {
		int ether_type = mtod(m, struct ether_header *)->ether_type;
		if (ntohs(ether_type) == ETHERTYPE_IPV6 &&
		    m->m_next->m_len >= sizeof(struct ip6_hdr)) {
			struct ip6_hdr *ip6;

			ip6 = mtod(m->m_next, struct ip6_hdr *);
			if (ip6->ip6_nxt == IPPROTO_ICMPV6) {
				m = m_pullup(m,
				    MIN(m->m_pkthdr.len, ICMP6_LEN));
				if (m == NULL) {
					if (sc->ifp->if_flags & IFF_DEBUG)
						device_printf(sc->dev,
						    "ICMPV6 pullup fail\n");
					*mp = NULL;
					return (ENOMEM);
				}
			}
		}
	}
#undef ICMP6_LEN
#endif
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (sc->ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

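	/*
	 * The producer index shared with the hardware is a free-running
	 * counter (masked with GENET_TX_DMA_PROD_CONS_MASK); the descriptor
	 * slot is just its low bits.
	 */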
	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store mbuf with the first descriptor, whose map was used */
	q->entries[first].mbuf = m;

	return (0);
}

/*
 * Parse a packet to find the offset of the transport header for checksum
 * offload.  Ensure that the link and network headers are contiguous with
 * the status block, or transmission fails.
 */
static int
gen_parse_tx(struct mbuf *m, int csum_flags)
{
	int offset, off_in_m;
	bool copy = false, shift = false;
	u_char *p, *copy_p = NULL;
	struct mbuf *m0 = m;
	uint16_t ether_type;

	if (m->m_len == sizeof(struct statusblock)) {
		/* M_PREPEND placed statusblock at end; move to beginning */
		m->m_data = m->m_pktdat;
		copy_p = mtodo(m, sizeof(struct statusblock));
		m = m->m_next;
		off_in_m = 0;
		p = mtod(m, u_char *);
		copy = true;
	} else {
		/*
		 * If statusblock is not at beginning of mbuf (likely),
		 * then remember to move mbuf contents down before copying
		 * after them.
		 */
		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
			shift = true;
		p = mtodo(m, sizeof(struct statusblock));
		off_in_m = sizeof(struct statusblock);
	}

	/*
	 * If headers need to be copied contiguous to statusblock, do so.
	 * If copying to the internal mbuf data area, and the status block
	 * is not at the beginning of that area, shift the status block (which
	 * is empty) and following data.
	 */
#define COPY(size) {							\
	int hsize = size;						\
	if (copy) {							\
		if (shift) {						\
			u_char *p0;					\
			shift = false;					\
			p0 = mtodo(m0, sizeof(struct statusblock));	\
			m0->m_data = m0->m_pktdat;			\
			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
			    m0->m_len - sizeof(struct statusblock));	\
			copy_p = mtodo(m0, sizeof(struct statusblock));	\
		}							\
		bcopy(p, copy_p, hsize);				\
		m0->m_len += hsize;					\
		m0->m_pkthdr.len += hsize;	/* unneeded */		\
		m->m_len -= hsize;					\
		m->m_data += hsize;					\
	}								\
	copy_p += hsize;						\
}
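/*
 * Example of the flow below, assuming an untagged IPv4/TCP packet whose
 * headers already sit in one mbuf after the status block: "copy" stays
 * false so COPY() does nothing, p advances past the Ethernet header, and
 * the returned offset is sizeof(struct ether_header) + (ip_hl << 2),
 * i.e. the start of the TCP header relative to the end of the status block.
 */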

	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));

	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
		offset = sizeof(struct ether_vlan_header);
		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
		COPY(sizeof(struct ether_vlan_header));
		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_vlan_header);
			p += sizeof(struct ether_vlan_header);
		}
	} else {
		offset = sizeof(struct ether_header);
		ether_type = ntohs(((struct ether_header *)p)->ether_type);
		COPY(sizeof(struct ether_header));
		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_header);
			p += sizeof(struct ether_header);
		}
	}
	if (ether_type == ETHERTYPE_IP) {
		COPY(((struct ip *)p)->ip_hl << 2);
		offset += ((struct ip *)p)->ip_hl << 2;
	} else if (ether_type == ETHERTYPE_IPV6) {
		COPY(sizeof(struct ip6_hdr));
		offset += sizeof(struct ip6_hdr);
	} else {
		/*
		 * Unknown whether other cases require moving a header;
		 * ARP works without.
		 */
	}
	return (offset);
#undef COPY
}

static void
gen_intr(void *arg)
{
	struct gen_softc *sc = arg;
	uint32_t val;

	GEN_LOCK(sc);

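	/*
	 * Read the raw interrupt status and discard sources that are still
	 * masked, then acknowledge only the interrupts handled here.
	 */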
	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);

	if (val & GENET_IRQ_TXDMA_DONE) {
		gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
		if (!if_sendq_empty(sc->ifp))
			gen_start_locked(sc);
	}

	GEN_UNLOCK(sc);
}

static int
gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	struct statusblock *sb = NULL;
	int error, index, len, cnt, npkt, n;
	uint32_t status, prod_idx, total;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

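	/*
	 * As on the TX side, the hardware producer index is a free-running
	 * counter; the number of completed descriptors is the masked
	 * difference from our consumer index.
	 */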
	prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
	    GENET_RX_DMA_PROD_CONS_MASK;
	total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;

	index = q->cons_idx & (RX_DESC_COUNT - 1);
	for (n = 0; n < total; n++) {
		bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);

		m = q->entries[index].mbuf;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			sb = mtod(m, struct statusblock *);
			status = sb->status_buflen;
		} else
			status = RD4(sc, GENET_RX_DESC_STATUS(index));

		len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
		    GENET_RX_DESC_STATUS_BUFLEN_SHIFT;

		/* check for errors */
		if ((status &
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
		    GENET_RX_DESC_STATUS_RX_ERROR)) !=
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
			if (ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev,
				    "error/frag %x csum %x\n", status,
				    sb != NULL ? sb->rxcsum : 0);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		error = gen_newbuf_rx(sc, q, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev, "gen_newbuf_rx %d\n",
				    error);
			/* reuse previous mbuf */
			(void) gen_mapbuf_rx(sc, q, index, m);
			continue;
		}

		if (sb != NULL) {
			if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
				/* L4 checksum checked; not sure about L3. */
				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_data += sizeof(struct statusblock);
			m->m_len -= sizeof(struct statusblock);
			len -= sizeof(struct statusblock);
		}
		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);
			len -= ETHER_ALIGN;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		index = RX_NEXT(index, q->nentries);

		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);

		if (cnt == gen_rx_batch) {
			GEN_UNLOCK(sc);
			if_input(ifp, mh);
			GEN_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (mh != NULL) {
		GEN_UNLOCK(sc);
		if_input(ifp, mh);
		GEN_LOCK(sc);
	}

	return (npkt);
}

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}

static void
gen_intr2(void *arg)
{
	struct gen_softc *sc = arg;

	device_printf(sc->dev, "gen_intr2\n");
}

static int
gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	return (gen_mapbuf_rx(sc, q, index, m));
}

static int
gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	map = q->entries[index].map;
	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);

	q->entries[index].mbuf = m;
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));

	return (0);
}

static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_reset(sc);
		}
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
gen_tick(void *softc)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		gen_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

#define MII_BUSY_RETRY		1000
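/* With DELAY(10) per poll, this allows roughly 10 ms per MDIO access. */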

static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
gen_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct gen_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
	    (val & GENET_MDIO_VAL_MASK));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		val = RD4(sc, GENET_MDIO_CMD);
		if ((val & GENET_MDIO_START_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc;

	sc = arg;

	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}

static void
gen_miibus_statchg(device_t dev)
{
	struct gen_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
gen_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gen_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	GEN_UNLOCK(sc);
}

static int
gen_media_change(if_t ifp)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	error = mii_mediachg(mii);
	GEN_UNLOCK(sc);

	return (error);
}

static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

static devclass_t gen_devclass;

DRIVER_MODULE(genet, simplebus, gen_driver, gen_devclass, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);