/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015-2016, Stanislav Galabov
 * Copyright (c) 2014, Aleksandr A. Mityaev
 * Copyright (c) 2011, Aleksandr Rybalko
 * based on hard work
 * by Alexander Egorenkov <[email protected]>
 * and by Damien Bergamini <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "if_rtvar.h"
#include "if_rtreg.h"

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "opt_platform.h"
#include "opt_rt305x.h"

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef RT_MDIO
#include <dev/mdio/mdio.h>
#include <dev/etherswitch/miiproxy.h>
#include "mdio_if.h"
#endif

#if 0
#include <mips/rt305x/rt305x_sysctlvar.h>
#include <mips/rt305x/rt305xreg.h>
#endif

#ifdef IF_RT_PHY_SUPPORT
#include "miibus_if.h"
#endif

/*
 * Defines and macros
 */
#define	RT_MAX_AGG_SIZE			3840

#define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE

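/*
 * Register field helpers: RT_MS() masks value _v with field mask _f and
 * shifts the field down by its _f_S shift count (extract); RT_SM() shifts
 * _v up and masks it (insert).  Every field mask FOO is expected to have
 * a matching FOO_S shift constant.
 */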
#define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
#define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)

#define	RT_TX_WATCHDOG_TIMEOUT		5

#define RT_CHIPID_RT2880 0x2880
#define RT_CHIPID_RT3050 0x3050
#define RT_CHIPID_RT5350 0x5350
#define RT_CHIPID_MT7620 0x7620
#define RT_CHIPID_MT7621 0x7621

#ifdef FDT
/* more specific and new models should go first */
static const struct ofw_compat_data rt_compat_data[] = {
	{ "ralink,rt2880-eth",		RT_CHIPID_RT2880 },
	{ "ralink,rt3050-eth",		RT_CHIPID_RT3050 },
	{ "ralink,rt3352-eth",		RT_CHIPID_RT3050 },
	{ "ralink,rt3883-eth",		RT_CHIPID_RT3050 },
	{ "ralink,rt5350-eth",		RT_CHIPID_RT5350 },
	{ "ralink,mt7620a-eth",		RT_CHIPID_MT7620 },
	{ "mediatek,mt7620-eth",	RT_CHIPID_MT7620 },
	{ "ralink,mt7621-eth",		RT_CHIPID_MT7621 },
	{ "mediatek,mt7621-eth",	RT_CHIPID_MT7621 },
	{ NULL,				0 }
};
#endif

/*
 * Static function prototypes
 */
static int	rt_probe(device_t dev);
static int	rt_attach(device_t dev);
static int	rt_detach(device_t dev);
static int	rt_shutdown(device_t dev);
static int	rt_suspend(device_t dev);
static int	rt_resume(device_t dev);
static void	rt_init_locked(void *priv);
static void	rt_init(void *priv);
static void	rt_stop_locked(void *priv);
static void	rt_stop(void *priv);
static void	rt_start(struct ifnet *ifp);
static int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	rt_periodic(void *arg);
static void	rt_tx_watchdog(void *arg);
static void	rt_intr(void *arg);
static void	rt_rt5350_intr(void *arg);
static void	rt_tx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_delay_intr(struct rt_softc *sc);
static void	rt_tx_delay_intr(struct rt_softc *sc);
static void	rt_rx_intr(struct rt_softc *sc, int qid);
static void	rt_tx_intr(struct rt_softc *sc, int qid);
static void	rt_rx_done_task(void *context, int pending);
static void	rt_tx_done_task(void *context, int pending);
static void	rt_periodic_task(void *context, int pending);
static int	rt_rx_eof(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring, int limit);
static void	rt_tx_eof(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_update_stats(struct rt_softc *sc);
static void	rt_watchdog(struct rt_softc *sc);
static void	rt_update_raw_counters(struct rt_softc *sc);
static void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
static void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
static int	rt_txrx_enable(struct rt_softc *sc);
static int	rt_alloc_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring, int qid);
static void	rt_reset_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static void	rt_free_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static int	rt_alloc_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring, int qid);
static void	rt_reset_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_free_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static void	rt_sysctl_attach(struct rt_softc *sc);
#ifdef IF_RT_PHY_SUPPORT
void		rt_miibus_statchg(device_t);
#endif
#if defined(IF_RT_PHY_SUPPORT) || defined(RT_MDIO)
static int	rt_miibus_readreg(device_t, int, int);
static int	rt_miibus_writereg(device_t, int, int, int);
#endif
static int	rt_ifmedia_upd(struct ifnet *);
static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
#ifdef IF_RT_DEBUG
static int rt_debug = 0;
SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
    "RT debug level");
#endif

static int
rt_probe(device_t dev)
{
	struct rt_softc *sc = device_get_softc(dev);
	char buf[80];
#ifdef FDT
	const struct ofw_compat_data * cd;

	cd = ofw_bus_search_compatible(dev, rt_compat_data);
	if (cd->ocd_data == 0)
		return (ENXIO);

	sc->rt_chipid = (unsigned int)(cd->ocd_data);
#else
#if defined(MT7620)
	sc->rt_chipid = RT_CHIPID_MT7620;
#elif defined(MT7621)
	sc->rt_chipid = RT_CHIPID_MT7621;
#elif defined(RT5350)
	sc->rt_chipid = RT_CHIPID_RT5350;
#else
	sc->rt_chipid = RT_CHIPID_RT3050;
#endif
#endif
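	/* Chip IDs 0x7600 and up are MediaTek ("MT..."), older ones Ralink ("RT...") */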
	snprintf(buf, sizeof(buf), "Ralink %cT%x onChip Ethernet driver",
		sc->rt_chipid >= 0x7600 ? 'M' : 'R', sc->rt_chipid);
	device_set_desc_copy(dev, buf);
	return (BUS_PROBE_GENERIC);
}

/*
 * macaddr_atoi - translate a string MAC address into a uint8_t array
 */
static int
macaddr_atoi(const char *str, uint8_t *mac)
{
	int count, i;
	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */

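	/* Each %x parses one octet; %*c consumes and discards the separator */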
	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
	    &amac[0], &amac[1], &amac[2],
	    &amac[3], &amac[4], &amac[5]);
	if (count < ETHER_ADDR_LEN) {
		memset(mac, 0, ETHER_ADDR_LEN);
		return (1);
	}

	/* Copy aligned to result */
	for (i = 0; i < ETHER_ADDR_LEN; i ++)
		mac[i] = (amac[i] & 0xff);

	return (0);
}

#ifdef USE_GENERATED_MAC_ADDRESS
/*
 * generate_mac(uint8_t *mac)
 * This is a MAC address generator for cases when the real device MAC
 * address is unknown or not yet accessible.
 * Use the 'b','s','d' signature and 3 octets from a CRC32 over the kenv.
 * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
 *
 * Output: a MAC address that does not change between reboots, as long as
 * the hints or bootloader info are unchanged.
 */
static void
generate_mac(uint8_t *mac)
{
	unsigned char *cp;
	int i = 0;
	uint32_t crc = 0xffffffff;

	/* Generate CRC32 on kenv */
	for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
		crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
	}
	crc = ~crc;

	mac[0] = 'b';
	mac[1] = 's';
	mac[2] = 'd';
	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
	mac[4] = (crc >> 8) & 0xff;
	mac[5] = crc & 0xff;
}
#endif

/*
 * ether_request_mac - try to find a usable MAC address.
 */
static int
ether_request_mac(device_t dev, uint8_t *mac)
{
	char *var;

	/*
	 * "ethaddr" is passed via envp on RedBoot platforms
	 * "kmac" is passed via argv on RouterBOOT platforms
	 */
#if defined(RT305X_UBOOT) || defined(__REDBOOT__) || defined(__ROUTERBOOT__)
	if ((var = kern_getenv("ethaddr")) != NULL ||
	    (var = kern_getenv("kmac")) != NULL ) {

		if(!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from KENV\n",
			    device_get_nameunit(dev), var);
			freeenv(var);
			return (0);
		}
		freeenv(var);
	}
#endif

	/*
	 * Try from hints
	 * hint.[dev].[unit].macaddr
	 */
	if (!resource_string_value(device_get_name(dev),
	    device_get_unit(dev), "macaddr", (const char **)&var)) {

		if(!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from hints\n",
			    device_get_nameunit(dev), var);
			return (0);
		}
	}

#ifdef USE_GENERATED_MAC_ADDRESS
	generate_mac(mac);

	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
#else
	/* Hardcoded */
	mac[0] = 0x00;
	mac[1] = 0x18;
	mac[2] = 0xe7;
	mac[3] = 0xd5;
	mac[4] = 0x83;
	mac[5] = 0x90;

	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
#endif

	return (0);
}

/*
 * Reset hardware
 */
static void
reset_freng(struct rt_softc *sc)
{
	/* XXX hard reset kills everything so skip it ... */
	return;
}

static int
rt_attach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int error, i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	sc->mem_rid = 0;
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		error = ENXIO;
		goto fail;
	}

	sc->bst = rman_get_bustag(sc->mem);
	sc->bsh = rman_get_bushandle(sc->mem);

	sc->irq_rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev,
		    "could not allocate interrupt resource\n");
		error = ENXIO;
		goto fail;
	}

#ifdef IF_RT_DEBUG
	sc->debug = rt_debug;

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
#endif

	/* Reset hardware */
	reset_freng(sc);

	if (sc->rt_chipid == RT_CHIPID_MT7620) {
		sc->csum_fail_ip = MT7620_RXD_SRC_IP_CSUM_FAIL;
		sc->csum_fail_l4 = MT7620_RXD_SRC_L4_CSUM_FAIL;
	} else if (sc->rt_chipid == RT_CHIPID_MT7621) {
		sc->csum_fail_ip = MT7621_RXD_SRC_IP_CSUM_FAIL;
		sc->csum_fail_l4 = MT7621_RXD_SRC_L4_CSUM_FAIL;
	} else {
		sc->csum_fail_ip = RT305X_RXD_SRC_IP_CSUM_FAIL;
		sc->csum_fail_l4 = RT305X_RXD_SRC_L4_CSUM_FAIL;
	}

	/* Fill in the SoC-specific register map */
	switch(sc->rt_chipid) {
	  case RT_CHIPID_MT7620:
	  case RT_CHIPID_MT7621:
		sc->gdma1_base = MT7620_GDMA1_BASE;
		/* fallthrough */
	  case RT_CHIPID_RT5350:
		device_printf(dev, "%cT%x Ethernet MAC (rev 0x%08x)\n",
			sc->rt_chipid >= 0x7600 ? 'M' : 'R',
			sc->rt_chipid, sc->mac_rev);
		/* RT5350: No GDMA, PSE, CDMA, PPE */
		RT_WRITE(sc, GE_PORT_BASE + 0x0C00, /* UDPCS, TCPCS, IPCS=1 */
			RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7<<16));
		sc->delay_int_cfg = RT5350_PDMA_BASE + RT5350_DELAY_INT_CFG;
		sc->fe_int_status = RT5350_FE_INT_STATUS;
		sc->fe_int_enable = RT5350_FE_INT_ENABLE;
		sc->pdma_glo_cfg = RT5350_PDMA_BASE + RT5350_PDMA_GLO_CFG;
		sc->pdma_rst_idx = RT5350_PDMA_BASE + RT5350_PDMA_RST_IDX;
		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
			sc->tx_base_ptr[i] = RT5350_PDMA_BASE + RT5350_TX_BASE_PTR(i);
			sc->tx_max_cnt[i] = RT5350_PDMA_BASE + RT5350_TX_MAX_CNT(i);
			sc->tx_ctx_idx[i] = RT5350_PDMA_BASE + RT5350_TX_CTX_IDX(i);
			sc->tx_dtx_idx[i] = RT5350_PDMA_BASE + RT5350_TX_DTX_IDX(i);
		}
		sc->rx_ring_count = 2;
		sc->rx_base_ptr[0] = RT5350_PDMA_BASE + RT5350_RX_BASE_PTR0;
		sc->rx_max_cnt[0] = RT5350_PDMA_BASE + RT5350_RX_MAX_CNT0;
		sc->rx_calc_idx[0] = RT5350_PDMA_BASE + RT5350_RX_CALC_IDX0;
		sc->rx_drx_idx[0] = RT5350_PDMA_BASE + RT5350_RX_DRX_IDX0;
		sc->rx_base_ptr[1] = RT5350_PDMA_BASE + RT5350_RX_BASE_PTR1;
		sc->rx_max_cnt[1] = RT5350_PDMA_BASE + RT5350_RX_MAX_CNT1;
		sc->rx_calc_idx[1] = RT5350_PDMA_BASE + RT5350_RX_CALC_IDX1;
		sc->rx_drx_idx[1] = RT5350_PDMA_BASE + RT5350_RX_DRX_IDX1;
		sc->int_rx_done_mask = RT5350_INT_RXQ0_DONE;
		sc->int_tx_done_mask = RT5350_INT_TXQ0_DONE;
		break;
	  default:
		device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
			sc->mac_rev);
		sc->gdma1_base = GDMA1_BASE;
		sc->delay_int_cfg = PDMA_BASE + DELAY_INT_CFG;
		sc->fe_int_status = GE_PORT_BASE + FE_INT_STATUS;
		sc->fe_int_enable = GE_PORT_BASE + FE_INT_ENABLE;
		sc->pdma_glo_cfg = PDMA_BASE + PDMA_GLO_CFG;
		sc->pdma_rst_idx = PDMA_BASE + PDMA_RST_IDX;
		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
			sc->tx_base_ptr[i] = PDMA_BASE + TX_BASE_PTR(i);
			sc->tx_max_cnt[i] = PDMA_BASE + TX_MAX_CNT(i);
			sc->tx_ctx_idx[i] = PDMA_BASE + TX_CTX_IDX(i);
			sc->tx_dtx_idx[i] = PDMA_BASE + TX_DTX_IDX(i);
		}
		sc->rx_ring_count = 1;
		sc->rx_base_ptr[0] = PDMA_BASE + RX_BASE_PTR0;
		sc->rx_max_cnt[0] = PDMA_BASE + RX_MAX_CNT0;
		sc->rx_calc_idx[0] = PDMA_BASE + RX_CALC_IDX0;
		sc->rx_drx_idx[0] = PDMA_BASE + RX_DRX_IDX0;
		sc->int_rx_done_mask = INT_RX_DONE;
		sc->int_tx_done_mask = INT_TXQ0_DONE;
	}

	if (sc->gdma1_base != 0)
		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
		(
		GDM_ICS_EN | /* Enable IP Csum */
		GDM_TCS_EN | /* Enable TCP Csum */
		GDM_UCS_EN | /* Enable UDP Csum */
		GDM_STRPCRC | /* Strip CRC from packet */
		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
		));

	if (sc->rt_chipid == RT_CHIPID_RT2880)
		RT_WRITE(sc, MDIO_CFG, MDIO_2880_100T_INIT);

	/* allocate Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Tx ring #%d\n",
			    i);
			goto fail;
		}
	}

	sc->tx_ring_mgtqid = 5;
	for (i = 0; i < sc->rx_ring_count; i++) {
		error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Rx ring\n");
			goto fail;
		}
	}

	callout_init(&sc->periodic_ch, 0);
	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not if_alloc()\n");
		error = ENOMEM;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = rt_init;
	ifp->if_ioctl = rt_ioctl;
	ifp->if_start = rt_start;
#define	RT_TX_QLEN	256

	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
	IFQ_SET_READY(&ifp->if_snd);

#ifdef IF_RT_PHY_SUPPORT
	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		error = ENXIO;
		goto fail;
	}
#else
	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
	    NULL);
	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);

#endif /* IF_RT_PHY_SUPPORT */

	ether_request_mac(dev, sc->mac_addr);
	ether_ifattach(ifp, sc->mac_addr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;

	/* init task queue */
	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);

	sc->rx_process_limit = 100;

	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->taskqueue);

	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	rt_sysctl_attach(sc);

	/* set up interrupt */
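	/* RT5350, MT7620 and MT7621 share the RT5350-style interrupt layout */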
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621) ? rt_rt5350_intr : rt_intr,
	    sc, &sc->irqh);
	if (error != 0) {
		printf("%s: could not set up interrupt\n",
			device_get_nameunit(dev));
		goto fail;
	}
#ifdef IF_RT_DEBUG
	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
#endif

	return (0);

fail:
	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < sc->rx_ring_count; i++)
		rt_free_rx_ring(sc, &sc->rx_ring[i]);

	mtx_destroy(&sc->lock);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem);

	if (sc->irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq);

	return (error);
}

/*
 * Set media options.
 */
static int
rt_ifmedia_upd(struct ifnet *ifp)
{
	struct rt_softc *sc;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	sc = ifp->if_softc;
	RT_SOFTC_LOCK(sc);

	mii = device_get_softc(sc->rt_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	RT_SOFTC_UNLOCK(sc);

	return (error);

#else /* !IF_RT_PHY_SUPPORT */

	struct ifmedia *ifm;
	struct ifmedia_entry *ife;

	sc = ifp->if_softc;
	ifm = &sc->rt_ifmedia;
	ife = ifm->ifm_cur;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		device_printf(sc->dev,
		    "AUTO is not supported for multiphy MAC\n");
		return (EINVAL);
	}

	/*
	 * Ignore everything
	 */
	return (0);
#endif /* IF_RT_PHY_SUPPORT */
}

/*
 * Report current media status.
 */
static void
rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
#ifdef IF_RT_PHY_SUPPORT
	struct rt_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	RT_SOFTC_LOCK(sc);
	mii = device_get_softc(sc->rt_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	RT_SOFTC_UNLOCK(sc);
#else /* !IF_RT_PHY_SUPPORT */

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
#endif /* IF_RT_PHY_SUPPORT */
}

static int
rt_detach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");

	RT_SOFTC_LOCK(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);

	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);

	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_free_rx_ring(sc, &sc->rx_ring[i]);

	RT_SOFTC_UNLOCK(sc);

#ifdef IF_RT_PHY_SUPPORT
	if (sc->rt_miibus != NULL)
		device_delete_child(dev, sc->rt_miibus);
#endif

	ether_ifdetach(ifp);
	if_free(ifp);

	taskqueue_free(sc->taskqueue);

	mtx_destroy(&sc->lock);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->irq, sc->irqh);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	return (0);
}

static int
rt_shutdown(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
	rt_stop(sc);

	return (0);
}

static int
rt_suspend(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
	rt_stop(sc);

	return (0);
}

static int
rt_resume(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");

	if (ifp->if_flags & IFF_UP)
		rt_init(sc);

	return (0);
}

/*
 * rt_init_locked - run the initialization process with the mutex held.
 */
static void
rt_init_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif
	int i, ntries;
	uint32_t tmp;

	sc = priv;
	ifp = sc->ifp;
#ifdef IF_RT_PHY_SUPPORT
	mii = device_get_softc(sc->rt_miibus);
#endif

	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");

	RT_SOFTC_ASSERT_LOCKED(sc);

	/* hardware reset */
	//RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
	//rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);

	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
	if (sc->gdma1_base != 0)
		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
		(
		GDM_ICS_EN | /* Enable IP Csum */
		GDM_TCS_EN | /* Enable TCP Csum */
		GDM_UCS_EN | /* Enable UDP Csum */
		GDM_STRPCRC | /* Strip CRC from packet */
		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
		));

	/* disable DMA engine */
	RT_WRITE(sc, sc->pdma_glo_cfg, 0);
	RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff);

	/* wait while DMA engine is busy */
	for (ntries = 0; ntries < 100; ntries++) {
		tmp = RT_READ(sc, sc->pdma_glo_cfg);
		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
			break;
		DELAY(1000);
	}

	if (ntries == 100) {
		device_printf(sc->dev, "timeout waiting for DMA engine\n");
		goto fail;
	}

	/* reset Rx and Tx rings */
	tmp = FE_RST_DRX_IDX0 |
		FE_RST_DTX_IDX3 |
		FE_RST_DTX_IDX2 |
		FE_RST_DTX_IDX1 |
		FE_RST_DTX_IDX0;

	RT_WRITE(sc, sc->pdma_rst_idx, tmp);

	/* XXX switch set mac address */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		/* update TX_BASE_PTRx */
		RT_WRITE(sc, sc->tx_base_ptr[i],
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->tx_max_cnt[i],
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
	}

	/* init Rx ring */
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_reset_rx_ring(sc, &sc->rx_ring[i]);

	/* update RX_BASE_PTRx */
	for (i = 0; i < sc->rx_ring_count; i++) {
		RT_WRITE(sc, sc->rx_base_ptr[i],
			sc->rx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->rx_max_cnt[i],
			RT_SOFTC_RX_RING_DATA_COUNT);
		RT_WRITE(sc, sc->rx_calc_idx[i],
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	}

	/* write back DDONE, 16-byte burst, enable RX/TX DMA */
	tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN;
	if (sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621)
		tmp |= (1<<31);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	/* disable interrupt mitigation */
	RT_WRITE(sc, sc->delay_int_cfg, 0);

	/* clear pending interrupts */
	RT_WRITE(sc, sc->fe_int_status, 0xffffffff);

	/* enable interrupts */
	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621)
		tmp = RT5350_INT_TX_COHERENT |
		    RT5350_INT_RX_COHERENT |
		    RT5350_INT_TXQ3_DONE |
		    RT5350_INT_TXQ2_DONE |
		    RT5350_INT_TXQ1_DONE |
		    RT5350_INT_TXQ0_DONE |
		    RT5350_INT_RXQ1_DONE |
		    RT5350_INT_RXQ0_DONE;
	else
		tmp = CNT_PPE_AF |
		    CNT_GDM_AF |
		    PSE_P2_FC |
		    GDM_CRC_DROP |
		    PSE_BUF_DROP |
		    GDM_OTHER_DROP |
		    PSE_P1_FC |
		    PSE_P0_FC |
		    PSE_FQ_EMPTY |
		    INT_TX_COHERENT |
		    INT_RX_COHERENT |
		    INT_TXQ3_DONE |
		    INT_TXQ2_DONE |
		    INT_TXQ1_DONE |
		    INT_TXQ0_DONE |
		    INT_RX_DONE;

	sc->intr_enable_mask = tmp;

	RT_WRITE(sc, sc->fe_int_enable, tmp);

	if (rt_txrx_enable(sc) != 0)
		goto fail;

#ifdef IF_RT_PHY_SUPPORT
	if (mii)
		mii_mediachg(mii);
#endif /* IF_RT_PHY_SUPPORT */

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	sc->periodic_round = 0;

	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);

	return;

fail:
	rt_stop_locked(sc);
}

/*
 * rt_init - lock and initialize device.
 */
static void
rt_init(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_init_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_stop_locked - stop TX/RX with the lock held.
 */
static void
rt_stop_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = priv;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");

	RT_SOFTC_ASSERT_LOCKED(sc);
	sc->tx_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);
	RT_SOFTC_UNLOCK(sc);
	taskqueue_block(sc->taskqueue);

	/*
	 * Sometimes rt_stop_locked is called from the ISR and we panic;
	 * fix this once the cause is found.
	 */
#ifdef notyet
	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
#endif
	RT_SOFTC_LOCK(sc);

	/* disable interrupts */
	RT_WRITE(sc, sc->fe_int_enable, 0);

	if (sc->rt_chipid != RT_CHIPID_RT5350 &&
	    sc->rt_chipid != RT_CHIPID_MT7620 &&
	    sc->rt_chipid != RT_CHIPID_MT7621) {
		/* reset adapter */
		RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
	}

	if (sc->gdma1_base != 0)
		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
		(
		GDM_ICS_EN | /* Enable IP Csum */
		GDM_TCS_EN | /* Enable TCP Csum */
		GDM_UCS_EN | /* Enable UDP Csum */
		GDM_STRPCRC | /* Strip CRC from packet */
		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
		));
}

static void
rt_stop(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_stop_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_data - transmit a packet.
 */
static int
rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
{
	struct ifnet *ifp;
	struct rt_softc_tx_ring *ring;
	struct rt_softc_tx_data *data;
	struct rt_txdesc *desc;
	struct mbuf *m_d;
	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
	int error, ndmasegs, ndescs, i;

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx data: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);

	ifp = sc->ifp;
	ring = &sc->tx_ring[qid];
	desc = &ring->desc[ring->desc_cur];
	data = &ring->data[ring->data_cur];

	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
	    dma_seg, &ndmasegs, 0);
	if (error != 0)	{
		/* too many fragments, linearize */

		RT_DPRINTF(sc, RT_DEBUG_TX,
			"could not load mbuf DMA map, trying to linearize "
			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
			ndmasegs, m->m_pkthdr.len, error);

		m_d = m_collapse(m, M_NOWAIT, 16);
		if (m_d == NULL) {
			m_freem(m);
			m = NULL;
			return (ENOMEM);
		}
		m = m_d;

		sc->tx_defrag_packets++;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, m, dma_seg, &ndmasegs, 0);
		if (error != 0)	{
			device_printf(sc->dev, "could not load mbuf DMA map: "
			    "ndmasegs=%d, len=%d, error=%d\n",
			    ndmasegs, m->m_pkthdr.len, error);
			m_freem(m);
			return (error);
		}
	}

	if (m->m_pkthdr.len == 0)
		ndmasegs = 0;

	/* determine how many Tx descs are required */
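	/*
	 * Each descriptor holds up to two DMA segments (sdp0/sdl0 and
	 * sdp1/sdl1); "1 + ndmasegs / 2" over-reserves by at most one
	 * descriptor as slack.
	 */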
	ndescs = 1 + ndmasegs / 2;
	if ((ring->desc_queued + ndescs) >
	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "there are not enough Tx descs\n");

		sc->no_tx_desc_avail++;

		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
		m_freem(m);
		return (EFBIG);
	}

	data->m = m;

	/* set up Tx descs */
	for (i = 0; i < ndmasegs; i += 2) {

		/* TODO: this needs to be refined as MT7620 for example has
		 * a different word3 layout than RT305x and RT5350 (the last
		 * one doesn't use word3 at all). And so does MT7621...
		 */

		if (sc->rt_chipid != RT_CHIPID_MT7621) {
			/* Set destination */
			if (sc->rt_chipid != RT_CHIPID_MT7620)
				desc->dst = (TXDSCR_DST_PORT_GDMA1);

			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				desc->dst |= (TXDSCR_IP_CSUM_GEN |
				    TXDSCR_UDP_CSUM_GEN | TXDSCR_TCP_CSUM_GEN);
			/* Set queue id */
			desc->qn = qid;
			/* No PPPoE */
			desc->pppoe = 0;
			/* No VLAN */
			desc->vid = 0;
		} else {
			desc->vid = 0;
			desc->pppoe = 0;
			desc->qn = 0;
			desc->dst = 2;
		}

		desc->sdp0 = htole32(dma_seg[i].ds_addr);
		desc->sdl0 = htole16(dma_seg[i].ds_len |
		    ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));

		if ((i+1) < ndmasegs) {
			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
			desc->sdl1 = htole16(dma_seg[i+1].ds_len |
			    ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
		} else {
			desc->sdp1 = 0;
			desc->sdl1 = 0;
		}

		if ((i+2) < ndmasegs) {
			ring->desc_queued++;
			ring->desc_cur = (ring->desc_cur + 1) %
			    RT_SOFTC_TX_RING_DESC_COUNT;
		}
		desc = &ring->desc[ring->desc_cur];
	}

	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
	    "DMA ds_len=%d/%d/%d/%d/%d\n",
	    m->m_pkthdr.len, ndmasegs,
	    (int) dma_seg[0].ds_len,
	    (int) dma_seg[1].ds_len,
	    (int) dma_seg[2].ds_len,
	    (int) dma_seg[3].ds_len,
	    (int) dma_seg[4].ds_len);

	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREWRITE);

	ring->desc_queued++;
	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;

	ring->data_queued++;
	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;

	/* kick Tx */
	RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur);

	return (0);
}

/*
 * rt_start - dequeue pending packets and hand them to the Tx ring.
 */
static void
rt_start(struct ifnet *ifp)
{
	struct rt_softc *sc;
	struct mbuf *m;
	int qid = 0 /* XXX must check QoS priority */;

	sc = ifp->if_softc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		m->m_pkthdr.rcvif = NULL;

		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);

		if (sc->tx_ring[qid].data_queued >=
		    RT_SOFTC_TX_RING_DATA_COUNT) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			RT_DPRINTF(sc, RT_DEBUG_TX,
			    "if_start: Tx ring with qid=%d is full\n", qid);

			m_freem(m);

			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

			sc->tx_data_queue_full[qid]++;

			break;
		}

		if (rt_tx_data(sc, m, qid) != 0) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

			break;
		}

		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
	}
}

/*
 * rt_update_promisc - set/clear promiscuous mode. Not used yet, because
 * filtering is done by the attached Ethernet switch.
 */
static void
rt_update_promisc(struct ifnet *ifp)
{
	struct rt_softc *sc;

	sc = ifp->if_softc;
	printf("%s: %s promiscuous mode\n",
		device_get_nameunit(sc->dev),
		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
}

/*
 * rt_ioctl - ioctl handler.
 */
static int
rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rt_softc *sc;
	struct ifreq *ifr;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif /* IF_RT_PHY_SUPPORT */
	int error, startall;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;

	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		startall = 0;
		RT_SOFTC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    IFF_PROMISC)
					rt_update_promisc(ifp);
			} else {
				rt_init_locked(sc);
				startall = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rt_stop_locked(sc);
		}
		sc->if_flags = ifp->if_flags;
		RT_SOFTC_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
#ifdef IF_RT_PHY_SUPPORT
		mii = device_get_softc(sc->rt_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
#else
		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
#endif /* IF_RT_PHY_SUPPORT */
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

/*
 * rt_periodic - handler of the periodic timer
 */
static void
rt_periodic(void *arg)
{
	struct rt_softc *sc;

	sc = arg;
	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
}

/*
 * rt_tx_watchdog - handler of the Tx watchdog
 */
static void
rt_tx_watchdog(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;

	if (sc->tx_timer == 0)
		return;

	if (--sc->tx_timer == 0) {
		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
#ifdef notyet
		/*
		 * XXX: Commented out, because the reset breaks input.
		 */
		rt_stop_locked(sc);
		rt_init_locked(sc);
#endif
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		sc->tx_watchdog_timeouts++;
	}
	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
}

/*
 * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
 */
static void
rt_cnt_ppe_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
}

/*
 * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
 */
static void
rt_cnt_gdm_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 Counter Table Almost Full\n");
}

/*
 * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
 */
static void
rt_pse_p2_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port2 (GDMA 2) flow control asserted.\n");
}

/*
 * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
 * interrupt
 */
static void
rt_gdm_crc_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to CRC error\n");
}

/*
 * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
 */
static void
rt_pse_buf_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE discards a packet due to buffer sharing limitation\n");
}

/*
 * rt_gdm_other_drop - Handler of discard on other reason interrupt
 */
static void
rt_gdm_other_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to other reason\n");
}

/*
 * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
 */
static void
rt_pse_p1_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port1 (GDMA 1) flow control asserted.\n");
}

/*
 * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
 */
static void
rt_pse_p0_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port0 (CDMA) flow control asserted.\n");
}

/*
 * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
 */
static void
rt_pse_fq_empty(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE free Q empty threshold reached & forced drop "
		    "condition occurred.\n");
}

/*
 * rt_intr - main ISR
 */
static void
rt_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, sc->fe_int_status);
	RT_WRITE(sc, sc->fe_int_status, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	if (status & CNT_PPE_AF)
		rt_cnt_ppe_af(sc);

	if (status & CNT_GDM_AF)
		rt_cnt_gdm_af(sc);

	if (status & PSE_P2_FC)
		rt_pse_p2_fc(sc);

	if (status & GDM_CRC_DROP)
		rt_gdm_crc_drop(sc);

	if (status & PSE_BUF_DROP)
		rt_pse_buf_drop(sc);

	if (status & GDM_OTHER_DROP)
		rt_gdm_other_drop(sc);

	if (status & PSE_P1_FC)
		rt_pse_p1_fc(sc);

	if (status & PSE_P0_FC)
		rt_pse_p0_fc(sc);

	if (status & PSE_FQ_EMPTY)
		rt_pse_fq_empty(sc);

	if (status & INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);

	if (status & INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);

	if (status & RX_DLY_INT)
		rt_rx_delay_intr(sc);

	if (status & TX_DLY_INT)
		rt_tx_delay_intr(sc);

	if (status & INT_RX_DONE)
		rt_rx_intr(sc, 0);

	if (status & INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);

	if (status & INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);

	if (status & INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);

	if (status & INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}

/*
 * rt_rt5350_intr - main ISR for the Ralink RT5350 SoC
 */
static void
rt_rt5350_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, sc->fe_int_status);
	RT_WRITE(sc, sc->fe_int_status, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	if (status & RT5350_INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);
	if (status & RT5350_INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);
	if (status & RT5350_RX_DLY_INT)
		rt_rx_delay_intr(sc);
	if (status & RT5350_TX_DLY_INT)
		rt_tx_delay_intr(sc);
	if (status & RT5350_INT_RXQ1_DONE)
		rt_rx_intr(sc, 1);
	if (status & RT5350_INT_RXQ0_DONE)
		rt_rx_intr(sc, 0);
	if (status & RT5350_INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);
	if (status & RT5350_INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);
	if (status & RT5350_INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);
	if (status & RT5350_INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}

static void
rt_tx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");

	sc->tx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, sc->pdma_glo_cfg);
	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		RT_WRITE(sc, sc->tx_base_ptr[i],
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->tx_max_cnt[i],
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_coherent_intr
 */
static void
rt_rx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");

	sc->rx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, sc->pdma_glo_cfg);
	tmp &= ~(FE_RX_DMA_EN);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	/* init Rx ring */
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_reset_rx_ring(sc, &sc->rx_ring[i]);

	for (i = 0; i < sc->rx_ring_count; i++) {
		RT_WRITE(sc, sc->rx_base_ptr[i],
			sc->rx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->rx_max_cnt[i],
			RT_SOFTC_RX_RING_DATA_COUNT);
		RT_WRITE(sc, sc->rx_calc_idx[i],
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_intr - a packet was received
 */
static void
rt_rx_intr(struct rt_softc *sc, int qid)
{
	KASSERT(qid >= 0 && qid < sc->rx_ring_count,
		("%s: Rx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
	sc->rx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

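	/*
	 * Mask further Rx-done interrupts for this queue and defer the work
	 * to the taskqueue; rt_rx_done_task() re-enables the interrupt once
	 * the ring has been drained.
	 */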
	if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) {
		rt_intr_disable(sc, (sc->int_rx_done_mask << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	}

	sc->intr_pending_mask |= (sc->int_rx_done_mask << qid);
	RT_SOFTC_UNLOCK(sc);
}

static void
rt_rx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
	sc->rx_delay_interrupts++;
}

static void
rt_tx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
	sc->tx_delay_interrupts++;
}

/*
 * rt_tx_intr - transmission of a packet is done
 */
static void
rt_tx_intr(struct rt_softc *sc, int qid)
{

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);

	sc->tx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) {
		rt_intr_disable(sc, (sc->int_tx_done_mask << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	sc->intr_pending_mask |= (sc->int_tx_done_mask << qid);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_rx_done_task - run the Rx processing task
 */
static void
rt_rx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int again;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	sc->intr_pending_mask &= ~sc->int_rx_done_mask;

	again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);

	RT_SOFTC_LOCK(sc);

	if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) {
		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	} else {
		rt_intr_enable(sc, sc->int_rx_done_mask);
	}

	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_done_task - process Tx completions in all queues
 */
static void
rt_tx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t intr_mask;
	int i;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
		if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) {
			sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i);
			rt_tx_eof(sc, &sc->tx_ring[i]);
		}
	}

	sc->tx_timer = 0;

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621)
		intr_mask = (
		    RT5350_INT_TXQ3_DONE |
		    RT5350_INT_TXQ2_DONE |
		    RT5350_INT_TXQ1_DONE |
		    RT5350_INT_TXQ0_DONE);
	else
		intr_mask = (
		    INT_TXQ3_DONE |
		    INT_TXQ2_DONE |
		    INT_TXQ1_DONE |
		    INT_TXQ0_DONE);

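	/*
	 * Re-enable only those Tx-done interrupts that are currently masked
	 * and have nothing left pending; queues that still have pending work
	 * are rescheduled below instead.
	 */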
	RT_SOFTC_LOCK(sc);

	rt_intr_enable(sc, ~sc->intr_pending_mask &
	    (sc->intr_disable_mask & intr_mask));

	if (sc->intr_pending_mask & intr_mask) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "Tx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	RT_SOFTC_UNLOCK(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		rt_start(ifp);
}

/*
 * rt_periodic_task - run periodic task
 */
static void
rt_periodic_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
	    sc->periodic_round);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	RT_SOFTC_LOCK(sc);
	sc->periodic_round++;
	rt_update_stats(sc);

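	/*
	 * Every tenth round (about once a second at hz/10) also refresh
	 * the raw hardware counters and run the watchdog check.
	 */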
	if ((sc->periodic_round % 10) == 0) {
		rt_update_raw_counters(sc);
		rt_watchdog(sc);
	}

	RT_SOFTC_UNLOCK(sc);
	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
}

1826 /*
1827  * rt_rx_eof - check for frames that done by DMA engine and pass it into
1828  * network subsystem.
1829  */
1830 static int
rt_rx_eof(struct rt_softc * sc,struct rt_softc_rx_ring * ring,int limit)1831 rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit)
1832 {
1833 	struct ifnet *ifp;
1834 /*	struct rt_softc_rx_ring *ring; */
1835 	struct rt_rxdesc *desc;
1836 	struct rt_softc_rx_data *data;
1837 	struct mbuf *m, *mnew;
1838 	bus_dma_segment_t segs[1];
1839 	bus_dmamap_t dma_map;
1840 	uint32_t index, desc_flags;
1841 	int error, nsegs, len, nframes;
1842 
1843 	ifp = sc->ifp;
1844 /*	ring = &sc->rx_ring[0]; */
1845 
1846 	nframes = 0;
1847 
1848 	while (limit != 0) {
1849 		index = RT_READ(sc, sc->rx_drx_idx[0]);
1850 		if (ring->cur == index)
1851 			break;
1852 
1853 		desc = &ring->desc[ring->cur];
1854 		data = &ring->data[ring->cur];
1855 
1856 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1857 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1858 
1859 #ifdef IF_RT_DEBUG
1860 		if ( sc->debug & RT_DEBUG_RX ) {
1861 			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
1862 		        hexdump(desc, 16, 0, 0);
1863 			printf("-----------------------------------\n");
1864 		}
1865 #endif
1866 
1867 		/* XXX Sometime device don`t set DDONE bit */
1868 #ifdef DDONE_FIXED
1869 		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
1870 			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
1871 			break;
1872 		}
1873 #endif
1874 
1875 		len = le16toh(desc->sdl0) & 0x3fff;
1876 		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);
1877 
1878 		nframes++;
1879 
1880 		mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1881 		    MJUMPAGESIZE);
1882 		if (mnew == NULL) {
1883 			sc->rx_mbuf_alloc_errors++;
1884 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1885 			goto skip;
1886 		}
1887 
1888 		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;
1889 
1890 		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
1891 		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
1892 		if (error != 0) {
1893 			RT_DPRINTF(sc, RT_DEBUG_RX,
1894 			    "could not load Rx mbuf DMA map: "
1895 			    "error=%d, nsegs=%d\n",
1896 			    error, nsegs);
1897 
1898 			m_freem(mnew);
1899 
1900 			sc->rx_mbuf_dmamap_errors++;
1901 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1902 
1903 			goto skip;
1904 		}
1905 
1906 		KASSERT(nsegs == 1, ("%s: too many DMA segments",
1907 			device_get_nameunit(sc->dev)));
1908 
1909 		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1910 			BUS_DMASYNC_POSTREAD);
1911 		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
1912 
1913 		dma_map = data->dma_map;
1914 		data->dma_map = ring->spare_dma_map;
1915 		ring->spare_dma_map = dma_map;
1916 
1917 		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1918 			BUS_DMASYNC_PREREAD);
1919 
1920 		m = data->m;
1921 		desc_flags = desc->word3;
1922 
1923 		data->m = mnew;
1924 		/* Add 2 for proper align of RX IP header */
1925 		desc->sdp0 = htole32(segs[0].ds_addr+2);
1926 		desc->sdl0 = htole32(segs[0].ds_len-2);
1927 		desc->word3 = 0;
1928 
1929 		RT_DPRINTF(sc, RT_DEBUG_RX,
1930 		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);
1931 
1932 		m->m_pkthdr.rcvif = ifp;
1933 		/* Add 2 to fix data align, after sdp0 = addr + 2 */
1934 		m->m_data += 2;
1935 		m->m_pkthdr.len = m->m_len = len;
1936 
1937 		/* check hardware-reported checksum errors */
1938 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1939 			/* check for a valid checksum */
1940 			if (desc_flags & (sc->csum_fail_ip|sc->csum_fail_l4)) {
1941 				RT_DPRINTF(sc, RT_DEBUG_RX,
1942 				    "rxdesc: crc error\n");
1943 
1944 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1945 
1946 				if (!(ifp->if_flags & IFF_PROMISC)) {
1947 				    m_freem(m);
1948 				    goto skip;
1949 				}
1950 			}
1951 			if ((desc_flags & sc->csum_fail_ip) == 0) {
1952 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1953 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1954 				m->m_pkthdr.csum_data = 0xffff;
1955 			}
1956 			m->m_flags &= ~M_HASFCS;
1957 		}
1958 
1959 		(*ifp->if_input)(ifp, m);
1960 skip:
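		/* Return the descriptor to the hardware by clearing DDONE. */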
1961 		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
1962 
1963 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1964 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1965 
1966 		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;
1967 
1968 		limit--;
1969 	}
1970 
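	/*
	 * Publish the last descriptor we have returned; the hardware
	 * apparently treats rx_calc_idx as the end of the region it may
	 * fill, hence (cur - 1) modulo the ring size.
	 */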
1971 	if (ring->cur == 0)
1972 		RT_WRITE(sc, sc->rx_calc_idx[0],
1973 			RT_SOFTC_RX_RING_DATA_COUNT - 1);
1974 	else
1975 		RT_WRITE(sc, sc->rx_calc_idx[0],
1976 			ring->cur - 1);
1977 
1978 	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);
1979 
1980 	sc->rx_packets += nframes;
1981 
1982 	return (limit == 0);
1983 }
1984 
1985 /*
1986  * rt_tx_eof - check for successfully transmitted frames and mark their
1987  * descriptors as free.
1988  */
1989 static void
1990 rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
1991 {
1992 	struct ifnet *ifp;
1993 	struct rt_txdesc *desc;
1994 	struct rt_softc_tx_data *data;
1995 	uint32_t index;
1996 	int ndescs, nframes;
1997 
1998 	ifp = sc->ifp;
1999 
2000 	ndescs = 0;
2001 	nframes = 0;
2002 
2003 	for (;;) {
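		/*
		 * tx_dtx_idx is the hardware's current TX descriptor index;
		 * everything behind it has been transmitted and can be
		 * reclaimed.
		 */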
2004 		index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]);
2005 		if (ring->desc_next == index)
2006 			break;
2007 
2008 		ndescs++;
2009 
2010 		desc = &ring->desc[ring->desc_next];
2011 
2012 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2013 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2014 
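		/*
		 * A frame may occupy several descriptors; unload the DMA map
		 * and free the mbuf only when its last segment completes.
		 */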
2015 		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
2016 			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
2017 			nframes++;
2018 
2019 			data = &ring->data[ring->data_next];
2020 
2021 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2022 				BUS_DMASYNC_POSTWRITE);
2023 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2024 
2025 			m_freem(data->m);
2026 
2027 			data->m = NULL;
2028 
2029 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2030 
2031 			RT_SOFTC_TX_RING_LOCK(ring);
2032 			ring->data_queued--;
2033 			ring->data_next = (ring->data_next + 1) %
2034 			    RT_SOFTC_TX_RING_DATA_COUNT;
2035 			RT_SOFTC_TX_RING_UNLOCK(ring);
2036 		}
2037 
2038 		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);
2039 
2040 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2041 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2042 
2043 		RT_SOFTC_TX_RING_LOCK(ring);
2044 		ring->desc_queued--;
2045 		ring->desc_next = (ring->desc_next + 1) %
2046 		    RT_SOFTC_TX_RING_DESC_COUNT;
2047 		RT_SOFTC_TX_RING_UNLOCK(ring);
2048 	}
2049 
2050 	RT_DPRINTF(sc, RT_DEBUG_TX,
2051 	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
2052 	    nframes);
2053 }
2054 
2055 /*
2056  * rt_update_stats - query statistics counters and update related variables.
2057  */
2058 static void
2059 rt_update_stats(struct rt_softc *sc)
2060 {
2061 	struct ifnet *ifp;
2062 
2063 	ifp = sc->ifp;
2064 	RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistics:\n");
2065 	/* XXX do update stats here */
2066 }
2067 
2068 /*
2069  * rt_watchdog - reinit device on watchdog event.
2070  */
2071 static void
2072 rt_watchdog(struct rt_softc *sc)
2073 {
2074 	uint32_t tmp;
2075 #ifdef notyet
2076 	int ntries;
2077 #endif
2078 	if (sc->rt_chipid != RT_CHIPID_RT5350 &&
2079 	   sc->rt_chipid != RT_CHIPID_MT7620 &&
2080 	   sc->rt_chipid != RT_CHIPID_MT7621) {
2081 		tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);
2082 
2083 		RT_DPRINTF(sc, RT_DEBUG_WATCHDOG,
2084 			   "watchdog: PSE_IQ_STA=0x%08x\n", tmp);
2085 	}
2086 	/* XXX: do not reset */
2087 #ifdef notyet
2088 	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
2089 		sc->tx_queue_not_empty[0]++;
2090 
2091 		for (ntries = 0; ntries < 10; ntries++) {
2092 			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2093 			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
2094 				break;
2095 
2096 			DELAY(1);
2097 		}
2098 	}
2099 
2100 	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
2101 		sc->tx_queue_not_empty[1]++;
2102 
2103 		for (ntries = 0; ntries < 10; ntries++) {
2104 			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2105 			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
2106 				break;
2107 
2108 			DELAY(1);
2109 		}
2110 	}
2111 #endif
2112 }
2113 
2114 /*
2115  * rt_update_raw_counters - update counters.
2116  */
2117 static void
2118 rt_update_raw_counters(struct rt_softc *sc)
2119 {
2120 
2121 	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
2122 	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
2123 	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
2124 	sc->tx_collision += RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
2125 
2126 	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
2127 	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
2128 	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
2129 	sc->rx_short_err += RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
2130 	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
2131 	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
2132 	sc->rx_fifo_overflows += RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
2133 }
2134 
2135 static void
2136 rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
2137 {
2138 	uint32_t tmp;
2139 
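	/*
	 * intr_enable_mask is the static set of interrupts the driver
	 * wants; intr_disable_mask tracks sources that are temporarily
	 * masked.  The register always gets the difference of the two.
	 */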
2140 	sc->intr_disable_mask &= ~intr_mask;
2141 	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2142 	RT_WRITE(sc, sc->fe_int_enable, tmp);
2143 }
2144 
2145 static void
2146 rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
2147 {
2148 	uint32_t tmp;
2149 
2150 	sc->intr_disable_mask |= intr_mask;
2151 	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2152 	RT_WRITE(sc, sc->fe_int_enable, tmp);
2153 }
2154 
2155 /*
2156  * rt_txrx_enable - enable TX/RX DMA
2157  */
2158 static int
2159 rt_txrx_enable(struct rt_softc *sc)
2160 {
2161 	struct ifnet *ifp;
2162 	uint32_t tmp;
2163 	int ntries;
2164 
2165 	ifp = sc->ifp;
2166 
2167 	/* enable Tx/Rx DMA engine */
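	/* Wait up to 200 ms (200 x 1 ms) for both DMA engines to go idle first. */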
2168 	for (ntries = 0; ntries < 200; ntries++) {
2169 		tmp = RT_READ(sc, sc->pdma_glo_cfg);
2170 		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
2171 			break;
2172 
2173 		DELAY(1000);
2174 	}
2175 
2176 	if (ntries == 200) {
2177 		device_printf(sc->dev, "timeout waiting for DMA engine\n");
2178 		return (-1);
2179 	}
2180 
2181 	DELAY(50);
2182 
2183 	tmp |= FE_TX_WB_DDONE |	FE_RX_DMA_EN | FE_TX_DMA_EN;
2184 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
2185 
2186 	/* XXX set Rx filter */
2187 	return (0);
2188 }
2189 
2190 /*
2191  * rt_alloc_rx_ring - allocate RX DMA ring buffer
2192  */
2193 static int
2194 rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid)
2195 {
2196 	struct rt_rxdesc *desc;
2197 	struct rt_softc_rx_data *data;
2198 	bus_dma_segment_t segs[1];
2199 	int i, nsegs, error;
2200 
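	/*
	 * One contiguous DMA block holds the whole descriptor array; each
	 * slot gets its own MJUMPAGESIZE mbuf and map.  A spare map lets
	 * rt_rx_eof map a replacement mbuf before giving up the old one.
	 */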
2201 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2202 		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2203 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
2204 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2205 		0, NULL, NULL, &ring->desc_dma_tag);
2206 	if (error != 0)	{
2207 		device_printf(sc->dev,
2208 		    "could not create Rx desc DMA tag\n");
2209 		goto fail;
2210 	}
2211 
2212 	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2213 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2214 	if (error != 0) {
2215 		device_printf(sc->dev,
2216 		    "could not allocate Rx desc DMA memory\n");
2217 		goto fail;
2218 	}
2219 
2220 	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2221 		ring->desc,
2222 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2223 		rt_dma_map_addr, &ring->desc_phys_addr, 0);
2224 	if (error != 0) {
2225 		device_printf(sc->dev, "could not load Rx desc DMA map\n");
2226 		goto fail;
2227 	}
2228 
2229 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2230 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2231 		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
2232 		&ring->data_dma_tag);
2233 	if (error != 0)	{
2234 		device_printf(sc->dev,
2235 		    "could not create Rx data DMA tag\n");
2236 		goto fail;
2237 	}
2238 
2239 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2240 		desc = &ring->desc[i];
2241 		data = &ring->data[i];
2242 
2243 		error = bus_dmamap_create(ring->data_dma_tag, 0,
2244 		    &data->dma_map);
2245 		if (error != 0)	{
2246 			device_printf(sc->dev, "could not create Rx data DMA "
2247 			    "map\n");
2248 			goto fail;
2249 		}
2250 
2251 		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2252 		    MJUMPAGESIZE);
2253 		if (data->m == NULL) {
2254 			device_printf(sc->dev, "could not allocate Rx mbuf\n");
2255 			error = ENOMEM;
2256 			goto fail;
2257 		}
2258 
2259 		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
2260 
2261 		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
2262 		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
2263 		if (error != 0)	{
2264 			device_printf(sc->dev,
2265 			    "could not load Rx mbuf DMA map\n");
2266 			goto fail;
2267 		}
2268 
2269 		KASSERT(nsegs == 1, ("%s: too many DMA segments",
2270 			device_get_nameunit(sc->dev)));
2271 
2272 		/* Add 2 so the RX IP header is properly aligned */
2273 		desc->sdp0 = htole32(segs[0].ds_addr+2);
2274 		desc->sdl0 = htole32(segs[0].ds_len-2);
2275 	}
2276 
2277 	error = bus_dmamap_create(ring->data_dma_tag, 0,
2278 	    &ring->spare_dma_map);
2279 	if (error != 0) {
2280 		device_printf(sc->dev,
2281 		    "could not create Rx spare DMA map\n");
2282 		goto fail;
2283 	}
2284 
2285 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2286 		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2287 	ring->qid = qid;
2288 	return (0);
2289 
2290 fail:
2291 	rt_free_rx_ring(sc, ring);
2292 	return (error);
2293 }
2294 
2295 /*
2296  * rt_reset_rx_ring - reset RX ring buffer
2297  */
2298 static void
2299 rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2300 {
2301 	struct rt_rxdesc *desc;
2302 	int i;
2303 
2304 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2305 		desc = &ring->desc[i];
2306 		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2307 	}
2308 
2309 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2310 		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2311 	ring->cur = 0;
2312 }
2313 
2314 /*
2315  * rt_free_rx_ring - free memory used by RX ring buffer
2316  */
2317 static void
2318 rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2319 {
2320 	struct rt_softc_rx_data *data;
2321 	int i;
2322 
2323 	if (ring->desc != NULL) {
2324 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2325 			BUS_DMASYNC_POSTWRITE);
2326 		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2327 		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2328 			ring->desc_dma_map);
2329 	}
2330 
2331 	if (ring->desc_dma_tag != NULL)
2332 		bus_dma_tag_destroy(ring->desc_dma_tag);
2333 
2334 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2335 		data = &ring->data[i];
2336 
2337 		if (data->m != NULL) {
2338 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2339 				BUS_DMASYNC_POSTREAD);
2340 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2341 			m_freem(data->m);
2342 		}
2343 
2344 		if (data->dma_map != NULL)
2345 			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2346 	}
2347 
2348 	if (ring->spare_dma_map != NULL)
2349 		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
2350 
2351 	if (ring->data_dma_tag != NULL)
2352 		bus_dma_tag_destroy(ring->data_dma_tag);
2353 }
2354 
2355 /*
2356  * rt_alloc_tx_ring - allocate TX ring buffer
2357  */
2358 static int
2359 rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2360 {
2361 	struct rt_softc_tx_data *data;
2362 	int error, i;
2363 
2364 	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2365 
2366 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2367 		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2368 		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2369 		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2370 		0, NULL, NULL, &ring->desc_dma_tag);
2371 	if (error != 0) {
2372 		device_printf(sc->dev,
2373 		    "could not create Tx desc DMA tag\n");
2374 		goto fail;
2375 	}
2376 
2377 	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2378 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2379 	if (error != 0)	{
2380 		device_printf(sc->dev,
2381 		    "could not allocate Tx desc DMA memory\n");
2382 		goto fail;
2383 	}
2384 
2385 	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2386 	    ring->desc,	(RT_SOFTC_TX_RING_DESC_COUNT *
2387 	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
2388 	    &ring->desc_phys_addr, 0);
2389 	if (error != 0) {
2390 		device_printf(sc->dev, "could not load Tx desc DMA map\n");
2391 		goto fail;
2392 	}
2393 
2394 	ring->desc_queued = 0;
2395 	ring->desc_cur = 0;
2396 	ring->desc_next = 0;
2397 
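	/*
	 * seg0 is one contiguous staging area with a fixed
	 * RT_TX_DATA_SEG0_SIZE slice per TX slot, mapped once here and
	 * presumably used by the transmit path as the first (header)
	 * segment of each queued frame.
	 */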
2398 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2399 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2400 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2401 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2402 	    0, NULL, NULL, &ring->seg0_dma_tag);
2403 	if (error != 0) {
2404 		device_printf(sc->dev,
2405 		    "could not create Tx seg0 DMA tag\n");
2406 		goto fail;
2407 	}
2408 
2409 	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2410 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2411 	if (error != 0) {
2412 		device_printf(sc->dev,
2413 		    "could not allocate Tx seg0 DMA memory\n");
2414 		goto fail;
2415 	}
2416 
2417 	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2418 	    ring->seg0,
2419 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2420 	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2421 	if (error != 0) {
2422 		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2423 		goto fail;
2424 	}
2425 
2426 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2427 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2428 	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2429 	    &ring->data_dma_tag);
2430 	if (error != 0) {
2431 		device_printf(sc->dev,
2432 		    "could not create Tx data DMA tag\n");
2433 		goto fail;
2434 	}
2435 
2436 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2437 		data = &ring->data[i];
2438 
2439 		error = bus_dmamap_create(ring->data_dma_tag, 0,
2440 		    &data->dma_map);
2441 		if (error != 0) {
2442 			device_printf(sc->dev, "could not create Tx data DMA "
2443 			    "map\n");
2444 			goto fail;
2445 		}
2446 	}
2447 
2448 	ring->data_queued = 0;
2449 	ring->data_cur = 0;
2450 	ring->data_next = 0;
2451 
2452 	ring->qid = qid;
2453 	return (0);
2454 
2455 fail:
2456 	rt_free_tx_ring(sc, ring);
2457 	return (error);
2458 }
2459 
2460 /*
2461  * rt_reset_tx_ring - reset TX ring buffer to empty state
2462  */
2463 static void
2464 rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2465 {
2466 	struct rt_softc_tx_data *data;
2467 	struct rt_txdesc *desc;
2468 	int i;
2469 
2470 	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2471 		desc = &ring->desc[i];
2472 
2473 		desc->sdl0 = 0;
2474 		desc->sdl1 = 0;
2475 	}
2476 
2477 	ring->desc_queued = 0;
2478 	ring->desc_cur = 0;
2479 	ring->desc_next = 0;
2480 
2481 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2482 		BUS_DMASYNC_PREWRITE);
2483 
2484 	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2485 		BUS_DMASYNC_PREWRITE);
2486 
2487 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2488 		data = &ring->data[i];
2489 
2490 		if (data->m != NULL) {
2491 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2492 				BUS_DMASYNC_POSTWRITE);
2493 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2494 			m_freem(data->m);
2495 			data->m = NULL;
2496 		}
2497 	}
2498 
2499 	ring->data_queued = 0;
2500 	ring->data_cur = 0;
2501 	ring->data_next = 0;
2502 }
2503 
2504 /*
2505  * rt_free_tx_ring - free memory used by TX ring buffer
2506  */
2507 static void
2508 rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2509 {
2510 	struct rt_softc_tx_data *data;
2511 	int i;
2512 
2513 	if (ring->desc != NULL) {
2514 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2515 			BUS_DMASYNC_POSTWRITE);
2516 		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2517 		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2518 			ring->desc_dma_map);
2519 	}
2520 
2521 	if (ring->desc_dma_tag != NULL)
2522 		bus_dma_tag_destroy(ring->desc_dma_tag);
2523 
2524 	if (ring->seg0 != NULL) {
2525 		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2526 			BUS_DMASYNC_POSTWRITE);
2527 		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2528 		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2529 			ring->seg0_dma_map);
2530 	}
2531 
2532 	if (ring->seg0_dma_tag != NULL)
2533 		bus_dma_tag_destroy(ring->seg0_dma_tag);
2534 
2535 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2536 		data = &ring->data[i];
2537 
2538 		if (data->m != NULL) {
2539 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2540 				BUS_DMASYNC_POSTWRITE);
2541 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2542 			m_freem(data->m);
2543 		}
2544 
2545 		if (data->dma_map != NULL)
2546 			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2547 	}
2548 
2549 	if (ring->data_dma_tag != NULL)
2550 		bus_dma_tag_destroy(ring->data_dma_tag);
2551 
2552 	mtx_destroy(&ring->lock);
2553 }
2554 
2555 /*
2556  * rt_dma_map_addr - get address of busdma segment
2557  */
2558 static void
2559 rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2560 {
2561 	if (error != 0)
2562 		return;
2563 
2564 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2565 
2566 	*(bus_addr_t *) arg = segs[0].ds_addr;
2567 }
2568 
2569 /*
2570  * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2571  */
2572 static void
2573 rt_sysctl_attach(struct rt_softc *sc)
2574 {
2575 	struct sysctl_ctx_list *ctx;
2576 	struct sysctl_oid *tree;
2577 	struct sysctl_oid *stats;
2578 
2579 	ctx = device_get_sysctl_ctx(sc->dev);
2580 	tree = device_get_sysctl_tree(sc->dev);
2581 
2582 	/* statistic counters */
2583 	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2584 	    "stats", CTLFLAG_RD, 0, "statistic");
2585 
2586 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2587 	    "interrupts", CTLFLAG_RD, &sc->interrupts,
2588 	    "all interrupts");
2589 
2590 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2591 	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
2592 	    "Tx coherent interrupts");
2593 
2594 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2595 	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
2596 	    "Rx coherent interrupts");
2597 
2598 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2599 	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0],
2600 	    "Rx interrupts");
2601 
2602 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2603 	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts,
2604 	    "Rx delay interrupts");
2605 
2606 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2607 	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3],
2608 	    "Tx AC3 interrupts");
2609 
2610 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2611 	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2],
2612 	    "Tx AC2 interrupts");
2613 
2614 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2615 	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1],
2616 	    "Tx AC1 interrupts");
2617 
2618 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2619 	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0],
2620 	    "Tx AC0 interrupts");
2621 
2622 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2623 	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
2624 	    "Tx delay interrupts");
2625 
2626 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2627 	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
2628 	    0, "Tx AC3 descriptors queued");
2629 
2630 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2631 	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
2632 	    0, "Tx AC3 data queued");
2633 
2634 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2635 	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
2636 	    0, "Tx AC2 descriptors queued");
2637 
2638 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2639 	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
2640 	    0, "Tx AC2 data queued");
2641 
2642 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2643 	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
2644 	    0, "Tx AC1 descriptors queued");
2645 
2646 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2647 	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
2648 	    0, "Tx AC1 data queued");
2649 
2650 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2651 	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
2652 	    0, "Tx AC0 descriptors queued");
2653 
2654 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2655 	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
2656 	    0, "Tx AC0 data queued");
2657 
2658 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2659 	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
2660 	    "Tx AC3 data queue full");
2661 
2662 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2663 	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
2664 	    "Tx AC2 data queue full");
2665 
2666 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2667 	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
2668 	    "Tx AC1 data queue full");
2669 
2670 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2671 	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
2672 	    "Tx AC0 data queue full");
2673 
2674 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2675 	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
2676 	    "Tx watchdog timeouts");
2677 
2678 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2679 	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets,
2680 	    "Tx defragmented packets");
2681 
2682 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2683 	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail,
2684 	    "no Tx descriptors available");
2685 
2686 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2687 	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
2688 	    "Rx mbuf allocation errors");
2689 
2690 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2691 	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
2692 	    "Rx mbuf DMA mapping errors");
2693 
2694 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2695 	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
2696 	    "Tx queue 0 not empty");
2697 
2698 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2699 	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
2700 	    "Tx queue 1 not empty");
2701 
2702 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2703 	    "rx_packets", CTLFLAG_RD, &sc->rx_packets,
2704 	    "Rx packets");
2705 
2706 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2707 	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err,
2708 	    "Rx CRC errors");
2709 
2710 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2711 	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err,
2712 	    "Rx PHY errors");
2713 
2714 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2715 	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets,
2716 	    "Rx duplicate packets");
2717 
2718 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2719 	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows,
2720 	    "Rx FIFO overflows");
2721 
2722 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2723 	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes,
2724 	    "Rx bytes");
2725 
2726 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2727 	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err,
2728 	    "Rx too long frame errors");
2729 
2730 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2731 	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err,
2732 	    "Rx too short frame errors");
2733 
2734 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2735 	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes,
2736 	    "Tx bytes");
2737 
2738 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2739 	    "tx_packets", CTLFLAG_RD, &sc->tx_packets,
2740 	    "Tx packets");
2741 
2742 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2743 	    "tx_skip", CTLFLAG_RD, &sc->tx_skip,
2744 	    "Tx skip count for GDMA ports");
2745 
2746 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2747 	    "tx_collision", CTLFLAG_RD, &sc->tx_collision,
2748 	    "Tx collision count for GDMA ports");
2749 }
2750 
2751 #if defined(IF_RT_PHY_SUPPORT) || defined(RT_MDIO)
2752 /* This code only works on the RT2880 and similar chips. */
2753 /* TODO: add support for RT3052 and later.  But does anybody need it? */
2754 static int
2755 rt_miibus_readreg(device_t dev, int phy, int reg)
2756 {
2757 	struct rt_softc *sc = device_get_softc(dev);
2758 	int dat;
2759 
2760 	/*
2761 	 * PSEUDO_PHYAD is a special value indicating that a switch is
2762 	 * attached.  No PHY uses the PSEUDO_PHYAD (0x1e) address.
2763 	 */
2764 #ifndef RT_MDIO
2765 	if (phy == 31) {
2766 		/* Fake PHY ID for bfeswitch attach */
2767 		switch (reg) {
2768 		case MII_BMSR:
2769 			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
2770 		case MII_PHYIDR1:
2771 			return (0x40);		/* As result of faking */
2772 		case MII_PHYIDR2:		/* PHY will detect as */
2773 			return (0x6250);		/* bfeswitch */
2774 		}
2775 	}
2776 #endif
2777 
2778 	/* Wait for any previous command to complete */
2779 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
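	/*
	 * Compose the PHY and register addresses, then set MDIO_CMD_ONGO
	 * to start the transaction and poll until the hardware clears it.
	 */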
2780 	dat = ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2781 	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK);
2782 	RT_WRITE(sc, MDIO_ACCESS, dat);
2783 	RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO);
2784 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2785 
2786 	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2787 }
2788 
2789 static int
2790 rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2791 {
2792 	struct rt_softc *sc = device_get_softc(dev);
2793 	int dat;
2794 
2795 	/* Wait for any previous command to complete */
2796 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2797 	dat = MDIO_CMD_WR |
2798 	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2799 	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
2800 	    (val & MDIO_PHY_DATA_MASK);
2801 	RT_WRITE(sc, MDIO_ACCESS, dat);
2802 	RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO);
2803 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2804 
2805 	return (0);
2806 }
2807 #endif
2808 
2809 #ifdef IF_RT_PHY_SUPPORT
2810 void
2811 rt_miibus_statchg(device_t dev)
2812 {
2813 	struct rt_softc *sc = device_get_softc(dev);
2814 	struct mii_data *mii;
2815 
2816 	mii = device_get_softc(sc->rt_miibus);
2817 
2818 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2819 	    (IFM_ACTIVE | IFM_AVALID)) {
2820 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2821 		case IFM_10_T:
2822 		case IFM_100_TX:
2823 			/* XXX check link here */
2824 			sc->flags |= 1;
2825 			break;
2826 		default:
2827 			break;
2828 		}
2829 	}
2830 }
2831 #endif /* IF_RT_PHY_SUPPORT */
2832 
2833 static device_method_t rt_dev_methods[] =
2834 {
2835 	DEVMETHOD(device_probe, rt_probe),
2836 	DEVMETHOD(device_attach, rt_attach),
2837 	DEVMETHOD(device_detach, rt_detach),
2838 	DEVMETHOD(device_shutdown, rt_shutdown),
2839 	DEVMETHOD(device_suspend, rt_suspend),
2840 	DEVMETHOD(device_resume, rt_resume),
2841 
2842 #ifdef IF_RT_PHY_SUPPORT
2843 	/* MII interface */
2844 	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
2845 	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
2846 	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
2847 #endif
2848 
2849 	DEVMETHOD_END
2850 };
2851 
2852 static driver_t rt_driver =
2853 {
2854 	"rt",
2855 	rt_dev_methods,
2856 	sizeof(struct rt_softc)
2857 };
2858 
2859 static devclass_t rt_dev_class;
2860 
2861 DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
2862 #ifdef FDT
2863 DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0);
2864 #endif
2865 
2866 MODULE_DEPEND(rt, ether, 1, 1, 1);
2867 MODULE_DEPEND(rt, miibus, 1, 1, 1);
2868 
2869 #ifdef RT_MDIO
2870 MODULE_DEPEND(rt, mdio, 1, 1, 1);
2871 
2872 static int rtmdio_probe(device_t);
2873 static int rtmdio_attach(device_t);
2874 static int rtmdio_detach(device_t);
2875 
2876 static struct mtx miibus_mtx;
2877 
2878 MTX_SYSINIT(miibus_mtx, &miibus_mtx, "rt mii lock", MTX_DEF);
2879 
2880 /*
2881  * Declare an additional, separate driver for accessing the MDIO bus.
2882  */
2883 static device_method_t rtmdio_methods[] = {
2884 	/* Device interface */
2885 	DEVMETHOD(device_probe,         rtmdio_probe),
2886 	DEVMETHOD(device_attach,        rtmdio_attach),
2887 	DEVMETHOD(device_detach,        rtmdio_detach),
2888 
2889 	/* bus interface */
2890 	DEVMETHOD(bus_add_child,        device_add_child_ordered),
2891 
2892 	/* MDIO access */
2893 	DEVMETHOD(mdio_readreg,         rt_miibus_readreg),
2894 	DEVMETHOD(mdio_writereg,        rt_miibus_writereg),
2895 };
2896 
2897 DEFINE_CLASS_0(rtmdio, rtmdio_driver, rtmdio_methods,
2898     sizeof(struct rt_softc));
2899 static devclass_t rtmdio_devclass;
2900 
2901 DRIVER_MODULE(miiproxy, rt, miiproxy_driver, miiproxy_devclass, 0, 0);
2902 DRIVER_MODULE(rtmdio, simplebus, rtmdio_driver, rtmdio_devclass, 0, 0);
2903 DRIVER_MODULE(mdio, rtmdio, mdio_driver, mdio_devclass, 0, 0);
2904 
2905 static int
2906 rtmdio_probe(device_t dev)
2907 {
2908 	if (!ofw_bus_status_okay(dev))
2909 		return (ENXIO);
2910 
2911 	if (!ofw_bus_is_compatible(dev, "ralink,rt2880-mdio"))
2912 		return (ENXIO);
2913 
2914 	device_set_desc(dev, "FV built-in ethernet interface, MDIO controller");
2915 	return (0);
2916 }
2917 
2918 static int
rtmdio_attach(device_t dev)2919 rtmdio_attach(device_t dev)
2920 {
2921 	struct rt_softc	*sc;
2922 	int	error;
2923 
2924 	sc = device_get_softc(dev);
2925 	sc->dev = dev;
2926 	sc->mem_rid = 0;
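	/*
	 * The MDIO registers apparently live inside the ethernet core's
	 * register window, so the mapping is RF_SHAREABLE to coexist with
	 * the main rt device.
	 */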
2927 	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2928 	    &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE);
2929 	if (sc->mem == NULL) {
2930 		device_printf(dev, "couldn't map memory\n");
2931 		error = ENXIO;
2932 		goto fail;
2933 	}
2934 
2935 	sc->bst = rman_get_bustag(sc->mem);
2936 	sc->bsh = rman_get_bushandle(sc->mem);
2937 
2938 	bus_generic_probe(dev);
2939 	bus_enumerate_hinted_children(dev);
2940 	error = bus_generic_attach(dev);
2941 fail:
2942 	return (error);
2943 }
2944 
2945 static int
2946 rtmdio_detach(device_t dev)
2947 {
2948 	return (0);
2949 }
2950 #endif
2951