xref: /freebsd-12.1/sys/dev/rt/if_rt.c (revision bb487d2b)
1 /*-
2  * Copyright (c) 2015, Stanislav Galabov
3  * Copyright (c) 2014, Aleksandr A. Mityaev
4  * Copyright (c) 2011, Aleksandr Rybalko
5  * based on hard work
6  * by Alexander Egorenkov <[email protected]>
7  * and by Damien Bergamini <[email protected]>
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice unmodified, this list of conditions, and the following
15  *    disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "if_rtvar.h"
37 #include "if_rtreg.h"
38 
39 #include <net/if.h>
40 #include <net/if_var.h>
41 #include <net/if_arp.h>
42 #include <net/ethernet.h>
43 #include <net/if_dl.h>
44 #include <net/if_media.h>
45 #include <net/if_types.h>
46 #include <net/if_vlan_var.h>
47 
48 #include <net/bpf.h>
49 
50 #include <machine/bus.h>
51 #include <machine/cache.h>
52 #include <machine/cpufunc.h>
53 #include <machine/resource.h>
54 #include <vm/vm_param.h>
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57 #include <sys/bus.h>
58 #include <sys/rman.h>
59 
60 #include "opt_platform.h"
61 #include "opt_rt305x.h"
62 
63 #ifdef FDT
64 #include <dev/ofw/openfirm.h>
65 #include <dev/ofw/ofw_bus.h>
66 #include <dev/ofw/ofw_bus_subr.h>
67 #endif
68 
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71 
72 #include <mips/rt305x/rt305x_sysctlvar.h>
73 #include <mips/rt305x/rt305xreg.h>
74 
75 #ifdef IF_RT_PHY_SUPPORT
76 #include "miibus_if.h"
77 #endif
78 
79 /*
80  * Defines and macros
81  */
82 #define	RT_MAX_AGG_SIZE			3840
83 
84 #define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE
85 
86 #define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
87 #define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)
88 
89 #define	RT_TX_WATCHDOG_TIMEOUT		5
90 
91 #define RT_CHIPID_RT3050 0x3050
92 #define RT_CHIPID_RT3052 0x3052
93 #define RT_CHIPID_RT5350 0x5350
94 #define RT_CHIPID_RT6855 0x6855
95 #define RT_CHIPID_MT7620 0x7620
96 
97 #ifdef FDT
98 /* more specific and new models should go first */
static const struct ofw_compat_data rt_compat_data[] = {
	{ "ralink,rt6855-eth", (uintptr_t)RT_CHIPID_RT6855 },
	{ "ralink,rt5350-eth", (uintptr_t)RT_CHIPID_RT5350 },
	{ "ralink,rt3052-eth", (uintptr_t)RT_CHIPID_RT3052 },
	{ "ralink,rt305x-eth", (uintptr_t)RT_CHIPID_RT3050 },
	{ NULL, (uintptr_t)NULL }	/* terminator: ocd_data == 0 ends search */
};
106 #endif
107 
108 /*
109  * Static function prototypes
110  */
111 static int	rt_probe(device_t dev);
112 static int	rt_attach(device_t dev);
113 static int	rt_detach(device_t dev);
114 static int	rt_shutdown(device_t dev);
115 static int	rt_suspend(device_t dev);
116 static int	rt_resume(device_t dev);
117 static void	rt_init_locked(void *priv);
118 static void	rt_init(void *priv);
119 static void	rt_stop_locked(void *priv);
120 static void	rt_stop(void *priv);
121 static void	rt_start(struct ifnet *ifp);
122 static int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
123 static void	rt_periodic(void *arg);
124 static void	rt_tx_watchdog(void *arg);
125 static void	rt_intr(void *arg);
126 static void	rt_rt5350_intr(void *arg);
127 static void	rt_tx_coherent_intr(struct rt_softc *sc);
128 static void	rt_rx_coherent_intr(struct rt_softc *sc);
129 static void	rt_rx_delay_intr(struct rt_softc *sc);
130 static void	rt_tx_delay_intr(struct rt_softc *sc);
131 static void	rt_rx_intr(struct rt_softc *sc, int qid);
132 static void	rt_tx_intr(struct rt_softc *sc, int qid);
133 static void	rt_rx_done_task(void *context, int pending);
134 static void	rt_tx_done_task(void *context, int pending);
135 static void	rt_periodic_task(void *context, int pending);
136 static int	rt_rx_eof(struct rt_softc *sc,
137 		    struct rt_softc_rx_ring *ring, int limit);
138 static void	rt_tx_eof(struct rt_softc *sc,
139 		    struct rt_softc_tx_ring *ring);
140 static void	rt_update_stats(struct rt_softc *sc);
141 static void	rt_watchdog(struct rt_softc *sc);
142 static void	rt_update_raw_counters(struct rt_softc *sc);
143 static void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
144 static void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
145 static int	rt_txrx_enable(struct rt_softc *sc);
146 static int	rt_alloc_rx_ring(struct rt_softc *sc,
147 		    struct rt_softc_rx_ring *ring, int qid);
148 static void	rt_reset_rx_ring(struct rt_softc *sc,
149 		    struct rt_softc_rx_ring *ring);
150 static void	rt_free_rx_ring(struct rt_softc *sc,
151 		    struct rt_softc_rx_ring *ring);
152 static int	rt_alloc_tx_ring(struct rt_softc *sc,
153 		    struct rt_softc_tx_ring *ring, int qid);
154 static void	rt_reset_tx_ring(struct rt_softc *sc,
155 		    struct rt_softc_tx_ring *ring);
156 static void	rt_free_tx_ring(struct rt_softc *sc,
157 		    struct rt_softc_tx_ring *ring);
158 static void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
159 		    int nseg, int error);
160 static void	rt_sysctl_attach(struct rt_softc *sc);
161 #ifdef IF_RT_PHY_SUPPORT
162 void		rt_miibus_statchg(device_t);
163 static int	rt_miibus_readreg(device_t, int, int);
164 static int	rt_miibus_writereg(device_t, int, int, int);
165 #endif
166 static int	rt_ifmedia_upd(struct ifnet *);
167 static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);
168 
169 static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
170 #ifdef IF_RT_DEBUG
171 static int rt_debug = 0;
172 SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
173     "RT debug level");
174 #endif
175 
/*
 * rt_probe - identify the SoC model (from FDT compatible data when built
 * with FDT, otherwise from compile-time options), stash the chip id in the
 * softc and set the device description.  Returns ENXIO when no FDT
 * compatible entry matches.
 */
static int
rt_probe(device_t dev)
{
	struct rt_softc *sc = device_get_softc(dev);
	char buf[80];
#ifdef FDT
	const struct ofw_compat_data * cd;

	cd = ofw_bus_search_compatible(dev, rt_compat_data);
	if (cd->ocd_data == (uintptr_t)NULL)
	        return (ENXIO);	/* hit the table terminator: not for us */

	/* The chip id is carried in the compat table's ocd_data field. */
	sc->rt_chipid = (unsigned int)(cd->ocd_data);
#else
#if defined(MT7620)
	sc->rt_chipid = RT_CHIPID_MT7620;
#elif defined(RT5350)
	sc->rt_chipid = RT_CHIPID_RT5350;
#else
	sc->rt_chipid = RT_CHIPID_RT3050;
#endif
#endif
	snprintf(buf, sizeof(buf), "Ralink RT%x onChip Ethernet driver",
		sc->rt_chipid);
	device_set_desc_copy(dev, buf);
	return (BUS_PROBE_GENERIC);
}
203 
204 /*
205  * macaddr_atoi - translate string MAC address to uint8_t array
206  */
/*
 * macaddr_atoi - parse a textual MAC address ("xx:xx:xx:xx:xx:xx", any
 * single-character separator) into a 6-byte array.
 *
 * Returns 0 on success; on failure zeroes 'mac' and returns 1.
 */
static int
macaddr_atoi(const char *str, uint8_t *mac)
{
	unsigned int octets[ETHER_ADDR_LEN];	/* aligned scratch storage */
	int nparsed, idx;

	nparsed = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
	    &octets[0], &octets[1], &octets[2],
	    &octets[3], &octets[4], &octets[5]);
	if (nparsed < ETHER_ADDR_LEN) {
		/* Incomplete address: hand back all-zeroes and fail. */
		memset(mac, 0, ETHER_ADDR_LEN);
		return (1);
	}

	/* Narrow each parsed word down to a single octet. */
	for (idx = 0; idx < ETHER_ADDR_LEN; idx++)
		mac[idx] = octets[idx] & 0xff;

	return (0);
}
227 
228 #ifdef USE_GENERATED_MAC_ADDRESS
229 /*
230  * generate_mac(uin8_t *mac)
231  * This is MAC address generator for cases when real device MAC address
232  * unknown or not yet accessible.
233  * Use 'b','s','d' signature and 3 octets from CRC32 on kenv.
234  * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
235  *
236  * Output - MAC address, that do not change between reboots, if hints or
237  * bootloader info unchange.
238  */
/*
 * generate_mac - derive a stable pseudo-MAC from the kernel environment.
 * The CRC is computed over every kenv string (including its NUL), so the
 * result is deterministic for a given environment.
 */
static void
generate_mac(uint8_t *mac)
{
	unsigned char *cp;
	int i = 0;
	uint32_t crc = 0xffffffff;

	/* Generate CRC32 on kenv */
	for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
		crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
	}
	crc = ~crc;

	/* 'b','s','d' prefix; the CRC supplies the device-specific tail. */
	mac[0] = 'b';
	mac[1] = 's';
	mac[2] = 'd';
	/* Fold the two high CRC bytes together so all 32 bits contribute. */
	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
	mac[4] = (crc >> 8) & 0xff;
	mac[5] = crc & 0xff;
}
259 #endif
260 
261 /*
262  * ether_request_mac - try to find usable MAC address.
263  */
/*
 * ether_request_mac - try to find a usable MAC address for the interface.
 *
 * Sources are tried in order: the bootloader environment (platform
 * dependent), device hints, then either a generated or a hardcoded
 * fallback address.  'mac' always receives ETHER_ADDR_LEN bytes; the
 * function always returns 0 because a fallback is always available.
 */
static int
ether_request_mac(device_t dev, uint8_t *mac)
{
	char *var;

	/*
	 * "ethaddr" is passed via envp on RedBoot platforms
	 * "kmac" is passed via argv on RouterBOOT platforms
	 */
#if defined(RT305X_UBOOT) ||  defined(__REDBOOT__) || defined(__ROUTERBOOT__)
	if ((var = kern_getenv("ethaddr")) != NULL ||
	    (var = kern_getenv("kmac")) != NULL ) {

		if(!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from KENV\n",
			    device_get_nameunit(dev), var);
			freeenv(var);
			return (0);
		}
		/* Parse failed: release the env copy and fall through. */
		freeenv(var);
	}
#endif

	/*
	 * Try from hints
	 * hint.[dev].[unit].macaddr
	 */
	if (!resource_string_value(device_get_name(dev),
	    device_get_unit(dev), "macaddr", (const char **)&var)) {

		if(!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from hints\n",
			    device_get_nameunit(dev), var);
			return (0);
		}
	}

#ifdef USE_GENERATED_MAC_ADDRESS
	/* Last resort: stable address derived from the kernel environment. */
	generate_mac(mac);

	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
#else
	/* Hardcoded */
	mac[0] = 0x00;
	mac[1] = 0x18;
	mac[2] = 0xe7;
	mac[3] = 0xd5;
	mac[4] = 0x83;
	mac[5] = 0x90;

	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
#endif

	return (0);
}
320 
321 /*
322  * Reset hardware
323  */
/*
 * reset_freng - frame engine reset hook.
 *
 * Intentionally a no-op: a full hard reset of the frame engine kills
 * state that cannot be recovered here, so the reset is skipped.
 */
static void
reset_freng(struct rt_softc *sc)
{
}
330 
331 static int
332 rt_attach(device_t dev)
333 {
334 	struct rt_softc *sc;
335 	struct ifnet *ifp;
336 	int error, i;
337 
338 	sc = device_get_softc(dev);
339 	sc->dev = dev;
340 
341 	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
342 	    MTX_DEF | MTX_RECURSE);
343 
344 	sc->mem_rid = 0;
345 	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
346 	    RF_ACTIVE);
347 	if (sc->mem == NULL) {
348 		device_printf(dev, "could not allocate memory resource\n");
349 		error = ENXIO;
350 		goto fail;
351 	}
352 
353 	sc->bst = rman_get_bustag(sc->mem);
354 	sc->bsh = rman_get_bushandle(sc->mem);
355 
356 	sc->irq_rid = 0;
357 	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
358 	    RF_ACTIVE);
359 	if (sc->irq == NULL) {
360 		device_printf(dev,
361 		    "could not allocate interrupt resource\n");
362 		error = ENXIO;
363 		goto fail;
364 	}
365 
366 #ifdef IF_RT_DEBUG
367 	sc->debug = rt_debug;
368 
369 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
370 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
371 		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
372 #endif
373 
374 	/* Reset hardware */
375 	reset_freng(sc);
376 
377 	/* Fill in soc-specific registers map */
378 	switch(sc->rt_chipid) {
379 	  case RT_CHIPID_MT7620:
380 	  case RT_CHIPID_RT5350:
381 	  	device_printf(dev, "RT%x Ethernet MAC (rev 0x%08x)\n",
382 	  		sc->rt_chipid, sc->mac_rev);
383 		/* RT5350: No GDMA, PSE, CDMA, PPE */
384 		RT_WRITE(sc, GE_PORT_BASE + 0x0C00, // UDPCS, TCPCS, IPCS=1
385 			RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7<<16));
386 		sc->delay_int_cfg=RT5350_PDMA_BASE+RT5350_DELAY_INT_CFG;
387 		sc->fe_int_status=RT5350_FE_INT_STATUS;
388 		sc->fe_int_enable=RT5350_FE_INT_ENABLE;
389 		sc->pdma_glo_cfg=RT5350_PDMA_BASE+RT5350_PDMA_GLO_CFG;
390 		sc->pdma_rst_idx=RT5350_PDMA_BASE+RT5350_PDMA_RST_IDX;
391 		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
392 		  sc->tx_base_ptr[i]=RT5350_PDMA_BASE+RT5350_TX_BASE_PTR(i);
393 		  sc->tx_max_cnt[i]=RT5350_PDMA_BASE+RT5350_TX_MAX_CNT(i);
394 		  sc->tx_ctx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_CTX_IDX(i);
395 		  sc->tx_dtx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_DTX_IDX(i);
396 		}
397 		sc->rx_ring_count=2;
398 		sc->rx_base_ptr[0]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR0;
399 		sc->rx_max_cnt[0]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT0;
400 		sc->rx_calc_idx[0]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX0;
401 		sc->rx_drx_idx[0]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX0;
402 		sc->rx_base_ptr[1]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR1;
403 		sc->rx_max_cnt[1]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT1;
404 		sc->rx_calc_idx[1]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX1;
405 		sc->rx_drx_idx[1]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX1;
406 		sc->int_rx_done_mask=RT5350_INT_RXQ0_DONE;
407 		sc->int_tx_done_mask=RT5350_INT_TXQ0_DONE;
408 	  	break;
409 	  case RT_CHIPID_RT6855:
410 	  	device_printf(dev, "RT6855 Ethernet MAC (rev 0x%08x)\n",
411 	  		sc->mac_rev);
412 	  	break;
413 	  default:
414 		device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
415 			sc->mac_rev);
416 		RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
417 		(
418 		GDM_ICS_EN | /* Enable IP Csum */
419 		GDM_TCS_EN | /* Enable TCP Csum */
420 		GDM_UCS_EN | /* Enable UDP Csum */
421 		GDM_STRPCRC | /* Strip CRC from packet */
422 		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
423 		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
424 		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
425 		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
426 		));
427 
428 		sc->delay_int_cfg=PDMA_BASE+DELAY_INT_CFG;
429 		sc->fe_int_status=GE_PORT_BASE+FE_INT_STATUS;
430 		sc->fe_int_enable=GE_PORT_BASE+FE_INT_ENABLE;
431 		sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG;
432 		sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG;
433 		sc->pdma_rst_idx=PDMA_BASE+PDMA_RST_IDX;
434 		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
435 		  sc->tx_base_ptr[i]=PDMA_BASE+TX_BASE_PTR(i);
436 		  sc->tx_max_cnt[i]=PDMA_BASE+TX_MAX_CNT(i);
437 		  sc->tx_ctx_idx[i]=PDMA_BASE+TX_CTX_IDX(i);
438 		  sc->tx_dtx_idx[i]=PDMA_BASE+TX_DTX_IDX(i);
439 		}
440 		sc->rx_ring_count=1;
441 		sc->rx_base_ptr[0]=PDMA_BASE+RX_BASE_PTR0;
442 		sc->rx_max_cnt[0]=PDMA_BASE+RX_MAX_CNT0;
443 		sc->rx_calc_idx[0]=PDMA_BASE+RX_CALC_IDX0;
444 		sc->rx_drx_idx[0]=PDMA_BASE+RX_DRX_IDX0;
445 		sc->int_rx_done_mask=INT_RX_DONE;
446 		sc->int_tx_done_mask=INT_TXQ0_DONE;
447 	};
448 
449 	/* allocate Tx and Rx rings */
450 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
451 		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
452 		if (error != 0) {
453 			device_printf(dev, "could not allocate Tx ring #%d\n",
454 			    i);
455 			goto fail;
456 		}
457 	}
458 
459 	sc->tx_ring_mgtqid = 5;
460 	for (i = 0; i < sc->rx_ring_count; i++) {
461 		error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i);
462 		if (error != 0) {
463 			device_printf(dev, "could not allocate Rx ring\n");
464 			goto fail;
465 		}
466 	}
467 
468 	callout_init(&sc->periodic_ch, 0);
469 	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);
470 
471 	ifp = sc->ifp = if_alloc(IFT_ETHER);
472 	if (ifp == NULL) {
473 		device_printf(dev, "could not if_alloc()\n");
474 		error = ENOMEM;
475 		goto fail;
476 	}
477 
478 	ifp->if_softc = sc;
479 	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
480 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
481 	ifp->if_init = rt_init;
482 	ifp->if_ioctl = rt_ioctl;
483 	ifp->if_start = rt_start;
484 #define	RT_TX_QLEN	256
485 
486 	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
487 	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
488 	IFQ_SET_READY(&ifp->if_snd);
489 
490 #ifdef IF_RT_PHY_SUPPORT
491 	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
492 	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
493 	if (error != 0) {
494 		device_printf(dev, "attaching PHYs failed\n");
495 		error = ENXIO;
496 		goto fail;
497 	}
498 #else
499 	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
500 	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
501 	    NULL);
502 	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
503 
504 #endif /* IF_RT_PHY_SUPPORT */
505 
506 	ether_request_mac(dev, sc->mac_addr);
507 	ether_ifattach(ifp, sc->mac_addr);
508 
509 	/*
510 	 * Tell the upper layer(s) we support long frames.
511 	 */
512 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
513 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
514 	ifp->if_capenable |= IFCAP_VLAN_MTU;
515 	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
516 	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
517 
518 	/* init task queue */
519 	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
520 	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
521 	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);
522 
523 	sc->rx_process_limit = 100;
524 
525 	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
526 	    taskqueue_thread_enqueue, &sc->taskqueue);
527 
528 	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
529 	    device_get_nameunit(sc->dev));
530 
531 	rt_sysctl_attach(sc);
532 
533 	/* set up interrupt */
534 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
535 	    NULL, (sc->rt_chipid == RT_CHIPID_RT5350 ||
536 	    sc->rt_chipid == RT_CHIPID_MT7620) ? rt_rt5350_intr : rt_intr,
537 	    sc, &sc->irqh);
538 	if (error != 0) {
539 		printf("%s: could not set up interrupt\n",
540 			device_get_nameunit(dev));
541 		goto fail;
542 	}
543 #ifdef IF_RT_DEBUG
544 	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
545 #endif
546 
547 	return (0);
548 
549 fail:
550 	/* free Tx and Rx rings */
551 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
552 		rt_free_tx_ring(sc, &sc->tx_ring[i]);
553 
554 	for (i = 0; i < sc->rx_ring_count; i++)
555 		rt_free_rx_ring(sc, &sc->rx_ring[i]);
556 
557 	mtx_destroy(&sc->lock);
558 
559 	if (sc->mem != NULL)
560 		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
561 		    sc->mem);
562 
563 	if (sc->irq != NULL)
564 		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
565 		    sc->irq);
566 
567 	return (error);
568 }
569 
570 /*
571  * Set media options.
572  */
573 static int
574 rt_ifmedia_upd(struct ifnet *ifp)
575 {
576 	struct rt_softc *sc;
577 #ifdef IF_RT_PHY_SUPPORT
578 	struct mii_data *mii;
579 	struct mii_softc *miisc;
580 	int error = 0;
581 
582 	sc = ifp->if_softc;
583 	RT_SOFTC_LOCK(sc);
584 
585 	mii = device_get_softc(sc->rt_miibus);
586 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
587 		PHY_RESET(miisc);
588 	error = mii_mediachg(mii);
589 	RT_SOFTC_UNLOCK(sc);
590 
591 	return (error);
592 
593 #else /* !IF_RT_PHY_SUPPORT */
594 
595 	struct ifmedia *ifm;
596 	struct ifmedia_entry *ife;
597 
598 	sc = ifp->if_softc;
599 	ifm = &sc->rt_ifmedia;
600 	ife = ifm->ifm_cur;
601 
602 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
603 		return (EINVAL);
604 
605 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
606 		device_printf(sc->dev,
607 		    "AUTO is not supported for multiphy MAC");
608 		return (EINVAL);
609 	}
610 
611 	/*
612 	 * Ignore everything
613 	 */
614 	return (0);
615 #endif /* IF_RT_PHY_SUPPORT */
616 }
617 
618 /*
619  * Report current media status.
620  */
/*
 * rt_ifmedia_sts - report current media status.  Without PHY support the
 * fixed 100baseTX/FDX medium is always reported active.
 */
static void
rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
#ifdef IF_RT_PHY_SUPPORT
	struct rt_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	RT_SOFTC_LOCK(sc);
	mii = device_get_softc(sc->rt_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	/*
	 * NOTE(review): the two assignments below unconditionally overwrite
	 * the values just read from the PHY, so the mii_pollstat() results
	 * are discarded and 100baseTX/FDX active is always reported.  This
	 * looks like a workaround or leftover debugging — confirm before
	 * removing.
	 */
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	RT_SOFTC_UNLOCK(sc);
#else /* !IF_RT_PHY_SUPPORT */

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
#endif /* IF_RT_PHY_SUPPORT */
}
644 
/*
 * rt_detach - driver detach: stop deferred work, free the DMA rings,
 * detach the ifnet and release all bus resources.  Always returns 0.
 */
static int
rt_detach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");

	RT_SOFTC_LOCK(sc);

	/* Mark the interface down so no new work is queued. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);

	/*
	 * NOTE(review): these drains run with the softc lock held; if any
	 * of the drained tasks takes the same lock this can deadlock —
	 * TODO confirm the tasks' locking.
	 */
	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);

	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_free_rx_ring(sc, &sc->rx_ring[i]);

	RT_SOFTC_UNLOCK(sc);

#ifdef IF_RT_PHY_SUPPORT
	if (sc->rt_miibus != NULL)
		device_delete_child(dev, sc->rt_miibus);
#endif

	ether_ifdetach(ifp);
	if_free(ifp);

	taskqueue_free(sc->taskqueue);

	mtx_destroy(&sc->lock);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->irq, sc->irqh);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	return (0);
}
695 
696 static int
697 rt_shutdown(device_t dev)
698 {
699 	struct rt_softc *sc;
700 
701 	sc = device_get_softc(dev);
702 	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
703 	rt_stop(sc);
704 
705 	return (0);
706 }
707 
708 static int
709 rt_suspend(device_t dev)
710 {
711 	struct rt_softc *sc;
712 
713 	sc = device_get_softc(dev);
714 	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
715 	rt_stop(sc);
716 
717 	return (0);
718 }
719 
720 static int
721 rt_resume(device_t dev)
722 {
723 	struct rt_softc *sc;
724 	struct ifnet *ifp;
725 
726 	sc = device_get_softc(dev);
727 	ifp = sc->ifp;
728 
729 	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");
730 
731 	if (ifp->if_flags & IFF_UP)
732 		rt_init(sc);
733 
734 	return (0);
735 }
736 
737 /*
738  * rt_init_locked - Run initialization process having locked mtx.
739  */
740 static void
741 rt_init_locked(void *priv)
742 {
743 	struct rt_softc *sc;
744 	struct ifnet *ifp;
745 #ifdef IF_RT_PHY_SUPPORT
746 	struct mii_data *mii;
747 #endif
748 	int i, ntries;
749 	uint32_t tmp;
750 
751 	sc = priv;
752 	ifp = sc->ifp;
753 #ifdef IF_RT_PHY_SUPPORT
754 	mii = device_get_softc(sc->rt_miibus);
755 #endif
756 
757 	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");
758 
759 	RT_SOFTC_ASSERT_LOCKED(sc);
760 
761 	/* hardware reset */
762 	//RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
763 	//rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);
764 
765 	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
766 	if(sc->rt_chipid == RT_CHIPID_RT3050 || sc->rt_chipid == RT_CHIPID_RT3052)
767 	  RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
768 	    (
769 	    GDM_ICS_EN | /* Enable IP Csum */
770 	    GDM_TCS_EN | /* Enable TCP Csum */
771 	    GDM_UCS_EN | /* Enable UDP Csum */
772 	    GDM_STRPCRC | /* Strip CRC from packet */
773 	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
774 	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
775 	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
776 	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
777 	    ));
778 
779 	/* disable DMA engine */
780 	RT_WRITE(sc, sc->pdma_glo_cfg, 0);
781 	RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff);
782 
783 	/* wait while DMA engine is busy */
784 	for (ntries = 0; ntries < 100; ntries++) {
785 		tmp = RT_READ(sc, sc->pdma_glo_cfg);
786 		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
787 			break;
788 		DELAY(1000);
789 	}
790 
791 	if (ntries == 100) {
792 		device_printf(sc->dev, "timeout waiting for DMA engine\n");
793 		goto fail;
794 	}
795 
796 	/* reset Rx and Tx rings */
797 	tmp = FE_RST_DRX_IDX0 |
798 		FE_RST_DTX_IDX3 |
799 		FE_RST_DTX_IDX2 |
800 		FE_RST_DTX_IDX1 |
801 		FE_RST_DTX_IDX0;
802 
803 	RT_WRITE(sc, sc->pdma_rst_idx, tmp);
804 
805 	/* XXX switch set mac address */
806 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
807 		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
808 
809 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
810 		/* update TX_BASE_PTRx */
811 		RT_WRITE(sc, sc->tx_base_ptr[i],
812 			sc->tx_ring[i].desc_phys_addr);
813 		RT_WRITE(sc, sc->tx_max_cnt[i],
814 			RT_SOFTC_TX_RING_DESC_COUNT);
815 		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
816 	}
817 
818 	/* init Rx ring */
819 	for (i = 0; i < sc->rx_ring_count; i++)
820 		rt_reset_rx_ring(sc, &sc->rx_ring[i]);
821 
822 	/* update RX_BASE_PTRx */
823 	for (i = 0; i < sc->rx_ring_count; i++) {
824 		RT_WRITE(sc, sc->rx_base_ptr[i],
825 			sc->rx_ring[i].desc_phys_addr);
826 		RT_WRITE(sc, sc->rx_max_cnt[i],
827 			RT_SOFTC_RX_RING_DATA_COUNT);
828 		RT_WRITE(sc, sc->rx_calc_idx[i],
829 			RT_SOFTC_RX_RING_DATA_COUNT - 1);
830 	}
831 
832 	/* write back DDONE, 16byte burst enable RX/TX DMA */
833 	tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN;
834 	if (sc->rt_chipid == RT_CHIPID_MT7620)
835 		tmp |= (1<<31);
836 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
837 
838 	/* disable interrupts mitigation */
839 	RT_WRITE(sc, sc->delay_int_cfg, 0);
840 
841 	/* clear pending interrupts */
842 	RT_WRITE(sc, sc->fe_int_status, 0xffffffff);
843 
844 	/* enable interrupts */
845 	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
846 	    sc->rt_chipid == RT_CHIPID_MT7620)
847 	  tmp = RT5350_INT_TX_COHERENT |
848 	  	RT5350_INT_RX_COHERENT |
849 	  	RT5350_INT_TXQ3_DONE |
850 	  	RT5350_INT_TXQ2_DONE |
851 	  	RT5350_INT_TXQ1_DONE |
852 	  	RT5350_INT_TXQ0_DONE |
853 	  	RT5350_INT_RXQ1_DONE |
854 	  	RT5350_INT_RXQ0_DONE;
855 	else
856 	  tmp = CNT_PPE_AF |
857 		CNT_GDM_AF |
858 		PSE_P2_FC |
859 		GDM_CRC_DROP |
860 		PSE_BUF_DROP |
861 		GDM_OTHER_DROP |
862 		PSE_P1_FC |
863 		PSE_P0_FC |
864 		PSE_FQ_EMPTY |
865 		INT_TX_COHERENT |
866 		INT_RX_COHERENT |
867 		INT_TXQ3_DONE |
868 		INT_TXQ2_DONE |
869 		INT_TXQ1_DONE |
870 		INT_TXQ0_DONE |
871 		INT_RX_DONE;
872 
873 	sc->intr_enable_mask = tmp;
874 
875 	RT_WRITE(sc, sc->fe_int_enable, tmp);
876 
877 	if (rt_txrx_enable(sc) != 0)
878 		goto fail;
879 
880 #ifdef IF_RT_PHY_SUPPORT
881 	if (mii) mii_mediachg(mii);
882 #endif /* IF_RT_PHY_SUPPORT */
883 
884 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
885 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
886 
887 	sc->periodic_round = 0;
888 
889 	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
890 
891 	return;
892 
893 fail:
894 	rt_stop_locked(sc);
895 }
896 
897 /*
898  * rt_init - lock and initialize device.
899  */
/*
 * rt_init - lock and initialize device.
 */
static void
rt_init(void *priv)
{
	struct rt_softc *sc = priv;

	RT_SOFTC_LOCK(sc);
	rt_init_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}
910 
911 /*
912  * rt_stop_locked - stop TX/RX w/ lock
913  */
/*
 * rt_stop_locked - stop TX/RX w/ lock.
 *
 * Marks the interface down, stops the callouts, blocks the taskqueue and
 * disables interrupts; on pre-RT5350 chips also resets the PSE and
 * restores frame forwarding.  Called with the softc lock held; note the
 * lock is temporarily dropped around taskqueue_block() below.
 */
static void
rt_stop_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = priv;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");

	RT_SOFTC_ASSERT_LOCKED(sc);
	sc->tx_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);
	/* Drop the lock while blocking the taskqueue, then re-take it. */
	RT_SOFTC_UNLOCK(sc);
	taskqueue_block(sc->taskqueue);

	/*
	 * Sometimes rt_stop_locked() is called from the ISR, where draining
	 * the taskqueue would panic; the drains stay disabled until that is
	 * resolved.
	 */
#ifdef notyet
	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
#endif
	RT_SOFTC_LOCK(sc);

	/* disable interrupts */
	RT_WRITE(sc, sc->fe_int_enable, 0);

	if(sc->rt_chipid == RT_CHIPID_RT5350 ||
	   sc->rt_chipid == RT_CHIPID_MT7620) {
		/* RT5350/MT7620: no PSE reset performed here. */
	} else {
	  /* reset adapter */
	  RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);

	  RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
	    (
	    GDM_ICS_EN | /* Enable IP Csum */
	    GDM_TCS_EN | /* Enable TCP Csum */
	    GDM_UCS_EN | /* Enable UDP Csum */
	    GDM_STRPCRC | /* Strip CRC from packet */
	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
	    ));
	}
}
966 
/*
 * rt_stop - lock and stop TX/RX.
 */
static void
rt_stop(void *priv)
{
	struct rt_softc *sc = priv;

	RT_SOFTC_LOCK(sc);
	rt_stop_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}
977 
978 /*
979  * rt_tx_data - transmit packet.
980  */
/*
 * rt_tx_data - map an mbuf for DMA and queue it on Tx ring 'qid'.
 *
 * Must be called with the ring lock held.  On failure the mbuf is freed
 * and an errno (ENOMEM/EFBIG/driver error) is returned; on success the
 * mbuf is stored in the ring's data slot (reclaimed later by rt_tx_eof)
 * and the hardware CTX index is advanced to kick transmission.  Each
 * hardware descriptor carries up to two DMA segments (sdp0/sdp1).
 */
static int
rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
{
	struct ifnet *ifp;
	struct rt_softc_tx_ring *ring;
	struct rt_softc_tx_data *data;
	struct rt_txdesc *desc;
	struct mbuf *m_d;
	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
	int error, ndmasegs, ndescs, i;

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx data: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);

	ifp = sc->ifp;
	ring = &sc->tx_ring[qid];
	desc = &ring->desc[ring->desc_cur];
	data = &ring->data[ring->data_cur];

	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
	    dma_seg, &ndmasegs, 0);
	if (error != 0)	{
		/* too many fragments, linearize */

		RT_DPRINTF(sc, RT_DEBUG_TX,
			"could not load mbuf DMA map, trying to linearize "
			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
			ndmasegs, m->m_pkthdr.len, error);

		/* Collapse into at most 16 clusters and retry the load. */
		m_d = m_collapse(m, M_NOWAIT, 16);
		if (m_d == NULL) {
			m_freem(m);
			m = NULL;
			return (ENOMEM);
		}
		m = m_d;

		sc->tx_defrag_packets++;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, m, dma_seg, &ndmasegs, 0);
		if (error != 0)	{
			device_printf(sc->dev, "could not load mbuf DMA map: "
			    "ndmasegs=%d, len=%d, error=%d\n",
			    ndmasegs, m->m_pkthdr.len, error);
			m_freem(m);
			return (error);
		}
	}

	if (m->m_pkthdr.len == 0)
		ndmasegs = 0;

	/* determine how many Tx descs are required */
	/* (one descriptor covers two DMA segments) */
	ndescs = 1 + ndmasegs / 2;
	if ((ring->desc_queued + ndescs) >
	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "there are not enough Tx descs\n");

		sc->no_tx_desc_avail++;

		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
		m_freem(m);
		return (EFBIG);
	}

	/* Ring owns the mbuf from here; rt_tx_eof() will release it. */
	data->m = m;

	/* set up Tx descs */
	for (i = 0; i < ndmasegs; i += 2) {

		/* TODO: this needs to be refined as MT7620 for example has
		 * a different word3 layout than RT305x and RT5350 (the last
		 * one doesn't use word3 at all).
		 */

		/* Set destination */
		if (sc->rt_chipid != RT_CHIPID_MT7620)
			desc->dst = (TXDSCR_DST_PORT_GDMA1);

		/* NOTE(review): on MT7620 desc->dst is not assigned before
		 * the |= below — presumably the ring reset leaves the
		 * descriptor zeroed; confirm. */
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			desc->dst |= (TXDSCR_IP_CSUM_GEN|TXDSCR_UDP_CSUM_GEN|
			    TXDSCR_TCP_CSUM_GEN);
		/* Set queue id */
		desc->qn = qid;
		/* No PPPoE */
		desc->pppoe = 0;
		/* No VLAN */
		desc->vid = 0;

		desc->sdp0 = htole32(dma_seg[i].ds_addr);
		desc->sdl0 = htole16(dma_seg[i].ds_len |
		    ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));

		if ((i+1) < ndmasegs) {
			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
			desc->sdl1 = htole16(dma_seg[i+1].ds_len |
			    ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
		} else {
			desc->sdp1 = 0;
			desc->sdl1 = 0;
		}

		/* Advance only while another descriptor will be filled;
		 * the final advance happens after the loop. */
		if ((i+2) < ndmasegs) {
			ring->desc_queued++;
			ring->desc_cur = (ring->desc_cur + 1) %
			    RT_SOFTC_TX_RING_DESC_COUNT;
		}
		desc = &ring->desc[ring->desc_cur];
	}

	/* NOTE(review): dma_seg entries past ndmasegs-1 are uninitialized,
	 * so this debug print can read stale stack data (debug-only). */
	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
	    "DMA ds_len=%d/%d/%d/%d/%d\n",
	    m->m_pkthdr.len, ndmasegs,
	    (int) dma_seg[0].ds_len,
	    (int) dma_seg[1].ds_len,
	    (int) dma_seg[2].ds_len,
	    (int) dma_seg[3].ds_len,
	    (int) dma_seg[4].ds_len);

	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREWRITE);

	ring->desc_queued++;
	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;

	ring->data_queued++;
	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;

	/* kick Tx */
	RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur);

	return (0);
}
1123 
1124 /*
1125  * rt_start - start Transmit/Receive
1126  */
static void
rt_start(struct ifnet *ifp)
{
	struct rt_softc *sc;
	struct mbuf *m;
	int qid = 0 /* XXX must check QoS priority */;

	sc = ifp->if_softc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	/* Drain the interface send queue into Tx ring 'qid'. */
	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		m->m_pkthdr.rcvif = NULL;

		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);

		/*
		 * Ring full: drop this packet, set OACTIVE and stop
		 * draining.  rt_tx_done_task() clears OACTIVE once
		 * descriptors have been reclaimed.
		 */
		if (sc->tx_ring[qid].data_queued >=
		    RT_SOFTC_TX_RING_DATA_COUNT) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			RT_DPRINTF(sc, RT_DEBUG_TX,
			    "if_start: Tx ring with qid=%d is full\n", qid);

			m_freem(m);

			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

			sc->tx_data_queue_full[qid]++;

			break;
		}

		/* rt_tx_data() frees the mbuf itself on every error path. */
		if (rt_tx_data(sc, m, qid) != 0) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

			break;
		}

		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
		/* (Re)arm the Tx watchdog for the newly queued frame. */
		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
	}
}
1178 
1179 /*
1180  * rt_update_promisc - set/clear promiscuous mode. Unused yet, because
1181  * filtering done by attached Ethernet switch.
1182  */
1183 static void
1184 rt_update_promisc(struct ifnet *ifp)
1185 {
1186 	struct rt_softc *sc;
1187 
1188 	sc = ifp->if_softc;
1189 	printf("%s: %s promiscuous mode\n",
1190 		device_get_nameunit(sc->dev),
1191 		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
1192 }
1193 
1194 /*
1195  * rt_ioctl - ioctl handler.
1196  */
1197 static int
1198 rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1199 {
1200 	struct rt_softc *sc;
1201 	struct ifreq *ifr;
1202 #ifdef IF_RT_PHY_SUPPORT
1203 	struct mii_data *mii;
1204 #endif /* IF_RT_PHY_SUPPORT */
1205 	int error, startall;
1206 
1207 	sc = ifp->if_softc;
1208 	ifr = (struct ifreq *) data;
1209 
1210 	error = 0;
1211 
1212 	switch (cmd) {
1213 	case SIOCSIFFLAGS:
1214 		startall = 0;
1215 		RT_SOFTC_LOCK(sc);
1216 		if (ifp->if_flags & IFF_UP) {
1217 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1218 				if ((ifp->if_flags ^ sc->if_flags) &
1219 				    IFF_PROMISC)
1220 					rt_update_promisc(ifp);
1221 			} else {
1222 				rt_init_locked(sc);
1223 				startall = 1;
1224 			}
1225 		} else {
1226 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1227 				rt_stop_locked(sc);
1228 		}
1229 		sc->if_flags = ifp->if_flags;
1230 		RT_SOFTC_UNLOCK(sc);
1231 		break;
1232 	case SIOCGIFMEDIA:
1233 	case SIOCSIFMEDIA:
1234 #ifdef IF_RT_PHY_SUPPORT
1235 		mii = device_get_softc(sc->rt_miibus);
1236 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1237 #else
1238 		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
1239 #endif /* IF_RT_PHY_SUPPORT */
1240 		break;
1241 	default:
1242 		error = ether_ioctl(ifp, cmd, data);
1243 		break;
1244 	}
1245 	return (error);
1246 }
1247 
1248 /*
1249  * rt_periodic - Handler of PERIODIC interrupt
1250  */
1251 static void
1252 rt_periodic(void *arg)
1253 {
1254 	struct rt_softc *sc;
1255 
1256 	sc = arg;
1257 	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
1258 	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
1259 }
1260 
1261 /*
1262  * rt_tx_watchdog - Handler of TX Watchdog
1263  */
1264 static void
1265 rt_tx_watchdog(void *arg)
1266 {
1267 	struct rt_softc *sc;
1268 	struct ifnet *ifp;
1269 
1270 	sc = arg;
1271 	ifp = sc->ifp;
1272 
1273 	if (sc->tx_timer == 0)
1274 		return;
1275 
1276 	if (--sc->tx_timer == 0) {
1277 		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
1278 #ifdef notyet
1279 		/*
1280 		 * XXX: Commented out, because reset break input.
1281 		 */
1282 		rt_stop_locked(sc);
1283 		rt_init_locked(sc);
1284 #endif
1285 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1286 		sc->tx_watchdog_timeouts++;
1287 	}
1288 	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
1289 }
1290 
1291 /*
1292  * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
1293  */
static void
rt_cnt_ppe_af(struct rt_softc *sc)
{

	/* Informational only: log under RT_DEBUG_INTR; no recovery action. */
	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
}
1300 
1301 /*
1302  * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
1303  */
static void
rt_cnt_gdm_af(struct rt_softc *sc)
{

	/* Informational only: log under RT_DEBUG_INTR; no recovery action. */
	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 Counter Table Almost Full\n");
}
1311 
1312 /*
1313  * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
1314  */
static void
rt_pse_p2_fc(struct rt_softc *sc)
{

	/* Informational only: log under RT_DEBUG_INTR; no recovery action. */
	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port2 (GDMA 2) flow control asserted.\n");
}
1322 
1323 /*
1324  * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
1325  * interrupt
1326  */
static void
rt_gdm_crc_drop(struct rt_softc *sc)
{

	/* Informational only: log under RT_DEBUG_INTR; no recovery action. */
	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to CRC error\n");
}
1334 
1335 /*
1336  * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
1337  */
static void
rt_pse_buf_drop(struct rt_softc *sc)
{

	/* Informational only: log under RT_DEBUG_INTR; no recovery action. */
	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE discards a packet due to buffer sharing limitation\n");
}
1345 
1346 /*
1347  * rt_gdm_other_drop - Handler of discard on other reason interrupt
1348  */
static void
rt_gdm_other_drop(struct rt_softc *sc)
{

	/* Informational only: log under RT_DEBUG_INTR; no recovery action. */
	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to other reason\n");
}
1356 
1357 /*
1358  * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
1359  */
static void
rt_pse_p1_fc(struct rt_softc *sc)
{

	/* Informational only: log under RT_DEBUG_INTR; no recovery action. */
	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port1 (GDMA 1) flow control asserted.\n");
}
1367 
1368 /*
1369  * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
1370  */
static void
rt_pse_p0_fc(struct rt_softc *sc)
{

	/* Informational only: log under RT_DEBUG_INTR; no recovery action. */
	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port0 (CDMA) flow control asserted.\n");
}
1378 
1379 /*
1380  * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
1381  */
static void
rt_pse_fq_empty(struct rt_softc *sc)
{

	/* Informational only: log under RT_DEBUG_INTR; no recovery action. */
	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE free Q empty threshold reached & forced drop "
		    "condition occurred.\n");
}
1390 
1391 /*
1392  * rt_intr - main ISR
1393  */
static void
rt_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts: the pending bits read from the status
	 * register are written straight back to clear them */
	status = RT_READ(sc, sc->fe_int_status);
	RT_WRITE(sc, sc->fe_int_status, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	/* Dispatch each asserted cause to its handler. */
	if (status & CNT_PPE_AF)
		rt_cnt_ppe_af(sc);

	if (status & CNT_GDM_AF)
		rt_cnt_gdm_af(sc);

	if (status & PSE_P2_FC)
		rt_pse_p2_fc(sc);

	if (status & GDM_CRC_DROP)
		rt_gdm_crc_drop(sc);

	if (status & PSE_BUF_DROP)
		rt_pse_buf_drop(sc);

	if (status & GDM_OTHER_DROP)
		rt_gdm_other_drop(sc);

	if (status & PSE_P1_FC)
		rt_pse_p1_fc(sc);

	if (status & PSE_P0_FC)
		rt_pse_p0_fc(sc);

	if (status & PSE_FQ_EMPTY)
		rt_pse_fq_empty(sc);

	if (status & INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);

	if (status & INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);

	if (status & RX_DLY_INT)
		rt_rx_delay_intr(sc);

	if (status & TX_DLY_INT)
		rt_tx_delay_intr(sc);

	if (status & INT_RX_DONE)
		rt_rx_intr(sc, 0);

	/* Tx queues are serviced highest-numbered first. */
	if (status & INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);

	if (status & INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);

	if (status & INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);

	if (status & INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}
1473 
1474 /*
1475  * rt_rt5350_intr - main ISR for Ralink 5350 SoC
1476  */
1477 static void
1478 rt_rt5350_intr(void *arg)
1479 {
1480 	struct rt_softc *sc;
1481 	struct ifnet *ifp;
1482 	uint32_t status;
1483 
1484 	sc = arg;
1485 	ifp = sc->ifp;
1486 
1487 	/* acknowledge interrupts */
1488 	status = RT_READ(sc, sc->fe_int_status);
1489 	RT_WRITE(sc, sc->fe_int_status, status);
1490 
1491 	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
1492 
1493 	if (status == 0xffffffff ||     /* device likely went away */
1494 		status == 0)            /* not for us */
1495 		return;
1496 
1497 	sc->interrupts++;
1498 
1499 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1500 	        return;
1501 
1502 	if (status & RT5350_INT_TX_COHERENT)
1503 		rt_tx_coherent_intr(sc);
1504 	if (status & RT5350_INT_RX_COHERENT)
1505 		rt_rx_coherent_intr(sc);
1506 	if (status & RT5350_RX_DLY_INT)
1507 	        rt_rx_delay_intr(sc);
1508 	if (status & RT5350_TX_DLY_INT)
1509 	        rt_tx_delay_intr(sc);
1510 	if (status & RT5350_INT_RXQ1_DONE)
1511 		rt_rx_intr(sc, 1);
1512 	if (status & RT5350_INT_RXQ0_DONE)
1513 		rt_rx_intr(sc, 0);
1514 	if (status & RT5350_INT_TXQ3_DONE)
1515 		rt_tx_intr(sc, 3);
1516 	if (status & RT5350_INT_TXQ2_DONE)
1517 		rt_tx_intr(sc, 2);
1518 	if (status & RT5350_INT_TXQ1_DONE)
1519 		rt_tx_intr(sc, 1);
1520 	if (status & RT5350_INT_TXQ0_DONE)
1521 		rt_tx_intr(sc, 0);
1522 }
1523 
1524 static void
1525 rt_tx_coherent_intr(struct rt_softc *sc)
1526 {
1527 	uint32_t tmp;
1528 	int i;
1529 
1530 	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");
1531 
1532 	sc->tx_coherent_interrupts++;
1533 
1534 	/* restart DMA engine */
1535 	tmp = RT_READ(sc, sc->pdma_glo_cfg);
1536 	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
1537 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
1538 
1539 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
1540 		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
1541 
1542 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
1543 		RT_WRITE(sc, sc->tx_base_ptr[i],
1544 			sc->tx_ring[i].desc_phys_addr);
1545 		RT_WRITE(sc, sc->tx_max_cnt[i],
1546 			RT_SOFTC_TX_RING_DESC_COUNT);
1547 		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
1548 	}
1549 
1550 	rt_txrx_enable(sc);
1551 }
1552 
1553 /*
1554  * rt_rx_coherent_intr
1555  */
1556 static void
1557 rt_rx_coherent_intr(struct rt_softc *sc)
1558 {
1559 	uint32_t tmp;
1560 	int i;
1561 
1562 	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");
1563 
1564 	sc->rx_coherent_interrupts++;
1565 
1566 	/* restart DMA engine */
1567 	tmp = RT_READ(sc, sc->pdma_glo_cfg);
1568 	tmp &= ~(FE_RX_DMA_EN);
1569 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
1570 
1571 	/* init Rx ring */
1572 	for (i = 0; i < sc->rx_ring_count; i++)
1573 		rt_reset_rx_ring(sc, &sc->rx_ring[i]);
1574 
1575 	for (i = 0; i < sc->rx_ring_count; i++) {
1576 		RT_WRITE(sc, sc->rx_base_ptr[i],
1577 			sc->rx_ring[i].desc_phys_addr);
1578 		RT_WRITE(sc, sc->rx_max_cnt[i],
1579 			RT_SOFTC_RX_RING_DATA_COUNT);
1580 		RT_WRITE(sc, sc->rx_calc_idx[i],
1581 			RT_SOFTC_RX_RING_DATA_COUNT - 1);
1582 	}
1583 
1584 	rt_txrx_enable(sc);
1585 }
1586 
1587 /*
1588  * rt_rx_intr - a packet received
1589  */
1590 static void
1591 rt_rx_intr(struct rt_softc *sc, int qid)
1592 {
1593 	KASSERT(qid >= 0 && qid < sc->rx_ring_count,
1594 		("%s: Rx interrupt: invalid qid=%d\n",
1595 		 device_get_nameunit(sc->dev), qid));
1596 
1597 	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
1598 	sc->rx_interrupts[qid]++;
1599 	RT_SOFTC_LOCK(sc);
1600 
1601 	if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) {
1602 		rt_intr_disable(sc, (sc->int_rx_done_mask << qid));
1603 		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
1604 	}
1605 
1606 	sc->intr_pending_mask |= (sc->int_rx_done_mask << qid);
1607 	RT_SOFTC_UNLOCK(sc);
1608 }
1609 
static void
rt_rx_delay_intr(struct rt_softc *sc)
{

	/* Statistics only: count delayed-Rx interrupts. */
	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
	sc->rx_delay_interrupts++;
}
1617 
static void
rt_tx_delay_intr(struct rt_softc *sc)
{

	/* Statistics only: count delayed-Tx interrupts. */
	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
	sc->tx_delay_interrupts++;
}
1625 
1626 /*
1627  * rt_tx_intr - Transsmition of packet done
1628  */
1629 static void
1630 rt_tx_intr(struct rt_softc *sc, int qid)
1631 {
1632 
1633 	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
1634 		("%s: Tx interrupt: invalid qid=%d\n",
1635 		 device_get_nameunit(sc->dev), qid));
1636 
1637 	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);
1638 
1639 	sc->tx_interrupts[qid]++;
1640 	RT_SOFTC_LOCK(sc);
1641 
1642 	if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) {
1643 		rt_intr_disable(sc, (sc->int_tx_done_mask << qid));
1644 		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1645 	}
1646 
1647 	sc->intr_pending_mask |= (sc->int_tx_done_mask << qid);
1648 	RT_SOFTC_UNLOCK(sc);
1649 }
1650 
1651 /*
1652  * rt_rx_done_task - run RX task
1653  */
static void
rt_rx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int again;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	/*
	 * NOTE(review): intr_pending_mask is cleared here without the softc
	 * lock, while rt_rx_intr() sets it under the lock -- confirm this
	 * unlocked read-modify-write is intentional.
	 */
	sc->intr_pending_mask &= ~sc->int_rx_done_mask;

	/* Nonzero 'again' means the process limit was hit and more
	 * frames may still be waiting in the ring. */
	again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);

	RT_SOFTC_LOCK(sc);

	if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) {
		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	} else {
		/* Fully drained: re-enable the Rx-done interrupt. */
		rt_intr_enable(sc, sc->int_rx_done_mask);
	}

	RT_SOFTC_UNLOCK(sc);
}
1685 
1686 /*
1687  * rt_tx_done_task - check for pending TX task in all queues
1688  */
static void
rt_tx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t intr_mask;
	int i;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	/* Reclaim descriptors for every queue with a pending Tx-done. */
	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
		if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) {
			sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i);
			rt_tx_eof(sc, &sc->tx_ring[i]);
		}
	}

	/* Disarm the Tx watchdog and allow new transmissions. */
	sc->tx_timer = 0;

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* The RT5350/MT7620 family uses a different interrupt bit layout. */
	if(sc->rt_chipid == RT_CHIPID_RT5350 ||
	   sc->rt_chipid == RT_CHIPID_MT7620)
	  intr_mask = (
		RT5350_INT_TXQ3_DONE |
		RT5350_INT_TXQ2_DONE |
		RT5350_INT_TXQ1_DONE |
		RT5350_INT_TXQ0_DONE);
	else
	  intr_mask = (
		INT_TXQ3_DONE |
		INT_TXQ2_DONE |
		INT_TXQ1_DONE |
		INT_TXQ0_DONE);

	RT_SOFTC_LOCK(sc);

	/* Re-enable only Tx-done bits that are disabled and not pending. */
	rt_intr_enable(sc, ~sc->intr_pending_mask &
	    (sc->intr_disable_mask & intr_mask));

	if (sc->intr_pending_mask & intr_mask) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "Tx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	RT_SOFTC_UNLOCK(sc);

	/* Freed descriptors may unblock queued output. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		rt_start(ifp);
}
1746 
1747 /*
1748  * rt_periodic_task - run periodic task
1749  */
1750 static void
1751 rt_periodic_task(void *context, int pending)
1752 {
1753 	struct rt_softc *sc;
1754 	struct ifnet *ifp;
1755 
1756 	sc = context;
1757 	ifp = sc->ifp;
1758 
1759 	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
1760 	    sc->periodic_round);
1761 
1762 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1763 		return;
1764 
1765 	RT_SOFTC_LOCK(sc);
1766 	sc->periodic_round++;
1767 	rt_update_stats(sc);
1768 
1769 	if ((sc->periodic_round % 10) == 0) {
1770 		rt_update_raw_counters(sc);
1771 		rt_watchdog(sc);
1772 	}
1773 
1774 	RT_SOFTC_UNLOCK(sc);
1775 	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
1776 }
1777 
1778 /*
1779  * rt_rx_eof - check for frames that done by DMA engine and pass it into
1780  * network subsystem.
1781  */
static int
rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit)
{
	struct ifnet *ifp;
/*	struct rt_softc_rx_ring *ring; */
	struct rt_rxdesc *desc;
	struct rt_softc_rx_data *data;
	struct mbuf *m, *mnew;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dma_map;
	uint32_t index, desc_flags;
	int error, nsegs, len, nframes;

	ifp = sc->ifp;
/*	ring = &sc->rx_ring[0]; */

	nframes = 0;

	/* Walk the ring until we catch up with the hardware DRX index
	 * or the caller-supplied frame limit is exhausted. */
	while (limit != 0) {
		index = RT_READ(sc, sc->rx_drx_idx[0]);
		if (ring->cur == index)
			break;

		desc = &ring->desc[ring->cur];
		data = &ring->data[ring->cur];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef IF_RT_DEBUG
		if ( sc->debug & RT_DEBUG_RX ) {
			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
		        hexdump(desc, 16, 0, 0);
			printf("-----------------------------------\n");
		}
#endif

		/* XXX Sometime device don`t set DDONE bit */
#ifdef DDONE_FIXED
		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
			break;
		}
#endif

		/* Frame length lives in the low 14 bits of sdl0. */
		len = le16toh(desc->sdl0) & 0x3fff;
		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);

		nframes++;

		/* Allocate a replacement cluster before handing the
		 * current one up; on failure the frame is dropped and
		 * the old mbuf stays in the ring. */
		mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (mnew == NULL) {
			sc->rx_mbuf_alloc_errors++;
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			goto skip;
		}

		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			RT_DPRINTF(sc, RT_DEBUG_RX,
			    "could not load Rx mbuf DMA map: "
			    "error=%d, nsegs=%d\n",
			    error, nsegs);

			m_freem(mnew);

			sc->rx_mbuf_dmamap_errors++;
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

			goto skip;
		}

		KASSERT(nsegs == 1, ("%s: too many DMA segments",
			device_get_nameunit(sc->dev)));

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

		/* Swap the loaded spare map into this slot. */
		dma_map = data->dma_map;
		data->dma_map = ring->spare_dma_map;
		ring->spare_dma_map = dma_map;

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			BUS_DMASYNC_PREREAD);

		m = data->m;
		desc_flags = desc->src;

		data->m = mnew;
		/* Add 2 for proper align of RX IP header */
		desc->sdp0 = htole32(segs[0].ds_addr+2);
		/*
		 * NOTE(review): sdl0 is written with htole32() here but
		 * read with le16toh()/htole16() above -- verify the field
		 * width in struct rt_rxdesc.
		 */
		desc->sdl0 = htole32(segs[0].ds_len-2);
		desc->src = 0;
		desc->ai = 0;
		desc->foe = 0;

		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);

		m->m_pkthdr.rcvif = ifp;
		/* Add 2 to fix data align, after sdp0 = addr + 2 */
		m->m_data += 2;
		m->m_pkthdr.len = m->m_len = len;

		/* Hardware checksum offload handling. */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* Drop frames with failed checksums unless the
			 * interface is in promiscuous mode. */
			if (desc_flags & (RXDSXR_SRC_IP_CSUM_FAIL|
			    RXDSXR_SRC_L4_CSUM_FAIL)) {
				RT_DPRINTF(sc, RT_DEBUG_RX,
				    "rxdesc: crc error\n");

				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

				if (!(ifp->if_flags & IFF_PROMISC)) {
				    m_freem(m);
				    goto skip;
				}
			}
			/*
			 * NOTE(review): CSUM_IP_VALID is set when the
			 * *_CSUM_FAIL flag is set -- the polarity looks
			 * inverted; confirm against the descriptor spec.
			 */
			if ((desc_flags & RXDSXR_SRC_IP_CSUM_FAIL) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_flags &= ~M_HASFCS;
		}

		(*ifp->if_input)(ifp, m);
skip:
		/* Return the descriptor to the hardware. */
		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;

		limit--;
	}

	/* Tell the hardware the last descriptor we have consumed. */
	if (ring->cur == 0)
		RT_WRITE(sc, sc->rx_calc_idx[0],
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	else
		RT_WRITE(sc, sc->rx_calc_idx[0],
			ring->cur - 1);

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);

	sc->rx_packets += nframes;

	/* Nonzero when the limit was exhausted (more frames may remain). */
	return (limit == 0);
}
1939 
1940 /*
1941  * rt_tx_eof - check for successful transmitted frames and mark their
1942  * descriptor as free.
1943  */
static void
rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
{
	struct ifnet *ifp;
	struct rt_txdesc *desc;
	struct rt_softc_tx_data *data;
	uint32_t index;
	int ndescs, nframes;

	ifp = sc->ifp;

	ndescs = 0;
	nframes = 0;

	/* Walk from desc_next until we catch up with the hardware's
	 * DTX index. */
	for (;;) {
		index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]);
		if (ring->desc_next == index)
			break;

		ndescs++;

		desc = &ring->desc[ring->desc_next];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* A LASTSEG bit in either half marks the end of a frame;
		 * only then is there an mbuf to release. */
		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
			nframes++;

			data = &ring->data[ring->data_next];

			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

			m_freem(data->m);

			data->m = NULL;

			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

			RT_SOFTC_TX_RING_LOCK(ring);
			ring->data_queued--;
			ring->data_next = (ring->data_next + 1) %
			    RT_SOFTC_TX_RING_DATA_COUNT;
			RT_SOFTC_TX_RING_UNLOCK(ring);
		}

		/* Clear DDONE so the descriptor can be reused. */
		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		RT_SOFTC_TX_RING_LOCK(ring);
		ring->desc_queued--;
		ring->desc_next = (ring->desc_next + 1) %
		    RT_SOFTC_TX_RING_DESC_COUNT;
		RT_SOFTC_TX_RING_UNLOCK(ring);
	}

	RT_DPRINTF(sc, RT_DEBUG_TX,
	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
	    nframes);
}
2009 
2010 /*
2011  * rt_update_stats - query statistics counters and update related variables.
2012  */
static void
rt_update_stats(struct rt_softc *sc)
{
	struct ifnet *ifp;

	/* Placeholder: ifp is currently unused by the stub below. */
	ifp = sc->ifp;
	RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistic: \n");
	/* XXX do update stats here */
}
2022 
2023 /*
2024  * rt_watchdog - reinit device on watchdog event.
2025  */
static void
rt_watchdog(struct rt_softc *sc)
{
	uint32_t tmp;
#ifdef notyet
	int ntries;
#endif
	/*
	 * NOTE(review): tmp is assigned only on non-RT5350/MT7620 chips;
	 * the "notyet" code below would read it uninitialized on those
	 * chips if it were ever enabled.
	 */
	if(sc->rt_chipid != RT_CHIPID_RT5350 &&
	   sc->rt_chipid != RT_CHIPID_MT7620) {
		tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);

		RT_DPRINTF(sc, RT_DEBUG_WATCHDOG,
			   "watchdog: PSE_IQ_STA=0x%08x\n", tmp);
	}
	/* XXX: do not reset */
#ifdef notyet
	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
		sc->tx_queue_not_empty[0]++;

		for (ntries = 0; ntries < 10; ntries++) {
			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
				break;

			DELAY(1);
		}
	}

	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
		sc->tx_queue_not_empty[1]++;

		for (ntries = 0; ntries < 10; ntries++) {
			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
				break;

			DELAY(1);
		}
	}
#endif
}
2067 
2068 /*
2069  * rt_update_raw_counters - update counters.
2070  */
2071 static void
2072 rt_update_raw_counters(struct rt_softc *sc)
2073 {
2074 
2075 	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
2076 	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
2077 	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
2078 	sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
2079 
2080 	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
2081 	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
2082 	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
2083 	sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
2084 	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
2085 	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
2086 	sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
2087 }
2088 
2089 static void
2090 rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
2091 {
2092 	uint32_t tmp;
2093 
2094 	sc->intr_disable_mask &= ~intr_mask;
2095 	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2096 	RT_WRITE(sc, sc->fe_int_enable, tmp);
2097 }
2098 
2099 static void
2100 rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
2101 {
2102 	uint32_t tmp;
2103 
2104 	sc->intr_disable_mask |= intr_mask;
2105 	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2106 	RT_WRITE(sc, sc->fe_int_enable, tmp);
2107 }
2108 
2109 /*
2110  * rt_txrx_enable - enable TX/RX DMA
2111  */
2112 static int
2113 rt_txrx_enable(struct rt_softc *sc)
2114 {
2115 	struct ifnet *ifp;
2116 	uint32_t tmp;
2117 	int ntries;
2118 
2119 	ifp = sc->ifp;
2120 
2121 	/* enable Tx/Rx DMA engine */
2122 	for (ntries = 0; ntries < 200; ntries++) {
2123 		tmp = RT_READ(sc, sc->pdma_glo_cfg);
2124 		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
2125 			break;
2126 
2127 		DELAY(1000);
2128 	}
2129 
2130 	if (ntries == 200) {
2131 		device_printf(sc->dev, "timeout waiting for DMA engine\n");
2132 		return (-1);
2133 	}
2134 
2135 	DELAY(50);
2136 
2137 	tmp |= FE_TX_WB_DDONE |	FE_RX_DMA_EN | FE_TX_DMA_EN;
2138 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
2139 
2140 	/* XXX set Rx filter */
2141 	return (0);
2142 }
2143 
2144 /*
2145  * rt_alloc_rx_ring - allocate RX DMA ring buffer
2146  */
static int
rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid)
{
	struct rt_rxdesc *desc;
	struct rt_softc_rx_data *data;
	bus_dma_segment_t segs[1];
	int i, nsegs, error;

	/* Descriptor ring: one contiguous DMA segment, page aligned. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
		0, NULL, NULL, &ring->desc_dma_tag);
	if (error != 0)	{
		device_printf(sc->dev,
		    "could not create Rx desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate Rx desc DMA memory\n");
		goto fail;
	}

	/* rt_dma_map_addr() stores the bus address in desc_phys_addr. */
	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
		ring->desc,
		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
		rt_dma_map_addr, &ring->desc_phys_addr, 0);
	if (error != 0) {
		device_printf(sc->dev, "could not load Rx desc DMA map\n");
		goto fail;
	}

	/* Data buffers: one jumbo-page cluster per descriptor. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
		&ring->data_dma_tag);
	if (error != 0)	{
		device_printf(sc->dev,
		    "could not create Rx data DMA tag\n");
		goto fail;
	}

	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
		desc = &ring->desc[i];
		data = &ring->data[i];

		error = bus_dmamap_create(ring->data_dma_tag, 0,
		    &data->dma_map);
		if (error != 0)	{
			device_printf(sc->dev, "could not create Rx data DMA "
			    "map\n");
			goto fail;
		}

		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (data->m == NULL) {
			device_printf(sc->dev, "could not allocate Rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)	{
			device_printf(sc->dev,
			    "could not load Rx mbuf DMA map\n");
			goto fail;
		}

		KASSERT(nsegs == 1, ("%s: too many DMA segments",
			device_get_nameunit(sc->dev)));

		/* Add 2 for proper align of RX IP header */
		desc->sdp0 = htole32(segs[0].ds_addr+2);
		/*
		 * NOTE(review): sdl0 is written with htole32() here but
		 * masked with htole16() elsewhere in this file -- verify
		 * the field width in struct rt_rxdesc.
		 */
		desc->sdl0 = htole32(segs[0].ds_len-2);
	}

	/* Spare map used by rt_rx_eof() to pre-load replacement mbufs. */
	error = bus_dmamap_create(ring->data_dma_tag, 0,
	    &ring->spare_dma_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Rx spare DMA map\n");
		goto fail;
	}

	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	ring->qid = qid;
	return (0);

fail:
	/* rt_free_rx_ring() tolerates partially-initialized rings. */
	rt_free_rx_ring(sc, ring);
	return (error);
}
2248 
2249 /*
2250  * rt_reset_rx_ring - reset RX ring buffer
2251  */
2252 static void
2253 rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2254 {
2255 	struct rt_rxdesc *desc;
2256 	int i;
2257 
2258 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2259 		desc = &ring->desc[i];
2260 		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2261 	}
2262 
2263 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2264 		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2265 	ring->cur = 0;
2266 }
2267 
2268 /*
2269  * rt_free_rx_ring - free memory used by RX ring buffer
2270  */
2271 static void
2272 rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2273 {
2274 	struct rt_softc_rx_data *data;
2275 	int i;
2276 
2277 	if (ring->desc != NULL) {
2278 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2279 			BUS_DMASYNC_POSTWRITE);
2280 		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2281 		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2282 			ring->desc_dma_map);
2283 	}
2284 
2285 	if (ring->desc_dma_tag != NULL)
2286 		bus_dma_tag_destroy(ring->desc_dma_tag);
2287 
2288 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2289 		data = &ring->data[i];
2290 
2291 		if (data->m != NULL) {
2292 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2293 				BUS_DMASYNC_POSTREAD);
2294 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2295 			m_freem(data->m);
2296 		}
2297 
2298 		if (data->dma_map != NULL)
2299 			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2300 	}
2301 
2302 	if (ring->spare_dma_map != NULL)
2303 		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
2304 
2305 	if (ring->data_dma_tag != NULL)
2306 		bus_dma_tag_destroy(ring->data_dma_tag);
2307 }
2308 
2309 /*
2310  * rt_alloc_tx_ring - allocate TX ring buffer
2311  */
2312 static int
2313 rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2314 {
2315 	struct rt_softc_tx_data *data;
2316 	int error, i;
2317 
2318 	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2319 
2320 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2321 		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2322 		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2323 		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2324 		0, NULL, NULL, &ring->desc_dma_tag);
2325 	if (error != 0) {
2326 		device_printf(sc->dev,
2327 		    "could not create Tx desc DMA tag\n");
2328 		goto fail;
2329 	}
2330 
2331 	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2332 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2333 	if (error != 0)	{
2334 		device_printf(sc->dev,
2335 		    "could not allocate Tx desc DMA memory\n");
2336 		goto fail;
2337 	}
2338 
2339 	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2340 	    ring->desc,	(RT_SOFTC_TX_RING_DESC_COUNT *
2341 	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
2342 	    &ring->desc_phys_addr, 0);
2343 	if (error != 0) {
2344 		device_printf(sc->dev, "could not load Tx desc DMA map\n");
2345 		goto fail;
2346 	}
2347 
2348 	ring->desc_queued = 0;
2349 	ring->desc_cur = 0;
2350 	ring->desc_next = 0;
2351 
2352 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2353 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2354 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2355 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2356 	    0, NULL, NULL, &ring->seg0_dma_tag);
2357 	if (error != 0) {
2358 		device_printf(sc->dev,
2359 		    "could not create Tx seg0 DMA tag\n");
2360 		goto fail;
2361 	}
2362 
2363 	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2364 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2365 	if (error != 0) {
2366 		device_printf(sc->dev,
2367 		    "could not allocate Tx seg0 DMA memory\n");
2368 		goto fail;
2369 	}
2370 
2371 	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2372 	    ring->seg0,
2373 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2374 	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2375 	if (error != 0) {
2376 		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2377 		goto fail;
2378 	}
2379 
2380 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2381 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2382 	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2383 	    &ring->data_dma_tag);
2384 	if (error != 0) {
2385 		device_printf(sc->dev,
2386 		    "could not create Tx data DMA tag\n");
2387 		goto fail;
2388 	}
2389 
2390 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2391 		data = &ring->data[i];
2392 
2393 		error = bus_dmamap_create(ring->data_dma_tag, 0,
2394 		    &data->dma_map);
2395 		if (error != 0) {
2396 			device_printf(sc->dev, "could not create Tx data DMA "
2397 			    "map\n");
2398 			goto fail;
2399 		}
2400 	}
2401 
2402 	ring->data_queued = 0;
2403 	ring->data_cur = 0;
2404 	ring->data_next = 0;
2405 
2406 	ring->qid = qid;
2407 	return (0);
2408 
2409 fail:
2410 	rt_free_tx_ring(sc, ring);
2411 	return (error);
2412 }
2413 
2414 /*
2415  * rt_reset_tx_ring - reset TX ring buffer to empty state
2416  */
2417 static void
2418 rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2419 {
2420 	struct rt_softc_tx_data *data;
2421 	struct rt_txdesc *desc;
2422 	int i;
2423 
2424 	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2425 		desc = &ring->desc[i];
2426 
2427 		desc->sdl0 = 0;
2428 		desc->sdl1 = 0;
2429 	}
2430 
2431 	ring->desc_queued = 0;
2432 	ring->desc_cur = 0;
2433 	ring->desc_next = 0;
2434 
2435 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2436 		BUS_DMASYNC_PREWRITE);
2437 
2438 	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2439 		BUS_DMASYNC_PREWRITE);
2440 
2441 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2442 		data = &ring->data[i];
2443 
2444 		if (data->m != NULL) {
2445 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2446 				BUS_DMASYNC_POSTWRITE);
2447 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2448 			m_freem(data->m);
2449 			data->m = NULL;
2450 		}
2451 	}
2452 
2453 	ring->data_queued = 0;
2454 	ring->data_cur = 0;
2455 	ring->data_next = 0;
2456 }
2457 
2458 /*
2459  * rt_free_tx_ring - free RX ring buffer
2460  */
2461 static void
2462 rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2463 {
2464 	struct rt_softc_tx_data *data;
2465 	int i;
2466 
2467 	if (ring->desc != NULL) {
2468 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2469 			BUS_DMASYNC_POSTWRITE);
2470 		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2471 		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2472 			ring->desc_dma_map);
2473 	}
2474 
2475 	if (ring->desc_dma_tag != NULL)
2476 		bus_dma_tag_destroy(ring->desc_dma_tag);
2477 
2478 	if (ring->seg0 != NULL) {
2479 		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2480 			BUS_DMASYNC_POSTWRITE);
2481 		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2482 		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2483 			ring->seg0_dma_map);
2484 	}
2485 
2486 	if (ring->seg0_dma_tag != NULL)
2487 		bus_dma_tag_destroy(ring->seg0_dma_tag);
2488 
2489 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2490 		data = &ring->data[i];
2491 
2492 		if (data->m != NULL) {
2493 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2494 				BUS_DMASYNC_POSTWRITE);
2495 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2496 			m_freem(data->m);
2497 		}
2498 
2499 		if (data->dma_map != NULL)
2500 			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2501 	}
2502 
2503 	if (ring->data_dma_tag != NULL)
2504 		bus_dma_tag_destroy(ring->data_dma_tag);
2505 
2506 	mtx_destroy(&ring->lock);
2507 }
2508 
2509 /*
2510  * rt_dma_map_addr - get address of busdma segment
2511  */
2512 static void
2513 rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2514 {
2515 	if (error != 0)
2516 		return;
2517 
2518 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2519 
2520 	*(bus_addr_t *) arg = segs[0].ds_addr;
2521 }
2522 
2523 /*
2524  * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2525  */
2526 static void
2527 rt_sysctl_attach(struct rt_softc *sc)
2528 {
2529 	struct sysctl_ctx_list *ctx;
2530 	struct sysctl_oid *tree;
2531 	struct sysctl_oid *stats;
2532 
2533 	ctx = device_get_sysctl_ctx(sc->dev);
2534 	tree = device_get_sysctl_tree(sc->dev);
2535 
2536 	/* statistic counters */
2537 	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2538 	    "stats", CTLFLAG_RD, 0, "statistic");
2539 
2540 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2541 	    "interrupts", CTLFLAG_RD, &sc->interrupts,
2542 	    "all interrupts");
2543 
2544 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2545 	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
2546 	    "Tx coherent interrupts");
2547 
2548 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2549 	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
2550 	    "Rx coherent interrupts");
2551 
2552 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2553 	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0],
2554 	    "Rx interrupts");
2555 
2556 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2557 	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts,
2558 	    "Rx delay interrupts");
2559 
2560 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2561 	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3],
2562 	    "Tx AC3 interrupts");
2563 
2564 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2565 	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2],
2566 	    "Tx AC2 interrupts");
2567 
2568 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2569 	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1],
2570 	    "Tx AC1 interrupts");
2571 
2572 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2573 	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0],
2574 	    "Tx AC0 interrupts");
2575 
2576 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2577 	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
2578 	    "Tx delay interrupts");
2579 
2580 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2581 	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
2582 	    0, "Tx AC3 descriptors queued");
2583 
2584 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2585 	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
2586 	    0, "Tx AC3 data queued");
2587 
2588 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2589 	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
2590 	    0, "Tx AC2 descriptors queued");
2591 
2592 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2593 	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
2594 	    0, "Tx AC2 data queued");
2595 
2596 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2597 	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
2598 	    0, "Tx AC1 descriptors queued");
2599 
2600 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2601 	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
2602 	    0, "Tx AC1 data queued");
2603 
2604 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2605 	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
2606 	    0, "Tx AC0 descriptors queued");
2607 
2608 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2609 	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
2610 	    0, "Tx AC0 data queued");
2611 
2612 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2613 	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
2614 	    "Tx AC3 data queue full");
2615 
2616 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2617 	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
2618 	    "Tx AC2 data queue full");
2619 
2620 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2621 	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
2622 	    "Tx AC1 data queue full");
2623 
2624 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2625 	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
2626 	    "Tx AC0 data queue full");
2627 
2628 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2629 	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
2630 	    "Tx watchdog timeouts");
2631 
2632 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2633 	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets,
2634 	    "Tx defragmented packets");
2635 
2636 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2637 	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail,
2638 	    "no Tx descriptors available");
2639 
2640 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2641 	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
2642 	    "Rx mbuf allocation errors");
2643 
2644 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2645 	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
2646 	    "Rx mbuf DMA mapping errors");
2647 
2648 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2649 	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
2650 	    "Tx queue 0 not empty");
2651 
2652 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2653 	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
2654 	    "Tx queue 1 not empty");
2655 
2656 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2657 	    "rx_packets", CTLFLAG_RD, &sc->rx_packets,
2658 	    "Rx packets");
2659 
2660 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2661 	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err,
2662 	    "Rx CRC errors");
2663 
2664 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2665 	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err,
2666 	    "Rx PHY errors");
2667 
2668 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2669 	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets,
2670 	    "Rx duplicate packets");
2671 
2672 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2673 	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows,
2674 	    "Rx FIFO overflows");
2675 
2676 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2677 	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes,
2678 	    "Rx bytes");
2679 
2680 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2681 	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err,
2682 	    "Rx too long frame errors");
2683 
2684 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2685 	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err,
2686 	    "Rx too short frame errors");
2687 
2688 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2689 	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes,
2690 	    "Tx bytes");
2691 
2692 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2693 	    "tx_packets", CTLFLAG_RD, &sc->tx_packets,
2694 	    "Tx packets");
2695 
2696 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2697 	    "tx_skip", CTLFLAG_RD, &sc->tx_skip,
2698 	    "Tx skip count for GDMA ports");
2699 
2700 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2701 	    "tx_collision", CTLFLAG_RD, &sc->tx_collision,
2702 	    "Tx collision count for GDMA ports");
2703 }
2704 
2705 #ifdef IF_RT_PHY_SUPPORT
2706 static int
2707 rt_miibus_readreg(device_t dev, int phy, int reg)
2708 {
2709 	struct rt_softc *sc = device_get_softc(dev);
2710 
2711 	/*
2712 	 * PSEUDO_PHYAD is a special value for indicate switch attached.
2713 	 * No one PHY use PSEUDO_PHYAD (0x1e) address.
2714 	 */
2715 	if (phy == 31) {
2716 		/* Fake PHY ID for bfeswitch attach */
2717 		switch (reg) {
2718 		case MII_BMSR:
2719 			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
2720 		case MII_PHYIDR1:
2721 			return (0x40);		/* As result of faking */
2722 		case MII_PHYIDR2:		/* PHY will detect as */
2723 			return (0x6250);		/* bfeswitch */
2724 		}
2725 	}
2726 
2727 	/* Wait prev command done if any */
2728 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2729 	RT_WRITE(sc, MDIO_ACCESS,
2730 	    MDIO_CMD_ONGO ||
2731 	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) ||
2732 	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
2733 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2734 
2735 	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2736 }
2737 
2738 static int
2739 rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2740 {
2741 	struct rt_softc *sc = device_get_softc(dev);
2742 
2743 	/* Wait prev command done if any */
2744 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2745 	RT_WRITE(sc, MDIO_ACCESS,
2746 	    MDIO_CMD_ONGO || MDIO_CMD_WR ||
2747 	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) ||
2748 	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) ||
2749 	    (val & MDIO_PHY_DATA_MASK));
2750 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2751 
2752 	return (0);
2753 }
2754 
2755 void
2756 rt_miibus_statchg(device_t dev)
2757 {
2758 	struct rt_softc *sc = device_get_softc(dev);
2759 	struct mii_data *mii;
2760 
2761 	mii = device_get_softc(sc->rt_miibus);
2762 
2763 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2764 	    (IFM_ACTIVE | IFM_AVALID)) {
2765 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2766 		case IFM_10_T:
2767 		case IFM_100_TX:
2768 			/* XXX check link here */
2769 			sc->flags |= 1;
2770 			break;
2771 		default:
2772 			break;
2773 		}
2774 	}
2775 }
2776 #endif /* IF_RT_PHY_SUPPORT */
2777 
/* Newbus method table wiring the generic device interface (and, when PHY
 * support is compiled in, the MII bus interface) to this driver. */
static device_method_t rt_dev_methods[] =
{
	/* Device interface */
	DEVMETHOD(device_probe, rt_probe),
	DEVMETHOD(device_attach, rt_attach),
	DEVMETHOD(device_detach, rt_detach),
	DEVMETHOD(device_shutdown, rt_shutdown),
	DEVMETHOD(device_suspend, rt_suspend),
	DEVMETHOD(device_resume, rt_resume),

#ifdef IF_RT_PHY_SUPPORT
	/* MII interface */
	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
#endif

	DEVMETHOD_END
};
2796 
/* Driver declaration and module glue: register "rt" on nexus (and on
 * simplebus for FDT-based configs), depending on ether and miibus. */
static driver_t rt_driver =
{
	"rt",
	rt_dev_methods,
	sizeof(struct rt_softc)
};

static devclass_t rt_dev_class;

DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
#ifdef FDT
DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0);
#endif

MODULE_DEPEND(rt, ether, 1, 1, 1);
MODULE_DEPEND(rt, miibus, 1, 1, 1);
2813 
2814