xref: /freebsd-12.1/sys/dev/rt/if_rt.c (revision bb7d0109)
/*-
 * Copyright (c) 2015-2016, Stanislav Galabov
 * Copyright (c) 2014, Aleksandr A. Mityaev
 * Copyright (c) 2011, Aleksandr Rybalko
 * based on hard work
 * by Alexander Egorenkov <[email protected]>
 * and by Damien Bergamini <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "if_rtvar.h"
#include "if_rtreg.h"

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "opt_platform.h"
#include "opt_rt305x.h"

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#if 0
#include <mips/rt305x/rt305x_sysctlvar.h>
#include <mips/rt305x/rt305xreg.h>
#endif

#ifdef IF_RT_PHY_SUPPORT
#include "miibus_if.h"
#endif

/*
 * Defines and macros
 */
#define	RT_MAX_AGG_SIZE			3840

#define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE

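/*
 * Register field helpers: RT_MS extracts a field (mask with _f, then
 * shift right by the companion _f##_S constant), RT_SM packs one (shift
 * left, then mask).  For a hypothetical field FOO = 0x0000ff00 with
 * FOO_S = 8, RT_MS(0x1234, FOO) yields 0x12 and RT_SM(0x12, FOO)
 * yields 0x1200.
 */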
#define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
#define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)

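/* seconds; rt_tx_watchdog() runs once per second and decrements this */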
#define	RT_TX_WATCHDOG_TIMEOUT		5

#define RT_CHIPID_RT3050 0x3050
#define RT_CHIPID_RT5350 0x5350
#define RT_CHIPID_MT7620 0x7620
#define RT_CHIPID_MT7621 0x7621

#ifdef FDT
/* More specific and newer models should go first. */
static const struct ofw_compat_data rt_compat_data[] = {
	{ "ralink,rt3050-eth",		RT_CHIPID_RT3050 },
	{ "ralink,rt3352-eth",		RT_CHIPID_RT3050 },
	{ "ralink,rt3883-eth",		RT_CHIPID_RT3050 },
	{ "ralink,rt5350-eth",		RT_CHIPID_RT5350 },
	{ "ralink,mt7620a-eth",		RT_CHIPID_MT7620 },
	{ "mediatek,mt7620-eth",	RT_CHIPID_MT7620 },
	{ "ralink,mt7621-eth",		RT_CHIPID_MT7621 },
	{ "mediatek,mt7621-eth",	RT_CHIPID_MT7621 },
	{ NULL,				0 }
};
#endif

/*
 * Static function prototypes
 */
static int	rt_probe(device_t dev);
static int	rt_attach(device_t dev);
static int	rt_detach(device_t dev);
static int	rt_shutdown(device_t dev);
static int	rt_suspend(device_t dev);
static int	rt_resume(device_t dev);
static void	rt_init_locked(void *priv);
static void	rt_init(void *priv);
static void	rt_stop_locked(void *priv);
static void	rt_stop(void *priv);
static void	rt_start(struct ifnet *ifp);
static int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	rt_periodic(void *arg);
static void	rt_tx_watchdog(void *arg);
static void	rt_intr(void *arg);
static void	rt_rt5350_intr(void *arg);
static void	rt_tx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_delay_intr(struct rt_softc *sc);
static void	rt_tx_delay_intr(struct rt_softc *sc);
static void	rt_rx_intr(struct rt_softc *sc, int qid);
static void	rt_tx_intr(struct rt_softc *sc, int qid);
static void	rt_rx_done_task(void *context, int pending);
static void	rt_tx_done_task(void *context, int pending);
static void	rt_periodic_task(void *context, int pending);
static int	rt_rx_eof(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring, int limit);
static void	rt_tx_eof(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_update_stats(struct rt_softc *sc);
static void	rt_watchdog(struct rt_softc *sc);
static void	rt_update_raw_counters(struct rt_softc *sc);
static void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
static void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
static int	rt_txrx_enable(struct rt_softc *sc);
static int	rt_alloc_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring, int qid);
static void	rt_reset_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static void	rt_free_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static int	rt_alloc_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring, int qid);
static void	rt_reset_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_free_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static void	rt_sysctl_attach(struct rt_softc *sc);
#ifdef IF_RT_PHY_SUPPORT
void		rt_miibus_statchg(device_t);
static int	rt_miibus_readreg(device_t, int, int);
static int	rt_miibus_writereg(device_t, int, int, int);
#endif
static int	rt_ifmedia_upd(struct ifnet *);
static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
#ifdef IF_RT_DEBUG
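/*
 * Exposed as hw.rt.debug; CTLFLAG_RWTUN makes it both a loader tunable
 * (hw.rt.debug=N in loader.conf) and a runtime sysctl knob.
 */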
static int rt_debug = 0;
SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
    "RT debug level");
#endif

static int
rt_probe(device_t dev)
{
	struct rt_softc *sc = device_get_softc(dev);
	char buf[80];
#ifdef FDT
	const struct ofw_compat_data *cd;

	cd = ofw_bus_search_compatible(dev, rt_compat_data);
	if (cd->ocd_data == 0)
		return (ENXIO);

	sc->rt_chipid = (unsigned int)(cd->ocd_data);
#else
#if defined(MT7620)
	sc->rt_chipid = RT_CHIPID_MT7620;
#elif defined(MT7621)
	sc->rt_chipid = RT_CHIPID_MT7621;
#elif defined(RT5350)
	sc->rt_chipid = RT_CHIPID_RT5350;
#else
	sc->rt_chipid = RT_CHIPID_RT3050;
#endif
#endif
	snprintf(buf, sizeof(buf), "Ralink %cT%x onChip Ethernet driver",
		sc->rt_chipid >= 0x7600 ? 'M' : 'R', sc->rt_chipid);
	device_set_desc_copy(dev, buf);
	return (BUS_PROBE_GENERIC);
}

/*
 * macaddr_atoi - translate a string MAC address into a uint8_t array.
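 * Returns 0 on success and 1 (with the output zeroed) on a parse failure,
 * e.g. macaddr_atoi("00:18:e7:d5:83:90", mac) fills mac and returns 0.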
 */
static int
macaddr_atoi(const char *str, uint8_t *mac)
{
	int count, i;
	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */

	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
	    &amac[0], &amac[1], &amac[2],
	    &amac[3], &amac[4], &amac[5]);
	if (count < ETHER_ADDR_LEN) {
		memset(mac, 0, ETHER_ADDR_LEN);
		return (1);
	}

	/* Copy aligned to result */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = (amac[i] & 0xff);

	return (0);
}

#ifdef USE_GENERATED_MAC_ADDRESS
/*
 * generate_mac(uint8_t *mac)
 * MAC address generator for cases when the real device MAC address is
 * unknown or not yet accessible.
 * Uses the 'b','s','d' signature and 3 octets derived from a CRC32 over
 * the kernel environment:
 * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
 *
 * Output: a MAC address that does not change between reboots, as long as
 * the hints or bootloader info stay unchanged.
 */
static void
generate_mac(uint8_t *mac)
{
	unsigned char *cp;
	int i = 0;
	uint32_t crc = 0xffffffff;

	/* Generate CRC32 on kenv */
	for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
		crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
	}
	crc = ~crc;

	mac[0] = 'b';
	mac[1] = 's';
	mac[2] = 'd';
	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
	mac[4] = (crc >> 8) & 0xff;
	mac[5] = crc & 0xff;
}
#endif

/*
 * ether_request_mac - try to find a usable MAC address.
 */
static int
ether_request_mac(device_t dev, uint8_t *mac)
{
	char *var;

	/*
	 * "ethaddr" is passed via envp on RedBoot platforms
	 * "kmac" is passed via argv on RouterBOOT platforms
	 */
#if defined(RT305X_UBOOT) || defined(__REDBOOT__) || defined(__ROUTERBOOT__)
	if ((var = kern_getenv("ethaddr")) != NULL ||
	    (var = kern_getenv("kmac")) != NULL) {

		if (!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from KENV\n",
			    device_get_nameunit(dev), var);
			freeenv(var);
			return (0);
		}
		freeenv(var);
	}
#endif

	/*
	 * Try from hints
	 * hint.[dev].[unit].macaddr
	 */
	if (!resource_string_value(device_get_name(dev),
	    device_get_unit(dev), "macaddr", (const char **)&var)) {

		if (!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from hints\n",
			    device_get_nameunit(dev), var);
			return (0);
		}
	}

#ifdef USE_GENERATED_MAC_ADDRESS
	generate_mac(mac);

	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
#else
	/* Hardcoded */
	mac[0] = 0x00;
	mac[1] = 0x18;
	mac[2] = 0xe7;
	mac[3] = 0xd5;
	mac[4] = 0x83;
	mac[5] = 0x90;

	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
#endif

	return (0);
}

/*
 * Reset hardware
 */
static void
reset_freng(struct rt_softc *sc)
{
	/* XXX hard reset kills everything so skip it ... */
	return;
}

static int
rt_attach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int error, i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	sc->mem_rid = 0;
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		error = ENXIO;
		goto fail;
	}

	sc->bst = rman_get_bustag(sc->mem);
	sc->bsh = rman_get_bushandle(sc->mem);

	sc->irq_rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev,
		    "could not allocate interrupt resource\n");
		error = ENXIO;
		goto fail;
	}

#ifdef IF_RT_DEBUG
	sc->debug = rt_debug;

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
#endif

	/* Reset hardware */
	reset_freng(sc);

	if (sc->rt_chipid == RT_CHIPID_MT7620) {
		sc->csum_fail_ip = MT7620_RXD_SRC_IP_CSUM_FAIL;
		sc->csum_fail_l4 = MT7620_RXD_SRC_L4_CSUM_FAIL;
	} else if (sc->rt_chipid == RT_CHIPID_MT7621) {
		sc->csum_fail_ip = MT7621_RXD_SRC_IP_CSUM_FAIL;
		sc->csum_fail_l4 = MT7621_RXD_SRC_L4_CSUM_FAIL;
	} else {
		sc->csum_fail_ip = RT305X_RXD_SRC_IP_CSUM_FAIL;
		sc->csum_fail_l4 = RT305X_RXD_SRC_L4_CSUM_FAIL;
	}

	/* Fill in the SoC-specific register map */
	switch (sc->rt_chipid) {
	case RT_CHIPID_MT7620:
	case RT_CHIPID_MT7621:
		sc->gdma1_base = MT7620_GDMA1_BASE;
		/* fallthrough */
	case RT_CHIPID_RT5350:
		device_printf(dev, "%cT%x Ethernet MAC (rev 0x%08x)\n",
			sc->rt_chipid >= 0x7600 ? 'M' : 'R',
			sc->rt_chipid, sc->mac_rev);
		/* RT5350: No GDMA, PSE, CDMA, PPE */
		RT_WRITE(sc, GE_PORT_BASE + 0x0C00, /* UDPCS, TCPCS, IPCS=1 */
			RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7 << 16));
		sc->delay_int_cfg = RT5350_PDMA_BASE + RT5350_DELAY_INT_CFG;
		sc->fe_int_status = RT5350_FE_INT_STATUS;
		sc->fe_int_enable = RT5350_FE_INT_ENABLE;
		sc->pdma_glo_cfg = RT5350_PDMA_BASE + RT5350_PDMA_GLO_CFG;
		sc->pdma_rst_idx = RT5350_PDMA_BASE + RT5350_PDMA_RST_IDX;
		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
			sc->tx_base_ptr[i] = RT5350_PDMA_BASE + RT5350_TX_BASE_PTR(i);
			sc->tx_max_cnt[i] = RT5350_PDMA_BASE + RT5350_TX_MAX_CNT(i);
			sc->tx_ctx_idx[i] = RT5350_PDMA_BASE + RT5350_TX_CTX_IDX(i);
			sc->tx_dtx_idx[i] = RT5350_PDMA_BASE + RT5350_TX_DTX_IDX(i);
		}
		sc->rx_ring_count = 2;
		sc->rx_base_ptr[0] = RT5350_PDMA_BASE + RT5350_RX_BASE_PTR0;
		sc->rx_max_cnt[0] = RT5350_PDMA_BASE + RT5350_RX_MAX_CNT0;
		sc->rx_calc_idx[0] = RT5350_PDMA_BASE + RT5350_RX_CALC_IDX0;
		sc->rx_drx_idx[0] = RT5350_PDMA_BASE + RT5350_RX_DRX_IDX0;
		sc->rx_base_ptr[1] = RT5350_PDMA_BASE + RT5350_RX_BASE_PTR1;
		sc->rx_max_cnt[1] = RT5350_PDMA_BASE + RT5350_RX_MAX_CNT1;
		sc->rx_calc_idx[1] = RT5350_PDMA_BASE + RT5350_RX_CALC_IDX1;
		sc->rx_drx_idx[1] = RT5350_PDMA_BASE + RT5350_RX_DRX_IDX1;
		sc->int_rx_done_mask = RT5350_INT_RXQ0_DONE;
		sc->int_tx_done_mask = RT5350_INT_TXQ0_DONE;
		break;
	default:
		device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
			sc->mac_rev);
		sc->gdma1_base = GDMA1_BASE;
		sc->delay_int_cfg = PDMA_BASE + DELAY_INT_CFG;
		sc->fe_int_status = GE_PORT_BASE + FE_INT_STATUS;
		sc->fe_int_enable = GE_PORT_BASE + FE_INT_ENABLE;
		sc->pdma_glo_cfg = PDMA_BASE + PDMA_GLO_CFG;
		sc->pdma_rst_idx = PDMA_BASE + PDMA_RST_IDX;
		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
			sc->tx_base_ptr[i] = PDMA_BASE + TX_BASE_PTR(i);
			sc->tx_max_cnt[i] = PDMA_BASE + TX_MAX_CNT(i);
			sc->tx_ctx_idx[i] = PDMA_BASE + TX_CTX_IDX(i);
			sc->tx_dtx_idx[i] = PDMA_BASE + TX_DTX_IDX(i);
		}
		sc->rx_ring_count = 1;
		sc->rx_base_ptr[0] = PDMA_BASE + RX_BASE_PTR0;
		sc->rx_max_cnt[0] = PDMA_BASE + RX_MAX_CNT0;
		sc->rx_calc_idx[0] = PDMA_BASE + RX_CALC_IDX0;
		sc->rx_drx_idx[0] = PDMA_BASE + RX_DRX_IDX0;
		sc->int_rx_done_mask = INT_RX_DONE;
		sc->int_tx_done_mask = INT_TXQ0_DONE;
	}

	if (sc->gdma1_base != 0)
		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
		(
		GDM_ICS_EN | /* Enable IP Csum */
		GDM_TCS_EN | /* Enable TCP Csum */
		GDM_UCS_EN | /* Enable UDP Csum */
		GDM_STRPCRC | /* Strip CRC from packet */
		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
		));

	/* allocate Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Tx ring #%d\n",
			    i);
			goto fail;
		}
	}

	sc->tx_ring_mgtqid = 5;
	for (i = 0; i < sc->rx_ring_count; i++) {
		error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Rx ring\n");
			goto fail;
		}
	}

	callout_init(&sc->periodic_ch, 0);
	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not if_alloc()\n");
		error = ENOMEM;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = rt_init;
	ifp->if_ioctl = rt_ioctl;
	ifp->if_start = rt_start;
#define	RT_TX_QLEN	256

	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
	IFQ_SET_READY(&ifp->if_snd);

#ifdef IF_RT_PHY_SUPPORT
	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		error = ENXIO;
		goto fail;
	}
#else
	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
	    NULL);
	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
#endif /* IF_RT_PHY_SUPPORT */

	ether_request_mac(dev, sc->mac_addr);
	ether_ifattach(ifp, sc->mac_addr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
	ifp->if_capenable |= IFCAP_RXCSUM | IFCAP_TXCSUM;

	/* init task queue */
	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);

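	/* upper bound on frames handled per Rx task pass; see rt_rx_eof() */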
	sc->rx_process_limit = 100;

	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->taskqueue);

	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	rt_sysctl_attach(sc);

	/* set up interrupt */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621) ? rt_rt5350_intr : rt_intr,
	    sc, &sc->irqh);
	if (error != 0) {
		printf("%s: could not set up interrupt\n",
			device_get_nameunit(dev));
		goto fail;
	}
#ifdef IF_RT_DEBUG
	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
#endif

	return (0);

fail:
	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < sc->rx_ring_count; i++)
		rt_free_rx_ring(sc, &sc->rx_ring[i]);

	mtx_destroy(&sc->lock);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem);

	if (sc->irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq);

	return (error);
}

/*
 * Set media options.
 */
static int
rt_ifmedia_upd(struct ifnet *ifp)
{
	struct rt_softc *sc;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	sc = ifp->if_softc;
	RT_SOFTC_LOCK(sc);

	mii = device_get_softc(sc->rt_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	RT_SOFTC_UNLOCK(sc);

	return (error);

#else /* !IF_RT_PHY_SUPPORT */

	struct ifmedia *ifm;
	struct ifmedia_entry *ife;

	sc = ifp->if_softc;
	ifm = &sc->rt_ifmedia;
	ife = ifm->ifm_cur;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		device_printf(sc->dev,
		    "AUTO is not supported for multiphy MAC\n");
		return (EINVAL);
	}

	/*
	 * Ignore everything
	 */
	return (0);
#endif /* IF_RT_PHY_SUPPORT */
}

/*
 * Report current media status.
 */
static void
rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
#ifdef IF_RT_PHY_SUPPORT
	struct rt_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	RT_SOFTC_LOCK(sc);
	mii = device_get_softc(sc->rt_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	RT_SOFTC_UNLOCK(sc);
#else /* !IF_RT_PHY_SUPPORT */

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
#endif /* IF_RT_PHY_SUPPORT */
}

static int
rt_detach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");

	RT_SOFTC_LOCK(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);

	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);

	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_free_rx_ring(sc, &sc->rx_ring[i]);

	RT_SOFTC_UNLOCK(sc);

#ifdef IF_RT_PHY_SUPPORT
	if (sc->rt_miibus != NULL)
		device_delete_child(dev, sc->rt_miibus);
#endif

	ether_ifdetach(ifp);
	if_free(ifp);

	taskqueue_free(sc->taskqueue);

	mtx_destroy(&sc->lock);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->irq, sc->irqh);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	return (0);
}

static int
rt_shutdown(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
	rt_stop(sc);

	return (0);
}

static int
rt_suspend(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
	rt_stop(sc);

	return (0);
}

static int
rt_resume(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");

	if (ifp->if_flags & IFF_UP)
		rt_init(sc);

	return (0);
}

/*
 * rt_init_locked - run the initialization process with the mutex held.
 */
static void
rt_init_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif
	int i, ntries;
	uint32_t tmp;

	sc = priv;
	ifp = sc->ifp;
#ifdef IF_RT_PHY_SUPPORT
	mii = device_get_softc(sc->rt_miibus);
#endif

	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");

	RT_SOFTC_ASSERT_LOCKED(sc);

	/* hardware reset */
	//RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
	//rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);

	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
	if (sc->gdma1_base != 0)
		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
		(
		GDM_ICS_EN | /* Enable IP Csum */
		GDM_TCS_EN | /* Enable TCP Csum */
		GDM_UCS_EN | /* Enable UDP Csum */
		GDM_STRPCRC | /* Strip CRC from packet */
		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
		));

	/* disable DMA engine */
	RT_WRITE(sc, sc->pdma_glo_cfg, 0);
	RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff);

	/* wait while DMA engine is busy */
	for (ntries = 0; ntries < 100; ntries++) {
		tmp = RT_READ(sc, sc->pdma_glo_cfg);
		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
			break;
		DELAY(1000);
	}

	if (ntries == 100) {
		device_printf(sc->dev, "timeout waiting for DMA engine\n");
		goto fail;
	}

	/* reset Rx and Tx rings */
	tmp = FE_RST_DRX_IDX0 |
		FE_RST_DTX_IDX3 |
		FE_RST_DTX_IDX2 |
		FE_RST_DTX_IDX1 |
		FE_RST_DTX_IDX0;

	RT_WRITE(sc, sc->pdma_rst_idx, tmp);

	/* XXX switch set mac address */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		/* update TX_BASE_PTRx */
		RT_WRITE(sc, sc->tx_base_ptr[i],
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->tx_max_cnt[i],
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
	}

	/* init Rx ring */
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_reset_rx_ring(sc, &sc->rx_ring[i]);

	/* update RX_BASE_PTRx */
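	/* the CALC index stays one slot behind ring->cur (see rt_rx_eof) */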
	for (i = 0; i < sc->rx_ring_count; i++) {
		RT_WRITE(sc, sc->rx_base_ptr[i],
			sc->rx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->rx_max_cnt[i],
			RT_SOFTC_RX_RING_DATA_COUNT);
		RT_WRITE(sc, sc->rx_calc_idx[i],
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	}

	/* write back DDONE, enable 16-byte burst and RX/TX DMA */
	tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN;
	if (sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621)
		tmp |= (1U << 31);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	/* disable interrupt mitigation */
	RT_WRITE(sc, sc->delay_int_cfg, 0);

	/* clear pending interrupts */
	RT_WRITE(sc, sc->fe_int_status, 0xffffffff);

	/* enable interrupts */
	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621)
		tmp = RT5350_INT_TX_COHERENT |
		    RT5350_INT_RX_COHERENT |
		    RT5350_INT_TXQ3_DONE |
		    RT5350_INT_TXQ2_DONE |
		    RT5350_INT_TXQ1_DONE |
		    RT5350_INT_TXQ0_DONE |
		    RT5350_INT_RXQ1_DONE |
		    RT5350_INT_RXQ0_DONE;
	else
		tmp = CNT_PPE_AF |
		    CNT_GDM_AF |
		    PSE_P2_FC |
		    GDM_CRC_DROP |
		    PSE_BUF_DROP |
		    GDM_OTHER_DROP |
		    PSE_P1_FC |
		    PSE_P0_FC |
		    PSE_FQ_EMPTY |
		    INT_TX_COHERENT |
		    INT_RX_COHERENT |
		    INT_TXQ3_DONE |
		    INT_TXQ2_DONE |
		    INT_TXQ1_DONE |
		    INT_TXQ0_DONE |
		    INT_RX_DONE;

	sc->intr_enable_mask = tmp;

	RT_WRITE(sc, sc->fe_int_enable, tmp);

	if (rt_txrx_enable(sc) != 0)
		goto fail;

#ifdef IF_RT_PHY_SUPPORT
	if (mii != NULL)
		mii_mediachg(mii);
#endif /* IF_RT_PHY_SUPPORT */

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	sc->periodic_round = 0;

	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);

	return;

fail:
	rt_stop_locked(sc);
}

/*
 * rt_init - lock and initialize device.
 */
static void
rt_init(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_init_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_stop_locked - stop TX/RX w/ lock
 */
static void
rt_stop_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = priv;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");

	RT_SOFTC_ASSERT_LOCKED(sc);
	sc->tx_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);
	RT_SOFTC_UNLOCK(sc);
	taskqueue_block(sc->taskqueue);

	/*
	 * Sometimes rt_stop_locked is called from the ISR, and draining
	 * the task queues there can panic; the drains below stay disabled
	 * until that is fixed.
	 */
#ifdef notyet
	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
#endif
	RT_SOFTC_LOCK(sc);

	/* disable interrupts */
	RT_WRITE(sc, sc->fe_int_enable, 0);

	if (sc->rt_chipid != RT_CHIPID_RT5350 &&
	    sc->rt_chipid != RT_CHIPID_MT7620 &&
	    sc->rt_chipid != RT_CHIPID_MT7621) {
		/* reset adapter */
		RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
	}

	if (sc->gdma1_base != 0)
		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
		(
		GDM_ICS_EN | /* Enable IP Csum */
		GDM_TCS_EN | /* Enable TCP Csum */
		GDM_UCS_EN | /* Enable UDP Csum */
		GDM_STRPCRC | /* Strip CRC from packet */
		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
		));
}

static void
rt_stop(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_stop_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_data - transmit packet.
 */
static int
rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
{
	struct ifnet *ifp;
	struct rt_softc_tx_ring *ring;
	struct rt_softc_tx_data *data;
	struct rt_txdesc *desc;
	struct mbuf *m_d;
	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
	int error, ndmasegs, ndescs, i;

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx data: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);

	ifp = sc->ifp;
	ring = &sc->tx_ring[qid];
	desc = &ring->desc[ring->desc_cur];
	data = &ring->data[ring->data_cur];

	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
	    dma_seg, &ndmasegs, 0);
	if (error != 0) {
		/* too many fragments, linearize */

		RT_DPRINTF(sc, RT_DEBUG_TX,
			"could not load mbuf DMA map, trying to linearize "
			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
			ndmasegs, m->m_pkthdr.len, error);

		m_d = m_collapse(m, M_NOWAIT, 16);
		if (m_d == NULL) {
			m_freem(m);
			m = NULL;
			return (ENOMEM);
		}
		m = m_d;

		sc->tx_defrag_packets++;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, m, dma_seg, &ndmasegs, 0);
		if (error != 0) {
			device_printf(sc->dev, "could not load mbuf DMA map: "
			    "ndmasegs=%d, len=%d, error=%d\n",
			    ndmasegs, m->m_pkthdr.len, error);
			m_freem(m);
			return (error);
		}
	}

	if (m->m_pkthdr.len == 0)
		ndmasegs = 0;

	/* determine how many Tx descs are required */
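	/* each Tx descriptor carries up to two segments (sdp0/sdp1 below) */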
	ndescs = 1 + ndmasegs / 2;
	if ((ring->desc_queued + ndescs) >
	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "there are not enough Tx descs\n");

		sc->no_tx_desc_avail++;

		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
		m_freem(m);
		return (EFBIG);
	}

	data->m = m;

	/* set up Tx descs */
	for (i = 0; i < ndmasegs; i += 2) {

		/* TODO: this needs to be refined as MT7620 for example has
		 * a different word3 layout than RT305x and RT5350 (the last
		 * one doesn't use word3 at all). And so does MT7621...
		 */

		if (sc->rt_chipid != RT_CHIPID_MT7621) {
			/* Set destination */
			if (sc->rt_chipid != RT_CHIPID_MT7620)
				desc->dst = (TXDSCR_DST_PORT_GDMA1);

			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				desc->dst |= (TXDSCR_IP_CSUM_GEN |
				    TXDSCR_UDP_CSUM_GEN | TXDSCR_TCP_CSUM_GEN);
			/* Set queue id */
			desc->qn = qid;
			/* No PPPoE */
			desc->pppoe = 0;
			/* No VLAN */
			desc->vid = 0;
		} else {
			desc->vid = 0;
			desc->pppoe = 0;
			desc->qn = 0;
			desc->dst = 2;
		}

		desc->sdp0 = htole32(dma_seg[i].ds_addr);
		desc->sdl0 = htole16(dma_seg[i].ds_len |
		    (((i + 1) == ndmasegs) ? RT_TXDESC_SDL0_LASTSEG : 0));

		if ((i + 1) < ndmasegs) {
			desc->sdp1 = htole32(dma_seg[i + 1].ds_addr);
			desc->sdl1 = htole16(dma_seg[i + 1].ds_len |
			    (((i + 2) == ndmasegs) ? RT_TXDESC_SDL1_LASTSEG : 0));
		} else {
			desc->sdp1 = 0;
			desc->sdl1 = 0;
		}

		if ((i + 2) < ndmasegs) {
			ring->desc_queued++;
			ring->desc_cur = (ring->desc_cur + 1) %
			    RT_SOFTC_TX_RING_DESC_COUNT;
		}
		desc = &ring->desc[ring->desc_cur];
	}

	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
	    "DMA ds_len=%d/%d/%d/%d/%d\n",
	    m->m_pkthdr.len, ndmasegs,
	    (int) dma_seg[0].ds_len,
	    (int) dma_seg[1].ds_len,
	    (int) dma_seg[2].ds_len,
	    (int) dma_seg[3].ds_len,
	    (int) dma_seg[4].ds_len);

	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREWRITE);

	ring->desc_queued++;
	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;

	ring->data_queued++;
	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;

	/* kick Tx */
	RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur);

	return (0);
}

/*
 * rt_start - if_start handler: dequeue packets and hand them to rt_tx_data().
 */
static void
rt_start(struct ifnet *ifp)
{
	struct rt_softc *sc;
	struct mbuf *m;
	int qid = 0 /* XXX must check QoS priority */;

	sc = ifp->if_softc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		m->m_pkthdr.rcvif = NULL;

		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);

		if (sc->tx_ring[qid].data_queued >=
		    RT_SOFTC_TX_RING_DATA_COUNT) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			RT_DPRINTF(sc, RT_DEBUG_TX,
			    "if_start: Tx ring with qid=%d is full\n", qid);

			m_freem(m);

			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

			sc->tx_data_queue_full[qid]++;

			break;
		}

		if (rt_tx_data(sc, m, qid) != 0) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

			break;
		}

		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
	}
}

/*
 * rt_update_promisc - set/clear promiscuous mode.  Not used yet, because
 * filtering is done by the attached Ethernet switch.
 */
static void
rt_update_promisc(struct ifnet *ifp)
{
	struct rt_softc *sc;

	sc = ifp->if_softc;
	printf("%s: %s promiscuous mode\n",
		device_get_nameunit(sc->dev),
		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
}

/*
 * rt_ioctl - ioctl handler.
 */
static int
rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rt_softc *sc;
	struct ifreq *ifr;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif /* IF_RT_PHY_SUPPORT */
	int error, startall;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;

	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		startall = 0;
		RT_SOFTC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    IFF_PROMISC)
					rt_update_promisc(ifp);
			} else {
				rt_init_locked(sc);
				startall = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rt_stop_locked(sc);
		}
		sc->if_flags = ifp->if_flags;
		RT_SOFTC_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
#ifdef IF_RT_PHY_SUPPORT
		mii = device_get_softc(sc->rt_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
#else
		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
#endif /* IF_RT_PHY_SUPPORT */
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

/*
 * rt_periodic - periodic callout handler
 */
static void
rt_periodic(void *arg)
{
	struct rt_softc *sc;

	sc = arg;
	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
}

/*
 * rt_tx_watchdog - Tx watchdog callout handler
 */
static void
rt_tx_watchdog(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;

	if (sc->tx_timer == 0)
		return;

	if (--sc->tx_timer == 0) {
		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
#ifdef notyet
		/*
		 * XXX: Commented out, because the reset breaks input.
		 */
		rt_stop_locked(sc);
		rt_init_locked(sc);
#endif
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		sc->tx_watchdog_timeouts++;
	}
	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
}

/*
 * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
 */
static void
rt_cnt_ppe_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
}

/*
 * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
 */
static void
rt_cnt_gdm_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 Counter Table Almost Full\n");
}

/*
 * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
 */
static void
rt_pse_p2_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port2 (GDMA 2) flow control asserted.\n");
}

/*
 * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
 * interrupt
 */
static void
rt_gdm_crc_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to CRC error\n");
}

/*
 * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
 */
static void
rt_pse_buf_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE discards a packet due to buffer sharing limitation\n");
}

/*
 * rt_gdm_other_drop - Handler of discard on other reason interrupt
 */
static void
rt_gdm_other_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to other reason\n");
}

/*
 * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
 */
static void
rt_pse_p1_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port1 (GDMA 1) flow control asserted.\n");
}

/*
 * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
 */
static void
rt_pse_p0_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port0 (CDMA) flow control asserted.\n");
}

/*
 * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
 */
static void
rt_pse_fq_empty(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE free Q empty threshold reached & forced drop "
		    "condition occurred.\n");
}

/*
 * rt_intr - main ISR
 */
static void
rt_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, sc->fe_int_status);
	RT_WRITE(sc, sc->fe_int_status, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	if (status & CNT_PPE_AF)
		rt_cnt_ppe_af(sc);

	if (status & CNT_GDM_AF)
		rt_cnt_gdm_af(sc);

	if (status & PSE_P2_FC)
		rt_pse_p2_fc(sc);

	if (status & GDM_CRC_DROP)
		rt_gdm_crc_drop(sc);

	if (status & PSE_BUF_DROP)
		rt_pse_buf_drop(sc);

	if (status & GDM_OTHER_DROP)
		rt_gdm_other_drop(sc);

	if (status & PSE_P1_FC)
		rt_pse_p1_fc(sc);

	if (status & PSE_P0_FC)
		rt_pse_p0_fc(sc);

	if (status & PSE_FQ_EMPTY)
		rt_pse_fq_empty(sc);

	if (status & INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);

	if (status & INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);

	if (status & RX_DLY_INT)
		rt_rx_delay_intr(sc);

	if (status & TX_DLY_INT)
		rt_tx_delay_intr(sc);

	if (status & INT_RX_DONE)
		rt_rx_intr(sc, 0);

	if (status & INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);

	if (status & INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);

	if (status & INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);

	if (status & INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}

/*
 * rt_rt5350_intr - main ISR for the RT5350/MT7620/MT7621 SoCs
 */
static void
rt_rt5350_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, sc->fe_int_status);
	RT_WRITE(sc, sc->fe_int_status, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	if (status & RT5350_INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);
	if (status & RT5350_INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);
	if (status & RT5350_RX_DLY_INT)
		rt_rx_delay_intr(sc);
	if (status & RT5350_TX_DLY_INT)
		rt_tx_delay_intr(sc);
	if (status & RT5350_INT_RXQ1_DONE)
		rt_rx_intr(sc, 1);
	if (status & RT5350_INT_RXQ0_DONE)
		rt_rx_intr(sc, 0);
	if (status & RT5350_INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);
	if (status & RT5350_INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);
	if (status & RT5350_INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);
	if (status & RT5350_INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}

static void
rt_tx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");

	sc->tx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, sc->pdma_glo_cfg);
	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		RT_WRITE(sc, sc->tx_base_ptr[i],
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->tx_max_cnt[i],
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_coherent_intr
 */
static void
rt_rx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");

	sc->rx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, sc->pdma_glo_cfg);
	tmp &= ~(FE_RX_DMA_EN);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	/* init Rx ring */
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_reset_rx_ring(sc, &sc->rx_ring[i]);

	for (i = 0; i < sc->rx_ring_count; i++) {
		RT_WRITE(sc, sc->rx_base_ptr[i],
			sc->rx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->rx_max_cnt[i],
			RT_SOFTC_RX_RING_DATA_COUNT);
		RT_WRITE(sc, sc->rx_calc_idx[i],
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_intr - a packet was received
 */
static void
rt_rx_intr(struct rt_softc *sc, int qid)
{
	KASSERT(qid >= 0 && qid < sc->rx_ring_count,
		("%s: Rx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
	sc->rx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

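	/*
	 * Mask further Rx done interrupts for this queue and let the
	 * taskqueue drain the ring; rt_rx_done_task re-enables them.
	 */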
	if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) {
		rt_intr_disable(sc, (sc->int_rx_done_mask << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	}

	sc->intr_pending_mask |= (sc->int_rx_done_mask << qid);
	RT_SOFTC_UNLOCK(sc);
}

static void
rt_rx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
	sc->rx_delay_interrupts++;
}

static void
rt_tx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
	sc->tx_delay_interrupts++;
}

/*
 * rt_tx_intr - transmission of a packet is done
 */
static void
rt_tx_intr(struct rt_softc *sc, int qid)
{

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);

	sc->tx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) {
		rt_intr_disable(sc, (sc->int_tx_done_mask << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	sc->intr_pending_mask |= (sc->int_tx_done_mask << qid);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_rx_done_task - run RX task
 */
static void
rt_rx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int again;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	sc->intr_pending_mask &= ~sc->int_rx_done_mask;

	again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);

	RT_SOFTC_LOCK(sc);

	if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) {
		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	} else {
		rt_intr_enable(sc, sc->int_rx_done_mask);
	}

	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_done_task - check for pending TX task in all queues
 */
static void
rt_tx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t intr_mask;
	int i;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
		if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) {
			sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i);
			rt_tx_eof(sc, &sc->tx_ring[i]);
		}
	}

	sc->tx_timer = 0;

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621)
		intr_mask = (
		    RT5350_INT_TXQ3_DONE |
		    RT5350_INT_TXQ2_DONE |
		    RT5350_INT_TXQ1_DONE |
		    RT5350_INT_TXQ0_DONE);
	else
		intr_mask = (
		    INT_TXQ3_DONE |
		    INT_TXQ2_DONE |
		    INT_TXQ1_DONE |
		    INT_TXQ0_DONE);

	RT_SOFTC_LOCK(sc);

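	/* re-enable only Tx done interrupts that are masked and not pending */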
	rt_intr_enable(sc, ~sc->intr_pending_mask &
	    (sc->intr_disable_mask & intr_mask));

	if (sc->intr_pending_mask & intr_mask) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "Tx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	RT_SOFTC_UNLOCK(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		rt_start(ifp);
}

/*
 * rt_periodic_task - run periodic task
 */
static void
rt_periodic_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
	    sc->periodic_round);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	RT_SOFTC_LOCK(sc);
	sc->periodic_round++;
	rt_update_stats(sc);

	if ((sc->periodic_round % 10) == 0) {
		rt_update_raw_counters(sc);
		rt_watchdog(sc);
	}

	RT_SOFTC_UNLOCK(sc);
	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
}

/*
 * rt_rx_eof - check for frames completed by the DMA engine and pass them
 * to the network subsystem.
 */
static int
rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit)
{
	struct ifnet *ifp;
/*	struct rt_softc_rx_ring *ring; */
	struct rt_rxdesc *desc;
	struct rt_softc_rx_data *data;
	struct mbuf *m, *mnew;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dma_map;
	uint32_t index, desc_flags;
	int error, nsegs, len, nframes;

	ifp = sc->ifp;
/*	ring = &sc->rx_ring[0]; */

	nframes = 0;

	while (limit != 0) {
		index = RT_READ(sc, sc->rx_drx_idx[0]);
		if (ring->cur == index)
			break;

		desc = &ring->desc[ring->cur];
		data = &ring->data[ring->cur];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef IF_RT_DEBUG
		if (sc->debug & RT_DEBUG_RX) {
			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
			hexdump(desc, 16, 0, 0);
			printf("-----------------------------------\n");
		}
#endif

		/* XXX sometimes the device doesn't set the DDONE bit */
#ifdef DDONE_FIXED
		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
			break;
		}
#endif

		len = le16toh(desc->sdl0) & 0x3fff;
		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);

		nframes++;

		mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (mnew == NULL) {
			sc->rx_mbuf_alloc_errors++;
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			goto skip;
		}

		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			RT_DPRINTF(sc, RT_DEBUG_RX,
			    "could not load Rx mbuf DMA map: "
			    "error=%d, nsegs=%d\n",
			    error, nsegs);

			m_freem(mnew);

			sc->rx_mbuf_dmamap_errors++;
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

			goto skip;
		}

		KASSERT(nsegs == 1, ("%s: too many DMA segments",
			device_get_nameunit(sc->dev)));

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

		dma_map = data->dma_map;
		data->dma_map = ring->spare_dma_map;
		ring->spare_dma_map = dma_map;

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			BUS_DMASYNC_PREREAD);

		m = data->m;
		desc_flags = desc->word3;

		data->m = mnew;
		/* offset the buffer by 2 so the Rx IP header is 4-byte aligned */
		desc->sdp0 = htole32(segs[0].ds_addr + 2);
		desc->sdl0 = htole32(segs[0].ds_len - 2);
		desc->word3 = 0;

		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);

		m->m_pkthdr.rcvif = ifp;
		/* advance by 2 to match the alignment, after sdp0 = addr + 2 */
		m->m_data += 2;
		m->m_pkthdr.len = m->m_len = len;

		/* check for checksum errors */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for a valid checksum */
			if (desc_flags & (sc->csum_fail_ip | sc->csum_fail_l4)) {
				RT_DPRINTF(sc, RT_DEBUG_RX,
				    "rxdesc: checksum error\n");

				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

				if (!(ifp->if_flags & IFF_PROMISC)) {
					m_freem(m);
					goto skip;
				}
			}
			if ((desc_flags & sc->csum_fail_ip) == 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_flags &= ~M_HASFCS;
		}

		(*ifp->if_input)(ifp, m);
skip:
		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;

		limit--;
	}

	if (ring->cur == 0)
		RT_WRITE(sc, sc->rx_calc_idx[0],
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	else
		RT_WRITE(sc, sc->rx_calc_idx[0],
			ring->cur - 1);

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);

	sc->rx_packets += nframes;

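	/* nonzero tells the caller the limit was hit and work may remain */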
	return (limit == 0);
}

/*
 * rt_tx_eof - check for successfully transmitted frames and mark their
 * descriptors as free.
 */
static void
rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
{
	struct ifnet *ifp;
	struct rt_txdesc *desc;
	struct rt_softc_tx_data *data;
	uint32_t index;
	int ndescs, nframes;

	ifp = sc->ifp;

	ndescs = 0;
	nframes = 0;

	for (;;) {
		index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]);
		if (ring->desc_next == index)
			break;

		ndescs++;

		desc = &ring->desc[ring->desc_next];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
			nframes++;

			data = &ring->data[ring->data_next];

			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

			m_freem(data->m);

			data->m = NULL;

			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

			RT_SOFTC_TX_RING_LOCK(ring);
			ring->data_queued--;
			ring->data_next = (ring->data_next + 1) %
			    RT_SOFTC_TX_RING_DATA_COUNT;
			RT_SOFTC_TX_RING_UNLOCK(ring);
		}

		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		RT_SOFTC_TX_RING_LOCK(ring);
		ring->desc_queued--;
		ring->desc_next = (ring->desc_next + 1) %
		    RT_SOFTC_TX_RING_DESC_COUNT;
		RT_SOFTC_TX_RING_UNLOCK(ring);
	}

	RT_DPRINTF(sc, RT_DEBUG_TX,
	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
	    nframes);
}
2039 
2040 /*
2041  * rt_update_stats - query statistics counters and update related variables.
2042  */
2043 static void
2044 rt_update_stats(struct rt_softc *sc)
2045 {
2046 	struct ifnet *ifp;
2047 
2048 	ifp = sc->ifp;
	RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistics\n");
2050 	/* XXX do update stats here */
2051 }
2052 
2053 /*
2054  * rt_watchdog - reinit device on watchdog event.
2055  */
2056 static void
2057 rt_watchdog(struct rt_softc *sc)
2058 {
2059 	uint32_t tmp;
2060 #ifdef notyet
2061 	int ntries;
2062 #endif
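	/* Only the older RT305x-class chips expose this PSE queue status. */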
	if (sc->rt_chipid != RT_CHIPID_RT5350 &&
	    sc->rt_chipid != RT_CHIPID_MT7620 &&
	    sc->rt_chipid != RT_CHIPID_MT7621) {
		tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);

		RT_DPRINTF(sc, RT_DEBUG_WATCHDOG,
		    "watchdog: CDMA_OQ_STA=0x%08x\n", tmp);
	}
2071 	/* XXX: do not reset */
2072 #ifdef notyet
2073 	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
2074 		sc->tx_queue_not_empty[0]++;
2075 
2076 		for (ntries = 0; ntries < 10; ntries++) {
2077 			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2078 			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
2079 				break;
2080 
2081 			DELAY(1);
2082 		}
2083 	}
2084 
2085 	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
2086 		sc->tx_queue_not_empty[1]++;
2087 
2088 		for (ntries = 0; ntries < 10; ntries++) {
2089 			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2090 			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
2091 				break;
2092 
2093 			DELAY(1);
2094 		}
2095 	}
2096 #endif
2097 }
2098 
2099 /*
 * rt_update_raw_counters - accumulate the raw GDMA hardware counters.
2101  */
2102 static void
2103 rt_update_raw_counters(struct rt_softc *sc)
2104 {
2105 
	sc->tx_bytes          += RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
	sc->tx_packets        += RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
	sc->tx_skip           += RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
	sc->tx_collision      += RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);

	sc->rx_bytes          += RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
	sc->rx_packets        += RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
	sc->rx_crc_err        += RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
	sc->rx_short_err      += RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
	sc->rx_long_err       += RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
	sc->rx_phy_err        += RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
	sc->rx_fifo_overflows += RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
2118 }
2119 
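/*
 * rt_intr_enable - unmask the given interrupts; the controller is
 * reprogrammed with the effective mask (enable & ~disable).
 */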
2120 static void
2121 rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
2122 {
2123 	uint32_t tmp;
2124 
2125 	sc->intr_disable_mask &= ~intr_mask;
2126 	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2127 	RT_WRITE(sc, sc->fe_int_enable, tmp);
2128 }
2129 
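/*
 * rt_intr_disable - mask the given interrupts.
 */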
2130 static void
2131 rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
2132 {
2133 	uint32_t tmp;
2134 
2135 	sc->intr_disable_mask |= intr_mask;
2136 	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2137 	RT_WRITE(sc, sc->fe_int_enable, tmp);
2138 }
2139 
2140 /*
2141  * rt_txrx_enable - enable TX/RX DMA
2142  */
2143 static int
2144 rt_txrx_enable(struct rt_softc *sc)
2145 {
2146 	struct ifnet *ifp;
2147 	uint32_t tmp;
2148 	int ntries;
2149 
2150 	ifp = sc->ifp;
2151 
2152 	/* enable Tx/Rx DMA engine */
2153 	for (ntries = 0; ntries < 200; ntries++) {
2154 		tmp = RT_READ(sc, sc->pdma_glo_cfg);
2155 		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
2156 			break;
2157 
2158 		DELAY(1000);
2159 	}
2160 
2161 	if (ntries == 200) {
2162 		device_printf(sc->dev, "timeout waiting for DMA engine\n");
2163 		return (-1);
2164 	}
2165 
2166 	DELAY(50);
2167 
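	/* DMA engines are idle; enable Rx/Tx DMA and Tx write-back of DDONE. */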
	tmp |= FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
2169 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
2170 
2171 	/* XXX set Rx filter */
2172 	return (0);
2173 }
2174 
2175 /*
2176  * rt_alloc_rx_ring - allocate RX DMA ring buffer
2177  */
2178 static int
2179 rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid)
2180 {
2181 	struct rt_rxdesc *desc;
2182 	struct rt_softc_rx_data *data;
2183 	bus_dma_segment_t segs[1];
2184 	int i, nsegs, error;
2185 
2186 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2187 		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2188 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
2189 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2190 		0, NULL, NULL, &ring->desc_dma_tag);
2191 	if (error != 0)	{
2192 		device_printf(sc->dev,
2193 		    "could not create Rx desc DMA tag\n");
2194 		goto fail;
2195 	}
2196 
2197 	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2198 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2199 	if (error != 0) {
2200 		device_printf(sc->dev,
2201 		    "could not allocate Rx desc DMA memory\n");
2202 		goto fail;
2203 	}
2204 
2205 	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2206 		ring->desc,
2207 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2208 		rt_dma_map_addr, &ring->desc_phys_addr, 0);
2209 	if (error != 0) {
2210 		device_printf(sc->dev, "could not load Rx desc DMA map\n");
2211 		goto fail;
2212 	}
2213 
2214 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2215 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2216 		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
2217 		&ring->data_dma_tag);
2218 	if (error != 0)	{
2219 		device_printf(sc->dev,
2220 		    "could not create Rx data DMA tag\n");
2221 		goto fail;
2222 	}
2223 
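	/*
	 * Attach a page-sized jumbo mbuf to every slot and point the
	 * descriptor at its bus address.
	 */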
2224 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2225 		desc = &ring->desc[i];
2226 		data = &ring->data[i];
2227 
2228 		error = bus_dmamap_create(ring->data_dma_tag, 0,
2229 		    &data->dma_map);
2230 		if (error != 0)	{
2231 			device_printf(sc->dev, "could not create Rx data DMA "
2232 			    "map\n");
2233 			goto fail;
2234 		}
2235 
2236 		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2237 		    MJUMPAGESIZE);
2238 		if (data->m == NULL) {
2239 			device_printf(sc->dev, "could not allocate Rx mbuf\n");
2240 			error = ENOMEM;
2241 			goto fail;
2242 		}
2243 
2244 		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
2245 
2246 		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
2247 		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
2248 		if (error != 0)	{
2249 			device_printf(sc->dev,
2250 			    "could not load Rx mbuf DMA map\n");
2251 			goto fail;
2252 		}
2253 
2254 		KASSERT(nsegs == 1, ("%s: too many DMA segments",
2255 			device_get_nameunit(sc->dev)));
2256 
		/* Offset by 2 bytes so the IP header ends up 4-byte aligned. */
		desc->sdp0 = htole32(segs[0].ds_addr + 2);
		desc->sdl0 = htole16(segs[0].ds_len - 2);
2260 	}
2261 
2262 	error = bus_dmamap_create(ring->data_dma_tag, 0,
2263 	    &ring->spare_dma_map);
2264 	if (error != 0) {
2265 		device_printf(sc->dev,
2266 		    "could not create Rx spare DMA map\n");
2267 		goto fail;
2268 	}
2269 
2270 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2271 		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2272 	ring->qid = qid;
2273 	return (0);
2274 
2275 fail:
2276 	rt_free_rx_ring(sc, ring);
2277 	return (error);
2278 }
2279 
2280 /*
2281  * rt_reset_rx_ring - reset RX ring buffer
2282  */
2283 static void
2284 rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2285 {
2286 	struct rt_rxdesc *desc;
2287 	int i;
2288 
2289 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2290 		desc = &ring->desc[i];
2291 		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2292 	}
2293 
2294 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2295 		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2296 	ring->cur = 0;
2297 }
2298 
2299 /*
2300  * rt_free_rx_ring - free memory used by RX ring buffer
2301  */
2302 static void
2303 rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2304 {
2305 	struct rt_softc_rx_data *data;
2306 	int i;
2307 
2308 	if (ring->desc != NULL) {
2309 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2310 			BUS_DMASYNC_POSTWRITE);
2311 		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2312 		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2313 			ring->desc_dma_map);
2314 	}
2315 
2316 	if (ring->desc_dma_tag != NULL)
2317 		bus_dma_tag_destroy(ring->desc_dma_tag);
2318 
2319 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2320 		data = &ring->data[i];
2321 
2322 		if (data->m != NULL) {
2323 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2324 				BUS_DMASYNC_POSTREAD);
2325 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2326 			m_freem(data->m);
2327 		}
2328 
2329 		if (data->dma_map != NULL)
2330 			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2331 	}
2332 
2333 	if (ring->spare_dma_map != NULL)
2334 		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
2335 
2336 	if (ring->data_dma_tag != NULL)
2337 		bus_dma_tag_destroy(ring->data_dma_tag);
2338 }
2339 
2340 /*
2341  * rt_alloc_tx_ring - allocate TX ring buffer
2342  */
2343 static int
2344 rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2345 {
2346 	struct rt_softc_tx_data *data;
2347 	int error, i;
2348 
2349 	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2350 
2351 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2352 		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2353 		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2354 		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2355 		0, NULL, NULL, &ring->desc_dma_tag);
2356 	if (error != 0) {
2357 		device_printf(sc->dev,
2358 		    "could not create Tx desc DMA tag\n");
2359 		goto fail;
2360 	}
2361 
2362 	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2363 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2364 	if (error != 0)	{
2365 		device_printf(sc->dev,
2366 		    "could not allocate Tx desc DMA memory\n");
2367 		goto fail;
2368 	}
2369 
2370 	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2371 	    ring->desc,	(RT_SOFTC_TX_RING_DESC_COUNT *
2372 	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
2373 	    &ring->desc_phys_addr, 0);
2374 	if (error != 0) {
2375 		device_printf(sc->dev, "could not load Tx desc DMA map\n");
2376 		goto fail;
2377 	}
2378 
2379 	ring->desc_queued = 0;
2380 	ring->desc_cur = 0;
2381 	ring->desc_next = 0;
2382 
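	/*
	 * seg0 provides RT_TX_DATA_SEG0_SIZE of contiguous DMA memory per
	 * data slot; it appears to stage the leading segment of each
	 * transmitted frame.
	 */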
2383 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2384 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2385 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2386 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2387 	    0, NULL, NULL, &ring->seg0_dma_tag);
2388 	if (error != 0) {
2389 		device_printf(sc->dev,
2390 		    "could not create Tx seg0 DMA tag\n");
2391 		goto fail;
2392 	}
2393 
2394 	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2395 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2396 	if (error != 0) {
2397 		device_printf(sc->dev,
2398 		    "could not allocate Tx seg0 DMA memory\n");
2399 		goto fail;
2400 	}
2401 
2402 	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2403 	    ring->seg0,
2404 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2405 	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2406 	if (error != 0) {
2407 		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2408 		goto fail;
2409 	}
2410 
2411 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2412 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2413 	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2414 	    &ring->data_dma_tag);
2415 	if (error != 0) {
2416 		device_printf(sc->dev,
2417 		    "could not create Tx data DMA tag\n");
2418 		goto fail;
2419 	}
2420 
2421 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2422 		data = &ring->data[i];
2423 
2424 		error = bus_dmamap_create(ring->data_dma_tag, 0,
2425 		    &data->dma_map);
2426 		if (error != 0) {
2427 			device_printf(sc->dev, "could not create Tx data DMA "
2428 			    "map\n");
2429 			goto fail;
2430 		}
2431 	}
2432 
2433 	ring->data_queued = 0;
2434 	ring->data_cur = 0;
2435 	ring->data_next = 0;
2436 
2437 	ring->qid = qid;
2438 	return (0);
2439 
2440 fail:
2441 	rt_free_tx_ring(sc, ring);
2442 	return (error);
2443 }
2444 
2445 /*
2446  * rt_reset_tx_ring - reset TX ring buffer to empty state
2447  */
2448 static void
2449 rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2450 {
2451 	struct rt_softc_tx_data *data;
2452 	struct rt_txdesc *desc;
2453 	int i;
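	/* Invalidate every descriptor by zeroing its segment lengths. */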
2454 
2455 	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2456 		desc = &ring->desc[i];
2457 
2458 		desc->sdl0 = 0;
2459 		desc->sdl1 = 0;
2460 	}
2461 
2462 	ring->desc_queued = 0;
2463 	ring->desc_cur = 0;
2464 	ring->desc_next = 0;
2465 
2466 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2467 		BUS_DMASYNC_PREWRITE);
2468 
2469 	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2470 		BUS_DMASYNC_PREWRITE);
2471 
2472 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2473 		data = &ring->data[i];
2474 
2475 		if (data->m != NULL) {
2476 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2477 				BUS_DMASYNC_POSTWRITE);
2478 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2479 			m_freem(data->m);
2480 			data->m = NULL;
2481 		}
2482 	}
2483 
2484 	ring->data_queued = 0;
2485 	ring->data_cur = 0;
2486 	ring->data_next = 0;
2487 }
2488 
2489 /*
 * rt_free_tx_ring - free memory used by TX ring buffer
2491  */
2492 static void
2493 rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2494 {
2495 	struct rt_softc_tx_data *data;
2496 	int i;
2497 
2498 	if (ring->desc != NULL) {
2499 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2500 			BUS_DMASYNC_POSTWRITE);
2501 		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2502 		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2503 			ring->desc_dma_map);
2504 	}
2505 
2506 	if (ring->desc_dma_tag != NULL)
2507 		bus_dma_tag_destroy(ring->desc_dma_tag);
2508 
2509 	if (ring->seg0 != NULL) {
2510 		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2511 			BUS_DMASYNC_POSTWRITE);
2512 		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2513 		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2514 			ring->seg0_dma_map);
2515 	}
2516 
2517 	if (ring->seg0_dma_tag != NULL)
2518 		bus_dma_tag_destroy(ring->seg0_dma_tag);
2519 
2520 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2521 		data = &ring->data[i];
2522 
2523 		if (data->m != NULL) {
2524 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2525 				BUS_DMASYNC_POSTWRITE);
2526 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2527 			m_freem(data->m);
2528 		}
2529 
2530 		if (data->dma_map != NULL)
2531 			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2532 	}
2533 
2534 	if (ring->data_dma_tag != NULL)
2535 		bus_dma_tag_destroy(ring->data_dma_tag);
2536 
2537 	mtx_destroy(&ring->lock);
2538 }
2539 
2540 /*
2541  * rt_dma_map_addr - get address of busdma segment
2542  */
2543 static void
2544 rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2545 {
2546 	if (error != 0)
2547 		return;
2548 
2549 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2550 
2551 	*(bus_addr_t *) arg = segs[0].ds_addr;
2552 }
2553 
2554 /*
2555  * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2556  */
2557 static void
2558 rt_sysctl_attach(struct rt_softc *sc)
2559 {
2560 	struct sysctl_ctx_list *ctx;
2561 	struct sysctl_oid *tree;
2562 	struct sysctl_oid *stats;
2563 
2564 	ctx = device_get_sysctl_ctx(sc->dev);
2565 	tree = device_get_sysctl_tree(sc->dev);
2566 
2567 	/* statistic counters */
2568 	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "stats", CTLFLAG_RD, 0, "statistics");
2570 
2571 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2572 	    "interrupts", CTLFLAG_RD, &sc->interrupts,
2573 	    "all interrupts");
2574 
2575 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2576 	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
2577 	    "Tx coherent interrupts");
2578 
2579 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2580 	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
2581 	    "Rx coherent interrupts");
2582 
2583 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2584 	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0],
2585 	    "Rx interrupts");
2586 
2587 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2588 	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts,
2589 	    "Rx delay interrupts");
2590 
2591 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2592 	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3],
2593 	    "Tx AC3 interrupts");
2594 
2595 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2596 	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2],
2597 	    "Tx AC2 interrupts");
2598 
2599 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2600 	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1],
2601 	    "Tx AC1 interrupts");
2602 
2603 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2604 	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0],
2605 	    "Tx AC0 interrupts");
2606 
2607 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2608 	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
2609 	    "Tx delay interrupts");
2610 
2611 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2612 	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
2613 	    0, "Tx AC3 descriptors queued");
2614 
2615 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2616 	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
2617 	    0, "Tx AC3 data queued");
2618 
2619 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2620 	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
2621 	    0, "Tx AC2 descriptors queued");
2622 
2623 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2624 	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
2625 	    0, "Tx AC2 data queued");
2626 
2627 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2628 	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
2629 	    0, "Tx AC1 descriptors queued");
2630 
2631 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2632 	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
2633 	    0, "Tx AC1 data queued");
2634 
2635 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2636 	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
2637 	    0, "Tx AC0 descriptors queued");
2638 
2639 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2640 	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
2641 	    0, "Tx AC0 data queued");
2642 
2643 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2644 	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
2645 	    "Tx AC3 data queue full");
2646 
2647 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2648 	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
2649 	    "Tx AC2 data queue full");
2650 
2651 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2652 	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
2653 	    "Tx AC1 data queue full");
2654 
2655 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2656 	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
2657 	    "Tx AC0 data queue full");
2658 
2659 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2660 	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
2661 	    "Tx watchdog timeouts");
2662 
2663 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2664 	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets,
2665 	    "Tx defragmented packets");
2666 
2667 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2668 	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail,
2669 	    "no Tx descriptors available");
2670 
2671 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2672 	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
2673 	    "Rx mbuf allocation errors");
2674 
2675 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2676 	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
2677 	    "Rx mbuf DMA mapping errors");
2678 
2679 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2680 	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
2681 	    "Tx queue 0 not empty");
2682 
2683 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2684 	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
2685 	    "Tx queue 1 not empty");
2686 
2687 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2688 	    "rx_packets", CTLFLAG_RD, &sc->rx_packets,
2689 	    "Rx packets");
2690 
2691 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2692 	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err,
2693 	    "Rx CRC errors");
2694 
2695 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2696 	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err,
2697 	    "Rx PHY errors");
2698 
2699 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2700 	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets,
2701 	    "Rx duplicate packets");
2702 
2703 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2704 	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows,
2705 	    "Rx FIFO overflows");
2706 
2707 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2708 	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes,
2709 	    "Rx bytes");
2710 
2711 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2712 	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err,
2713 	    "Rx too long frame errors");
2714 
2715 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2716 	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err,
2717 	    "Rx too short frame errors");
2718 
2719 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2720 	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes,
2721 	    "Tx bytes");
2722 
2723 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2724 	    "tx_packets", CTLFLAG_RD, &sc->tx_packets,
2725 	    "Tx packets");
2726 
2727 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2728 	    "tx_skip", CTLFLAG_RD, &sc->tx_skip,
2729 	    "Tx skip count for GDMA ports");
2730 
2731 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2732 	    "tx_collision", CTLFLAG_RD, &sc->tx_collision,
2733 	    "Tx collision count for GDMA ports");
2734 }
2735 
2736 #ifdef IF_RT_PHY_SUPPORT
2737 static int
2738 rt_miibus_readreg(device_t dev, int phy, int reg)
2739 {
2740 	struct rt_softc *sc = device_get_softc(dev);
2741 
2742 	/*
	 * PSEUDO_PHYAD is a special address used to indicate that a
	 * switch is attached; no real PHY uses the PSEUDO_PHYAD (0x1e)
	 * address.
2745 	 */
2746 	if (phy == 31) {
		/* Return a fake PHY ID so the bfeswitch driver attaches. */
		switch (reg) {
		case MII_BMSR:
			return (BMSR_EXTSTAT | BMSR_MEDIAMASK);
		case MII_PHYIDR1:
			return (0x40);		/* high word of the fake ID */
		case MII_PHYIDR2:
			return (0x6250);	/* detected as a bfeswitch */
2755 		}
2756 	}
2757 
	/* Wait for any previous command to complete. */
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO)
		;
	/* Issue the read command and poll until it completes. */
	RT_WRITE(sc, MDIO_ACCESS,
	    MDIO_CMD_ONGO |
	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO)
		;
2765 
2766 	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2767 }
2768 
2769 static int
2770 rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2771 {
2772 	struct rt_softc *sc = device_get_softc(dev);
2773 
	/* Wait for any previous command to complete. */
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO)
		;
	/* Issue the write command and poll until it completes. */
	RT_WRITE(sc, MDIO_ACCESS,
	    MDIO_CMD_ONGO | MDIO_CMD_WR |
	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
	    (val & MDIO_PHY_DATA_MASK));
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO)
		;
2782 
2783 	return (0);
2784 }
2785 
2786 void
2787 rt_miibus_statchg(device_t dev)
2788 {
2789 	struct rt_softc *sc = device_get_softc(dev);
2790 	struct mii_data *mii;
2791 
2792 	mii = device_get_softc(sc->rt_miibus);
2793 
2794 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2795 	    (IFM_ACTIVE | IFM_AVALID)) {
2796 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2797 		case IFM_10_T:
2798 		case IFM_100_TX:
2799 			/* XXX check link here */
2800 			sc->flags |= 1;
2801 			break;
2802 		default:
2803 			break;
2804 		}
2805 	}
2806 }
2807 #endif /* IF_RT_PHY_SUPPORT */
2808 
2809 static device_method_t rt_dev_methods[] =
2810 {
2811 	DEVMETHOD(device_probe, rt_probe),
2812 	DEVMETHOD(device_attach, rt_attach),
2813 	DEVMETHOD(device_detach, rt_detach),
2814 	DEVMETHOD(device_shutdown, rt_shutdown),
2815 	DEVMETHOD(device_suspend, rt_suspend),
2816 	DEVMETHOD(device_resume, rt_resume),
2817 
2818 #ifdef IF_RT_PHY_SUPPORT
2819 	/* MII interface */
2820 	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
2821 	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
2822 	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
2823 #endif
2824 
2825 	DEVMETHOD_END
2826 };
2827 
2828 static driver_t rt_driver =
2829 {
2830 	"rt",
2831 	rt_dev_methods,
2832 	sizeof(struct rt_softc)
2833 };
2834 
2835 static devclass_t rt_dev_class;
2836 
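
/* Attach to the nexus root bus, and to simplebus when running with FDT. */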
2837 DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
2838 #ifdef FDT
2839 DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0);
2840 #endif
2841 
2842 MODULE_DEPEND(rt, ether, 1, 1, 1);
2843 MODULE_DEPEND(rt, miibus, 1, 1, 1);
2844 
2845