xref: /freebsd-12.1/sys/dev/rt/if_rt.c (revision cc5bbcb2)
1 /*-
2  * Copyright (c) 2015-2016, Stanislav Galabov
3  * Copyright (c) 2014, Aleksandr A. Mityaev
4  * Copyright (c) 2011, Aleksandr Rybalko
5  * based on hard work
6  * by Alexander Egorenkov <[email protected]>
7  * and by Damien Bergamini <[email protected]>
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice unmodified, this list of conditions, and the following
15  *    disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "if_rtvar.h"
37 #include "if_rtreg.h"
38 
39 #include <net/if.h>
40 #include <net/if_var.h>
41 #include <net/if_arp.h>
42 #include <net/ethernet.h>
43 #include <net/if_dl.h>
44 #include <net/if_media.h>
45 #include <net/if_types.h>
46 #include <net/if_vlan_var.h>
47 
48 #include <net/bpf.h>
49 
50 #include <machine/bus.h>
51 #include <machine/cache.h>
52 #include <machine/cpufunc.h>
53 #include <machine/resource.h>
54 #include <vm/vm_param.h>
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57 #include <machine/pmap.h>
58 #include <sys/bus.h>
59 #include <sys/rman.h>
60 
61 #include "opt_platform.h"
62 #include "opt_rt305x.h"
63 
64 #ifdef FDT
65 #include <dev/ofw/openfirm.h>
66 #include <dev/ofw/ofw_bus.h>
67 #include <dev/ofw/ofw_bus_subr.h>
68 #endif
69 
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
72 
73 #if 0
74 #include <mips/rt305x/rt305x_sysctlvar.h>
75 #include <mips/rt305x/rt305xreg.h>
76 #endif
77 
78 #ifdef IF_RT_PHY_SUPPORT
79 #include "miibus_if.h"
80 #endif
81 
82 /*
83  * Defines and macros
84  */
85 #define	RT_MAX_AGG_SIZE			3840
86 
87 #define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE
88 
89 #define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
90 #define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)
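/*
 * Example (hypothetical field): given
 *	#define FOO	0x0000ff00
 *	#define FOO_S	8
 * RT_MS(0x1234, FOO) extracts 0x12 and RT_SM(0x12, FOO) yields 0x1200.
 */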
91 
92 #define	RT_TX_WATCHDOG_TIMEOUT		5
93 
94 #define RT_CHIPID_RT3050 0x3050
95 #define RT_CHIPID_RT5350 0x5350
96 #define RT_CHIPID_MT7620 0x7620
97 #define RT_CHIPID_MT7621 0x7621
98 
99 #ifdef FDT
100 /* more specific and newer models should go first */
101 static const struct ofw_compat_data rt_compat_data[] = {
102 	{ "ralink,rt3050-eth",		RT_CHIPID_RT3050 },
103 	{ "ralink,rt3352-eth",		RT_CHIPID_RT3050 },
104 	{ "ralink,rt3883-eth",		RT_CHIPID_RT3050 },
105 	{ "ralink,rt5350-eth",		RT_CHIPID_RT5350 },
106 	{ "ralink,mt7620a-eth",		RT_CHIPID_MT7620 },
107 	{ "mediatek,mt7620-eth",	RT_CHIPID_MT7620 },
108 	{ "ralink,mt7621-eth",		RT_CHIPID_MT7621 },
109 	{ "mediatek,mt7621-eth",	RT_CHIPID_MT7621 },
110 	{ NULL,				0 }
111 };
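
/*
 * A matching FDT node looks roughly like this (hypothetical unit
 * address):
 *
 *	ethernet@10100000 {
 *		compatible = "ralink,rt5350-eth";
 *	};
 */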
112 #endif
113 
114 /*
115  * Static function prototypes
116  */
117 static int	rt_probe(device_t dev);
118 static int	rt_attach(device_t dev);
119 static int	rt_detach(device_t dev);
120 static int	rt_shutdown(device_t dev);
121 static int	rt_suspend(device_t dev);
122 static int	rt_resume(device_t dev);
123 static void	rt_init_locked(void *priv);
124 static void	rt_init(void *priv);
125 static void	rt_stop_locked(void *priv);
126 static void	rt_stop(void *priv);
127 static void	rt_start(struct ifnet *ifp);
128 static int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
129 static void	rt_periodic(void *arg);
130 static void	rt_tx_watchdog(void *arg);
131 static void	rt_intr(void *arg);
132 static void	rt_rt5350_intr(void *arg);
133 static void	rt_tx_coherent_intr(struct rt_softc *sc);
134 static void	rt_rx_coherent_intr(struct rt_softc *sc);
135 static void	rt_rx_delay_intr(struct rt_softc *sc);
136 static void	rt_tx_delay_intr(struct rt_softc *sc);
137 static void	rt_rx_intr(struct rt_softc *sc, int qid);
138 static void	rt_tx_intr(struct rt_softc *sc, int qid);
139 static void	rt_rx_done_task(void *context, int pending);
140 static void	rt_tx_done_task(void *context, int pending);
141 static void	rt_periodic_task(void *context, int pending);
142 static int	rt_rx_eof(struct rt_softc *sc,
143 		    struct rt_softc_rx_ring *ring, int limit);
144 static void	rt_tx_eof(struct rt_softc *sc,
145 		    struct rt_softc_tx_ring *ring);
146 static void	rt_update_stats(struct rt_softc *sc);
147 static void	rt_watchdog(struct rt_softc *sc);
148 static void	rt_update_raw_counters(struct rt_softc *sc);
149 static void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
150 static void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
151 static int	rt_txrx_enable(struct rt_softc *sc);
152 static int	rt_alloc_rx_ring(struct rt_softc *sc,
153 		    struct rt_softc_rx_ring *ring, int qid);
154 static void	rt_reset_rx_ring(struct rt_softc *sc,
155 		    struct rt_softc_rx_ring *ring);
156 static void	rt_free_rx_ring(struct rt_softc *sc,
157 		    struct rt_softc_rx_ring *ring);
158 static int	rt_alloc_tx_ring(struct rt_softc *sc,
159 		    struct rt_softc_tx_ring *ring, int qid);
160 static void	rt_reset_tx_ring(struct rt_softc *sc,
161 		    struct rt_softc_tx_ring *ring);
162 static void	rt_free_tx_ring(struct rt_softc *sc,
163 		    struct rt_softc_tx_ring *ring);
164 static void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
165 		    int nseg, int error);
166 static void	rt_sysctl_attach(struct rt_softc *sc);
167 #ifdef IF_RT_PHY_SUPPORT
168 void		rt_miibus_statchg(device_t);
169 static int	rt_miibus_readreg(device_t, int, int);
170 static int	rt_miibus_writereg(device_t, int, int, int);
171 #endif
172 static int	rt_ifmedia_upd(struct ifnet *);
173 static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);
174 
175 static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
176 #ifdef IF_RT_DEBUG
177 static int rt_debug = 0;
178 SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
179     "RT debug level");
180 #endif
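
/*
 * With IF_RT_DEBUG compiled in, the debug bitmask can be set as a
 * loader tunable or at runtime, e.g.:
 *
 *	sysctl hw.rt.debug=-1	(enable all RT_DPRINTF categories)
 */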
181 
182 static int
183 rt_probe(device_t dev)
184 {
185 	struct rt_softc *sc = device_get_softc(dev);
186 	char buf[80];
187 #ifdef FDT
188 	const struct ofw_compat_data *cd;
189 
190 	cd = ofw_bus_search_compatible(dev, rt_compat_data);
191 	if (cd->ocd_data == 0)
192 		return (ENXIO);
193 
194 	sc->rt_chipid = (unsigned int)(cd->ocd_data);
195 #else
196 #if defined(MT7620)
197 	sc->rt_chipid = RT_CHIPID_MT7620;
198 #elif defined(MT7621)
199 	sc->rt_chipid = RT_CHIPID_MT7621;
200 #elif defined(RT5350)
201 	sc->rt_chipid = RT_CHIPID_RT5350;
202 #else
203 	sc->rt_chipid = RT_CHIPID_RT3050;
204 #endif
205 #endif
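	/* e.g. chipid 0x7620 -> "Ralink MT7620 onChip Ethernet driver" */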
206 	snprintf(buf, sizeof(buf), "Ralink %cT%x onChip Ethernet driver",
207 		sc->rt_chipid >= 0x7600 ? 'M' : 'R', sc->rt_chipid);
208 	device_set_desc_copy(dev, buf);
209 	return (BUS_PROBE_GENERIC);
210 }
211 
212 /*
213  * macaddr_atoi - translate string MAC address to uint8_t array
214  */
215 static int
216 macaddr_atoi(const char *str, uint8_t *mac)
217 {
218 	int count, i;
219 	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */
220 
221 	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
222 	    &amac[0], &amac[1], &amac[2],
223 	    &amac[3], &amac[4], &amac[5]);
224 	if (count < ETHER_ADDR_LEN) {
225 		memset(mac, 0, ETHER_ADDR_LEN);
226 		return (1);
227 	}
228 
229 	/* Copy aligned to result */
230 	for (i = 0; i < ETHER_ADDR_LEN; i ++)
231 		mac[i] = (amac[i] & 0xff);
232 
233 	return (0);
234 }
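
/*
 * Example: macaddr_atoi("00:18:e7:d5:83:90", mac) returns 0 and fills
 * mac[] with { 0x00, 0x18, 0xe7, 0xd5, 0x83, 0x90 }; any single
 * separator character is accepted.  On a malformed string mac[] is
 * zeroed and 1 is returned.
 */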
235 
236 #ifdef USE_GENERATED_MAC_ADDRESS
237 /*
238  * generate_mac(uint8_t *mac)
239  * MAC address generator for cases when the real device MAC address
240  * is unknown or not yet accessible.  Uses a 'b','s','d' signature
241  * and 3 octets from a CRC32C over the kernel environment (kenv):
242  * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
243  *
244  * The output does not change between reboots as long as the hints
245  * and bootloader info are unchanged.
246  */
247 static void
248 generate_mac(uint8_t *mac)
249 {
250 	unsigned char *cp;
251 	int i = 0;
252 	uint32_t crc = 0xffffffff;
253 
254 	/* Generate CRC32 on kenv */
255 	for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
256 		crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
257 	}
258 	crc = ~crc;
259 
260 	mac[0] = 'b';
261 	mac[1] = 's';
262 	mac[2] = 'd';
263 	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
264 	mac[4] = (crc >> 8) & 0xff;
265 	mac[5] = crc & 0xff;
266 }
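
/*
 * For example, if the final CRC value is 0x12345678 the generated
 * address is 62:73:64:26:56:78 ('b','s','d', 0x12^0x34, 0x56, 0x78).
 */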
267 #endif
268 
269 /*
270  * ether_request_mac - try to find a usable MAC address.
271  */
272 static int
273 ether_request_mac(device_t dev, uint8_t *mac)
274 {
275 	char *var;
276 
277 	/*
278 	 * "ethaddr" is passed via envp on RedBoot platforms
279 	 * "kmac" is passed via argv on RouterBOOT platforms
280 	 */
281 #if defined(RT305X_UBOOT) || defined(__REDBOOT__) || defined(__ROUTERBOOT__)
282 	if ((var = kern_getenv("ethaddr")) != NULL ||
283 	    (var = kern_getenv("kmac")) != NULL) {
284 
285 		if (!macaddr_atoi(var, mac)) {
286 			printf("%s: use %s macaddr from KENV\n",
287 			    device_get_nameunit(dev), var);
288 			freeenv(var);
289 			return (0);
290 		}
291 		freeenv(var);
292 	}
293 #endif
294 
295 	/*
296 	 * Try from hints
297 	 * hint.[dev].[unit].macaddr
298 	 */
299 	if (!resource_string_value(device_get_name(dev),
300 	    device_get_unit(dev), "macaddr", (const char **)&var)) {
301 
302 		if (!macaddr_atoi(var, mac)) {
303 			printf("%s: use %s macaddr from hints\n",
304 			    device_get_nameunit(dev), var);
305 			return (0);
306 		}
307 	}
308 
309 #ifdef USE_GENERATED_MAC_ADDRESS
310 	generate_mac(mac);
311 
312 	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
313 	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
314 #else
315 	/* Hardcoded */
316 	mac[0] = 0x00;
317 	mac[1] = 0x18;
318 	mac[2] = 0xe7;
319 	mac[3] = 0xd5;
320 	mac[4] = 0x83;
321 	mac[5] = 0x90;
322 
323 	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
324 #endif
325 
326 	return (0);
327 }
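
/*
 * Example: on platforms without a bootloader-supplied address, a
 * loader hint such as
 *	hint.rt.0.macaddr="02:00:00:11:22:33"
 * in /boot/device.hints selects the MAC address (hypothetical value).
 */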
328 
329 /*
330  * Reset hardware
331  */
332 static void
333 reset_freng(struct rt_softc *sc)
334 {
335 	/* XXX hard reset kills everything so skip it ... */
336 	return;
337 }
338 
339 static int
340 rt_attach(device_t dev)
341 {
342 	struct rt_softc *sc;
343 	struct ifnet *ifp;
344 	int error, i;
345 
346 	sc = device_get_softc(dev);
347 	sc->dev = dev;
348 
349 	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
350 	    MTX_DEF | MTX_RECURSE);
351 
352 	sc->mem_rid = 0;
353 	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
354 	    RF_ACTIVE);
355 	if (sc->mem == NULL) {
356 		device_printf(dev, "could not allocate memory resource\n");
357 		error = ENXIO;
358 		goto fail;
359 	}
360 
361 	sc->bst = rman_get_bustag(sc->mem);
362 	sc->bsh = rman_get_bushandle(sc->mem);
363 
364 	sc->irq_rid = 0;
365 	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
366 	    RF_ACTIVE);
367 	if (sc->irq == NULL) {
368 		device_printf(dev,
369 		    "could not allocate interrupt resource\n");
370 		error = ENXIO;
371 		goto fail;
372 	}
373 
374 #ifdef IF_RT_DEBUG
375 	sc->debug = rt_debug;
376 
377 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
378 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
379 		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
380 #endif
381 
382 	/* Reset hardware */
383 	reset_freng(sc);
384 
386 	if (sc->rt_chipid == RT_CHIPID_MT7620) {
387 		sc->csum_fail_ip = MT7620_RXD_SRC_IP_CSUM_FAIL;
388 		sc->csum_fail_l4 = MT7620_RXD_SRC_L4_CSUM_FAIL;
389 	} else if (sc->rt_chipid == RT_CHIPID_MT7621) {
390 		sc->csum_fail_ip = MT7621_RXD_SRC_IP_CSUM_FAIL;
391 		sc->csum_fail_l4 = MT7621_RXD_SRC_L4_CSUM_FAIL;
392 	} else {
393 		sc->csum_fail_ip = RT305X_RXD_SRC_IP_CSUM_FAIL;
394 		sc->csum_fail_l4 = RT305X_RXD_SRC_L4_CSUM_FAIL;
395 	}
396 
397 	/* Fill in SoC-specific register map */
398 	switch(sc->rt_chipid) {
399 	  case RT_CHIPID_MT7620:
400 	  case RT_CHIPID_MT7621:
401 		sc->gdma1_base = MT7620_GDMA1_BASE;
402 		/* fallthrough */
403 	  case RT_CHIPID_RT5350:
404 	  	device_printf(dev, "%cT%x Ethernet MAC (rev 0x%08x)\n",
405 			sc->rt_chipid >= 0x7600 ? 'M' : 'R',
406 	  		sc->rt_chipid, sc->mac_rev);
407 		/* RT5350: No GDMA, PSE, CDMA, PPE */
408 		RT_WRITE(sc, GE_PORT_BASE + 0x0C00, /* UDPCS, TCPCS, IPCS=1 */
409 			RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7 << 16));
410 		sc->delay_int_cfg=RT5350_PDMA_BASE+RT5350_DELAY_INT_CFG;
411 		sc->fe_int_status=RT5350_FE_INT_STATUS;
412 		sc->fe_int_enable=RT5350_FE_INT_ENABLE;
413 		sc->pdma_glo_cfg=RT5350_PDMA_BASE+RT5350_PDMA_GLO_CFG;
414 		sc->pdma_rst_idx=RT5350_PDMA_BASE+RT5350_PDMA_RST_IDX;
415 		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
416 		  sc->tx_base_ptr[i]=RT5350_PDMA_BASE+RT5350_TX_BASE_PTR(i);
417 		  sc->tx_max_cnt[i]=RT5350_PDMA_BASE+RT5350_TX_MAX_CNT(i);
418 		  sc->tx_ctx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_CTX_IDX(i);
419 		  sc->tx_dtx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_DTX_IDX(i);
420 		}
421 		sc->rx_ring_count=2;
422 		sc->rx_base_ptr[0]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR0;
423 		sc->rx_max_cnt[0]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT0;
424 		sc->rx_calc_idx[0]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX0;
425 		sc->rx_drx_idx[0]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX0;
426 		sc->rx_base_ptr[1]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR1;
427 		sc->rx_max_cnt[1]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT1;
428 		sc->rx_calc_idx[1]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX1;
429 		sc->rx_drx_idx[1]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX1;
430 		sc->int_rx_done_mask=RT5350_INT_RXQ0_DONE;
431 		sc->int_tx_done_mask=RT5350_INT_TXQ0_DONE;
432 	  	break;
433 	  default:
434 		device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
435 			sc->mac_rev);
436 		sc->gdma1_base = GDMA1_BASE;
437 		sc->delay_int_cfg=PDMA_BASE+DELAY_INT_CFG;
438 		sc->fe_int_status=GE_PORT_BASE+FE_INT_STATUS;
439 		sc->fe_int_enable=GE_PORT_BASE+FE_INT_ENABLE;
440 		sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG;
442 		sc->pdma_rst_idx=PDMA_BASE+PDMA_RST_IDX;
443 		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
444 		  sc->tx_base_ptr[i]=PDMA_BASE+TX_BASE_PTR(i);
445 		  sc->tx_max_cnt[i]=PDMA_BASE+TX_MAX_CNT(i);
446 		  sc->tx_ctx_idx[i]=PDMA_BASE+TX_CTX_IDX(i);
447 		  sc->tx_dtx_idx[i]=PDMA_BASE+TX_DTX_IDX(i);
448 		}
449 		sc->rx_ring_count=1;
450 		sc->rx_base_ptr[0]=PDMA_BASE+RX_BASE_PTR0;
451 		sc->rx_max_cnt[0]=PDMA_BASE+RX_MAX_CNT0;
452 		sc->rx_calc_idx[0]=PDMA_BASE+RX_CALC_IDX0;
453 		sc->rx_drx_idx[0]=PDMA_BASE+RX_DRX_IDX0;
454 		sc->int_rx_done_mask=INT_RX_DONE;
455 		sc->int_tx_done_mask=INT_TXQ0_DONE;
456 	}
457 
458 	if (sc->gdma1_base != 0)
459 		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
460 		(
461 		GDM_ICS_EN | /* Enable IP Csum */
462 		GDM_TCS_EN | /* Enable TCP Csum */
463 		GDM_UCS_EN | /* Enable UDP Csum */
464 		GDM_STRPCRC | /* Strip CRC from packet */
465 		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
466 		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
467 		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
468 		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
469 		));
470 
471 	/* allocate Tx and Rx rings */
472 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
473 		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
474 		if (error != 0) {
475 			device_printf(dev, "could not allocate Tx ring #%d\n",
476 			    i);
477 			goto fail;
478 		}
479 	}
480 
481 	sc->tx_ring_mgtqid = 5;
482 	for (i = 0; i < sc->rx_ring_count; i++) {
483 		error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i);
484 		if (error != 0) {
485 			device_printf(dev, "could not allocate Rx ring #%d\n", i);
486 			goto fail;
487 		}
488 	}
489 
490 	callout_init(&sc->periodic_ch, 0);
491 	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);
492 
493 	ifp = sc->ifp = if_alloc(IFT_ETHER);
494 	if (ifp == NULL) {
495 		device_printf(dev, "could not if_alloc()\n");
496 		error = ENOMEM;
497 		goto fail;
498 	}
499 
500 	ifp->if_softc = sc;
501 	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
502 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
503 	ifp->if_init = rt_init;
504 	ifp->if_ioctl = rt_ioctl;
505 	ifp->if_start = rt_start;
506 #define	RT_TX_QLEN	256
507 
508 	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
509 	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
510 	IFQ_SET_READY(&ifp->if_snd);
511 
512 #ifdef IF_RT_PHY_SUPPORT
513 	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
514 	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
515 	if (error != 0) {
516 		device_printf(dev, "attaching PHYs failed\n");
517 		error = ENXIO;
518 		goto fail;
519 	}
520 #else
521 	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
522 	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
523 	    NULL);
524 	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
525 
526 #endif /* IF_RT_PHY_SUPPORT */
527 
528 	ether_request_mac(dev, sc->mac_addr);
529 	ether_ifattach(ifp, sc->mac_addr);
530 
531 	/*
532 	 * Tell the upper layer(s) we support long frames.
533 	 */
534 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
535 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
536 	ifp->if_capenable |= IFCAP_VLAN_MTU;
537 	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
538 	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
539 
540 	/* init task queue */
541 	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
542 	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
543 	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);
544 
545 	sc->rx_process_limit = 100;
546 
547 	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
548 	    taskqueue_thread_enqueue, &sc->taskqueue);
549 
550 	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
551 	    device_get_nameunit(sc->dev));
552 
553 	rt_sysctl_attach(sc);
554 
555 	/* set up interrupt */
556 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
557 	    NULL, (sc->rt_chipid == RT_CHIPID_RT5350 ||
558 	    sc->rt_chipid == RT_CHIPID_MT7620 ||
559 	    sc->rt_chipid == RT_CHIPID_MT7621) ? rt_rt5350_intr : rt_intr,
560 	    sc, &sc->irqh);
561 	if (error != 0) {
562 		printf("%s: could not set up interrupt\n",
563 			device_get_nameunit(dev));
564 		goto fail;
565 	}
566 #ifdef IF_RT_DEBUG
567 	device_printf(dev, "debug var at %p\n", &sc->debug);
568 #endif
569 
570 	return (0);
571 
572 fail:
573 	/* free Tx and Rx rings */
574 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
575 		rt_free_tx_ring(sc, &sc->tx_ring[i]);
576 
577 	for (i = 0; i < sc->rx_ring_count; i++)
578 		rt_free_rx_ring(sc, &sc->rx_ring[i]);
579 
580 	mtx_destroy(&sc->lock);
581 
582 	if (sc->mem != NULL)
583 		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
584 		    sc->mem);
585 
586 	if (sc->irq != NULL)
587 		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
588 		    sc->irq);
589 
590 	return (error);
591 }
592 
593 /*
594  * Set media options.
595  */
596 static int
597 rt_ifmedia_upd(struct ifnet *ifp)
598 {
599 	struct rt_softc *sc;
600 #ifdef IF_RT_PHY_SUPPORT
601 	struct mii_data *mii;
602 	struct mii_softc *miisc;
603 	int error = 0;
604 
605 	sc = ifp->if_softc;
606 	RT_SOFTC_LOCK(sc);
607 
608 	mii = device_get_softc(sc->rt_miibus);
609 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
610 		PHY_RESET(miisc);
611 	error = mii_mediachg(mii);
612 	RT_SOFTC_UNLOCK(sc);
613 
614 	return (error);
615 
616 #else /* !IF_RT_PHY_SUPPORT */
617 
618 	struct ifmedia *ifm;
619 	struct ifmedia_entry *ife;
620 
621 	sc = ifp->if_softc;
622 	ifm = &sc->rt_ifmedia;
623 	ife = ifm->ifm_cur;
624 
625 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
626 		return (EINVAL);
627 
628 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
629 		device_printf(sc->dev,
630 		    "AUTO is not supported for multiphy MAC");
631 		return (EINVAL);
632 	}
633 
634 	/*
635 	 * Ignore everything
636 	 */
637 	return (0);
638 #endif /* IF_RT_PHY_SUPPORT */
639 }
640 
641 /*
642  * Report current media status.
643  */
644 static void
645 rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
646 {
647 #ifdef IF_RT_PHY_SUPPORT
648 	struct rt_softc *sc;
649 	struct mii_data *mii;
650 
651 	sc = ifp->if_softc;
652 
653 	RT_SOFTC_LOCK(sc);
654 	mii = device_get_softc(sc->rt_miibus);
655 	mii_pollstat(mii);
656 	/* XXX mii_pollstat() results are currently ignored; report */
657 	/* a fixed 100baseTX full-duplex status instead. */
658 	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
659 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
660 	RT_SOFTC_UNLOCK(sc);
661 #else /* !IF_RT_PHY_SUPPORT */
662 
663 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
664 	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
665 #endif /* IF_RT_PHY_SUPPORT */
666 }
667 
668 static int
669 rt_detach(device_t dev)
670 {
671 	struct rt_softc *sc;
672 	struct ifnet *ifp;
673 	int i;
674 
675 	sc = device_get_softc(dev);
676 	ifp = sc->ifp;
677 
678 	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");
679 
680 	RT_SOFTC_LOCK(sc);
681 
682 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
683 
684 	callout_stop(&sc->periodic_ch);
685 	callout_stop(&sc->tx_watchdog_ch);
686 
687 	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
688 	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
689 	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
690 
691 	/* free Tx and Rx rings */
692 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
693 		rt_free_tx_ring(sc, &sc->tx_ring[i]);
694 	for (i = 0; i < sc->rx_ring_count; i++)
695 		rt_free_rx_ring(sc, &sc->rx_ring[i]);
696 
697 	RT_SOFTC_UNLOCK(sc);
698 
699 #ifdef IF_RT_PHY_SUPPORT
700 	if (sc->rt_miibus != NULL)
701 		device_delete_child(dev, sc->rt_miibus);
702 #endif
703 
704 	ether_ifdetach(ifp);
705 	if_free(ifp);
706 
707 	taskqueue_free(sc->taskqueue);
708 
709 	mtx_destroy(&sc->lock);
710 
711 	bus_generic_detach(dev);
712 	bus_teardown_intr(dev, sc->irq, sc->irqh);
713 	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
714 	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
715 
716 	return (0);
717 }
718 
719 static int
720 rt_shutdown(device_t dev)
721 {
722 	struct rt_softc *sc;
723 
724 	sc = device_get_softc(dev);
725 	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
726 	rt_stop(sc);
727 
728 	return (0);
729 }
730 
731 static int
732 rt_suspend(device_t dev)
733 {
734 	struct rt_softc *sc;
735 
736 	sc = device_get_softc(dev);
737 	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
738 	rt_stop(sc);
739 
740 	return (0);
741 }
742 
743 static int
744 rt_resume(device_t dev)
745 {
746 	struct rt_softc *sc;
747 	struct ifnet *ifp;
748 
749 	sc = device_get_softc(dev);
750 	ifp = sc->ifp;
751 
752 	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");
753 
754 	if (ifp->if_flags & IFF_UP)
755 		rt_init(sc);
756 
757 	return (0);
758 }
759 
760 /*
761  * rt_init_locked - Run initialization process having locked mtx.
762  */
763 static void
764 rt_init_locked(void *priv)
765 {
766 	struct rt_softc *sc;
767 	struct ifnet *ifp;
768 #ifdef IF_RT_PHY_SUPPORT
769 	struct mii_data *mii;
770 #endif
771 	int i, ntries;
772 	uint32_t tmp;
773 
774 	sc = priv;
775 	ifp = sc->ifp;
776 #ifdef IF_RT_PHY_SUPPORT
777 	mii = device_get_softc(sc->rt_miibus);
778 #endif
779 
780 	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");
781 
782 	RT_SOFTC_ASSERT_LOCKED(sc);
783 
784 	/* hardware reset */
785 	//RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
786 	//rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);
787 
788 	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
789 	if (sc->gdma1_base != 0)
790 		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
791 		(
792 		GDM_ICS_EN | /* Enable IP Csum */
793 		GDM_TCS_EN | /* Enable TCP Csum */
794 		GDM_UCS_EN | /* Enable UDP Csum */
795 		GDM_STRPCRC | /* Strip CRC from packet */
796 		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
797 		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
798 		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
799 		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
800 		));
801 
802 	/* disable DMA engine */
803 	RT_WRITE(sc, sc->pdma_glo_cfg, 0);
804 	RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff);
805 
806 	/* wait while DMA engine is busy */
807 	for (ntries = 0; ntries < 100; ntries++) {
808 		tmp = RT_READ(sc, sc->pdma_glo_cfg);
809 		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
810 			break;
811 		DELAY(1000);
812 	}
813 
814 	if (ntries == 100) {
815 		device_printf(sc->dev, "timeout waiting for DMA engine\n");
816 		goto fail;
817 	}
818 
819 	/* reset Rx and Tx rings */
820 	tmp = FE_RST_DRX_IDX0 |
821 		FE_RST_DTX_IDX3 |
822 		FE_RST_DTX_IDX2 |
823 		FE_RST_DTX_IDX1 |
824 		FE_RST_DTX_IDX0;
825 
826 	RT_WRITE(sc, sc->pdma_rst_idx, tmp);
827 
828 	/* XXX switch set mac address */
829 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
830 		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
831 
832 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
833 		/* update TX_BASE_PTRx */
834 		RT_WRITE(sc, sc->tx_base_ptr[i],
835 			sc->tx_ring[i].desc_phys_addr);
836 		RT_WRITE(sc, sc->tx_max_cnt[i],
837 			RT_SOFTC_TX_RING_DESC_COUNT);
838 		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
839 	}
840 
841 	/* init Rx ring */
842 	for (i = 0; i < sc->rx_ring_count; i++)
843 		rt_reset_rx_ring(sc, &sc->rx_ring[i]);
844 
845 	/* update RX_BASE_PTRx */
846 	for (i = 0; i < sc->rx_ring_count; i++) {
847 		RT_WRITE(sc, sc->rx_base_ptr[i],
848 			sc->rx_ring[i].desc_phys_addr);
849 		RT_WRITE(sc, sc->rx_max_cnt[i],
850 			RT_SOFTC_RX_RING_DATA_COUNT);
851 		RT_WRITE(sc, sc->rx_calc_idx[i],
852 			RT_SOFTC_RX_RING_DATA_COUNT - 1);
853 	}
854 
855 	/* write back DDONE, 16-byte burst, enable Rx/Tx DMA */
856 	tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN;
857 	if (sc->rt_chipid == RT_CHIPID_MT7620 ||
858 	    sc->rt_chipid == RT_CHIPID_MT7621)
859 		tmp |= (1U << 31);
860 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
861 
862 	/* disable interrupt mitigation */
863 	RT_WRITE(sc, sc->delay_int_cfg, 0);
864 
865 	/* clear pending interrupts */
866 	RT_WRITE(sc, sc->fe_int_status, 0xffffffff);
867 
868 	/* enable interrupts */
869 	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
870 	    sc->rt_chipid == RT_CHIPID_MT7620 ||
871 	    sc->rt_chipid == RT_CHIPID_MT7621)
872 	  tmp = RT5350_INT_TX_COHERENT |
873 	  	RT5350_INT_RX_COHERENT |
874 	  	RT5350_INT_TXQ3_DONE |
875 	  	RT5350_INT_TXQ2_DONE |
876 	  	RT5350_INT_TXQ1_DONE |
877 	  	RT5350_INT_TXQ0_DONE |
878 	  	RT5350_INT_RXQ1_DONE |
879 	  	RT5350_INT_RXQ0_DONE;
880 	else
881 	  tmp = CNT_PPE_AF |
882 		CNT_GDM_AF |
883 		PSE_P2_FC |
884 		GDM_CRC_DROP |
885 		PSE_BUF_DROP |
886 		GDM_OTHER_DROP |
887 		PSE_P1_FC |
888 		PSE_P0_FC |
889 		PSE_FQ_EMPTY |
890 		INT_TX_COHERENT |
891 		INT_RX_COHERENT |
892 		INT_TXQ3_DONE |
893 		INT_TXQ2_DONE |
894 		INT_TXQ1_DONE |
895 		INT_TXQ0_DONE |
896 		INT_RX_DONE;
897 
898 	sc->intr_enable_mask = tmp;
899 
900 	RT_WRITE(sc, sc->fe_int_enable, tmp);
901 
902 	if (rt_txrx_enable(sc) != 0)
903 		goto fail;
904 
905 #ifdef IF_RT_PHY_SUPPORT
906 	if (mii) mii_mediachg(mii);
907 #endif /* IF_RT_PHY_SUPPORT */
908 
909 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
910 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
911 
912 	sc->periodic_round = 0;
913 
914 	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
915 
916 	return;
917 
918 fail:
919 	rt_stop_locked(sc);
920 }
921 
922 /*
923  * rt_init - lock and initialize device.
924  */
925 static void
926 rt_init(void *priv)
927 {
928 	struct rt_softc *sc;
929 
930 	sc = priv;
931 	RT_SOFTC_LOCK(sc);
932 	rt_init_locked(sc);
933 	RT_SOFTC_UNLOCK(sc);
934 }
935 
936 /*
937  * rt_stop_locked - stop TX/RX w/ lock
938  */
939 static void
940 rt_stop_locked(void *priv)
941 {
942 	struct rt_softc *sc;
943 	struct ifnet *ifp;
944 
945 	sc = priv;
946 	ifp = sc->ifp;
947 
948 	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");
949 
950 	RT_SOFTC_ASSERT_LOCKED(sc);
951 	sc->tx_timer = 0;
952 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
953 	callout_stop(&sc->periodic_ch);
954 	callout_stop(&sc->tx_watchdog_ch);
955 	RT_SOFTC_UNLOCK(sc);
956 	taskqueue_block(sc->taskqueue);
957 
958 	/*
959 	 * XXX rt_stop_locked() is sometimes called from the ISR; draining
960 	 * the taskqueues there panics, so keep this disabled until fixed.
961 	 */
962 #ifdef notyet
963 	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
964 	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
965 	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
966 #endif
967 	RT_SOFTC_LOCK(sc);
968 
969 	/* disable interrupts */
970 	RT_WRITE(sc, sc->fe_int_enable, 0);
971 
972 	if (sc->rt_chipid != RT_CHIPID_RT5350 &&
973 	   sc->rt_chipid != RT_CHIPID_MT7620 &&
974 	   sc->rt_chipid != RT_CHIPID_MT7621) {
975 		/* reset adapter */
976 		RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
977 	}
978 
979 	if (sc->gdma1_base != 0)
980 		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
981 		(
982 		GDM_ICS_EN | /* Enable IP Csum */
983 		GDM_TCS_EN | /* Enable TCP Csum */
984 		GDM_UCS_EN | /* Enable UDP Csum */
985 		GDM_STRPCRC | /* Strip CRC from packet */
986 		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
987 		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
988 		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
989 		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
990 		));
991 }
992 
993 static void
994 rt_stop(void *priv)
995 {
996 	struct rt_softc *sc;
997 
998 	sc = priv;
999 	RT_SOFTC_LOCK(sc);
1000 	rt_stop_locked(sc);
1001 	RT_SOFTC_UNLOCK(sc);
1002 }
1003 
1004 /*
1005  * rt_tx_data - transmit packet.
1006  */
1007 static int
1008 rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
1009 {
1010 	struct ifnet *ifp;
1011 	struct rt_softc_tx_ring *ring;
1012 	struct rt_softc_tx_data *data;
1013 	struct rt_txdesc *desc;
1014 	struct mbuf *m_d;
1015 	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
1016 	int error, ndmasegs, ndescs, i;
1017 
1018 	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
1019 		("%s: Tx data: invalid qid=%d\n",
1020 		 device_get_nameunit(sc->dev), qid));
1021 
1022 	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);
1023 
1024 	ifp = sc->ifp;
1025 	ring = &sc->tx_ring[qid];
1026 	desc = &ring->desc[ring->desc_cur];
1027 	data = &ring->data[ring->data_cur];
1028 
1029 	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
1030 	    dma_seg, &ndmasegs, 0);
1031 	if (error != 0)	{
1032 		/* too many fragments, linearize */
1033 
1034 		RT_DPRINTF(sc, RT_DEBUG_TX,
1035 			"could not load mbuf DMA map, trying to linearize "
1036 			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
1037 			ndmasegs, m->m_pkthdr.len, error);
1038 
1039 		m_d = m_collapse(m, M_NOWAIT, 16);
1040 		if (m_d == NULL) {
1041 			m_freem(m);
1042 			m = NULL;
1043 			return (ENOMEM);
1044 		}
1045 		m = m_d;
1046 
1047 		sc->tx_defrag_packets++;
1048 
1049 		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
1050 		    data->dma_map, m, dma_seg, &ndmasegs, 0);
1051 		if (error != 0)	{
1052 			device_printf(sc->dev, "could not load mbuf DMA map: "
1053 			    "ndmasegs=%d, len=%d, error=%d\n",
1054 			    ndmasegs, m->m_pkthdr.len, error);
1055 			m_freem(m);
1056 			return (error);
1057 		}
1058 	}
1059 
1060 	if (m->m_pkthdr.len == 0)
1061 		ndmasegs = 0;
1062 
1063 	/* conservatively estimate Tx descs needed (two segments per desc) */
1064 	ndescs = 1 + ndmasegs / 2;
1065 	if ((ring->desc_queued + ndescs) >
1066 	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
1067 		RT_DPRINTF(sc, RT_DEBUG_TX,
1068 		    "there are not enough Tx descs\n");
1069 
1070 		sc->no_tx_desc_avail++;
1071 
1072 		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
1073 		m_freem(m);
1074 		return (EFBIG);
1075 	}
1076 
1077 	data->m = m;
1078 
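	/*
	 * Each Tx descriptor carries up to two DMA segments (sdp0/sdp1);
	 * e.g. a 3-segment mbuf chain becomes desc0 = {seg0, seg1} and
	 * desc1 = {seg2 | RT_TXDESC_SDL0_LASTSEG}.
	 */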
1079 	/* set up Tx descs */
1080 	for (i = 0; i < ndmasegs; i += 2) {
1081 
1082 		/* TODO: this needs to be refined as MT7620 for example has
1083 		 * a different word3 layout than RT305x and RT5350 (the last
1084 		 * one doesn't use word3 at all). And so does MT7621...
1085 		 */
1086 
1087 		if (sc->rt_chipid != RT_CHIPID_MT7621) {
1088 			/* Set destination */
1089 			if (sc->rt_chipid != RT_CHIPID_MT7620)
1090 			    desc->dst = (TXDSCR_DST_PORT_GDMA1);
1091 
1092 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1093 				desc->dst |= (TXDSCR_IP_CSUM_GEN |
1094 				    TXDSCR_UDP_CSUM_GEN | TXDSCR_TCP_CSUM_GEN);
1095 			/* Set queue id */
1096 			desc->qn = qid;
1097 			/* No PPPoE */
1098 			desc->pppoe = 0;
1099 			/* No VLAN */
1100 			desc->vid = 0;
1101 		} else {
1102 			desc->vid = 0;
1103 			desc->pppoe = 0;
1104 			desc->qn = 0;
1105 			desc->dst = 2;
1106 		}
1107 
1108 		desc->sdp0 = htole32(dma_seg[i].ds_addr);
1109 		desc->sdl0 = htole16(dma_seg[i].ds_len |
1110 		    ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));
1111 
1112 		if ((i+1) < ndmasegs) {
1113 			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
1114 			desc->sdl1 = htole16(dma_seg[i+1].ds_len |
1115 			    ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
1116 		} else {
1117 			desc->sdp1 = 0;
1118 			desc->sdl1 = 0;
1119 		}
1120 
1121 		if ((i+2) < ndmasegs) {
1122 			ring->desc_queued++;
1123 			ring->desc_cur = (ring->desc_cur + 1) %
1124 			    RT_SOFTC_TX_RING_DESC_COUNT;
1125 		}
1126 		desc = &ring->desc[ring->desc_cur];
1127 	}
1128 
1129 	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
1130 	    "DMA ds_len=%d/%d/%d/%d/%d\n",
1131 	    m->m_pkthdr.len, ndmasegs,
1132 	    (int) dma_seg[0].ds_len,
1133 	    (int) dma_seg[1].ds_len,
1134 	    (int) dma_seg[2].ds_len,
1135 	    (int) dma_seg[3].ds_len,
1136 	    (int) dma_seg[4].ds_len);
1137 
1138 	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
1139 		BUS_DMASYNC_PREWRITE);
1140 	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1141 		BUS_DMASYNC_PREWRITE);
1142 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1143 		BUS_DMASYNC_PREWRITE);
1144 
1145 	ring->desc_queued++;
1146 	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;
1147 
1148 	ring->data_queued++;
1149 	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;
1150 
1151 	/* kick Tx */
1152 	RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur);
1153 
1154 	return (0);
1155 }
1156 
1157 /*
1158  * rt_start - dequeue packets from the interface queue and transmit them
1159  */
1160 static void
1161 rt_start(struct ifnet *ifp)
1162 {
1163 	struct rt_softc *sc;
1164 	struct mbuf *m;
1165 	int qid = 0 /* XXX must check QoS priority */;
1166 
1167 	sc = ifp->if_softc;
1168 
1169 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1170 		return;
1171 
1172 	for (;;) {
1173 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1174 		if (m == NULL)
1175 			break;
1176 
1177 		m->m_pkthdr.rcvif = NULL;
1178 
1179 		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);
1180 
1181 		if (sc->tx_ring[qid].data_queued >=
1182 		    RT_SOFTC_TX_RING_DATA_COUNT) {
1183 			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1184 
1185 			RT_DPRINTF(sc, RT_DEBUG_TX,
1186 			    "if_start: Tx ring with qid=%d is full\n", qid);
1187 
1188 			m_freem(m);
1189 
1190 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1191 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1192 
1193 			sc->tx_data_queue_full[qid]++;
1194 
1195 			break;
1196 		}
1197 
1198 		if (rt_tx_data(sc, m, qid) != 0) {
1199 			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1200 
1201 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1202 
1203 			break;
1204 		}
1205 
1206 		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1207 		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
1208 		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
1209 	}
1210 }
1211 
1212 /*
1213  * rt_update_promisc - set/clear promiscuous mode. Not effective yet,
1214  * because filtering is done by the attached Ethernet switch.
1215  */
1216 static void
1217 rt_update_promisc(struct ifnet *ifp)
1218 {
1219 	struct rt_softc *sc;
1220 
1221 	sc = ifp->if_softc;
1222 	printf("%s: %s promiscuous mode\n",
1223 		device_get_nameunit(sc->dev),
1224 		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
1225 }
1226 
1227 /*
1228  * rt_ioctl - ioctl handler.
1229  */
1230 static int
1231 rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1232 {
1233 	struct rt_softc *sc;
1234 	struct ifreq *ifr;
1235 #ifdef IF_RT_PHY_SUPPORT
1236 	struct mii_data *mii;
1237 #endif /* IF_RT_PHY_SUPPORT */
1238 	int error, startall;
1239 
1240 	sc = ifp->if_softc;
1241 	ifr = (struct ifreq *) data;
1242 
1243 	error = 0;
1244 
1245 	switch (cmd) {
1246 	case SIOCSIFFLAGS:
1247 		startall = 0;
1248 		RT_SOFTC_LOCK(sc);
1249 		if (ifp->if_flags & IFF_UP) {
1250 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1251 				if ((ifp->if_flags ^ sc->if_flags) &
1252 				    IFF_PROMISC)
1253 					rt_update_promisc(ifp);
1254 			} else {
1255 				rt_init_locked(sc);
1256 				startall = 1;
1257 			}
1258 		} else {
1259 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1260 				rt_stop_locked(sc);
1261 		}
1262 		sc->if_flags = ifp->if_flags;
1263 		RT_SOFTC_UNLOCK(sc);
1264 		break;
1265 	case SIOCGIFMEDIA:
1266 	case SIOCSIFMEDIA:
1267 #ifdef IF_RT_PHY_SUPPORT
1268 		mii = device_get_softc(sc->rt_miibus);
1269 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1270 #else
1271 		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
1272 #endif /* IF_RT_PHY_SUPPORT */
1273 		break;
1274 	default:
1275 		error = ether_ioctl(ifp, cmd, data);
1276 		break;
1277 	}
1278 	return (error);
1279 }
1280 
1281 /*
1282  * rt_periodic - Handler of PERIODIC interrupt
1283  */
1284 static void
1285 rt_periodic(void *arg)
1286 {
1287 	struct rt_softc *sc;
1288 
1289 	sc = arg;
1290 	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
1291 	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
1292 }
1293 
1294 /*
1295  * rt_tx_watchdog - Handler of TX Watchdog
1296  */
1297 static void
1298 rt_tx_watchdog(void *arg)
1299 {
1300 	struct rt_softc *sc;
1301 	struct ifnet *ifp;
1302 
1303 	sc = arg;
1304 	ifp = sc->ifp;
1305 
1306 	if (sc->tx_timer == 0)
1307 		return;
1308 
1309 	if (--sc->tx_timer == 0) {
1310 		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
1311 #ifdef notyet
1312 		/*
1313 		 * XXX: Commented out, because reset break input.
1314 		 */
1315 		rt_stop_locked(sc);
1316 		rt_init_locked(sc);
1317 #endif
1318 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1319 		sc->tx_watchdog_timeouts++;
1320 	}
1321 	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
1322 }
1323 
1324 /*
1325  * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
1326  */
1327 static void
1328 rt_cnt_ppe_af(struct rt_softc *sc)
1329 {
1330 
1331 	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
1332 }
1333 
1334 /*
1335  * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
1336  */
1337 static void
1338 rt_cnt_gdm_af(struct rt_softc *sc)
1339 {
1340 
1341 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1342 	    "GDMA 1 & 2 Counter Table Almost Full\n");
1343 }
1344 
1345 /*
1346  * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
1347  */
1348 static void
1349 rt_pse_p2_fc(struct rt_softc *sc)
1350 {
1351 
1352 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1353 	    "PSE port2 (GDMA 2) flow control asserted.\n");
1354 }
1355 
1356 /*
1357  * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
1358  * interrupt
1359  */
1360 static void
1361 rt_gdm_crc_drop(struct rt_softc *sc)
1362 {
1363 
1364 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1365 	    "GDMA 1 & 2 discard a packet due to CRC error\n");
1366 }
1367 
1368 /*
1369  * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
1370  */
1371 static void
1372 rt_pse_buf_drop(struct rt_softc *sc)
1373 {
1374 
1375 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1376 	    "PSE discards a packet due to buffer sharing limitation\n");
1377 }
1378 
1379 /*
1380  * rt_gdm_other_drop - Handler of discard on other reason interrupt
1381  */
1382 static void
1383 rt_gdm_other_drop(struct rt_softc *sc)
1384 {
1385 
1386 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1387 	    "GDMA 1 & 2 discard a packet due to other reason\n");
1388 }
1389 
1390 /*
1391  * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
1392  */
1393 static void
1394 rt_pse_p1_fc(struct rt_softc *sc)
1395 {
1396 
1397 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1398 	    "PSE port1 (GDMA 1) flow control asserted.\n");
1399 }
1400 
1401 /*
1402  * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
1403  */
1404 static void
1405 rt_pse_p0_fc(struct rt_softc *sc)
1406 {
1407 
1408 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1409 	    "PSE port0 (CDMA) flow control asserted.\n");
1410 }
1411 
1412 /*
1413  * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
1414  */
1415 static void
1416 rt_pse_fq_empty(struct rt_softc *sc)
1417 {
1418 
1419 	RT_DPRINTF(sc, RT_DEBUG_INTR,
1420 	    "PSE free Q empty threshold reached & forced drop "
1421 		    "condition occurred.\n");
1422 }
1423 
1424 /*
1425  * rt_intr - main ISR
1426  */
1427 static void
1428 rt_intr(void *arg)
1429 {
1430 	struct rt_softc *sc;
1431 	struct ifnet *ifp;
1432 	uint32_t status;
1433 
1434 	sc = arg;
1435 	ifp = sc->ifp;
1436 
1437 	/* acknowledge interrupts */
1438 	status = RT_READ(sc, sc->fe_int_status);
1439 	RT_WRITE(sc, sc->fe_int_status, status);
1440 
1441 	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
1442 
1443 	if (status == 0xffffffff ||	/* device likely went away */
1444 		status == 0)		/* not for us */
1445 		return;
1446 
1447 	sc->interrupts++;
1448 
1449 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1450 		return;
1451 
1452 	if (status & CNT_PPE_AF)
1453 		rt_cnt_ppe_af(sc);
1454 
1455 	if (status & CNT_GDM_AF)
1456 		rt_cnt_gdm_af(sc);
1457 
1458 	if (status & PSE_P2_FC)
1459 		rt_pse_p2_fc(sc);
1460 
1461 	if (status & GDM_CRC_DROP)
1462 		rt_gdm_crc_drop(sc);
1463 
1464 	if (status & PSE_BUF_DROP)
1465 		rt_pse_buf_drop(sc);
1466 
1467 	if (status & GDM_OTHER_DROP)
1468 		rt_gdm_other_drop(sc);
1469 
1470 	if (status & PSE_P1_FC)
1471 		rt_pse_p1_fc(sc);
1472 
1473 	if (status & PSE_P0_FC)
1474 		rt_pse_p0_fc(sc);
1475 
1476 	if (status & PSE_FQ_EMPTY)
1477 		rt_pse_fq_empty(sc);
1478 
1479 	if (status & INT_TX_COHERENT)
1480 		rt_tx_coherent_intr(sc);
1481 
1482 	if (status & INT_RX_COHERENT)
1483 		rt_rx_coherent_intr(sc);
1484 
1485 	if (status & RX_DLY_INT)
1486 		rt_rx_delay_intr(sc);
1487 
1488 	if (status & TX_DLY_INT)
1489 		rt_tx_delay_intr(sc);
1490 
1491 	if (status & INT_RX_DONE)
1492 		rt_rx_intr(sc, 0);
1493 
1494 	if (status & INT_TXQ3_DONE)
1495 		rt_tx_intr(sc, 3);
1496 
1497 	if (status & INT_TXQ2_DONE)
1498 		rt_tx_intr(sc, 2);
1499 
1500 	if (status & INT_TXQ1_DONE)
1501 		rt_tx_intr(sc, 1);
1502 
1503 	if (status & INT_TXQ0_DONE)
1504 		rt_tx_intr(sc, 0);
1505 }
1506 
1507 /*
1508  * rt_rt5350_intr - main ISR for RT5350, MT7620 and MT7621 SoCs
1509  */
1510 static void
1511 rt_rt5350_intr(void *arg)
1512 {
1513 	struct rt_softc *sc;
1514 	struct ifnet *ifp;
1515 	uint32_t status;
1516 
1517 	sc = arg;
1518 	ifp = sc->ifp;
1519 
1520 	/* acknowledge interrupts */
1521 	status = RT_READ(sc, sc->fe_int_status);
1522 	RT_WRITE(sc, sc->fe_int_status, status);
1523 
1524 	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
1525 
1526 	if (status == 0xffffffff ||     /* device likely went away */
1527 		status == 0)            /* not for us */
1528 		return;
1529 
1530 	sc->interrupts++;
1531 
1532 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1533 		return;
1534 
1535 	if (status & RT5350_INT_TX_COHERENT)
1536 		rt_tx_coherent_intr(sc);
1537 	if (status & RT5350_INT_RX_COHERENT)
1538 		rt_rx_coherent_intr(sc);
1539 	if (status & RT5350_RX_DLY_INT)
1540 		rt_rx_delay_intr(sc);
1541 	if (status & RT5350_TX_DLY_INT)
1542 		rt_tx_delay_intr(sc);
1543 	if (status & RT5350_INT_RXQ1_DONE)
1544 		rt_rx_intr(sc, 1);
1545 	if (status & RT5350_INT_RXQ0_DONE)
1546 		rt_rx_intr(sc, 0);
1547 	if (status & RT5350_INT_TXQ3_DONE)
1548 		rt_tx_intr(sc, 3);
1549 	if (status & RT5350_INT_TXQ2_DONE)
1550 		rt_tx_intr(sc, 2);
1551 	if (status & RT5350_INT_TXQ1_DONE)
1552 		rt_tx_intr(sc, 1);
1553 	if (status & RT5350_INT_TXQ0_DONE)
1554 		rt_tx_intr(sc, 0);
1555 }
1556 
1557 static void
1558 rt_tx_coherent_intr(struct rt_softc *sc)
1559 {
1560 	uint32_t tmp;
1561 	int i;
1562 
1563 	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");
1564 
1565 	sc->tx_coherent_interrupts++;
1566 
1567 	/* restart DMA engine */
1568 	tmp = RT_READ(sc, sc->pdma_glo_cfg);
1569 	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
1570 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
1571 
1572 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
1573 		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
1574 
1575 	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
1576 		RT_WRITE(sc, sc->tx_base_ptr[i],
1577 			sc->tx_ring[i].desc_phys_addr);
1578 		RT_WRITE(sc, sc->tx_max_cnt[i],
1579 			RT_SOFTC_TX_RING_DESC_COUNT);
1580 		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
1581 	}
1582 
1583 	rt_txrx_enable(sc);
1584 }
1585 
1586 /*
1587  * rt_rx_coherent_intr
1588  */
1589 static void
1590 rt_rx_coherent_intr(struct rt_softc *sc)
1591 {
1592 	uint32_t tmp;
1593 	int i;
1594 
1595 	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");
1596 
1597 	sc->rx_coherent_interrupts++;
1598 
1599 	/* restart DMA engine */
1600 	tmp = RT_READ(sc, sc->pdma_glo_cfg);
1601 	tmp &= ~(FE_RX_DMA_EN);
1602 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
1603 
1604 	/* init Rx ring */
1605 	for (i = 0; i < sc->rx_ring_count; i++)
1606 		rt_reset_rx_ring(sc, &sc->rx_ring[i]);
1607 
1608 	for (i = 0; i < sc->rx_ring_count; i++) {
1609 		RT_WRITE(sc, sc->rx_base_ptr[i],
1610 			sc->rx_ring[i].desc_phys_addr);
1611 		RT_WRITE(sc, sc->rx_max_cnt[i],
1612 			RT_SOFTC_RX_RING_DATA_COUNT);
1613 		RT_WRITE(sc, sc->rx_calc_idx[i],
1614 			RT_SOFTC_RX_RING_DATA_COUNT - 1);
1615 	}
1616 
1617 	rt_txrx_enable(sc);
1618 }
1619 
1620 /*
1621  * rt_rx_intr - a packet received
1622  */
1623 static void
1624 rt_rx_intr(struct rt_softc *sc, int qid)
1625 {
1626 	KASSERT(qid >= 0 && qid < sc->rx_ring_count,
1627 		("%s: Rx interrupt: invalid qid=%d\n",
1628 		 device_get_nameunit(sc->dev), qid));
1629 
1630 	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
1631 	sc->rx_interrupts[qid]++;
1632 	RT_SOFTC_LOCK(sc);
1633 
1634 	if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) {
1635 		rt_intr_disable(sc, (sc->int_rx_done_mask << qid));
1636 		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
1637 	}
1638 
1639 	sc->intr_pending_mask |= (sc->int_rx_done_mask << qid);
1640 	RT_SOFTC_UNLOCK(sc);
1641 }
1642 
1643 static void
1644 rt_rx_delay_intr(struct rt_softc *sc)
1645 {
1646 
1647 	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
1648 	sc->rx_delay_interrupts++;
1649 }
1650 
1651 static void
1652 rt_tx_delay_intr(struct rt_softc *sc)
1653 {
1654 
1655 	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
1656 	sc->tx_delay_interrupts++;
1657 }
1658 
1659 /*
1660  * rt_tx_intr - packet transmission done
1661  */
1662 static void
1663 rt_tx_intr(struct rt_softc *sc, int qid)
1664 {
1665 
1666 	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
1667 		("%s: Tx interrupt: invalid qid=%d\n",
1668 		 device_get_nameunit(sc->dev), qid));
1669 
1670 	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);
1671 
1672 	sc->tx_interrupts[qid]++;
1673 	RT_SOFTC_LOCK(sc);
1674 
1675 	if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) {
1676 		rt_intr_disable(sc, (sc->int_tx_done_mask << qid));
1677 		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1678 	}
1679 
1680 	sc->intr_pending_mask |= (sc->int_tx_done_mask << qid);
1681 	RT_SOFTC_UNLOCK(sc);
1682 }
1683 
1684 /*
1685  * rt_rx_done_task - run RX task
1686  */
1687 static void
1688 rt_rx_done_task(void *context, int pending)
1689 {
1690 	struct rt_softc *sc;
1691 	struct ifnet *ifp;
1692 	int again;
1693 
1694 	sc = context;
1695 	ifp = sc->ifp;
1696 
1697 	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");
1698 
1699 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1700 		return;
1701 
1702 	sc->intr_pending_mask &= ~sc->int_rx_done_mask;
1703 
1704 	again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);
1705 
1706 	RT_SOFTC_LOCK(sc);
1707 
1708 	if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) {
1709 		RT_DPRINTF(sc, RT_DEBUG_RX,
1710 		    "Rx done task: scheduling again\n");
1711 		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
1712 	} else {
1713 		rt_intr_enable(sc, sc->int_rx_done_mask);
1714 	}
1715 
1716 	RT_SOFTC_UNLOCK(sc);
1717 }
1718 
1719 /*
1720  * rt_tx_done_task - check for pending TX task in all queues
1721  */
1722 static void
1723 rt_tx_done_task(void *context, int pending)
1724 {
1725 	struct rt_softc *sc;
1726 	struct ifnet *ifp;
1727 	uint32_t intr_mask;
1728 	int i;
1729 
1730 	sc = context;
1731 	ifp = sc->ifp;
1732 
1733 	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");
1734 
1735 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1736 		return;
1737 
1738 	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
1739 		if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) {
1740 			sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i);
1741 			rt_tx_eof(sc, &sc->tx_ring[i]);
1742 		}
1743 	}
1744 
1745 	sc->tx_timer = 0;
1746 
1747 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1748 
1749 	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
1750 	   sc->rt_chipid == RT_CHIPID_MT7620 ||
1751 	   sc->rt_chipid == RT_CHIPID_MT7621)
1752 	  intr_mask = (
1753 		RT5350_INT_TXQ3_DONE |
1754 		RT5350_INT_TXQ2_DONE |
1755 		RT5350_INT_TXQ1_DONE |
1756 		RT5350_INT_TXQ0_DONE);
1757 	else
1758 	  intr_mask = (
1759 		INT_TXQ3_DONE |
1760 		INT_TXQ2_DONE |
1761 		INT_TXQ1_DONE |
1762 		INT_TXQ0_DONE);
1763 
1764 	RT_SOFTC_LOCK(sc);
1765 
1766 	rt_intr_enable(sc, ~sc->intr_pending_mask &
1767 	    (sc->intr_disable_mask & intr_mask));
1768 
1769 	if (sc->intr_pending_mask & intr_mask) {
1770 		RT_DPRINTF(sc, RT_DEBUG_TX,
1771 		    "Tx done task: scheduling again\n");
1772 		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1773 	}
1774 
1775 	RT_SOFTC_UNLOCK(sc);
1776 
1777 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1778 		rt_start(ifp);
1779 }
1780 
1781 /*
1782  * rt_periodic_task - run periodic task
1783  */
1784 static void
1785 rt_periodic_task(void *context, int pending)
1786 {
1787 	struct rt_softc *sc;
1788 	struct ifnet *ifp;
1789 
1790 	sc = context;
1791 	ifp = sc->ifp;
1792 
1793 	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
1794 	    sc->periodic_round);
1795 
1796 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1797 		return;
1798 
1799 	RT_SOFTC_LOCK(sc);
1800 	sc->periodic_round++;
1801 	rt_update_stats(sc);
1802 
1803 	if ((sc->periodic_round % 10) == 0) {
1804 		rt_update_raw_counters(sc);
1805 		rt_watchdog(sc);
1806 	}
1807 
1808 	RT_SOFTC_UNLOCK(sc);
1809 	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
1810 }
1811 
1812 /*
1813  * rt_rx_eof - check for frames completed by the DMA engine and pass
1814  * them to the network subsystem.
1815  */
1816 static int
1817 rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit)
1818 {
1819 	struct ifnet *ifp;
1820 /*	struct rt_softc_rx_ring *ring; */
1821 	struct rt_rxdesc *desc;
1822 	struct rt_softc_rx_data *data;
1823 	struct mbuf *m, *mnew;
1824 	bus_dma_segment_t segs[1];
1825 	bus_dmamap_t dma_map;
1826 	uint32_t index, desc_flags;
1827 	int error, nsegs, len, nframes;
1828 
1829 	ifp = sc->ifp;
1830 /*	ring = &sc->rx_ring[0]; */
1831 
1832 	nframes = 0;
1833 
1834 	while (limit != 0) {
1835 		index = RT_READ(sc, sc->rx_drx_idx[0]);
1836 		if (ring->cur == index)
1837 			break;
1838 
1839 		desc = &ring->desc[ring->cur];
1840 		data = &ring->data[ring->cur];
1841 
1842 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1843 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1844 
1845 #ifdef IF_RT_DEBUG
1846 		if (sc->debug & RT_DEBUG_RX) {
1847 			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
1848 			hexdump(desc, 16, 0, 0);
1849 			printf("-----------------------------------\n");
1850 		}
1851 #endif
1852 
1853 		/* XXX Sometimes the device doesn't set the DDONE bit */
1854 #ifdef DDONE_FIXED
1855 		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
1856 			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
1857 			break;
1858 		}
1859 #endif
1860 
1861 		len = le16toh(desc->sdl0) & 0x3fff;
1862 		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);
1863 
1864 		nframes++;
1865 
1866 		mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1867 		    MJUMPAGESIZE);
1868 		if (mnew == NULL) {
1869 			sc->rx_mbuf_alloc_errors++;
1870 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1871 			goto skip;
1872 		}
1873 
1874 		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;
1875 
1876 		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
1877 		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
1878 		if (error != 0) {
1879 			RT_DPRINTF(sc, RT_DEBUG_RX,
1880 			    "could not load Rx mbuf DMA map: "
1881 			    "error=%d, nsegs=%d\n",
1882 			    error, nsegs);
1883 
1884 			m_freem(mnew);
1885 
1886 			sc->rx_mbuf_dmamap_errors++;
1887 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1888 
1889 			goto skip;
1890 		}
1891 
1892 		KASSERT(nsegs == 1, ("%s: too many DMA segments",
1893 			device_get_nameunit(sc->dev)));
1894 
1895 		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1896 			BUS_DMASYNC_POSTREAD);
1897 		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
1898 
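		/* swap the just-loaded spare map into this slot */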
1899 		dma_map = data->dma_map;
1900 		data->dma_map = ring->spare_dma_map;
1901 		ring->spare_dma_map = dma_map;
1902 
1903 		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1904 			BUS_DMASYNC_PREREAD);
1905 
1906 		m = data->m;
1907 		desc_flags = desc->word3;
1908 
1909 		data->m = mnew;
1910 		/* offset buffer by 2 so the RX IP header is 4-byte aligned */
1911 		desc->sdp0 = htole32(segs[0].ds_addr+2);
1912 		desc->sdl0 = htole16(segs[0].ds_len-2);
1913 		desc->word3 = 0;
1914 
1915 		RT_DPRINTF(sc, RT_DEBUG_RX,
1916 		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);
1917 
1918 		m->m_pkthdr.rcvif = ifp;
1919 		/* skip the 2-byte pad, matching sdp0 = addr + 2 above */
1920 		m->m_data += 2;
1921 		m->m_pkthdr.len = m->m_len = len;
1922 
1923 		/* check hardware checksum results */
1924 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1925 			/* check for a valid checksum */
1926 			if (desc_flags & (sc->csum_fail_ip|sc->csum_fail_l4)) {
1927 				RT_DPRINTF(sc, RT_DEBUG_RX,
1928 				    "rxdesc: crc error\n");
1929 
1930 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1931 
1932 				if (!(ifp->if_flags & IFF_PROMISC)) {
1933 				    m_freem(m);
1934 				    goto skip;
1935 				}
1936 			}
1937 			if ((desc_flags & sc->csum_fail_ip) == 0) {
1938 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1939 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1940 				m->m_pkthdr.csum_data = 0xffff;
1941 			}
1942 			m->m_flags &= ~M_HASFCS;
1943 		}
1944 
1945 		(*ifp->if_input)(ifp, m);
1946 skip:
1947 		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
1948 
1949 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1950 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1951 
1952 		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;
1953 
1954 		limit--;
1955 	}
1956 
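	/* hand processed slots back to the DMA engine; the calc index
	 * points at the last descriptor returned */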
1957 	if (ring->cur == 0)
1958 		RT_WRITE(sc, sc->rx_calc_idx[0],
1959 			RT_SOFTC_RX_RING_DATA_COUNT - 1);
1960 	else
1961 		RT_WRITE(sc, sc->rx_calc_idx[0],
1962 			ring->cur - 1);
1963 
1964 	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);
1965 
1966 	sc->rx_packets += nframes;
1967 
1968 	return (limit == 0);
1969 }
1970 
1971 /*
1972  * rt_tx_eof - check for successfully transmitted frames and mark
1973  * their descriptors as free.
1974  */
1975 static void
1976 rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
1977 {
1978 	struct ifnet *ifp;
1979 	struct rt_txdesc *desc;
1980 	struct rt_softc_tx_data *data;
1981 	uint32_t index;
1982 	int ndescs, nframes;
1983 
1984 	ifp = sc->ifp;
1985 
1986 	ndescs = 0;
1987 	nframes = 0;
1988 
1989 	for (;;) {
1990 		index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]);
1991 		if (ring->desc_next == index)
1992 			break;
1993 
1994 		ndescs++;
1995 
1996 		desc = &ring->desc[ring->desc_next];
1997 
1998 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1999 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2000 
2001 		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
2002 			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
2003 			nframes++;
2004 
2005 			data = &ring->data[ring->data_next];
2006 
2007 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2008 				BUS_DMASYNC_POSTWRITE);
2009 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2010 
2011 			m_freem(data->m);
2012 
2013 			data->m = NULL;
2014 
2015 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2016 
2017 			RT_SOFTC_TX_RING_LOCK(ring);
2018 			ring->data_queued--;
2019 			ring->data_next = (ring->data_next + 1) %
2020 			    RT_SOFTC_TX_RING_DATA_COUNT;
2021 			RT_SOFTC_TX_RING_UNLOCK(ring);
2022 		}
2023 
2024 		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);
2025 
2026 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2027 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2028 
2029 		RT_SOFTC_TX_RING_LOCK(ring);
2030 		ring->desc_queued--;
2031 		ring->desc_next = (ring->desc_next + 1) %
2032 		    RT_SOFTC_TX_RING_DESC_COUNT;
2033 		RT_SOFTC_TX_RING_UNLOCK(ring);
2034 	}
2035 
2036 	RT_DPRINTF(sc, RT_DEBUG_TX,
2037 	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
2038 	    nframes);
2039 }
2040 
2041 /*
2042  * rt_update_stats - query statistics counters and update related variables.
2043  */
2044 static void
2045 rt_update_stats(struct rt_softc *sc)
2046 {
2047 	struct ifnet *ifp;
2048 
2049 	ifp = sc->ifp;
2050 	RT_DPRINTF(sc, RT_DEBUG_STATS, "updating statistics\n");
2051 	/* XXX do update stats here */
2052 }
2053 
2054 /*
2055  * rt_watchdog - reinit device on watchdog event.
2056  */
2057 static void
2058 rt_watchdog(struct rt_softc *sc)
2059 {
2060 	uint32_t tmp;
2061 #ifdef notyet
2062 	int ntries;
2063 #endif
2064 	if (sc->rt_chipid != RT_CHIPID_RT5350 &&
2065 	    sc->rt_chipid != RT_CHIPID_MT7620 &&
2066 	    sc->rt_chipid != RT_CHIPID_MT7621) {
2067 		tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);
2068 
2069 		RT_DPRINTF(sc, RT_DEBUG_WATCHDOG,
2070 			   "watchdog: CDMA_OQ_STA=0x%08x\n", tmp);
2071 	}
2071 	}
2072 	/* XXX: do not reset */
2073 #ifdef notyet
2074 	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
2075 		sc->tx_queue_not_empty[0]++;
2076 
2077 		for (ntries = 0; ntries < 10; ntries++) {
2078 			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2079 			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
2080 				break;
2081 
2082 			DELAY(1);
2083 		}
2084 	}
2085 
2086 	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
2087 		sc->tx_queue_not_empty[1]++;
2088 
2089 		for (ntries = 0; ntries < 10; ntries++) {
2090 			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2091 			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
2092 				break;
2093 
2094 			DELAY(1);
2095 		}
2096 	}
2097 #endif
2098 }
2099 
2100 /*
2101  * rt_update_raw_counters - read the hardware counters and accumulate them.
2102  */
2103 static void
2104 rt_update_raw_counters(struct rt_softc *sc)
2105 {
2106 
2107 	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
2108 	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
2109 	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
2110 	sc->tx_collision += RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
2111 
2112 	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
2113 	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
2114 	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
2115 	sc->rx_short_err += RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
2116 	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
2117 	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
2118 	sc->rx_fifo_overflows += RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
2119 }
2120 
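/*
 * rt_intr_enable/rt_intr_disable - keep a software shadow of the interrupt
 * mask: bits set in intr_disable_mask stay masked even when present in
 * intr_enable_mask, and the combined value is written to the FE interrupt
 * enable register.
 */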
2121 static void
2122 rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
2123 {
2124 	uint32_t tmp;
2125 
2126 	sc->intr_disable_mask &= ~intr_mask;
2127 	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2128 	RT_WRITE(sc, sc->fe_int_enable, tmp);
2129 }
2130 
2131 static void
2132 rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
2133 {
2134 	uint32_t tmp;
2135 
2136 	sc->intr_disable_mask |= intr_mask;
2137 	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2138 	RT_WRITE(sc, sc->fe_int_enable, tmp);
2139 }
2140 
2141 /*
2142  * rt_txrx_enable - enable TX/RX DMA
2143  */
2144 static int
2145 rt_txrx_enable(struct rt_softc *sc)
2146 {
2147 	struct ifnet *ifp;
2148 	uint32_t tmp;
2149 	int ntries;
2150 
2151 	ifp = sc->ifp;
2152 
2153 	/* enable Tx/Rx DMA engine */
2154 	for (ntries = 0; ntries < 200; ntries++) {
2155 		tmp = RT_READ(sc, sc->pdma_glo_cfg);
2156 		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
2157 			break;
2158 
2159 		DELAY(1000);
2160 	}
2161 
2162 	if (ntries == 200) {
2163 		device_printf(sc->dev, "timeout waiting for DMA engine\n");
2164 		return (-1);
2165 	}
2166 
2167 	DELAY(50);
2168 
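	/* Turn on Tx DDONE write-back together with the Tx and Rx engines */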
2169 	tmp |= FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN;
2170 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
2171 
2172 	/* XXX set Rx filter */
2173 	return (0);
2174 }
2175 
2176 /*
2177  * rt_alloc_rx_ring - allocate RX DMA ring buffer
2178  */
2179 static int
2180 rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid)
2181 {
2182 	struct rt_rxdesc *desc;
2183 	struct rt_softc_rx_data *data;
2184 	bus_dma_segment_t segs[1];
2185 	int i, nsegs, error;
2186 
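	/*
	 * The descriptor ring is one physically contiguous allocation: a
	 * single DMA segment holding RT_SOFTC_RX_RING_DATA_COUNT descriptors.
	 */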
2187 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2188 		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2189 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
2190 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2191 		0, NULL, NULL, &ring->desc_dma_tag);
2192 	if (error != 0)	{
2193 		device_printf(sc->dev,
2194 		    "could not create Rx desc DMA tag\n");
2195 		goto fail;
2196 	}
2197 
2198 	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2199 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2200 	if (error != 0) {
2201 		device_printf(sc->dev,
2202 		    "could not allocate Rx desc DMA memory\n");
2203 		goto fail;
2204 	}
2205 
2206 	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2207 		ring->desc,
2208 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2209 		rt_dma_map_addr, &ring->desc_phys_addr, 0);
2210 	if (error != 0) {
2211 		device_printf(sc->dev, "could not load Rx desc DMA map\n");
2212 		goto fail;
2213 	}
2214 
2215 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2216 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2217 		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
2218 		&ring->data_dma_tag);
2219 	if (error != 0)	{
2220 		device_printf(sc->dev,
2221 		    "could not create Rx data DMA tag\n");
2222 		goto fail;
2223 	}
2224 
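	/*
	 * Populate every ring slot with a page-sized jumbo mbuf and preload
	 * its DMA map so the hardware can start receiving immediately.
	 */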
2225 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2226 		desc = &ring->desc[i];
2227 		data = &ring->data[i];
2228 
2229 		error = bus_dmamap_create(ring->data_dma_tag, 0,
2230 		    &data->dma_map);
2231 		if (error != 0)	{
2232 			device_printf(sc->dev, "could not create Rx data DMA "
2233 			    "map\n");
2234 			goto fail;
2235 		}
2236 
2237 		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2238 		    MJUMPAGESIZE);
2239 		if (data->m == NULL) {
2240 			device_printf(sc->dev, "could not allocate Rx mbuf\n");
2241 			error = ENOMEM;
2242 			goto fail;
2243 		}
2244 
2245 		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
2246 
2247 		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
2248 		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
2249 		if (error != 0)	{
2250 			device_printf(sc->dev,
2251 			    "could not load Rx mbuf DMA map\n");
2252 			goto fail;
2253 		}
2254 
2255 		KASSERT(nsegs == 1, ("%s: too many DMA segments",
2256 			device_get_nameunit(sc->dev)));
2257 
2258 		/* Offset by 2 bytes so the received IP header is 4-byte aligned */
2259 		desc->sdp0 = htole32(segs[0].ds_addr+2);
2260 		desc->sdl0 = htole16(segs[0].ds_len-2);
2261 	}
2262 
2263 	error = bus_dmamap_create(ring->data_dma_tag, 0,
2264 	    &ring->spare_dma_map);
2265 	if (error != 0) {
2266 		device_printf(sc->dev,
2267 		    "could not create Rx spare DMA map\n");
2268 		goto fail;
2269 	}
2270 
2271 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2272 		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2273 	ring->qid = qid;
2274 	return (0);
2275 
2276 fail:
2277 	rt_free_rx_ring(sc, ring);
2278 	return (error);
2279 }
2280 
2281 /*
2282  * rt_reset_rx_ring - reset RX ring buffer
2283  */
2284 static void
2285 rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2286 {
2287 	struct rt_rxdesc *desc;
2288 	int i;
2289 
2290 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2291 		desc = &ring->desc[i];
2292 		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2293 	}
2294 
2295 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2296 		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2297 	ring->cur = 0;
2298 }
2299 
2300 /*
2301  * rt_free_rx_ring - free memory used by RX ring buffer
2302  */
2303 static void
2304 rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2305 {
2306 	struct rt_softc_rx_data *data;
2307 	int i;
2308 
2309 	if (ring->desc != NULL) {
2310 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2311 			BUS_DMASYNC_POSTWRITE);
2312 		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2313 		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2314 			ring->desc_dma_map);
2315 	}
2316 
2317 	if (ring->desc_dma_tag != NULL)
2318 		bus_dma_tag_destroy(ring->desc_dma_tag);
2319 
2320 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2321 		data = &ring->data[i];
2322 
2323 		if (data->m != NULL) {
2324 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2325 				BUS_DMASYNC_POSTREAD);
2326 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2327 			m_freem(data->m);
2328 		}
2329 
2330 		if (data->dma_map != NULL)
2331 			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2332 	}
2333 
2334 	if (ring->spare_dma_map != NULL)
2335 		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
2336 
2337 	if (ring->data_dma_tag != NULL)
2338 		bus_dma_tag_destroy(ring->data_dma_tag);
2339 }
2340 
2341 /*
2342  * rt_alloc_tx_ring - allocate TX ring buffer
2343  */
2344 static int
2345 rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2346 {
2347 	struct rt_softc_tx_data *data;
2348 	int error, i;
2349 
2350 	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2351 
2352 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2353 		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2354 		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2355 		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2356 		0, NULL, NULL, &ring->desc_dma_tag);
2357 	if (error != 0) {
2358 		device_printf(sc->dev,
2359 		    "could not create Tx desc DMA tag\n");
2360 		goto fail;
2361 	}
2362 
2363 	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2364 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2365 	if (error != 0)	{
2366 		device_printf(sc->dev,
2367 		    "could not allocate Tx desc DMA memory\n");
2368 		goto fail;
2369 	}
2370 
2371 	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2372 	    ring->desc,	(RT_SOFTC_TX_RING_DESC_COUNT *
2373 	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
2374 	    &ring->desc_phys_addr, 0);
2375 	if (error != 0) {
2376 		device_printf(sc->dev, "could not load Tx desc DMA map\n");
2377 		goto fail;
2378 	}
2379 
2380 	ring->desc_queued = 0;
2381 	ring->desc_cur = 0;
2382 	ring->desc_next = 0;
2383 
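	/*
	 * seg0 is one contiguous staging area providing RT_TX_DATA_SEG0_SIZE
	 * bytes for each of the RT_SOFTC_TX_RING_DATA_COUNT data slots.
	 */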
2384 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2385 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2386 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2387 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2388 	    0, NULL, NULL, &ring->seg0_dma_tag);
2389 	if (error != 0) {
2390 		device_printf(sc->dev,
2391 		    "could not create Tx seg0 DMA tag\n");
2392 		goto fail;
2393 	}
2394 
2395 	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2396 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2397 	if (error != 0) {
2398 		device_printf(sc->dev,
2399 		    "could not allocate Tx seg0 DMA memory\n");
2400 		goto fail;
2401 	}
2402 
2403 	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2404 	    ring->seg0,
2405 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2406 	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2407 	if (error != 0) {
2408 		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2409 		goto fail;
2410 	}
2411 
2412 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2413 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2414 	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2415 	    &ring->data_dma_tag);
2416 	if (error != 0) {
2417 		device_printf(sc->dev,
2418 		    "could not create Tx data DMA tag\n");
2419 		goto fail;
2420 	}
2421 
2422 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2423 		data = &ring->data[i];
2424 
2425 		error = bus_dmamap_create(ring->data_dma_tag, 0,
2426 		    &data->dma_map);
2427 		if (error != 0) {
2428 			device_printf(sc->dev, "could not create Tx data DMA "
2429 			    "map\n");
2430 			goto fail;
2431 		}
2432 	}
2433 
2434 	ring->data_queued = 0;
2435 	ring->data_cur = 0;
2436 	ring->data_next = 0;
2437 
2438 	ring->qid = qid;
2439 	return (0);
2440 
2441 fail:
2442 	rt_free_tx_ring(sc, ring);
2443 	return (error);
2444 }
2445 
2446 /*
2447  * rt_reset_tx_ring - reset TX ring buffer to empty state
2448  */
2449 static void
2450 rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2451 {
2452 	struct rt_softc_tx_data *data;
2453 	struct rt_txdesc *desc;
2454 	int i;
2455 
2456 	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2457 		desc = &ring->desc[i];
2458 
2459 		desc->sdl0 = 0;
2460 		desc->sdl1 = 0;
2461 	}
2462 
2463 	ring->desc_queued = 0;
2464 	ring->desc_cur = 0;
2465 	ring->desc_next = 0;
2466 
2467 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2468 		BUS_DMASYNC_PREWRITE);
2469 
2470 	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2471 		BUS_DMASYNC_PREWRITE);
2472 
2473 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2474 		data = &ring->data[i];
2475 
2476 		if (data->m != NULL) {
2477 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2478 				BUS_DMASYNC_POSTWRITE);
2479 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2480 			m_freem(data->m);
2481 			data->m = NULL;
2482 		}
2483 	}
2484 
2485 	ring->data_queued = 0;
2486 	ring->data_cur = 0;
2487 	ring->data_next = 0;
2488 }
2489 
2490 /*
2491  * rt_free_tx_ring - free memory used by TX ring buffer
2492  */
2493 static void
2494 rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2495 {
2496 	struct rt_softc_tx_data *data;
2497 	int i;
2498 
2499 	if (ring->desc != NULL) {
2500 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2501 			BUS_DMASYNC_POSTWRITE);
2502 		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2503 		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2504 			ring->desc_dma_map);
2505 	}
2506 
2507 	if (ring->desc_dma_tag != NULL)
2508 		bus_dma_tag_destroy(ring->desc_dma_tag);
2509 
2510 	if (ring->seg0 != NULL) {
2511 		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2512 			BUS_DMASYNC_POSTWRITE);
2513 		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2514 		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2515 			ring->seg0_dma_map);
2516 	}
2517 
2518 	if (ring->seg0_dma_tag != NULL)
2519 		bus_dma_tag_destroy(ring->seg0_dma_tag);
2520 
2521 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2522 		data = &ring->data[i];
2523 
2524 		if (data->m != NULL) {
2525 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2526 				BUS_DMASYNC_POSTWRITE);
2527 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2528 			m_freem(data->m);
2529 		}
2530 
2531 		if (data->dma_map != NULL)
2532 			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2533 	}
2534 
2535 	if (ring->data_dma_tag != NULL)
2536 		bus_dma_tag_destroy(ring->data_dma_tag);
2537 
2538 	mtx_destroy(&ring->lock);
2539 }
2540 
2541 /*
2542  * rt_dma_map_addr - get address of busdma segment
2543  */
2544 static void
2545 rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2546 {
2547 	if (error != 0)
2548 		return;
2549 
2550 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2551 
2552 	*(bus_addr_t *) arg = segs[0].ds_addr;
2553 }
2554 
2555 /*
2556  * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2557  */
2558 static void
2559 rt_sysctl_attach(struct rt_softc *sc)
2560 {
2561 	struct sysctl_ctx_list *ctx;
2562 	struct sysctl_oid *tree;
2563 	struct sysctl_oid *stats;
2564 
2565 	ctx = device_get_sysctl_ctx(sc->dev);
2566 	tree = device_get_sysctl_tree(sc->dev);
2567 
2568 	/* statistics counters */
2569 	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2570 	    "stats", CTLFLAG_RD, 0, "statistics");
2571 
2572 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2573 	    "interrupts", CTLFLAG_RD, &sc->interrupts,
2574 	    "all interrupts");
2575 
2576 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2577 	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
2578 	    "Tx coherent interrupts");
2579 
2580 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2581 	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
2582 	    "Rx coherent interrupts");
2583 
2584 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2585 	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0],
2586 	    "Rx interrupts");
2587 
2588 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2589 	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts,
2590 	    "Rx delay interrupts");
2591 
2592 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2593 	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3],
2594 	    "Tx AC3 interrupts");
2595 
2596 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2597 	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2],
2598 	    "Tx AC2 interrupts");
2599 
2600 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2601 	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1],
2602 	    "Tx AC1 interrupts");
2603 
2604 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2605 	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0],
2606 	    "Tx AC0 interrupts");
2607 
2608 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2609 	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
2610 	    "Tx delay interrupts");
2611 
2612 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2613 	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
2614 	    0, "Tx AC3 descriptors queued");
2615 
2616 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2617 	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
2618 	    0, "Tx AC3 data queued");
2619 
2620 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2621 	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
2622 	    0, "Tx AC2 descriptors queued");
2623 
2624 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2625 	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
2626 	    0, "Tx AC2 data queued");
2627 
2628 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2629 	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
2630 	    0, "Tx AC1 descriptors queued");
2631 
2632 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2633 	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
2634 	    0, "Tx AC1 data queued");
2635 
2636 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2637 	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
2638 	    0, "Tx AC0 descriptors queued");
2639 
2640 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2641 	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
2642 	    0, "Tx AC0 data queued");
2643 
2644 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2645 	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
2646 	    "Tx AC3 data queue full");
2647 
2648 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2649 	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
2650 	    "Tx AC2 data queue full");
2651 
2652 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2653 	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
2654 	    "Tx AC1 data queue full");
2655 
2656 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2657 	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
2658 	    "Tx AC0 data queue full");
2659 
2660 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2661 	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
2662 	    "Tx watchdog timeouts");
2663 
2664 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2665 	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets,
2666 	    "Tx defragmented packets");
2667 
2668 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2669 	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail,
2670 	    "no Tx descriptors available");
2671 
2672 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2673 	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
2674 	    "Rx mbuf allocation errors");
2675 
2676 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2677 	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
2678 	    "Rx mbuf DMA mapping errors");
2679 
2680 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2681 	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
2682 	    "Tx queue 0 not empty");
2683 
2684 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2685 	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
2686 	    "Tx queue 1 not empty");
2687 
2688 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2689 	    "rx_packets", CTLFLAG_RD, &sc->rx_packets,
2690 	    "Rx packets");
2691 
2692 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2693 	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err,
2694 	    "Rx CRC errors");
2695 
2696 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2697 	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err,
2698 	    "Rx PHY errors");
2699 
2700 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2701 	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets,
2702 	    "Rx duplicate packets");
2703 
2704 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2705 	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows,
2706 	    "Rx FIFO overflows");
2707 
2708 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2709 	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes,
2710 	    "Rx bytes");
2711 
2712 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2713 	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err,
2714 	    "Rx too long frame errors");
2715 
2716 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2717 	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err,
2718 	    "Rx too short frame errors");
2719 
2720 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2721 	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes,
2722 	    "Tx bytes");
2723 
2724 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2725 	    "tx_packets", CTLFLAG_RD, &sc->tx_packets,
2726 	    "Tx packets");
2727 
2728 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2729 	    "tx_skip", CTLFLAG_RD, &sc->tx_skip,
2730 	    "Tx skip count for GDMA ports");
2731 
2732 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2733 	    "tx_collision", CTLFLAG_RD, &sc->tx_collision,
2734 	    "Tx collision count for GDMA ports");
2735 }
2736 
2737 #ifdef IF_RT_PHY_SUPPORT
2738 static int
2739 rt_miibus_readreg(device_t dev, int phy, int reg)
2740 {
2741 	struct rt_softc *sc = device_get_softc(dev);
2742 
2743 	/*
2744 	 * PSEUDO_PHYAD is a special value indicating that a switch is
2745 	 * attached; no real PHY answers at the PSEUDO_PHYAD (0x1e) address.
2746 	 */
2747 	if (phy == 31) {
2748 		/* Fake PHY ID for bfeswitch attach */
2749 		switch (reg) {
2750 		case MII_BMSR:
2751 			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
2752 		case MII_PHYIDR1:
2753 			return (0x40);		/* Faked ID, so that the */
2754 		case MII_PHYIDR2:		/* PHY is detected as a */
2755 			return (0x6250);	/* bfeswitch */
2756 		}
2757 	}
2758 
2759 	/* Wait for any previous command to complete */
2760 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2761 	RT_WRITE(sc, MDIO_ACCESS,
2762 	    MDIO_CMD_ONGO |
2763 	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2764 	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
2765 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2766 
2767 	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2768 }
2769 
2770 static int
2771 rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2772 {
2773 	struct rt_softc *sc = device_get_softc(dev);
2774 
2775 	/* Wait for any previous command to complete */
2776 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2777 	RT_WRITE(sc, MDIO_ACCESS,
2778 	    MDIO_CMD_ONGO | MDIO_CMD_WR |
2779 	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2780 	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
2781 	    (val & MDIO_PHY_DATA_MASK));
2782 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2783 
2784 	return (0);
2785 }
2786 
2787 void
2788 rt_miibus_statchg(device_t dev)
2789 {
2790 	struct rt_softc *sc = device_get_softc(dev);
2791 	struct mii_data *mii;
2792 
2793 	mii = device_get_softc(sc->rt_miibus);
2794 
2795 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2796 	    (IFM_ACTIVE | IFM_AVALID)) {
2797 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2798 		case IFM_10_T:
2799 		case IFM_100_TX:
2800 			/* XXX check link here */
2801 			sc->flags |= 1;
2802 			break;
2803 		default:
2804 			break;
2805 		}
2806 	}
2807 }
2808 #endif /* IF_RT_PHY_SUPPORT */
2809 
2810 static device_method_t rt_dev_methods[] =
2811 {
2812 	DEVMETHOD(device_probe, rt_probe),
2813 	DEVMETHOD(device_attach, rt_attach),
2814 	DEVMETHOD(device_detach, rt_detach),
2815 	DEVMETHOD(device_shutdown, rt_shutdown),
2816 	DEVMETHOD(device_suspend, rt_suspend),
2817 	DEVMETHOD(device_resume, rt_resume),
2818 
2819 #ifdef IF_RT_PHY_SUPPORT
2820 	/* MII interface */
2821 	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
2822 	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
2823 	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
2824 #endif
2825 
2826 	DEVMETHOD_END
2827 };
2828 
2829 static driver_t rt_driver =
2830 {
2831 	"rt",
2832 	rt_dev_methods,
2833 	sizeof(struct rt_softc)
2834 };
2835 
2836 static devclass_t rt_dev_class;
2837 
2838 DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
2839 #ifdef FDT
2840 DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0);
2841 #endif
2842 
2843 MODULE_DEPEND(rt, ether, 1, 1, 1);
2844 MODULE_DEPEND(rt, miibus, 1, 1, 1);
2845 
2846