xref: /freebsd-13.1/sys/dev/rt/if_rt.c (revision bc683a89)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015-2016, Stanislav Galabov
 * Copyright (c) 2014, Aleksandr A. Mityaev
 * Copyright (c) 2011, Aleksandr Rybalko
 * based on hard work
 * by Alexander Egorenkov <[email protected]>
 * and by Damien Bergamini <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "if_rtvar.h"
#include "if_rtreg.h"

#include <sys/kenv.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "opt_platform.h"
#include "opt_rt305x.h"

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef RT_MDIO
#include <dev/mdio/mdio.h>
#include <dev/etherswitch/miiproxy.h>
#include "mdio_if.h"
#endif

#if 0
#include <mips/rt305x/rt305x_sysctlvar.h>
#include <mips/rt305x/rt305xreg.h>
#endif

#ifdef IF_RT_PHY_SUPPORT
#include "miibus_if.h"
#endif
/*
 * Defines and macros
 */
#define	RT_MAX_AGG_SIZE			3840

#define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE

#define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
#define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)
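
/*
 * RT_MS masks a register value and shifts the field down; RT_SM shifts a
 * field value up and masks it into place.  Both expect a mask macro _f with
 * a matching shift macro _f##_S, e.g. with FOO = 0x00f0 and FOO_S = 4,
 * RT_MS(reg, FOO) extracts bits 7:4 of reg and RT_SM(3, FOO) yields 0x0030.
 */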

#define	RT_TX_WATCHDOG_TIMEOUT		5

#define RT_CHIPID_RT2880 0x2880
#define RT_CHIPID_RT3050 0x3050
#define RT_CHIPID_RT3883 0x3883
#define RT_CHIPID_RT5350 0x5350
#define RT_CHIPID_MT7620 0x7620
#define RT_CHIPID_MT7621 0x7621

#ifdef FDT
/* more specific and newer models should go first */
static const struct ofw_compat_data rt_compat_data[] = {
	{ "ralink,rt2880-eth",		RT_CHIPID_RT2880 },
	{ "ralink,rt3050-eth",		RT_CHIPID_RT3050 },
	{ "ralink,rt3352-eth",		RT_CHIPID_RT3050 },
	{ "ralink,rt3883-eth",		RT_CHIPID_RT3883 },
	{ "ralink,rt5350-eth",		RT_CHIPID_RT5350 },
	{ "ralink,mt7620a-eth",		RT_CHIPID_MT7620 },
	{ "mediatek,mt7620-eth",	RT_CHIPID_MT7620 },
	{ "ralink,mt7621-eth",		RT_CHIPID_MT7621 },
	{ "mediatek,mt7621-eth",	RT_CHIPID_MT7621 },
	{ NULL,				0 }
};
#endif

/*
 * Static function prototypes
 */
static int	rt_probe(device_t dev);
static int	rt_attach(device_t dev);
static int	rt_detach(device_t dev);
static int	rt_shutdown(device_t dev);
static int	rt_suspend(device_t dev);
static int	rt_resume(device_t dev);
static void	rt_init_locked(void *priv);
static void	rt_init(void *priv);
static void	rt_stop_locked(void *priv);
static void	rt_stop(void *priv);
static void	rt_start(struct ifnet *ifp);
static int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	rt_periodic(void *arg);
static void	rt_tx_watchdog(void *arg);
static void	rt_intr(void *arg);
static void	rt_rt5350_intr(void *arg);
static void	rt_tx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_delay_intr(struct rt_softc *sc);
static void	rt_tx_delay_intr(struct rt_softc *sc);
static void	rt_rx_intr(struct rt_softc *sc, int qid);
static void	rt_tx_intr(struct rt_softc *sc, int qid);
static void	rt_rx_done_task(void *context, int pending);
static void	rt_tx_done_task(void *context, int pending);
static void	rt_periodic_task(void *context, int pending);
static int	rt_rx_eof(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring, int limit);
static void	rt_tx_eof(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_update_stats(struct rt_softc *sc);
static void	rt_watchdog(struct rt_softc *sc);
static void	rt_update_raw_counters(struct rt_softc *sc);
static void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
static void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
static int	rt_txrx_enable(struct rt_softc *sc);
static int	rt_alloc_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring, int qid);
static void	rt_reset_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static void	rt_free_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static int	rt_alloc_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring, int qid);
static void	rt_reset_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_free_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static void	rt_sysctl_attach(struct rt_softc *sc);
#ifdef IF_RT_PHY_SUPPORT
void		rt_miibus_statchg(device_t);
#endif
#if defined(IF_RT_PHY_SUPPORT) || defined(RT_MDIO)
static int	rt_miibus_readreg(device_t, int, int);
static int	rt_miibus_writereg(device_t, int, int, int);
#endif
static int	rt_ifmedia_upd(struct ifnet *);
static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "RT driver parameters");
#ifdef IF_RT_DEBUG
static int rt_debug = 0;
SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
    "RT debug level");
#endif

static int
rt_probe(device_t dev)
{
	struct rt_softc *sc = device_get_softc(dev);
	char buf[80];
#ifdef FDT
	const struct ofw_compat_data * cd;

	cd = ofw_bus_search_compatible(dev, rt_compat_data);
	if (cd->ocd_data == 0)
	        return (ENXIO);

	sc->rt_chipid = (unsigned int)(cd->ocd_data);
#else
#if defined(MT7620)
	sc->rt_chipid = RT_CHIPID_MT7620;
#elif defined(MT7621)
	sc->rt_chipid = RT_CHIPID_MT7621;
#elif defined(RT5350)
	sc->rt_chipid = RT_CHIPID_RT5350;
#else
	sc->rt_chipid = RT_CHIPID_RT3050;
#endif
#endif
	snprintf(buf, sizeof(buf), "Ralink %cT%x onChip Ethernet driver",
		sc->rt_chipid >= 0x7600 ? 'M' : 'R', sc->rt_chipid);
	device_set_desc_copy(dev, buf);
	return (BUS_PROBE_GENERIC);
}

/*
 * macaddr_atoi - translate string MAC address to uint8_t array
 */
static int
macaddr_atoi(const char *str, uint8_t *mac)
{
	int count, i;
	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */

	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
	    &amac[0], &amac[1], &amac[2],
	    &amac[3], &amac[4], &amac[5]);
	if (count < ETHER_ADDR_LEN) {
		memset(mac, 0, ETHER_ADDR_LEN);
		return (1);
	}

	/* Copy aligned to result */
	for (i = 0; i < ETHER_ADDR_LEN; i ++)
		mac[i] = (amac[i] & 0xff);

	return (0);
}
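
/*
 * Illustrative usage (not called anywhere in the driver): parsing a
 * separator-delimited string fills the array and returns 0; any parse
 * failure zeroes the array and returns 1.
 *
 *	uint8_t mac[ETHER_ADDR_LEN];
 *	macaddr_atoi("00:18:e7:d5:83:90", mac);
 *	// mac == { 0x00, 0x18, 0xe7, 0xd5, 0x83, 0x90 }
 */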

#ifdef USE_GENERATED_MAC_ADDRESS
/*
 * generate_mac(uint8_t *mac)
 * This is a MAC address generator for cases when the real device MAC
 * address is unknown or not yet accessible.
 * It uses a 'b','s','d' signature and 3 octets derived from a CRC32 of
 * the kenv.
 * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
 *
 * Output: a MAC address that does not change between reboots as long as
 * the hints and bootloader info stay the same.
 */
static void
generate_mac(uint8_t *mac)
{
	unsigned char *cp;
	int i = 0;
	uint32_t crc = 0xffffffff;

	/* Generate CRC32 on kenv */
	for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
		crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
	}
	crc = ~crc;

	mac[0] = 'b';
	mac[1] = 's';
	mac[2] = 'd';
	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
	mac[4] = (crc >> 8) & 0xff;
	mac[5] = crc & 0xff;
}
#endif

/*
 * ether_request_mac - try to find a usable MAC address.
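 * Lookup order (as implemented below): kernel environment ("ethaddr" or
 * "kmac"), then device hints, then a generated or hardcoded fallback.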
 */
static int
ether_request_mac(device_t dev, uint8_t *mac)
{
	char *var;

	/*
	 * "ethaddr" is passed via envp on RedBoot platforms
	 * "kmac" is passed via argv on RouterBOOT platforms
	 */
#if defined(RT305X_UBOOT) ||  defined(__REDBOOT__) || defined(__ROUTERBOOT__)
	if ((var = kern_getenv("ethaddr")) != NULL ||
	    (var = kern_getenv("kmac")) != NULL ) {
		if(!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from KENV\n",
			    device_get_nameunit(dev), var);
			freeenv(var);
			return (0);
		}
		freeenv(var);
	}
#endif

	/*
	 * Try from hints
	 * hint.[dev].[unit].macaddr
	 */
	if (!resource_string_value(device_get_name(dev),
	    device_get_unit(dev), "macaddr", (const char **)&var)) {
		if(!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from hints\n",
			    device_get_nameunit(dev), var);
			return (0);
		}
	}

#ifdef USE_GENERATED_MAC_ADDRESS
	generate_mac(mac);

	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
#else
	/* Hardcoded */
	mac[0] = 0x00;
	mac[1] = 0x18;
	mac[2] = 0xe7;
	mac[3] = 0xd5;
	mac[4] = 0x83;
	mac[5] = 0x90;

	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
#endif

	return (0);
}

/*
 * Reset hardware
 */
static void
reset_freng(struct rt_softc *sc)
{
	/* XXX hard reset kills everything so skip it ... */
	return;
}

static int
rt_attach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int error, i;
#ifdef FDT
	phandle_t node;
	char fdtval[32];
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

#ifdef FDT
	node = ofw_bus_get_node(sc->dev);
#endif

	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	sc->mem_rid = 0;
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		error = ENXIO;
		goto fail;
	}

	sc->bst = rman_get_bustag(sc->mem);
	sc->bsh = rman_get_bushandle(sc->mem);

	sc->irq_rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev,
		    "could not allocate interrupt resource\n");
		error = ENXIO;
		goto fail;
	}

#ifdef IF_RT_DEBUG
	sc->debug = rt_debug;

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
#endif

	/* Reset hardware */
	reset_freng(sc);

	if (sc->rt_chipid == RT_CHIPID_MT7620) {
		sc->csum_fail_ip = MT7620_RXD_SRC_IP_CSUM_FAIL;
		sc->csum_fail_l4 = MT7620_RXD_SRC_L4_CSUM_FAIL;
	} else if (sc->rt_chipid == RT_CHIPID_MT7621) {
		sc->csum_fail_ip = MT7621_RXD_SRC_IP_CSUM_FAIL;
		sc->csum_fail_l4 = MT7621_RXD_SRC_L4_CSUM_FAIL;
	} else {
		sc->csum_fail_ip = RT305X_RXD_SRC_IP_CSUM_FAIL;
		sc->csum_fail_l4 = RT305X_RXD_SRC_L4_CSUM_FAIL;
	}

	/* Fill in soc-specific registers map */
	switch(sc->rt_chipid) {
	  case RT_CHIPID_MT7620:
	  case RT_CHIPID_MT7621:
		sc->gdma1_base = MT7620_GDMA1_BASE;
		/* fallthrough */
	  case RT_CHIPID_RT5350:
	  	device_printf(dev, "%cT%x Ethernet MAC (rev 0x%08x)\n",
			sc->rt_chipid >= 0x7600 ? 'M' : 'R',
	  		sc->rt_chipid, sc->mac_rev);
		/* RT5350: No GDMA, PSE, CDMA, PPE */
		RT_WRITE(sc, GE_PORT_BASE + 0x0C00, // UDPCS, TCPCS, IPCS=1
			RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7<<16));
		sc->delay_int_cfg=RT5350_PDMA_BASE+RT5350_DELAY_INT_CFG;
		sc->fe_int_status=RT5350_FE_INT_STATUS;
		sc->fe_int_enable=RT5350_FE_INT_ENABLE;
		sc->pdma_glo_cfg=RT5350_PDMA_BASE+RT5350_PDMA_GLO_CFG;
		sc->pdma_rst_idx=RT5350_PDMA_BASE+RT5350_PDMA_RST_IDX;
		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		  sc->tx_base_ptr[i]=RT5350_PDMA_BASE+RT5350_TX_BASE_PTR(i);
		  sc->tx_max_cnt[i]=RT5350_PDMA_BASE+RT5350_TX_MAX_CNT(i);
		  sc->tx_ctx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_CTX_IDX(i);
		  sc->tx_dtx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_DTX_IDX(i);
		}
		sc->rx_ring_count=2;
		sc->rx_base_ptr[0]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR0;
		sc->rx_max_cnt[0]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT0;
		sc->rx_calc_idx[0]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX0;
		sc->rx_drx_idx[0]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX0;
		sc->rx_base_ptr[1]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR1;
		sc->rx_max_cnt[1]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT1;
		sc->rx_calc_idx[1]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX1;
		sc->rx_drx_idx[1]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX1;
		sc->int_rx_done_mask=RT5350_INT_RXQ0_DONE;
		sc->int_tx_done_mask=RT5350_INT_TXQ0_DONE;
	  	break;
	  default:
		device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
			sc->mac_rev);
		sc->gdma1_base = GDMA1_BASE;
		sc->delay_int_cfg=PDMA_BASE+DELAY_INT_CFG;
		sc->fe_int_status=GE_PORT_BASE+FE_INT_STATUS;
		sc->fe_int_enable=GE_PORT_BASE+FE_INT_ENABLE;
		sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG;
		sc->pdma_rst_idx=PDMA_BASE+PDMA_RST_IDX;
		for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		  sc->tx_base_ptr[i]=PDMA_BASE+TX_BASE_PTR(i);
		  sc->tx_max_cnt[i]=PDMA_BASE+TX_MAX_CNT(i);
		  sc->tx_ctx_idx[i]=PDMA_BASE+TX_CTX_IDX(i);
		  sc->tx_dtx_idx[i]=PDMA_BASE+TX_DTX_IDX(i);
		}
		sc->rx_ring_count=1;
		sc->rx_base_ptr[0]=PDMA_BASE+RX_BASE_PTR0;
		sc->rx_max_cnt[0]=PDMA_BASE+RX_MAX_CNT0;
		sc->rx_calc_idx[0]=PDMA_BASE+RX_CALC_IDX0;
		sc->rx_drx_idx[0]=PDMA_BASE+RX_DRX_IDX0;
		sc->int_rx_done_mask=INT_RX_DONE;
		sc->int_tx_done_mask=INT_TXQ0_DONE;
	}
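
	/*
	 * Note: the RT5350/MT762x register map above exposes two Rx rings
	 * while the RT305x map exposes one; rx_ring_count set here sizes
	 * every per-ring Rx loop in the rest of the driver.
	 */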

	if (sc->gdma1_base != 0)
		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
		(
		GDM_ICS_EN | /* Enable IP Csum */
		GDM_TCS_EN | /* Enable TCP Csum */
		GDM_UCS_EN | /* Enable UDP Csum */
		GDM_STRPCRC | /* Strip CRC from packet */
		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
		));

#ifdef FDT
	if (sc->rt_chipid == RT_CHIPID_RT2880 ||
	    sc->rt_chipid == RT_CHIPID_RT3883) {
		if (OF_getprop(node, "port-mode", fdtval, sizeof(fdtval)) > 0 &&
		    strcmp(fdtval, "gigasw") == 0)
			RT_WRITE(sc, MDIO_CFG, MDIO_2880_GIGA_INIT);
		else
			RT_WRITE(sc, MDIO_CFG, MDIO_2880_100T_INIT);
	}
#endif

	/* allocate Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Tx ring #%d\n",
			    i);
			goto fail;
		}
	}

	sc->tx_ring_mgtqid = 5;
	for (i = 0; i < sc->rx_ring_count; i++) {
		error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Rx ring\n");
			goto fail;
		}
	}

	callout_init(&sc->periodic_ch, 0);
	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not if_alloc()\n");
		error = ENOMEM;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = rt_init;
	ifp->if_ioctl = rt_ioctl;
	ifp->if_start = rt_start;
#define	RT_TX_QLEN	256

	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
	IFQ_SET_READY(&ifp->if_snd);

#ifdef IF_RT_PHY_SUPPORT
	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		error = ENXIO;
		goto fail;
	}
#else
	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
	    NULL);
	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);

#endif /* IF_RT_PHY_SUPPORT */

	ether_request_mac(dev, sc->mac_addr);
	ether_ifattach(ifp, sc->mac_addr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;

	/* init task queue */
	NET_TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);

	sc->rx_process_limit = 100;

	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->taskqueue);

	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	rt_sysctl_attach(sc);

	/* set up interrupt */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621) ? rt_rt5350_intr : rt_intr,
	    sc, &sc->irqh);
	if (error != 0) {
		printf("%s: could not set up interrupt\n",
			device_get_nameunit(dev));
		goto fail;
	}
#ifdef IF_RT_DEBUG
	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
#endif

	return (0);

fail:
	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < sc->rx_ring_count; i++)
		rt_free_rx_ring(sc, &sc->rx_ring[i]);

	mtx_destroy(&sc->lock);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem);

	if (sc->irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq);

	return (error);
}

/*
 * Set media options.
 */
static int
rt_ifmedia_upd(struct ifnet *ifp)
{
	struct rt_softc *sc;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error = 0;

	sc = ifp->if_softc;
	RT_SOFTC_LOCK(sc);

	mii = device_get_softc(sc->rt_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	RT_SOFTC_UNLOCK(sc);

	return (error);

#else /* !IF_RT_PHY_SUPPORT */

	struct ifmedia *ifm;
	struct ifmedia_entry *ife;

	sc = ifp->if_softc;
	ifm = &sc->rt_ifmedia;
	ife = ifm->ifm_cur;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		device_printf(sc->dev,
		    "AUTO is not supported for multiphy MAC\n");
		return (EINVAL);
	}

	/*
	 * Ignore everything
	 */
	return (0);
#endif /* IF_RT_PHY_SUPPORT */
}

/*
 * Report current media status.
 */
static void
rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
#ifdef IF_RT_PHY_SUPPORT
	struct rt_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	RT_SOFTC_LOCK(sc);
	mii = device_get_softc(sc->rt_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
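	/*
	 * XXX the hard-coded 100TX/FDX status below overrides the
	 * mii_pollstat() result fetched just above.
	 */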
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	RT_SOFTC_UNLOCK(sc);
#else /* !IF_RT_PHY_SUPPORT */

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
#endif /* IF_RT_PHY_SUPPORT */
}

static int
rt_detach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");

	RT_SOFTC_LOCK(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);

	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);

	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_free_rx_ring(sc, &sc->rx_ring[i]);

	RT_SOFTC_UNLOCK(sc);

#ifdef IF_RT_PHY_SUPPORT
	if (sc->rt_miibus != NULL)
		device_delete_child(dev, sc->rt_miibus);
#endif

	ether_ifdetach(ifp);
	if_free(ifp);

	taskqueue_free(sc->taskqueue);

	mtx_destroy(&sc->lock);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->irq, sc->irqh);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	return (0);
}

static int
rt_shutdown(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
	rt_stop(sc);

	return (0);
}

static int
rt_suspend(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
	rt_stop(sc);

	return (0);
}

static int
rt_resume(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");

	if (ifp->if_flags & IFF_UP)
		rt_init(sc);

	return (0);
}

/*
 * rt_init_locked - run the initialization process with the mutex held.
 */
static void
rt_init_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif
	int i, ntries;
	uint32_t tmp;

	sc = priv;
	ifp = sc->ifp;
#ifdef IF_RT_PHY_SUPPORT
	mii = device_get_softc(sc->rt_miibus);
#endif

	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");

	RT_SOFTC_ASSERT_LOCKED(sc);

	/* hardware reset */
	//RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
	//rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);

	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
	if (sc->gdma1_base != 0)
		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
		(
		GDM_ICS_EN | /* Enable IP Csum */
		GDM_TCS_EN | /* Enable TCP Csum */
		GDM_UCS_EN | /* Enable UDP Csum */
		GDM_STRPCRC | /* Strip CRC from packet */
		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
		));

	/* disable DMA engine */
	RT_WRITE(sc, sc->pdma_glo_cfg, 0);
	RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff);

	/* wait while DMA engine is busy */
	for (ntries = 0; ntries < 100; ntries++) {
		tmp = RT_READ(sc, sc->pdma_glo_cfg);
		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
			break;
		DELAY(1000);
	}

	if (ntries == 100) {
		device_printf(sc->dev, "timeout waiting for DMA engine\n");
		goto fail;
	}

	/* reset Rx and Tx rings */
	tmp = FE_RST_DRX_IDX0 |
		FE_RST_DTX_IDX3 |
		FE_RST_DTX_IDX2 |
		FE_RST_DTX_IDX1 |
		FE_RST_DTX_IDX0;

	RT_WRITE(sc, sc->pdma_rst_idx, tmp);

	/* XXX switch set mac address */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		/* update TX_BASE_PTRx */
		RT_WRITE(sc, sc->tx_base_ptr[i],
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->tx_max_cnt[i],
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
	}

	/* init Rx ring */
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_reset_rx_ring(sc, &sc->rx_ring[i]);

	/* update RX_BASE_PTRx */
	for (i = 0; i < sc->rx_ring_count; i++) {
		RT_WRITE(sc, sc->rx_base_ptr[i],
			sc->rx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->rx_max_cnt[i],
			RT_SOFTC_RX_RING_DATA_COUNT);
		RT_WRITE(sc, sc->rx_calc_idx[i],
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	}
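
	/*
	 * RX_CALC_IDX appears to act as the CPU-owned pointer here: setting
	 * it to the last descriptor advertises the whole ring to the DMA
	 * engine.  rt_rx_eof() later chases RX_DRX_IDX (the DMA write
	 * pointer) and returns buffers by advancing RX_CALC_IDX again.
	 */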

	/* write back DDONE, 16byte burst enable RX/TX DMA */
	tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN;
	if (sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621)
		tmp |= (1<<31);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	/* disable interrupts mitigation */
	RT_WRITE(sc, sc->delay_int_cfg, 0);

	/* clear pending interrupts */
	RT_WRITE(sc, sc->fe_int_status, 0xffffffff);

	/* enable interrupts */
	if (sc->rt_chipid == RT_CHIPID_RT5350 ||
	    sc->rt_chipid == RT_CHIPID_MT7620 ||
	    sc->rt_chipid == RT_CHIPID_MT7621)
	  tmp = RT5350_INT_TX_COHERENT |
	  	RT5350_INT_RX_COHERENT |
	  	RT5350_INT_TXQ3_DONE |
	  	RT5350_INT_TXQ2_DONE |
	  	RT5350_INT_TXQ1_DONE |
	  	RT5350_INT_TXQ0_DONE |
	  	RT5350_INT_RXQ1_DONE |
	  	RT5350_INT_RXQ0_DONE;
	else
	  tmp = CNT_PPE_AF |
		CNT_GDM_AF |
		PSE_P2_FC |
		GDM_CRC_DROP |
		PSE_BUF_DROP |
		GDM_OTHER_DROP |
		PSE_P1_FC |
		PSE_P0_FC |
		PSE_FQ_EMPTY |
		INT_TX_COHERENT |
		INT_RX_COHERENT |
		INT_TXQ3_DONE |
		INT_TXQ2_DONE |
		INT_TXQ1_DONE |
		INT_TXQ0_DONE |
		INT_RX_DONE;

	sc->intr_enable_mask = tmp;

	RT_WRITE(sc, sc->fe_int_enable, tmp);

	if (rt_txrx_enable(sc) != 0)
		goto fail;

#ifdef IF_RT_PHY_SUPPORT
	if (mii) mii_mediachg(mii);
#endif /* IF_RT_PHY_SUPPORT */

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	sc->periodic_round = 0;

	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);

	return;

fail:
	rt_stop_locked(sc);
}

/*
 * rt_init - lock and initialize device.
 */
static void
rt_init(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_init_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_stop_locked - stop TX/RX w/ lock
 */
static void
rt_stop_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = priv;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");

	RT_SOFTC_ASSERT_LOCKED(sc);
	sc->tx_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);
	RT_SOFTC_UNLOCK(sc);
	taskqueue_block(sc->taskqueue);

	/*
	 * XXX rt_stop_locked is sometimes called from the ISR, where
	 * draining the task queue panics; keep the drains disabled
	 * until that is fixed.
	 */
#ifdef notyet
	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
#endif
	RT_SOFTC_LOCK(sc);

	/* disable interrupts */
	RT_WRITE(sc, sc->fe_int_enable, 0);

	if(sc->rt_chipid != RT_CHIPID_RT5350 &&
	   sc->rt_chipid != RT_CHIPID_MT7620 &&
	   sc->rt_chipid != RT_CHIPID_MT7621) {
		/* reset adapter */
		RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
	}

	if (sc->gdma1_base != 0)
		RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG,
		(
		GDM_ICS_EN | /* Enable IP Csum */
		GDM_TCS_EN | /* Enable TCP Csum */
		GDM_UCS_EN | /* Enable UDP Csum */
		GDM_STRPCRC | /* Strip CRC from packet */
		GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */
		GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */
		GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */
		GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* fwd Other to CPU */
		));
}

static void
rt_stop(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_stop_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_data - transmit packet.
 */
static int
rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
{
	struct ifnet *ifp;
	struct rt_softc_tx_ring *ring;
	struct rt_softc_tx_data *data;
	struct rt_txdesc *desc;
	struct mbuf *m_d;
	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
	int error, ndmasegs, ndescs, i;

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx data: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);

	ifp = sc->ifp;
	ring = &sc->tx_ring[qid];
	desc = &ring->desc[ring->desc_cur];
	data = &ring->data[ring->data_cur];

	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
	    dma_seg, &ndmasegs, 0);
	if (error != 0)	{
		/* too many fragments, linearize */

		RT_DPRINTF(sc, RT_DEBUG_TX,
			"could not load mbuf DMA map, trying to linearize "
			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
			ndmasegs, m->m_pkthdr.len, error);

		m_d = m_collapse(m, M_NOWAIT, 16);
		if (m_d == NULL) {
			m_freem(m);
			m = NULL;
			return (ENOMEM);
		}
		m = m_d;

		sc->tx_defrag_packets++;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, m, dma_seg, &ndmasegs, 0);
		if (error != 0)	{
			device_printf(sc->dev, "could not load mbuf DMA map: "
			    "ndmasegs=%d, len=%d, error=%d\n",
			    ndmasegs, m->m_pkthdr.len, error);
			m_freem(m);
			return (error);
		}
	}

	if (m->m_pkthdr.len == 0)
		ndmasegs = 0;

	/* determine how many Tx descs are required */
	ndescs = 1 + ndmasegs / 2;
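	/*
	 * Each Tx descriptor carries up to two DMA segments (sdp0/sdl0 and
	 * sdp1/sdl1), so the estimate above is a conservative upper bound:
	 * exact for an odd segment count, one high for an even one.
	 */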
	if ((ring->desc_queued + ndescs) >
	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "there are not enough Tx descs\n");

		sc->no_tx_desc_avail++;

		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
		m_freem(m);
		return (EFBIG);
	}

	data->m = m;

	/* set up Tx descs */
	for (i = 0; i < ndmasegs; i += 2) {
		/* TODO: this needs to be refined as MT7620 for example has
		 * a different word3 layout than RT305x and RT5350 (the last
		 * one doesn't use word3 at all). And so does MT7621...
		 */

		if (sc->rt_chipid != RT_CHIPID_MT7621) {
			/* Set destination */
			if (sc->rt_chipid != RT_CHIPID_MT7620)
			    desc->dst = (TXDSCR_DST_PORT_GDMA1);

			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				desc->dst |= (TXDSCR_IP_CSUM_GEN |
				    TXDSCR_UDP_CSUM_GEN | TXDSCR_TCP_CSUM_GEN);
			/* Set queue id */
			desc->qn = qid;
			/* No PPPoE */
			desc->pppoe = 0;
			/* No VLAN */
			desc->vid = 0;
		} else {
			desc->vid = 0;
			desc->pppoe = 0;
			desc->qn = 0;
			desc->dst = 2;
		}

		desc->sdp0 = htole32(dma_seg[i].ds_addr);
		desc->sdl0 = htole16(dma_seg[i].ds_len |
		    ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));

		if ((i+1) < ndmasegs) {
			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
			desc->sdl1 = htole16(dma_seg[i+1].ds_len |
			    ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
		} else {
			desc->sdp1 = 0;
			desc->sdl1 = 0;
		}

		if ((i+2) < ndmasegs) {
			ring->desc_queued++;
			ring->desc_cur = (ring->desc_cur + 1) %
			    RT_SOFTC_TX_RING_DESC_COUNT;
		}
		desc = &ring->desc[ring->desc_cur];
	}

	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
	    "DMA ds_len=%d/%d/%d/%d/%d\n",
	    m->m_pkthdr.len, ndmasegs,
	    (int) dma_seg[0].ds_len,
	    (int) dma_seg[1].ds_len,
	    (int) dma_seg[2].ds_len,
	    (int) dma_seg[3].ds_len,
	    (int) dma_seg[4].ds_len);

	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREWRITE);

	ring->desc_queued++;
	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;

	ring->data_queued++;
	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;

	/* kick Tx */
	RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur);

	return (0);
}

/*
 * rt_start - if_start callback: dequeue packets from the send queue and
 * hand them to the Tx ring.
 */
static void
rt_start(struct ifnet *ifp)
{
	struct rt_softc *sc;
	struct mbuf *m;
	int qid = 0 /* XXX must check QoS priority */;

	sc = ifp->if_softc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		m->m_pkthdr.rcvif = NULL;

		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);

		if (sc->tx_ring[qid].data_queued >=
		    RT_SOFTC_TX_RING_DATA_COUNT) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			RT_DPRINTF(sc, RT_DEBUG_TX,
			    "if_start: Tx ring with qid=%d is full\n", qid);

			m_freem(m);

			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

			sc->tx_data_queue_full[qid]++;

			break;
		}

		if (rt_tx_data(sc, m, qid) != 0) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

			break;
		}

		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
	}
}

/*
 * rt_update_promisc - set/clear promiscuous mode. Not used yet, because
 * filtering is done by the attached Ethernet switch.
 */
static void
rt_update_promisc(struct ifnet *ifp)
{
	struct rt_softc *sc;

	sc = ifp->if_softc;
	printf("%s: %s promiscuous mode\n",
		device_get_nameunit(sc->dev),
		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
}

/*
 * rt_ioctl - ioctl handler.
 */
static int
rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rt_softc *sc;
	struct ifreq *ifr;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif /* IF_RT_PHY_SUPPORT */
	int error, startall;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;

	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		startall = 0;
		RT_SOFTC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    IFF_PROMISC)
					rt_update_promisc(ifp);
			} else {
				rt_init_locked(sc);
				startall = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rt_stop_locked(sc);
		}
		sc->if_flags = ifp->if_flags;
		RT_SOFTC_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
#ifdef IF_RT_PHY_SUPPORT
		mii = device_get_softc(sc->rt_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
#else
		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
#endif /* IF_RT_PHY_SUPPORT */
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

/*
 * rt_periodic - periodic callout handler; defers the work to the taskqueue.
 */
static void
rt_periodic(void *arg)
{
	struct rt_softc *sc;

	sc = arg;
	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
}

/*
 * rt_tx_watchdog - Handler of TX Watchdog
 */
static void
rt_tx_watchdog(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;

	if (sc->tx_timer == 0)
		return;

	if (--sc->tx_timer == 0) {
		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
#ifdef notyet
		/*
		 * XXX: Commented out, because the reset breaks input.
		 */
		rt_stop_locked(sc);
		rt_init_locked(sc);
#endif
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		sc->tx_watchdog_timeouts++;
	}
	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
}

/*
 * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
 */
static void
rt_cnt_ppe_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
}

/*
 * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
 */
static void
rt_cnt_gdm_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 Counter Table Almost Full\n");
}

/*
 * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
 */
static void
rt_pse_p2_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port2 (GDMA 2) flow control asserted.\n");
}

/*
 * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
 * interrupt
 */
static void
rt_gdm_crc_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to CRC error\n");
}

/*
 * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
 */
static void
rt_pse_buf_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE discards a packet due to buffer sharing limitation\n");
}

/*
 * rt_gdm_other_drop - Handler of discard on other reason interrupt
 */
static void
rt_gdm_other_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to other reason\n");
}

/*
 * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
 */
static void
rt_pse_p1_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port1 (GDMA 1) flow control asserted.\n");
}

/*
 * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
 */
static void
rt_pse_p0_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port0 (CDMA) flow control asserted.\n");
}

/*
 * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
 */
static void
rt_pse_fq_empty(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE free Q empty threshold reached & forced drop "
		    "condition occurred.\n");
}

/*
 * rt_intr - main ISR
 */
static void
rt_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, sc->fe_int_status);
	RT_WRITE(sc, sc->fe_int_status, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	if (status & CNT_PPE_AF)
		rt_cnt_ppe_af(sc);

	if (status & CNT_GDM_AF)
		rt_cnt_gdm_af(sc);

	if (status & PSE_P2_FC)
		rt_pse_p2_fc(sc);

	if (status & GDM_CRC_DROP)
		rt_gdm_crc_drop(sc);

	if (status & PSE_BUF_DROP)
		rt_pse_buf_drop(sc);

	if (status & GDM_OTHER_DROP)
		rt_gdm_other_drop(sc);

	if (status & PSE_P1_FC)
		rt_pse_p1_fc(sc);

	if (status & PSE_P0_FC)
		rt_pse_p0_fc(sc);

	if (status & PSE_FQ_EMPTY)
		rt_pse_fq_empty(sc);

	if (status & INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);

	if (status & INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);

	if (status & RX_DLY_INT)
		rt_rx_delay_intr(sc);

	if (status & TX_DLY_INT)
		rt_tx_delay_intr(sc);

	if (status & INT_RX_DONE)
		rt_rx_intr(sc, 0);

	if (status & INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);

	if (status & INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);

	if (status & INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);

	if (status & INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}

/*
 * rt_rt5350_intr - main ISR for Ralink 5350 SoC
 */
static void
rt_rt5350_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, sc->fe_int_status);
	RT_WRITE(sc, sc->fe_int_status, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||     /* device likely went away */
		status == 0)            /* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
	        return;

	if (status & RT5350_INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);
	if (status & RT5350_INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);
	if (status & RT5350_RX_DLY_INT)
	        rt_rx_delay_intr(sc);
	if (status & RT5350_TX_DLY_INT)
	        rt_tx_delay_intr(sc);
	if (status & RT5350_INT_RXQ1_DONE)
		rt_rx_intr(sc, 1);
	if (status & RT5350_INT_RXQ0_DONE)
		rt_rx_intr(sc, 0);
	if (status & RT5350_INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);
	if (status & RT5350_INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);
	if (status & RT5350_INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);
	if (status & RT5350_INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}

static void
rt_tx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");

	sc->tx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, sc->pdma_glo_cfg);
	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		RT_WRITE(sc, sc->tx_base_ptr[i],
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->tx_max_cnt[i],
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, sc->tx_ctx_idx[i], 0);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_coherent_intr
 */
static void
rt_rx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");

	sc->rx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, sc->pdma_glo_cfg);
	tmp &= ~(FE_RX_DMA_EN);
	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);

	/* init Rx ring */
	for (i = 0; i < sc->rx_ring_count; i++)
		rt_reset_rx_ring(sc, &sc->rx_ring[i]);

	for (i = 0; i < sc->rx_ring_count; i++) {
		RT_WRITE(sc, sc->rx_base_ptr[i],
			sc->rx_ring[i].desc_phys_addr);
		RT_WRITE(sc, sc->rx_max_cnt[i],
			RT_SOFTC_RX_RING_DATA_COUNT);
		RT_WRITE(sc, sc->rx_calc_idx[i],
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_intr - a packet received
 */
static void
rt_rx_intr(struct rt_softc *sc, int qid)
{
	KASSERT(qid >= 0 && qid < sc->rx_ring_count,
		("%s: Rx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
	sc->rx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) {
		rt_intr_disable(sc, (sc->int_rx_done_mask << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	}

	sc->intr_pending_mask |= (sc->int_rx_done_mask << qid);
	RT_SOFTC_UNLOCK(sc);
}
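
/*
 * Both rt_rx_intr() and rt_tx_intr() follow the same deferral pattern:
 * the ISR masks the "done" source, records it in intr_pending_mask and
 * queues the matching task; the task drains the ring and re-enables the
 * source only when no work is left, keeping the heavy lifting out of
 * interrupt context.
 */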

static void
rt_rx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
	sc->rx_delay_interrupts++;
}

static void
rt_tx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
	sc->tx_delay_interrupts++;
}

/*
 * rt_tx_intr - transmission of a packet is done
 */
static void
rt_tx_intr(struct rt_softc *sc, int qid)
{

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);

	sc->tx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) {
		rt_intr_disable(sc, (sc->int_tx_done_mask << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	sc->intr_pending_mask |= (sc->int_tx_done_mask << qid);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_rx_done_task - run RX task
 */
static void
rt_rx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int again;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	sc->intr_pending_mask &= ~sc->int_rx_done_mask;

	again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit);

	RT_SOFTC_LOCK(sc);

	if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) {
		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	} else {
		rt_intr_enable(sc, sc->int_rx_done_mask);
	}

	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_done_task - check for pending TX task in all queues
 */
static void
rt_tx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t intr_mask;
	int i;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
		if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) {
			sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i);
			rt_tx_eof(sc, &sc->tx_ring[i]);
		}
	}

	sc->tx_timer = 0;

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if(sc->rt_chipid == RT_CHIPID_RT5350 ||
	   sc->rt_chipid == RT_CHIPID_MT7620 ||
	   sc->rt_chipid == RT_CHIPID_MT7621)
	  intr_mask = (
		RT5350_INT_TXQ3_DONE |
		RT5350_INT_TXQ2_DONE |
		RT5350_INT_TXQ1_DONE |
		RT5350_INT_TXQ0_DONE);
	else
	  intr_mask = (
		INT_TXQ3_DONE |
		INT_TXQ2_DONE |
		INT_TXQ1_DONE |
		INT_TXQ0_DONE);

	RT_SOFTC_LOCK(sc);

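	/*
	 * Re-enable only those Tx-done sources that are currently masked
	 * and that have not been recorded as pending again in the meantime.
	 */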
	rt_intr_enable(sc, ~sc->intr_pending_mask &
	    (sc->intr_disable_mask & intr_mask));

	if (sc->intr_pending_mask & intr_mask) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "Tx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	RT_SOFTC_UNLOCK(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		rt_start(ifp);
}

/*
 * rt_periodic_task - run periodic task
 */
static void
rt_periodic_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
	    sc->periodic_round);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	RT_SOFTC_LOCK(sc);
	sc->periodic_round++;
	rt_update_stats(sc);

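	/*
	 * With the callout rescheduled at hz / 10 below, every 10th round
	 * runs roughly once a second.
	 */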
	if ((sc->periodic_round % 10) == 0) {
		rt_update_raw_counters(sc);
		rt_watchdog(sc);
	}

	RT_SOFTC_UNLOCK(sc);
	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
}

/*
 * rt_rx_eof - check for frames completed by the DMA engine and pass them
 * to the network subsystem.
 */
static int
rt_rx_eof(struct rt_softc * sc,struct rt_softc_rx_ring * ring,int limit)1847 rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit)
1848 {
1849 	struct ifnet *ifp;
1850 /*	struct rt_softc_rx_ring *ring; */
1851 	struct rt_rxdesc *desc;
1852 	struct rt_softc_rx_data *data;
1853 	struct mbuf *m, *mnew;
1854 	bus_dma_segment_t segs[1];
1855 	bus_dmamap_t dma_map;
1856 	uint32_t index, desc_flags;
1857 	int error, nsegs, len, nframes;
1858 
1859 	ifp = sc->ifp;
1860 /*	ring = &sc->rx_ring[0]; */
1861 
1862 	nframes = 0;
1863 
1864 	while (limit != 0) {
1865 		index = RT_READ(sc, sc->rx_drx_idx[0]);
1866 		if (ring->cur == index)
1867 			break;
1868 
1869 		desc = &ring->desc[ring->cur];
1870 		data = &ring->data[ring->cur];
1871 
1872 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1873 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1874 
1875 #ifdef IF_RT_DEBUG
1876 		if ( sc->debug & RT_DEBUG_RX ) {
1877 			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
1878 		        hexdump(desc, 16, 0, 0);
1879 			printf("-----------------------------------\n");
1880 		}
1881 #endif
1882 
1883 		/* XXX Sometime device don`t set DDONE bit */
1884 #ifdef DDONE_FIXED
1885 		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
1886 			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
1887 			break;
1888 		}
1889 #endif
1890 
1891 		len = le16toh(desc->sdl0) & 0x3fff;
1892 		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);
1893 
1894 		nframes++;
1895 
1896 		mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1897 		    MJUMPAGESIZE);
1898 		if (mnew == NULL) {
1899 			sc->rx_mbuf_alloc_errors++;
1900 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1901 			goto skip;
1902 		}
1903 
1904 		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;
1905 
1906 		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
1907 		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
1908 		if (error != 0) {
1909 			RT_DPRINTF(sc, RT_DEBUG_RX,
1910 			    "could not load Rx mbuf DMA map: "
1911 			    "error=%d, nsegs=%d\n",
1912 			    error, nsegs);
1913 
1914 			m_freem(mnew);
1915 
1916 			sc->rx_mbuf_dmamap_errors++;
1917 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1918 
1919 			goto skip;
1920 		}
1921 
1922 		KASSERT(nsegs == 1, ("%s: too many DMA segments",
1923 			device_get_nameunit(sc->dev)));
1924 
1925 		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1926 			BUS_DMASYNC_POSTREAD);
1927 		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
1928 
1929 		dma_map = data->dma_map;
1930 		data->dma_map = ring->spare_dma_map;
1931 		ring->spare_dma_map = dma_map;
1932 
1933 		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1934 			BUS_DMASYNC_PREREAD);
1935 
1936 		m = data->m;
1937 		desc_flags = desc->word3;
1938 
1939 		data->m = mnew;
1940 		/* Add 2 for proper align of RX IP header */
1941 		desc->sdp0 = htole32(segs[0].ds_addr+2);
1942 		desc->sdl0 = htole32(segs[0].ds_len-2);
1943 		desc->word3 = 0;
1944 
1945 		RT_DPRINTF(sc, RT_DEBUG_RX,
1946 		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);
1947 
1948 		m->m_pkthdr.rcvif = ifp;
1949 		/* Add 2 to fix data align, after sdp0 = addr + 2 */
1950 		m->m_data += 2;
1951 		m->m_pkthdr.len = m->m_len = len;
1952 
1953 		/* Check hardware checksum results. */
1954 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1955 			/* Check for a valid checksum. */
1956 			if (desc_flags & (sc->csum_fail_ip|sc->csum_fail_l4)) {
1957 				RT_DPRINTF(sc, RT_DEBUG_RX,
1958 				    "rxdesc: checksum error\n");
1959 
1960 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1961 
1962 				if (!(ifp->if_flags & IFF_PROMISC)) {
1963 				    m_freem(m);
1964 				    goto skip;
1965 				}
1966 			}
1967 			if ((desc_flags & sc->csum_fail_ip) == 0) {
1968 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1969 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1970 				m->m_pkthdr.csum_data = 0xffff;
1971 			}
1972 			m->m_flags &= ~M_HASFCS;
1973 		}
1974 
1975 		(*ifp->if_input)(ifp, m);
1976 skip:
1977 		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
1978 
1979 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1980 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1981 
1982 		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;
1983 
1984 		limit--;
1985 	}
1986 
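	/*
	 * Hand the processed descriptors back to the hardware: rx_calc_idx
	 * is set to the last slot the CPU has finished with, i.e. one
	 * before ring->cur, wrapping around at the start of the ring.
	 */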
1987 	if (ring->cur == 0)
1988 		RT_WRITE(sc, sc->rx_calc_idx[0],
1989 			RT_SOFTC_RX_RING_DATA_COUNT - 1);
1990 	else
1991 		RT_WRITE(sc, sc->rx_calc_idx[0],
1992 			ring->cur - 1);
1993 
1994 	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);
1995 
1996 	sc->rx_packets += nframes;
1997 
1998 	return (limit == 0);
1999 }
2000 
2001 /*
2002  * rt_tx_eof - check for successfully transmitted frames and mark their
2003  * descriptors as free.
2004  */
2005 static void
2006 rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2007 {
2008 	struct ifnet *ifp;
2009 	struct rt_txdesc *desc;
2010 	struct rt_softc_tx_data *data;
2011 	uint32_t index;
2012 	int ndescs, nframes;
2013 
2014 	ifp = sc->ifp;
2015 
2016 	ndescs = 0;
2017 	nframes = 0;
2018 
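	/*
	 * Walk the ring from the software "next" pointer up to the
	 * hardware DTX index; every descriptor marked LASTSEG completes
	 * one frame, whose mbuf and DMA map can then be released.
	 */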
2019 	for (;;) {
2020 		index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]);
2021 		if (ring->desc_next == index)
2022 			break;
2023 
2024 		ndescs++;
2025 
2026 		desc = &ring->desc[ring->desc_next];
2027 
2028 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2029 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2030 
2031 		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
2032 			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
2033 			nframes++;
2034 
2035 			data = &ring->data[ring->data_next];
2036 
2037 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2038 				BUS_DMASYNC_POSTWRITE);
2039 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2040 
2041 			m_freem(data->m);
2042 
2043 			data->m = NULL;
2044 
2045 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2046 
2047 			RT_SOFTC_TX_RING_LOCK(ring);
2048 			ring->data_queued--;
2049 			ring->data_next = (ring->data_next + 1) %
2050 			    RT_SOFTC_TX_RING_DATA_COUNT;
2051 			RT_SOFTC_TX_RING_UNLOCK(ring);
2052 		}
2053 
2054 		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);
2055 
2056 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2057 			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2058 
2059 		RT_SOFTC_TX_RING_LOCK(ring);
2060 		ring->desc_queued--;
2061 		ring->desc_next = (ring->desc_next + 1) %
2062 		    RT_SOFTC_TX_RING_DESC_COUNT;
2063 		RT_SOFTC_TX_RING_UNLOCK(ring);
2064 	}
2065 
2066 	RT_DPRINTF(sc, RT_DEBUG_TX,
2067 	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
2068 	    nframes);
2069 }
2070 
2071 /*
2072  * rt_update_stats - query statistics counters and update related variables.
2073  */
2074 static void
2075 rt_update_stats(struct rt_softc *sc)
2076 {
2077 	struct ifnet *ifp;
2078 
2079 	ifp = sc->ifp;
2080 	RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistics\n");
2081 	/* XXX do update stats here */
2082 }
2083 
2084 /*
2085  * rt_watchdog - check queue status on a watchdog event; the device reset is currently disabled.
2086  */
2087 static void
2088 rt_watchdog(struct rt_softc *sc)
2089 {
2090 	uint32_t tmp;
2091 #ifdef notyet
2092 	int ntries;
2093 #endif
2094 	if (sc->rt_chipid != RT_CHIPID_RT5350 &&
2095 	   sc->rt_chipid != RT_CHIPID_MT7620 &&
2096 	   sc->rt_chipid != RT_CHIPID_MT7621) {
2097 		tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);
2098 
2099 		RT_DPRINTF(sc, RT_DEBUG_WATCHDOG,
2100 			   "watchdog: CDMA_OQ_STA=0x%08x\n", tmp);
2101 	}
2102 	/* XXX: do not reset */
2103 #ifdef notyet
2104 	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
2105 		sc->tx_queue_not_empty[0]++;
2106 
2107 		for (ntries = 0; ntries < 10; ntries++) {
2108 			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2109 			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
2110 				break;
2111 
2112 			DELAY(1);
2113 		}
2114 	}
2115 
2116 	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
2117 		sc->tx_queue_not_empty[1]++;
2118 
2119 		for (ntries = 0; ntries < 10; ntries++) {
2120 			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
2121 			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
2122 				break;
2123 
2124 			DELAY(1);
2125 		}
2126 	}
2127 #endif
2128 }
2129 
2130 /*
2131  * rt_update_raw_counters - update counters.
2132  */
2133 static void
2134 rt_update_raw_counters(struct rt_softc *sc)
2135 {
2136 
2137 	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
2138 	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
2139 	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
2140 	sc->tx_collision += RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
2141 
2142 	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
2143 	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
2144 	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
2145 	sc->rx_short_err += RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
2146 	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
2147 	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
2148 	sc->rx_fifo_overflows += RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
2149 }
2150 
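/*
 * rt_intr_enable/rt_intr_disable - the driver keeps a fixed enable mask
 * and a dynamic disable mask; the interrupt enable register is always
 * programmed with (enable & ~disable), so individual sources can be
 * masked and unmasked without losing the configured set.
 */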
2151 static void
2152 rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
2153 {
2154 	uint32_t tmp;
2155 
2156 	sc->intr_disable_mask &= ~intr_mask;
2157 	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2158 	RT_WRITE(sc, sc->fe_int_enable, tmp);
2159 }
2160 
2161 static void
2162 rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
2163 {
2164 	uint32_t tmp;
2165 
2166 	sc->intr_disable_mask |= intr_mask;
2167 	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
2168 	RT_WRITE(sc, sc->fe_int_enable, tmp);
2169 }
2170 
2171 /*
2172  * rt_txrx_enable - enable TX/RX DMA
2173  */
2174 static int
2175 rt_txrx_enable(struct rt_softc *sc)
2176 {
2177 	struct ifnet *ifp;
2178 	uint32_t tmp;
2179 	int ntries;
2180 
2181 	ifp = sc->ifp;
2182 
2183 	/* enable Tx/Rx DMA engine */
2184 	for (ntries = 0; ntries < 200; ntries++) {
2185 		tmp = RT_READ(sc, sc->pdma_glo_cfg);
2186 		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
2187 			break;
2188 
2189 		DELAY(1000);
2190 	}
2191 
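	/* Give up if the engine never went idle during the ~200 ms poll above. */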
2192 	if (ntries == 200) {
2193 		device_printf(sc->dev, "timeout waiting for DMA engine\n");
2194 		return (-1);
2195 	}
2196 
2197 	DELAY(50);
2198 
2199 	tmp |= FE_TX_WB_DDONE |	FE_RX_DMA_EN | FE_TX_DMA_EN;
2200 	RT_WRITE(sc, sc->pdma_glo_cfg, tmp);
2201 
2202 	/* XXX set Rx filter */
2203 	return (0);
2204 }
2205 
2206 /*
2207  * rt_alloc_rx_ring - allocate RX DMA ring buffer
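 *
 * The descriptor array is a single physically contiguous DMA allocation;
 * each ring slot also gets its own jumbo-page (MJUMPAGESIZE) mbuf,
 * pre-mapped for DMA, whose bus address is written into the descriptor's
 * sdp0 field (offset by 2 bytes so the IP header ends up aligned).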
2208  */
2209 static int
2210 rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid)
2211 {
2212 	struct rt_rxdesc *desc;
2213 	struct rt_softc_rx_data *data;
2214 	bus_dma_segment_t segs[1];
2215 	int i, nsegs, error;
2216 
2217 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2218 		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2219 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
2220 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2221 		0, NULL, NULL, &ring->desc_dma_tag);
2222 	if (error != 0)	{
2223 		device_printf(sc->dev,
2224 		    "could not create Rx desc DMA tag\n");
2225 		goto fail;
2226 	}
2227 
2228 	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2229 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2230 	if (error != 0) {
2231 		device_printf(sc->dev,
2232 		    "could not allocate Rx desc DMA memory\n");
2233 		goto fail;
2234 	}
2235 
2236 	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2237 		ring->desc,
2238 		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
2239 		rt_dma_map_addr, &ring->desc_phys_addr, 0);
2240 	if (error != 0) {
2241 		device_printf(sc->dev, "could not load Rx desc DMA map\n");
2242 		goto fail;
2243 	}
2244 
2245 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2246 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2247 		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
2248 		&ring->data_dma_tag);
2249 	if (error != 0)	{
2250 		device_printf(sc->dev,
2251 		    "could not create Rx data DMA tag\n");
2252 		goto fail;
2253 	}
2254 
2255 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2256 		desc = &ring->desc[i];
2257 		data = &ring->data[i];
2258 
2259 		error = bus_dmamap_create(ring->data_dma_tag, 0,
2260 		    &data->dma_map);
2261 		if (error != 0)	{
2262 			device_printf(sc->dev, "could not create Rx data DMA "
2263 			    "map\n");
2264 			goto fail;
2265 		}
2266 
2267 		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2268 		    MJUMPAGESIZE);
2269 		if (data->m == NULL) {
2270 			device_printf(sc->dev, "could not allocate Rx mbuf\n");
2271 			error = ENOMEM;
2272 			goto fail;
2273 		}
2274 
2275 		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
2276 
2277 		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
2278 		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
2279 		if (error != 0)	{
2280 			device_printf(sc->dev,
2281 			    "could not load Rx mbuf DMA map\n");
2282 			goto fail;
2283 		}
2284 
2285 		KASSERT(nsegs == 1, ("%s: too many DMA segments",
2286 			device_get_nameunit(sc->dev)));
2287 
2288 		/* Add 2 for proper align of RX IP header */
2289 		desc->sdp0 = htole32(segs[0].ds_addr+2);
2290 		desc->sdl0 = htole32(segs[0].ds_len-2);
2291 	}
2292 
2293 	error = bus_dmamap_create(ring->data_dma_tag, 0,
2294 	    &ring->spare_dma_map);
2295 	if (error != 0) {
2296 		device_printf(sc->dev,
2297 		    "could not create Rx spare DMA map\n");
2298 		goto fail;
2299 	}
2300 
2301 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2302 		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2303 	ring->qid = qid;
2304 	return (0);
2305 
2306 fail:
2307 	rt_free_rx_ring(sc, ring);
2308 	return (error);
2309 }
2310 
2311 /*
2312  * rt_reset_rx_ring - reset RX ring buffer
2313  */
2314 static void
2315 rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2316 {
2317 	struct rt_rxdesc *desc;
2318 	int i;
2319 
2320 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2321 		desc = &ring->desc[i];
2322 		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2323 	}
2324 
2325 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2326 		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2327 	ring->cur = 0;
2328 }
2329 
2330 /*
2331  * rt_free_rx_ring - free memory used by RX ring buffer
2332  */
2333 static void
2334 rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2335 {
2336 	struct rt_softc_rx_data *data;
2337 	int i;
2338 
2339 	if (ring->desc != NULL) {
2340 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2341 			BUS_DMASYNC_POSTWRITE);
2342 		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2343 		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2344 			ring->desc_dma_map);
2345 	}
2346 
2347 	if (ring->desc_dma_tag != NULL)
2348 		bus_dma_tag_destroy(ring->desc_dma_tag);
2349 
2350 	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2351 		data = &ring->data[i];
2352 
2353 		if (data->m != NULL) {
2354 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2355 				BUS_DMASYNC_POSTREAD);
2356 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2357 			m_freem(data->m);
2358 		}
2359 
2360 		if (data->dma_map != NULL)
2361 			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2362 	}
2363 
2364 	if (ring->spare_dma_map != NULL)
2365 		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);
2366 
2367 	if (ring->data_dma_tag != NULL)
2368 		bus_dma_tag_destroy(ring->data_dma_tag);
2369 }
2370 
2371 /*
2372  * rt_alloc_tx_ring - allocate TX ring buffer
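 *
 * Besides the descriptor array, a contiguous per-slot "seg0" staging
 * area (RT_TX_DATA_SEG0_SIZE bytes per slot) is mapped; the transmit
 * path can use it as a bounce buffer when frame data has to be copied
 * before DMA.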
2373  */
2374 static int
2375 rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
2376 {
2377 	struct rt_softc_tx_data *data;
2378 	int error, i;
2379 
2380 	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);
2381 
2382 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2383 		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2384 		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
2385 		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
2386 		0, NULL, NULL, &ring->desc_dma_tag);
2387 	if (error != 0) {
2388 		device_printf(sc->dev,
2389 		    "could not create Tx desc DMA tag\n");
2390 		goto fail;
2391 	}
2392 
2393 	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
2394 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
2395 	if (error != 0)	{
2396 		device_printf(sc->dev,
2397 		    "could not allocate Tx desc DMA memory\n");
2398 		goto fail;
2399 	}
2400 
2401 	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
2402 	    ring->desc,	(RT_SOFTC_TX_RING_DESC_COUNT *
2403 	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
2404 	    &ring->desc_phys_addr, 0);
2405 	if (error != 0) {
2406 		device_printf(sc->dev, "could not load Tx desc DMA map\n");
2407 		goto fail;
2408 	}
2409 
2410 	ring->desc_queued = 0;
2411 	ring->desc_cur = 0;
2412 	ring->desc_next = 0;
2413 
2414 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2415 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2416 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
2417 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2418 	    0, NULL, NULL, &ring->seg0_dma_tag);
2419 	if (error != 0) {
2420 		device_printf(sc->dev,
2421 		    "could not create Tx seg0 DMA tag\n");
2422 		goto fail;
2423 	}
2424 
2425 	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
2426 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
2427 	if (error != 0) {
2428 		device_printf(sc->dev,
2429 		    "could not allocate Tx seg0 DMA memory\n");
2430 		goto fail;
2431 	}
2432 
2433 	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
2434 	    ring->seg0,
2435 	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
2436 	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
2437 	if (error != 0) {
2438 		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
2439 		goto fail;
2440 	}
2441 
2442 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
2443 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2444 	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
2445 	    &ring->data_dma_tag);
2446 	if (error != 0) {
2447 		device_printf(sc->dev,
2448 		    "could not create Tx data DMA tag\n");
2449 		goto fail;
2450 	}
2451 
2452 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2453 		data = &ring->data[i];
2454 
2455 		error = bus_dmamap_create(ring->data_dma_tag, 0,
2456 		    &data->dma_map);
2457 		if (error != 0) {
2458 			device_printf(sc->dev, "could not create Tx data DMA "
2459 			    "map\n");
2460 			goto fail;
2461 		}
2462 	}
2463 
2464 	ring->data_queued = 0;
2465 	ring->data_cur = 0;
2466 	ring->data_next = 0;
2467 
2468 	ring->qid = qid;
2469 	return (0);
2470 
2471 fail:
2472 	rt_free_tx_ring(sc, ring);
2473 	return (error);
2474 }
2475 
2476 /*
2477  * rt_reset_tx_ring - reset TX ring buffer to empty state
2478  */
2479 static void
2480 rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2481 {
2482 	struct rt_softc_tx_data *data;
2483 	struct rt_txdesc *desc;
2484 	int i;
2485 
2486 	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
2487 		desc = &ring->desc[i];
2488 
2489 		desc->sdl0 = 0;
2490 		desc->sdl1 = 0;
2491 	}
2492 
2493 	ring->desc_queued = 0;
2494 	ring->desc_cur = 0;
2495 	ring->desc_next = 0;
2496 
2497 	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2498 		BUS_DMASYNC_PREWRITE);
2499 
2500 	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2501 		BUS_DMASYNC_PREWRITE);
2502 
2503 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2504 		data = &ring->data[i];
2505 
2506 		if (data->m != NULL) {
2507 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2508 				BUS_DMASYNC_POSTWRITE);
2509 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2510 			m_freem(data->m);
2511 			data->m = NULL;
2512 		}
2513 	}
2514 
2515 	ring->data_queued = 0;
2516 	ring->data_cur = 0;
2517 	ring->data_next = 0;
2518 }
2519 
2520 /*
2521  * rt_free_tx_ring - free TX ring buffer
2522  */
2523 static void
2524 rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
2525 {
2526 	struct rt_softc_tx_data *data;
2527 	int i;
2528 
2529 	if (ring->desc != NULL) {
2530 		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2531 			BUS_DMASYNC_POSTWRITE);
2532 		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
2533 		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
2534 			ring->desc_dma_map);
2535 	}
2536 
2537 	if (ring->desc_dma_tag != NULL)
2538 		bus_dma_tag_destroy(ring->desc_dma_tag);
2539 
2540 	if (ring->seg0 != NULL) {
2541 		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
2542 			BUS_DMASYNC_POSTWRITE);
2543 		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
2544 		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
2545 			ring->seg0_dma_map);
2546 	}
2547 
2548 	if (ring->seg0_dma_tag != NULL)
2549 		bus_dma_tag_destroy(ring->seg0_dma_tag);
2550 
2551 	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
2552 		data = &ring->data[i];
2553 
2554 		if (data->m != NULL) {
2555 			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
2556 				BUS_DMASYNC_POSTWRITE);
2557 			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
2558 			m_freem(data->m);
2559 		}
2560 
2561 		if (data->dma_map != NULL)
2562 			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
2563 	}
2564 
2565 	if (ring->data_dma_tag != NULL)
2566 		bus_dma_tag_destroy(ring->data_dma_tag);
2567 
2568 	mtx_destroy(&ring->lock);
2569 }
2570 
2571 /*
2572  * rt_dma_map_addr - get address of busdma segment
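 *
 * This is the bus_dmamap_load() callback used for the single-segment
 * ring allocations above; it simply stores segs[0].ds_addr through the
 * caller-supplied pointer.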
2573  */
2574 static void
2575 rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2576 {
2577 	if (error != 0)
2578 		return;
2579 
2580 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2581 
2582 	*(bus_addr_t *) arg = segs[0].ds_addr;
2583 }
2584 
2585 /*
2586  * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2587  */
2588 static void
2589 rt_sysctl_attach(struct rt_softc *sc)
2590 {
2591 	struct sysctl_ctx_list *ctx;
2592 	struct sysctl_oid *tree;
2593 	struct sysctl_oid *stats;
2594 
2595 	ctx = device_get_sysctl_ctx(sc->dev);
2596 	tree = device_get_sysctl_tree(sc->dev);
2597 
2598 	/* statistic counters */
2599 	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2600 	    "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "statistics");
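	/*
	 * These counters appear under the device's sysctl tree, e.g.
	 * "sysctl dev.rt.0.stats" for unit 0.
	 */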
2601 
2602 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2603 	    "interrupts", CTLFLAG_RD, &sc->interrupts,
2604 	    "all interrupts");
2605 
2606 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2607 	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
2608 	    "Tx coherent interrupts");
2609 
2610 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2611 	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
2612 	    "Rx coherent interrupts");
2613 
2614 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2615 	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0],
2616 	    "Rx interrupts");
2617 
2618 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2619 	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts,
2620 	    "Rx delay interrupts");
2621 
2622 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2623 	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3],
2624 	    "Tx AC3 interrupts");
2625 
2626 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2627 	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2],
2628 	    "Tx AC2 interrupts");
2629 
2630 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2631 	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1],
2632 	    "Tx AC1 interrupts");
2633 
2634 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2635 	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0],
2636 	    "Tx AC0 interrupts");
2637 
2638 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2639 	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
2640 	    "Tx delay interrupts");
2641 
2642 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2643 	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
2644 	    0, "Tx AC3 descriptors queued");
2645 
2646 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2647 	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
2648 	    0, "Tx AC3 data queued");
2649 
2650 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2651 	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
2652 	    0, "Tx AC2 descriptors queued");
2653 
2654 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2655 	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
2656 	    0, "Tx AC2 data queued");
2657 
2658 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2659 	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
2660 	    0, "Tx AC1 descriptors queued");
2661 
2662 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2663 	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
2664 	    0, "Tx AC1 data queued");
2665 
2666 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2667 	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
2668 	    0, "Tx AC0 descriptors queued");
2669 
2670 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2671 	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
2672 	    0, "Tx AC0 data queued");
2673 
2674 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2675 	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
2676 	    "Tx AC3 data queue full");
2677 
2678 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2679 	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
2680 	    "Tx AC2 data queue full");
2681 
2682 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2683 	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
2684 	    "Tx AC1 data queue full");
2685 
2686 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2687 	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
2688 	    "Tx AC0 data queue full");
2689 
2690 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2691 	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
2692 	    "Tx watchdog timeouts");
2693 
2694 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2695 	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets,
2696 	    "Tx defragmented packets");
2697 
2698 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2699 	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail,
2700 	    "no Tx descriptors available");
2701 
2702 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2703 	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
2704 	    "Rx mbuf allocation errors");
2705 
2706 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2707 	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
2708 	    "Rx mbuf DMA mapping errors");
2709 
2710 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2711 	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
2712 	    "Tx queue 0 not empty");
2713 
2714 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2715 	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
2716 	    "Tx queue 1 not empty");
2717 
2718 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2719 	    "rx_packets", CTLFLAG_RD, &sc->rx_packets,
2720 	    "Rx packets");
2721 
2722 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2723 	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err,
2724 	    "Rx CRC errors");
2725 
2726 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2727 	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err,
2728 	    "Rx PHY errors");
2729 
2730 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2731 	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets,
2732 	    "Rx duplicate packets");
2733 
2734 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2735 	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows,
2736 	    "Rx FIFO overflows");
2737 
2738 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2739 	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes,
2740 	    "Rx bytes");
2741 
2742 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2743 	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err,
2744 	    "Rx too long frame errors");
2745 
2746 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2747 	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err,
2748 	    "Rx too short frame errors");
2749 
2750 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2751 	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes,
2752 	    "Tx bytes");
2753 
2754 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2755 	    "tx_packets", CTLFLAG_RD, &sc->tx_packets,
2756 	    "Tx packets");
2757 
2758 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2759 	    "tx_skip", CTLFLAG_RD, &sc->tx_skip,
2760 	    "Tx skip count for GDMA ports");
2761 
2762 	SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
2763 	    "tx_collision", CTLFLAG_RD, &sc->tx_collision,
2764 	    "Tx collision count for GDMA ports");
2765 }
2766 
2767 #if defined(IF_RT_PHY_SUPPORT) || defined(RT_MDIO)
2768 /* This code only works on the RT2880 and similar chips. */
2769 /* TODO: add support for RT3052 and later. But does anybody need it? */
2770 static int
2771 rt_miibus_readreg(device_t dev, int phy, int reg)
2772 {
2773 	struct rt_softc *sc = device_get_softc(dev);
2774 	int dat;
2775 
2776 	/*
2777 	 * PSEUDO_PHYAD is a special value indicating that a switch is
2778 	 * attached; no real PHY uses the PSEUDO_PHYAD (0x1e) address.
2779 	 */
2780 #ifndef RT_MDIO
2781 	if (phy == 31) {
2782 		/* Fake PHY ID for bfeswitch attach */
2783 		switch (reg) {
2784 		case MII_BMSR:
2785 			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
2786 		case MII_PHYIDR1:
2787 			return (0x40);		/* As result of faking */
2788 		case MII_PHYIDR2:		/* PHY will detect as */
2789 			return (0x6250);		/* bfeswitch */
2790 		}
2791 	}
2792 #endif
2793 
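	/*
	 * An MDIO cycle is started by writing the address word and then
	 * rewriting it with MDIO_CMD_ONGO set; the hardware clears that
	 * bit once the transfer on the management bus has finished.
	 */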
2794 	/* Wait for any previous command to complete. */
2795 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2796 	dat = ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2797 	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK);
2798 	RT_WRITE(sc, MDIO_ACCESS, dat);
2799 	RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO);
2800 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2801 
2802 	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2803 }
2804 
2805 static int
2806 rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2807 {
2808 	struct rt_softc *sc = device_get_softc(dev);
2809 	int dat;
2810 
2811 	/* Wait for any previous command to complete. */
2812 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2813 	dat = MDIO_CMD_WR |
2814 	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
2815 	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
2816 	    (val & MDIO_PHY_DATA_MASK);
2817 	RT_WRITE(sc, MDIO_ACCESS, dat);
2818 	RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO);
2819 	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2820 
2821 	return (0);
2822 }
2823 #endif
2824 
2825 #ifdef IF_RT_PHY_SUPPORT
2826 void
2827 rt_miibus_statchg(device_t dev)
2828 {
2829 	struct rt_softc *sc = device_get_softc(dev);
2830 	struct mii_data *mii;
2831 
2832 	mii = device_get_softc(sc->rt_miibus);
2833 
2834 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2835 	    (IFM_ACTIVE | IFM_AVALID)) {
2836 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2837 		case IFM_10_T:
2838 		case IFM_100_TX:
2839 			/* XXX check link here */
2840 			sc->flags |= 1;
2841 			break;
2842 		default:
2843 			break;
2844 		}
2845 	}
2846 }
2847 #endif /* IF_RT_PHY_SUPPORT */
2848 
2849 static device_method_t rt_dev_methods[] =
2850 {
2851 	DEVMETHOD(device_probe, rt_probe),
2852 	DEVMETHOD(device_attach, rt_attach),
2853 	DEVMETHOD(device_detach, rt_detach),
2854 	DEVMETHOD(device_shutdown, rt_shutdown),
2855 	DEVMETHOD(device_suspend, rt_suspend),
2856 	DEVMETHOD(device_resume, rt_resume),
2857 
2858 #ifdef IF_RT_PHY_SUPPORT
2859 	/* MII interface */
2860 	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
2861 	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
2862 	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
2863 #endif
2864 
2865 	DEVMETHOD_END
2866 };
2867 
2868 static driver_t rt_driver =
2869 {
2870 	"rt",
2871 	rt_dev_methods,
2872 	sizeof(struct rt_softc)
2873 };
2874 
2875 static devclass_t rt_dev_class;
2876 
2877 DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
2878 #ifdef FDT
2879 DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0);
2880 #endif
2881 
2882 MODULE_DEPEND(rt, ether, 1, 1, 1);
2883 MODULE_DEPEND(rt, miibus, 1, 1, 1);
2884 
2885 #ifdef RT_MDIO
2886 MODULE_DEPEND(rt, mdio, 1, 1, 1);
2887 
2888 static int rtmdio_probe(device_t);
2889 static int rtmdio_attach(device_t);
2890 static int rtmdio_detach(device_t);
2891 
2892 static struct mtx miibus_mtx;
2893 
2894 MTX_SYSINIT(miibus_mtx, &miibus_mtx, "rt mii lock", MTX_DEF);
2895 
2896 /*
2897  * Declare an additional, separate driver for accessing the MDIO bus.
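 * It matches the FDT node with compatible string "ralink,rt2880-mdio"
 * (see rtmdio_probe() below) and services MDIO register access through
 * rt_miibus_readreg()/rt_miibus_writereg() on the shared register window.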
2898  */
2899 static device_method_t rtmdio_methods[] = {
2900 	/* Device interface */
2901 	DEVMETHOD(device_probe,         rtmdio_probe),
2902 	DEVMETHOD(device_attach,        rtmdio_attach),
2903 	DEVMETHOD(device_detach,        rtmdio_detach),
2904 
2905 	/* bus interface */
2906 	DEVMETHOD(bus_add_child,        device_add_child_ordered),
2907 
2908 	/* MDIO access */
2909 	DEVMETHOD(mdio_readreg,         rt_miibus_readreg),
2910 	DEVMETHOD(mdio_writereg,        rt_miibus_writereg),
2911 };
2912 
2913 DEFINE_CLASS_0(rtmdio, rtmdio_driver, rtmdio_methods,
2914     sizeof(struct rt_softc));
2915 static devclass_t rtmdio_devclass;
2916 
2917 DRIVER_MODULE(miiproxy, rt, miiproxy_driver, miiproxy_devclass, 0, 0);
2918 DRIVER_MODULE(rtmdio, simplebus, rtmdio_driver, rtmdio_devclass, 0, 0);
2919 DRIVER_MODULE(mdio, rtmdio, mdio_driver, mdio_devclass, 0, 0);
2920 
2921 static int
2922 rtmdio_probe(device_t dev)
2923 {
2924 	if (!ofw_bus_status_okay(dev))
2925 		return (ENXIO);
2926 
2927 	if (!ofw_bus_is_compatible(dev, "ralink,rt2880-mdio"))
2928 		return (ENXIO);
2929 
2930 	device_set_desc(dev, "RT built-in ethernet interface, MDIO controller");
2931 	return (0);
2932 }
2933 
2934 static int
2935 rtmdio_attach(device_t dev)
2936 {
2937 	struct rt_softc	*sc;
2938 	int	error;
2939 
2940 	sc = device_get_softc(dev);
2941 	sc->dev = dev;
2942 	sc->mem_rid = 0;
2943 	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2944 	    &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE);
2945 	if (sc->mem == NULL) {
2946 		device_printf(dev, "couldn't map memory\n");
2947 		error = ENXIO;
2948 		goto fail;
2949 	}
2950 
2951 	sc->bst = rman_get_bustag(sc->mem);
2952 	sc->bsh = rman_get_bushandle(sc->mem);
2953 
2954 	bus_generic_probe(dev);
2955 	bus_enumerate_hinted_children(dev);
2956 	error = bus_generic_attach(dev);
2957 fail:
2958 	return (error);
2959 }
2960 
2961 static int
2962 rtmdio_detach(device_t dev)
2963 {
2964 	return (0);
2965 }
2966 #endif
2967