/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Some Rhine chips have a serious flaw in their transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vr/if_vrreg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* Define to show Rx/Tx error status. */
#undef VR_SHOW_ERRORS
#define	VR_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types, their names & quirks.
 */
#define	VR_Q_NEEDALIGN		(1<<0)
#define	VR_Q_CSUM		(1<<1)
#define	VR_Q_CAM		(1<<2)
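
/*
 * VR_Q_NEEDALIGN: Tx DMA needs longword-aligned buffers, so vr_encap()
 * copies/defragments outgoing mbufs for these chips.
 * VR_Q_CSUM: the chip can compute IP/TCP/UDP checksums in hardware.
 * VR_Q_CAM: the chip has a CAM-based perfect multicast filter,
 * programmed via vr_cam_data() and vr_cam_mask().
 */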

static const struct vr_type {
	u_int16_t		vr_vid;
	u_int16_t		vr_did;
	int			vr_quirks;
	const char		*vr_name;
} vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
	    VR_Q_NEEDALIGN,
	    "VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
	    0,
	    "VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
	    0,
	    "VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
	    VR_Q_CSUM,
	    "VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, 0, NULL }
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);
static int vr_shutdown(device_t);
static int vr_suspend(device_t);
static int vr_resume(device_t);

static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int vr_dma_alloc(struct vr_softc *);
static void vr_dma_free(struct vr_softc *);
static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
static int vr_newbuf(struct vr_softc *, int);

#ifndef __NO_STRICT_ALIGNMENT
static __inline void vr_fixup_rx(struct mbuf *);
#endif
static int vr_rxeof(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static int vr_error(struct vr_softc *, uint16_t);
static void vr_tx_underrun(struct vr_softc *);
static int vr_intr(void *);
static void vr_int_task(void *, int);
static void vr_start(struct ifnet *);
static void vr_start_locked(struct ifnet *);
static int vr_encap(struct vr_softc *, struct mbuf **);
static int vr_ioctl(struct ifnet *, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_tx_start(struct vr_softc *);
static void vr_rx_start(struct vr_softc *);
static int vr_tx_stop(struct vr_softc *);
static int vr_rx_stop(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct vr_softc *);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int vr_miibus_readreg(device_t, int, int);
static int vr_miibus_writereg(device_t, int, int, int);
static void vr_miibus_statchg(device_t);

static void vr_cam_mask(struct vr_softc *, uint32_t, int);
static int vr_cam_data(struct vr_softc *, int, int, uint8_t *);
static void vr_set_filter(struct vr_softc *);
static void vr_reset(const struct vr_softc *);
static int vr_tx_ring_init(struct vr_softc *);
static int vr_rx_ring_init(struct vr_softc *);
static void vr_setwol(struct vr_softc *);
static void vr_clrwol(struct vr_softc *);
static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);

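/*
 * Tx FIFO threshold table: sc->vr_txthresh indexes it, and
 * vr_tx_underrun() bumps the index one step at a time until the
 * chip ends up in store-and-forward mode.
 */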
static const struct vr_tx_threshold_table {
	int tx_cfg;
	int bcr_cfg;
	int value;
} vr_tx_threshold_tables[] = {
	{ VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 },
	{ VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
	{ VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
	{ VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
	{ VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
	{ VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
};

static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach,	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),
	DEVMETHOD(device_suspend,	vr_suspend),
	DEVMETHOD(device_resume,	vr_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

static int
vr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vr_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);

	return (CSR_READ_2(sc, VR_MIIDATA));
}

static int
vr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vr_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* Set the register address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	CSR_WRITE_2(sc, VR_MIIDATA, data);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
		    reg);

	return (0);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
vr_miibus_statchg(device_t dev)
{
	struct vr_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int lfdx, mfdx;
	uint8_t cr0, cr1, fc;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vr_miibus);
	ifp = sc->vr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vr_flags |= VR_F_LINK;
			break;
		default:
			break;
		}
	}

	if ((sc->vr_flags & VR_F_LINK) != 0) {
		cr0 = CSR_READ_1(sc, VR_CR0);
		cr1 = CSR_READ_1(sc, VR_CR1);
		mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
		lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
		if (mfdx != lfdx) {
			if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
				if (vr_tx_stop(sc) != 0 ||
				    vr_rx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx/Rx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					VR_UNLOCK(sc);
					return;
				}
			}
			if (lfdx)
				cr1 |= VR_CR1_FULLDUPLEX;
			else
				cr1 &= ~VR_CR1_FULLDUPLEX;
			CSR_WRITE_1(sc, VR_CR1, cr1);
		}
		fc = 0;
		/* Configure flow-control. */
		if (sc->vr_revid >= REV_ID_VT6105_A0) {
			fc = CSR_READ_1(sc, VR_FLOWCR1);
			fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_FLOWCR1_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0) {
				fc |= VR_FLOWCR1_TXPAUSE;
				sc->vr_flags |= VR_F_TXPAUSE;
			}
			CSR_WRITE_1(sc, VR_FLOWCR1, fc);
		} else if (sc->vr_revid >= REV_ID_VT6102_A) {
			/* No Tx pause capability is available for Rhine II. */
			fc = CSR_READ_1(sc, VR_MISC_CR0);
			fc &= ~VR_MISCCR0_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_MISCCR0_RXPAUSE;
			CSR_WRITE_1(sc, VR_MISC_CR0, fc);
		}
		vr_rx_start(sc);
		vr_tx_start(sc);
	} else {
		if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
			device_printf(sc->vr_dev,
			    "%s: Tx/Rx shutdown error -- resetting\n",
			    __func__);
			sc->vr_flags |= VR_F_RESTART;
		}
	}
}

static void
vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
{

	if (type == VR_MCAST_CAM)
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
	CSR_WRITE_4(sc, VR_CAMMASK, mask);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);
}

static int
vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
{
	int i;

	if (type == VR_MCAST_CAM) {
		if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
			return (EINVAL);
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	} else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);

	/* Set CAM entry address. */
	CSR_WRITE_1(sc, VR_CAMADDR, idx);
	/* Set CAM entry data. */
	if (type == VR_MCAST_CAM) {
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
	} else {
		CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
		CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
	}
	DELAY(10);
	/* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
	CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VR_TIMEOUT)
		device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
		    __func__);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);

	return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
}

struct vr_hash_maddr_cam_ctx {
	struct vr_softc *sc;
	uint32_t mask;
	int error;
};

static u_int
vr_hash_maddr_cam(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	struct vr_hash_maddr_cam_ctx *ctx = arg;

	if (ctx->error != 0)
		return (0);
	ctx->error = vr_cam_data(ctx->sc, VR_MCAST_CAM, mcnt, LLADDR(sdl));
	if (ctx->error != 0) {
		ctx->mask = 0;
		return (0);
	}
	ctx->mask |= 1 << mcnt;

	return (1);
}

static u_int
vr_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int h;

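	/*
	 * The upper 6 bits of the big-endian CRC of the MAC address
	 * select one of 64 hash bits; bits 0-31 live in VR_MAR0 and
	 * bits 32-63 in VR_MAR1 (programmed in vr_set_filter()).
	 */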
	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1 << h);
	else
		hashes[1] |= (1 << (h - 32));

	return (1);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_set_filter(struct vr_softc *sc)
{
	struct ifnet *ifp;
	uint32_t hashes[2] = { 0, 0 };
	uint8_t rxfilt;
	int error, mcnt;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
	    VR_RXCFG_RX_MULTI);
	if (ifp->if_flags & IFF_BROADCAST)
		rxfilt |= VR_RXCFG_RX_BROAD;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= VR_RXCFG_RX_PROMISC;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones. */
	error = 0;
	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		struct vr_hash_maddr_cam_ctx ctx;

		/*
		 * For hardware that has CAM capability, use the
		 * 32-entry multicast perfect filter.
		 */
		ctx.sc = sc;
		ctx.mask = 0;
		ctx.error = 0;
		mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr_cam, &ctx);
		vr_cam_mask(sc, ctx.mask, VR_MCAST_CAM);
		error = ctx.error;
	}

	if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
		/*
		 * If there are too many multicast addresses or
		 * setting multicast CAM filter failed, use hash
		 * table based filtering.
		 */
		mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr, hashes);
	}

	if (mcnt > 0)
		rxfilt |= VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(const struct vr_softc *sc)
{
	int i;

	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */

	CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
	if (sc->vr_revid < REV_ID_VT6102_A) {
		/* VT86C100A needs more delay after reset. */
		DELAY(100);
	}
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT6102_A)
			device_printf(sc->vr_dev, "reset never completed!\n");
		else {
			/* Use newer force reset command. */
			device_printf(sc->vr_dev,
			    "Using force reset command.\n");
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
			/*
			 * Wait a little while for the chip to get its brains
			 * in order.
			 */
			DELAY(2000);
		}
	}

}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a match or NULL.
 */
static const struct vr_type *
vr_match(device_t dev)
{
	const struct vr_type *t;

	for (t = vr_devs; t->vr_name != NULL; t++)
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did))
			return (t);
	return (NULL);
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
	const struct vr_type *t;

	t = vr_match(dev);
	if (t != NULL) {
		device_set_desc(dev, t->vr_name);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(device_t dev)
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	const struct vr_type *t;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error, rid;
	int i, phy, pmc;

	sc = device_get_softc(dev);
	sc->vr_dev = dev;
	t = vr_match(dev);
	KASSERT(t != NULL, ("Lost if_vr device match"));
	sc->vr_quirks = t->vr_quirks;
	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);

	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, vr_sysctl_stats, "I", "Statistics");

	error = 0;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_get_revid(dev);
	device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);

	sc->vr_res_id = PCIR_BAR(0);
	sc->vr_res_type = SYS_RES_IOPORT;
	sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
	    &sc->vr_res_id, RF_ACTIVE);
	if (sc->vr_res == NULL) {
		device_printf(dev, "couldn't map ports\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt. */
	rid = 0;
	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_init = vr_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1);
	ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);

	/* Configure Tx FIFO threshold. */
	sc->vr_txthresh = VR_TXTHRESH_MIN;
	if (sc->vr_revid < REV_ID_VT6105_A0) {
		/*
		 * Use store and forward mode for Rhine I/II.
		 * Otherwise they produce a lot of Tx underruns and
		 * it would take a while to arrive at a working FIFO
		 * threshold value.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}
	if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
		ifp->if_hwassist = VR_CSUM_FEATURES;
		ifp->if_capabilities |= IFCAP_HWCSUM;
		/*
		 * To update the checksum field the hardware may need to
		 * store the entire frame in its FIFO before transmitting.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}

	if (sc->vr_revid >= REV_ID_VT6102_A &&
	    pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC;

	/* Rhine supports oversized VLAN frame. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 * Reloading the EEPROM also overwrites VR_CFGA, VR_CFGB,
	 * VR_CFGC and VR_CFGD, such that the memory-mapped I/O
	 * configured by the driver is reset to its default state.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	for (i = VR_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
			break;
	}
	if (i == 0)
		device_printf(dev, "Reloading EEPROM timeout!\n");
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/* Reset the adapter. */
	vr_reset(sc);
	/* Ack intr & disable further interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0);
	if (sc->vr_revid >= REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	if (sc->vr_revid < REV_ID_VT6102_A) {
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_MODE10T, 1);
	} else {
		/* Report error instead of retrying forever. */
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_PCEROPT, 1);
		/* Detect MII coding error. */
		pci_write_config(dev, VR_PCI_MODE3,
		    pci_read_config(dev, VR_PCI_MODE3, 1) |
		    VR_MODE3_MIION, 1);
		if (sc->vr_revid >= REV_ID_VT6105_LOM &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MODE10T, 1);
		/* Enable Memory-Read-Multiple. */
		if (sc->vr_revid >= REV_ID_VT6107_A1 &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MRDPL, 1);
	}
	/* Disable MII AUTOPOLL. */
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	if (vr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Do MII setup. */
	if (sc->vr_revid >= REV_ID_VT6105_A0)
		phy = 1;
	else
		phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
	error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd,
	    vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
	    sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    vr_intr, NULL, sc, &sc->vr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vr_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		VR_LOCK(sc);
		sc->vr_flags |= VR_F_DETACHED;
		vr_stop(sc);
		VR_UNLOCK(sc);
		callout_drain(&sc->vr_stat_callout);
		taskqueue_drain(taskqueue_fast, &sc->vr_inttask);
		ether_ifdetach(ifp);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
		    sc->vr_res);

	if (ifp)
		if_free(ifp);

	vr_dma_free(sc);

	mtx_destroy(&sc->vr_mtx);

	return (0);
}

struct vr_dmamap_arg {
	bus_addr_t	vr_busaddr;
};

static void
vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct vr_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->vr_busaddr = segs[0].ds_addr;
}

static int
vr_dma_alloc(struct vr_softc *sc)
{
	struct vr_dmamap_arg ctx;
	struct vr_txdesc *txd;
	struct vr_rxdesc *rxd;
	bus_size_t tx_alignment;
	int error, i;

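	/*
	 * The descriptors store 32-bit bus addresses (see VR_ADDR_LO()
	 * below), so the parent tag limits all allocations to the
	 * low 4GB of the address space.
	 */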
	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vr_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
	    0,					/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_parent_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,		/* parent */
	    VR_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    VR_TX_RING_SIZE,			/* maxsize */
	    1,					/* nsegments */
	    VR_TX_RING_SIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,		/* parent */
	    VR_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    VR_RX_RING_SIZE,			/* maxsize */
	    1,					/* nsegments */
	    VR_RX_RING_SIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
		tx_alignment = sizeof(uint32_t);
	else
		tx_alignment = 1;
	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,		/* parent */
	    tx_alignment, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES * VR_MAXFRAGS,		/* maxsize */
	    VR_MAXFRAGS,			/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,		/* parent */
	    VR_RX_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
	    (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
	    VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
	    (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
	    VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
	    &sc->vr_cdata.vr_rx_sparemap)) != 0) {
		device_printf(sc->vr_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vr_dma_free(struct vr_softc *sc)
{
	struct vr_txdesc *txd;
	struct vr_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->vr_cdata.vr_tx_ring_tag) {
		if (sc->vr_rdata.vr_tx_ring_paddr)
			bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_cdata.vr_tx_ring_map);
		if (sc->vr_rdata.vr_tx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_rdata.vr_tx_ring,
			    sc->vr_cdata.vr_tx_ring_map);
		sc->vr_rdata.vr_tx_ring = NULL;
		sc->vr_rdata.vr_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
		sc->vr_cdata.vr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->vr_cdata.vr_rx_ring_tag) {
		if (sc->vr_rdata.vr_rx_ring_paddr)
			bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_cdata.vr_rx_ring_map);
		if (sc->vr_rdata.vr_rx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_rdata.vr_rx_ring,
			    sc->vr_cdata.vr_rx_ring_map);
		sc->vr_rdata.vr_rx_ring = NULL;
		sc->vr_rdata.vr_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
		sc->vr_cdata.vr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->vr_cdata.vr_tx_tag) {
		for (i = 0; i < VR_TX_RING_CNT; i++) {
			txd = &sc->vr_cdata.vr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
		sc->vr_cdata.vr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->vr_cdata.vr_rx_tag) {
		for (i = 0; i < VR_RX_RING_CNT; i++) {
			rxd = &sc->vr_cdata.vr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vr_cdata.vr_rx_sparemap) {
			bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
			    sc->vr_cdata.vr_rx_sparemap);
			sc->vr_cdata.vr_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
		sc->vr_cdata.vr_rx_tag = NULL;
	}

	if (sc->vr_cdata.vr_parent_tag) {
		bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
		sc->vr_cdata.vr_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_tx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data *rd;
	struct vr_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vr_cdata.vr_tx_prod = 0;
	sc->vr_cdata.vr_tx_cons = 0;
	sc->vr_cdata.vr_tx_cnt = 0;
	sc->vr_cdata.vr_tx_pkts = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		if (i == VR_TX_RING_CNT - 1)
			addr = VR_TX_RING_ADDR(sc, 0);
		else
			addr = VR_TX_RING_ADDR(sc, i + 1);
		rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_rx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data *rd;
	struct vr_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vr_cdata.vr_rx_cons = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->vr_rx_ring[i];
		if (i == VR_RX_RING_CNT - 1)
			addr = VR_RX_RING_ADDR(sc, 0);
		else
			addr = VR_RX_RING_ADDR(sc, i + 1);
		rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		if (vr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static __inline void
vr_discard_rxbuf(struct vr_rxdesc *rxd)
{
	struct vr_desc *desc;

	desc = rxd->desc;
	desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
	desc->vr_status = htole32(VR_RXSTAT_OWN);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to leave some slack (we trim off
 * sizeof(uint64_t) bytes below), otherwise we'll overflow the
 * field and make a mess.
 */
static int
vr_newbuf(struct vr_softc *sc, int idx)
{
	struct vr_desc *desc;
	struct vr_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
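	/*
	 * Trim sizeof(uint64_t) bytes from the front so the mapped
	 * length (MCLBYTES - 8) fits within the 11-bit descriptor
	 * length field described above.
	 */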
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
	    sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vr_cdata.vr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
	}
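	/*
	 * The new mbuf was loaded into the spare map above, so the old
	 * mbuf stayed mapped if the load failed. Swap the maps so this
	 * descriptor uses the freshly loaded map and the old map
	 * becomes the new spare.
	 */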
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
	sc->vr_cdata.vr_rx_sparemap = map;
	bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
	desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
	desc->vr_status = htole32(VR_RXSTAT_OWN);

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
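/*
 * The chip DMAs Rx frames into 32-bit aligned buffers, which leaves
 * the 14-byte Ethernet header, and hence the IP header behind it,
 * misaligned. On strict-alignment machines, shift the whole frame
 * back by ETHER_ALIGN (2) bytes, one 16-bit word at a time, so the
 * payload ends up properly aligned.
 */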
static __inline void
vr_fixup_rx(struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
vr_rxeof(struct vr_softc *sc)
{
	struct vr_rxdesc *rxd;
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *cur_rx;
	int cons, prog, total_len, rx_npkts;
	uint32_t rxstat, rxctl;

	VR_LOCK_ASSERT(sc);
	ifp = sc->vr_ifp;
	cons = sc->vr_cdata.vr_rx_cons;
	rx_npkts = 0;

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
		rxstat = le32toh(cur_rx->vr_status);
		rxctl = le32toh(cur_rx->vr_ctl);
		if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
			break;

		prog++;
		rxd = &sc->vr_cdata.vr_rxdesc[cons];
		m = rxd->rx_m;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 * We don't support SG in Rx path yet, so discard
		 * partial frame.
		 */
		if ((rxstat & VR_RXSTAT_RX_OK) == 0 ||
		    (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) !=
		    (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			sc->vr_stat.rx_errors++;
			if (rxstat & VR_RXSTAT_CRCERR)
				sc->vr_stat.rx_crc_errors++;
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				sc->vr_stat.rx_alignment++;
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				sc->vr_stat.rx_fifo_overflows++;
			if (rxstat & VR_RXSTAT_GIANT)
				sc->vr_stat.rx_giants++;
			if (rxstat & VR_RXSTAT_RUNT)
				sc->vr_stat.rx_runts++;
			if (rxstat & VR_RXSTAT_BUFFERR)
				sc->vr_stat.rx_no_buffers++;
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
#endif
			vr_discard_rxbuf(rxd);
			continue;
		}

		if (vr_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			sc->vr_stat.rx_errors++;
			sc->vr_stat.rx_no_mbufs++;
			vr_discard_rxbuf(rxd);
			continue;
		}

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len = VR_RXBYTES(rxstat);
		total_len -= ETHER_CRC_LEN;
		m->m_pkthdr.len = m->m_len = total_len;
#ifndef	__NO_STRICT_ALIGNMENT
		/*
		 * RX buffers must be 32-bit aligned.
		 * Ignore the alignment problems on the non-strict alignment
		 * platform. The performance hit incurred due to unaligned
		 * accesses is much smaller than the hit produced by forcing
		 * buffer copies all the time.
		 */
		vr_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		sc->vr_stat.rx_ok++;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
		    (rxctl & VR_RXCTL_IP) != 0) {
			/* Checksum is valid for non-fragmented IP packets. */
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
						m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}
		VR_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VR_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		/*
		 * Let the controller know how many Rx buffers have
		 * been posted, but avoid the expensive register access
		 * if the Tx pause capability was not negotiated with
		 * the link partner.
		 */
		if ((sc->vr_flags & VR_F_TXPAUSE) != 0) {
			if (prog >= VR_RX_RING_CNT)
				prog = VR_RX_RING_CNT - 1;
			CSR_WRITE_1(sc, VR_FLOWCR0, prog);
		}
		sc->vr_cdata.vr_rx_cons = cons;
		bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
		    sc->vr_cdata.vr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct vr_txdesc *txd;
	struct vr_desc *cur_tx;
	struct ifnet *ifp;
	uint32_t txctl, txstat;
	int cons, prod;

	VR_LOCK_ASSERT(sc);

	cons = sc->vr_cdata.vr_tx_cons;
	prod = sc->vr_cdata.vr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->vr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
		cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
		txctl = le32toh(cur_tx->vr_ctl);
		txstat = le32toh(cur_tx->vr_status);
		if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
			break;

		sc->vr_cdata.vr_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* Only the first descriptor in the chain is valid. */
		if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
			continue;

		txd = &sc->vr_cdata.vr_txdesc[cons];
		KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
		    __func__));

		if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			sc->vr_stat.tx_errors++;
			if ((txstat & VR_TXSTAT_ABRT) != 0) {
				/* Give up and restart Tx. */
				sc->vr_stat.tx_abort++;
				bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				VR_INC(cons, VR_TX_RING_CNT);
				sc->vr_cdata.vr_tx_cons = cons;
				if (vr_tx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					return;
				}
				vr_tx_start(sc);
				break;
			}
			if ((sc->vr_revid < REV_ID_VT3071_A &&
			    (txstat & VR_TXSTAT_UNDERRUN)) ||
			    (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
				sc->vr_stat.tx_underrun++;
				/* Retry and restart Tx. */
				sc->vr_cdata.vr_tx_cnt++;
				sc->vr_cdata.vr_tx_cons = cons;
				cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
				bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
				    sc->vr_cdata.vr_tx_ring_map,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				vr_tx_underrun(sc);
				return;
			}
			if ((txstat & VR_TXSTAT_DEFER) != 0) {
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				sc->vr_stat.tx_collisions++;
			}
			if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				sc->vr_stat.tx_late_collisions++;
			}
		} else {
			sc->vr_stat.tx_ok++;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
		if (sc->vr_revid < REV_ID_VT3071_A) {
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
			    (txstat & VR_TXSTAT_COLLCNT) >> 3);
			sc->vr_stat.tx_collisions +=
			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
		} else {
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
			    (txstat & 0x0f));
			sc->vr_stat.tx_collisions += (txstat & 0x0f);
		}
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc->vr_cdata.vr_tx_cons = cons;
	if (sc->vr_cdata.vr_tx_cnt == 0)
		sc->vr_watchdog_timer = 0;
}

static void
vr_tick(void *xsc)
{
	struct vr_softc *sc;
	struct mii_data *mii;

	sc = (struct vr_softc *)xsc;

	VR_LOCK_ASSERT(sc);

	if ((sc->vr_flags & VR_F_RESTART) != 0) {
		device_printf(sc->vr_dev, "restarting\n");
		sc->vr_stat.num_restart++;
		sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vr_init_locked(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii = device_get_softc(sc->vr_miibus);
	mii_tick(mii);
	if ((sc->vr_flags & VR_F_LINK) == 0)
		vr_miibus_statchg(sc->vr_dev);
	vr_watchdog(sc);
	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

#ifdef DEVICE_POLLING
static poll_handler_t vr_poll;
static poll_handler_t vr_poll_locked;

static int
vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;
	int rx_npkts;

	sc = ifp->if_softc;
	rx_npkts = 0;

	VR_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		rx_npkts = vr_poll_locked(ifp, cmd, count);
	VR_UNLOCK(sc);
	return (rx_npkts);
}

static int
vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;
	int rx_npkts;

	sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	rx_npkts = vr_rxeof(sc);
	vr_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* Also check status register. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			return (rx_npkts);

		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0)
				return (rx_npkts);
		}
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			vr_rx_start(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

/* Back off the transmit threshold. */
static void
vr_tx_underrun(struct vr_softc *sc)
{
	int thresh;

	device_printf(sc->vr_dev, "Tx underrun -- ");
	if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
		thresh = sc->vr_txthresh;
		sc->vr_txthresh++;
		if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
			sc->vr_txthresh = VR_TXTHRESH_MAX;
			printf("using store and forward mode\n");
		} else
			printf("increasing Tx threshold(%d -> %d)\n",
			    vr_tx_threshold_tables[thresh].value,
			    vr_tx_threshold_tables[thresh + 1].value);
	} else
		printf("\n");
	sc->vr_stat.tx_underrun++;
	if (vr_tx_stop(sc) != 0) {
		device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
		    "resetting\n", __func__);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}
	vr_tx_start(sc);
}

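/*
 * Interrupt filter: runs in primary interrupt context, so it only
 * checks that the interrupt is ours, masks further interrupts and
 * defers the actual processing to vr_int_task() on the fast taskqueue.
 */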
static int
vr_intr(void *arg)
{
	struct vr_softc *sc;
	uint16_t status;

	sc = (struct vr_softc *)arg;

	status = CSR_READ_2(sc, VR_ISR);
	if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask);

	return (FILTER_HANDLED);
}

static void
vr_int_task(void *arg, int npending)
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	uint16_t status;

	sc = (struct vr_softc *)arg;

	VR_LOCK(sc);

	if ((sc->vr_flags & VR_F_SUSPENDED) != 0)
		goto done_locked;

	status = CSR_READ_2(sc, VR_ISR);
	ifp = sc->vr_ifp;
#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif

	/* Suppress unwanted interrupts. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (sc->vr_flags & VR_F_RESTART) != 0) {
		CSR_WRITE_2(sc, VR_IMR, 0);
		CSR_WRITE_2(sc, VR_ISR, status);
		goto done_locked;
	}

	for (; (status & VR_INTRS) != 0;) {
		CSR_WRITE_2(sc, VR_ISR, status);
		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0) {
				VR_UNLOCK(sc);
				return;
			}
		}
		vr_rxeof(sc);
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			/* Restart Rx if RxDMA SM was stopped. */
			vr_rx_start(sc);
		}
		vr_txeof(sc);

		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vr_start_locked(ifp);

		status = CSR_READ_2(sc, VR_ISR);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

done_locked:
	VR_UNLOCK(sc);
}

static int
vr_error(struct vr_softc *sc, uint16_t status)
{
	uint16_t pcis;

	status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
	if ((status & VR_ISR_BUSERR) != 0) {
		status &= ~VR_ISR_BUSERR;
		sc->vr_stat.bus_errors++;
		/* Disable further interrupts. */
		CSR_WRITE_2(sc, VR_IMR, 0);
		pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
		device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
		    "resetting\n", pcis);
		pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
		sc->vr_flags |= VR_F_RESTART;
		return (EAGAIN);
	}
	if ((status & VR_ISR_LINKSTAT2) != 0) {
		/* Link state change, duplex changes etc. */
		status &= ~VR_ISR_LINKSTAT2;
	}
	if ((status & VR_ISR_STATSOFLOW) != 0) {
		status &= ~VR_ISR_STATSOFLOW;
		if (sc->vr_revid >= REV_ID_VT6105M_A0) {
			/* Update MIB counters. */
		}
	}

	if (status != 0)
		device_printf(sc->vr_dev,
		    "unhandled interrupt, status = 0x%04x\n", status);
	return (0);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
vr_encap(struct vr_softc *sc, struct mbuf **m_head)
{
	struct vr_txdesc *txd;
	struct vr_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[VR_MAXFRAGS];
	uint32_t csum_flags, txctl;
	int error, i, nsegs, prod, si;
	int padlen;

	VR_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	/*
	 * Some VIA Rhine chips want packet buffers to be longword
	 * aligned, but very often our mbufs aren't. Rather than
	 * waste time trying to decide when to copy and when not
	 * to copy, just do it all the time.
	 */
1816 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
1817 m = m_defrag(*m_head, M_NOWAIT);
1818 if (m == NULL) {
1819 m_freem(*m_head);
1820 *m_head = NULL;
1821 return (ENOBUFS);
1822 }
1823 *m_head = m;
1824 }
1825
1826 /*
1827 * The Rhine chip doesn't auto-pad, so we have to make
1828 * sure to pad short frames out to the minimum frame length
1829 * ourselves.
1830 */
1831 if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
1832 m = *m_head;
1833 padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
1834 if (M_WRITABLE(m) == 0) {
1835 /* Get a writable copy. */
1836 m = m_dup(*m_head, M_NOWAIT);
1837 m_freem(*m_head);
1838 if (m == NULL) {
1839 *m_head = NULL;
1840 return (ENOBUFS);
1841 }
1842 *m_head = m;
1843 }
1844 if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
1845 m = m_defrag(m, M_NOWAIT);
1846 if (m == NULL) {
1847 m_freem(*m_head);
1848 *m_head = NULL;
1849 return (ENOBUFS);
1850 }
1851 }
1852 /*
1853 * Manually pad short frames, and zero the pad space
1854 * to avoid leaking data.
1855 */
1856 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1857 m->m_pkthdr.len += padlen;
1858 m->m_len = m->m_pkthdr.len;
1859 *m_head = m;
1860 }

	prod = sc->vr_cdata.vr_tx_prod;
	txd = &sc->vr_cdata.vr_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}
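	/*
	 * One slot is deliberately kept unused: leaving a gap between
	 * producer and consumer is the usual way to distinguish a
	 * completely full ring from an empty one.
	 */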

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Set checksum offload. */
	csum_flags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= VR_TXCTL_IPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= VR_TXCTL_TCPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= VR_TXCTL_UDPCSUM;
	}
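	/*
	 * VR_CSUM_FEATURES is assumed to be the driver's mask of the
	 * CSUM_IP, CSUM_TCP and CSUM_UDP bits advertised through
	 * if_hwassist, so only offload requests the hardware can honor
	 * get this far.
	 */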

	/*
	 * Quite contrary to the datasheet for the VIA Rhine, the
	 * VR_TXCTL_TLINK bit is required in all descriptors, regardless
	 * of whether a frame uses a single buffer or multiple buffers.
	 * Also, the VR_TXSTAT_OWN bit is valid only in the first
	 * descriptor of a multi-fragment frame. Without this, the VIA
	 * Rhine chip generates Tx underrun interrupts and can't send
	 * any frames.
	 */
	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->vr_rdata.vr_tx_ring[prod];
		desc->vr_status = 0;
		txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
		if (i == 0)
			txctl |= VR_TXCTL_FIRSTFRAG;
		desc->vr_ctl = htole32(txctl);
		desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
		sc->vr_cdata.vr_tx_cnt++;
		VR_INC(prod, VR_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->vr_cdata.vr_tx_prod = prod;

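	/* Step back to the last descriptor filled for this frame. */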
	prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
	desc = &sc->vr_rdata.vr_tx_ring[prod];

	/*
	 * Set EOP on the last descriptor and request a Tx completion
	 * interrupt for every VR_TX_INTR_THRESH-th frame.
	 */
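	/*
	 * VR_INC() is a modular increment, so vr_tx_pkts wraps back to
	 * zero exactly once every VR_TX_INTR_THRESH frames; only those
	 * frames carry VR_TXCTL_FINT.
	 */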
	VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
	if (sc->vr_cdata.vr_tx_pkts == 0)
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
	else
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);

	/* Lastly, hand ownership of the first descriptor to the hardware. */
	desc = &sc->vr_rdata.vr_tx_ring[si];
	desc->vr_status |= htole32(VR_TXSTAT_OWN);

	/* Sync descriptors. */
	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
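	/*
	 * Ordering note: the OWN bit is written only after every other
	 * descriptor field, and the ring is synced before the caller
	 * kicks the chip with VR_CR0_TX_GO, so the hardware never sees
	 * a partially built descriptor.
	 */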

	return (0);
}

static void
vr_start(struct ifnet *ifp)
{
	struct vr_softc *sc;

	sc = ifp->if_softc;
	VR_LOCK(sc);
	vr_start_locked(ifp);
	VR_UNLOCK(sc);
}
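
/*
 * vr_start() is the unlocked wrapper installed as the interface's
 * if_start method; like most entry points in this driver it just takes
 * the softc lock and defers to a _locked variant.
 */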

static void
vr_start_locked(struct ifnet *ifp)
{
	struct vr_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Tell the chip to start transmitting. */
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
		/* Set a timeout in case the chip goes out to lunch. */
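		/*
		 * vr_tick() fires once a second and calls vr_watchdog(),
		 * which decrements this counter, so the value 5 gives
		 * the chip roughly five seconds to make progress.
		 */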
		sc->vr_watchdog_timer = 5;
	}
}

static void
vr_init(void *xsc)
{
	struct vr_softc *sc;

	sc = (struct vr_softc *)xsc;
	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	bus_addr_t addr;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	mii = device_get_softc(sc->vr_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);

	/* Init circular RX list. */
	if (vr_rx_ring_init(sc) != 0) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_tx_ring_init(sc);

	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		uint8_t vcam[2] = { 0, 0 };

		/* Disable VLAN hardware tag insertion/stripping. */
		VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
		/* Disable VLAN hardware filtering. */
		VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
		/* Disable all CAM entries. */
		vr_cam_mask(sc, VR_MCAST_CAM, 0);
		vr_cam_mask(sc, VR_VLAN_CAM, 0);
		/* Enable the first VLAN CAM. */
		vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
		vr_cam_mask(sc, VR_VLAN_CAM, 1);
	}

	/*
	 * Set up receive filter.
	 */
	vr_set_filter(sc);

	/*
	 * Load the address of the RX ring.
	 */
	addr = VR_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
	/*
	 * Load the address of the TX ring.
	 */
	addr = VR_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
	/* Default: full-duplex, no Tx poll. */
	CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);

	/* Set flow-control parameters for Rhine III. */
	if (sc->vr_revid >= REV_ID_VT6105_A0) {
		/*
		 * Configure the number of Rx buffers available for
		 * incoming packets.
		 * Even though the datasheet says almost nothing about
		 * this register, it must be updated whenever the driver
		 * posts new RX buffers to the controller. Otherwise, no
		 * XON frame is sent to the link partner even when the
		 * controller has enough RX buffers, and the host would
		 * be isolated from the network.
		 * The controller is not smart enough to track the number
		 * of available RX buffers on its own, so the driver has
		 * to tell it how many are posted. In other words, this
		 * register works like a residue counter for RX buffers
		 * and should be initialized to the total number of RX
		 * buffers minus one before enabling the RX MAC. Note
		 * that the register is 8 bits wide, which effectively
		 * limits the number of RX buffers the controller can be
		 * told about to 255.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1);
		/*
		 * Tx pause low threshold : 8 free receive buffers
		 * Tx pause XON high threshold : 24 free receive buffers
		 */
		CSR_WRITE_1(sc, VR_FLOWCR1,
		    VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF);
		/* Set Tx pause timer. */
		CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
	}
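	/*
	 * A sketch of the obligation described above (the real update
	 * belongs in the Rx completion path, not here): every time a
	 * receive buffer is handed back to the chip, the credit counter
	 * should be bumped, e.g.
	 *
	 *	CSR_WRITE_1(sc, VR_FLOWCR0,
	 *	    CSR_READ_1(sc, VR_FLOWCR0) + 1);
	 *
	 * How such writes are batched is an implementation detail; the
	 * invariant is that VR_FLOWCR0 tracks the number of posted Rx
	 * buffers.
	 */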

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, VR_CR0,
	    VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);

	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts and disable MII intrs.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
	if (sc->vr_revid > REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	mii_mediachg(mii);

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	VR_LOCK(sc);
	mii = device_get_softc(sc->vr_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	error = mii_mediachg(mii);
	VR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vr_miibus);
	VR_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VR_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	VR_UNLOCK(sc);
}

static int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		VR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->vr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					vr_set_filter(sc);
			} else {
				if ((sc->vr_flags & VR_F_DETACHED) == 0)
					vr_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		sc->vr_if_flags = ifp->if_flags;
		VR_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VR_LOCK(sc);
		vr_set_filter(sc);
		VR_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
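		/*
		 * Each bit set in mask names a capability whose state the
		 * caller asked to change; the supported ones are toggled
		 * in if_capenable below.
		 */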
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vr_poll, ifp);
				if (error != 0)
					break;
				VR_LOCK(sc);
				/* Disable interrupts. */
				CSR_WRITE_2(sc, VR_IMR, 0x0000);
				ifp->if_capenable |= IFCAP_POLLING;
				VR_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VR_LOCK(sc);
				CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VR_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= VR_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VR_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_UCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vr_watchdog(struct vr_softc *sc)
{
	struct ifnet *ifp;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
		return;

	ifp = sc->vr_ifp;
	/*
	 * Reclaim first, as we don't request an interrupt for every
	 * packet.
	 */
	vr_txeof(sc);
	if (sc->vr_cdata.vr_tx_cnt == 0)
		return;

	if ((sc->vr_flags & VR_F_LINK) == 0) {
		if (bootverbose)
			if_printf(sc->vr_ifp, "watchdog timeout "
			    "(missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vr_init_locked(sc);
		return;
	}

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vr_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);
}

static void
vr_tx_start(struct vr_softc *sc)
{
	bus_addr_t addr;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) == 0) {
		addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
		CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	if (sc->vr_cdata.vr_tx_cnt != 0) {
		sc->vr_watchdog_timer = 5;
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
	}
}

static void
vr_rx_start(struct vr_softc *sc)
{
	bus_addr_t addr;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) == 0) {
		addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
		CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
}
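
/*
 * vr_tx_start() and vr_rx_start() re-enable a stopped DMA engine:
 * before turning the engine back on they reload the descriptor pointer
 * from the current consumer index, so the chip resumes exactly where
 * processing left off.
 */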

static int
vr_tx_stop(struct vr_softc *sc)
{
	int i;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) != 0) {
		cmd &= ~VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_TX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}
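
/*
 * The stop routines poll CR0 up to VR_TIMEOUT times with a 5us delay
 * between reads, so a wedged engine is reported as ETIMEDOUT after
 * roughly 5 * VR_TIMEOUT microseconds.
 */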

static int
vr_rx_stop(struct vr_softc *sc)
{
	int i;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) != 0) {
		cmd &= ~VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_RX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	struct vr_txdesc *txd;
	struct vr_rxdesc *rxd;
	struct ifnet *ifp;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	sc->vr_watchdog_timer = 0;

	callout_stop(&sc->vr_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
	if (vr_rx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
	if (vr_tx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
	/* Clear pending interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vr_shutdown(device_t dev)
{

	return (vr_suspend(dev));
}
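
/*
 * Shutdown simply defers to suspend: vr_suspend() stops the chip and
 * then programs Wake On LAN via vr_setwol(), so WOL stays armed across
 * a reboot or power-off as well.
 */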

static int
vr_suspend(device_t dev)
{
	struct vr_softc *sc;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	vr_stop(sc);
	vr_setwol(sc);
	sc->vr_flags |= VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static int
vr_resume(device_t dev)
{
	struct vr_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	ifp = sc->vr_ifp;
	vr_clrwol(sc);
	vr_reset(sc);
	if (ifp->if_flags & IFF_UP)
		vr_init_locked(sc);

	sc->vr_flags &= ~VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static void
vr_setwol(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A ||
	    pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->vr_ifp;

	/* Clear WOL configuration. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III supports two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
	/*
	 * Multicast wakeup frames seem to require programming the pattern
	 * registers with a valid CRC as well as a pattern mask for each
	 * pattern. While it would be possible to set up such patterns,
	 * doing so would complicate the WOL configuration, so multicast
	 * wakeup frames are simply ignored.
	 */
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
		v = CSR_READ_1(sc, VR_STICKHW);
		CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
		CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
	}

	/* Put the hardware into sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static void
vr_clrwol(struct vr_softc *sc)
{
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A)
		return;

	/* Take the hardware out of sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/*
	 * Clear the WOL configuration, as WOL may interfere with normal
	 * operation.
	 */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR,
	    VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III supports two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
}

static int
vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct vr_softc *sc;
	struct vr_statistics *stat;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct vr_softc *)arg1;
		stat = &sc->vr_stat;

		printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
		printf("Outbound good frames : %ju\n",
		    (uintmax_t)stat->tx_ok);
		printf("Inbound good frames : %ju\n",
		    (uintmax_t)stat->rx_ok);
		printf("Outbound errors : %u\n", stat->tx_errors);
		printf("Inbound errors : %u\n", stat->rx_errors);
		printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
		printf("Inbound no mbuf clusters: %u\n", stat->rx_no_mbufs);
		printf("Inbound FIFO overflows : %u\n",
		    stat->rx_fifo_overflows);
		printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
		printf("Inbound frame alignment errors : %u\n",
		    stat->rx_alignment);
		printf("Inbound giant frames : %u\n", stat->rx_giants);
		printf("Inbound runt frames : %u\n", stat->rx_runts);
		printf("Outbound aborted with excessive collisions : %u\n",
		    stat->tx_abort);
		printf("Outbound collisions : %u\n", stat->tx_collisions);
		printf("Outbound late collisions : %u\n",
		    stat->tx_late_collisions);
		printf("Outbound underrun : %u\n", stat->tx_underrun);
		printf("PCI bus errors : %u\n", stat->bus_errors);
		printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
		    stat->num_restart);
	}

	return (error);
}

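/*
 * A sketch of how a handler like vr_sysctl_stats() is typically wired
 * up at attach time (the node name "stats" and the exact flags here are
 * assumptions, not lifted from this driver):
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
 *	    vr_sysctl_stats, "I", "Statistics");
 *
 * after which "sysctl dev.vr.0.stats=1" would dump the counters above.
 */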