1 /* $OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-4-Clause
5 *
6 * Copyright (c) 2001
7 * Jason L. Wright <[email protected]>, Theo de Raadt, and
8 * Aaron Campbell <[email protected]>. All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by Jason L. Wright,
21 * Theo de Raadt and Aaron Campbell.
22 * 4. Neither the name of the author nor the names of any co-contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
27 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
36 * THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41
42 /*
43 * Driver for 3c990 (Typhoon) Ethernet ASIC
44 */
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/bus.h>
48 #include <sys/endian.h>
49 #include <sys/kernel.h>
50 #include <sys/lock.h>
51 #include <sys/malloc.h>
52 #include <sys/mbuf.h>
53 #include <sys/module.h>
54 #include <sys/mutex.h>
55 #include <sys/queue.h>
56 #include <sys/rman.h>
57 #include <sys/socket.h>
58 #include <sys/sockio.h>
59 #include <sys/sysctl.h>
60 #include <sys/taskqueue.h>
61
62 #include <net/bpf.h>
63 #include <net/if.h>
64 #include <net/if_var.h>
65 #include <net/if_arp.h>
66 #include <net/ethernet.h>
67 #include <net/if_dl.h>
68 #include <net/if_media.h>
69 #include <net/if_types.h>
70 #include <net/if_vlan_var.h>
71
72 #include <netinet/in.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/ip.h>
75
76 #include <dev/mii/mii.h>
77
78 #include <dev/pci/pcireg.h>
79 #include <dev/pci/pcivar.h>
80
81 #include <machine/bus.h>
82 #include <machine/in_cksum.h>
83
84 #include <dev/txp/if_txpreg.h>
85 #include <dev/txp/3c990img.h>
86
87 MODULE_DEPEND(txp, pci, 1, 1, 1);
88 MODULE_DEPEND(txp, ether, 1, 1, 1);
89
/*
 * XXX Known Typhoon firmware issues.
 *
 * 1. The firmware appears to have a Tx TCP/UDP checksum offloading
 *    bug: it hangs when told to compute a TCP/UDP checksum. It is
 *    unclear whether the firmware requires special alignment to do
 *    checksum offloading; the datasheet says nothing about that.
 * 2. The datasheet does not state the maximum number of fragment
 *    descriptors supported. Experimentation shows the firmware
 *    supports up to 16 fragment descriptors. For TSO, the upper
 *    stack can hand down a 64KB IP datagram plus the link header
 *    (ethernet header + VLAN tag), but the controller can only
 *    handle frames up to 64KB given 4KB pages (i.e. 16 * PAGE_SIZE).
 *    Because frames that require hardware TSO can therefore be
 *    larger than 64KB, the TSO capability is disabled. TSO with 16
 *    or fewer fragment descriptors works without problems, though.
 * 3. VLAN hardware tag stripping is always enabled in the firmware,
 *    even when it is explicitly told not to strip the tag. The tag
 *    could be reinserted in the Rx handler when VLAN hardware
 *    tagging is not enabled, but that was not attempted as it would
 *    be a layering violation.
 * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as described in the
 *    datasheet, so the driver has to satisfy the alignment
 *    restriction by copying each received frame onto a 32-bit
 *    boundary on strict-alignment architectures. This adds a lot of
 *    CPU overhead and effectively reduces Rx performance on
 *    strict-alignment architectures (e.g. sparc64, arm and mips).
 *
 * Unfortunately 3Com no longer seems interested in releasing fixed
 * firmware, so we may have to live with these bugs.
 */
122
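/*
 * Only IP header checksumming is offloaded on transmit; TCP/UDP
 * checksum offloading is left out because of firmware issue 1 above.
 */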
123 #define TXP_CSUM_FEATURES (CSUM_IP)
124
125 /*
126 * Various supported device vendors/types and their names.
127 */
128 static struct txp_type txp_devs[] = {
129 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95,
130 "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
131 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97,
132 "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
133 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM,
134 "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
135 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95,
136 "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
137 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97,
138 "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
139 { TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV,
140 "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
141 { 0, 0, NULL }
142 };
143
144 static int txp_probe(device_t);
145 static int txp_attach(device_t);
146 static int txp_detach(device_t);
147 static int txp_shutdown(device_t);
148 static int txp_suspend(device_t);
149 static int txp_resume(device_t);
150 static int txp_intr(void *);
151 static void txp_int_task(void *, int);
152 static void txp_tick(void *);
153 static int txp_ioctl(struct ifnet *, u_long, caddr_t);
154 static uint64_t txp_get_counter(struct ifnet *, ift_counter);
155 static void txp_start(struct ifnet *);
156 static void txp_start_locked(struct ifnet *);
157 static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **);
158 static void txp_stop(struct txp_softc *);
159 static void txp_init(void *);
160 static void txp_init_locked(struct txp_softc *);
161 static void txp_watchdog(struct txp_softc *);
162
163 static int txp_reset(struct txp_softc *);
164 static int txp_boot(struct txp_softc *, uint32_t);
165 static int txp_sleep(struct txp_softc *, int);
166 static int txp_wait(struct txp_softc *, uint32_t);
167 static int txp_download_fw(struct txp_softc *);
168 static int txp_download_fw_wait(struct txp_softc *);
169 static int txp_download_fw_section(struct txp_softc *,
170 struct txp_fw_section_header *, int);
171 static int txp_alloc_rings(struct txp_softc *);
172 static void txp_init_rings(struct txp_softc *);
173 static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *,
174 bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *);
175 static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t,
176 void **, bus_addr_t *);
177 static void txp_free_rings(struct txp_softc *);
178 static int txp_rxring_fill(struct txp_softc *);
179 static void txp_rxring_empty(struct txp_softc *);
180 static void txp_set_filter(struct txp_softc *);
181
182 static int txp_cmd_desc_numfree(struct txp_softc *);
183 static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
184 uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
185 static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t,
186 uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
187 struct txp_rsp_desc **, int);
188 static int txp_response(struct txp_softc *, uint16_t, uint16_t,
189 struct txp_rsp_desc **);
190 static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
191 struct txp_rsp_desc *);
192 static int txp_set_capabilities(struct txp_softc *);
193
194 static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
195 static int txp_ifmedia_upd(struct ifnet *);
196 #ifdef TXP_DEBUG
197 static void txp_show_descriptor(void *);
198 #endif
199 static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
200 static void txp_rxbuf_reclaim(struct txp_softc *);
201 #ifndef __NO_STRICT_ALIGNMENT
202 static __inline void txp_fixup_rx(struct mbuf *);
203 #endif
204 static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int);
205 static void txp_stats_save(struct txp_softc *);
206 static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *);
207 static void txp_sysctl_node(struct txp_softc *);
208 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
209 static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS);
210
211 static int prefer_iomap = 0;
212 TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap);
213
214 static device_method_t txp_methods[] = {
215 /* Device interface */
216 DEVMETHOD(device_probe, txp_probe),
217 DEVMETHOD(device_attach, txp_attach),
218 DEVMETHOD(device_detach, txp_detach),
219 DEVMETHOD(device_shutdown, txp_shutdown),
220 DEVMETHOD(device_suspend, txp_suspend),
221 DEVMETHOD(device_resume, txp_resume),
222
223 { NULL, NULL }
224 };
225
226 static driver_t txp_driver = {
227 "txp",
228 txp_methods,
229 sizeof(struct txp_softc)
230 };
231
232 static devclass_t txp_devclass;
233
234 DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);
235
236 static int
txp_probe(device_t dev)
238 {
239 struct txp_type *t;
240
241 t = txp_devs;
242
243 while (t->txp_name != NULL) {
244 if ((pci_get_vendor(dev) == t->txp_vid) &&
245 (pci_get_device(dev) == t->txp_did)) {
246 device_set_desc(dev, t->txp_name);
247 return (BUS_PROBE_DEFAULT);
248 }
249 t++;
250 }
251
252 return (ENXIO);
253 }
254
255 static int
txp_attach(device_t dev)
257 {
258 struct txp_softc *sc;
259 struct ifnet *ifp;
260 struct txp_rsp_desc *rsp;
261 uint16_t p1;
262 uint32_t p2, reg;
263 int error = 0, pmc, rid;
264 uint8_t eaddr[ETHER_ADDR_LEN], *ver;
265
266 sc = device_get_softc(dev);
267 sc->sc_dev = dev;
268
269 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
270 MTX_DEF);
271 callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
272 TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc);
273 TAILQ_INIT(&sc->sc_busy_list);
274 TAILQ_INIT(&sc->sc_free_list);
275
276 ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
277 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
278 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
279 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
280 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
281 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
282 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
283 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
284
285 pci_enable_busmaster(dev);
286 /* Prefer memory space register mapping over IO space. */
287 if (prefer_iomap == 0) {
288 sc->sc_res_id = PCIR_BAR(1);
289 sc->sc_res_type = SYS_RES_MEMORY;
290 } else {
291 sc->sc_res_id = PCIR_BAR(0);
292 sc->sc_res_type = SYS_RES_IOPORT;
293 }
294 sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
295 &sc->sc_res_id, RF_ACTIVE);
296 if (sc->sc_res == NULL && prefer_iomap == 0) {
297 sc->sc_res_id = PCIR_BAR(0);
298 sc->sc_res_type = SYS_RES_IOPORT;
299 sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
300 &sc->sc_res_id, RF_ACTIVE);
301 }
302 if (sc->sc_res == NULL) {
303 device_printf(dev, "couldn't map ports/memory\n");
304 ifmedia_removeall(&sc->sc_ifmedia);
305 mtx_destroy(&sc->sc_mtx);
306 return (ENXIO);
307 }
308
309 /* Enable MWI. */
310 reg = pci_read_config(dev, PCIR_COMMAND, 2);
311 reg |= PCIM_CMD_MWRICEN;
312 pci_write_config(dev, PCIR_COMMAND, reg, 2);
313 /* Check cache line size. */
314 reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
315 reg <<= 4;
316 if (reg == 0 || (reg % 16) != 0)
317 device_printf(sc->sc_dev,
318 "invalid cache line size : %u\n", reg);
319
320 /* Allocate interrupt */
321 rid = 0;
322 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
323 RF_SHAREABLE | RF_ACTIVE);
324
325 if (sc->sc_irq == NULL) {
326 device_printf(dev, "couldn't map interrupt\n");
327 error = ENXIO;
328 goto fail;
329 }
330
331 if ((error = txp_alloc_rings(sc)) != 0)
332 goto fail;
333 txp_init_rings(sc);
334 txp_sysctl_node(sc);
335 /* Reset controller and make it reload sleep image. */
336 if (txp_reset(sc) != 0) {
337 error = ENXIO;
338 goto fail;
339 }
340
341 /* Let controller boot from sleep image. */
342 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
343 device_printf(sc->sc_dev, "could not boot sleep image\n");
344 error = ENXIO;
345 goto fail;
346 }
347
348 /* Get station address. */
349 if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
350 &p1, &p2, NULL, TXP_CMD_WAIT)) {
351 error = ENXIO;
352 goto fail;
353 }
354
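	/*
	 * The station address is returned in the two response parameters;
	 * the bytes are stored in reverse order within each word, hence
	 * the swapped indexing below.
	 */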
355 p1 = le16toh(p1);
356 eaddr[0] = ((uint8_t *)&p1)[1];
357 eaddr[1] = ((uint8_t *)&p1)[0];
358 p2 = le32toh(p2);
359 eaddr[2] = ((uint8_t *)&p2)[3];
360 eaddr[3] = ((uint8_t *)&p2)[2];
361 eaddr[4] = ((uint8_t *)&p2)[1];
362 eaddr[5] = ((uint8_t *)&p2)[0];
363
364 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
365 if (ifp == NULL) {
366 device_printf(dev, "can not allocate ifnet structure\n");
367 error = ENOSPC;
368 goto fail;
369 }
370
371 /*
372 * Show sleep image version information which may help to
373 * diagnose sleep image specific issues.
374 */
375 rsp = NULL;
376 if (txp_ext_command(sc, TXP_CMD_VERSIONS_READ, 0, 0, 0, NULL, 0,
377 &rsp, TXP_CMD_WAIT)) {
378 device_printf(dev, "can not read sleep image version\n");
379 error = ENXIO;
380 goto fail;
381 }
382 if (rsp->rsp_numdesc == 0) {
383 p2 = le32toh(rsp->rsp_par2) & 0xFFFF;
384 device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n",
385 p2 >> 8, p2 & 0xFF);
386 } else if (rsp->rsp_numdesc == 2) {
387 p2 = le32toh(rsp->rsp_par2);
388 ver = (uint8_t *)(rsp + 1);
		/*
		 * Even though the datasheet says the command returns a
		 * NUL-terminated version string, terminate the string
		 * explicitly. Given the number of firmware bugs, even
		 * this simple claim cannot be trusted.
		 */
395 ver[25] = '\0';
396 device_printf(dev,
397 "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n",
398 p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver);
399 } else {
400 p2 = le32toh(rsp->rsp_par2);
401 device_printf(dev,
402 "Unknown Typhoon sleep image version: %u:0x%08x\n",
403 rsp->rsp_numdesc, p2);
404 }
405 free(rsp, M_DEVBUF);
406
407 sc->sc_xcvr = TXP_XCVR_AUTO;
408 txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
409 NULL, NULL, NULL, TXP_CMD_NOWAIT);
410 ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);
411
412 ifp->if_softc = sc;
413 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
414 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
415 ifp->if_ioctl = txp_ioctl;
416 ifp->if_start = txp_start;
417 ifp->if_init = txp_init;
418 ifp->if_get_counter = txp_get_counter;
419 ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1;
420 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
421 IFQ_SET_READY(&ifp->if_snd);
	/*
	 * The firmware's offload capabilities could be queried, but the
	 * firmware has not been downloaded yet, so only announce the
	 * capabilities known to work here. We are not interested in the
	 * IPSec capability, and due to the many firmware bugs we cannot
	 * advertise the full capability set anyway.
	 */
429 ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM;
430 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
431 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
432 /* Enable all capabilities. */
433 ifp->if_capenable = ifp->if_capabilities;
434
435 ether_ifattach(ifp, eaddr);
436
437 /* VLAN capability setup. */
438 ifp->if_capabilities |= IFCAP_VLAN_MTU;
439 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
440 ifp->if_capenable = ifp->if_capabilities;
441 /* Tell the upper layer(s) we support long frames. */
442 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
443
444 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
445 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
446
447 /* Create local taskq. */
448 sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK,
449 taskqueue_thread_enqueue, &sc->sc_tq);
450 if (sc->sc_tq == NULL) {
451 device_printf(dev, "could not create taskqueue.\n");
452 ether_ifdetach(ifp);
453 error = ENXIO;
454 goto fail;
455 }
456 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
457 device_get_nameunit(sc->sc_dev));
458
459 /* Put controller into sleep. */
460 if (txp_sleep(sc, 0) != 0) {
461 ether_ifdetach(ifp);
462 error = ENXIO;
463 goto fail;
464 }
465
466 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
467 txp_intr, NULL, sc, &sc->sc_intrhand);
468
469 if (error != 0) {
470 ether_ifdetach(ifp);
471 device_printf(dev, "couldn't set up interrupt handler.\n");
472 goto fail;
473 }
474
475 gone_by_fcp101_dev(dev);
476
477 return (0);
478
479 fail:
480 if (error != 0)
481 txp_detach(dev);
482 return (error);
483 }
484
485 static int
txp_detach(device_t dev)
487 {
488 struct txp_softc *sc;
489 struct ifnet *ifp;
490
491 sc = device_get_softc(dev);
492
493 ifp = sc->sc_ifp;
494 if (device_is_attached(dev)) {
495 TXP_LOCK(sc);
496 sc->sc_flags |= TXP_FLAG_DETACH;
497 txp_stop(sc);
498 TXP_UNLOCK(sc);
499 callout_drain(&sc->sc_tick);
500 taskqueue_drain(sc->sc_tq, &sc->sc_int_task);
501 ether_ifdetach(ifp);
502 }
503 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
504
505 ifmedia_removeall(&sc->sc_ifmedia);
506 if (sc->sc_intrhand != NULL)
507 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
508 if (sc->sc_irq != NULL)
509 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
510 if (sc->sc_res != NULL)
511 bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id,
512 sc->sc_res);
513 if (sc->sc_ifp != NULL) {
514 if_free(sc->sc_ifp);
515 sc->sc_ifp = NULL;
516 }
517 txp_free_rings(sc);
518 mtx_destroy(&sc->sc_mtx);
519
520 return (0);
521 }
522
523 static int
txp_reset(struct txp_softc *sc)
525 {
526 uint32_t r;
527 int i;
528
529 /* Disable interrupts. */
530 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
531 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
532 /* Ack all pending interrupts. */
533 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
534
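	/* Assert a full software reset, hold it briefly, then release it. */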
535 r = 0;
536 WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
537 DELAY(1000);
538 WRITE_REG(sc, TXP_SRR, 0);
539
540 /* Should wait max 6 seconds. */
541 for (i = 0; i < 6000; i++) {
542 r = READ_REG(sc, TXP_A2H_0);
543 if (r == STAT_WAITING_FOR_HOST_REQUEST)
544 break;
545 DELAY(1000);
546 }
547
548 if (r != STAT_WAITING_FOR_HOST_REQUEST)
549 device_printf(sc->sc_dev, "reset hung\n");
550
551 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
552 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
553 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
554
	/*
	 * Give the controller more time to finish loading the sleep
	 * image before trying to boot from it.
	 */
559 DELAY(5000);
560
561 return (0);
562 }
563
564 static int
txp_boot(struct txp_softc *sc, uint32_t state)
566 {
567
568 /* See if it's waiting for boot, and try to boot it. */
569 if (txp_wait(sc, state) != 0) {
570 device_printf(sc->sc_dev, "not waiting for boot\n");
571 return (ENXIO);
572 }
573
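	/*
	 * Hand the physical address of the boot record to the firmware
	 * through the host-to-adapter (H2A) mailbox registers: high word,
	 * low word, then the register-boot-record command.
	 */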
574 WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr));
575 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
576 WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr));
577 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
578 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
579 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
580
581 /* See if it booted. */
582 if (txp_wait(sc, STAT_RUNNING) != 0) {
583 device_printf(sc->sc_dev, "firmware not running\n");
584 return (ENXIO);
585 }
586
587 /* Clear TX and CMD ring write registers. */
588 WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
589 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
590 WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
591 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
592 WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
593 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
594 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
595 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
596
597 return (0);
598 }
599
600 static int
txp_download_fw(struct txp_softc *sc)
602 {
603 struct txp_fw_file_header *fileheader;
604 struct txp_fw_section_header *secthead;
605 int sect;
606 uint32_t error, ier, imr;
607
608 TXP_LOCK_ASSERT(sc);
609
610 error = 0;
611 ier = READ_REG(sc, TXP_IER);
612 WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);
613
614 imr = READ_REG(sc, TXP_IMR);
615 WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);
616
617 if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
618 device_printf(sc->sc_dev, "not waiting for host request\n");
619 error = ETIMEDOUT;
620 goto fail;
621 }
622
623 /* Ack the status. */
624 WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
625
626 fileheader = (struct txp_fw_file_header *)tc990image;
627 if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
628 device_printf(sc->sc_dev, "firmware invalid magic\n");
629 goto fail;
630 }
631
632 /* Tell boot firmware to get ready for image. */
633 WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
634 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
635 WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
636 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
637 WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
638 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
639 WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
640 TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
641 WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
642 TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
643 WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
644 TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE);
645 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
646 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
647
648 if (txp_download_fw_wait(sc)) {
649 device_printf(sc->sc_dev, "firmware wait failed, initial\n");
650 error = ETIMEDOUT;
651 goto fail;
652 }
653
654 secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) +
655 sizeof(struct txp_fw_file_header));
656
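	/*
	 * Download each section in turn; section headers are packed back
	 * to back after the file header, each followed by its data.
	 */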
657 for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
658 if ((error = txp_download_fw_section(sc, secthead, sect)) != 0)
659 goto fail;
660 secthead = (struct txp_fw_section_header *)
661 (((uint8_t *)secthead) + le32toh(secthead->nbytes) +
662 sizeof(*secthead));
663 }
664
665 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
666 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
667
668 if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) {
669 device_printf(sc->sc_dev, "not waiting for boot\n");
670 error = ETIMEDOUT;
671 goto fail;
672 }
673
674 fail:
675 WRITE_REG(sc, TXP_IER, ier);
676 WRITE_REG(sc, TXP_IMR, imr);
677
678 return (error);
679 }
680
681 static int
txp_download_fw_wait(struct txp_softc *sc)
683 {
684 uint32_t i;
685
686 TXP_LOCK_ASSERT(sc);
687
688 for (i = 0; i < TXP_TIMEOUT; i++) {
689 if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0)
690 break;
691 DELAY(50);
692 }
693
694 if (i == TXP_TIMEOUT) {
695 device_printf(sc->sc_dev, "firmware wait failed comm0\n");
696 return (ETIMEDOUT);
697 }
698
699 WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
700
701 if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) {
702 device_printf(sc->sc_dev, "firmware not waiting for segment\n");
703 return (ETIMEDOUT);
704 }
705 return (0);
706 }
707
708 static int
txp_download_fw_section(struct txp_softc *sc,
710 struct txp_fw_section_header *sect, int sectnum)
711 {
712 bus_dma_tag_t sec_tag;
713 bus_dmamap_t sec_map;
714 bus_addr_t sec_paddr;
715 uint8_t *sec_buf;
716 int rseg, err = 0;
717 struct mbuf m;
718 uint16_t csum;
719
720 TXP_LOCK_ASSERT(sc);
721
722 /* Skip zero length sections. */
723 if (le32toh(sect->nbytes) == 0)
724 return (0);
725
726 /* Make sure we aren't past the end of the image. */
727 rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image);
728 if (rseg >= sizeof(tc990image)) {
729 device_printf(sc->sc_dev,
730 "firmware invalid section address, section %d\n", sectnum);
731 return (EIO);
732 }
733
734 /* Make sure this section doesn't go past the end. */
735 rseg += le32toh(sect->nbytes);
736 if (rseg >= sizeof(tc990image)) {
737 device_printf(sc->sc_dev, "firmware truncated section %d\n",
738 sectnum);
739 return (EIO);
740 }
741
742 sec_tag = NULL;
743 sec_map = NULL;
744 sec_buf = NULL;
	/* XXX txp_dma_alloc() may sleep, so drop the lock around it. */
746 TXP_UNLOCK(sc);
747 err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t),
748 0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr);
749 TXP_LOCK(sc);
750 if (err != 0)
751 goto bail;
752 bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf,
753 le32toh(sect->nbytes));
754
755 /*
756 * dummy up mbuf and verify section checksum
757 */
758 m.m_type = MT_DATA;
759 m.m_next = m.m_nextpkt = NULL;
760 m.m_len = le32toh(sect->nbytes);
761 m.m_data = sec_buf;
762 m.m_flags = 0;
763 csum = in_cksum(&m, le32toh(sect->nbytes));
764 if (csum != sect->cksum) {
765 device_printf(sc->sc_dev,
766 "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n",
767 sectnum, le16toh(sect->cksum), csum);
768 err = EIO;
769 goto bail;
770 }
771
772 bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE);
773
774 WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
775 TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
776 WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum));
777 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
778 WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
779 TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
780 WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr));
781 TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
782 WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr));
783 TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
784 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
785 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
786
787 if (txp_download_fw_wait(sc)) {
788 device_printf(sc->sc_dev,
789 "firmware wait failed, section %d\n", sectnum);
790 err = ETIMEDOUT;
791 }
792
793 bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE);
794 bail:
795 txp_dma_free(sc, &sec_tag, sec_map, (void **)&sec_buf, &sec_paddr);
796 return (err);
797 }
798
799 static int
txp_intr(void *vsc)
801 {
802 struct txp_softc *sc;
803 uint32_t status;
804
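	/*
	 * Interrupt filter: acknowledge the latched status, mask further
	 * interrupts and defer the real work to the taskqueue.
	 */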
805 sc = vsc;
806 status = READ_REG(sc, TXP_ISR);
807 if ((status & TXP_INT_LATCH) == 0)
808 return (FILTER_STRAY);
809 WRITE_REG(sc, TXP_ISR, status);
810 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
811 taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
812
813 return (FILTER_HANDLED);
814 }
815
816 static void
txp_int_task(void *arg, int pending)
818 {
819 struct txp_softc *sc;
820 struct ifnet *ifp;
821 struct txp_hostvar *hv;
822 uint32_t isr;
823 int more;
824
825 sc = (struct txp_softc *)arg;
826
827 TXP_LOCK(sc);
828 ifp = sc->sc_ifp;
829 hv = sc->sc_hostvar;
830 isr = READ_REG(sc, TXP_ISR);
831 if ((isr & TXP_INT_LATCH) != 0)
832 WRITE_REG(sc, TXP_ISR, isr);
833
834 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
835 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
836 sc->sc_cdata.txp_hostvar_map,
837 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
838 more = 0;
839 if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
840 more += txp_rx_reclaim(sc, &sc->sc_rxhir,
841 sc->sc_process_limit);
842 if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
843 more += txp_rx_reclaim(sc, &sc->sc_rxlor,
844 sc->sc_process_limit);
		/*
		 * XXX
		 * The controller does not seem to be smart enough to
		 * handle FIFO overflow conditions under heavy network
		 * load; no matter how quickly new Rx buffers are passed
		 * to it, the situation does not improve. Flow control
		 * might be the only way to mitigate the issue, but the
		 * firmware provides no command to control the threshold
		 * for emitting pause frames.
		 */
855 if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
856 txp_rxbuf_reclaim(sc);
857 if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
858 TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
859 txp_tx_reclaim(sc, &sc->sc_txhir);
860 if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
861 TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
862 txp_tx_reclaim(sc, &sc->sc_txlor);
863 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
864 sc->sc_cdata.txp_hostvar_map,
865 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
866 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
867 txp_start_locked(sc->sc_ifp);
		if (more != 0 || (READ_REG(sc, TXP_ISR) & TXP_INT_LATCH) != 0) {
869 taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
870 TXP_UNLOCK(sc);
871 return;
872 }
873 }
874
875 /* Re-enable interrupts. */
876 WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
877 TXP_UNLOCK(sc);
878 }
879
880 #ifndef __NO_STRICT_ALIGNMENT
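/*
 * Slide the received frame back by (TXP_RXBUF_ALIGN - ETHER_ALIGN) bytes
 * so that the IP header following the ethernet header ends up 32-bit
 * aligned on strict-alignment architectures.
 */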
881 static __inline void
txp_fixup_rx(struct mbuf *m)
883 {
884 int i;
885 uint16_t *src, *dst;
886
887 src = mtod(m, uint16_t *);
888 dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src;
889
890 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
891 *dst++ = *src++;
892
893 m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN;
894 }
895 #endif
896
897 static int
txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count)
899 {
900 struct ifnet *ifp;
901 struct txp_rx_desc *rxd;
902 struct mbuf *m;
903 struct txp_rx_swdesc *sd;
904 uint32_t roff, woff, rx_stat, prog;
905
906 TXP_LOCK_ASSERT(sc);
907
908 ifp = sc->sc_ifp;
909
910 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
911 BUS_DMASYNC_POSTWRITE);
912
913 roff = le32toh(*r->r_roff);
914 woff = le32toh(*r->r_woff);
915 rxd = r->r_desc + roff / sizeof(struct txp_rx_desc);
916 for (prog = 0; roff != woff; prog++, count--) {
917 if (count <= 0)
918 break;
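		/*
		 * Recover the software descriptor pointer that was stashed
		 * in the unused virtual address fields of the hardware
		 * descriptor (see txp_alloc_rings()).
		 */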
919 bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
920 KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__));
921 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
922 BUS_DMASYNC_POSTREAD);
923 bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
924 m = sd->sd_mbuf;
925 KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__));
926 sd->sd_mbuf = NULL;
927 TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
928 TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
929 if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
930 if (bootverbose)
931 device_printf(sc->sc_dev, "Rx error %u\n",
932 le32toh(rxd->rx_stat) & RX_ERROR_MASK);
933 m_freem(m);
934 goto next;
935 }
936
937 m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
938 m->m_pkthdr.rcvif = ifp;
939 #ifndef __NO_STRICT_ALIGNMENT
940 txp_fixup_rx(m);
941 #endif
942 rx_stat = le32toh(rxd->rx_stat);
943 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
944 if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0)
945 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
946 else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0)
947 m->m_pkthdr.csum_flags |=
948 CSUM_IP_CHECKED|CSUM_IP_VALID;
949
950 if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 ||
951 (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) {
952 m->m_pkthdr.csum_flags |=
953 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
954 m->m_pkthdr.csum_data = 0xffff;
955 }
956 }
957
		/*
		 * XXX
		 * The Typhoon firmware has a bug whereby the VLAN tag is
		 * always stripped, even when it is told not to remove the
		 * tag. Therefore don't check if_capenable here.
		 */
964 if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */
965 (rx_stat & RX_STAT_VLAN) != 0) {
966 m->m_pkthdr.ether_vtag =
967 bswap16((le32toh(rxd->rx_vlan) >> 16));
968 m->m_flags |= M_VLANTAG;
969 }
970
971 TXP_UNLOCK(sc);
972 (*ifp->if_input)(ifp, m);
973 TXP_LOCK(sc);
974
975 next:
976 roff += sizeof(struct txp_rx_desc);
977 if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
978 roff = 0;
979 rxd = r->r_desc;
980 } else
981 rxd++;
982 prog++;
983 }
984
985 if (prog == 0)
986 return (0);
987
988 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
989 BUS_DMASYNC_PREWRITE);
990 *r->r_roff = le32toh(roff);
991
992 return (count > 0 ? 0 : EAGAIN);
993 }
994
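/*
 * Refill the Rx buffer ring with fresh mbuf clusters as the firmware
 * consumes them, stopping when the ring is full or buffers run out.
 */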
995 static void
txp_rxbuf_reclaim(struct txp_softc *sc)
997 {
998 struct txp_hostvar *hv;
999 struct txp_rxbuf_desc *rbd;
1000 struct txp_rx_swdesc *sd;
1001 bus_dma_segment_t segs[1];
1002 int nsegs, prod, prog;
1003 uint32_t cons;
1004
1005 TXP_LOCK_ASSERT(sc);
1006
1007 hv = sc->sc_hostvar;
1008 cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
1009 prod = sc->sc_rxbufprod;
1010 TXP_DESC_INC(prod, RXBUF_ENTRIES);
1011 if (prod == cons)
1012 return;
1013
1014 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1015 sc->sc_cdata.txp_rxbufs_map,
1016 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1017
1018 for (prog = 0; prod != cons; prog++) {
1019 sd = TAILQ_FIRST(&sc->sc_free_list);
1020 if (sd == NULL)
1021 break;
1022 rbd = sc->sc_rxbufs + prod;
1023 bcopy((u_long *)&rbd->rb_vaddrlo, &sd, sizeof(sd));
1024 sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1025 if (sd->sd_mbuf == NULL)
1026 break;
1027 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1028 #ifndef __NO_STRICT_ALIGNMENT
1029 m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1030 #endif
1031 if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1032 sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) {
1033 m_freem(sd->sd_mbuf);
1034 sd->sd_mbuf = NULL;
1035 break;
1036 }
1037 KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1038 nsegs));
1039 TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1040 TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1041 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1042 BUS_DMASYNC_PREREAD);
1043 rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1044 rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1045 TXP_DESC_INC(prod, RXBUF_ENTRIES);
1046 }
1047
1048 if (prog == 0)
1049 return;
1050 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1051 sc->sc_cdata.txp_rxbufs_map,
1052 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1053 prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
1054 sc->sc_rxbufprod = prod;
1055 hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));
1056 }
1057
1058 /*
1059 * Reclaim mbufs and entries from a transmit ring.
1060 */
1061 static void
txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
1063 {
1064 struct ifnet *ifp;
1065 uint32_t idx;
1066 uint32_t cons, cnt;
1067 struct txp_tx_desc *txd;
1068 struct txp_swdesc *sd;
1069
1070 TXP_LOCK_ASSERT(sc);
1071
1072 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
1073 BUS_DMASYNC_POSTWRITE);
1074 ifp = sc->sc_ifp;
1075 idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
1076 cons = r->r_cons;
1077 cnt = r->r_cnt;
1078 txd = r->r_desc + cons;
1079 sd = sc->sc_txd + cons;
1080
1081 for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) {
1082 if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) {
1083 if (sd->sd_mbuf != NULL) {
1084 bus_dmamap_sync(sc->sc_cdata.txp_tx_tag,
1085 sd->sd_map, BUS_DMASYNC_POSTWRITE);
1086 bus_dmamap_unload(sc->sc_cdata.txp_tx_tag,
1087 sd->sd_map);
1088 m_freem(sd->sd_mbuf);
1089 sd->sd_mbuf = NULL;
1090 txd->tx_addrlo = 0;
1091 txd->tx_addrhi = 0;
1092 txd->tx_flags = 0;
1093 }
1094 }
1095 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1096
1097 if (++cons == TX_ENTRIES) {
1098 txd = r->r_desc;
1099 cons = 0;
1100 sd = sc->sc_txd;
1101 } else {
1102 txd++;
1103 sd++;
1104 }
1105 }
1106
1107 bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
1108 BUS_DMASYNC_PREWRITE);
1109 r->r_cons = cons;
1110 r->r_cnt = cnt;
1111 if (cnt == 0)
1112 sc->sc_watchdog_timer = 0;
1113 }
1114
1115 static int
txp_shutdown(device_t dev)
1117 {
1118
1119 return (txp_suspend(dev));
1120 }
1121
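/*
 * Prepare the controller for system sleep: reload the sleep image,
 * program the station address and receive filter into it, and arm PME
 * if wake-on-LAN is enabled.
 */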
1122 static int
txp_suspend(device_t dev)
1124 {
1125 struct txp_softc *sc;
1126 struct ifnet *ifp;
1127 uint8_t *eaddr;
1128 uint16_t p1;
1129 uint32_t p2;
1130 int pmc;
1131 uint16_t pmstat;
1132
1133 sc = device_get_softc(dev);
1134
1135 TXP_LOCK(sc);
1136 ifp = sc->sc_ifp;
1137 txp_stop(sc);
1138 txp_init_rings(sc);
1139 /* Reset controller and make it reload sleep image. */
1140 txp_reset(sc);
1141 /* Let controller boot from sleep image. */
1142 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
1143 device_printf(sc->sc_dev, "couldn't boot sleep image\n");
1144
1145 /* Set station address. */
1146 eaddr = IF_LLADDR(sc->sc_ifp);
1147 p1 = 0;
1148 ((uint8_t *)&p1)[1] = eaddr[0];
1149 ((uint8_t *)&p1)[0] = eaddr[1];
1150 p1 = le16toh(p1);
1151 ((uint8_t *)&p2)[3] = eaddr[2];
1152 ((uint8_t *)&p2)[2] = eaddr[3];
1153 ((uint8_t *)&p2)[1] = eaddr[4];
1154 ((uint8_t *)&p2)[0] = eaddr[5];
1155 p2 = le32toh(p2);
1156 txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL,
1157 NULL, TXP_CMD_WAIT);
1158 txp_set_filter(sc);
1159 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
1160 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
1161 txp_sleep(sc, sc->sc_ifp->if_capenable);
1162 if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
1163 /* Request PME. */
1164 pmstat = pci_read_config(sc->sc_dev,
1165 pmc + PCIR_POWER_STATUS, 2);
1166 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1167 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1168 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1169 pci_write_config(sc->sc_dev,
1170 pmc + PCIR_POWER_STATUS, pmstat, 2);
1171 }
1172 TXP_UNLOCK(sc);
1173
1174 return (0);
1175 }
1176
1177 static int
txp_resume(device_t dev)
1179 {
1180 struct txp_softc *sc;
1181 int pmc;
1182 uint16_t pmstat;
1183
1184 sc = device_get_softc(dev);
1185
1186 TXP_LOCK(sc);
1187 if (pci_find_cap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
1188 /* Disable PME and clear PME status. */
1189 pmstat = pci_read_config(sc->sc_dev,
1190 pmc + PCIR_POWER_STATUS, 2);
1191 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1192 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1193 pci_write_config(sc->sc_dev,
1194 pmc + PCIR_POWER_STATUS, pmstat, 2);
1195 }
1196 }
1197 if ((sc->sc_ifp->if_flags & IFF_UP) != 0)
1198 txp_init_locked(sc);
1199 TXP_UNLOCK(sc);
1200
1201 return (0);
1202 }
1203
1204 struct txp_dmamap_arg {
1205 bus_addr_t txp_busaddr;
1206 };
1207
1208 static void
txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1210 {
1211 struct txp_dmamap_arg *ctx;
1212
1213 if (error != 0)
1214 return;
1215
1216 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1217
1218 ctx = (struct txp_dmamap_arg *)arg;
1219 ctx->txp_busaddr = segs[0].ds_addr;
1220 }
1221
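/*
 * Allocate a DMA-safe block of memory: create a tag with the requested
 * alignment, allocate coherent zeroed memory, and load the map to obtain
 * the bus address used by the firmware.
 */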
1222 static int
txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag,
1224 bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf,
1225 bus_size_t size, bus_addr_t *paddr)
1226 {
1227 struct txp_dmamap_arg ctx;
1228 int error;
1229
1230 /* Create DMA block tag. */
1231 error = bus_dma_tag_create(
1232 sc->sc_cdata.txp_parent_tag, /* parent */
1233 alignment, boundary, /* algnmnt, boundary */
1234 BUS_SPACE_MAXADDR, /* lowaddr */
1235 BUS_SPACE_MAXADDR, /* highaddr */
1236 NULL, NULL, /* filter, filterarg */
1237 size, /* maxsize */
1238 1, /* nsegments */
1239 size, /* maxsegsize */
1240 0, /* flags */
1241 NULL, NULL, /* lockfunc, lockarg */
1242 tag);
1243 if (error != 0) {
1244 device_printf(sc->sc_dev,
1245 "could not create DMA tag for %s.\n", type);
1246 return (error);
1247 }
1248
1249 *paddr = 0;
1250 /* Allocate DMA'able memory and load the DMA map. */
1251 error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO |
1252 BUS_DMA_COHERENT, map);
1253 if (error != 0) {
1254 device_printf(sc->sc_dev,
1255 "could not allocate DMA'able memory for %s.\n", type);
1256 return (error);
1257 }
1258
1259 ctx.txp_busaddr = 0;
1260 error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf,
1261 size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1262 if (error != 0 || ctx.txp_busaddr == 0) {
1263 device_printf(sc->sc_dev,
1264 "could not load DMA'able memory for %s.\n", type);
1265 return (error);
1266 }
1267 *paddr = ctx.txp_busaddr;
1268
1269 return (0);
1270 }
1271
1272 static void
txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t map,
1274 void **buf, bus_addr_t *paddr)
1275 {
1276
1277 if (*tag != NULL) {
1278 if (*paddr != 0)
1279 bus_dmamap_unload(*tag, map);
1280 if (buf != NULL)
1281 bus_dmamem_free(*tag, *(uint8_t **)buf, map);
1282 *(uint8_t **)buf = NULL;
1283 *paddr = 0;
1284 bus_dma_tag_destroy(*tag);
1285 *tag = NULL;
1286 }
1287 }
1288
1289 static int
txp_alloc_rings(struct txp_softc *sc)
1291 {
1292 struct txp_boot_record *boot;
1293 struct txp_ldata *ld;
1294 struct txp_swdesc *txd;
1295 struct txp_rxbuf_desc *rbd;
1296 struct txp_rx_swdesc *sd;
1297 int error, i;
1298
1299 ld = &sc->sc_ldata;
1300 boot = ld->txp_boot;
1301
1302 /* boot record */
1303 sc->sc_boot = boot;
1304
	/*
	 * Create parent ring/DMA block tag.
	 * The datasheet says that all ring addresses and descriptors
	 * support 64-bit addressing. However, the controller is known
	 * not to support DAC, so limit the DMA address space to 32 bits.
	 */
1312 error = bus_dma_tag_create(
1313 bus_get_dma_tag(sc->sc_dev), /* parent */
1314 1, 0, /* algnmnt, boundary */
1315 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1316 BUS_SPACE_MAXADDR, /* highaddr */
1317 NULL, NULL, /* filter, filterarg */
1318 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1319 0, /* nsegments */
1320 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1321 0, /* flags */
1322 NULL, NULL, /* lockfunc, lockarg */
1323 &sc->sc_cdata.txp_parent_tag);
1324 if (error != 0) {
1325 device_printf(sc->sc_dev, "could not create parent DMA tag.\n");
1326 return (error);
1327 }
1328
1329 /* Boot record. */
1330 error = txp_dma_alloc(sc, "boot record",
1331 &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0,
1332 &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
1333 sizeof(struct txp_boot_record),
1334 &sc->sc_ldata.txp_boot_paddr);
1335 if (error != 0)
1336 return (error);
1337 boot = sc->sc_ldata.txp_boot;
1338 sc->sc_boot = boot;
1339
1340 /* Host variables. */
1341 error = txp_dma_alloc(sc, "host variables",
1342 &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0,
1343 &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
1344 sizeof(struct txp_hostvar),
1345 &sc->sc_ldata.txp_hostvar_paddr);
1346 if (error != 0)
1347 return (error);
1348 boot->br_hostvar_lo =
1349 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr));
1350 boot->br_hostvar_hi =
1351 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr));
1352 sc->sc_hostvar = sc->sc_ldata.txp_hostvar;
1353
1354 /* Hi priority tx ring. */
1355 error = txp_dma_alloc(sc, "hi priority tx ring",
1356 &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0,
1357 &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring,
1358 sizeof(struct txp_tx_desc) * TX_ENTRIES,
1359 &sc->sc_ldata.txp_txhiring_paddr);
1360 if (error != 0)
1361 return (error);
1362 boot->br_txhipri_lo =
1363 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr));
1364 boot->br_txhipri_hi =
1365 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr));
1366 boot->br_txhipri_siz =
1367 htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
1368 sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag;
1369 sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map;
1370 sc->sc_txhir.r_reg = TXP_H2A_1;
1371 sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring;
1372 sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
1373 sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
1374
1375 /* Low priority tx ring. */
1376 error = txp_dma_alloc(sc, "low priority tx ring",
1377 &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0,
1378 &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring,
1379 sizeof(struct txp_tx_desc) * TX_ENTRIES,
1380 &sc->sc_ldata.txp_txloring_paddr);
1381 if (error != 0)
1382 return (error);
1383 boot->br_txlopri_lo =
1384 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr));
1385 boot->br_txlopri_hi =
1386 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr));
1387 boot->br_txlopri_siz =
1388 htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
1389 sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag;
1390 sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map;
1391 sc->sc_txlor.r_reg = TXP_H2A_3;
1392 sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring;
1393 sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
1394 sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
1395
1396 /* High priority rx ring. */
1397 error = txp_dma_alloc(sc, "hi priority rx ring",
1398 &sc->sc_cdata.txp_rxhiring_tag,
1399 roundup(sizeof(struct txp_rx_desc), 16), 0,
1400 &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring,
1401 sizeof(struct txp_rx_desc) * RX_ENTRIES,
1402 &sc->sc_ldata.txp_rxhiring_paddr);
1403 if (error != 0)
1404 return (error);
1405 boot->br_rxhipri_lo =
1406 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr));
1407 boot->br_rxhipri_hi =
1408 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr));
1409 boot->br_rxhipri_siz =
1410 htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1411 sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag;
1412 sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map;
1413 sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring;
1414 sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
1415 sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
1416
1417 /* Low priority rx ring. */
1418 error = txp_dma_alloc(sc, "low priority rx ring",
1419 &sc->sc_cdata.txp_rxloring_tag,
1420 roundup(sizeof(struct txp_rx_desc), 16), 0,
1421 &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring,
1422 sizeof(struct txp_rx_desc) * RX_ENTRIES,
1423 &sc->sc_ldata.txp_rxloring_paddr);
1424 if (error != 0)
1425 return (error);
1426 boot->br_rxlopri_lo =
1427 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr));
1428 boot->br_rxlopri_hi =
1429 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr));
1430 boot->br_rxlopri_siz =
1431 htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1432 sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag;
1433 sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map;
1434 sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring;
1435 sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
1436 sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
1437
1438 /* Command ring. */
1439 error = txp_dma_alloc(sc, "command ring",
1440 &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0,
1441 &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
1442 sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
1443 &sc->sc_ldata.txp_cmdring_paddr);
1444 if (error != 0)
1445 return (error);
1446 boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr));
1447 boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr));
1448 boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
1449 sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring;
1450 sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
1451 sc->sc_cmdring.lastwrite = 0;
1452
1453 /* Response ring. */
1454 error = txp_dma_alloc(sc, "response ring",
1455 &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0,
1456 &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
1457 sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
1458 &sc->sc_ldata.txp_rspring_paddr);
1459 if (error != 0)
1460 return (error);
1461 boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr));
1462 boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr));
1463 boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
1464 sc->sc_rspring.base = sc->sc_ldata.txp_rspring;
1465 sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
1466 sc->sc_rspring.lastwrite = 0;
1467
1468 /* Receive buffer ring. */
1469 error = txp_dma_alloc(sc, "receive buffer ring",
1470 &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0,
1471 &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
1472 sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
1473 &sc->sc_ldata.txp_rxbufs_paddr);
1474 if (error != 0)
1475 return (error);
1476 boot->br_rxbuf_lo =
1477 htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr));
1478 boot->br_rxbuf_hi =
1479 htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr));
1480 boot->br_rxbuf_siz =
1481 htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
1482 sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs;
1483
1484 /* Zero ring. */
1485 error = txp_dma_alloc(sc, "zero buffer",
1486 &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0,
1487 &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
1488 sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr);
1489 if (error != 0)
1490 return (error);
1491 boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr));
1492 boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr));
1493
1494 bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map,
1495 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1496
1497 /* Create Tx buffers. */
1498 error = bus_dma_tag_create(
1499 sc->sc_cdata.txp_parent_tag, /* parent */
1500 1, 0, /* algnmnt, boundary */
1501 BUS_SPACE_MAXADDR, /* lowaddr */
1502 BUS_SPACE_MAXADDR, /* highaddr */
1503 NULL, NULL, /* filter, filterarg */
1504 MCLBYTES * TXP_MAXTXSEGS, /* maxsize */
1505 TXP_MAXTXSEGS, /* nsegments */
1506 MCLBYTES, /* maxsegsize */
1507 0, /* flags */
1508 NULL, NULL, /* lockfunc, lockarg */
1509 &sc->sc_cdata.txp_tx_tag);
1510 if (error != 0) {
1511 device_printf(sc->sc_dev, "could not create Tx DMA tag.\n");
1512 goto fail;
1513 }
1514
1515 /* Create tag for Rx buffers. */
1516 error = bus_dma_tag_create(
1517 sc->sc_cdata.txp_parent_tag, /* parent */
1518 TXP_RXBUF_ALIGN, 0, /* algnmnt, boundary */
1519 BUS_SPACE_MAXADDR, /* lowaddr */
1520 BUS_SPACE_MAXADDR, /* highaddr */
1521 NULL, NULL, /* filter, filterarg */
1522 MCLBYTES, /* maxsize */
1523 1, /* nsegments */
1524 MCLBYTES, /* maxsegsize */
1525 0, /* flags */
1526 NULL, NULL, /* lockfunc, lockarg */
1527 &sc->sc_cdata.txp_rx_tag);
1528 if (error != 0) {
1529 device_printf(sc->sc_dev, "could not create Rx DMA tag.\n");
1530 goto fail;
1531 }
1532
1533 /* Create DMA maps for Tx buffers. */
1534 for (i = 0; i < TX_ENTRIES; i++) {
1535 txd = &sc->sc_txd[i];
1536 txd->sd_mbuf = NULL;
1537 txd->sd_map = NULL;
1538 error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0,
1539 &txd->sd_map);
1540 if (error != 0) {
1541 device_printf(sc->sc_dev,
1542 "could not create Tx dmamap.\n");
1543 goto fail;
1544 }
1545 }
1546
1547 /* Create DMA maps for Rx buffers. */
1548 for (i = 0; i < RXBUF_ENTRIES; i++) {
1549 sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF,
1550 M_NOWAIT | M_ZERO);
1551 if (sd == NULL) {
1552 error = ENOMEM;
1553 goto fail;
1554 }
		/*
		 * The virtual address part of the descriptor is not used
		 * by the hardware, so use it to store the software
		 * descriptor pointer in the ring entry. bcopy is needed
		 * here, otherwise the stored address would not be valid
		 * on big-endian architectures.
		 */
1561 rbd = sc->sc_rxbufs + i;
1562 bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1563 sd->sd_mbuf = NULL;
1564 sd->sd_map = NULL;
1565 error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0,
1566 &sd->sd_map);
1567 if (error != 0) {
1568 device_printf(sc->sc_dev,
1569 "could not create Rx dmamap.\n");
1570 goto fail;
1571 }
1572 TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
1573 }
1574
1575 fail:
1576 return (error);
1577 }
1578
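/*
 * Reset the software ring state and clear the host variable block so
 * that the driver and firmware start from a consistent view of the rings.
 */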
1579 static void
txp_init_rings(struct txp_softc *sc)
1581 {
1582
1583 bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar));
1584 bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t));
1585 sc->sc_txhir.r_cons = 0;
1586 sc->sc_txhir.r_prod = 0;
1587 sc->sc_txhir.r_cnt = 0;
1588 sc->sc_txlor.r_cons = 0;
1589 sc->sc_txlor.r_prod = 0;
1590 sc->sc_txlor.r_cnt = 0;
1591 sc->sc_cmdring.lastwrite = 0;
1592 sc->sc_rspring.lastwrite = 0;
1593 sc->sc_rxbufprod = 0;
1594 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1595 sc->sc_cdata.txp_hostvar_map,
1596 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1597 }
1598
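/*
 * Poll the A2H_0 mailbox until the firmware reports the requested state
 * or the timeout expires.
 */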
1599 static int
txp_wait(struct txp_softc *sc, uint32_t state)
1601 {
1602 uint32_t reg;
1603 int i;
1604
1605 for (i = 0; i < TXP_TIMEOUT; i++) {
1606 reg = READ_REG(sc, TXP_A2H_0);
1607 if (reg == state)
1608 break;
1609 DELAY(50);
1610 }
1611
1612 return (i == TXP_TIMEOUT ? ETIMEDOUT : 0);
1613 }
1614
1615 static void
txp_free_rings(struct txp_softc *sc)
1617 {
1618 struct txp_swdesc *txd;
1619 struct txp_rx_swdesc *sd;
1620 int i;
1621
1622 /* Tx buffers. */
1623 if (sc->sc_cdata.txp_tx_tag != NULL) {
1624 for (i = 0; i < TX_ENTRIES; i++) {
1625 txd = &sc->sc_txd[i];
1626 if (txd->sd_map != NULL) {
1627 bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag,
1628 txd->sd_map);
1629 txd->sd_map = NULL;
1630 }
1631 }
1632 bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag);
1633 sc->sc_cdata.txp_tx_tag = NULL;
1634 }
1635 /* Rx buffers. */
1636 if (sc->sc_cdata.txp_rx_tag != NULL) {
1637 if (sc->sc_rxbufs != NULL) {
1638 KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL,
1639 ("%s : still have busy Rx buffers", __func__));
1640 while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) {
1641 TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1642 if (sd->sd_map != NULL) {
1643 bus_dmamap_destroy(
1644 sc->sc_cdata.txp_rx_tag,
1645 sd->sd_map);
1646 sd->sd_map = NULL;
1647 }
1648 free(sd, M_DEVBUF);
1649 }
1650 }
1651 bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag);
1652 sc->sc_cdata.txp_rx_tag = NULL;
1653 }
1654
1655 /* Hi priority Tx ring. */
1656 txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag,
1657 sc->sc_cdata.txp_txhiring_map,
1658 (void **)&sc->sc_ldata.txp_txhiring,
1659 &sc->sc_ldata.txp_txhiring_paddr);
1660 /* Low priority Tx ring. */
1661 txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag,
1662 sc->sc_cdata.txp_txloring_map,
1663 (void **)&sc->sc_ldata.txp_txloring,
1664 &sc->sc_ldata.txp_txloring_paddr);
1665 /* Hi priority Rx ring. */
1666 txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag,
1667 sc->sc_cdata.txp_rxhiring_map,
1668 (void **)&sc->sc_ldata.txp_rxhiring,
1669 &sc->sc_ldata.txp_rxhiring_paddr);
1670 /* Low priority Rx ring. */
1671 txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag,
1672 sc->sc_cdata.txp_rxloring_map,
1673 (void **)&sc->sc_ldata.txp_rxloring,
1674 &sc->sc_ldata.txp_rxloring_paddr);
1675 /* Receive buffer ring. */
1676 txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag,
1677 sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
1678 &sc->sc_ldata.txp_rxbufs_paddr);
1679 /* Command ring. */
1680 txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag,
1681 sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
1682 &sc->sc_ldata.txp_cmdring_paddr);
1683 /* Response ring. */
1684 txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag,
1685 sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
1686 &sc->sc_ldata.txp_rspring_paddr);
1687 /* Zero ring. */
1688 txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag,
1689 sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
1690 &sc->sc_ldata.txp_zero_paddr);
1691 /* Host variables. */
1692 txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag,
1693 sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
1694 &sc->sc_ldata.txp_hostvar_paddr);
1695 /* Boot record. */
1696 txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag,
1697 sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
1698 &sc->sc_ldata.txp_boot_paddr);
1699
1700 if (sc->sc_cdata.txp_parent_tag != NULL) {
1701 bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag);
1702 sc->sc_cdata.txp_parent_tag = NULL;
1703 }
1704
1705 }
1706
1707 static int
1708 txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1709 {
1710 struct txp_softc *sc = ifp->if_softc;
1711 struct ifreq *ifr = (struct ifreq *)data;
1712 int capenable, error = 0, mask;
1713
1714 switch(command) {
1715 case SIOCSIFFLAGS:
1716 TXP_LOCK(sc);
1717 if ((ifp->if_flags & IFF_UP) != 0) {
1718 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1719 if (((ifp->if_flags ^ sc->sc_if_flags)
1720 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1721 txp_set_filter(sc);
1722 } else {
1723 if ((sc->sc_flags & TXP_FLAG_DETACH) == 0)
1724 txp_init_locked(sc);
1725 }
1726 } else {
1727 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1728 txp_stop(sc);
1729 }
1730 sc->sc_if_flags = ifp->if_flags;
1731 TXP_UNLOCK(sc);
1732 break;
1733 case SIOCADDMULTI:
1734 case SIOCDELMULTI:
1735 /*
1736 * Multicast list has changed; set the hardware
1737 * filter accordingly.
1738 */
1739 TXP_LOCK(sc);
1740 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1741 txp_set_filter(sc);
1742 TXP_UNLOCK(sc);
1743 break;
1744 case SIOCSIFCAP:
1745 TXP_LOCK(sc);
1746 capenable = ifp->if_capenable;
1747 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1748 if ((mask & IFCAP_TXCSUM) != 0 &&
1749 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1750 ifp->if_capenable ^= IFCAP_TXCSUM;
1751 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1752 ifp->if_hwassist |= TXP_CSUM_FEATURES;
1753 else
1754 ifp->if_hwassist &= ~TXP_CSUM_FEATURES;
1755 }
1756 if ((mask & IFCAP_RXCSUM) != 0 &&
1757 (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
1758 ifp->if_capenable ^= IFCAP_RXCSUM;
1759 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1760 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1761 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1762 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1763 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
1764 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1765 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1766 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1767 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1768 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
1769 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1770 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1771 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
1772 if (capenable != ifp->if_capenable)
1773 txp_set_capabilities(sc);
1774 TXP_UNLOCK(sc);
1775 VLAN_CAPABILITIES(ifp);
1776 break;
1777 case SIOCGIFMEDIA:
1778 case SIOCSIFMEDIA:
1779 error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
1780 break;
1781 default:
1782 error = ether_ioctl(ifp, command, data);
1783 break;
1784 }
1785
1786 return (error);
1787 }
1788
1789 static int
1790 txp_rxring_fill(struct txp_softc *sc)
1791 {
1792 struct txp_rxbuf_desc *rbd;
1793 struct txp_rx_swdesc *sd;
1794 bus_dma_segment_t segs[1];
1795 int error, i, nsegs;
1796
1797 TXP_LOCK_ASSERT(sc);
1798
1799 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1800 sc->sc_cdata.txp_rxbufs_map,
1801 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1802
1803 for (i = 0; i < RXBUF_ENTRIES; i++) {
1804 sd = TAILQ_FIRST(&sc->sc_free_list);
1805 if (sd == NULL)
1806 return (ENOMEM);
1807 rbd = sc->sc_rxbufs + i;
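/*
 * Stash the pointer to this software descriptor in the (otherwise
 * unused) 64-bit virtual address field of the buffer descriptor;
 * presumably the Rx completion path copies it back out to find the
 * mbuf and DMA map that belong to a completed buffer.
 */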
1808 bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
1809 KASSERT(sd->sd_mbuf == NULL,
1810 ("%s : Rx buffer ring corrupted", __func__));
1811 sd->sd_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1812 if (sd->sd_mbuf == NULL)
1813 return (ENOMEM);
1814 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
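/*
 * The firmware cannot be told to offset its Rx DMA writes (see the
 * TXP_CMD_RECV_BUFFER_CONTROL note in txp_init_locked()), so on
 * strict-alignment machines the driver trims TXP_RXBUF_ALIGN bytes
 * from the cluster itself before handing it to the controller.
 */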
1815 #ifndef __NO_STRICT_ALIGNMENT
1816 m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
1817 #endif
1818 if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
1819 sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) {
1820 m_freem(sd->sd_mbuf);
1821 sd->sd_mbuf = NULL;
1822 return (error);
1823 }
1824 KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
1825 nsegs));
1826 TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
1827 TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
1828 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1829 BUS_DMASYNC_PREREAD);
1830 rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
1831 rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
1832 }
1833
1834 bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
1835 sc->sc_cdata.txp_rxbufs_map,
1836 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
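/*
 * Publish the fill level to the firmware: hv_rx_buf_write_idx lives
 * in the shared host-variables block and, like the other ring
 * pointers, is expressed as a byte offset (TXP_IDX2OFFSET) rather
 * than an entry index.
 */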
1837 sc->sc_rxbufprod = RXBUF_ENTRIES - 1;
1838 sc->sc_hostvar->hv_rx_buf_write_idx =
1839 htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1));
1840
1841 return (0);
1842 }
1843
1844 static void
1845 txp_rxring_empty(struct txp_softc *sc)
1846 {
1847 struct txp_rx_swdesc *sd;
1848 int cnt;
1849
1850 TXP_LOCK_ASSERT(sc);
1851
1852 if (sc->sc_rxbufs == NULL)
1853 return;
1854 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1855 sc->sc_cdata.txp_hostvar_map,
1856 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1857
1858 /* Release allocated Rx buffers. */
1859 cnt = 0;
1860 while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) {
1861 TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
1862 KASSERT(sd->sd_mbuf != NULL,
1863 ("%s : Rx buffer ring corrupted", __func__));
1864 bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
1865 BUS_DMASYNC_POSTREAD);
1866 bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
1867 m_freem(sd->sd_mbuf);
1868 sd->sd_mbuf = NULL;
1869 TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
1870 cnt++;
1871 }
1872 }
1873
1874 static void
1875 txp_init(void *xsc)
1876 {
1877 struct txp_softc *sc;
1878
1879 sc = xsc;
1880 TXP_LOCK(sc);
1881 txp_init_locked(sc);
1882 TXP_UNLOCK(sc);
1883 }
1884
1885 static void
1886 txp_init_locked(struct txp_softc *sc)
1887 {
1888 struct ifnet *ifp;
1889 uint8_t *eaddr;
1890 uint16_t p1;
1891 uint32_t p2;
1892 int error;
1893
1894 TXP_LOCK_ASSERT(sc);
1895 ifp = sc->sc_ifp;
1896
1897 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1898 return;
1899
1900 /* Initialize ring structure. */
1901 txp_init_rings(sc);
1902 /* Wakeup controller. */
1903 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP);
1904 TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
1905 /*
1906 * It seems that earlier NV images can come back online from the
1907 * wakeup command but newer ones require a controller reset.
1908 * So just reset the controller again.
1909 */
1910 if (txp_reset(sc) != 0)
1911 goto init_fail;
1912 /* Download firmware. */
1913 error = txp_download_fw(sc);
1914 if (error != 0) {
1915 device_printf(sc->sc_dev, "could not download firmware.\n");
1916 goto init_fail;
1917 }
1918 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1919 sc->sc_cdata.txp_hostvar_map,
1920 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1921 if ((error = txp_rxring_fill(sc)) != 0) {
1922 device_printf(sc->sc_dev, "no memory for Rx buffers.\n");
1923 goto init_fail;
1924 }
1925 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
1926 sc->sc_cdata.txp_hostvar_map,
1927 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1928 if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) {
1929 device_printf(sc->sc_dev, "could not boot firmware.\n");
1930 goto init_fail;
1931 }
1932
1933 /*
1934 * Quite contrary to the Typhoon T2 software functional specification,
1935 * it seems that the TXP_CMD_RECV_BUFFER_CONTROL command is not
1936 * implemented in the firmware. This means the driver has to handle
1937 * misaligned frames on strict-alignment architectures. AFAIK this
1938 * is the only controller manufactured by 3Com that has this stupid
1939 * bug. 3Com should fix this.
1940 */
1941 if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
1942 NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1943 goto init_fail;
1944 /* Undocumented command (interrupt coalescing disable?) - from Linux. */
1945 if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL,
1946 TXP_CMD_NOWAIT) != 0)
1947 goto init_fail;
1948
1949 /* Set station address. */
1950 eaddr = IF_LLADDR(sc->sc_ifp);
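/*
 * The station address is split across the two command parameters:
 * par1 carries the first two bytes and par2 the remaining four.
 * For example, 00:04:76:11:22:33 is sent as p1 = 0x0004 and
 * p2 = 0x76112233; txp_command() converts both to little endian.
 */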
1951 p1 = 0;
1952 ((uint8_t *)&p1)[1] = eaddr[0];
1953 ((uint8_t *)&p1)[0] = eaddr[1];
1954 p1 = le16toh(p1);
1955 ((uint8_t *)&p2)[3] = eaddr[2];
1956 ((uint8_t *)&p2)[2] = eaddr[3];
1957 ((uint8_t *)&p2)[1] = eaddr[4];
1958 ((uint8_t *)&p2)[0] = eaddr[5];
1959 p2 = le32toh(p2);
1960 if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
1961 NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1962 goto init_fail;
1963
1964 txp_set_filter(sc);
1965 txp_set_capabilities(sc);
1966
1967 if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
1968 NULL, NULL, NULL, TXP_CMD_NOWAIT))
1969 goto init_fail;
1970 if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0,
1971 NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
1972 goto init_fail;
1973 if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1974 TXP_CMD_NOWAIT) != 0)
1975 goto init_fail;
1976 if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
1977 TXP_CMD_NOWAIT) != 0)
1978 goto init_fail;
1979
1980 /* Ack all pending interrupts and enable interrupts. */
1981 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
1982 WRITE_REG(sc, TXP_IER, TXP_INTRS);
1983 WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
1984
1985 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1986 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1987
1988 callout_reset(&sc->sc_tick, hz, txp_tick, sc);
1989 return;
1990
1991 init_fail:
1992 txp_rxring_empty(sc);
1993 txp_init_rings(sc);
1994 txp_reset(sc);
1995 WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
1996 }
1997
1998 static void
1999 txp_tick(void *vsc)
2000 {
2001 struct txp_softc *sc;
2002 struct ifnet *ifp;
2003 struct txp_rsp_desc *rsp;
2004 struct txp_ext_desc *ext;
2005 int link;
2006
2007 sc = vsc;
2008 TXP_LOCK_ASSERT(sc);
2009 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2010 sc->sc_cdata.txp_hostvar_map,
2011 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2012 txp_rxbuf_reclaim(sc);
2013 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2014 sc->sc_cdata.txp_hostvar_map,
2015 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2016
2017 ifp = sc->sc_ifp;
2018 rsp = NULL;
2019
2020 link = sc->sc_flags & TXP_FLAG_LINK;
2021 if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2022 &rsp, TXP_CMD_WAIT))
2023 goto out;
2024 if (rsp->rsp_numdesc != 6)
2025 goto out;
2026 txp_stats_update(sc, rsp);
2027 if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) {
2028 ext = (struct txp_ext_desc *)(rsp + 1);
2029 /* Update baudrate with resolved speed. */
2030 if ((ext[5].ext_2 & 0x02) != 0)
2031 ifp->if_baudrate = IF_Mbps(100);
2032 else
2033 ifp->if_baudrate = IF_Mbps(10);
2034 }
2035
2036 out:
2037 if (rsp != NULL)
2038 free(rsp, M_DEVBUF);
2039 txp_watchdog(sc);
2040 callout_reset(&sc->sc_tick, hz, txp_tick, sc);
2041 }
2042
2043 static void
2044 txp_start(struct ifnet *ifp)
2045 {
2046 struct txp_softc *sc;
2047
2048 sc = ifp->if_softc;
2049 TXP_LOCK(sc);
2050 txp_start_locked(ifp);
2051 TXP_UNLOCK(sc);
2052 }
2053
2054 static void
2055 txp_start_locked(struct ifnet *ifp)
2056 {
2057 struct txp_softc *sc;
2058 struct mbuf *m_head;
2059 int enq;
2060
2061 sc = ifp->if_softc;
2062 TXP_LOCK_ASSERT(sc);
2063
2064 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2065 IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0)
2066 return;
2067
2068 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2069 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2070 if (m_head == NULL)
2071 break;
2072 /*
2073 * Pack the data into the transmit ring. If we
2074 * don't have room, set the OACTIVE flag and wait
2075 * for the NIC to drain the ring.
2076 * At the moment only the high priority (Hi) ring is used.
2077 */
2078 if (txp_encap(sc, &sc->sc_txhir, &m_head)) {
2079 if (m_head == NULL)
2080 break;
2081 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2082 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2083 break;
2084 }
2085 enq++; /* count queued frames so the watchdog below gets armed */
2086 /*
2087 * If there's a BPF listener, bounce a copy of this frame
2088 * to him.
2089 */
2090 ETHER_BPF_MTAP(ifp, m_head);
2091
2092 /* Send queued frame. */
2093 WRITE_REG(sc, sc->sc_txhir.r_reg,
2094 TXP_IDX2OFFSET(sc->sc_txhir.r_prod));
2095 }
2096
2097 if (enq > 0) {
2098 /* Set a timeout in case the chip goes out to lunch. */
2099 sc->sc_watchdog_timer = TXP_TX_TIMEOUT;
2100 }
2101 }
2102
2103 static int
2104 txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head)
2105 {
2106 struct txp_tx_desc *first_txd;
2107 struct txp_frag_desc *fxd;
2108 struct txp_swdesc *sd;
2109 struct mbuf *m;
2110 bus_dma_segment_t txsegs[TXP_MAXTXSEGS];
2111 int error, i, nsegs;
2112
2113 TXP_LOCK_ASSERT(sc);
2114
2115 M_ASSERTPKTHDR((*m_head));
2116
2117 m = *m_head;
2118 first_txd = r->r_desc + r->r_prod;
2119 sd = sc->sc_txd + r->r_prod;
2120
2121 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2122 *m_head, txsegs, &nsegs, 0);
2123 if (error == EFBIG) {
2124 m = m_collapse(*m_head, M_NOWAIT, TXP_MAXTXSEGS);
2125 if (m == NULL) {
2126 m_freem(*m_head);
2127 *m_head = NULL;
2128 return (ENOMEM);
2129 }
2130 *m_head = m;
2131 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
2132 sd->sd_map, *m_head, txsegs, &nsegs, 0);
2133 if (error != 0) {
2134 m_freem(*m_head);
2135 *m_head = NULL;
2136 return (error);
2137 }
2138 } else if (error != 0)
2139 return (error);
2140 if (nsegs == 0) {
2141 m_freem(*m_head);
2142 *m_head = NULL;
2143 return (EIO);
2144 }
2145
2146 /* Check descriptor overrun. */
2147 if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) {
2148 bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map);
2149 return (ENOBUFS);
2150 }
2151 bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map,
2152 BUS_DMASYNC_PREWRITE);
2153 sd->sd_mbuf = m;
2154
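/*
 * A Tx request is a small chain in the ring: one header descriptor
 * (TX_FLAGS_TYPE_DATA) followed by one fragment descriptor per DMA
 * segment.  The header's VALID bit is set only after every fragment
 * has been written (see below), presumably so the firmware never
 * picks up a half-built chain.
 */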
2155 first_txd->tx_flags = TX_FLAGS_TYPE_DATA;
2156 first_txd->tx_numdesc = 0;
2157 first_txd->tx_addrlo = 0;
2158 first_txd->tx_addrhi = 0;
2159 first_txd->tx_totlen = 0;
2160 first_txd->tx_pflags = 0;
2161 r->r_cnt++;
2162 TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2163
2164 /* Configure Tx IP/TCP/UDP checksum offload. */
2165 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2166 first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM);
2167 #ifdef notyet
2168 /* XXX firmware bug. */
2169 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2170 first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM);
2171 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2172 first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM);
2173 #endif
2174
2175 /* Configure VLAN hardware tag insertion. */
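/*
 * Note the tag is byte-swapped before the little-endian conversion;
 * the firmware apparently expects the 802.1Q tag in network byte
 * order within the pflags word.
 */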
2176 if ((m->m_flags & M_VLANTAG) != 0)
2177 first_txd->tx_pflags |=
2178 htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO |
2179 (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S));
2180
2181 for (i = 0; i < nsegs; i++) {
2182 fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod);
2183 fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID;
2184 fxd->frag_rsvd1 = 0;
2185 fxd->frag_len = htole16(txsegs[i].ds_len);
2186 fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr));
2187 fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr));
2188 fxd->frag_rsvd2 = 0;
2189 first_txd->tx_numdesc++;
2190 r->r_cnt++;
2191 TXP_DESC_INC(r->r_prod, TX_ENTRIES);
2192 }
2193
2194 /* Lastly set valid flag. */
2195 first_txd->tx_flags |= TX_FLAGS_VALID;
2196
2197 /* Sync descriptors. */
2198 bus_dmamap_sync(r->r_tag, r->r_map,
2199 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2200
2201 return (0);
2202 }
2203
2204 /*
2205 * Handle simple commands sent to the typhoon
2206 */
2207 static int
2208 txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2209 uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
2210 {
2211 struct txp_rsp_desc *rsp;
2212
2213 rsp = NULL;
2214 if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) {
2215 device_printf(sc->sc_dev, "command 0x%02x failed\n", id);
2216 return (-1);
2217 }
2218
2219 if (wait == TXP_CMD_NOWAIT)
2220 return (0);
2221
2222 KASSERT(rsp != NULL, ("rsp is NULL!\n"));
2223 if (out1 != NULL)
2224 *out1 = le16toh(rsp->rsp_par1);
2225 if (out2 != NULL)
2226 *out2 = le32toh(rsp->rsp_par2);
2227 if (out3 != NULL)
2228 *out3 = le32toh(rsp->rsp_par3);
2229 free(rsp, M_DEVBUF);
2230 return (0);
2231 }
2232
2233 static int
2234 txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
2235 uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
2236 struct txp_rsp_desc **rspp, int wait)
2237 {
2238 struct txp_hostvar *hv;
2239 struct txp_cmd_desc *cmd;
2240 struct txp_ext_desc *ext;
2241 uint32_t idx, i;
2242 uint16_t seq;
2243 int error;
2244
2245 error = 0;
2246 hv = sc->sc_hostvar;
2247 if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
2248 device_printf(sc->sc_dev,
2249 "%s : out of free cmd descriptors for command 0x%02x\n",
2250 __func__, id);
2251 return (ENOBUFS);
2252 }
2253
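/*
 * The command ring producer pointer (lastwrite) is kept as a byte
 * offset rather than a descriptor index, which is why it advances by
 * sizeof(struct txp_cmd_desc) and wraps when it reaches the ring
 * size.
 */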
2254 bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2255 sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE);
2256 idx = sc->sc_cmdring.lastwrite;
2257 cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2258 bzero(cmd, sizeof(*cmd));
2259
2260 cmd->cmd_numdesc = in_extn;
2261 seq = sc->sc_seq++;
2262 cmd->cmd_seq = htole16(seq);
2263 cmd->cmd_id = htole16(id);
2264 cmd->cmd_par1 = htole16(in1);
2265 cmd->cmd_par2 = htole32(in2);
2266 cmd->cmd_par3 = htole32(in3);
2267 cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
2268 (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
2269
2270 idx += sizeof(struct txp_cmd_desc);
2271 if (idx == sc->sc_cmdring.size)
2272 idx = 0;
2273
2274 for (i = 0; i < in_extn; i++) {
2275 ext = (struct txp_ext_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
2276 bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
2277 in_extp++;
2278 idx += sizeof(struct txp_cmd_desc);
2279 if (idx == sc->sc_cmdring.size)
2280 idx = 0;
2281 }
2282
2283 sc->sc_cmdring.lastwrite = idx;
2284 bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
2285 sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE);
2286 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2287 sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD |
2288 BUS_DMASYNC_PREWRITE);
2289 WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
2290 TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
2291
2292 if (wait == TXP_CMD_NOWAIT)
2293 return (0);
2294
2295 for (i = 0; i < TXP_TIMEOUT; i++) {
2296 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2297 sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD |
2298 BUS_DMASYNC_POSTWRITE);
2299 if (le32toh(hv->hv_resp_read_idx) !=
2300 le32toh(hv->hv_resp_write_idx)) {
2301 error = txp_response(sc, id, seq, rspp);
2302 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2303 sc->sc_cdata.txp_hostvar_map,
2304 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2305 if (error != 0)
2306 return (error);
2307 if (*rspp != NULL)
2308 break;
2309 }
2310 DELAY(50);
2311 }
2312 if (i == TXP_TIMEOUT) {
2313 device_printf(sc->sc_dev, "command 0x%02x timed out\n", id);
2314 error = ETIMEDOUT;
2315 }
2316
2317 return (error);
2318 }
2319
2320 static int
2321 txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq,
2322 struct txp_rsp_desc **rspp)
2323 {
2324 struct txp_hostvar *hv;
2325 struct txp_rsp_desc *rsp;
2326 uint32_t ridx;
2327
2328 bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag,
2329 sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD);
2330 hv = sc->sc_hostvar;
2331 ridx = le32toh(hv->hv_resp_read_idx);
2332 while (ridx != le32toh(hv->hv_resp_write_idx)) {
2333 rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);
2334
2335 if (id == le16toh(rsp->rsp_id) &&
2336 le16toh(rsp->rsp_seq) == seq) {
2337 *rspp = (struct txp_rsp_desc *)malloc(
2338 sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
2339 M_DEVBUF, M_NOWAIT);
2340 if (*rspp == NULL) {
2341 device_printf(sc->sc_dev,"%s : command 0x%02x "
2342 "memory allocation failure\n",
2343 __func__, id);
2344 return (ENOMEM);
2345 }
2346 txp_rsp_fixup(sc, rsp, *rspp);
2347 return (0);
2348 }
2349
2350 if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) {
2351 device_printf(sc->sc_dev,
2352 "%s : command 0x%02x response error!\n", __func__,
2353 le16toh(rsp->rsp_id));
2354 txp_rsp_fixup(sc, rsp, NULL);
2355 ridx = le32toh(hv->hv_resp_read_idx);
2356 continue;
2357 }
2358
2359 /*
2360 * The following unsolicited responses are handled during
2361 * processing of TXP_CMD_READ_STATISTICS, which requires a
2362 * response. The driver abuses that command to detect media
2363 * status changes.
2364 * TXP_CMD_FILTER_DEFINE is not an unsolicited response
2365 * but we don't process the response ring in the interrupt handler,
2366 * so we have to ignore this command here; otherwise an
2367 * "unknown command" message would be printed.
2368 */
2369 switch (le16toh(rsp->rsp_id)) {
2370 case TXP_CMD_CYCLE_STATISTICS:
2371 case TXP_CMD_FILTER_DEFINE:
2372 break;
2373 case TXP_CMD_MEDIA_STATUS_READ:
2374 if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) {
2375 sc->sc_flags |= TXP_FLAG_LINK;
2376 if_link_state_change(sc->sc_ifp,
2377 LINK_STATE_UP);
2378 } else {
2379 sc->sc_flags &= ~TXP_FLAG_LINK;
2380 if_link_state_change(sc->sc_ifp,
2381 LINK_STATE_DOWN);
2382 }
2383 break;
2384 case TXP_CMD_HELLO_RESPONSE:
2385 /*
2386 * The driver should respond to the hello message, but
2387 * TXP_CMD_READ_STATISTICS is already issued every hz,
2388 * so there is no need to send an
2389 * explicit command here.
2390 */
2391 device_printf(sc->sc_dev, "%s : hello\n", __func__);
2392 break;
2393 default:
2394 device_printf(sc->sc_dev,
2395 "%s : unknown command 0x%02x\n", __func__,
2396 le16toh(rsp->rsp_id));
2397 }
2398 txp_rsp_fixup(sc, rsp, NULL);
2399 ridx = le32toh(hv->hv_resp_read_idx);
2400 }
2401
2402 return (0);
2403 }
2404
2405 static void
2406 txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
2407 struct txp_rsp_desc *dst)
2408 {
2409 struct txp_rsp_desc *src;
2410 struct txp_hostvar *hv;
2411 uint32_t i, ridx;
2412
2413 src = rsp;
2414 hv = sc->sc_hostvar;
2415 ridx = le32toh(hv->hv_resp_read_idx);
2416
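/*
 * A response occupies rsp_numdesc extension descriptors following the
 * response descriptor itself, hence numdesc + 1 slots are consumed.
 * Each slot is optionally copied out to dst, and the host's read
 * index is advanced past it, wrapping at the ring size.
 */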
2417 for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
2418 if (dst != NULL)
2419 bcopy(src, dst++, sizeof(struct txp_rsp_desc));
2420 ridx += sizeof(struct txp_rsp_desc);
2421 if (ridx == sc->sc_rspring.size) {
2422 src = sc->sc_rspring.base;
2423 ridx = 0;
2424 } else
2425 src++;
2426 sc->sc_rspring.lastwrite = ridx;
2427 }
2428
2429 hv->hv_resp_read_idx = htole32(ridx);
2430 }
2431
2432 static int
2433 txp_cmd_desc_numfree(struct txp_softc *sc)
2434 {
2435 struct txp_hostvar *hv;
2436 struct txp_boot_record *br;
2437 uint32_t widx, ridx, nfree;
2438
2439 bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
2440 sc->sc_cdata.txp_hostvar_map,
2441 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2442 hv = sc->sc_hostvar;
2443 br = sc->sc_boot;
2444 widx = sc->sc_cmdring.lastwrite;
2445 ridx = le32toh(hv->hv_cmd_read_idx);
2446
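/*
 * One descriptor's worth of space is always held back so that a full
 * ring can be told apart from an empty one.  As a rough example,
 * assuming 16-byte command descriptors and a 512-byte ring:
 * widx == ridx gives (512 - 16) / 16 = 31 free slots, while
 * widx = 64, ridx = 32 gives (512 - (32 + 16)) / 16 = 29 free slots.
 */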
2447 if (widx == ridx) {
2448 /* Ring is completely free */
2449 nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
2450 } else {
2451 if (widx > ridx)
2452 nfree = le32toh(br->br_cmd_siz) -
2453 (widx - ridx + sizeof(struct txp_cmd_desc));
2454 else
2455 nfree = ridx - widx - sizeof(struct txp_cmd_desc);
2456 }
2457
2458 return (nfree / sizeof(struct txp_cmd_desc));
2459 }
2460
2461 static int
2462 txp_sleep(struct txp_softc *sc, int capenable)
2463 {
2464 uint16_t events;
2465 int error;
2466
2467 events = 0;
2468 if ((capenable & IFCAP_WOL_MAGIC) != 0)
2469 events |= 0x01;
2470 error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0,
2471 NULL, NULL, NULL, TXP_CMD_NOWAIT);
2472 if (error == 0) {
2473 /* Goto sleep. */
2474 error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL,
2475 NULL, NULL, TXP_CMD_NOWAIT);
2476 if (error == 0) {
2477 error = txp_wait(sc, STAT_SLEEPING);
2478 if (error != 0)
2479 device_printf(sc->sc_dev,
2480 "unable to enter sleep state\n");
2481 }
2482 }
2483
2484 return (error);
2485 }
2486
2487 static void
2488 txp_stop(struct txp_softc *sc)
2489 {
2490 struct ifnet *ifp;
2491
2492 TXP_LOCK_ASSERT(sc);
2493 ifp = sc->sc_ifp;
2494
2495 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2496 return;
2497
2498 WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
2499 WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
2500
2501 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2502 sc->sc_flags &= ~TXP_FLAG_LINK;
2503
2504 callout_stop(&sc->sc_tick);
2505
2506 txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2507 TXP_CMD_NOWAIT);
2508 txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
2509 TXP_CMD_NOWAIT);
2510 /* Save statistics for later use. */
2511 txp_stats_save(sc);
2512 /* Halt controller. */
2513 txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL,
2514 TXP_CMD_NOWAIT);
2515
2516 if (txp_wait(sc, STAT_HALTED) != 0)
2517 device_printf(sc->sc_dev, "controller halt timed out!\n");
2518 /* Reclaim Tx/Rx buffers. */
2519 if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
2520 TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
2521 txp_tx_reclaim(sc, &sc->sc_txhir);
2522 if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
2523 TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
2524 txp_tx_reclaim(sc, &sc->sc_txlor);
2525 txp_rxring_empty(sc);
2526
2527 txp_init_rings(sc);
2528 /* Reset controller and make it reload sleep image. */
2529 txp_reset(sc);
2530 /* Let controller boot from sleep image. */
2531 if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
2532 device_printf(sc->sc_dev, "could not boot sleep image\n");
2533 txp_sleep(sc, 0);
2534 }
2535
2536 static void
2537 txp_watchdog(struct txp_softc *sc)
2538 {
2539 struct ifnet *ifp;
2540
2541 TXP_LOCK_ASSERT(sc);
2542
2543 if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
2544 return;
2545
2546 ifp = sc->sc_ifp;
2547 if_printf(ifp, "watchdog timeout -- resetting\n");
2548 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2549 txp_stop(sc);
2550 txp_init_locked(sc);
2551 }
2552
2553 static int
2554 txp_ifmedia_upd(struct ifnet *ifp)
2555 {
2556 struct txp_softc *sc = ifp->if_softc;
2557 struct ifmedia *ifm = &sc->sc_ifmedia;
2558 uint16_t new_xcvr;
2559
2560 TXP_LOCK(sc);
2561 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
2562 TXP_UNLOCK(sc);
2563 return (EINVAL);
2564 }
2565
2566 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
2567 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2568 new_xcvr = TXP_XCVR_10_FDX;
2569 else
2570 new_xcvr = TXP_XCVR_10_HDX;
2571 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
2572 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2573 new_xcvr = TXP_XCVR_100_FDX;
2574 else
2575 new_xcvr = TXP_XCVR_100_HDX;
2576 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
2577 new_xcvr = TXP_XCVR_AUTO;
2578 } else {
2579 TXP_UNLOCK(sc);
2580 return (EINVAL);
2581 }
2582
2583 /* nothing to do */
2584 if (sc->sc_xcvr == new_xcvr) {
2585 TXP_UNLOCK(sc);
2586 return (0);
2587 }
2588
2589 txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
2590 NULL, NULL, NULL, TXP_CMD_NOWAIT);
2591 sc->sc_xcvr = new_xcvr;
2592 TXP_UNLOCK(sc);
2593
2594 return (0);
2595 }
2596
2597 static void
2598 txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2599 {
2600 struct txp_softc *sc = ifp->if_softc;
2601 struct ifmedia *ifm = &sc->sc_ifmedia;
2602 uint16_t bmsr, bmcr, anar, anlpar;
2603
2604 ifmr->ifm_status = IFM_AVALID;
2605 ifmr->ifm_active = IFM_ETHER;
2606
2607 TXP_LOCK(sc);
2608 /* Check whether firmware is running. */
2609 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2610 goto bail;
2611 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2612 &bmsr, NULL, NULL, TXP_CMD_WAIT))
2613 goto bail;
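/*
 * The link status bit in BMSR is latched, so it is read twice; the
 * second read reflects the current link state.
 */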
2614 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
2615 &bmsr, NULL, NULL, TXP_CMD_WAIT))
2616 goto bail;
2617
2618 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
2619 &bmcr, NULL, NULL, TXP_CMD_WAIT))
2620 goto bail;
2621
2622 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
2623 &anlpar, NULL, NULL, TXP_CMD_WAIT))
2624 goto bail;
2625
2626 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
2627 &anar, NULL, NULL, TXP_CMD_WAIT))
2628 goto bail;
2629 TXP_UNLOCK(sc);
2630
2631 if (bmsr & BMSR_LINK)
2632 ifmr->ifm_status |= IFM_ACTIVE;
2633
2634 if (bmcr & BMCR_ISO) {
2635 ifmr->ifm_active |= IFM_NONE;
2636 ifmr->ifm_status = 0;
2637 return;
2638 }
2639
2640 if (bmcr & BMCR_LOOP)
2641 ifmr->ifm_active |= IFM_LOOP;
2642
2643 if (bmcr & BMCR_AUTOEN) {
2644 if ((bmsr & BMSR_ACOMP) == 0) {
2645 ifmr->ifm_active |= IFM_NONE;
2646 return;
2647 }
2648
2649 anlpar &= anar;
2650 if (anlpar & ANLPAR_TX_FD)
2651 ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
2652 else if (anlpar & ANLPAR_T4)
2653 ifmr->ifm_active |= IFM_100_T4;
2654 else if (anlpar & ANLPAR_TX)
2655 ifmr->ifm_active |= IFM_100_TX;
2656 else if (anlpar & ANLPAR_10_FD)
2657 ifmr->ifm_active |= IFM_10_T|IFM_FDX;
2658 else if (anlpar & ANLPAR_10)
2659 ifmr->ifm_active |= IFM_10_T;
2660 else
2661 ifmr->ifm_active |= IFM_NONE;
2662 } else
2663 ifmr->ifm_active = ifm->ifm_cur->ifm_media;
2664 return;
2665
2666 bail:
2667 TXP_UNLOCK(sc);
2668 ifmr->ifm_active |= IFM_NONE;
2669 ifmr->ifm_status &= ~IFM_AVALID;
2670 }
2671
2672 #ifdef TXP_DEBUG
2673 static void
2674 txp_show_descriptor(void *d)
2675 {
2676 struct txp_cmd_desc *cmd = d;
2677 struct txp_rsp_desc *rsp = d;
2678 struct txp_tx_desc *txd = d;
2679 struct txp_frag_desc *frgd = d;
2680
2681 switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
2682 case CMD_FLAGS_TYPE_CMD:
2683 /* command descriptor */
2684 printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2685 cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2686 le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2687 le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2688 break;
2689 case CMD_FLAGS_TYPE_RESP:
2690 /* response descriptor */
2691 printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2692 rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
2693 le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
2694 le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
2695 break;
2696 case CMD_FLAGS_TYPE_DATA:
2697 /* data header (assuming tx for now) */
2698 printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
2699 txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen),
2700 le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi),
2701 le32toh(txd->tx_pflags));
2702 break;
2703 case CMD_FLAGS_TYPE_FRAG:
2704 /* fragment descriptor */
2705 printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
2706 frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len),
2707 le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi),
2708 le32toh(frgd->frag_rsvd2));
2709 break;
2710 default:
2711 printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
2712 cmd->cmd_flags & CMD_FLAGS_TYPE_M,
2713 cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
2714 le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
2715 le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
2716 break;
2717 }
2718 }
2719 #endif
2720
2721 static void
2722 txp_set_filter(struct txp_softc *sc)
2723 {
2724 struct ifnet *ifp;
2725 uint32_t crc, mchash[2];
2726 uint16_t filter;
2727 struct ifmultiaddr *ifma;
2728 int mcnt;
2729
2730 TXP_LOCK_ASSERT(sc);
2731
2732 ifp = sc->sc_ifp;
2733 filter = TXP_RXFILT_DIRECT;
2734 if ((ifp->if_flags & IFF_BROADCAST) != 0)
2735 filter |= TXP_RXFILT_BROADCAST;
2736 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2737 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2738 filter |= TXP_RXFILT_ALLMULTI;
2739 if ((ifp->if_flags & IFF_PROMISC) != 0)
2740 filter = TXP_RXFILT_PROMISC;
2741 goto setit;
2742 }
2743
2744 mchash[0] = mchash[1] = 0;
2745 mcnt = 0;
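/*
 * The controller's multicast filter is a 64-bit hash: the low six
 * bits of the big-endian CRC of each address select a bit, with
 * values 0-31 landing in mchash[0] and 32-63 in mchash[1].  For
 * example, a CRC whose low six bits are 42 sets bit 10 of mchash[1].
 */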
2746 if_maddr_rlock(ifp);
2747 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2748 if (ifma->ifma_addr->sa_family != AF_LINK)
2749 continue;
2750 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2751 ifma->ifma_addr), ETHER_ADDR_LEN);
2752 crc &= 0x3f;
2753 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2754 mcnt++;
2755 }
2756 if_maddr_runlock(ifp);
2757
2758 if (mcnt > 0) {
2759 filter |= TXP_RXFILT_HASHMULTI;
2760 txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0],
2761 mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT);
2762 }
2763
2764 setit:
2765 txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
2766 NULL, NULL, NULL, TXP_CMD_NOWAIT);
2767 }
2768
2769 static int
2770 txp_set_capabilities(struct txp_softc *sc)
2771 {
2772 struct ifnet *ifp;
2773 uint32_t rxcap, txcap;
2774
2775 TXP_LOCK_ASSERT(sc);
2776
2777 rxcap = txcap = 0;
2778 ifp = sc->sc_ifp;
2779 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
2780 if ((ifp->if_hwassist & CSUM_IP) != 0)
2781 txcap |= OFFLOAD_IPCKSUM;
2782 if ((ifp->if_hwassist & CSUM_TCP) != 0)
2783 txcap |= OFFLOAD_TCPCKSUM;
2784 if ((ifp->if_hwassist & CSUM_UDP) != 0)
2785 txcap |= OFFLOAD_UDPCKSUM;
2786 rxcap = txcap;
2787 }
2788 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2789 rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM |
2790 OFFLOAD_UDPCKSUM);
2791 if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2792 rxcap |= OFFLOAD_VLAN;
2793 txcap |= OFFLOAD_VLAN;
2794 }
2795
2796 /* Tell firmware new offload configuration. */
2797 return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL,
2798 NULL, NULL, TXP_CMD_NOWAIT));
2799 }
2800
2801 static void
2802 txp_stats_save(struct txp_softc *sc)
2803 {
2804 struct txp_rsp_desc *rsp;
2805
2806 TXP_LOCK_ASSERT(sc);
2807
2808 rsp = NULL;
2809 if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
2810 &rsp, TXP_CMD_WAIT))
2811 goto out;
2812 if (rsp->rsp_numdesc != 6)
2813 goto out;
2814 txp_stats_update(sc, rsp);
2815 out:
2816 if (rsp != NULL)
2817 free(rsp, M_DEVBUF);
2818 bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats));
2819 }
2820
2821 static void
2822 txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp)
2823 {
2824 struct txp_hw_stats *ostats, *stats;
2825 struct txp_ext_desc *ext;
2826
2827 TXP_LOCK_ASSERT(sc);
2828
2829 ext = (struct txp_ext_desc *)(rsp + 1);
2830 ostats = &sc->sc_ostats;
2831 stats = &sc->sc_stats;
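/*
 * Each firmware counter is added on top of the snapshot taken by
 * txp_stats_save() before the last stop/reset, since the chip starts
 * counting from zero again after re-initialization.  The byte
 * counters come back split into two 32-bit halves and are reassembled
 * into 64-bit values here.
 */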
2832 stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2);
2833 stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) +
2834 ((uint64_t)le32toh(ext[0].ext_1) << 32);
2835 stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2);
2836 stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3);
2837 stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4);
2838 stats->tx_carrier_lost = ostats->tx_carrier_lost +
2839 le32toh(ext[1].ext_1);
2840 stats->tx_multi_colls = ostats->tx_multi_colls +
2841 le32toh(ext[1].ext_2);
2842 stats->tx_excess_colls = ostats->tx_excess_colls +
2843 le32toh(ext[1].ext_3);
2844 stats->tx_fifo_underruns = ostats->tx_fifo_underruns +
2845 le32toh(ext[1].ext_4);
2846 stats->tx_mcast_oflows = ostats->tx_mcast_oflows +
2847 le32toh(ext[2].ext_1);
2848 stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2);
2849 stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3);
2850 stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) +
2851 ((uint64_t)le32toh(ext[3].ext_1) << 32);
2852 stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2);
2853 stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3);
2854 stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4);
2855 stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1);
2856 stats->rx_bcast_frames = ostats->rx_bcast_frames +
2857 le32toh(ext[4].ext_2);
2858 stats->rx_mcast_frames = ostats->rx_mcast_frames +
2859 le32toh(ext[4].ext_3);
2860 stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4);
2861 stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1);
2862 }
2863
2864 static uint64_t
2865 txp_get_counter(struct ifnet *ifp, ift_counter cnt)
2866 {
2867 struct txp_softc *sc;
2868 struct txp_hw_stats *stats;
2869
2870 sc = if_getsoftc(ifp);
2871 stats = &sc->sc_stats;
2872
2873 switch (cnt) {
2874 case IFCOUNTER_IERRORS:
2875 return (stats->rx_fifo_oflows + stats->rx_badssd +
2876 stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows);
2877 case IFCOUNTER_OERRORS:
2878 return (stats->tx_deferred + stats->tx_carrier_lost +
2879 stats->tx_fifo_underruns + stats->tx_mcast_oflows);
2880 case IFCOUNTER_COLLISIONS:
2881 return (stats->tx_late_colls + stats->tx_multi_colls +
2882 stats->tx_excess_colls);
2883 case IFCOUNTER_OPACKETS:
2884 return (stats->tx_frames);
2885 case IFCOUNTER_IPACKETS:
2886 return (stats->rx_frames);
2887 default:
2888 return (if_get_counter_default(ifp, cnt));
2889 }
2890 }
2891
2892 #define TXP_SYSCTL_STAT_ADD32(c, h, n, p, d) \
2893 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2894
2895 #if __FreeBSD_version >= 900030
2896 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2897 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2898 #elif __FreeBSD_version > 800000
2899 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2900 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2901 #else
2902 #define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2903 SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
2904 #endif
2905
2906 static void
2907 txp_sysctl_node(struct txp_softc *sc)
2908 {
2909 struct sysctl_ctx_list *ctx;
2910 struct sysctl_oid_list *child, *parent;
2911 struct sysctl_oid *tree;
2912 struct txp_hw_stats *stats;
2913 int error;
2914
2915 stats = &sc->sc_stats;
2916 ctx = device_get_sysctl_ctx(sc->sc_dev);
2917 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
2918 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
2919 CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0,
2920 sysctl_hw_txp_proc_limit, "I",
2921 "max number of Rx events to process");
2922 /* Pull in device tunables. */
2923 sc->sc_process_limit = TXP_PROC_DEFAULT;
2924 error = resource_int_value(device_get_name(sc->sc_dev),
2925 device_get_unit(sc->sc_dev), "process_limit",
2926 &sc->sc_process_limit);
2927 if (error == 0) {
2928 if (sc->sc_process_limit < TXP_PROC_MIN ||
2929 sc->sc_process_limit > TXP_PROC_MAX) {
2930 device_printf(sc->sc_dev,
2931 "process_limit value out of range; "
2932 "using default: %d\n", TXP_PROC_DEFAULT);
2933 sc->sc_process_limit = TXP_PROC_DEFAULT;
2934 }
2935 }
2936 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
2937 NULL, "TXP statistics");
2938 parent = SYSCTL_CHILDREN(tree);
2939
2940 /* Tx statistics. */
2941 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
2942 NULL, "Tx MAC statistics");
2943 child = SYSCTL_CHILDREN(tree);
2944
2945 TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2946 &stats->tx_frames, "Frames");
2947 TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2948 &stats->tx_bytes, "Octets");
2949 TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred",
2950 &stats->tx_deferred, "Deferred frames");
2951 TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
2952 &stats->tx_late_colls, "Late collisions");
2953 TXP_SYSCTL_STAT_ADD32(ctx, child, "colls",
2954 &stats->tx_colls, "Collisions");
2955 TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost",
2956 &stats->tx_carrier_lost, "Carrier lost");
2957 TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
2958 &stats->tx_multi_colls, "Multiple collisions");
2959 TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
2960 &stats->tx_excess_colls, "Excessive collisions");
2961 TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns",
2962 &stats->tx_fifo_underruns, "FIFO underruns");
2963 TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows",
2964 &stats->tx_mcast_oflows, "Multicast overflows");
2965 TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2966 &stats->tx_filtered, "Filtered frames");
2967
2968 /* Rx statistics. */
2969 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
2970 NULL, "Rx MAC statistics");
2971 child = SYSCTL_CHILDREN(tree);
2972
2973 TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
2974 &stats->rx_frames, "Frames");
2975 TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
2976 &stats->rx_bytes, "Octets");
2977 TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
2978 &stats->rx_fifo_oflows, "FIFO overflows");
2979 TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd",
2980 &stats->rx_badssd, "Bad SSD");
2981 TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
2982 &stats->rx_crcerrs, "CRC errors");
2983 TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs",
2984 &stats->rx_lenerrs, "Length errors");
2985 TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames",
2986 &stats->rx_bcast_frames, "Broadcast frames");
2987 TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames",
2988 &stats->rx_mcast_frames, "Multicast frames");
2989 TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows",
2990 &stats->rx_oflows, "Overflows");
2991 TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
2992 &stats->rx_filtered, "Filtered frames");
2993 }
2994
2995 #undef TXP_SYSCTL_STAT_ADD32
2996 #undef TXP_SYSCTL_STAT_ADD64
2997
2998 static int
2999 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3000 {
3001 int error, value;
3002
3003 if (arg1 == NULL)
3004 return (EINVAL);
3005 value = *(int *)arg1;
3006 error = sysctl_handle_int(oidp, &value, 0, req);
3007 if (error || req->newptr == NULL)
3008 return (error);
3009 if (value < low || value > high)
3010 return (EINVAL);
3011 *(int *)arg1 = value;
3012
3013 return (0);
3014 }
3015
3016 static int
3017 sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
3018 {
3019 return (sysctl_int_range(oidp, arg1, arg2, req,
3020 TXP_PROC_MIN, TXP_PROC_MAX));
3021 }
3022