/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008, Pyun YongHyeon <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/jme/if_jmereg.h>
#include <dev/jme/if_jmevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Define the following to disable printing Rx errors. */
#undef	JME_SHOW_ERRORS

#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(jme, pci, 1, 1, 1);
MODULE_DEPEND(jme, ether, 1, 1, 1);
MODULE_DEPEND(jme, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
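/*
 * These are boot-time tunables; e.g. setting hw.jme.msi_disable="1" in
 * /boot/loader.conf makes the driver skip MSI allocation and fall back
 * to a legacy INTx interrupt.
 */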

/*
 * Devices supported by this driver.
 */
static struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	const char	*jme_name;
} jme_devs[] = {
	{ VENDORID_JMICRON, DEVICEID_JMC250,
	    "JMicron Inc, JMC25x Gigabit Ethernet" },
	{ VENDORID_JMICRON, DEVICEID_JMC260,
	    "JMicron Inc, JMC26x Fast Ethernet" },
};

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);
static void	jme_mediastatus(if_t, struct ifmediareq *);
static int	jme_mediachange(if_t);
static int	jme_probe(device_t);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static int	jme_eeprom_macaddr(struct jme_softc *);
static int	jme_efuse_macaddr(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *);
static void	jme_set_macaddr(struct jme_softc *, uint8_t *);
static void	jme_map_intr_vector(struct jme_softc *);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static void	jme_sysctl_node(struct jme_softc *);
static void	jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_shutdown(device_t);
static void	jme_setlinkspeed(struct jme_softc *);
static void	jme_setwol(struct jme_softc *);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_start(if_t);
static void	jme_start_locked(if_t);
static void	jme_watchdog(struct jme_softc *);
static int	jme_ioctl(if_t, u_long, caddr_t);
static void	jme_mac_config(struct jme_softc *);
static void	jme_link_task(void *, int);
static int	jme_intr(void *);
static void	jme_int_task(void *, int);
static void	jme_txeof(struct jme_softc *);
static __inline void jme_discard_rxbuf(struct jme_softc *, int);
static void	jme_rxeof(struct jme_softc *);
static int	jme_rxintr(struct jme_softc *, int);
static void	jme_tick(void *);
static void	jme_reset(struct jme_softc *);
static void	jme_init(void *);
static void	jme_init_locked(struct jme_softc *);
static void	jme_stop(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_softc *);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stats_clear(struct jme_softc *);
static void	jme_stats_save(struct jme_softc *);
static void	jme_stats_update(struct jme_softc *);
static void	jme_phy_down(struct jme_softc *);
static void	jme_phy_up(struct jme_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

DRIVER_MODULE(jme, pci, jme_driver, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, 0, 0);
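/*
 * The driver attaches to the pci bus and hangs a miibus instance off of
 * itself for the PHY; built as a module it is typically loaded with
 * "kldload if_jme".
 */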

static struct resource_spec jme_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec jme_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec jme_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};
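/*
 * The resource IDs differ between the two interrupt models: the legacy
 * INTx interrupt lives at SYS_RES_IRQ rid 0 and may be shared, while
 * MSI/MSI-X vectors are numbered from rid 1 and are exclusive.
 */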

/*
 *	Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
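	/*
	 * The SMI engine clears SMI_OP_EXECUTE once the MDIO transaction
	 * has completed, so spin (in ~1us steps, bounded by
	 * JME_PHY_TIMEOUT) until it does.
	 */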
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 *	Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 *	Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
}

/*
 *	Get the current interface media status.
 */
static void
jme_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	JME_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		JME_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->jme_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	JME_UNLOCK(sc);
}

/*
 *	Set hardware to newly-selected media.
 */
static int
jme_mediachange(if_t ifp)
{
	struct jme_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = if_getsoftc(ifp);
	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	JME_UNLOCK(sc);

	return (error);
}

static int
jme_probe(device_t dev)
{
	struct jme_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = jme_devs;
	for (i = 0; i < nitems(jme_devs); i++, sp++) {
		if (vendor == sp->jme_vendorid &&
		    devid == sp->jme_deviceid) {
			device_set_desc(dev, sp->jme_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

static int
jme_eeprom_macaddr(struct jme_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN) {
		bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
		return (0);
	}

	return (ENOENT);
}

static int
jme_efuse_macaddr(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
	if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) !=
	    EFUSE_CTL1_AUTOLAOD_DONE)
		return (ENOENT);
	/* Reset eFuse controller. */
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
	reg |= EFUSE_CTL2_RESET;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
	reg &= ~EFUSE_CTL2_RESET;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);

	/* Have eFuse reload station address to MAC controller. */
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
	reg &= ~EFUSE_CTL1_CMD_MASK;
	reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4);

	/*
	 * Verify completion of the eFuse autoload command.  It should be
	 * completed within 108us.
	 */
	DELAY(110);
	for (i = 10; i > 0; i--) {
		reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
		if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR |
		    EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) {
			DELAY(20);
			continue;
		}
		if ((reg & EFUSE_CTL1_EXECUTE) == 0)
			break;
		/* Station address loading is still in progress. */
		DELAY(20);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "eFuse autoload timed out.\n");
		return (ETIMEDOUT);
	}

	return (0);
}

static void
jme_reg_macaddr(struct jme_softc *sc)
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) ||
	    (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
		device_printf(sc->jme_dev,
		    "Failed to retrieve Ethernet address.\n");
	} else {
		/*
		 * For controllers that use eFuse, the station address
		 * could also be extracted from the JME_PCI_PAR0 and
		 * JME_PCI_PAR1 registers in PCI configuration space.
		 * However, each of those registers holds only half of
		 * the station address (24 bits), so use the JME_PAR0
		 * and JME_PAR1 CSRs instead.
		 */
		sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
		sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
		sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
		sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
		sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
		sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
	}
}

static void
jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr)
{
	uint32_t val;
	int i;

	if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
		/*
		 * Avoid reprogramming the station address if it is the
		 * same as the previous one.  A reprogrammed station
		 * address is permanent, as if it had been written to
		 * EEPROM, so if the administrator changed the address
		 * and the driver then fails to restore it (e.g. after a
		 * reboot or system crash), the factory-configured
		 * address can be lost.
		 */
		if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				val = JME_EFUSE_EEPROM_FUNC0 <<
				    JME_EFUSE_EEPROM_FUNC_SHIFT;
				val |= JME_EFUSE_EEPROM_PAGE_BAR1 <<
				    JME_EFUSE_EEPROM_PAGE_SHIFT;
				val |= (JME_PAR0 + i) <<
				    JME_EFUSE_EEPROM_ADDR_SHIFT;
				val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT;
				pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM,
				    val | JME_EFUSE_EEPROM_WRITE, 4);
			}
		}
	} else {
		CSR_WRITE_4(sc, JME_PAR0,
		    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
		CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
	}
}

static void
jme_map_intr_vector(struct jme_softc *sc)
{
	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];

	bzero(map, sizeof(map));
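	/*
	 * Each entry of map[] shadows one 32-bit JME_MSINUM register;
	 * the MSINUM_INTR_SOURCE() bits set below steer the given
	 * interrupt source to the chosen MSI/MSI-X vector, and sources
	 * left at zero stay on vector 0.
	 */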

	/* Map Tx interrupt sources to MSI/MSIX vector 2. */
	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);

	/* Map Rx interrupt sources to MSI/MSIX vector 1. */
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);

	/* All other interrupt sources map to MSI/MSIX vector 0. */
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
}

static int
jme_attach(device_t dev)
{
	struct jme_softc *sc;
	if_t ifp;
	struct mii_softc *miisc;
	struct mii_data *mii;
	uint32_t reg;
	uint16_t burst;
	int error, i, mii_flags, msic, msixc, pmc;

	error = 0;
	sc = device_get_softc(dev);
	sc->jme_dev = dev;

	mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
	TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
	TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);

	/*
	 * Map the device.  The JMC250 supports both memory-mapped and
	 * I/O register space access, but since I/O access would require
	 * juggling separate BARs it is a waste of time to use it; the
	 * JMC250 maps its entire register space with a 16K memory BAR.
	 */
	pci_enable_busmaster(dev);
	sc->jme_res_spec = jme_res_spec_mem;
	sc->jme_irq_spec = jme_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Use 1 MSI/MSI-X. */
	if (msixc > 1)
		msixc = 1;
	if (msic > 1)
		msic = 1;
	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc > 0 &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == 1) {
				device_printf(dev, "Using %d MSIX messages.\n",
				    msixc);
				sc->jme_flags |= JME_FLAG_MSIX;
				sc->jme_irq_spec = jme_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
		    msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				device_printf(dev, "Using %d MSI messages.\n",
				    msic);
				sc->jme_flags |= JME_FLAG_MSI;
				sc->jme_irq_spec = jme_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
		/* Map interrupt vectors 0, 1 and 2. */
		if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
		    (sc->jme_flags & JME_FLAG_MSIX) != 0)
			jme_map_intr_vector(sc);
	}

	error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	sc->jme_rev = pci_get_device(dev);
	if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
		sc->jme_flags |= JME_FLAG_FASTETH;
		sc->jme_flags |= JME_FLAG_NOJUMBO;
	}
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA)
		sc->jme_flags |= JME_FLAG_FPGA;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->jme_rev);
		device_printf(dev, "Chip revision : 0x%02x\n",
		    sc->jme_chip_rev);
		if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
			device_printf(dev, "FPGA revision : 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
	}
	if (sc->jme_chip_rev == 0xFF) {
		device_printf(dev, "Unknown chip revision : 0x%02x\n",
		    sc->jme_rev);
		error = ENXIO;
		goto fail;
	}

	/* Identify controller features and bugs. */
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
		if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
		    CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
			sc->jme_flags |= JME_FLAG_DMA32BIT;
		if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
			sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD;
		sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK;
		sc->jme_flags |= JME_FLAG_HWMIB;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
		error = jme_efuse_macaddr(sc);
		if (error == 0)
			jme_reg_macaddr(sc);
	} else {
		error = ENOENT;
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
			error = jme_eeprom_macaddr(sc);
		if (error != 0 && bootverbose)
			device_printf(sc->jme_dev,
			    "ethernet hardware address not found in EEPROM.\n");
		if (error != 0)
			jme_reg_macaddr(sc);
	}

	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address whereas the FPGA
	 * version requires PHY probing to find the correct PHY address.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose)
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
	} else
		sc->jme_phyaddr = 0;

	/* Set max allowable DMA size. */
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->jme_flags |= JME_FLAG_PCIE;
		burst = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
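		/*
		 * Bits 14:12 of the PCIe device control register encode
		 * the max read request size as 128 << n; size the Tx DMA
		 * burst to match (anything above 256 bytes uses the
		 * 512-byte setting).
		 */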
		switch ((burst >> 12) & 0x07) {
		case 0:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case 1:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}
	/* Create coalescing sysctl node. */
	jme_sysctl_node(sc);
	if ((error = jme_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, jme_ioctl);
	if_setstartfn(ifp, jme_start);
	if_setinitfn(ifp, jme_init);
	if_setsendqlen(ifp, JME_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	/* JMC250 supports Tx/Rx checksum offload as well as TSO. */
	if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4);
	if_sethwassist(ifp, JME_CSUM_FEATURES | CSUM_TSO);
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
		sc->jme_flags |= JME_FLAG_PMCAP;
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* Wakeup PHY. */
	jme_phy_up(sc);
	mii_flags = MIIF_DOPAUSE;
	/* Ask the PHY driver to perform PHY calibration. */
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
		mii_flags |= MIIF_MACPRIV0;
	/* Set up MII bus. */
	error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange,
	    jme_mediastatus, BMSR_DEFCAPMASK,
	    sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
	    MII_OFFSET_ANY, mii_flags);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Force PHY to FPGA mode.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
		mii = device_get_softc(sc->jme_miibus);
		if (mii->mii_instance != 0) {
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
				    0x0004);
			}
		}
	}

	ether_ifattach(ifp, sc->jme_eaddr);

	/* VLAN capability setup */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* Create local taskq. */
	sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->jme_tq);
	taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->jme_dev));

	for (i = 0; i < 1; i++) {
		error = bus_setup_intr(dev, sc->jme_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
		    &sc->jme_intrhand[i]);
		if (error != 0)
			break;
	}

	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->jme_tq);
		sc->jme_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		jme_detach(dev);

	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc;
	if_t ifp;
	int i;

	sc = device_get_softc(dev);

	ifp = sc->jme_ifp;
	if (device_is_attached(dev)) {
		JME_LOCK(sc);
		sc->jme_flags |= JME_FLAG_DETACH;
		jme_stop(sc);
		JME_UNLOCK(sc);
		callout_drain(&sc->jme_tick_ch);
		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
		taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
		/* Restore possibly modified station address. */
		if ((sc->jme_flags & JME_FLAG_EFUSE) != 0)
			jme_set_macaddr(sc, sc->jme_eaddr);
		ether_ifdetach(ifp);
	}

	if (sc->jme_tq != NULL) {
		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
		taskqueue_free(sc->jme_tq);
		sc->jme_tq = NULL;
	}

	if (sc->jme_miibus != NULL) {
		device_delete_child(dev, sc->jme_miibus);
		sc->jme_miibus = NULL;
	}
	bus_generic_detach(dev);
	jme_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->jme_ifp = NULL;
	}

	for (i = 0; i < 1; i++) {
		if (sc->jme_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->jme_irq[i],
			    sc->jme_intrhand[i]);
			sc->jme_intrhand[i] = NULL;
		}
	}

	if (sc->jme_irq[0] != NULL)
		bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
	if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
		pci_release_msi(dev);
	if (sc->jme_res[0] != NULL)
		bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
	mtx_destroy(&sc->jme_mtx);

	return (0);
}

#define	JME_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
jme_sysctl_node(struct jme_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct jme_hw_stats *stats;
	int error;

	stats = &sc->jme_stats;
	ctx = device_get_sysctl_ctx(sc->jme_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_to,
	    0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_pkt,
	    0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_to,
	    0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_pkt,
	    0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->jme_process_limit, 0, sysctl_hw_jme_proc_limit, "I",
	    "max number of Rx events to process");

	/* Pull in device tunables. */
	sc->jme_process_limit = JME_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "process_limit",
	    &sc->jme_process_limit);
	if (error == 0) {
		if (sc->jme_process_limit < JME_PROC_MIN ||
		    sc->jme_process_limit > JME_PROC_MAX) {
			device_printf(sc->jme_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", JME_PROC_DEFAULT);
			sc->jme_process_limit = JME_PROC_DEFAULT;
		}
	}

	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
	if (error == 0) {
		if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
		    sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
			device_printf(sc->jme_dev,
			    "tx_coal_to value out of range; "
			    "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
			sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
		}
	}

	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt);
	if (error == 0) {
		if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
		    sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
			device_printf(sc->jme_dev,
			    "tx_coal_pkt value out of range; "
			    "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
			sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
		}
	}

	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
	if (error == 0) {
		if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
		    sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
			device_printf(sc->jme_dev,
			    "rx_coal_to value out of range; "
			    "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
			sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
		}
	}

	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt);
	if (error == 0) {
		if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
		    sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
			device_printf(sc->jme_dev,
			    "rx_coal_pkt value out of range; "
			    "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
			sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
		}
	}

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "JME statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_good_frames, "Good frames");
	JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crc_errs, "CRC errors");
	JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
	    &stats->rx_mii_errs, "MII errors");
	JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
	    &stats->rx_desc_empty, "Descriptor empty");
	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
	    &stats->rx_bad_frames, "Bad frames");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_good_frames, "Good frames");
	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
	    &stats->tx_bad_frames, "Bad frames");
}

#undef	JME_SYSCTL_STAT_ADD32

struct jme_dmamap_arg {
	bus_addr_t	jme_busaddr;
};

static void
jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct jme_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct jme_dmamap_arg *)arg;
	ctx->jme_busaddr = segs[0].ds_addr;
}

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_dmamap_arg ctx;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;

again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Rx ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
	    (void **)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
	    (void **)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;

	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/* Tx/Rx descriptor queue should reside within 4GB boundary. */
		tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
		    JME_TX_RING_SIZE;
		rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
		    JME_RX_RING_SIZE;
		if ((JME_ADDR_HI(tx_ring_end) !=
		    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
		    (JME_ADDR_HI(rx_ring_end) !=
		    JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
			device_printf(sc->jme_dev, "4GB boundary crossed, "
			    "switching to 32bit DMA address mode.\n");
			jme_dma_free(sc);
			/* Limit DMA address space to 32bit and try again. */
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
			goto again;
		}
	}

	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_SSB_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_SSB_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create shared status block DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load the DMA map for shared
	 * status block.
	 */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
	    (void **)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_ssb_map);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not allocate DMA'able "
		    "memory for shared status block.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev, "could not load DMA'able memory "
		    "for shared status block.\n");
		goto fail;
	}
	sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
	    &sc->jme_cdata.jme_rx_sparemap)) != 0) {
		device_printf(sc->jme_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		if (sc->jme_rdata.jme_tx_ring_paddr)
			bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_cdata.jme_tx_ring_map);
		if (sc->jme_rdata.jme_tx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_rdata.jme_tx_ring,
			    sc->jme_cdata.jme_tx_ring_map);
		sc->jme_rdata.jme_tx_ring = NULL;
		sc->jme_rdata.jme_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		if (sc->jme_rdata.jme_rx_ring_paddr)
			bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_cdata.jme_rx_ring_map);
		if (sc->jme_rdata.jme_rx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_rdata.jme_rx_ring,
			    sc->jme_cdata.jme_rx_ring_map);
		sc->jme_rdata.jme_rx_ring = NULL;
		sc->jme_rdata.jme_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < JME_RX_RING_CNT; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->jme_cdata.jme_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}

	/* Shared status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		if (sc->jme_rdata.jme_ssb_block_paddr)
			bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_cdata.jme_ssb_map);
		if (sc->jme_rdata.jme_ssb_block)
			bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_rdata.jme_ssb_block,
			    sc->jme_cdata.jme_ssb_map);
		sc->jme_rdata.jme_ssb_block = NULL;
		sc->jme_rdata.jme_ssb_block_paddr = 0;
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{

	return (jme_suspend(dev));
}

/*
 * Unlike other ethernet controllers, the JMC250 requires the link speed
 * to be explicitly reset to 10/100Mbps, as a gigabit link consumes more
 * than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation, but
 * we don't know whether that operation will succeed, as we have no
 * control after powering off.  If the renegotiation fails, WOL may not
 * work.  Running at 1Gbps draws more than the 375mA at 3.3V specified in
 * the PCI specification, and that would result in power to the ethernet
 * controller being shut down completely.
 *
 * TODO
 * Save the currently negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as powering down/resetting to 100Mbps may be better handled in
 * the suspend method of the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			/* FALLTHROUGH */
		default:
			break;
		}
	}
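	/*
	 * Drop the 1000baseT advertisement, advertise 10/100 only, and
	 * restart auto-negotiation so the link renegotiates downward.
	 */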
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

static void
jme_setwol(struct jme_softc *sc)
{
	if_t ifp;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	JME_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* Remove Tx MAC/offload clock to save more power. */
		if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
			CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
			    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
			    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
		if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
			CSR_WRITE_4(sc, JME_GPREG1,
			    CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS);
		/* No PME capability, PHY power down. */
		jme_phy_down(sc);
		return;
	}

	ifp = sc->jme_ifp;
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);
	/* Remove Tx MAC/offload clock to save more power. */
	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
		CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
		    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
		    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_phy_down(sc);
	}
}

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);

	JME_LOCK(sc);
	jme_stop(sc);
	jme_setwol(sc);
	JME_UNLOCK(sc);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc;
	if_t ifp;
	uint16_t pmstat;
	int pmc;

	sc = device_get_softc(dev);

	JME_LOCK(sc);
	if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
	/* Wakeup PHY. */
	jme_phy_up(sc);
	ifp = sc->jme_ifp;
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		jme_init_locked(sc);
	}

	JME_UNLOCK(sc);

	return (0);
}

static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int error, i, nsegs, prod;
	uint32_t cflags, tsosegsz;

	JME_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * Due to its adherence to the NDIS specification, the
		 * JMC250 assumes the upper stack computed the TCP pseudo
		 * checksum without including the payload length.  This
		 * breaks checksum offload for the TSO case, so recompute
		 * the TCP pseudo checksum for the JMC250.  Hopefully
		 * this isn't much of a burden on modern CPUs.
		 */
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
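		/*
		 * The header rewrite below stores into ip_sum/th_sum in
		 * place, which is only safe on a writable mbuf chain;
		 * hence the m_dup() above for read-only chains.
		 */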
1706 ip_off = sizeof(struct ether_header);
1707 m = m_pullup(*m_head, ip_off);
1708 if (m == NULL) {
1709 *m_head = NULL;
1710 return (ENOBUFS);
1711 }
1712 eh = mtod(m, struct ether_header *);
1713 /* Check the existence of VLAN tag. */
1714 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1715 ip_off = sizeof(struct ether_vlan_header);
1716 m = m_pullup(m, ip_off);
1717 if (m == NULL) {
1718 *m_head = NULL;
1719 return (ENOBUFS);
1720 }
1721 }
1722 m = m_pullup(m, ip_off + sizeof(struct ip));
1723 if (m == NULL) {
1724 *m_head = NULL;
1725 return (ENOBUFS);
1726 }
1727 ip = (struct ip *)(mtod(m, char *) + ip_off);
1728 poff = ip_off + (ip->ip_hl << 2);
1729 m = m_pullup(m, poff + sizeof(struct tcphdr));
1730 if (m == NULL) {
1731 *m_head = NULL;
1732 return (ENOBUFS);
1733 }
1734 /*
1735 * Reset IP checksum and recompute TCP pseudo
1736 * checksum that NDIS specification requires.
1737 */
1738 ip = (struct ip *)(mtod(m, char *) + ip_off);
1739 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1740 ip->ip_sum = 0;
1741 if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
1742 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1743 ip->ip_dst.s_addr,
1744 htons((tcp->th_off << 2) + IPPROTO_TCP));
1745 			/* No need for TSO; force IP checksum offload. */
1746 (*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
1747 (*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
1748 } else
1749 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1750 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1751 *m_head = m;
1752 }
1753
1754 prod = sc->jme_cdata.jme_tx_prod;
1755 txd = &sc->jme_cdata.jme_txdesc[prod];
1756
1757 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1758 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1759 if (error == EFBIG) {
1760 m = m_collapse(*m_head, M_NOWAIT, JME_MAXTXSEGS);
1761 if (m == NULL) {
1762 m_freem(*m_head);
1763 *m_head = NULL;
1764 return (ENOMEM);
1765 }
1766 *m_head = m;
1767 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1768 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1769 if (error != 0) {
1770 m_freem(*m_head);
1771 *m_head = NULL;
1772 return (error);
1773 }
1774 } else if (error != 0)
1775 return (error);
1776 if (nsegs == 0) {
1777 m_freem(*m_head);
1778 *m_head = NULL;
1779 return (EIO);
1780 }
1781
1782 /*
1783 	 * Check for descriptor overrun. Leave one free descriptor.
1784 * Since we always use 64bit address mode for transmitting,
1785 * each Tx request requires one more dummy descriptor.
1786 */
1787 if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
1788 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1789 return (ENOBUFS);
1790 }
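	/*
	 * Worked example (hypothetical ring size): with a 256-entry ring
	 * and a 4-segment mbuf chain, the check above requires
	 * jme_tx_cnt + 5 <= 255, i.e. nsegs data descriptors plus the
	 * leading dummy descriptor, while always keeping one ring entry
	 * free so the producer and consumer indexes stay distinguishable.
	 */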
1791
1792 m = *m_head;
1793 cflags = 0;
1794 tsosegsz = 0;
1795 /* Configure checksum offload and TSO. */
1796 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1797 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
1798 JME_TD_MSS_SHIFT;
1799 cflags |= JME_TD_TSO;
1800 } else {
1801 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1802 cflags |= JME_TD_IPCSUM;
1803 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1804 cflags |= JME_TD_TCPCSUM;
1805 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1806 cflags |= JME_TD_UDPCSUM;
1807 }
1808 /* Configure VLAN. */
1809 if ((m->m_flags & M_VLANTAG) != 0) {
1810 cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
1811 cflags |= JME_TD_VLAN_TAG;
1812 }
1813
1814 desc = &sc->jme_rdata.jme_tx_ring[prod];
1815 desc->flags = htole32(cflags);
1816 desc->buflen = htole32(tsosegsz);
1817 desc->addr_hi = htole32(m->m_pkthdr.len);
1818 desc->addr_lo = 0;
1819 sc->jme_cdata.jme_tx_cnt++;
1820 JME_DESC_INC(prod, JME_TX_RING_CNT);
1821 for (i = 0; i < nsegs; i++) {
1822 desc = &sc->jme_rdata.jme_tx_ring[prod];
1823 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1824 desc->buflen = htole32(txsegs[i].ds_len);
1825 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1826 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1827 sc->jme_cdata.jme_tx_cnt++;
1828 JME_DESC_INC(prod, JME_TX_RING_CNT);
1829 }
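	/*
	 * Ring layout sketch: the head descriptor written above carries
	 * the flags, TSO segment size and total packet length; the nsegs
	 * descriptors filled in this loop carry the DMA segment addresses
	 * and are owned by hardware immediately. The head descriptor's
	 * JME_TD_OWN bit is set last, below, so the chip never sees a
	 * half-built chain.
	 */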
1830
1831 /* Update producer index. */
1832 sc->jme_cdata.jme_tx_prod = prod;
1833 /*
1834 	 * Finally request an interrupt and give ownership of the first
1835 	 * descriptor to the hardware.
1836 */
1837 desc = txd->tx_desc;
1838 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1839
1840 txd->tx_m = m;
1841 txd->tx_ndesc = nsegs + 1;
1842
1843 /* Sync descriptors. */
1844 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1845 BUS_DMASYNC_PREWRITE);
1846 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1847 sc->jme_cdata.jme_tx_ring_map,
1848 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1849
1850 return (0);
1851 }
1852
1853 static void
1854 jme_start(if_t ifp)
1855 {
1856 struct jme_softc *sc;
1857
1858 sc = if_getsoftc(ifp);
1859 JME_LOCK(sc);
1860 jme_start_locked(ifp);
1861 JME_UNLOCK(sc);
1862 }
1863
1864 static void
1865 jme_start_locked(if_t ifp)
1866 {
1867 struct jme_softc *sc;
1868 struct mbuf *m_head;
1869 int enq;
1870
1871 sc = if_getsoftc(ifp);
1872
1873 JME_LOCK_ASSERT(sc);
1874
1875 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1876 jme_txeof(sc);
1877
1878 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1879 IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0)
1880 return;
1881
1882 for (enq = 0; !if_sendq_empty(ifp); ) {
1883 m_head = if_dequeue(ifp);
1884 if (m_head == NULL)
1885 break;
1886 /*
1887 * Pack the data into the transmit ring. If we
1888 * don't have room, set the OACTIVE flag and wait
1889 * for the NIC to drain the ring.
1890 */
1891 if (jme_encap(sc, &m_head)) {
1892 if (m_head == NULL)
1893 break;
1894 if_sendq_prepend(ifp, m_head);
1895 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1896 break;
1897 }
1898
1899 enq++;
1900 /*
1901 * If there's a BPF listener, bounce a copy of this frame
1902 * to him.
1903 */
1904 ETHER_BPF_MTAP(ifp, m_head);
1905 }
1906
1907 if (enq > 0) {
1908 /*
1909 		 * Reading TXCSR takes a very long time under heavy load,
1910 		 * so cache the TXCSR value and write it, ORed with the
1911 		 * kick command, to the TXCSR. This saves one register
1912 		 * access cycle.
1913 */
1914 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1915 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1916 /* Set a timeout in case the chip goes out to lunch. */
1917 sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1918 }
1919 }
1920
1921 static void
1922 jme_watchdog(struct jme_softc *sc)
1923 {
1924 if_t ifp;
1925
1926 JME_LOCK_ASSERT(sc);
1927
1928 if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
1929 return;
1930
1931 ifp = sc->jme_ifp;
1932 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1933 if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
1934 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1935 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1936 jme_init_locked(sc);
1937 return;
1938 }
1939 jme_txeof(sc);
1940 if (sc->jme_cdata.jme_tx_cnt == 0) {
1941 if_printf(sc->jme_ifp,
1942 "watchdog timeout (missed Tx interrupts) -- recovering\n");
1943 if (!if_sendq_empty(ifp))
1944 jme_start_locked(ifp);
1945 return;
1946 }
1947
1948 if_printf(sc->jme_ifp, "watchdog timeout\n");
1949 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1950 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1951 jme_init_locked(sc);
1952 if (!if_sendq_empty(ifp))
1953 jme_start_locked(ifp);
1954 }
1955
1956 static int
1957 jme_ioctl(if_t ifp, u_long cmd, caddr_t data)
1958 {
1959 struct jme_softc *sc;
1960 struct ifreq *ifr;
1961 struct mii_data *mii;
1962 uint32_t reg;
1963 int error, mask;
1964
1965 sc = if_getsoftc(ifp);
1966 ifr = (struct ifreq *)data;
1967 error = 0;
1968 switch (cmd) {
1969 case SIOCSIFMTU:
1970 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1971 ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
1972 ifr->ifr_mtu > JME_MAX_MTU)) {
1973 error = EINVAL;
1974 break;
1975 }
1976
1977 if (if_getmtu(ifp) != ifr->ifr_mtu) {
1978 /*
1979 			 * No special configuration is required when the
1980 			 * interface MTU is changed, but the availability of
1981 			 * TSO/Tx checksum offload should be checked against
1982 			 * the new MTU size as the FIFO size is just 2K.
1983 */
1984 JME_LOCK(sc);
1985 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1986 if_setcapenablebit(ifp, 0,
1987 IFCAP_TXCSUM | IFCAP_TSO4);
1988 if_sethwassistbits(ifp, 0,
1989 JME_CSUM_FEATURES | CSUM_TSO);
1990 VLAN_CAPABILITIES(ifp);
1991 }
1992 if_setmtu(ifp, ifr->ifr_mtu);
1993 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1994 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1995 jme_init_locked(sc);
1996 }
1997 JME_UNLOCK(sc);
1998 }
1999 break;
2000 case SIOCSIFFLAGS:
2001 JME_LOCK(sc);
2002 if ((if_getflags(ifp) & IFF_UP) != 0) {
2003 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2004 if (((if_getflags(ifp) ^ sc->jme_if_flags)
2005 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2006 jme_set_filter(sc);
2007 } else {
2008 if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
2009 jme_init_locked(sc);
2010 }
2011 } else {
2012 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2013 jme_stop(sc);
2014 }
2015 sc->jme_if_flags = if_getflags(ifp);
2016 JME_UNLOCK(sc);
2017 break;
2018 case SIOCADDMULTI:
2019 case SIOCDELMULTI:
2020 JME_LOCK(sc);
2021 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2022 jme_set_filter(sc);
2023 JME_UNLOCK(sc);
2024 break;
2025 case SIOCSIFMEDIA:
2026 case SIOCGIFMEDIA:
2027 mii = device_get_softc(sc->jme_miibus);
2028 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2029 break;
2030 case SIOCSIFCAP:
2031 JME_LOCK(sc);
2032 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2033 if ((mask & IFCAP_TXCSUM) != 0 &&
2034 if_getmtu(ifp) < JME_TX_FIFO_SIZE) {
2035 if ((IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
2036 if_togglecapenable(ifp, IFCAP_TXCSUM);
2037 if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
2038 if_sethwassistbits(ifp, JME_CSUM_FEATURES, 0);
2039 else
2040 if_sethwassistbits(ifp, 0, JME_CSUM_FEATURES);
2041 }
2042 }
2043 if ((mask & IFCAP_RXCSUM) != 0 &&
2044 (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0) {
2045 if_togglecapenable(ifp, IFCAP_RXCSUM);
2046 reg = CSR_READ_4(sc, JME_RXMAC);
2047 reg &= ~RXMAC_CSUM_ENB;
2048 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2049 reg |= RXMAC_CSUM_ENB;
2050 CSR_WRITE_4(sc, JME_RXMAC, reg);
2051 }
2052 if ((mask & IFCAP_TSO4) != 0 &&
2053 if_getmtu(ifp) < JME_TX_FIFO_SIZE) {
2054 if ((IFCAP_TSO4 & if_getcapabilities(ifp)) != 0) {
2055 if_togglecapenable(ifp, IFCAP_TSO4);
2056 if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
2057 if_sethwassistbits(ifp, CSUM_TSO, 0);
2058 else
2059 if_sethwassistbits(ifp, 0, CSUM_TSO);
2060 }
2061 }
2062 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2063 (IFCAP_WOL_MAGIC & if_getcapabilities(ifp)) != 0)
2064 if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
2065 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2066 (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
2067 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
2068 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2069 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
2070 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2071 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2072 (IFCAP_VLAN_HWTAGGING & if_getcapabilities(ifp)) != 0) {
2073 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2074 jme_set_vlan(sc);
2075 }
2076 JME_UNLOCK(sc);
2077 VLAN_CAPABILITIES(ifp);
2078 break;
2079 default:
2080 error = ether_ioctl(ifp, cmd, data);
2081 break;
2082 }
2083
2084 return (error);
2085 }
2086
2087 static void
2088 jme_mac_config(struct jme_softc *sc)
2089 {
2090 struct mii_data *mii;
2091 uint32_t ghc, gpreg, rxmac, txmac, txpause;
2092 uint32_t txclk;
2093
2094 JME_LOCK_ASSERT(sc);
2095
2096 mii = device_get_softc(sc->jme_miibus);
2097
2098 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2099 DELAY(10);
2100 CSR_WRITE_4(sc, JME_GHC, 0);
2101 ghc = 0;
2102 txclk = 0;
2103 rxmac = CSR_READ_4(sc, JME_RXMAC);
2104 rxmac &= ~RXMAC_FC_ENB;
2105 txmac = CSR_READ_4(sc, JME_TXMAC);
2106 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2107 txpause = CSR_READ_4(sc, JME_TXPFC);
2108 txpause &= ~TXPFC_PAUSE_ENB;
2109 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2110 ghc |= GHC_FULL_DUPLEX;
2111 rxmac &= ~RXMAC_COLL_DET_ENB;
2112 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2113 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2114 TXMAC_FRAME_BURST);
2115 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2116 txpause |= TXPFC_PAUSE_ENB;
2117 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2118 rxmac |= RXMAC_FC_ENB;
2119 /* Disable retry transmit timer/retry limit. */
2120 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2121 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2122 } else {
2123 rxmac |= RXMAC_COLL_DET_ENB;
2124 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2125 /* Enable retry transmit timer/retry limit. */
2126 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2127 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2128 }
2129 /* Reprogram Tx/Rx MACs with resolved speed/duplex. */
2130 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2131 case IFM_10_T:
2132 ghc |= GHC_SPEED_10;
2133 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2134 break;
2135 case IFM_100_TX:
2136 ghc |= GHC_SPEED_100;
2137 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2138 break;
2139 case IFM_1000_T:
2140 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2141 break;
2142 ghc |= GHC_SPEED_1000;
2143 txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
2144 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2145 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2146 break;
2147 default:
2148 break;
2149 }
2150 if (sc->jme_rev == DEVICEID_JMC250 &&
2151 sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
2152 /*
2153 		 * Work around an occasional packet loss issue of the
2154 		 * JMC250 A2 when it runs on half-duplex media.
2155 */
2156 gpreg = CSR_READ_4(sc, JME_GPREG1);
2157 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2158 gpreg &= ~GPREG1_HDPX_FIX;
2159 else
2160 gpreg |= GPREG1_HDPX_FIX;
2161 CSR_WRITE_4(sc, JME_GPREG1, gpreg);
2162 		/* Work around CRC errors at 100Mbps on the JMC250 A2. */
2163 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2164 /* Extend interface FIFO depth. */
2165 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2166 0x1B, 0x0000);
2167 } else {
2168 /* Select default interface FIFO depth. */
2169 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2170 0x1B, 0x0004);
2171 }
2172 }
2173 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2174 ghc |= txclk;
2175 CSR_WRITE_4(sc, JME_GHC, ghc);
2176 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2177 CSR_WRITE_4(sc, JME_TXMAC, txmac);
2178 CSR_WRITE_4(sc, JME_TXPFC, txpause);
2179 }
2180
2181 static void
2182 jme_link_task(void *arg, int pending)
2183 {
2184 struct jme_softc *sc;
2185 struct mii_data *mii;
2186 if_t ifp;
2187 struct jme_txdesc *txd;
2188 bus_addr_t paddr;
2189 int i;
2190
2191 sc = (struct jme_softc *)arg;
2192
2193 JME_LOCK(sc);
2194 mii = device_get_softc(sc->jme_miibus);
2195 ifp = sc->jme_ifp;
2196 if (mii == NULL || ifp == NULL ||
2197 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
2198 JME_UNLOCK(sc);
2199 return;
2200 }
2201
2202 sc->jme_flags &= ~JME_FLAG_LINK;
2203 if ((mii->mii_media_status & IFM_AVALID) != 0) {
2204 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2205 case IFM_10_T:
2206 case IFM_100_TX:
2207 sc->jme_flags |= JME_FLAG_LINK;
2208 break;
2209 case IFM_1000_T:
2210 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2211 break;
2212 sc->jme_flags |= JME_FLAG_LINK;
2213 break;
2214 default:
2215 break;
2216 }
2217 }
2218
2219 /*
2220 	 * Disabling the Rx/Tx MACs has the side effect of resetting
2221 	 * the JME_TXNDA/JME_RXNDA registers to the first address of
2222 	 * the Tx/Rx descriptor rings. So the driver should reset its
2223 	 * internal producer/consumer pointers and reclaim any
2224 	 * allocated resources. Note that just saving the values of
2225 	 * the JME_TXNDA and JME_RXNDA registers before stopping the
2226 	 * MAC and restoring them afterwards is not sufficient to
2227 	 * ensure a correct MAC state, because stopping MAC operation
2228 	 * can take a while and the hardware might have updated the
2229 	 * JME_TXNDA/JME_RXNDA registers during the stop
2230 	 * operation.
2231 */
2232 /* Block execution of task. */
2233 taskqueue_block(sc->jme_tq);
2234 /* Disable interrupts and stop driver. */
2235 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2236 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2237 callout_stop(&sc->jme_tick_ch);
2238 sc->jme_watchdog_timer = 0;
2239
2240 /* Stop receiver/transmitter. */
2241 jme_stop_rx(sc);
2242 jme_stop_tx(sc);
2243
2244 /* XXX Drain all queued tasks. */
2245 JME_UNLOCK(sc);
2246 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
2247 JME_LOCK(sc);
2248
2249 if (sc->jme_cdata.jme_rxhead != NULL)
2250 m_freem(sc->jme_cdata.jme_rxhead);
2251 JME_RXCHAIN_RESET(sc);
2252 jme_txeof(sc);
2253 if (sc->jme_cdata.jme_tx_cnt != 0) {
2254 /* Remove queued packets for transmit. */
2255 for (i = 0; i < JME_TX_RING_CNT; i++) {
2256 txd = &sc->jme_cdata.jme_txdesc[i];
2257 if (txd->tx_m != NULL) {
2258 bus_dmamap_sync(
2259 sc->jme_cdata.jme_tx_tag,
2260 txd->tx_dmamap,
2261 BUS_DMASYNC_POSTWRITE);
2262 bus_dmamap_unload(
2263 sc->jme_cdata.jme_tx_tag,
2264 txd->tx_dmamap);
2265 m_freem(txd->tx_m);
2266 txd->tx_m = NULL;
2267 txd->tx_ndesc = 0;
2268 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2269 }
2270 }
2271 }
2272
2273 /*
2274 * Reuse configured Rx descriptors and reset
2275 * producer/consumer index.
2276 */
2277 sc->jme_cdata.jme_rx_cons = 0;
2278 sc->jme_morework = 0;
2279 jme_init_tx_ring(sc);
2280 /* Initialize shadow status block. */
2281 jme_init_ssb(sc);
2282
2283 /* Program MAC with resolved speed/duplex/flow-control. */
2284 if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
2285 jme_mac_config(sc);
2286 jme_stats_clear(sc);
2287
2288 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2289 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2290
2291 /* Set Tx ring address to the hardware. */
2292 paddr = JME_TX_RING_ADDR(sc, 0);
2293 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2294 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2295
2296 /* Set Rx ring address to the hardware. */
2297 paddr = JME_RX_RING_ADDR(sc, 0);
2298 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2299 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2300
2301 /* Restart receiver/transmitter. */
2302 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
2303 RXCSR_RXQ_START);
2304 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
2305 /* Lastly enable TX/RX clock. */
2306 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2307 CSR_WRITE_4(sc, JME_GHC,
2308 CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS);
2309 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
2310 CSR_WRITE_4(sc, JME_GPREG1,
2311 CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS);
2312 }
2313
2314 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2315 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2316 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2317 /* Unblock execution of task. */
2318 taskqueue_unblock(sc->jme_tq);
2319 /* Reenable interrupts. */
2320 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2321
2322 JME_UNLOCK(sc);
2323 }
2324
2325 static int
2326 jme_intr(void *arg)
2327 {
2328 struct jme_softc *sc;
2329 uint32_t status;
2330
2331 sc = (struct jme_softc *)arg;
2332
2333 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2334 if (status == 0 || status == 0xFFFFFFFF)
2335 return (FILTER_STRAY);
2336 /* Disable interrupts. */
2337 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2338 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2339
2340 return (FILTER_HANDLED);
2341 }
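/*
 * jme_intr() runs as an interrupt filter: it only masks further interrupt
 * sources and defers all real work to jme_int_task() on the driver
 * taskqueue, which re-enables interrupts once the status register drains.
 */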
2342
2343 static void
2344 jme_int_task(void *arg, int pending)
2345 {
2346 struct jme_softc *sc;
2347 if_t ifp;
2348 uint32_t status;
2349 int more;
2350
2351 sc = (struct jme_softc *)arg;
2352 ifp = sc->jme_ifp;
2353
2354 JME_LOCK(sc);
2355 status = CSR_READ_4(sc, JME_INTR_STATUS);
2356 if (sc->jme_morework != 0) {
2357 sc->jme_morework = 0;
2358 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2359 }
2360 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2361 goto done;
2362 /* Reset PCC counter/timer and Ack interrupts. */
2363 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2364 if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2365 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2366 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2367 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2368 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2369 more = 0;
2370 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2371 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2372 more = jme_rxintr(sc, sc->jme_process_limit);
2373 if (more != 0)
2374 sc->jme_morework = 1;
2375 }
2376 if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2377 /*
2378 			 * Notify the hardware of the availability of new
2379 			 * Rx buffers.
2380 			 * Reading RXCSR takes a very long time under
2381 			 * heavy load, so cache the RXCSR value and write
2382 			 * it, ORed with the kick command, to the RXCSR.
2383 			 * This saves one register
2384 			 * access cycle.
2385 */
2386 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2387 RXCSR_RX_ENB | RXCSR_RXQ_START);
2388 }
2389 if (!if_sendq_empty(ifp))
2390 jme_start_locked(ifp);
2391 }
2392
2393 if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2394 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2395 JME_UNLOCK(sc);
2396 return;
2397 }
2398 done:
2399 JME_UNLOCK(sc);
2400
2401 /* Reenable interrupts. */
2402 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2403 }
2404
2405 static void
2406 jme_txeof(struct jme_softc *sc)
2407 {
2408 if_t ifp;
2409 struct jme_txdesc *txd;
2410 uint32_t status;
2411 int cons, nsegs;
2412
2413 JME_LOCK_ASSERT(sc);
2414
2415 ifp = sc->jme_ifp;
2416
2417 cons = sc->jme_cdata.jme_tx_cons;
2418 if (cons == sc->jme_cdata.jme_tx_prod)
2419 return;
2420
2421 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2422 sc->jme_cdata.jme_tx_ring_map,
2423 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2424
2425 /*
2426 * Go through our Tx list and free mbufs for those
2427 * frames which have been transmitted.
2428 */
2429 for (; cons != sc->jme_cdata.jme_tx_prod;) {
2430 txd = &sc->jme_cdata.jme_txdesc[cons];
2431 status = le32toh(txd->tx_desc->flags);
2432 if ((status & JME_TD_OWN) == JME_TD_OWN)
2433 break;
2434
2435 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2436 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2437 else {
2438 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2439 if ((status & JME_TD_COLLISION) != 0)
2440 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
2441 le32toh(txd->tx_desc->buflen) &
2442 JME_TD_BUF_LEN_MASK);
2443 }
2444 /*
2445 		 * Only the first descriptor of a multi-descriptor
2446 		 * transmission is updated, so the driver has to skip the
2447 		 * entire chain of buffers for the transmitted frame. In
2448 		 * other words, the JME_TD_OWN bit is valid only in the
2449 		 * first descriptor of a multi-descriptor transmission.
2450 */
2451 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2452 sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2453 JME_DESC_INC(cons, JME_TX_RING_CNT);
2454 }
2455
2456 /* Reclaim transferred mbufs. */
2457 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
2458 BUS_DMASYNC_POSTWRITE);
2459 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2460
2461 KASSERT(txd->tx_m != NULL,
2462 ("%s: freeing NULL mbuf!\n", __func__));
2463 m_freem(txd->tx_m);
2464 txd->tx_m = NULL;
2465 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2466 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2467 ("%s: Active Tx desc counter was garbled\n", __func__));
2468 txd->tx_ndesc = 0;
2469 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2470 }
2471 sc->jme_cdata.jme_tx_cons = cons;
2472 	/* Unarm the watchdog timer when there are no pending descriptors. */
2473 if (sc->jme_cdata.jme_tx_cnt == 0)
2474 sc->jme_watchdog_timer = 0;
2475
2476 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2477 sc->jme_cdata.jme_tx_ring_map,
2478 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2479 }
2480
2481 static __inline void
2482 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2483 {
2484 struct jme_desc *desc;
2485
2486 desc = &sc->jme_rdata.jme_rx_ring[cons];
2487 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2488 desc->buflen = htole32(MCLBYTES);
2489 }
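/*
 * Note that jme_discard_rxbuf() leaves the previously loaded DMA address in
 * the descriptor untouched; re-arming JME_RD_OWN with a full MCLBYTES
 * length simply hands the same buffer back to the hardware.
 */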
2490
2491 /* Receive a frame. */
2492 static void
2493 jme_rxeof(struct jme_softc *sc)
2494 {
2495 if_t ifp;
2496 struct jme_desc *desc;
2497 struct jme_rxdesc *rxd;
2498 struct mbuf *mp, *m;
2499 uint32_t flags, status;
2500 int cons, count, nsegs;
2501
2502 JME_LOCK_ASSERT(sc);
2503
2504 ifp = sc->jme_ifp;
2505
2506 cons = sc->jme_cdata.jme_rx_cons;
2507 desc = &sc->jme_rdata.jme_rx_ring[cons];
2508 flags = le32toh(desc->flags);
2509 status = le32toh(desc->buflen);
2510 nsegs = JME_RX_NSEGS(status);
2511 sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2512 if ((status & JME_RX_ERR_STAT) != 0) {
2513 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2514 jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
2515 #ifdef JME_SHOW_ERRORS
2516 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2517 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2518 #endif
2519 sc->jme_cdata.jme_rx_cons += nsegs;
2520 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2521 return;
2522 }
2523
2524 for (count = 0; count < nsegs; count++,
2525 JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2526 rxd = &sc->jme_cdata.jme_rxdesc[cons];
2527 mp = rxd->rx_m;
2528 /* Add a new receive buffer to the ring. */
2529 if (jme_newbuf(sc, rxd) != 0) {
2530 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2531 /* Reuse buffer. */
2532 for (; count < nsegs; count++) {
2533 jme_discard_rxbuf(sc, cons);
2534 JME_DESC_INC(cons, JME_RX_RING_CNT);
2535 }
2536 if (sc->jme_cdata.jme_rxhead != NULL) {
2537 m_freem(sc->jme_cdata.jme_rxhead);
2538 JME_RXCHAIN_RESET(sc);
2539 }
2540 break;
2541 }
2542
2543 /*
2544 * Assume we've received a full sized frame.
2545 		 * The actual size is fixed up when we encounter the end
2546 		 * of a multi-segment frame.
2547 */
2548 mp->m_len = MCLBYTES;
2549
2550 /* Chain received mbufs. */
2551 if (sc->jme_cdata.jme_rxhead == NULL) {
2552 sc->jme_cdata.jme_rxhead = mp;
2553 sc->jme_cdata.jme_rxtail = mp;
2554 } else {
2555 /*
2556 * Receive processor can receive a maximum frame
2557 * size of 65535 bytes.
2558 */
2559 mp->m_flags &= ~M_PKTHDR;
2560 sc->jme_cdata.jme_rxtail->m_next = mp;
2561 sc->jme_cdata.jme_rxtail = mp;
2562 }
2563
2564 if (count == nsegs - 1) {
2565 /* Last desc. for this frame. */
2566 m = sc->jme_cdata.jme_rxhead;
2567 m->m_flags |= M_PKTHDR;
2568 m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
2569 if (nsegs > 1) {
2570 /* Set first mbuf size. */
2571 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2572 /* Set last mbuf size. */
2573 mp->m_len = sc->jme_cdata.jme_rxlen -
2574 ((MCLBYTES - JME_RX_PAD_BYTES) +
2575 (MCLBYTES * (nsegs - 2)));
2576 } else
2577 m->m_len = sc->jme_cdata.jme_rxlen;
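			/*
			 * Length arithmetic sketch for an nsegs-segment
			 * frame:
			 *
			 *	rxlen = (MCLBYTES - JME_RX_PAD_BYTES)
			 *	    + MCLBYTES * (nsegs - 2)
			 *	    + last mbuf length
			 *
			 * so the last length above is recovered by
			 * subtraction.
			 */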
2578 m->m_pkthdr.rcvif = ifp;
2579
2580 /*
2581 			 * Account for the 10 bytes of auto padding which
2582 			 * are used to align the IP header on a 32bit
2583 			 * boundary. Also note that the CRC bytes are
2584 			 * automatically removed by the hardware.
2585 */
2586 m->m_data += JME_RX_PAD_BYTES;
2587
2588 /* Set checksum information. */
2589 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
2590 (flags & JME_RD_IPV4) != 0) {
2591 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2592 if ((flags & JME_RD_IPCSUM) != 0)
2593 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2594 if (((flags & JME_RD_MORE_FRAG) == 0) &&
2595 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2596 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2597 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2598 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2599 m->m_pkthdr.csum_flags |=
2600 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2601 m->m_pkthdr.csum_data = 0xffff;
2602 }
2603 }
2604
2605 /* Check for VLAN tagged packets. */
2606 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
2607 (flags & JME_RD_VLAN_TAG) != 0) {
2608 m->m_pkthdr.ether_vtag =
2609 flags & JME_RD_VLAN_MASK;
2610 m->m_flags |= M_VLANTAG;
2611 }
2612
2613 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2614 /* Pass it on. */
2615 JME_UNLOCK(sc);
2616 if_input(ifp, m);
2617 JME_LOCK(sc);
2618
2619 /* Reset mbuf chains. */
2620 JME_RXCHAIN_RESET(sc);
2621 }
2622 }
2623
2624 sc->jme_cdata.jme_rx_cons += nsegs;
2625 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2626 }
2627
2628 static int
2629 jme_rxintr(struct jme_softc *sc, int count)
2630 {
2631 struct jme_desc *desc;
2632 int nsegs, prog, pktlen;
2633
2634 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2635 sc->jme_cdata.jme_rx_ring_map,
2636 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2637
2638 for (prog = 0; count > 0; prog++) {
2639 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2640 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2641 break;
2642 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2643 break;
2644 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2645 /*
2646 		 * Check the number of segments against the received
2647 		 * bytes. A non-matching value would indicate that the
2648 		 * hardware is still updating the Rx descriptors. I'm not
2649 		 * sure whether this check is needed.
2650 */
2651 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2652 if (nsegs != howmany(pktlen, MCLBYTES))
2653 break;
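		/*
		 * Example (assuming 2KB clusters): a 3000 byte frame gives
		 * howmany(3000, MCLBYTES) == 2, so any nsegs value other
		 * than 2 means the descriptor update is still in flight.
		 */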
2654 prog++;
2655 /* Received a frame. */
2656 jme_rxeof(sc);
2657 count -= nsegs;
2658 }
2659
2660 if (prog > 0)
2661 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2662 sc->jme_cdata.jme_rx_ring_map,
2663 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2664
2665 return (count > 0 ? 0 : EAGAIN);
2666 }
2667
2668 static void
2669 jme_tick(void *arg)
2670 {
2671 struct jme_softc *sc;
2672 struct mii_data *mii;
2673
2674 sc = (struct jme_softc *)arg;
2675
2676 JME_LOCK_ASSERT(sc);
2677
2678 mii = device_get_softc(sc->jme_miibus);
2679 mii_tick(mii);
2680 /*
2681 	 * Reclaim Tx buffers that have been completed. It's not
2682 	 * needed here, but it releases allocated mbuf chains faster
2683 	 * and limits the maximum delay to one hz tick.
2684 */
2685 jme_txeof(sc);
2686 jme_stats_update(sc);
2687 jme_watchdog(sc);
2688 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2689 }
2690
2691 static void
2692 jme_reset(struct jme_softc *sc)
2693 {
2694 uint32_t ghc, gpreg;
2695
2696 /* Stop receiver, transmitter. */
2697 jme_stop_rx(sc);
2698 jme_stop_tx(sc);
2699
2700 /* Reset controller. */
2701 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2702 CSR_READ_4(sc, JME_GHC);
2703 DELAY(10);
2704 /*
2705 	 * Work around Rx FIFO overruns seen under certain conditions.
2706 	 * Explicitly synchronize the TX/RX clocks. The TX/RX clocks
2707 	 * should be enabled only after the TX/RX MACs are enabled.
2708 */
2709 if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) {
2710 /* Disable TX clock. */
2711 CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS);
2712 /* Disable RX clock. */
2713 gpreg = CSR_READ_4(sc, JME_GPREG1);
2714 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2715 gpreg = CSR_READ_4(sc, JME_GPREG1);
2716 /* De-assert RESET but still disable TX clock. */
2717 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2718 ghc = CSR_READ_4(sc, JME_GHC);
2719
2720 /* Enable TX clock. */
2721 CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS);
2722 /* Enable RX clock. */
2723 CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS);
2724 CSR_READ_4(sc, JME_GPREG1);
2725
2726 /* Disable TX/RX clock again. */
2727 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2728 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2729 } else
2730 CSR_WRITE_4(sc, JME_GHC, 0);
2731 CSR_READ_4(sc, JME_GHC);
2732 DELAY(10);
2733 }
2734
2735 static void
2736 jme_init(void *xsc)
2737 {
2738 struct jme_softc *sc;
2739
2740 sc = (struct jme_softc *)xsc;
2741 JME_LOCK(sc);
2742 jme_init_locked(sc);
2743 JME_UNLOCK(sc);
2744 }
2745
2746 static void
2747 jme_init_locked(struct jme_softc *sc)
2748 {
2749 if_t ifp;
2750 struct mii_data *mii;
2751 bus_addr_t paddr;
2752 uint32_t reg;
2753 int error;
2754
2755 JME_LOCK_ASSERT(sc);
2756
2757 ifp = sc->jme_ifp;
2758 mii = device_get_softc(sc->jme_miibus);
2759
2760 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2761 return;
2762 /*
2763 * Cancel any pending I/O.
2764 */
2765 jme_stop(sc);
2766
2767 /*
2768 * Reset the chip to a known state.
2769 */
2770 jme_reset(sc);
2771
2772 /* Init descriptors. */
2773 error = jme_init_rx_ring(sc);
2774 if (error != 0) {
2775 device_printf(sc->jme_dev,
2776 "%s: initialization failed: no memory for Rx buffers.\n",
2777 __func__);
2778 jme_stop(sc);
2779 return;
2780 }
2781 jme_init_tx_ring(sc);
2782 /* Initialize shadow status block. */
2783 jme_init_ssb(sc);
2784
2785 /* Reprogram the station address. */
2786 jme_set_macaddr(sc, if_getlladdr(sc->jme_ifp));
2787
2788 /*
2789 * Configure Tx queue.
2790 * Tx priority queue weight value : 0
2791 * Tx FIFO threshold for processing next packet : 16QW
2792 * Maximum Tx DMA length : 512
2793 * Allow Tx DMA burst.
2794 */
2795 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2796 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2797 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2798 sc->jme_txcsr |= sc->jme_tx_dma_size;
2799 sc->jme_txcsr |= TXCSR_DMA_BURST;
2800 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2801
2802 /* Set Tx descriptor counter. */
2803 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2804
2805 /* Set Tx ring address to the hardware. */
2806 paddr = JME_TX_RING_ADDR(sc, 0);
2807 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2808 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2809
2810 /* Configure TxMAC parameters. */
2811 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2812 reg |= TXMAC_THRESH_1_PKT;
2813 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2814 CSR_WRITE_4(sc, JME_TXMAC, reg);
2815
2816 /*
2817 * Configure Rx queue.
2818 * FIFO full threshold for transmitting Tx pause packet : 128T
2819 * FIFO threshold for processing next packet : 128QW
2820 * Rx queue 0 select
2821 * Max Rx DMA length : 128
2822 * Rx descriptor retry : 32
2823 * Rx descriptor retry time gap : 256ns
2824 * Don't receive runt/bad frame.
2825 */
2826 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2827 /*
2828 	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2829 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2830 	 * decrease the FIFO threshold to reduce the FIFO overruns for
2831 	 * frames larger than 4000 bytes.
2832 	 * For the best performance with standard MTU sized frames use
2833 	 * the maximum allowable FIFO threshold, 128QW. Note these do
2834 	 * not hold for chips with full mask revision >= 2; for these
2835 	 * controllers 64QW and 128QW are not valid values.
2836 */
2837 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
2838 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2839 else {
2840 if ((if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2841 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2842 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2843 else
2844 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2845 }
2846 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2847 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2848 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2849 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2850
2851 /* Set Rx descriptor counter. */
2852 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2853
2854 /* Set Rx ring address to the hardware. */
2855 paddr = JME_RX_RING_ADDR(sc, 0);
2856 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2857 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2858
2859 /* Clear receive filter. */
2860 CSR_WRITE_4(sc, JME_RXMAC, 0);
2861 /* Set up the receive filter. */
2862 jme_set_filter(sc);
2863 jme_set_vlan(sc);
2864
2865 /*
2866 	 * Disable all WOL bits as WOL can interfere with normal Rx
2867 	 * operation. Also clear the WOL detection status bits.
2868 */
2869 reg = CSR_READ_4(sc, JME_PMCS);
2870 reg &= ~PMCS_WOL_ENB_MASK;
2871 CSR_WRITE_4(sc, JME_PMCS, reg);
2872
2873 reg = CSR_READ_4(sc, JME_RXMAC);
2874 /*
2875 	 * Pad 10 bytes right before the received frame. This greatly
2876 	 * helps Rx performance on strict-alignment architectures as the
2877 	 * driver does not need to copy the frame to align the payload.
2878 */
2879 reg |= RXMAC_PAD_10BYTES;
2880 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2881 reg |= RXMAC_CSUM_ENB;
2882 CSR_WRITE_4(sc, JME_RXMAC, reg);
2883
2884 /* Configure general purpose reg0 */
2885 reg = CSR_READ_4(sc, JME_GPREG0);
2886 reg &= ~GPREG0_PCC_UNIT_MASK;
2887 /* Set PCC timer resolution to micro-seconds unit. */
2888 reg |= GPREG0_PCC_UNIT_US;
2889 /*
2890 	 * Disable all shadow register posting as we have to read the
2891 	 * JME_INTR_STATUS register in jme_int_task. Also it seems
2892 	 * hard to synchronize the interrupt status between hardware
2893 	 * and software with shadow posting due to the requirements of
2894 	 * bus_dmamap_sync(9).
2895 */
2896 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2897 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2898 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2899 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2900 /* Disable posting of DW0. */
2901 reg &= ~GPREG0_POST_DW0_ENB;
2902 /* Clear PME message. */
2903 reg &= ~GPREG0_PME_ENB;
2904 /* Set PHY address. */
2905 reg &= ~GPREG0_PHY_ADDR_MASK;
2906 reg |= sc->jme_phyaddr;
2907 CSR_WRITE_4(sc, JME_GPREG0, reg);
2908
2909 /* Configure Tx queue 0 packet completion coalescing. */
2910 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2911 PCCTX_COAL_TO_MASK;
2912 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2913 PCCTX_COAL_PKT_MASK;
2914 reg |= PCCTX_COAL_TXQ0;
2915 CSR_WRITE_4(sc, JME_PCCTX, reg);
2916
2917 /* Configure Rx queue 0 packet completion coalescing. */
2918 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2919 PCCRX_COAL_TO_MASK;
2920 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2921 PCCRX_COAL_PKT_MASK;
2922 CSR_WRITE_4(sc, JME_PCCRX0, reg);
2923
2924 /*
2925 	 * Configure PCD (Packet Completion Deferring). It seems PCD
2926 	 * generates an interrupt when the time interval between two
2927 	 * back-to-back incoming/outgoing packets is long enough for
2928 	 * its timer to count down to 0. The arrival of new packets
2929 	 * after the timer has started causes the PCD timer to restart.
2930 	 * Unfortunately, it's not clear how useful PCD is at the
2931 	 * moment, so just use the same values as the PCC parameters.
2932 */
2933 if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) {
2934 sc->jme_rx_pcd_to = sc->jme_rx_coal_to;
2935 if (sc->jme_rx_coal_to > PCDRX_TO_MAX)
2936 sc->jme_rx_pcd_to = PCDRX_TO_MAX;
2937 sc->jme_tx_pcd_to = sc->jme_tx_coal_to;
2938 if (sc->jme_tx_coal_to > PCDTX_TO_MAX)
2939 sc->jme_tx_pcd_to = PCDTX_TO_MAX;
2940 reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT;
2941 reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT;
2942 CSR_WRITE_4(sc, PCDRX_REG(0), reg);
2943 reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT;
2944 reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT;
2945 CSR_WRITE_4(sc, JME_PCDTX, reg);
2946 }
2947
2948 /* Configure shadow status block but don't enable posting. */
2949 paddr = sc->jme_rdata.jme_ssb_block_paddr;
2950 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2951 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2952
2953 /* Disable Timer 1 and Timer 2. */
2954 CSR_WRITE_4(sc, JME_TIMER1, 0);
2955 CSR_WRITE_4(sc, JME_TIMER2, 0);
2956
2957 /* Configure retry transmit period, retry limit value. */
2958 CSR_WRITE_4(sc, JME_TXTRHD,
2959 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2960 TXTRHD_RT_PERIOD_MASK) |
2961 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2962 	    TXTRHD_RT_LIMIT_MASK));
2963
2964 /* Disable RSS. */
2965 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2966
2967 /* Initialize the interrupt mask. */
2968 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2969 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2970
2971 /*
2972 * Enabling Tx/Rx DMA engines and Rx queue processing is
2973 * done after detection of valid link in jme_link_task.
2974 */
2975
2976 sc->jme_flags &= ~JME_FLAG_LINK;
2977 /* Set the current media. */
2978 mii_mediachg(mii);
2979
2980 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2981
2982 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2983 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2984 }
2985
2986 static void
2987 jme_stop(struct jme_softc *sc)
2988 {
2989 if_t ifp;
2990 struct jme_txdesc *txd;
2991 struct jme_rxdesc *rxd;
2992 int i;
2993
2994 JME_LOCK_ASSERT(sc);
2995 /*
2996 * Mark the interface down and cancel the watchdog timer.
2997 */
2998 ifp = sc->jme_ifp;
2999 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
3000 sc->jme_flags &= ~JME_FLAG_LINK;
3001 callout_stop(&sc->jme_tick_ch);
3002 sc->jme_watchdog_timer = 0;
3003
3004 /*
3005 * Disable interrupts.
3006 */
3007 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3008 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
3009
3010 /* Disable updating shadow status block. */
3011 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
3012 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
3013
3014 /* Stop receiver, transmitter. */
3015 jme_stop_rx(sc);
3016 jme_stop_tx(sc);
3017
3018 /* Reclaim Rx/Tx buffers that have been completed. */
3019 jme_rxintr(sc, JME_RX_RING_CNT);
3020 if (sc->jme_cdata.jme_rxhead != NULL)
3021 m_freem(sc->jme_cdata.jme_rxhead);
3022 JME_RXCHAIN_RESET(sc);
3023 jme_txeof(sc);
3024 /*
3025 * Free RX and TX mbufs still in the queues.
3026 */
3027 for (i = 0; i < JME_RX_RING_CNT; i++) {
3028 rxd = &sc->jme_cdata.jme_rxdesc[i];
3029 if (rxd->rx_m != NULL) {
3030 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
3031 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3032 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
3033 rxd->rx_dmamap);
3034 m_freem(rxd->rx_m);
3035 rxd->rx_m = NULL;
3036 }
3037 }
3038 for (i = 0; i < JME_TX_RING_CNT; i++) {
3039 txd = &sc->jme_cdata.jme_txdesc[i];
3040 if (txd->tx_m != NULL) {
3041 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
3042 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3043 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
3044 txd->tx_dmamap);
3045 m_freem(txd->tx_m);
3046 txd->tx_m = NULL;
3047 txd->tx_ndesc = 0;
3048 }
3049 }
3050 jme_stats_update(sc);
3051 jme_stats_save(sc);
3052 }
3053
3054 static void
3055 jme_stop_tx(struct jme_softc *sc)
3056 {
3057 uint32_t reg;
3058 int i;
3059
3060 reg = CSR_READ_4(sc, JME_TXCSR);
3061 if ((reg & TXCSR_TX_ENB) == 0)
3062 return;
3063 reg &= ~TXCSR_TX_ENB;
3064 CSR_WRITE_4(sc, JME_TXCSR, reg);
3065 for (i = JME_TIMEOUT; i > 0; i--) {
3066 DELAY(1);
3067 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
3068 break;
3069 }
3070 if (i == 0)
3071 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
3072 }
3073
3074 static void
3075 jme_stop_rx(struct jme_softc *sc)
3076 {
3077 uint32_t reg;
3078 int i;
3079
3080 reg = CSR_READ_4(sc, JME_RXCSR);
3081 if ((reg & RXCSR_RX_ENB) == 0)
3082 return;
3083 reg &= ~RXCSR_RX_ENB;
3084 CSR_WRITE_4(sc, JME_RXCSR, reg);
3085 for (i = JME_TIMEOUT; i > 0; i--) {
3086 DELAY(1);
3087 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
3088 break;
3089 }
3090 if (i == 0)
3091 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
3092 }
3093
3094 static void
3095 jme_init_tx_ring(struct jme_softc *sc)
3096 {
3097 struct jme_ring_data *rd;
3098 struct jme_txdesc *txd;
3099 int i;
3100
3101 sc->jme_cdata.jme_tx_prod = 0;
3102 sc->jme_cdata.jme_tx_cons = 0;
3103 sc->jme_cdata.jme_tx_cnt = 0;
3104
3105 rd = &sc->jme_rdata;
3106 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
3107 for (i = 0; i < JME_TX_RING_CNT; i++) {
3108 txd = &sc->jme_cdata.jme_txdesc[i];
3109 txd->tx_m = NULL;
3110 txd->tx_desc = &rd->jme_tx_ring[i];
3111 txd->tx_ndesc = 0;
3112 }
3113
3114 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
3115 sc->jme_cdata.jme_tx_ring_map,
3116 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3117 }
3118
3119 static void
3120 jme_init_ssb(struct jme_softc *sc)
3121 {
3122 struct jme_ring_data *rd;
3123
3124 rd = &sc->jme_rdata;
3125 bzero(rd->jme_ssb_block, JME_SSB_SIZE);
3126 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
3127 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3128 }
3129
3130 static int
3131 jme_init_rx_ring(struct jme_softc *sc)
3132 {
3133 struct jme_ring_data *rd;
3134 struct jme_rxdesc *rxd;
3135 int i;
3136
3137 sc->jme_cdata.jme_rx_cons = 0;
3138 JME_RXCHAIN_RESET(sc);
3139 sc->jme_morework = 0;
3140
3141 rd = &sc->jme_rdata;
3142 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
3143 for (i = 0; i < JME_RX_RING_CNT; i++) {
3144 rxd = &sc->jme_cdata.jme_rxdesc[i];
3145 rxd->rx_m = NULL;
3146 rxd->rx_desc = &rd->jme_rx_ring[i];
3147 if (jme_newbuf(sc, rxd) != 0)
3148 return (ENOBUFS);
3149 }
3150
3151 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
3152 sc->jme_cdata.jme_rx_ring_map,
3153 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3154
3155 return (0);
3156 }
3157
3158 static int
3159 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
3160 {
3161 struct jme_desc *desc;
3162 struct mbuf *m;
3163 bus_dma_segment_t segs[1];
3164 bus_dmamap_t map;
3165 int nsegs;
3166
3167 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3168 if (m == NULL)
3169 return (ENOBUFS);
3170 /*
3171 	 * The JMC250 has a 64bit boundary alignment limitation, so
3172 	 * jme(4) takes advantage of the hardware's 10-byte padding
3173 	 * feature in order not to copy the entire frame to align the
3174 	 * IP header on a 32bit boundary.
3175 */
3176 m->m_len = m->m_pkthdr.len = MCLBYTES;
3177
3178 if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
3179 sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3180 m_freem(m);
3181 return (ENOBUFS);
3182 }
3183 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3184
3185 if (rxd->rx_m != NULL) {
3186 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3187 BUS_DMASYNC_POSTREAD);
3188 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
3189 }
3190 map = rxd->rx_dmamap;
3191 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
3192 sc->jme_cdata.jme_rx_sparemap = map;
3193 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3194 BUS_DMASYNC_PREREAD);
3195 rxd->rx_m = m;
3196
3197 desc = rxd->rx_desc;
3198 desc->buflen = htole32(segs[0].ds_len);
3199 desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
3200 desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
3201 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
3202
3203 return (0);
3204 }
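/*
 * The spare map dance in jme_newbuf() avoids a window with an unmapped ring
 * slot: the new mbuf is loaded into the spare DMA map first, and only on
 * success are the old buffer unloaded and the two maps swapped.
 */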
3205
3206 static void
3207 jme_set_vlan(struct jme_softc *sc)
3208 {
3209 if_t ifp;
3210 uint32_t reg;
3211
3212 JME_LOCK_ASSERT(sc);
3213
3214 ifp = sc->jme_ifp;
3215 reg = CSR_READ_4(sc, JME_RXMAC);
3216 reg &= ~RXMAC_VLAN_ENB;
3217 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
3218 reg |= RXMAC_VLAN_ENB;
3219 CSR_WRITE_4(sc, JME_RXMAC, reg);
3220 }
3221
3222 static u_int
3223 jme_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
3224 {
3225 uint32_t crc, *mchash = arg;
3226
3227 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
3228
3229 /* Just want the 6 least significant bits. */
3230 crc &= 0x3f;
3231
3232 /* Set the corresponding bit in the hash table. */
3233 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3234
3235 return (1);
3236 }
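/*
 * Worked example: a multicast address whose big-endian CRC yields the low
 * 6 bits 0x2d selects mchash[0x2d >> 5] == mchash[1], bit
 * (0x2d & 0x1f) == 13, i.e. bit 45 of the 64-bit hash table.
 */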
3237
3238 static void
3239 jme_set_filter(struct jme_softc *sc)
3240 {
3241 if_t ifp;
3242 uint32_t mchash[2];
3243 uint32_t rxcfg;
3244
3245 JME_LOCK_ASSERT(sc);
3246
3247 ifp = sc->jme_ifp;
3248
3249 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3250 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3251 RXMAC_ALLMULTI);
3252 /* Always accept frames destined to our station address. */
3253 rxcfg |= RXMAC_UNICAST;
3254 if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
3255 rxcfg |= RXMAC_BROADCAST;
3256 if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3257 if ((if_getflags(ifp) & IFF_PROMISC) != 0)
3258 rxcfg |= RXMAC_PROMISC;
3259 if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
3260 rxcfg |= RXMAC_ALLMULTI;
3261 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3262 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3263 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3264 return;
3265 }
3266
3267 /*
3268 * Set up the multicast address filter by passing all multicast
3269 * addresses through a CRC generator, and then using the low-order
3270 * 6 bits as an index into the 64 bit multicast hash table. The
3271 * high order bits select the register, while the rest of the bits
3272 * select the bit within the register.
3273 */
3274 rxcfg |= RXMAC_MULTICAST;
3275 bzero(mchash, sizeof(mchash));
3276 if_foreach_llmaddr(ifp, jme_hash_maddr, &mchash);
3277
3278 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3279 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3280 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3281 }
3282
3283 static void
3284 jme_stats_clear(struct jme_softc *sc)
3285 {
3286
3287 JME_LOCK_ASSERT(sc);
3288
3289 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3290 return;
3291
3292 /* Disable and clear counters. */
3293 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3294 /* Activate hw counters. */
3295 CSR_WRITE_4(sc, JME_STATCSR, 0);
3296 CSR_READ_4(sc, JME_STATCSR);
3297 bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
3298 }
3299
3300 static void
3301 jme_stats_save(struct jme_softc *sc)
3302 {
3303
3304 JME_LOCK_ASSERT(sc);
3305
3306 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3307 return;
3308 /* Save current counters. */
3309 bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
3310 /* Disable and clear counters. */
3311 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3312 }
3313
3314 static void
3315 jme_stats_update(struct jme_softc *sc)
3316 {
3317 struct jme_hw_stats *stat, *ostat;
3318 uint32_t reg;
3319
3320 JME_LOCK_ASSERT(sc);
3321
3322 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3323 return;
3324 stat = &sc->jme_stats;
3325 ostat = &sc->jme_ostats;
3326 stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
3327 stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
3328 reg = CSR_READ_4(sc, JME_STAT_CRCMII);
3329 stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
3330 STAT_RX_CRC_ERR_SHIFT;
3331 stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
3332 STAT_RX_MII_ERR_SHIFT;
3333 reg = CSR_READ_4(sc, JME_STAT_RXERR);
3334 stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
3335 STAT_RXERR_OFLOW_SHIFT;
3336 stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
3337 STAT_RXERR_MPTY_SHIFT;
3338 reg = CSR_READ_4(sc, JME_STAT_FAIL);
3339 stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
3340 stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;
3341
3342 /* Account for previous counters. */
3343 stat->rx_good_frames += ostat->rx_good_frames;
3344 stat->rx_crc_errs += ostat->rx_crc_errs;
3345 stat->rx_mii_errs += ostat->rx_mii_errs;
3346 stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
3347 stat->rx_desc_empty += ostat->rx_desc_empty;
3348 stat->rx_bad_frames += ostat->rx_bad_frames;
3349 stat->tx_good_frames += ostat->tx_good_frames;
3350 stat->tx_bad_frames += ostat->tx_bad_frames;
3351 }
3352
3353 static void
3354 jme_phy_down(struct jme_softc *sc)
3355 {
3356 uint32_t reg;
3357
3358 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN);
3359 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3360 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3361 reg |= 0x0000000F;
3362 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3363 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3364 reg &= ~PE1_GIGA_PDOWN_MASK;
3365 reg |= PE1_GIGA_PDOWN_D3;
3366 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3367 }
3368 }
3369
3370 static void
3371 jme_phy_up(struct jme_softc *sc)
3372 {
3373 uint32_t reg;
3374 uint16_t bmcr;
3375
3376 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
3377 bmcr &= ~BMCR_PDOWN;
3378 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
3379 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3380 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3381 reg &= ~0x0000000F;
3382 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3383 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3384 reg &= ~PE1_GIGA_PDOWN_MASK;
3385 reg |= PE1_GIGA_PDOWN_DIS;
3386 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3387 }
3388 }
3389
3390 static int
3391 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3392 {
3393 int error, value;
3394
3395 if (arg1 == NULL)
3396 return (EINVAL);
3397 value = *(int *)arg1;
3398 error = sysctl_handle_int(oidp, &value, 0, req);
3399 if (error || req->newptr == NULL)
3400 return (error);
3401 if (value < low || value > high)
3402 return (EINVAL);
3403 *(int *)arg1 = value;
3404
3405 return (0);
3406 }
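/*
 * The wrappers below bind this range check to the coalescing tunables;
 * e.g. sysctl_hw_jme_tx_coal_to() clamps the Tx completion coalescing
 * timeout (exposed as a per-device sysctl created during attach, not
 * shown here) to [PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX].
 */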
3407
3408 static int
3409 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3410 {
3411 return (sysctl_int_range(oidp, arg1, arg2, req,
3412 PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
3413 }
3414
3415 static int
3416 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3417 {
3418 return (sysctl_int_range(oidp, arg1, arg2, req,
3419 PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
3420 }
3421
3422 static int
3423 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3424 {
3425 return (sysctl_int_range(oidp, arg1, arg2, req,
3426 PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
3427 }
3428
3429 static int
3430 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3431 {
3432 return (sysctl_int_range(oidp, arg1, arg2, req,
3433 PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3434 }
3435
3436 static int
3437 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3438 {
3439 return (sysctl_int_range(oidp, arg1, arg2, req,
3440 JME_PROC_MIN, JME_PROC_MAX));
3441 }
3442