1 /*
2 * Copyright (c) 2017 Stormshield.
3 * Copyright (c) 2017 Semihalf.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include "opt_platform.h"
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/endian.h>
35 #include <sys/mbuf.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/socket.h>
41 #include <sys/sysctl.h>
42 #include <sys/smp.h>
43 #include <sys/taskqueue.h>
44 #ifdef MVNETA_KTR
45 #include <sys/ktr.h>
46 #endif
47
48 #include <net/ethernet.h>
49 #include <net/bpf.h>
50 #include <net/if.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/if_types.h>
55 #include <net/if_vlan_var.h>
56
57 #include <netinet/in_systm.h>
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp_lro.h>
61
62 #include <sys/sockio.h>
63 #include <sys/bus.h>
64 #include <machine/bus.h>
65 #include <sys/rman.h>
66 #include <machine/resource.h>
67
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70
71 #include <dev/ofw/openfirm.h>
72 #include <dev/ofw/ofw_bus.h>
73 #include <dev/ofw/ofw_bus_subr.h>
74
75 #include <dev/mdio/mdio.h>
76
77 #include <arm/mv/mvvar.h>
78
79 #if !defined(__aarch64__)
80 #include <arm/mv/mvreg.h>
81 #include <arm/mv/mvwin.h>
82 #endif
83
84 #include "if_mvnetareg.h"
85 #include "if_mvnetavar.h"
86
87 #include "miibus_if.h"
88 #include "mdio_if.h"
89
90 #ifdef MVNETA_DEBUG
91 #define STATIC /* nothing */
92 #else
93 #define STATIC static
94 #endif
95
96 #define DASSERT(x) KASSERT((x), (#x))
97
98 #define A3700_TCLK_250MHZ 250000000
99
100 /* Device Register Initialization */
101 STATIC int mvneta_initreg(struct ifnet *);
102
/* Descriptor Ring Control for each queue */
104 STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
105 STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
106 STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
107 STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
108 STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
109 STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
110 STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
111 STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
112 STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
113 STATIC int mvneta_dma_create(struct mvneta_softc *);
114
115 /* Rx/Tx Queue Control */
116 STATIC int mvneta_rx_queue_init(struct ifnet *, int);
117 STATIC int mvneta_tx_queue_init(struct ifnet *, int);
118 STATIC int mvneta_rx_queue_enable(struct ifnet *, int);
119 STATIC int mvneta_tx_queue_enable(struct ifnet *, int);
120 STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
121 STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
122 STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
123 STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);
124
125 /* Interrupt Handlers */
126 STATIC void mvneta_disable_intr(struct mvneta_softc *);
127 STATIC void mvneta_enable_intr(struct mvneta_softc *);
128 STATIC void mvneta_rxtxth_intr(void *);
129 STATIC int mvneta_misc_intr(struct mvneta_softc *);
130 STATIC void mvneta_tick(void *);
/* struct ifnet and mii callbacks */
132 STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
133 STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
134 #ifdef MVNETA_MULTIQUEUE
135 STATIC int mvneta_transmit(struct ifnet *, struct mbuf *);
136 #else /* !MVNETA_MULTIQUEUE */
137 STATIC void mvneta_start(struct ifnet *);
138 #endif
139 STATIC void mvneta_qflush(struct ifnet *);
140 STATIC void mvneta_tx_task(void *, int);
141 STATIC int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
142 STATIC void mvneta_init(void *);
143 STATIC void mvneta_init_locked(void *);
144 STATIC void mvneta_stop(struct mvneta_softc *);
145 STATIC void mvneta_stop_locked(struct mvneta_softc *);
146 STATIC int mvneta_mediachange(struct ifnet *);
147 STATIC void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
148 STATIC void mvneta_portup(struct mvneta_softc *);
149 STATIC void mvneta_portdown(struct mvneta_softc *);
150
151 /* Link State Notify */
152 STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
153 STATIC int mvneta_update_media(struct mvneta_softc *, int);
154 STATIC void mvneta_adjust_link(struct mvneta_softc *);
155 STATIC void mvneta_update_eee(struct mvneta_softc *);
156 STATIC void mvneta_update_fc(struct mvneta_softc *);
157 STATIC void mvneta_link_isr(struct mvneta_softc *);
158 STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
159 STATIC void mvneta_linkup(struct mvneta_softc *);
160 STATIC void mvneta_linkdown(struct mvneta_softc *);
161 STATIC void mvneta_linkreset(struct mvneta_softc *);
162
163 /* Tx Subroutines */
164 STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
165 STATIC void mvneta_tx_set_csumflag(struct ifnet *,
166 struct mvneta_tx_desc *, struct mbuf *);
167 STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
168 STATIC void mvneta_tx_drain(struct mvneta_softc *);
169
170 /* Rx Subroutines */
171 STATIC int mvneta_rx(struct mvneta_softc *, int, int);
172 STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
173 STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
174 STATIC void mvneta_rx_set_csumflag(struct ifnet *,
175 struct mvneta_rx_desc *, struct mbuf *);
176 STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);
177
178 /* MAC address filter */
179 STATIC void mvneta_filter_setup(struct mvneta_softc *);
180
181 /* sysctl(9) */
182 STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
183 STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
184 STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
185 STATIC void sysctl_mvneta_init(struct mvneta_softc *);
186
187 /* MIB */
188 STATIC void mvneta_clear_mib(struct mvneta_softc *);
189 STATIC void mvneta_update_mib(struct mvneta_softc *);
190
191 /* Switch */
192 STATIC boolean_t mvneta_has_switch(device_t);
193
194 #define mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
195 #define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)
196
197 STATIC struct mtx mii_mutex;
198 STATIC int mii_init = 0;
199
200 /* Device */
201 STATIC int mvneta_detach(device_t);
202 /* MII */
203 STATIC int mvneta_miibus_readreg(device_t, int, int);
204 STATIC int mvneta_miibus_writereg(device_t, int, int, int);
205
206 /* Clock */
207 STATIC uint32_t mvneta_get_clk(void);
208
209 static device_method_t mvneta_methods[] = {
210 /* Device interface */
211 DEVMETHOD(device_detach, mvneta_detach),
212 /* MII interface */
213 DEVMETHOD(miibus_readreg, mvneta_miibus_readreg),
214 DEVMETHOD(miibus_writereg, mvneta_miibus_writereg),
215 /* MDIO interface */
216 DEVMETHOD(mdio_readreg, mvneta_miibus_readreg),
217 DEVMETHOD(mdio_writereg, mvneta_miibus_writereg),
218
219 /* End */
220 DEVMETHOD_END
221 };
222
223 DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));
224
225 DRIVER_MODULE(miibus, mvneta, miibus_driver, miibus_devclass, 0, 0);
226 DRIVER_MODULE(mdio, mvneta, mdio_driver, mdio_devclass, 0, 0);
227 MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
228 MODULE_DEPEND(mvneta, ether, 1, 1, 1);
229 MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
230 MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);
231
/*
 * List of MIB registers and names
 */
235 enum mvneta_mib_idx
236 {
237 MVNETA_MIB_RX_GOOD_OCT_IDX,
238 MVNETA_MIB_RX_BAD_OCT_IDX,
239 MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
240 MVNETA_MIB_RX_GOOD_FRAME_IDX,
241 MVNETA_MIB_RX_BAD_FRAME_IDX,
242 MVNETA_MIB_RX_BCAST_FRAME_IDX,
243 MVNETA_MIB_RX_MCAST_FRAME_IDX,
244 MVNETA_MIB_RX_FRAME64_OCT_IDX,
245 MVNETA_MIB_RX_FRAME127_OCT_IDX,
246 MVNETA_MIB_RX_FRAME255_OCT_IDX,
247 MVNETA_MIB_RX_FRAME511_OCT_IDX,
248 MVNETA_MIB_RX_FRAME1023_OCT_IDX,
249 MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
250 MVNETA_MIB_TX_GOOD_OCT_IDX,
251 MVNETA_MIB_TX_GOOD_FRAME_IDX,
252 MVNETA_MIB_TX_EXCES_COL_IDX,
253 MVNETA_MIB_TX_MCAST_FRAME_IDX,
254 MVNETA_MIB_TX_BCAST_FRAME_IDX,
255 MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
256 MVNETA_MIB_FC_SENT_IDX,
257 MVNETA_MIB_FC_GOOD_IDX,
258 MVNETA_MIB_FC_BAD_IDX,
259 MVNETA_MIB_PKT_UNDERSIZE_IDX,
260 MVNETA_MIB_PKT_FRAGMENT_IDX,
261 MVNETA_MIB_PKT_OVERSIZE_IDX,
262 MVNETA_MIB_PKT_JABBER_IDX,
263 MVNETA_MIB_MAC_RX_ERR_IDX,
264 MVNETA_MIB_MAC_CRC_ERR_IDX,
265 MVNETA_MIB_MAC_COL_IDX,
266 MVNETA_MIB_MAC_LATE_COL_IDX,
267 };
268
269 STATIC struct mvneta_mib_def {
270 uint32_t regnum;
271 int reg64;
272 const char *sysctl_name;
273 const char *desc;
274 } mvneta_mib_list[] = {
275 [MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
276 "rx_good_oct", "Good Octets Rx"},
277 [MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
278 "rx_bad_oct", "Bad Octets Rx"},
279 [MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
280 "tx_mac_err", "MAC Transmit Error"},
281 [MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
282 "rx_good_frame", "Good Frames Rx"},
283 [MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
284 "rx_bad_frame", "Bad Frames Rx"},
285 [MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
286 "rx_bcast_frame", "Broadcast Frames Rx"},
287 [MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
288 "rx_mcast_frame", "Multicast Frames Rx"},
289 [MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
290 "rx_frame_1_64", "Frame Size 1 - 64"},
291 [MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
292 "rx_frame_65_127", "Frame Size 65 - 127"},
293 [MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
294 "rx_frame_128_255", "Frame Size 128 - 255"},
295 [MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
296 "rx_frame_256_511", "Frame Size 256 - 511"},
297 [MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
298 "rx_frame_512_1023", "Frame Size 512 - 1023"},
299 [MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
300 "rx_fame_1024_max", "Frame Size 1024 - Max"},
301 [MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
302 "tx_good_oct", "Good Octets Tx"},
303 [MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
304 "tx_good_frame", "Good Frames Tx"},
305 [MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
306 "tx_exces_collision", "Excessive Collision"},
307 [MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
308 "tx_mcast_frame", "Multicast Frames Tx"},
309 [MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
310 "tx_bcast_frame", "Broadcast Frames Tx"},
311 [MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
312 "tx_mac_ctl_err", "Unknown MAC Control"},
313 [MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
314 "fc_tx", "Flow Control Tx"},
315 [MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
316 "fc_rx_good", "Good Flow Control Rx"},
317 [MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
318 "fc_rx_bad", "Bad Flow Control Rx"},
319 [MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
320 "pkt_undersize", "Undersized Packets Rx"},
321 [MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
322 "pkt_fragment", "Fragmented Packets Rx"},
323 [MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
324 "pkt_oversize", "Oversized Packets Rx"},
325 [MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
326 "pkt_jabber", "Jabber Packets Rx"},
327 [MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
328 "mac_rx_err", "MAC Rx Errors"},
329 [MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
330 "mac_crc_err", "MAC CRC Errors"},
331 [MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
332 "mac_collision", "MAC Collision"},
333 [MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
334 "mac_late_collision", "MAC Late Collision"},
335 };
336
337 static struct resource_spec res_spec[] = {
338 { SYS_RES_MEMORY, 0, RF_ACTIVE },
339 { SYS_RES_IRQ, 0, RF_ACTIVE },
340 { -1, 0}
341 };
342
343 static struct {
344 driver_intr_t *handler;
345 char * description;
346 } mvneta_intrs[] = {
347 { mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },
348 };
349
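/*
 * Return the core (TCLK) frequency used for timer-based settings.
 * On arm64 (Armada 3700) the clock is a fixed 250 MHz; on the 32-bit
 * Marvell SoCs it is obtained from the platform code via get_tclk().
 */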
350 STATIC uint32_t
mvneta_get_clk()
352 {
353 #if defined(__aarch64__)
354 return (A3700_TCLK_250MHZ);
355 #else
356 return (get_tclk());
357 #endif
358 }
359
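/*
 * Program the unicast MAC address into the port's MACAL/MACAH registers:
 * bytes 0-3 go into the high register, bytes 4-5 into the low register.
 */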
360 static int
mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)
362 {
363 unsigned int mac_h;
364 unsigned int mac_l;
365
366 mac_l = (addr[4] << 8) | (addr[5]);
367 mac_h = (addr[0] << 24) | (addr[1] << 16) |
368 (addr[2] << 8) | (addr[3] << 0);
369
370 MVNETA_WRITE(sc, MVNETA_MACAL, mac_l);
371 MVNETA_WRITE(sc, MVNETA_MACAH, mac_h);
372 return (0);
373 }
374
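/*
 * Obtain the MAC address for this port: try the FDT-provided address
 * first (when built with FDT), then fall back to whatever is already
 * programmed in MACAL/MACAH, and finally generate a pseudo-random
 * address if those registers read back as zero.
 */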
375 static int
mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)
377 {
378 uint32_t mac_l, mac_h;
379
380 #ifdef FDT
381 if (mvneta_fdt_mac_address(sc, addr) == 0)
382 return (0);
383 #endif
384 /*
385 * Fall back -- use the currently programmed address.
386 */
387 mac_l = MVNETA_READ(sc, MVNETA_MACAL);
388 mac_h = MVNETA_READ(sc, MVNETA_MACAH);
389 if (mac_l == 0 && mac_h == 0) {
390 /*
391 * Generate pseudo-random MAC.
392 * Set lower part to random number | unit number.
393 */
394 mac_l = arc4random() & ~0xff;
395 mac_l |= device_get_unit(sc->dev) & 0xff;
396 mac_h = arc4random();
397 mac_h &= ~(3 << 24); /* Clear multicast and LAA bits */
398 if (bootverbose) {
399 device_printf(sc->dev,
400 "Could not acquire MAC address. "
401 "Using randomized one.\n");
402 }
403 }
404
405 addr[0] = (mac_h & 0xff000000) >> 24;
406 addr[1] = (mac_h & 0x00ff0000) >> 16;
407 addr[2] = (mac_h & 0x0000ff00) >> 8;
408 addr[3] = (mac_h & 0x000000ff);
409 addr[4] = (mac_l & 0x0000ff00) >> 8;
410 addr[5] = (mac_l & 0x000000ff);
411 return (0);
412 }
413
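/*
 * Detect whether this port is connected to a DSA switch by looking for a
 * "marvell,dsa" compatible node whose "dsa,ethernet" phandle refers back
 * to this device's node.
 */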
414 STATIC boolean_t
mvneta_has_switch(device_t self)
416 {
417 phandle_t node, switch_node, switch_eth, switch_eth_handle;
418
419 node = ofw_bus_get_node(self);
420 switch_node =
421 ofw_bus_find_compatible(OF_finddevice("/"), "marvell,dsa");
422 switch_eth = 0;
423
424 OF_getencprop(switch_node, "dsa,ethernet",
425 (void*)&switch_eth_handle, sizeof(switch_eth_handle));
426
427 if (switch_eth_handle > 0)
428 switch_eth = OF_node_from_xref(switch_eth_handle);
429
430 /* Return true if dsa,ethernet cell points to us */
431 return (node == switch_eth);
432 }
433
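/*
 * Create the busdma tags for the Tx/Rx descriptor rings and for the mbuf
 * buffers, then allocate the per-queue ring memory.  On failure
 * mvneta_detach() is called to release anything allocated so far.
 */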
434 STATIC int
mvneta_dma_create(struct mvneta_softc *sc)
436 {
437 size_t maxsize, maxsegsz;
438 size_t q;
439 int error;
440
441 /*
442 * Create Tx DMA
443 */
444 maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;
445
446 error = bus_dma_tag_create(
447 bus_get_dma_tag(sc->dev), /* parent */
448 16, 0, /* alignment, boundary */
449 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
450 BUS_SPACE_MAXADDR, /* highaddr */
451 NULL, NULL, /* filtfunc, filtfuncarg */
452 maxsize, /* maxsize */
453 1, /* nsegments */
454 maxsegsz, /* maxsegsz */
455 0, /* flags */
456 NULL, NULL, /* lockfunc, lockfuncarg */
457 &sc->tx_dtag); /* dmat */
458 if (error != 0) {
459 device_printf(sc->dev,
460 "Failed to create DMA tag for Tx descriptors.\n");
461 goto fail;
462 }
463 error = bus_dma_tag_create(
464 bus_get_dma_tag(sc->dev), /* parent */
465 1, 0, /* alignment, boundary */
466 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
467 BUS_SPACE_MAXADDR, /* highaddr */
468 NULL, NULL, /* filtfunc, filtfuncarg */
469 MVNETA_PACKET_SIZE, /* maxsize */
470 MVNETA_TX_SEGLIMIT, /* nsegments */
471 MVNETA_PACKET_SIZE, /* maxsegsz */
472 BUS_DMA_ALLOCNOW, /* flags */
473 NULL, NULL, /* lockfunc, lockfuncarg */
474 &sc->txmbuf_dtag);
475 if (error != 0) {
476 device_printf(sc->dev,
477 "Failed to create DMA tag for Tx mbufs.\n");
478 goto fail;
479 }
480
481 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
482 error = mvneta_ring_alloc_tx_queue(sc, q);
483 if (error != 0) {
484 device_printf(sc->dev,
485 "Failed to allocate DMA safe memory for TxQ: %zu\n", q);
486 goto fail;
487 }
488 }
489
490 /*
491 * Create Rx DMA.
492 */
/* Create tag for Rx descriptors */
494 error = bus_dma_tag_create(
495 bus_get_dma_tag(sc->dev), /* parent */
496 32, 0, /* alignment, boundary */
497 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
498 BUS_SPACE_MAXADDR, /* highaddr */
499 NULL, NULL, /* filtfunc, filtfuncarg */
500 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
501 1, /* nsegments */
502 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
503 0, /* flags */
504 NULL, NULL, /* lockfunc, lockfuncarg */
505 &sc->rx_dtag); /* dmat */
506 if (error != 0) {
507 device_printf(sc->dev,
508 "Failed to create DMA tag for Rx descriptors.\n");
509 goto fail;
510 }
511
512 /* Create tag for Rx buffers */
513 error = bus_dma_tag_create(
514 bus_get_dma_tag(sc->dev), /* parent */
515 32, 0, /* alignment, boundary */
516 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
517 BUS_SPACE_MAXADDR, /* highaddr */
518 NULL, NULL, /* filtfunc, filtfuncarg */
519 MVNETA_PACKET_SIZE, 1, /* maxsize, nsegments */
520 MVNETA_PACKET_SIZE, /* maxsegsz */
521 0, /* flags */
522 NULL, NULL, /* lockfunc, lockfuncarg */
523 &sc->rxbuf_dtag); /* dmat */
524 if (error != 0) {
525 device_printf(sc->dev,
526 "Failed to create DMA tag for Rx buffers.\n");
527 goto fail;
528 }
529
530 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
531 if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
532 device_printf(sc->dev,
533 "Failed to allocate DMA safe memory for RxQ: %zu\n", q);
534 goto fail;
535 }
536 }
537
538 return (0);
539 fail:
540 mvneta_detach(sc->dev);
541
542 return (error);
543 }
544
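/*
 * Attach: map resources, reset the DMA engines, obtain the MAC address,
 * create the ifnet and DMA rings, initialize the port registers, attach
 * either an MII PHY, in-band (SGMII) status or a fixed link, and finally
 * hook up the aggregated RXTX_TH interrupt handler.
 */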
545 /* ARGSUSED */
546 int
mvneta_attach(device_t self)
548 {
549 struct mvneta_softc *sc;
550 struct ifnet *ifp;
551 device_t child;
552 int ifm_target;
553 int q, error;
554 #if !defined(__aarch64__)
555 uint32_t reg;
556 #endif
557
558 sc = device_get_softc(self);
559 sc->dev = self;
560
561 mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);
562
563 error = bus_alloc_resources(self, res_spec, sc->res);
564 if (error) {
565 device_printf(self, "could not allocate resources\n");
566 return (ENXIO);
567 }
568
569 sc->version = MVNETA_READ(sc, MVNETA_PV);
570 device_printf(self, "version is %x\n", sc->version);
571 callout_init(&sc->tick_ch, 0);
572
573 /*
574 * make sure DMA engines are in reset state
575 */
576 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
577 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
578
579 #if !defined(__aarch64__)
580 /*
581 * Disable port snoop for buffers and descriptors
582 * to avoid L2 caching of both without DRAM copy.
583 * Obtain coherency settings from the first MBUS
584 * window attribute.
585 */
586 if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
587 reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
588 reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
589 reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
590 MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
591 }
592 #endif
593
594 /*
595 * MAC address
596 */
597 if (mvneta_get_mac_address(sc, sc->enaddr)) {
598 device_printf(self, "no mac address.\n");
599 return (ENXIO);
600 }
601 mvneta_set_mac_address(sc, sc->enaddr);
602
603 mvneta_disable_intr(sc);
604
605 /* Allocate network interface */
606 ifp = sc->ifp = if_alloc(IFT_ETHER);
607 if (ifp == NULL) {
608 device_printf(self, "if_alloc() failed\n");
609 mvneta_detach(self);
610 return (ENOMEM);
611 }
612 if_initname(ifp, device_get_name(self), device_get_unit(self));
613
614 /*
615 * We can support 802.1Q VLAN-sized frames and jumbo
616 * Ethernet frames.
617 */
618 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU;
619
620 ifp->if_softc = sc;
621 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
622 #ifdef MVNETA_MULTIQUEUE
623 ifp->if_transmit = mvneta_transmit;
624 ifp->if_qflush = mvneta_qflush;
625 #else /* !MVNETA_MULTIQUEUE */
626 ifp->if_start = mvneta_start;
627 ifp->if_snd.ifq_drv_maxlen = MVNETA_TX_RING_CNT - 1;
628 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
629 IFQ_SET_READY(&ifp->if_snd);
630 #endif
631 ifp->if_init = mvneta_init;
632 ifp->if_ioctl = mvneta_ioctl;
633
634 /*
635 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
636 */
637 ifp->if_capabilities |= IFCAP_HWCSUM;
638
/*
 * VLAN hardware tagging is not supported by the controller, but it is
 * required for VLAN hardware checksums, so it is handled in the driver.
 */
644 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
645
646 /*
647 * Currently IPv6 HW checksum is broken, so make sure it is disabled.
648 */
649 ifp->if_capabilities &= ~IFCAP_HWCSUM_IPV6;
650 ifp->if_capenable = ifp->if_capabilities;
651
/*
 * Option(s) disabled by default:
 * - Large Receive Offload (IFCAP_LRO is advertised below but left out
 *   of if_capenable).
 */
656 ifp->if_capabilities |= IFCAP_LRO;
657
658 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
659
660 /*
661 * Device DMA Buffer allocation.
662 * Handles resource deallocation in case of failure.
663 */
664 error = mvneta_dma_create(sc);
665 if (error != 0) {
666 mvneta_detach(self);
667 return (error);
668 }
669
670 /* Initialize queues */
671 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
672 error = mvneta_ring_init_tx_queue(sc, q);
673 if (error != 0) {
674 mvneta_detach(self);
675 return (error);
676 }
677 }
678
679 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
680 error = mvneta_ring_init_rx_queue(sc, q);
681 if (error != 0) {
682 mvneta_detach(self);
683 return (error);
684 }
685 }
686
687 ether_ifattach(ifp, sc->enaddr);
688
689 /*
690 * Enable DMA engines and Initialize Device Registers.
691 */
692 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
693 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
694 MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
695 mvneta_sc_lock(sc);
696 mvneta_filter_setup(sc);
697 mvneta_sc_unlock(sc);
698 mvneta_initreg(ifp);
699
/*
 * Now that the MAC is working, set up MII.
 */
703 if (mii_init == 0) {
/*
 * The MII bus is shared by all MACs and all PHYs in the SoC.
 * Serializing the bus access should be safe.
 */
708 mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);
709 mii_init = 1;
710 }
711
712 /* Attach PHY(s) */
713 if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
714 error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
715 mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
716 MII_OFFSET_ANY, 0);
717 if (error != 0) {
718 if (bootverbose) {
719 device_printf(self,
720 "MII attach failed, error: %d\n", error);
721 }
722 ether_ifdetach(sc->ifp);
723 mvneta_detach(self);
724 return (error);
725 }
726 sc->mii = device_get_softc(sc->miibus);
727 sc->phy_attached = 1;
728
729 /* Disable auto-negotiation in MAC - rely on PHY layer */
730 mvneta_update_autoneg(sc, FALSE);
731 } else if (sc->use_inband_status == TRUE) {
732 /* In-band link status */
733 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
734 mvneta_mediastatus);
735
736 /* Configure media */
737 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
738 0, NULL);
739 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
740 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
741 0, NULL);
742 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
743 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
744 0, NULL);
745 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
746 ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);
747
748 /* Enable auto-negotiation */
749 mvneta_update_autoneg(sc, TRUE);
750
751 mvneta_sc_lock(sc);
752 if (MVNETA_IS_LINKUP(sc))
753 mvneta_linkup(sc);
754 else
755 mvneta_linkdown(sc);
756 mvneta_sc_unlock(sc);
757
758 } else {
759 /* Fixed-link, use predefined values */
760 mvneta_update_autoneg(sc, FALSE);
761 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
762 mvneta_mediastatus);
763
764 ifm_target = IFM_ETHER;
765 switch (sc->phy_speed) {
766 case 2500:
767 if (sc->phy_mode != MVNETA_PHY_SGMII &&
768 sc->phy_mode != MVNETA_PHY_QSGMII) {
769 device_printf(self,
770 "2.5G speed can work only in (Q)SGMII mode\n");
771 ether_ifdetach(sc->ifp);
772 mvneta_detach(self);
773 return (ENXIO);
774 }
775 ifm_target |= IFM_2500_T;
776 break;
777 case 1000:
778 ifm_target |= IFM_1000_T;
779 break;
780 case 100:
781 ifm_target |= IFM_100_TX;
782 break;
783 case 10:
784 ifm_target |= IFM_10_T;
785 break;
786 default:
787 ether_ifdetach(sc->ifp);
788 mvneta_detach(self);
789 return (ENXIO);
790 }
791
792 if (sc->phy_fdx)
793 ifm_target |= IFM_FDX;
794 else
795 ifm_target |= IFM_HDX;
796
797 ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
798 ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
799 if_link_state_change(sc->ifp, LINK_STATE_UP);
800
801 if (mvneta_has_switch(self)) {
802 child = device_add_child(sc->dev, "mdio", -1);
803 if (child == NULL) {
804 ether_ifdetach(sc->ifp);
805 mvneta_detach(self);
806 return (ENXIO);
807 }
808 bus_generic_attach(sc->dev);
809 bus_generic_attach(child);
810 }
811
812 /* Configure MAC media */
813 mvneta_update_media(sc, ifm_target);
814 }
815
816 sysctl_mvneta_init(sc);
817
818 callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);
819
820 error = bus_setup_intr(self, sc->res[1],
821 INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
822 &sc->ih_cookie[0]);
823 if (error) {
824 device_printf(self, "could not setup %s\n",
825 mvneta_intrs[0].description);
826 ether_ifdetach(sc->ifp);
827 mvneta_detach(self);
828 return (error);
829 }
830
831 return (0);
832 }
833
834 STATIC int
mvneta_detach(device_t dev)
836 {
837 struct mvneta_softc *sc;
838 int q;
839
840 sc = device_get_softc(dev);
841
842 mvneta_stop(sc);
843 /* Detach network interface */
844 if (sc->ifp)
845 if_free(sc->ifp);
846
847 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
848 mvneta_ring_dealloc_rx_queue(sc, q);
849 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
850 mvneta_ring_dealloc_tx_queue(sc, q);
851
852 if (sc->tx_dtag != NULL)
853 bus_dma_tag_destroy(sc->tx_dtag);
854 if (sc->rx_dtag != NULL)
855 bus_dma_tag_destroy(sc->rx_dtag);
856 if (sc->txmbuf_dtag != NULL)
857 bus_dma_tag_destroy(sc->txmbuf_dtag);
858
859 bus_release_resources(dev, res_spec, sc->res);
860 return (0);
861 }
862
863 /*
864 * MII
865 */
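/*
 * Read a PHY register over the shared SMI interface: wait for the SMI
 * unit to become idle, issue the read opcode, then poll until the data
 * is flagged valid.  Returns -1 on a busy timeout.
 */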
866 STATIC int
mvneta_miibus_readreg(device_t dev, int phy, int reg)
868 {
869 struct mvneta_softc *sc;
870 struct ifnet *ifp;
871 uint32_t smi, val;
872 int i;
873
874 sc = device_get_softc(dev);
875 ifp = sc->ifp;
876
877 mtx_lock(&mii_mutex);
878
879 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
880 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
881 break;
882 DELAY(1);
883 }
884 if (i == MVNETA_PHY_TIMEOUT) {
885 if_printf(ifp, "SMI busy timeout\n");
886 mtx_unlock(&mii_mutex);
887 return (-1);
888 }
889
890 smi = MVNETA_SMI_PHYAD(phy) |
891 MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
892 MVNETA_WRITE(sc, MVNETA_SMI, smi);
893
894 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
895 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
896 break;
897 DELAY(1);
898 }
899
900 if (i == MVNETA_PHY_TIMEOUT) {
901 if_printf(ifp, "SMI busy timeout\n");
902 mtx_unlock(&mii_mutex);
903 return (-1);
904 }
905 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
906 smi = MVNETA_READ(sc, MVNETA_SMI);
907 if (smi & MVNETA_SMI_READVALID)
908 break;
909 DELAY(1);
910 }
911
912 if (i == MVNETA_PHY_TIMEOUT) {
913 if_printf(ifp, "SMI busy timeout\n");
914 mtx_unlock(&mii_mutex);
915 return (-1);
916 }
917
918 mtx_unlock(&mii_mutex);
919
920 #ifdef MVNETA_KTR
921 CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", ifp->if_xname, i,
922 MVNETA_PHY_TIMEOUT);
923 #endif
924
925 val = smi & MVNETA_SMI_DATA_MASK;
926
927 #ifdef MVNETA_KTR
928 CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, phy,
929 reg, val);
930 #endif
931 return (val);
932 }
933
934 STATIC int
mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)
936 {
937 struct mvneta_softc *sc;
938 struct ifnet *ifp;
939 uint32_t smi;
940 int i;
941
942 sc = device_get_softc(dev);
943 ifp = sc->ifp;
944 #ifdef MVNETA_KTR
945 CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname,
946 phy, reg, val);
947 #endif
948
949 mtx_lock(&mii_mutex);
950
951 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
952 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
953 break;
954 DELAY(1);
955 }
956 if (i == MVNETA_PHY_TIMEOUT) {
957 if_printf(ifp, "SMI busy timeout\n");
958 mtx_unlock(&mii_mutex);
959 return (0);
960 }
961
962 smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
963 MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
964 MVNETA_WRITE(sc, MVNETA_SMI, smi);
965
966 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
967 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
968 break;
969 DELAY(1);
970 }
971
972 mtx_unlock(&mii_mutex);
973
974 if (i == MVNETA_PHY_TIMEOUT)
975 if_printf(ifp, "phy write timed out\n");
976
977 return (0);
978 }
979
980 STATIC void
mvneta_portup(struct mvneta_softc *sc)
982 {
983 int q;
984
985 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
986 mvneta_rx_lockq(sc, q);
987 mvneta_rx_queue_enable(sc->ifp, q);
988 mvneta_rx_unlockq(sc, q);
989 }
990
991 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
992 mvneta_tx_lockq(sc, q);
993 mvneta_tx_queue_enable(sc->ifp, q);
994 mvneta_tx_unlockq(sc, q);
995 }
996
997 }
998
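/*
 * Quiesce the port: mark every Rx/Tx queue disabled, then disable the
 * queues in RQC/TQC and poll (with timeouts) until Rx/Tx activity stops
 * and the Tx FIFO drains.
 */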
999 STATIC void
mvneta_portdown(struct mvneta_softc *sc)
1001 {
1002 struct mvneta_rx_ring *rx;
1003 struct mvneta_tx_ring *tx;
1004 int q, cnt;
1005 uint32_t reg;
1006
1007 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1008 rx = MVNETA_RX_RING(sc, q);
1009 mvneta_rx_lockq(sc, q);
1010 rx->queue_status = MVNETA_QUEUE_DISABLED;
1011 mvneta_rx_unlockq(sc, q);
1012 }
1013
1014 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1015 tx = MVNETA_TX_RING(sc, q);
1016 mvneta_tx_lockq(sc, q);
1017 tx->queue_status = MVNETA_QUEUE_DISABLED;
1018 mvneta_tx_unlockq(sc, q);
1019 }
1020
1021 /* Wait for all Rx activity to terminate. */
1022 reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
1023 reg = MVNETA_RQC_DIS(reg);
1024 MVNETA_WRITE(sc, MVNETA_RQC, reg);
1025 cnt = 0;
1026 do {
1027 if (cnt >= RX_DISABLE_TIMEOUT) {
1028 if_printf(sc->ifp,
1029 "timeout for RX stopped. rqc 0x%x\n", reg);
1030 break;
1031 }
1032 cnt++;
1033 reg = MVNETA_READ(sc, MVNETA_RQC);
1034 } while ((reg & MVNETA_RQC_EN_MASK) != 0);
1035
1036 /* Wait for all Tx activity to terminate. */
1037 reg = MVNETA_READ(sc, MVNETA_PIE);
1038 reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
1039 MVNETA_WRITE(sc, MVNETA_PIE, reg);
1040
1041 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1042 reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
1043 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1044
1045 reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
1046 reg = MVNETA_TQC_DIS(reg);
1047 MVNETA_WRITE(sc, MVNETA_TQC, reg);
1048 cnt = 0;
1049 do {
1050 if (cnt >= TX_DISABLE_TIMEOUT) {
1051 if_printf(sc->ifp,
1052 "timeout for TX stopped. tqc 0x%x\n", reg);
1053 break;
1054 }
1055 cnt++;
1056 reg = MVNETA_READ(sc, MVNETA_TQC);
1057 } while ((reg & MVNETA_TQC_EN_MASK) != 0);
1058
/* Wait until the Tx FIFO is empty. */
1060 cnt = 0;
1061 do {
1062 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1063 if_printf(sc->ifp,
1064 "timeout for TX FIFO drained. ps0 0x%x\n", reg);
1065 break;
1066 }
1067 cnt++;
1068 reg = MVNETA_READ(sc, MVNETA_PS0);
1069 } while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) &&
1070 ((reg & MVNETA_PS0_TXINPROG) != 0));
1071 }
1072
/*
 * Device Register Initialization
 *    Reset device registers to the driver's default values.
 *    The device is not enabled here.
 */
1078 STATIC int
mvneta_initreg(struct ifnet *ifp)
1080 {
1081 struct mvneta_softc *sc;
1082 int q, i;
1083 uint32_t reg;
1084
1085 sc = ifp->if_softc;
1086 #ifdef MVNETA_KTR
1087 CTR1(KTR_SPARE2, "%s initializing device register", ifp->if_xname);
1088 #endif
1089
1090 /* Disable Legacy WRR, Disable EJP, Release from reset. */
1091 MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
1092 /* Enable mbus retry. */
1093 MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN);
1094
1095 /* Init TX/RX Queue Registers */
1096 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1097 mvneta_rx_lockq(sc, q);
1098 if (mvneta_rx_queue_init(ifp, q) != 0) {
1099 device_printf(sc->dev,
1100 "initialization failed: cannot initialize queue\n");
1101 mvneta_rx_unlockq(sc, q);
1102 return (ENOBUFS);
1103 }
1104 mvneta_rx_unlockq(sc, q);
1105 }
1106 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1107 mvneta_tx_lockq(sc, q);
1108 if (mvneta_tx_queue_init(ifp, q) != 0) {
1109 device_printf(sc->dev,
1110 "initialization failed: cannot initialize queue\n");
1111 mvneta_tx_unlockq(sc, q);
1112 return (ENOBUFS);
1113 }
1114 mvneta_tx_unlockq(sc, q);
1115 }
1116
1117 /*
1118 * Ethernet Unit Control - disable automatic PHY management by HW.
1119 * In case the port uses SMI-controlled PHY, poll its status with
1120 * mii_tick() and update MAC settings accordingly.
1121 */
1122 reg = MVNETA_READ(sc, MVNETA_EUC);
1123 reg &= ~MVNETA_EUC_POLLING;
1124 MVNETA_WRITE(sc, MVNETA_EUC, reg);
1125
1126 /* EEE: Low Power Idle */
1127 reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
1128 reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
1129 MVNETA_WRITE(sc, MVNETA_LPIC0, reg);
1130
1131 reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
1132 MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
1133
1134 reg = MVNETA_LPIC2_MUSTSET;
1135 MVNETA_WRITE(sc, MVNETA_LPIC2, reg);
1136
1137 /* Port MAC Control set 0 */
1138 reg = MVNETA_PMACC0_MUSTSET; /* must write 0x1 */
1139 reg &= ~MVNETA_PMACC0_PORTEN; /* port is still disabled */
1140 reg |= MVNETA_PMACC0_FRAMESIZELIMIT(MVNETA_MAX_FRAME);
1141 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
1142
1143 /* Port MAC Control set 2 */
1144 reg = MVNETA_READ(sc, MVNETA_PMACC2);
1145 switch (sc->phy_mode) {
1146 case MVNETA_PHY_QSGMII:
1147 reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
1148 MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
1149 break;
1150 case MVNETA_PHY_SGMII:
1151 reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
1152 MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
1153 break;
1154 case MVNETA_PHY_RGMII:
1155 case MVNETA_PHY_RGMII_ID:
1156 reg |= MVNETA_PMACC2_RGMIIEN;
1157 break;
1158 }
1159 reg |= MVNETA_PMACC2_MUSTSET;
1160 reg &= ~MVNETA_PMACC2_PORTMACRESET;
1161 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
1162
1163 /* Port Configuration Extended: enable Tx CRC generation */
1164 reg = MVNETA_READ(sc, MVNETA_PXCX);
1165 reg &= ~MVNETA_PXCX_TXCRCDIS;
1166 MVNETA_WRITE(sc, MVNETA_PXCX, reg);
1167
/* Clear MIB counter registers (clear on read). */
1169 for (i = 0; i < nitems(mvneta_mib_list); i++) {
1170 if (mvneta_mib_list[i].reg64)
1171 MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
1172 else
1173 MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
1174 }
1175 MVNETA_READ(sc, MVNETA_PDFC);
1176 MVNETA_READ(sc, MVNETA_POFC);
1177
1178 /* Set SDC register except IPGINT bits */
1179 reg = MVNETA_SDC_RXBSZ_16_64BITWORDS;
1180 reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
1181 reg |= MVNETA_SDC_BLMR;
1182 reg |= MVNETA_SDC_BLMT;
1183 MVNETA_WRITE(sc, MVNETA_SDC, reg);
1184
1185 return (0);
1186 }
1187
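/*
 * busdma load callback: the rings are allocated as a single contiguous
 * segment, so just record its bus address for the caller.
 */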
1188 STATIC void
mvneta_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1190 {
1191
1192 if (error != 0)
1193 return;
1194 *(bus_addr_t *)arg = segs->ds_addr;
1195 }
1196
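/*
 * Allocate DMA-safe memory for one Rx descriptor ring, load it to obtain
 * its bus address, and create a DMA map for every Rx buffer slot.
 */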
1197 STATIC int
mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
1199 {
1200 struct mvneta_rx_ring *rx;
1201 struct mvneta_buf *rxbuf;
1202 bus_dmamap_t dmap;
1203 int i, error;
1204
1205 if (q >= MVNETA_RX_QNUM_MAX)
1206 return (EINVAL);
1207
1208 rx = MVNETA_RX_RING(sc, q);
1209 mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
1210 /* Allocate DMA memory for Rx descriptors */
1211 error = bus_dmamem_alloc(sc->rx_dtag,
1212 (void**)&(rx->desc),
1213 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1214 &rx->desc_map);
1215 if (error != 0 || rx->desc == NULL)
1216 goto fail;
1217 error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
1218 rx->desc,
1219 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
1220 mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
1221 if (error != 0)
1222 goto fail;
1223
1224 for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
1225 error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
1226 if (error != 0) {
1227 device_printf(sc->dev,
1228 "Failed to create DMA map for Rx buffer num: %d\n", i);
1229 goto fail;
1230 }
1231 rxbuf = &rx->rxbuf[i];
1232 rxbuf->dmap = dmap;
1233 rxbuf->m = NULL;
1234 }
1235
1236 return (0);
1237 fail:
1238 mvneta_ring_dealloc_rx_queue(sc, q);
1239 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
1240 return (error);
1241 }
1242
1243 STATIC int
mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
1245 {
1246 struct mvneta_tx_ring *tx;
1247 int error;
1248
1249 if (q >= MVNETA_TX_QNUM_MAX)
1250 return (EINVAL);
1251 tx = MVNETA_TX_RING(sc, q);
1252 mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
1253 error = bus_dmamem_alloc(sc->tx_dtag,
1254 (void**)&(tx->desc),
1255 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1256 &tx->desc_map);
1257 if (error != 0 || tx->desc == NULL)
1258 goto fail;
1259 error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
1260 tx->desc,
1261 sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
1262 mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
1263 if (error != 0)
1264 goto fail;
1265
1266 #ifdef MVNETA_MULTIQUEUE
1267 tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
1268 &tx->ring_mtx);
1269 if (tx->br == NULL) {
1270 device_printf(sc->dev,
1271 "Could not setup buffer ring for TxQ(%d)\n", q);
1272 error = ENOMEM;
1273 goto fail;
1274 }
1275 #endif
1276
1277 return (0);
1278 fail:
1279 mvneta_ring_dealloc_tx_queue(sc, q);
1280 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
1281 return (error);
1282 }
1283
1284 STATIC void
mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
1286 {
1287 struct mvneta_tx_ring *tx;
1288 struct mvneta_buf *txbuf;
1289 void *kva;
1290 int error;
1291 int i;
1292
1293 if (q >= MVNETA_TX_QNUM_MAX)
1294 return;
1295 tx = MVNETA_TX_RING(sc, q);
1296
1297 if (tx->taskq != NULL) {
1298 /* Remove task */
1299 while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
1300 taskqueue_drain(tx->taskq, &tx->task);
1301 }
1302 #ifdef MVNETA_MULTIQUEUE
1303 if (tx->br != NULL)
1304 drbr_free(tx->br, M_DEVBUF);
1305 #endif
1306
1307 if (sc->txmbuf_dtag != NULL) {
1308 if (mtx_name(&tx->ring_mtx) != NULL) {
/*
 * Maps are loaded only after the mutex has been initialized, so if
 * the mutex was never initialized we can skip unloading the maps.
 */
1314 mvneta_tx_lockq(sc, q);
1315 mvneta_ring_flush_tx_queue(sc, q);
1316 mvneta_tx_unlockq(sc, q);
1317 }
1318 for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1319 txbuf = &tx->txbuf[i];
1320 if (txbuf->dmap != NULL) {
1321 error = bus_dmamap_destroy(sc->txmbuf_dtag,
1322 txbuf->dmap);
1323 if (error != 0) {
1324 panic("%s: map busy for Tx descriptor (Q%d, %d)",
1325 __func__, q, i);
1326 }
1327 }
1328 }
1329 }
1330
1331 if (tx->desc_pa != 0)
1332 bus_dmamap_unload(sc->tx_dtag, tx->desc_map);
1333
1334 kva = (void *)tx->desc;
1335 if (kva != NULL)
1336 bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);
1337
1338 if (mtx_name(&tx->ring_mtx) != NULL)
1339 mtx_destroy(&tx->ring_mtx);
1340
1341 memset(tx, 0, sizeof(*tx));
1342 }
1343
1344 STATIC void
mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
1346 {
1347 struct mvneta_rx_ring *rx;
1348 struct lro_ctrl *lro;
1349 void *kva;
1350
1351 if (q >= MVNETA_RX_QNUM_MAX)
1352 return;
1353
1354 rx = MVNETA_RX_RING(sc, q);
1355
1356 mvneta_ring_flush_rx_queue(sc, q);
1357
1358 if (rx->desc_pa != 0)
1359 bus_dmamap_unload(sc->rx_dtag, rx->desc_map);
1360
1361 kva = (void *)rx->desc;
1362 if (kva != NULL)
1363 bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);
1364
1365 lro = &rx->lro;
1366 tcp_lro_free(lro);
1367
1368 if (mtx_name(&rx->ring_mtx) != NULL)
1369 mtx_destroy(&rx->ring_mtx);
1370
1371 memset(rx, 0, sizeof(*rx));
1372 }
1373
1374 STATIC int
mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
1376 {
1377 struct mvneta_rx_ring *rx;
1378 struct lro_ctrl *lro;
1379 int error;
1380
1381 if (q >= MVNETA_RX_QNUM_MAX)
1382 return (0);
1383
1384 rx = MVNETA_RX_RING(sc, q);
1385 rx->dma = rx->cpu = 0;
1386 rx->queue_th_received = MVNETA_RXTH_COUNT;
1387 rx->queue_th_time = (mvneta_get_clk() / 1000) / 10; /* 0.1 [ms] */
1388
1389 /* Initialize LRO */
1390 rx->lro_enabled = FALSE;
1391 if ((sc->ifp->if_capenable & IFCAP_LRO) != 0) {
1392 lro = &rx->lro;
1393 error = tcp_lro_init(lro);
1394 if (error != 0)
1395 device_printf(sc->dev, "LRO Initialization failed!\n");
1396 else {
1397 rx->lro_enabled = TRUE;
1398 lro->ifp = sc->ifp;
1399 }
1400 }
1401
1402 return (0);
1403 }
1404
1405 STATIC int
mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
1407 {
1408 struct mvneta_tx_ring *tx;
1409 struct mvneta_buf *txbuf;
1410 int i, error;
1411
1412 if (q >= MVNETA_TX_QNUM_MAX)
1413 return (0);
1414
1415 tx = MVNETA_TX_RING(sc, q);
1416
1417 /* Tx handle */
1418 for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1419 txbuf = &tx->txbuf[i];
1420 txbuf->m = NULL;
/* The Tx handle needs a DMA map for bus_dmamap_load_mbuf(). */
1422 error = bus_dmamap_create(sc->txmbuf_dtag, 0,
1423 &txbuf->dmap);
1424 if (error != 0) {
1425 device_printf(sc->dev,
1426 "can't create dma map (tx ring %d)\n", i);
1427 return (error);
1428 }
1429 }
1430 tx->dma = tx->cpu = 0;
1431 tx->used = 0;
1432 tx->drv_error = 0;
1433 tx->queue_status = MVNETA_QUEUE_DISABLED;
1434 tx->queue_hung = FALSE;
1435
1436 tx->ifp = sc->ifp;
1437 tx->qidx = q;
1438 TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
1439 tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
1440 taskqueue_thread_enqueue, &tx->taskq);
1441 taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
1442 device_get_nameunit(sc->dev), q);
1443
1444 return (0);
1445 }
1446
1447 STATIC void
mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
1449 {
1450 struct mvneta_tx_ring *tx;
1451 struct mvneta_buf *txbuf;
1452 int i;
1453
1454 tx = MVNETA_TX_RING(sc, q);
1455 KASSERT_TX_MTX(sc, q);
1456
1457 /* Tx handle */
1458 for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1459 txbuf = &tx->txbuf[i];
1460 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
1461 if (txbuf->m != NULL) {
1462 m_freem(txbuf->m);
1463 txbuf->m = NULL;
1464 }
1465 }
1466 tx->dma = tx->cpu = 0;
1467 tx->used = 0;
1468 }
1469
1470 STATIC void
mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
1472 {
1473 struct mvneta_rx_ring *rx;
1474 struct mvneta_buf *rxbuf;
1475 int i;
1476
1477 rx = MVNETA_RX_RING(sc, q);
1478 KASSERT_RX_MTX(sc, q);
1479
1480 /* Rx handle */
1481 for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
1482 rxbuf = &rx->rxbuf[i];
1483 mvneta_rx_buf_free(sc, rxbuf);
1484 }
1485 rx->dma = rx->cpu = 0;
1486 }
1487
1488 /*
1489 * Rx/Tx Queue Control
1490 */
1491 STATIC int
mvneta_rx_queue_init(struct ifnet *ifp, int q)
1493 {
1494 struct mvneta_softc *sc;
1495 struct mvneta_rx_ring *rx;
1496 uint32_t reg;
1497
1498 sc = ifp->if_softc;
1499 KASSERT_RX_MTX(sc, q);
1500 rx = MVNETA_RX_RING(sc, q);
1501 DASSERT(rx->desc_pa != 0);
1502
1503 /* descriptor address */
1504 MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
1505
1506 /* Rx buffer size and descriptor ring size */
1507 reg = MVNETA_PRXDQS_BUFFERSIZE(MVNETA_PACKET_SIZE >> 3);
1508 reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
1509 MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
1510 #ifdef MVNETA_KTR
1511 CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", ifp->if_xname, q,
1512 MVNETA_READ(sc, MVNETA_PRXDQS(q)));
1513 #endif
1514 /* Rx packet offset address */
1515 reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
1516 MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
1517 #ifdef MVNETA_KTR
1518 CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", ifp->if_xname, q,
1519 MVNETA_READ(sc, MVNETA_PRXC(q)));
1520 #endif
1521
1522 /* if DMA is not working, register is not updated */
1523 DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
1524 return (0);
1525 }
1526
1527 STATIC int
mvneta_tx_queue_init(struct ifnet *ifp, int q)
1529 {
1530 struct mvneta_softc *sc;
1531 struct mvneta_tx_ring *tx;
1532 uint32_t reg;
1533
1534 sc = ifp->if_softc;
1535 KASSERT_TX_MTX(sc, q);
1536 tx = MVNETA_TX_RING(sc, q);
1537 DASSERT(tx->desc_pa != 0);
1538
1539 /* descriptor address */
1540 MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
1541
1542 /* descriptor ring size */
1543 reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
1544 MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);
1545
1546 /* if DMA is not working, register is not updated */
1547 DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
1548 return (0);
1549 }
1550
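/*
 * Enable one Rx queue: program the interrupt coalescing thresholds
 * (packet count and time), unmask its RXTX_TH interrupt, and set the
 * queue's enable bit in RQC.
 */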
1551 STATIC int
mvneta_rx_queue_enable(struct ifnet *ifp, int q)
1553 {
1554 struct mvneta_softc *sc;
1555 struct mvneta_rx_ring *rx;
1556 uint32_t reg;
1557
1558 sc = ifp->if_softc;
1559 rx = MVNETA_RX_RING(sc, q);
1560 KASSERT_RX_MTX(sc, q);
1561
1562 /* Set Rx interrupt threshold */
1563 reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
1564 MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);
1565
1566 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
1567 MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);
1568
1569 /* Unmask RXTX_TH Intr. */
1570 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
1572 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1573
1574 /* Enable Rx queue */
1575 reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
1576 reg |= MVNETA_RQC_ENQ(q);
1577 MVNETA_WRITE(sc, MVNETA_RQC, reg);
1578
1579 rx->queue_status = MVNETA_QUEUE_WORKING;
1580 return (0);
1581 }
1582
1583 STATIC int
mvneta_tx_queue_enable(struct ifnet *ifp, int q)
1585 {
1586 struct mvneta_softc *sc;
1587 struct mvneta_tx_ring *tx;
1588
1589 sc = ifp->if_softc;
1590 tx = MVNETA_TX_RING(sc, q);
1591 KASSERT_TX_MTX(sc, q);
1592
1593 /* Enable Tx queue */
1594 MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));
1595
1596 tx->queue_status = MVNETA_QUEUE_IDLE;
1597 tx->queue_hung = FALSE;
1598 return (0);
1599 }
1600
1601 STATIC __inline void
mvneta_rx_lockq(struct mvneta_softc *sc, int q)
1603 {
1604
1605 DASSERT(q >= 0);
1606 DASSERT(q < MVNETA_RX_QNUM_MAX);
1607 mtx_lock(&sc->rx_ring[q].ring_mtx);
1608 }
1609
1610 STATIC __inline void
mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
1612 {
1613
1614 DASSERT(q >= 0);
1615 DASSERT(q < MVNETA_RX_QNUM_MAX);
1616 mtx_unlock(&sc->rx_ring[q].ring_mtx);
1617 }
1618
1619 STATIC __inline int __unused
mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
1621 {
1622
1623 DASSERT(q >= 0);
1624 DASSERT(q < MVNETA_TX_QNUM_MAX);
1625 return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
1626 }
1627
1628 STATIC __inline void
mvneta_tx_lockq(struct mvneta_softc *sc, int q)
1630 {
1631
1632 DASSERT(q >= 0);
1633 DASSERT(q < MVNETA_TX_QNUM_MAX);
1634 mtx_lock(&sc->tx_ring[q].ring_mtx);
1635 }
1636
1637 STATIC __inline void
mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
1639 {
1640
1641 DASSERT(q >= 0);
1642 DASSERT(q < MVNETA_TX_QNUM_MAX);
1643 mtx_unlock(&sc->tx_ring[q].ring_mtx);
1644 }
1645
1646 /*
1647 * Interrupt Handlers
1648 */
1649 STATIC void
mvneta_disable_intr(struct mvneta_softc *sc)
1651 {
1652
1653 MVNETA_WRITE(sc, MVNETA_EUIM, 0);
1654 MVNETA_WRITE(sc, MVNETA_EUIC, 0);
1655 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
1656 MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
1657 MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
1658 MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
1659 MVNETA_WRITE(sc, MVNETA_PMIM, 0);
1660 MVNETA_WRITE(sc, MVNETA_PMIC, 0);
1661 MVNETA_WRITE(sc, MVNETA_PIE, 0);
1662 }
1663
1664 STATIC void
mvneta_enable_intr(struct mvneta_softc *sc)
1666 {
1667 uint32_t reg;
1668
1669 /* Enable Summary Bit to check all interrupt cause. */
1670 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1671 reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
1672 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1673
1674 if (sc->use_inband_status) {
1675 /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
1676 MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
1677 MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);
1678 }
1679
1680 /* Enable All Queue Interrupt */
1681 reg = MVNETA_READ(sc, MVNETA_PIE);
1682 reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
1683 reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
1684 MVNETA_WRITE(sc, MVNETA_PIE, reg);
1685 }
1686
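/*
 * Aggregated RXTX_TH interrupt handler: acknowledge the cause bits,
 * service miscellaneous (link) events when in-band status is used, and
 * process received packets on queue 0.
 */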
1687 STATIC void
mvneta_rxtxth_intr(void *arg)
1689 {
1690 struct mvneta_softc *sc;
1691 struct ifnet *ifp;
1692 uint32_t ic, queues;
1693
1694 sc = arg;
1695 ifp = sc->ifp;
1696 #ifdef MVNETA_KTR
1697 CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", ifp->if_xname);
1698 #endif
1699 ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
1700 if (ic == 0)
1701 return;
1702 MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);
1703
/* Ack the maintenance interrupt first. */
1705 if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
1706 sc->use_inband_status)) {
1707 mvneta_sc_lock(sc);
1708 mvneta_misc_intr(sc);
1709 mvneta_sc_unlock(sc);
1710 }
1711 if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
1712 return;
1713 /* RxTxTH interrupt */
1714 queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
1715 if (__predict_true(queues)) {
1716 #ifdef MVNETA_KTR
1717 CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", ifp->if_xname);
1718 #endif
/* At the moment the driver supports only one Rx queue. */
1720 DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
1721 mvneta_rx(sc, 0, 0);
1722 }
1723 }
1724
1725 STATIC int
mvneta_misc_intr(struct mvneta_softc *sc)
1727 {
1728 uint32_t ic;
1729 int claimed = 0;
1730
1731 #ifdef MVNETA_KTR
1732 CTR1(KTR_SPARE2, "%s got MISC_INTR", sc->ifp->if_xname);
1733 #endif
1734 KASSERT_SC_MTX(sc);
1735
1736 for (;;) {
1737 ic = MVNETA_READ(sc, MVNETA_PMIC);
1738 ic &= MVNETA_READ(sc, MVNETA_PMIM);
1739 if (ic == 0)
1740 break;
1741 MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);
1742 claimed = 1;
1743
1744 if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
1745 MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
1746 mvneta_link_isr(sc);
1747 }
1748 return (claimed);
1749 }
1750
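/*
 * Periodic housekeeping: drain completed Tx work, refresh the MIB
 * counters, poll the PHY via mii_tick(), refill Rx rings that ran dry,
 * and check the Tx watchdog.  Reschedules itself every hz ticks unless
 * a watchdog timeout triggers a reinitialization.
 */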
1751 STATIC void
mvneta_tick(void *arg)
1753 {
1754 struct mvneta_softc *sc;
1755 struct mvneta_tx_ring *tx;
1756 struct mvneta_rx_ring *rx;
1757 int q;
1758 uint32_t fc_prev, fc_curr;
1759
1760 sc = arg;
1761
1762 /*
1763 * This is done before mib update to get the right stats
1764 * for this tick.
1765 */
1766 mvneta_tx_drain(sc);
1767
1768 /* Extract previous flow-control frame received counter. */
1769 fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
1770 /* Read mib registers (clear by read). */
1771 mvneta_update_mib(sc);
1772 /* Extract current flow-control frame received counter. */
1773 fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
1774
1775
1776 if (sc->phy_attached && sc->ifp->if_flags & IFF_UP) {
1777 mvneta_sc_lock(sc);
1778 mii_tick(sc->mii);
1779
1780 /* Adjust MAC settings */
1781 mvneta_adjust_link(sc);
1782 mvneta_sc_unlock(sc);
1783 }
1784
/*
 * If the Rx handler was unable to refill the Rx queue before returning,
 * the ring may be left without mbufs and with no other path to the
 * refill function, so refill it here.
 */
1789 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1790 rx = MVNETA_RX_RING(sc, q);
1791 if (rx->needs_refill == TRUE) {
1792 mvneta_rx_lockq(sc, q);
1793 mvneta_rx_queue_refill(sc, q);
1794 mvneta_rx_unlockq(sc, q);
1795 }
1796 }
1797
/*
 * Watchdog:
 * - check whether a queue is marked as hung.
 * - ignore the hung status if we received pause frames, as the
 *   hardware may have paused packet transmission.
 */
1804 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
/*
 * We should take the queue lock, but since we only read the
 * queue status it is safe without it; at worst we misdetect
 * the queue status for one tick.
 */
1810 tx = MVNETA_TX_RING(sc, q);
1811
1812 if (tx->queue_hung && (fc_curr - fc_prev) == 0)
1813 goto timeout;
1814 }
1815
1816 callout_schedule(&sc->tick_ch, hz);
1817 return;
1818
1819 timeout:
1820 if_printf(sc->ifp, "watchdog timeout\n");
1821
1822 mvneta_sc_lock(sc);
1823 sc->counter_watchdog++;
1824 sc->counter_watchdog_mib++;
1825 /* Trigger reinitialize sequence. */
1826 mvneta_stop_locked(sc);
1827 mvneta_init_locked(sc);
1828 mvneta_sc_unlock(sc);
1829 }
1830
1831 STATIC void
mvneta_qflush(struct ifnet *ifp)
1833 {
1834 #ifdef MVNETA_MULTIQUEUE
1835 struct mvneta_softc *sc;
1836 struct mvneta_tx_ring *tx;
1837 struct mbuf *m;
1838 size_t q;
1839
1840 sc = ifp->if_softc;
1841
1842 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1843 tx = MVNETA_TX_RING(sc, q);
1844 mvneta_tx_lockq(sc, q);
1845 while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
1846 m_freem(m);
1847 mvneta_tx_unlockq(sc, q);
1848 }
1849 #endif
1850 if_qflush(ifp);
1851 }
1852
1853 STATIC void
mvneta_tx_task(void *arg, int pending)
1855 {
1856 struct mvneta_softc *sc;
1857 struct mvneta_tx_ring *tx;
1858 struct ifnet *ifp;
1859 int error;
1860
1861 tx = arg;
1862 ifp = tx->ifp;
1863 sc = ifp->if_softc;
1864
1865 mvneta_tx_lockq(sc, tx->qidx);
1866 error = mvneta_xmit_locked(sc, tx->qidx);
1867 mvneta_tx_unlockq(sc, tx->qidx);
1868
1869 /* Try again */
1870 if (__predict_false(error != 0 && error != ENETDOWN)) {
1871 pause("mvneta_tx_task_sleep", 1);
1872 taskqueue_enqueue(tx->taskq, &tx->task);
1873 }
1874 }
1875
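/*
 * Fast transmit path (queue lock held): drop the frame if the queue is
 * disabled, reclaim completed descriptors once the used count passes the
 * threshold, refuse with ENOBUFS when the ring is nearly full, then hand
 * the mbuf to mvneta_tx_queue() and arm the watchdog.
 */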
1876 STATIC int
mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
1878 {
1879 struct mvneta_tx_ring *tx;
1880 struct ifnet *ifp;
1881 int error;
1882
1883 KASSERT_TX_MTX(sc, q);
1884 tx = MVNETA_TX_RING(sc, q);
1885 error = 0;
1886
1887 ifp = sc->ifp;
1888
/* Don't enqueue the packet if the queue is disabled. */
1890 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
1891 m_freem(*m);
1892 *m = NULL;
1893 return (ENETDOWN);
1894 }
1895
1896 /* Reclaim mbuf if above threshold. */
1897 if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
1898 mvneta_tx_queue_complete(sc, q);
1899
1900 /* Do not call transmit path if queue is already too full. */
1901 if (__predict_false(tx->used >
1902 MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))
1903 return (ENOBUFS);
1904
1905 error = mvneta_tx_queue(sc, m, q);
1906 if (__predict_false(error != 0))
1907 return (error);
1908
1909 /* Send a copy of the frame to the BPF listener */
1910 ETHER_BPF_MTAP(ifp, *m);
1911
1912 /* Set watchdog on */
1913 tx->watchdog_time = ticks;
1914 tx->queue_status = MVNETA_QUEUE_WORKING;
1915
1916 return (error);
1917 }
1918
1919 #ifdef MVNETA_MULTIQUEUE
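/*
 * if_transmit method: select a TX queue from the mbuf flow ID, try the
 * fast path when the buf_ring is empty and the queue lock is free,
 * otherwise enqueue the frame and let the taskqueue drain it.
 */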
1920 STATIC int
1921 mvneta_transmit(struct ifnet *ifp, struct mbuf *m)
1922 {
1923 struct mvneta_softc *sc;
1924 struct mvneta_tx_ring *tx;
1925 int error;
1926 int q;
1927
1928 sc = ifp->if_softc;
1929
1930 	/* Use the default queue if there is no flow ID, as the thread can migrate. */
1931 if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
1932 q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
1933 else
1934 q = 0;
1935
1936 tx = MVNETA_TX_RING(sc, q);
1937
1938 	/* If the buf_ring is full, start transmitting immediately. */
1939 if (buf_ring_full(tx->br)) {
1940 mvneta_tx_lockq(sc, q);
1941 mvneta_xmit_locked(sc, q);
1942 mvneta_tx_unlockq(sc, q);
1943 }
1944
1945 /*
1946 * If the buf_ring is empty we will not reorder packets.
1947 * If the lock is available transmit without using buf_ring.
1948 */
1949 if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
1950 error = mvneta_xmitfast_locked(sc, q, &m);
1951 mvneta_tx_unlockq(sc, q);
1952 if (__predict_true(error == 0))
1953 return (0);
1954
1955 /* Transmit can fail in fastpath. */
1956 if (__predict_false(m == NULL))
1957 return (error);
1958 }
1959
1960 /* Enqueue then schedule taskqueue. */
1961 error = drbr_enqueue(ifp, tx->br, m);
1962 if (__predict_false(error != 0))
1963 return (error);
1964
1965 taskqueue_enqueue(tx->taskq, &tx->task);
1966 return (0);
1967 }
1968
1969 STATIC int
1970 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
1971 {
1972 struct ifnet *ifp;
1973 struct mvneta_tx_ring *tx;
1974 struct mbuf *m;
1975 int error;
1976
1977 KASSERT_TX_MTX(sc, q);
1978 ifp = sc->ifp;
1979 tx = MVNETA_TX_RING(sc, q);
1980 error = 0;
1981
1982 while ((m = drbr_peek(ifp, tx->br)) != NULL) {
1983 error = mvneta_xmitfast_locked(sc, q, &m);
1984 if (__predict_false(error != 0)) {
1985 if (m != NULL)
1986 drbr_putback(ifp, tx->br, m);
1987 else
1988 drbr_advance(ifp, tx->br);
1989 break;
1990 }
1991 drbr_advance(ifp, tx->br);
1992 }
1993
1994 return (error);
1995 }
1996 #else /* !MVNETA_MULTIQUEUE */
1997 STATIC void
1998 mvneta_start(struct ifnet *ifp)
1999 {
2000 struct mvneta_softc *sc;
2001 struct mvneta_tx_ring *tx;
2002 int error;
2003
2004 sc = ifp->if_softc;
2005 tx = MVNETA_TX_RING(sc, 0);
2006
2007 mvneta_tx_lockq(sc, 0);
2008 error = mvneta_xmit_locked(sc, 0);
2009 mvneta_tx_unlockq(sc, 0);
2010 /* Handle retransmit in the background taskq. */
2011 if (__predict_false(error != 0 && error != ENETDOWN))
2012 taskqueue_enqueue(tx->taskq, &tx->task);
2013 }
2014
2015 STATIC int
2016 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
2017 {
2018 struct ifnet *ifp;
2019 struct mvneta_tx_ring *tx;
2020 struct mbuf *m;
2021 int error;
2022
2023 KASSERT_TX_MTX(sc, q);
2024 ifp = sc->ifp;
2025 tx = MVNETA_TX_RING(sc, 0);
2026 error = 0;
2027
2028 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2029 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
2030 if (m == NULL)
2031 break;
2032
2033 error = mvneta_xmitfast_locked(sc, q, &m);
2034 if (__predict_false(error != 0)) {
2035 if (m != NULL)
2036 IFQ_DRV_PREPEND(&ifp->if_snd, m);
2037 break;
2038 }
2039 }
2040
2041 return (error);
2042 }
2043 #endif
2044
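/*
 * ioctl handler: interface flags, capabilities (checksum offload, LRO),
 * media selection and MTU changes.
 */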
2045 STATIC int
2046 mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2047 {
2048 struct mvneta_softc *sc;
2049 struct mvneta_rx_ring *rx;
2050 struct ifreq *ifr;
2051 int error, mask;
2052 uint32_t flags;
2053 int q;
2054
2055 error = 0;
2056 sc = ifp->if_softc;
2057 ifr = (struct ifreq *)data;
2058 switch (cmd) {
2059 case SIOCSIFFLAGS:
2060 mvneta_sc_lock(sc);
2061 if (ifp->if_flags & IFF_UP) {
2062 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2063 flags = ifp->if_flags ^ sc->mvneta_if_flags;
2064
2065 if (flags != 0)
2066 sc->mvneta_if_flags = ifp->if_flags;
2067
2068 if ((flags & IFF_PROMISC) != 0)
2069 mvneta_filter_setup(sc);
2070 } else {
2071 mvneta_init_locked(sc);
2072 sc->mvneta_if_flags = ifp->if_flags;
2073 if (sc->phy_attached)
2074 mii_mediachg(sc->mii);
2075 mvneta_sc_unlock(sc);
2076 break;
2077 }
2078 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2079 mvneta_stop_locked(sc);
2080
2081 sc->mvneta_if_flags = ifp->if_flags;
2082 mvneta_sc_unlock(sc);
2083 break;
2084 case SIOCSIFCAP:
2085 if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU &&
2086 ifr->ifr_reqcap & IFCAP_TXCSUM)
2087 ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
2088 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
2089 if (mask & IFCAP_HWCSUM) {
2090 ifp->if_capenable &= ~IFCAP_HWCSUM;
2091 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
2092 if (ifp->if_capenable & IFCAP_TXCSUM)
2093 ifp->if_hwassist = CSUM_IP | CSUM_TCP |
2094 CSUM_UDP;
2095 else
2096 ifp->if_hwassist = 0;
2097 }
2098 if (mask & IFCAP_LRO) {
2099 mvneta_sc_lock(sc);
2100 ifp->if_capenable ^= IFCAP_LRO;
2101 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2102 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2103 rx = MVNETA_RX_RING(sc, q);
2104 rx->lro_enabled = !rx->lro_enabled;
2105 }
2106 }
2107 mvneta_sc_unlock(sc);
2108 }
2109 VLAN_CAPABILITIES(ifp);
2110 break;
2111 case SIOCSIFMEDIA:
2112 if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
2113 IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
2114 (ifr->ifr_media & IFM_FDX) == 0) {
2115 device_printf(sc->dev,
2116 "%s half-duplex unsupported\n",
2117 IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?
2118 "1000Base-T" :
2119 "2500Base-T");
2120 error = EINVAL;
2121 break;
2122 }
2123 case SIOCGIFMEDIA: /* FALLTHROUGH */
2124 case SIOCGIFXMEDIA:
2125 if (!sc->phy_attached)
2126 error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
2127 cmd);
2128 else
2129 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
2130 cmd);
2131 break;
2132 case SIOCSIFMTU:
2133 if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
2134 MVNETA_ETHER_SIZE) {
2135 error = EINVAL;
2136 } else {
2137 ifp->if_mtu = ifr->ifr_mtu;
2138 mvneta_sc_lock(sc);
2139 if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU) {
2140 ifp->if_capenable &= ~IFCAP_TXCSUM;
2141 ifp->if_hwassist = 0;
2142 } else {
2143 ifp->if_capenable |= IFCAP_TXCSUM;
2144 ifp->if_hwassist = CSUM_IP | CSUM_TCP |
2145 CSUM_UDP;
2146 }
2147
2148 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2149 /* Trigger reinitialize sequence */
2150 mvneta_stop_locked(sc);
2151 mvneta_init_locked(sc);
2152 }
2153 mvneta_sc_unlock(sc);
2154 }
2155 break;
2156
2157 default:
2158 error = ether_ioctl(ifp, cmd, data);
2159 break;
2160 }
2161
2162 return (error);
2163 }
2164
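/*
 * Bring the port up: program the MAC address and RX filters, start the
 * DMA engine, enable the port, refill the RX rings and unmask interrupts.
 * Called with the softc lock held.
 */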
2165 STATIC void
2166 mvneta_init_locked(void *arg)
2167 {
2168 struct mvneta_softc *sc;
2169 struct ifnet *ifp;
2170 uint32_t reg;
2171 int q, cpu;
2172
2173 sc = arg;
2174 ifp = sc->ifp;
2175
2176 if (!device_is_attached(sc->dev) ||
2177 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2178 return;
2179
2180 mvneta_disable_intr(sc);
2181 callout_stop(&sc->tick_ch);
2182
2183 /* Get the latest mac address */
2184 bcopy(IF_LLADDR(ifp), sc->enaddr, ETHER_ADDR_LEN);
2185 mvneta_set_mac_address(sc, sc->enaddr);
2186 mvneta_filter_setup(sc);
2187
2188 /* Start DMA Engine */
2189 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
2190 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
2191 MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
2192
2193 /* Enable port */
2194 reg = MVNETA_READ(sc, MVNETA_PMACC0);
2195 reg |= MVNETA_PMACC0_PORTEN;
2196 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
2197
2198 	/* Allow access to each TXQ/RXQ from both CPUs */
2199 for (cpu = 0; cpu < mp_ncpus; ++cpu)
2200 MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
2201 MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);
2202
2203 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2204 mvneta_rx_lockq(sc, q);
2205 mvneta_rx_queue_refill(sc, q);
2206 mvneta_rx_unlockq(sc, q);
2207 }
2208
2209 if (!sc->phy_attached)
2210 mvneta_linkup(sc);
2211
2212 /* Enable interrupt */
2213 mvneta_enable_intr(sc);
2214
2215 	/* Restart the periodic tick callout */
2216 callout_schedule(&sc->tick_ch, hz);
2217
2218 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2219 }
2220
2221 STATIC void
2222 mvneta_init(void *arg)
2223 {
2224 struct mvneta_softc *sc;
2225
2226 sc = arg;
2227 mvneta_sc_lock(sc);
2228 mvneta_init_locked(sc);
2229 if (sc->phy_attached)
2230 mii_mediachg(sc->mii);
2231 mvneta_sc_unlock(sc);
2232 }
2233
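/*
 * Stop the port: mask interrupts, force the link down, clear the port
 * enable bit, flush the RX/TX rings and hold the DMA engine in reset.
 * Called with the softc lock held.
 */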
2234 /* ARGSUSED */
2235 STATIC void
2236 mvneta_stop_locked(struct mvneta_softc *sc)
2237 {
2238 struct ifnet *ifp;
2239 struct mvneta_rx_ring *rx;
2240 struct mvneta_tx_ring *tx;
2241 uint32_t reg;
2242 int q;
2243
2244 ifp = sc->ifp;
2245 if (ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2246 return;
2247
2248 mvneta_disable_intr(sc);
2249
2250 callout_stop(&sc->tick_ch);
2251
2252 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2253
2254 /* Link down */
2255 if (sc->linkup == TRUE)
2256 mvneta_linkdown(sc);
2257
2258 /* Reset the MAC Port Enable bit */
2259 reg = MVNETA_READ(sc, MVNETA_PMACC0);
2260 reg &= ~MVNETA_PMACC0_PORTEN;
2261 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
2262
2263 	/* Flush each of the RX queues */
2264 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2265 rx = MVNETA_RX_RING(sc, q);
2266
2267 mvneta_rx_lockq(sc, q);
2268 mvneta_ring_flush_rx_queue(sc, q);
2269 mvneta_rx_unlockq(sc, q);
2270 }
2271
2272 /*
2273 * Hold Reset state of DMA Engine
2274 * (must write 0x0 to restart it)
2275 */
2276 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
2277 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
2278
2279 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2280 tx = MVNETA_TX_RING(sc, q);
2281
2282 mvneta_tx_lockq(sc, q);
2283 mvneta_ring_flush_tx_queue(sc, q);
2284 mvneta_tx_unlockq(sc, q);
2285 }
2286 }
2287
2288 STATIC void
2289 mvneta_stop(struct mvneta_softc *sc)
2290 {
2291
2292 mvneta_sc_lock(sc);
2293 mvneta_stop_locked(sc);
2294 mvneta_sc_unlock(sc);
2295 }
2296
2297 STATIC int
2298 mvneta_mediachange(struct ifnet *ifp)
2299 {
2300 struct mvneta_softc *sc;
2301
2302 sc = ifp->if_softc;
2303
2304 if (!sc->phy_attached && !sc->use_inband_status) {
2305 /* We shouldn't be here */
2306 if_printf(ifp, "Cannot change media in fixed-link mode!\n");
2307 return (0);
2308 }
2309
2310 if (sc->use_inband_status) {
2311 mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
2312 return (0);
2313 }
2314
2315 mvneta_sc_lock(sc);
2316
2317 /* Update PHY */
2318 mii_mediachg(sc->mii);
2319
2320 mvneta_sc_unlock(sc);
2321
2322 return (0);
2323 }
2324
2325 STATIC void
2326 mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
2327 {
2328 uint32_t psr;
2329
2330 psr = MVNETA_READ(sc, MVNETA_PSR);
2331
2332 /* Speed */
2333 if (psr & MVNETA_PSR_GMIISPEED)
2334 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
2335 else if (psr & MVNETA_PSR_MIISPEED)
2336 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
2337 else if (psr & MVNETA_PSR_LINKUP)
2338 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);
2339
2340 /* Duplex */
2341 if (psr & MVNETA_PSR_FULLDX)
2342 ifmr->ifm_active |= IFM_FDX;
2343
2344 /* Link */
2345 ifmr->ifm_status = IFM_AVALID;
2346 if (psr & MVNETA_PSR_LINKUP)
2347 ifmr->ifm_status |= IFM_ACTIVE;
2348 }
2349
2350 STATIC void
2351 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2352 {
2353 struct mvneta_softc *sc;
2354 struct mii_data *mii;
2355
2356 sc = ifp->if_softc;
2357
2358 if (!sc->phy_attached && !sc->use_inband_status) {
2359 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
2360 return;
2361 }
2362
2363 mvneta_sc_lock(sc);
2364
2365 if (sc->use_inband_status) {
2366 mvneta_get_media(sc, ifmr);
2367 mvneta_sc_unlock(sc);
2368 return;
2369 }
2370
2371 mii = sc->mii;
2372 mii_pollstat(mii);
2373
2374 ifmr->ifm_active = mii->mii_media_active;
2375 ifmr->ifm_status = mii->mii_media_status;
2376
2377 mvneta_sc_unlock(sc);
2378 }
2379
2380 /*
2381 * Link State Notify
2382 */
2383 STATIC void
2384 mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
2385 {
2386 int reg;
2387
2388 if (enable) {
2389 reg = MVNETA_READ(sc, MVNETA_PANC);
2390 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
2391 MVNETA_PANC_ANFCEN);
2392 reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
2393 MVNETA_PANC_INBANDANEN;
2394 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2395
2396 reg = MVNETA_READ(sc, MVNETA_PMACC2);
2397 reg |= MVNETA_PMACC2_INBANDANMODE;
2398 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
2399
2400 reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
2401 reg |= MVNETA_PSOMSCD_ENABLE;
2402 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
2403 } else {
2404 reg = MVNETA_READ(sc, MVNETA_PANC);
2405 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
2406 MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
2407 MVNETA_PANC_INBANDANEN);
2408 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2409
2410 reg = MVNETA_READ(sc, MVNETA_PMACC2);
2411 reg &= ~MVNETA_PMACC2_INBANDANMODE;
2412 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
2413
2414 reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
2415 reg &= ~MVNETA_PSOMSCD_ENABLE;
2416 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
2417 }
2418 }
2419
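/*
 * Apply a media selection: reprogram in-band autonegotiation if used,
 * update EEE and flow-control settings and force speed/duplex for fixed
 * media.  The port is restarted if it was running.
 */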
2420 STATIC int
2421 mvneta_update_media(struct mvneta_softc *sc, int media)
2422 {
2423 int reg, err;
2424 boolean_t running;
2425
2426 err = 0;
2427
2428 mvneta_sc_lock(sc);
2429
2430 mvneta_linkreset(sc);
2431
2432 running = (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
2433 if (running)
2434 mvneta_stop_locked(sc);
2435
2436 sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);
2437
2438 if (sc->use_inband_status)
2439 mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO);
2440
2441 mvneta_update_eee(sc);
2442 mvneta_update_fc(sc);
2443
2444 if (IFM_SUBTYPE(media) != IFM_AUTO) {
2445 reg = MVNETA_READ(sc, MVNETA_PANC);
2446 reg &= ~(MVNETA_PANC_SETGMIISPEED |
2447 MVNETA_PANC_SETMIISPEED |
2448 MVNETA_PANC_SETFULLDX);
2449 if (IFM_SUBTYPE(media) == IFM_1000_T ||
2450 IFM_SUBTYPE(media) == IFM_2500_T) {
2451 if ((media & IFM_FDX) == 0) {
2452 device_printf(sc->dev,
2453 "%s half-duplex unsupported\n",
2454 IFM_SUBTYPE(media) == IFM_1000_T ?
2455 "1000Base-T" :
2456 "2500Base-T");
2457 err = EINVAL;
2458 goto out;
2459 }
2460 reg |= MVNETA_PANC_SETGMIISPEED;
2461 } else if (IFM_SUBTYPE(media) == IFM_100_TX)
2462 reg |= MVNETA_PANC_SETMIISPEED;
2463
2464 if (media & IFM_FDX)
2465 reg |= MVNETA_PANC_SETFULLDX;
2466
2467 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2468 }
2469 out:
2470 if (running)
2471 mvneta_init_locked(sc);
2472 mvneta_sc_unlock(sc);
2473 return (err);
2474 }
2475
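/*
 * Synchronize MAC speed/duplex and link state with the current PHY status
 * reported by mii(4).
 */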
2476 STATIC void
2477 mvneta_adjust_link(struct mvneta_softc *sc)
2478 {
2479 boolean_t phy_linkup;
2480 int reg;
2481
2482 /* Update eee/fc */
2483 mvneta_update_eee(sc);
2484 mvneta_update_fc(sc);
2485
2486 /* Check for link change */
2487 phy_linkup = (sc->mii->mii_media_status &
2488 (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);
2489
2490 if (sc->linkup != phy_linkup)
2491 mvneta_linkupdate(sc, phy_linkup);
2492
2493 /* Don't update media on disabled link */
2494 if (!phy_linkup)
2495 return;
2496
2497 /* Check for media type change */
2498 if (sc->mvneta_media != sc->mii->mii_media_active) {
2499 sc->mvneta_media = sc->mii->mii_media_active;
2500
2501 reg = MVNETA_READ(sc, MVNETA_PANC);
2502 reg &= ~(MVNETA_PANC_SETGMIISPEED |
2503 MVNETA_PANC_SETMIISPEED |
2504 MVNETA_PANC_SETFULLDX);
2505 if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
2506 IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
2507 reg |= MVNETA_PANC_SETGMIISPEED;
2508 } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
2509 reg |= MVNETA_PANC_SETMIISPEED;
2510
2511 if (sc->mvneta_media & IFM_FDX)
2512 reg |= MVNETA_PANC_SETFULLDX;
2513
2514 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2515 }
2516 }
2517
2518 STATIC void
2519 mvneta_link_isr(struct mvneta_softc *sc)
2520 {
2521 int linkup;
2522
2523 KASSERT_SC_MTX(sc);
2524
2525 linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
2526 if (sc->linkup == linkup)
2527 return;
2528
2529 if (linkup == TRUE)
2530 mvneta_linkup(sc);
2531 else
2532 mvneta_linkdown(sc);
2533
2534 #ifdef DEBUG
2535 log(LOG_DEBUG,
2536 "%s: link %s\n", device_xname(sc->dev), linkup ? "up" : "down");
2537 #endif
2538 }
2539
2540 STATIC void
2541 mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
2542 {
2543
2544 KASSERT_SC_MTX(sc);
2545
2546 if (linkup == TRUE)
2547 mvneta_linkup(sc);
2548 else
2549 mvneta_linkdown(sc);
2550
2551 #ifdef DEBUG
2552 log(LOG_DEBUG,
2553 "%s: link %s\n", device_xname(sc->dev), linkup ? "up" : "down");
2554 #endif
2555 }
2556
2557 STATIC void
2558 mvneta_update_eee(struct mvneta_softc *sc)
2559 {
2560 uint32_t reg;
2561
2562 KASSERT_SC_MTX(sc);
2563
2564 /* set EEE parameters */
2565 reg = MVNETA_READ(sc, MVNETA_LPIC1);
2566 if (sc->cf_lpi)
2567 reg |= MVNETA_LPIC1_LPIRE;
2568 else
2569 reg &= ~MVNETA_LPIC1_LPIRE;
2570 MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
2571 }
2572
2573 STATIC void
2574 mvneta_update_fc(struct mvneta_softc *sc)
2575 {
2576 uint32_t reg;
2577
2578 KASSERT_SC_MTX(sc);
2579
2580 reg = MVNETA_READ(sc, MVNETA_PANC);
2581 if (sc->cf_fc) {
2582 		/* Enable flow control negotiation */
2583 reg |= MVNETA_PANC_PAUSEADV;
2584 reg |= MVNETA_PANC_ANFCEN;
2585 } else {
2586 /* Disable flow control negotiation */
2587 reg &= ~MVNETA_PANC_PAUSEADV;
2588 reg &= ~MVNETA_PANC_ANFCEN;
2589 }
2590
2591 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2592 }
2593
2594 STATIC void
2595 mvneta_linkup(struct mvneta_softc *sc)
2596 {
2597 uint32_t reg;
2598
2599 KASSERT_SC_MTX(sc);
2600
2601 if (!sc->use_inband_status) {
2602 reg = MVNETA_READ(sc, MVNETA_PANC);
2603 reg |= MVNETA_PANC_FORCELINKPASS;
2604 reg &= ~MVNETA_PANC_FORCELINKFAIL;
2605 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2606 }
2607
2608 mvneta_qflush(sc->ifp);
2609 mvneta_portup(sc);
2610 sc->linkup = TRUE;
2611 if_link_state_change(sc->ifp, LINK_STATE_UP);
2612 }
2613
2614 STATIC void
2615 mvneta_linkdown(struct mvneta_softc *sc)
2616 {
2617 uint32_t reg;
2618
2619 KASSERT_SC_MTX(sc);
2620
2621 if (!sc->use_inband_status) {
2622 reg = MVNETA_READ(sc, MVNETA_PANC);
2623 reg &= ~MVNETA_PANC_FORCELINKPASS;
2624 reg |= MVNETA_PANC_FORCELINKFAIL;
2625 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2626 }
2627
2628 mvneta_portdown(sc);
2629 mvneta_qflush(sc->ifp);
2630 sc->linkup = FALSE;
2631 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2632 }
2633
2634 STATIC void
2635 mvneta_linkreset(struct mvneta_softc *sc)
2636 {
2637 struct mii_softc *mii;
2638
2639 if (sc->phy_attached) {
2640 /* Force reset PHY */
2641 mii = LIST_FIRST(&sc->mii->mii_phys);
2642 if (mii)
2643 mii_phy_reset(mii);
2644 }
2645 }
2646
2647 /*
2648 * Tx Subroutines
2649 */
2650 STATIC int
2651 mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
2652 {
2653 struct ifnet *ifp;
2654 bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
2655 struct mbuf *mtmp, *mbuf;
2656 struct mvneta_tx_ring *tx;
2657 struct mvneta_buf *txbuf;
2658 struct mvneta_tx_desc *t;
2659 uint32_t ptxsu;
2660 int start, used, error, i, txnsegs;
2661
2662 mbuf = *mbufp;
2663 tx = MVNETA_TX_RING(sc, q);
2664 DASSERT(tx->used >= 0);
2665 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2666 t = NULL;
2667 ifp = sc->ifp;
2668
2669 if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
2670 mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
2671 if (mbuf == NULL) {
2672 tx->drv_error++;
2673 *mbufp = NULL;
2674 return (ENOBUFS);
2675 }
2676 mbuf->m_flags &= ~M_VLANTAG;
2677 *mbufp = mbuf;
2678 }
2679
2680 if (__predict_false(mbuf->m_next != NULL &&
2681 (mbuf->m_pkthdr.csum_flags &
2682 (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
2683 if (M_WRITABLE(mbuf) == 0) {
2684 mtmp = m_dup(mbuf, M_NOWAIT);
2685 m_freem(mbuf);
2686 if (mtmp == NULL) {
2687 tx->drv_error++;
2688 *mbufp = NULL;
2689 return (ENOBUFS);
2690 }
2691 *mbufp = mbuf = mtmp;
2692 }
2693 }
2694
2695 /* load mbuf using dmamap of 1st descriptor */
2696 txbuf = &tx->txbuf[tx->cpu];
2697 error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
2698 txbuf->dmap, mbuf, txsegs, &txnsegs,
2699 BUS_DMA_NOWAIT);
2700 if (__predict_false(error != 0)) {
2701 #ifdef MVNETA_KTR
2702 CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", ifp->if_xname, q, error);
2703 #endif
2704 /* This is the only recoverable error (except EFBIG). */
2705 if (error != ENOMEM) {
2706 tx->drv_error++;
2707 m_freem(mbuf);
2708 *mbufp = NULL;
2709 return (ENOBUFS);
2710 }
2711 return (error);
2712 }
2713
2714 if (__predict_false(txnsegs <= 0
2715 || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
2716 		/* not enough descriptors, or the mbuf is broken */
2717 #ifdef MVNETA_KTR
2718 CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
2719 ifp->if_xname, q, txnsegs);
2720 #endif
2721 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2722 return (ENOBUFS);
2723 }
2724 DASSERT(txbuf->m == NULL);
2725
2726 /* remember mbuf using 1st descriptor */
2727 txbuf->m = mbuf;
2728 bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
2729 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2730
2731 /* load to tx descriptors */
2732 start = tx->cpu;
2733 used = 0;
2734 for (i = 0; i < txnsegs; i++) {
2735 t = &tx->desc[tx->cpu];
2736 t->command = 0;
2737 t->l4ichk = 0;
2738 t->flags = 0;
2739 if (__predict_true(i == 0)) {
2740 /* 1st descriptor */
2741 t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
2742 t->command |= MVNETA_TX_CMD_F;
2743 mvneta_tx_set_csumflag(ifp, t, mbuf);
2744 }
2745 t->bufptr_pa = txsegs[i].ds_addr;
2746 t->bytecnt = txsegs[i].ds_len;
2747 tx->cpu = tx_counter_adv(tx->cpu, 1);
2748
2749 tx->used++;
2750 used++;
2751 }
2752 /* t is last descriptor here */
2753 DASSERT(t != NULL);
2754 t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
2755
2756 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2757 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2758
2759 while (__predict_false(used > 255)) {
2760 ptxsu = MVNETA_PTXSU_NOWD(255);
2761 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2762 used -= 255;
2763 }
2764 if (__predict_true(used > 0)) {
2765 ptxsu = MVNETA_PTXSU_NOWD(used);
2766 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2767 }
2768 return (0);
2769 }
2770
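/*
 * Translate mbuf checksum-offload requests (CSUM_IP/CSUM_TCP/CSUM_UDP)
 * into TX descriptor command bits.  Only IPv4 frames are offloaded.
 */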
2771 STATIC void
2772 mvneta_tx_set_csumflag(struct ifnet *ifp,
2773 struct mvneta_tx_desc *t, struct mbuf *m)
2774 {
2775 struct ether_header *eh;
2776 int csum_flags;
2777 uint32_t iphl, ipoff;
2778 struct ip *ip;
2779
2780 iphl = ipoff = 0;
2781 csum_flags = ifp->if_hwassist & m->m_pkthdr.csum_flags;
2782 eh = mtod(m, struct ether_header *);
2783 switch (ntohs(eh->ether_type)) {
2784 case ETHERTYPE_IP:
2785 ipoff = ETHER_HDR_LEN;
2786 break;
2787 case ETHERTYPE_IPV6:
2788 return;
2789 case ETHERTYPE_VLAN:
2790 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2791 break;
2792 }
2793
2794 if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
2795 ip = (struct ip *)(m->m_data + ipoff);
2796 iphl = ip->ip_hl<<2;
2797 t->command |= MVNETA_TX_CMD_L3_IP4;
2798 } else {
2799 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2800 return;
2801 }
2802
2803
2804 /* L3 */
2805 if (csum_flags & CSUM_IP) {
2806 t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
2807 }
2808
2809 /* L4 */
2810 if (csum_flags & CSUM_IP_TCP) {
2811 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2812 t->command |= MVNETA_TX_CMD_L4_TCP;
2813 } else if (csum_flags & CSUM_IP_UDP) {
2814 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2815 t->command |= MVNETA_TX_CMD_L4_UDP;
2816 } else
2817 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2818
2819 t->l4ichk = 0;
2820 t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2821 t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
2822 }
2823
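/*
 * Reclaim TX descriptors the hardware has finished with: unload the DMA
 * maps, free the transmitted mbufs and return the descriptors to the
 * ring.  Also flags a hung queue for the watchdog.
 */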
2824 STATIC void
2825 mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
2826 {
2827 struct mvneta_tx_ring *tx;
2828 struct mvneta_buf *txbuf;
2829 struct mvneta_tx_desc *t;
2830 uint32_t ptxs, ptxsu, ndesc;
2831 int i;
2832
2833 KASSERT_TX_MTX(sc, q);
2834
2835 tx = MVNETA_TX_RING(sc, q);
2836 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
2837 return;
2838
2839 ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
2840 ndesc = MVNETA_PTXS_GET_TBC(ptxs);
2841
2842 if (__predict_false(ndesc == 0)) {
2843 if (tx->used == 0)
2844 tx->queue_status = MVNETA_QUEUE_IDLE;
2845 else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
2846 ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
2847 tx->queue_hung = TRUE;
2848 return;
2849 }
2850
2851 #ifdef MVNETA_KTR
2852 CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
2853 sc->ifp->if_xname, q, ndesc);
2854 #endif
2855
2856 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2857 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2858
2859 for (i = 0; i < ndesc; i++) {
2860 t = &tx->desc[tx->dma];
2861 #ifdef MVNETA_KTR
2862 if (t->flags & MVNETA_TX_F_ES)
2863 CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
2864 sc->ifp->if_xname, q, tx->dma);
2865 #endif
2866 txbuf = &tx->txbuf[tx->dma];
2867 if (__predict_true(txbuf->m != NULL)) {
2868 DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
2869 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2870 m_freem(txbuf->m);
2871 txbuf->m = NULL;
2872 }
2873 else
2874 DASSERT((t->flags & MVNETA_TX_CMD_F) == 0);
2875 tx->dma = tx_counter_adv(tx->dma, 1);
2876 tx->used--;
2877 }
2878 DASSERT(tx->used >= 0);
2879 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2880 while (__predict_false(ndesc > 255)) {
2881 ptxsu = MVNETA_PTXSU_NORB(255);
2882 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2883 ndesc -= 255;
2884 }
2885 if (__predict_true(ndesc > 0)) {
2886 ptxsu = MVNETA_PTXSU_NORB(ndesc);
2887 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2888 }
2889 #ifdef MVNETA_KTR
2890 CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
2891 sc->ifp->if_xname, q, tx->cpu, tx->dma, tx->used);
2892 #endif
2893
2894 tx->watchdog_time = ticks;
2895
2896 if (tx->used == 0)
2897 tx->queue_status = MVNETA_QUEUE_IDLE;
2898 }
2899
2900 /*
2901 * Do a final TX complete when TX is idle.
2902 */
2903 STATIC void
2904 mvneta_tx_drain(struct mvneta_softc *sc)
2905 {
2906 struct mvneta_tx_ring *tx;
2907 int q;
2908
2909 /*
2910 	 * Handle trailing mbufs on the TX queues.
2911 	 * The check is done locklessly to avoid TX path contention.
2912 */
2913 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2914 tx = MVNETA_TX_RING(sc, q);
2915 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
2916 tx->used > 0) {
2917 mvneta_tx_lockq(sc, q);
2918 mvneta_tx_queue_complete(sc, q);
2919 mvneta_tx_unlockq(sc, q);
2920 }
2921 }
2922 }
2923
2924 /*
2925 * Rx Subroutines
2926 */
2927 STATIC int
2928 mvneta_rx(struct mvneta_softc *sc, int q, int count)
2929 {
2930 uint32_t prxs, npkt;
2931 int more;
2932
2933 more = 0;
2934 mvneta_rx_lockq(sc, q);
2935 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
2936 npkt = MVNETA_PRXS_GET_ODC(prxs);
2937 if (__predict_false(npkt == 0))
2938 goto out;
2939
2940 if (count > 0 && npkt > count) {
2941 more = 1;
2942 npkt = count;
2943 }
2944 mvneta_rx_queue(sc, q, npkt);
2945 out:
2946 mvneta_rx_unlockq(sc, q);
2947 	return (more);
2948 }
2949
2950 /*
2951 * Helper routine for updating PRXSU register of a given queue.
2952  * Handles a number of processed descriptors larger than the maximum acceptable value.
2953 */
2954 STATIC __inline void
2955 mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
2956 {
2957 uint32_t prxsu;
2958
2959 while (__predict_false(processed > 255)) {
2960 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2961 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2962 processed -= 255;
2963 }
2964 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
2965 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2966 }
2967
2968 static __inline void
2969 mvneta_prefetch(void *p)
2970 {
2971
2972 __builtin_prefetch(p);
2973 }
2974
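/*
 * Receive up to npkt frames from queue q: set checksum flags, feed TCP
 * frames to LRO when enabled, pass everything else to if_input and refill
 * the ring as descriptors are consumed.
 */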
2975 STATIC void
2976 mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
2977 {
2978 struct ifnet *ifp;
2979 struct mvneta_rx_ring *rx;
2980 struct mvneta_rx_desc *r;
2981 struct mvneta_buf *rxbuf;
2982 struct mbuf *m;
2983 struct lro_ctrl *lro;
2984 struct lro_entry *queued;
2985 void *pktbuf;
2986 int i, pktlen, processed, ndma;
2987
2988 KASSERT_RX_MTX(sc, q);
2989
2990 ifp = sc->ifp;
2991 rx = MVNETA_RX_RING(sc, q);
2992 processed = 0;
2993
2994 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
2995 return;
2996
2997 bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
2998 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2999
3000 for (i = 0; i < npkt; i++) {
3001 /* Prefetch next desc, rxbuf. */
3002 ndma = rx_counter_adv(rx->dma, 1);
3003 mvneta_prefetch(&rx->desc[ndma]);
3004 mvneta_prefetch(&rx->rxbuf[ndma]);
3005
3006 /* get descriptor and packet */
3007 r = &rx->desc[rx->dma];
3008 rxbuf = &rx->rxbuf[rx->dma];
3009 m = rxbuf->m;
3010 rxbuf->m = NULL;
3011 DASSERT(m != NULL);
3012 bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
3013 BUS_DMASYNC_POSTREAD);
3014 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3015 /* Prefetch mbuf header. */
3016 mvneta_prefetch(m);
3017
3018 processed++;
3019 		/* Drop descriptors with an error status or frames not in a single buffer. */
3020 DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
3021 (MVNETA_RX_F|MVNETA_RX_L));
3022 if (__predict_false((r->status & MVNETA_RX_ES) ||
3023 (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
3024 (MVNETA_RX_F|MVNETA_RX_L)))
3025 goto rx_error;
3026
3027 /*
3028 * [ OFF | MH | PKT | CRC ]
3029 		 * bytecnt covers MH, PKT and CRC
3030 */
3031 pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
3032 pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
3033 MVNETA_HWHEADER_SIZE;
3034
3035 /* Prefetch mbuf data. */
3036 mvneta_prefetch(pktbuf);
3037
3038 /* Write value to mbuf (avoid read). */
3039 m->m_data = pktbuf;
3040 m->m_len = m->m_pkthdr.len = pktlen;
3041 m->m_pkthdr.rcvif = ifp;
3042 mvneta_rx_set_csumflag(ifp, r, m);
3043
3044 /* Increase rx_dma before releasing the lock. */
3045 rx->dma = ndma;
3046
3047 if (__predict_false(rx->lro_enabled &&
3048 ((r->status & MVNETA_RX_L3_IP) != 0) &&
3049 ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
3050 (m->m_pkthdr.csum_flags &
3051 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3052 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
3053 if (rx->lro.lro_cnt != 0) {
3054 if (tcp_lro_rx(&rx->lro, m, 0) == 0)
3055 goto rx_done;
3056 }
3057 }
3058
3059 mvneta_rx_unlockq(sc, q);
3060 (*ifp->if_input)(ifp, m);
3061 mvneta_rx_lockq(sc, q);
3062 /*
3063 * Check whether this queue has been disabled in the
3064 * meantime. If yes, then clear LRO and exit.
3065 */
3066 if(__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3067 goto rx_lro;
3068 rx_done:
3069 /* Refresh receive ring to avoid stall and minimize jitter. */
3070 if (processed >= MVNETA_RX_REFILL_COUNT) {
3071 mvneta_prxsu_update(sc, q, processed);
3072 mvneta_rx_queue_refill(sc, q);
3073 processed = 0;
3074 }
3075 continue;
3076 rx_error:
3077 m_freem(m);
3078 rx->dma = ndma;
3079 /* Refresh receive ring to avoid stall and minimize jitter. */
3080 if (processed >= MVNETA_RX_REFILL_COUNT) {
3081 mvneta_prxsu_update(sc, q, processed);
3082 mvneta_rx_queue_refill(sc, q);
3083 processed = 0;
3084 }
3085 }
3086 #ifdef MVNETA_KTR
3087 CTR3(KTR_SPARE2, "%s:%u %u packets received", ifp->if_xname, q, npkt);
3088 #endif
3089 /* DMA status update */
3090 mvneta_prxsu_update(sc, q, processed);
3091 /* Refill the rest of buffers if there are any to refill */
3092 mvneta_rx_queue_refill(sc, q);
3093
3094 rx_lro:
3095 /*
3096 * Flush any outstanding LRO work
3097 */
3098 lro = &rx->lro;
3099 while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
3100 LIST_REMOVE(LIST_FIRST((&lro->lro_active)), next);
3101 tcp_lro_flush(lro, queued);
3102 }
3103 }
3104
3105 STATIC void
3106 mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
3107 {
3108
3109 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3110 /* This will remove all data at once */
3111 m_freem(rxbuf->m);
3112 }
3113
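/*
 * Allocate mbuf clusters for the empty RX descriptors of queue q, load
 * them for DMA and tell the hardware how many new descriptors are
 * available.  If nothing could be allocated and the ring is completely
 * empty, mark the queue for a later refill from the tick handler.
 */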
3114 STATIC void
3115 mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
3116 {
3117 struct mvneta_rx_ring *rx;
3118 struct mvneta_rx_desc *r;
3119 struct mvneta_buf *rxbuf;
3120 bus_dma_segment_t segs;
3121 struct mbuf *m;
3122 uint32_t prxs, prxsu, ndesc;
3123 int npkt, refill, nsegs, error;
3124
3125 KASSERT_RX_MTX(sc, q);
3126
3127 rx = MVNETA_RX_RING(sc, q);
3128 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
3129 ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
3130 refill = MVNETA_RX_RING_CNT - ndesc;
3131 #ifdef MVNETA_KTR
3132 CTR3(KTR_SPARE2, "%s:%u refill %u packets", sc->ifp->if_xname, q,
3133 refill);
3134 #endif
3135 if (__predict_false(refill <= 0))
3136 return;
3137
3138 for (npkt = 0; npkt < refill; npkt++) {
3139 rxbuf = &rx->rxbuf[rx->cpu];
3140 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3141 if (__predict_false(m == NULL)) {
3142 error = ENOBUFS;
3143 break;
3144 }
3145 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3146
3147 error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
3148 m, &segs, &nsegs, BUS_DMA_NOWAIT);
3149 if (__predict_false(error != 0 || nsegs != 1)) {
3150 KASSERT(1, ("Failed to load Rx mbuf DMA map"));
3151 m_freem(m);
3152 break;
3153 }
3154
3155 /* Add the packet to the ring */
3156 rxbuf->m = m;
3157 r = &rx->desc[rx->cpu];
3158 r->bufptr_pa = segs.ds_addr;
3159 rx->rxbuf_virt_addr[rx->cpu] = m->m_data;
3160
3161 rx->cpu = rx_counter_adv(rx->cpu, 1);
3162 }
3163 if (npkt == 0) {
3164 if (refill == MVNETA_RX_RING_CNT)
3165 rx->needs_refill = TRUE;
3166 return;
3167 }
3168
3169 rx->needs_refill = FALSE;
3170 bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3171
3172 while (__predict_false(npkt > 255)) {
3173 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
3174 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3175 npkt -= 255;
3176 }
3177 if (__predict_true(npkt > 0)) {
3178 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
3179 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3180 }
3181 }
3182
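/*
 * Convert RX descriptor status bits into mbuf checksum flags for IPv4
 * frames with valid IP and TCP/UDP checksums.
 */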
3183 STATIC __inline void
3184 mvneta_rx_set_csumflag(struct ifnet *ifp,
3185 struct mvneta_rx_desc *r, struct mbuf *m)
3186 {
3187 uint32_t csum_flags;
3188
3189 csum_flags = 0;
3190 if (__predict_false((r->status &
3191 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
3192 return; /* not a IP packet */
3193
3194 /* L3 */
3195 if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
3196 MVNETA_RX_IP_HEADER_OK))
3197 csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;
3198
3199 if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
3200 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
3201 /* L4 */
3202 switch (r->status & MVNETA_RX_L4_MASK) {
3203 case MVNETA_RX_L4_TCP:
3204 case MVNETA_RX_L4_UDP:
3205 csum_flags |= CSUM_L4_CALC;
3206 if (__predict_true((r->status &
3207 MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
3208 csum_flags |= CSUM_L4_VALID;
3209 m->m_pkthdr.csum_data = htons(0xffff);
3210 }
3211 break;
3212 case MVNETA_RX_L4_OTH:
3213 default:
3214 break;
3215 }
3216 }
3217 m->m_pkthdr.csum_flags = csum_flags;
3218 }
3219
3220 /*
3221 * MAC address filter
3222 */
3223 STATIC void
3224 mvneta_filter_setup(struct mvneta_softc *sc)
3225 {
3226 struct ifnet *ifp;
3227 uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
3228 uint32_t pxc;
3229 int i;
3230
3231 KASSERT_SC_MTX(sc);
3232
3233 memset(dfut, 0, sizeof(dfut));
3234 memset(dfsmt, 0, sizeof(dfsmt));
3235 memset(dfomt, 0, sizeof(dfomt));
3236
3237 ifp = sc->ifp;
3238 ifp->if_flags |= IFF_ALLMULTI;
3239 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
3240 for (i = 0; i < MVNETA_NDFSMT; i++) {
3241 dfsmt[i] = dfomt[i] =
3242 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3243 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3244 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3245 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3246 }
3247 }
3248
3249 pxc = MVNETA_READ(sc, MVNETA_PXC);
3250 pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
3251 MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
3252 pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
3253 pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
3254 pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
3255 pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
3256 pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
3257 pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
3258 if (ifp->if_flags & IFF_BROADCAST) {
3259 pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
3260 }
3261 if (ifp->if_flags & IFF_PROMISC) {
3262 pxc |= MVNETA_PXC_UPM;
3263 }
3264 MVNETA_WRITE(sc, MVNETA_PXC, pxc);
3265
3266 /* Set Destination Address Filter Unicast Table */
3267 if (ifp->if_flags & IFF_PROMISC) {
3268 /* pass all unicast addresses */
3269 for (i = 0; i < MVNETA_NDFUT; i++) {
3270 dfut[i] =
3271 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3272 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3273 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3274 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3275 }
3276 } else {
3277 i = sc->enaddr[5] & 0xf; /* last nibble */
3278 dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3279 }
3280 MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);
3281
3282 /* Set Destination Address Filter Multicast Tables */
3283 MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
3284 MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
3285 }
3286
3287 /*
3288 * sysctl(9)
3289 */
3290 STATIC int
3291 sysctl_read_mib(SYSCTL_HANDLER_ARGS)
3292 {
3293 struct mvneta_sysctl_mib *arg;
3294 struct mvneta_softc *sc;
3295 uint64_t val;
3296
3297 arg = (struct mvneta_sysctl_mib *)arg1;
3298 if (arg == NULL)
3299 return (EINVAL);
3300
3301 sc = arg->sc;
3302 if (sc == NULL)
3303 return (EINVAL);
3304 if (arg->index < 0 || arg->index > MVNETA_PORTMIB_NOCOUNTER)
3305 return (EINVAL);
3306
3307 mvneta_sc_lock(sc);
3308 val = arg->counter;
3309 mvneta_sc_unlock(sc);
3310 	return (sysctl_handle_64(oidp, &val, 0, req));
3311 }
3312
3313
3314 STATIC int
3315 sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
3316 {
3317 struct mvneta_softc *sc;
3318 int err, val;
3319
3320 val = 0;
3321 sc = (struct mvneta_softc *)arg1;
3322 if (sc == NULL)
3323 return (EINVAL);
3324
3325 err = sysctl_handle_int(oidp, &val, 0, req);
3326 if (err != 0)
3327 return (err);
3328
3329 if (val < 0 || val > 1)
3330 return (EINVAL);
3331
3332 if (val == 1) {
3333 mvneta_sc_lock(sc);
3334 mvneta_clear_mib(sc);
3335 mvneta_sc_unlock(sc);
3336 }
3337
3338 return (0);
3339 }
3340
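/*
 * sysctl handler for the per-queue RX interrupt threshold timer: report
 * and update the value in microseconds, converting to and from clock
 * ticks obtained via mvneta_get_clk().
 */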
3341 STATIC int
3342 sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
3343 {
3344 struct mvneta_sysctl_queue *arg;
3345 struct mvneta_rx_ring *rx;
3346 struct mvneta_softc *sc;
3347 uint32_t reg, time_mvtclk;
3348 int err, time_us;
3349
3350 rx = NULL;
3351 arg = (struct mvneta_sysctl_queue *)arg1;
3352 if (arg == NULL)
3353 return (EINVAL);
3354 if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT)
3355 return (EINVAL);
3356 if (arg->rxtx != MVNETA_SYSCTL_RX)
3357 return (EINVAL);
3358
3359 sc = arg->sc;
3360 if (sc == NULL)
3361 return (EINVAL);
3362
3363 	/* Read the current RX interrupt threshold time. */
3364 mvneta_sc_lock(sc);
3365 mvneta_rx_lockq(sc, arg->queue);
3366 rx = MVNETA_RX_RING(sc, arg->queue);
3367 time_mvtclk = rx->queue_th_time;
3368 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvneta_get_clk();
3369 mvneta_rx_unlockq(sc, arg->queue);
3370 mvneta_sc_unlock(sc);
3371
3372 err = sysctl_handle_int(oidp, &time_us, 0, req);
3373 if (err != 0)
3374 return (err);
3375
3376 mvneta_sc_lock(sc);
3377 mvneta_rx_lockq(sc, arg->queue);
3378
3379 	/* Update the threshold time (valid range: 0 - 1 sec). */
3380 if (time_us < 0 || time_us > (1000 * 1000)) {
3381 mvneta_rx_unlockq(sc, arg->queue);
3382 mvneta_sc_unlock(sc);
3383 return (EINVAL);
3384 }
3385 time_mvtclk =
3386 (uint64_t)mvneta_get_clk() * (uint64_t)time_us / (1000ULL * 1000ULL);
3387 rx->queue_th_time = time_mvtclk;
3388 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
3389 MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
3390 mvneta_rx_unlockq(sc, arg->queue);
3391 mvneta_sc_unlock(sc);
3392
3393 return (0);
3394 }
3395
3396 STATIC void
3397 sysctl_mvneta_init(struct mvneta_softc *sc)
3398 {
3399 struct sysctl_ctx_list *ctx;
3400 struct sysctl_oid_list *children;
3401 struct sysctl_oid_list *rxchildren;
3402 struct sysctl_oid_list *qchildren, *mchildren;
3403 struct sysctl_oid *tree;
3404 int i, q;
3405 struct mvneta_sysctl_queue *rxarg;
3406 #define MVNETA_SYSCTL_NAME(num) "queue" # num
3407 static const char *sysctl_queue_names[] = {
3408 MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
3409 MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
3410 MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
3411 MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
3412 };
3413 #undef MVNETA_SYSCTL_NAME
3414
3415 #ifndef NO_SYSCTL_DESCR
3416 #define MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3417 static const char *sysctl_queue_descrs[] = {
3418 MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
3419 MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
3420 MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
3421 MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
3422 };
3423 #undef MVNETA_SYSCTL_DESCR
3424 #endif
3425
3426
3427 ctx = device_get_sysctl_ctx(sc->dev);
3428 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3429
3430 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
3431 CTLFLAG_RD, 0, "NETA RX");
3432 rxchildren = SYSCTL_CHILDREN(tree);
3433 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
3434 CTLFLAG_RD, 0, "NETA MIB");
3435 mchildren = SYSCTL_CHILDREN(tree);
3436
3437
3438 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
3439 CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
3440 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
3441 CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
3442
3443 /*
3444 * MIB access
3445 */
3446 /* dev.mvneta.[unit].mib.<mibs> */
3447 for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
3448 struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
3449
3450 mib_arg->sc = sc;
3451 mib_arg->index = i;
3452 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO,
3453 mvneta_mib_list[i].sysctl_name,
3454 CTLTYPE_U64|CTLFLAG_RD, (void *)mib_arg, 0,
3455 sysctl_read_mib, "I", mvneta_mib_list[i].desc);
3456 }
3457 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
3458 CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
3459 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
3460 CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
3461 SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
3462 CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
3463
3464 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
3465 CTLTYPE_INT|CTLFLAG_RW, (void *)sc, 0,
3466 sysctl_clear_mib, "I", "Reset MIB counters");
3467
3468 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
3469 rxarg = &sc->sysctl_rx_queue[q];
3470
3471 rxarg->sc = sc;
3472 rxarg->queue = q;
3473 rxarg->rxtx = MVNETA_SYSCTL_RX;
3474
3475 /* hw.mvneta.mvneta[unit].rx.[queue] */
3476 tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
3477 sysctl_queue_names[q], CTLFLAG_RD, 0,
3478 sysctl_queue_descrs[q]);
3479 qchildren = SYSCTL_CHILDREN(tree);
3480
3481 /* hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us */
3482 SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
3483 CTLTYPE_UINT | CTLFLAG_RW, rxarg, 0,
3484 sysctl_set_queue_rxthtime, "I",
3485 "interrupt coalescing threshold timer [us]");
3486 }
3487 }
3488
3489 /*
3490 * MIB
3491 */
3492 STATIC void
3493 mvneta_clear_mib(struct mvneta_softc *sc)
3494 {
3495 int i;
3496
3497 KASSERT_SC_MTX(sc);
3498
3499 for (i = 0; i < nitems(mvneta_mib_list); i++) {
3500 if (mvneta_mib_list[i].reg64)
3501 MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
3502 else
3503 MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
3504 sc->sysctl_mib[i].counter = 0;
3505 }
3506 MVNETA_READ(sc, MVNETA_PDFC);
3507 sc->counter_pdfc = 0;
3508 MVNETA_READ(sc, MVNETA_POFC);
3509 sc->counter_pofc = 0;
3510 sc->counter_watchdog = 0;
3511 }
3512
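/*
 * Read the hardware MIB counters (cleared on read), accumulate them in
 * the sysctl mirror and update the interface statistics.
 */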
3513 STATIC void
3514 mvneta_update_mib(struct mvneta_softc *sc)
3515 {
3516 struct mvneta_tx_ring *tx;
3517 int i;
3518 uint64_t val;
3519 uint32_t reg;
3520
3521 for (i = 0; i < nitems(mvneta_mib_list); i++) {
3522
3523 if (mvneta_mib_list[i].reg64)
3524 val = MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
3525 else
3526 val = MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
3527
3528 if (val == 0)
3529 continue;
3530
3531 sc->sysctl_mib[i].counter += val;
3532 switch (mvneta_mib_list[i].regnum) {
3533 case MVNETA_MIB_RX_GOOD_OCT:
3534 if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
3535 break;
3536 case MVNETA_MIB_RX_BAD_FRAME:
3537 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
3538 break;
3539 case MVNETA_MIB_RX_GOOD_FRAME:
3540 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
3541 break;
3542 case MVNETA_MIB_RX_MCAST_FRAME:
3543 if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
3544 break;
3545 case MVNETA_MIB_TX_GOOD_OCT:
3546 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
3547 break;
3548 case MVNETA_MIB_TX_GOOD_FRAME:
3549 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
3550 break;
3551 case MVNETA_MIB_TX_MCAST_FRAME:
3552 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
3553 break;
3554 case MVNETA_MIB_MAC_COL:
3555 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
3556 break;
3557 case MVNETA_MIB_TX_MAC_TRNS_ERR:
3558 case MVNETA_MIB_TX_EXCES_COL:
3559 case MVNETA_MIB_MAC_LATE_COL:
3560 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
3561 break;
3562 }
3563 }
3564
3565 reg = MVNETA_READ(sc, MVNETA_PDFC);
3566 sc->counter_pdfc += reg;
3567 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3568 reg = MVNETA_READ(sc, MVNETA_POFC);
3569 sc->counter_pofc += reg;
3570 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3571
3572 /* TX watchdog. */
3573 if (sc->counter_watchdog_mib > 0) {
3574 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
3575 sc->counter_watchdog_mib = 0;
3576 }
3577 /*
3578 * TX driver errors:
3579 	 * We do not take queue locks so as not to disrupt the TX path.
3580 	 * We may miss at most one drv error, which will be picked up at the
3581 	 * next mib update.  We may also clear the counter while the TX path
3582 	 * is incrementing it, but since we only clear it when it was non-zero,
3583 	 * we can lose at most one error.
3584 */
3585 for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
3586 tx = MVNETA_TX_RING(sc, i);
3587
3588 if (tx->drv_error > 0) {
3589 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);
3590 tx->drv_error = 0;
3591 }
3592 }
3593 }
3594