xref: /f-stack/freebsd/mips/nlm/dev/net/xlpge.c (revision 22ce4aff)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2003-2012 Broadcom Corporation
5  * All Rights Reserved
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
25  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
26  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
27  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
28  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/endian.h>
36 #include <sys/systm.h>
37 #include <sys/sockio.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <sys/proc.h>
41 #include <sys/limits.h>
42 #include <sys/bus.h>
43 #include <sys/mbuf.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #define __RMAN_RESOURCE_VISIBLE
49 #include <sys/rman.h>
50 #include <sys/taskqueue.h>
51 
52 #include <net/if.h>
53 #include <net/if_var.h>
54 #include <net/if_arp.h>
55 #include <net/ethernet.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58 #include <net/bpf.h>
59 #include <net/if_types.h>
60 #include <net/if_vlan_var.h>
61 
62 #include <dev/pci/pcivar.h>
63 
64 #include <netinet/in_systm.h>
65 #include <netinet/in.h>
66 #include <netinet/ip.h>
67 
68 #include <vm/vm.h>
69 #include <vm/pmap.h>
70 #include <vm/uma.h>
71 
72 #include <machine/reg.h>
73 #include <machine/cpu.h>
74 #include <machine/mips_opcode.h>
75 #include <machine/asm.h>
76 #include <machine/cpuregs.h>
77 
78 #include <machine/intr_machdep.h>
79 #include <machine/clock.h>	/* for DELAY */
80 #include <machine/bus.h>
81 #include <machine/resource.h>
82 #include <mips/nlm/hal/haldefs.h>
83 #include <mips/nlm/hal/iomap.h>
84 #include <mips/nlm/hal/mips-extns.h>
85 #include <mips/nlm/hal/cop2.h>
86 #include <mips/nlm/hal/fmn.h>
87 #include <mips/nlm/hal/sys.h>
88 #include <mips/nlm/hal/nae.h>
89 #include <mips/nlm/hal/mdio.h>
90 #include <mips/nlm/hal/sgmii.h>
91 #include <mips/nlm/hal/xaui.h>
92 #include <mips/nlm/hal/poe.h>
93 #include <ucore_app_bin.h>
94 #include <mips/nlm/hal/ucore_loader.h>
95 #include <mips/nlm/xlp.h>
96 #include <mips/nlm/board.h>
97 #include <mips/nlm/msgring.h>
98 
99 #include <dev/mii/mii.h>
100 #include <dev/mii/miivar.h>
101 #include "miidevs.h"
102 #include <dev/mii/brgphyreg.h>
103 #include "miibus_if.h"
104 #include <sys/sysctl.h>
105 
106 #include <mips/nlm/dev/net/xlpge.h>
107 
108 /*#define XLP_DRIVER_LOOPBACK*/
109 
/* Per-port NAE configuration, indexed by global port number (up to 64). */
static struct nae_port_config nae_port_config[64];

/*
 * POE class-to-distribution table, one entry per POE class.
 * NOTE(review): values look like 24-bit bit patterns; confirm the exact
 * encoding against the POE hardware documentation.
 */
int poe_cl_tbl[MAX_POE_CLASSES] = {
	0x0, 0x249249,
	0x492492, 0x6db6db,
	0x924924, 0xb6db6d,
	0xdb6db6, 0xffffff
};
118 
119 /* #define DUMP_PACKET */
120 
/*
 * Load a 64-bit word from the given physical address by mapping it
 * through the XKPHYS window (0x98... prefix) and issuing a doubleword
 * load via nlm_load_dword_daddr().
 */
static uint64_t
nlm_paddr_ld(uint64_t paddr)
{
	uint64_t va;

	va = 0x9800000000000000 | paddr;
	return (nlm_load_dword_daddr(va));
}
128 
/* Per-port driver data, indexed by global port number (up to 64 ports). */
struct nlm_xlp_portdata ifp_ports[64];
/* UMA zone for P2P transmit descriptors; created in nlm_xlpnae_attach(). */
static uma_zone_t nl_tx_desc_zone;
131 
132 /* This implementation will register the following tree of device
133  * registration:
134  *                      pcibus
135  *                       |
136  *                      xlpnae (1 instance - virtual entity)
137  *                       |
138  *                     xlpge
139  *      (18 sgmii / 4 xaui / 2 interlaken instances)
140  *                       |
141  *                    miibus
142  */
143 
/* xlpnae device-interface methods, defined later in this file. */
static int nlm_xlpnae_probe(device_t);
static int nlm_xlpnae_attach(device_t);
static int nlm_xlpnae_detach(device_t);
static int nlm_xlpnae_suspend(device_t);
static int nlm_xlpnae_resume(device_t);
static int nlm_xlpnae_shutdown(device_t);

static device_method_t nlm_xlpnae_methods[] = {
	/* Methods from the device interface */
	DEVMETHOD(device_probe,		nlm_xlpnae_probe),
	DEVMETHOD(device_attach,	nlm_xlpnae_attach),
	DEVMETHOD(device_detach,	nlm_xlpnae_detach),
	DEVMETHOD(device_suspend,	nlm_xlpnae_suspend),
	DEVMETHOD(device_resume,	nlm_xlpnae_resume),
	DEVMETHOD(device_shutdown,	nlm_xlpnae_shutdown),

	/* Propagate newly-added drivers to the xlpge children. */
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD_END
};

static driver_t nlm_xlpnae_driver = {
	"xlpnae",			/* driver name */
	nlm_xlpnae_methods,
	sizeof(struct nlm_xlpnae_softc)
};

static devclass_t nlm_xlpnae_devclass;
172 
/* xlpge (per-port) device-interface methods, defined later in this file. */
static int nlm_xlpge_probe(device_t);
static int nlm_xlpge_attach(device_t);
static int nlm_xlpge_detach(device_t);
static int nlm_xlpge_suspend(device_t);
static int nlm_xlpge_resume(device_t);
static int nlm_xlpge_shutdown(device_t);

/* mii override functions */
static int nlm_xlpge_mii_read(device_t, int, int);
static int nlm_xlpge_mii_write(device_t, int, int, int);
static void nlm_xlpge_mii_statchg(device_t);

static device_method_t nlm_xlpge_methods[] = {
	/* Methods from the device interface */
	DEVMETHOD(device_probe,		nlm_xlpge_probe),
	DEVMETHOD(device_attach,	nlm_xlpge_attach),
	DEVMETHOD(device_detach,	nlm_xlpge_detach),
	DEVMETHOD(device_suspend,	nlm_xlpge_suspend),
	DEVMETHOD(device_resume,	nlm_xlpge_resume),
	DEVMETHOD(device_shutdown,	nlm_xlpge_shutdown),

	/* Methods from the nexus bus needed for explicitly
	 * probing children when driver is loaded as a kernel module
	 */
	DEVMETHOD(miibus_readreg,	nlm_xlpge_mii_read),
	DEVMETHOD(miibus_writereg,	nlm_xlpge_mii_write),
	DEVMETHOD(miibus_statchg,	nlm_xlpge_mii_statchg),

	/* Terminate method list */
	DEVMETHOD_END
};

static driver_t nlm_xlpge_driver = {
	"xlpge",			/* driver name */
	nlm_xlpge_methods,
	sizeof(struct nlm_xlpge_softc)
};

static devclass_t nlm_xlpge_devclass;

/* Registration tree: pci -> xlpnae -> xlpge -> miibus. */
DRIVER_MODULE(xlpnae, pci, nlm_xlpnae_driver, nlm_xlpnae_devclass, 0, 0);
DRIVER_MODULE(xlpge, xlpnae, nlm_xlpge_driver, nlm_xlpge_devclass, 0, 0);
DRIVER_MODULE(miibus, xlpge, miibus_driver, miibus_devclass, 0, 0);

/*
 * NOTE(review): MODULE_DEPEND's first argument is the dependent module;
 * "pci depends on xlpnae" and "xlpnae depends on xlpge" look inverted
 * relative to the DRIVER_MODULE tree above -- confirm intent.
 */
MODULE_DEPEND(pci, xlpnae, 1, 1, 1);
MODULE_DEPEND(xlpnae, xlpge, 1, 1, 1);
MODULE_DEPEND(xlpge, ether, 1, 1, 1);
MODULE_DEPEND(xlpge, miibus, 1, 1, 1);
221 
/* NOTE(review): not referenced in this part of the file -- confirm usage. */
#define SGMII_RCV_CONTEXT_WIDTH 8

/* prototypes */
static void nlm_xlpge_msgring_handler(int vc, int size,
    int code, int srcid, struct nlm_fmn_msg *msg, void *data);
static void nlm_xlpge_submit_rx_free_desc(struct nlm_xlpge_softc *sc, int num);
static void nlm_xlpge_init(void *addr);
static void nlm_xlpge_port_disable(struct nlm_xlpge_softc *sc);
static void nlm_xlpge_port_enable(struct nlm_xlpge_softc *sc);

/* globals */
int dbg_on = 1;		/* debug output enable */
int cntx2port[524];	/* NAE context id -> owning port; 18 marks "invalid" */
235 
/* Atomically increment *addr by one (wrapper around atomic_add_long()). */
static __inline void
atomic_incr_long(unsigned long *addr)
{
	atomic_add_long(addr, 1);
}
241 
242 /*
243  * xlpnae driver implementation
244  */
245 static int
nlm_xlpnae_probe(device_t dev)246 nlm_xlpnae_probe(device_t dev)
247 {
248 	if (pci_get_vendor(dev) != PCI_VENDOR_NETLOGIC ||
249 	    pci_get_device(dev) != PCI_DEVICE_ID_NLM_NAE)
250 		return (ENXIO);
251 
252 	return (BUS_PROBE_DEFAULT);
253 }
254 
/*
 * Read back the free-in FIFO carving for the first 20 interfaces.
 * NOTE(review): the decoded 'start' and 'size' values are never used or
 * printed despite the function's name -- looks like a debug printf was
 * dropped; confirm against driver history.
 */
static void
nlm_xlpnae_print_frin_desc_carving(struct nlm_xlpnae_softc *sc)
{
	int intf;
	uint32_t value;
	int start, size;

	/* XXXJC: use max_ports instead of 20 ? */
	for (intf = 0; intf < 20; intf++) {
		/* presumably bit 31 selects read-back for interface 'intf'
		 * -- confirm against NAE register documentation */
		nlm_write_nae_reg(sc->base, NAE_FREE_IN_FIFO_CFG,
		    (0x80000000 | intf));
		value = nlm_read_nae_reg(sc->base, NAE_FREE_IN_FIFO_CFG);
		size = 2 * ((value >> 20) & 0x3ff);
		start = 2 * ((value >> 8) & 0x1ff);
	}
}
271 
/*
 * Program the egress (TX) path for one hardware port: burst-max, the
 * active DDR context range, FIFO carvings/credits and the TX scheduler
 * map for each of the port's channels.
 */
static void
nlm_config_egress(struct nlm_xlpnae_softc *sc, int nblock,
    int context_base, int hwport, int max_channels)
{
	int offset, num_channels;
	uint32_t data;

	num_channels = sc->portcfg[hwport].num_channels;

	/* burst max command: value 2048 with port select and enable bit */
	data = (2048 << 12) | (hwport << 4) | 1;
	nlm_write_nae_reg(sc->base, NAE_TX_IF_BURSTMAX_CMD, data);

	/* active context list: [context_base, context_base+num_channels-1] */
	data = ((context_base + num_channels - 1) << 22) |
	    (context_base << 12) | (hwport << 4) | 1;
	nlm_write_nae_reg(sc->base, NAE_TX_DDR_ACTVLIST_CMD, data);

	config_egress_fifo_carvings(sc->base, hwport,
	    context_base, num_channels, max_channels, sc->portcfg);
	config_egress_fifo_credits(sc->base, hwport,
	    context_base, num_channels, max_channels, sc->portcfg);

	/* NOTE(review): bits 24/25 of DMA_TX_CREDIT_TH -- meaning not
	 * visible here; confirm against NAE documentation */
	data = nlm_read_nae_reg(sc->base, NAE_DMA_TX_CREDIT_TH);
	data |= (1 << 25) | (1 << 24);
	nlm_write_nae_reg(sc->base, NAE_DMA_TX_CREDIT_TH, data);

	/* map every channel of the port into the TX scheduler; the enable
	 * bit is pulsed (written 1 then 0) for each entry */
	for (offset = 0; offset < num_channels; offset++) {
		nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD1,
		    NAE_DRR_QUANTA);
		data = (hwport << 15) | ((context_base + offset) << 5);
		if (sc->cmplx_type[nblock] == ILC)
			data |= (offset << 20);
		nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD0, data | 1);
		nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD0, data);
	}
}
307 
308 static int
xlpnae_get_maxchannels(struct nlm_xlpnae_softc * sc)309 xlpnae_get_maxchannels(struct nlm_xlpnae_softc *sc)
310 {
311 	int maxchans = 0;
312 	int i;
313 
314 	for (i = 0; i < sc->max_ports; i++) {
315 		if (sc->portcfg[i].type == UNKNOWN)
316 			continue;
317 		maxchans += sc->portcfg[i].num_channels;
318 	}
319 
320 	return (maxchans);
321 }
322 
/*
 * Bring up a single NAE interface: configure the MAC (XAUI or SGMII),
 * free-in FIFO, ucore interface mask, flow table, egress path, NETIOR
 * and the interface's ucores.
 */
static void
nlm_setup_interface(struct nlm_xlpnae_softc *sc, int nblock,
    int port, uint32_t cur_flow_base, uint32_t flow_mask,
    int max_channels, int context)
{
	uint64_t nae_base = sc->base;
	int mtu = 1536;			/* XXXJC: don't hard code */
	uint32_t ucore_mask;

	if (sc->cmplx_type[nblock] == XAUIC)
		nlm_config_xaui(nae_base, nblock, mtu,
		    mtu, sc->portcfg[port].vlan_pri_en);
	nlm_config_freein_fifo_uniq_cfg(nae_base,
	    port, sc->portcfg[port].free_desc_sizes);
	nlm_config_ucore_iface_mask_cfg(nae_base,
	    port, sc->portcfg[port].ucore_mask);

	nlm_program_flow_cfg(nae_base, port, cur_flow_base, flow_mask);

	if (sc->cmplx_type[nblock] == SGMIIC)
		nlm_configure_sgmii_interface(nae_base, nblock, port, mtu, 0);

	nlm_config_egress(sc, nblock, context, port, max_channels);

	/* NOTE(review): netior init is repeated for every interface --
	 * confirm whether a single call would suffice */
	nlm_nae_init_netior(nae_base, sc->nblocks);
	nlm_nae_open_if(nae_base, nblock, sc->cmplx_type[nblock], port,
	    sc->portcfg[port].free_desc_sizes);

	/*  XXXJC: check mask calculation */
	ucore_mask = (1 << sc->nucores) - 1;
	nlm_nae_init_ucore(nae_base, port, ucore_mask);
}
355 
356 static void
nlm_setup_interfaces(struct nlm_xlpnae_softc * sc)357 nlm_setup_interfaces(struct nlm_xlpnae_softc *sc)
358 {
359 	uint64_t nae_base;
360 	uint32_t cur_slot, cur_slot_base;
361 	uint32_t cur_flow_base, port, flow_mask;
362 	int max_channels;
363 	int i, context;
364 
365 	cur_slot = 0;
366 	cur_slot_base = 0;
367 	cur_flow_base = 0;
368 	nae_base = sc->base;
369 	flow_mask = nlm_get_flow_mask(sc->total_num_ports);
370 	/* calculate max_channels */
371 	max_channels = xlpnae_get_maxchannels(sc);
372 
373 	port = 0;
374 	context = 0;
375 	for (i = 0; i < sc->max_ports; i++) {
376 		if (sc->portcfg[i].type == UNKNOWN)
377 			continue;
378 		nlm_setup_interface(sc, sc->portcfg[i].block, i, cur_flow_base,
379 		    flow_mask, max_channels, context);
380 		cur_flow_base += sc->per_port_num_flows;
381 		context += sc->portcfg[i].num_channels;
382 	}
383 }
384 
385 static void
nlm_xlpnae_init(int node,struct nlm_xlpnae_softc * sc)386 nlm_xlpnae_init(int node, struct nlm_xlpnae_softc *sc)
387 {
388 	uint64_t nae_base;
389 	uint32_t ucoremask = 0;
390 	uint32_t val;
391 	int i;
392 
393 	nae_base = sc->base;
394 
395 	nlm_nae_flush_free_fifo(nae_base, sc->nblocks);
396 	nlm_deflate_frin_fifo_carving(nae_base, sc->max_ports);
397 	nlm_reset_nae(node);
398 
399 	for (i = 0; i < sc->nucores; i++)	/* XXXJC: code repeated below */
400 		ucoremask |= (0x1 << i);
401 	printf("Loading 0x%x ucores with microcode\n", ucoremask);
402 	nlm_ucore_load_all(nae_base, ucoremask, 1);
403 
404 	val = nlm_set_device_frequency(node, DFS_DEVICE_NAE, sc->freq);
405 	printf("Setup NAE frequency to %dMHz\n", val);
406 
407 	nlm_mdio_reset_all(nae_base);
408 
409 	printf("Initialze SGMII PCS for blocks 0x%x\n", sc->sgmiimask);
410 	nlm_sgmii_pcs_init(nae_base, sc->sgmiimask);
411 
412 	printf("Initialze XAUI PCS for blocks 0x%x\n", sc->xauimask);
413 	nlm_xaui_pcs_init(nae_base, sc->xauimask);
414 
415 	/* clear NETIOR soft reset */
416 	nlm_write_nae_reg(nae_base, NAE_LANE_CFG_SOFTRESET, 0x0);
417 
418 	/* Disable RX enable bit in RX_CONFIG */
419 	val = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG);
420 	val &= 0xfffffffe;
421 	nlm_write_nae_reg(nae_base, NAE_RX_CONFIG, val);
422 
423 	if (nlm_is_xlp8xx_ax() == 0) {
424 		val = nlm_read_nae_reg(nae_base, NAE_TX_CONFIG);
425 		val &= ~(1 << 3);
426 		nlm_write_nae_reg(nae_base, NAE_TX_CONFIG, val);
427 	}
428 
429 	nlm_setup_poe_class_config(nae_base, MAX_POE_CLASSES,
430 	    sc->ncontexts, poe_cl_tbl);
431 
432 	nlm_setup_vfbid_mapping(nae_base);
433 
434 	nlm_setup_flow_crc_poly(nae_base, sc->flow_crc_poly);
435 
436 	nlm_setup_rx_cal_cfg(nae_base, sc->max_ports, sc->portcfg);
437 	/* note: xlp8xx Ax does not have Tx Calendering */
438 	if (!nlm_is_xlp8xx_ax())
439 		nlm_setup_tx_cal_cfg(nae_base, sc->max_ports, sc->portcfg);
440 
441 	nlm_setup_interfaces(sc);
442 	nlm_config_poe(sc->poe_base, sc->poedv_base);
443 
444 	if (sc->hw_parser_en)
445 		nlm_enable_hardware_parser(nae_base);
446 
447 	if (sc->prepad_en)
448 		nlm_prepad_enable(nae_base, sc->prepad_size);
449 
450 	if (sc->ieee_1588_en)
451 		nlm_setup_1588_timer(sc->base, sc->portcfg);
452 }
453 
454 static void
nlm_xlpnae_update_pde(void * dummy __unused)455 nlm_xlpnae_update_pde(void *dummy __unused)
456 {
457 	struct nlm_xlpnae_softc *sc;
458 	uint32_t dv[NUM_WORDS_PER_DV];
459 	device_t dev;
460 	int vec;
461 
462 	dev = devclass_get_device(devclass_find("xlpnae"), 0);
463 	sc = device_get_softc(dev);
464 
465 	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 0);
466 	for (vec = 0; vec < NUM_DIST_VEC; vec++) {
467 		if (nlm_get_poe_distvec(vec, dv) != 0)
468 			continue;
469 
470 		nlm_write_poe_distvec(sc->poedv_base, vec, dv);
471 	}
472 	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 1);
473 }
474 
475 SYSINIT(nlm_xlpnae_update_pde, SI_SUB_SMP, SI_ORDER_ANY,
476     nlm_xlpnae_update_pde, NULL);
477 
/* configuration common for sgmii, xaui, ilaken goes here */
static void
nlm_setup_portcfg(struct nlm_xlpnae_softc *sc, struct xlp_nae_ivars *naep,
    int block, int port)
{
	int i;
	uint32_t ucore_mask = 0;
	struct xlp_block_ivars *bp;
	struct xlp_port_ivars *p;

	bp = &(naep->block_ivars[block]);
	p  = &(bp->port_ivars[port & 0x3]);

	/* copy the board-supplied ivars into the softc's portcfg slot */
	sc->portcfg[port].node = p->node;
	sc->portcfg[port].block = p->block;
	sc->portcfg[port].port = p->port;
	sc->portcfg[port].type = p->type;
	sc->portcfg[port].mdio_bus = p->mdio_bus;
	sc->portcfg[port].phy_addr = p->phy_addr;
	sc->portcfg[port].loopback_mode = p->loopback_mode;
	sc->portcfg[port].num_channels = p->num_channels;
	/* the driver only supports mbuf-cluster sized free descriptors.
	 * NOTE(review): on mismatch this returns with the port's type
	 * already set (partially configured) and total_num_ports not
	 * incremented -- confirm this is the intended failure mode. */
	if (p->free_desc_sizes != MCLBYTES) {
		printf("[%d, %d] Error: free_desc_sizes %d != %d\n",
		    block, port, p->free_desc_sizes, MCLBYTES);
		return;
	}
	sc->portcfg[port].free_desc_sizes = p->free_desc_sizes;
	for (i = 0; i < sc->nucores; i++)	/* XXXJC: configure this */
		ucore_mask |= (0x1 << i);
	sc->portcfg[port].ucore_mask = ucore_mask;
	sc->portcfg[port].vlan_pri_en = p->vlan_pri_en;
	sc->portcfg[port].num_free_descs = p->num_free_descs;
	sc->portcfg[port].iface_fifo_size = p->iface_fifo_size;
	sc->portcfg[port].rxbuf_size = p->rxbuf_size;
	sc->portcfg[port].rx_slots_reqd = p->rx_slots_reqd;
	sc->portcfg[port].tx_slots_reqd = p->tx_slots_reqd;
	sc->portcfg[port].pseq_fifo_size = p->pseq_fifo_size;

	/* internal FIFO carvings and credits */
	sc->portcfg[port].stg2_fifo_size = p->stg2_fifo_size;
	sc->portcfg[port].eh_fifo_size = p->eh_fifo_size;
	sc->portcfg[port].frout_fifo_size = p->frout_fifo_size;
	sc->portcfg[port].ms_fifo_size = p->ms_fifo_size;
	sc->portcfg[port].pkt_fifo_size = p->pkt_fifo_size;
	sc->portcfg[port].pktlen_fifo_size = p->pktlen_fifo_size;
	sc->portcfg[port].max_stg2_offset = p->max_stg2_offset;
	sc->portcfg[port].max_eh_offset = p->max_eh_offset;
	sc->portcfg[port].max_frout_offset = p->max_frout_offset;
	sc->portcfg[port].max_ms_offset = p->max_ms_offset;
	sc->portcfg[port].max_pmem_offset = p->max_pmem_offset;
	sc->portcfg[port].stg1_2_credit = p->stg1_2_credit;
	sc->portcfg[port].stg2_eh_credit = p->stg2_eh_credit;
	sc->portcfg[port].stg2_frout_credit = p->stg2_frout_credit;
	sc->portcfg[port].stg2_ms_credit = p->stg2_ms_credit;
	/* IEEE 1588 timestamping parameters */
	sc->portcfg[port].ieee1588_inc_intg = p->ieee1588_inc_intg;
	sc->portcfg[port].ieee1588_inc_den = p->ieee1588_inc_den;
	sc->portcfg[port].ieee1588_inc_num = p->ieee1588_inc_num;
	sc->portcfg[port].ieee1588_userval = p->ieee1588_userval;
	sc->portcfg[port].ieee1588_ptpoff = p->ieee1588_ptpoff;
	sc->portcfg[port].ieee1588_tmr1 = p->ieee1588_tmr1;
	sc->portcfg[port].ieee1588_tmr2 = p->ieee1588_tmr2;
	sc->portcfg[port].ieee1588_tmr3 = p->ieee1588_tmr3;

	/* account this port in the system-wide totals */
	sc->total_free_desc += sc->portcfg[port].free_desc_sizes;
	sc->total_num_ports++;
}
543 
/*
 * Attach the virtual NAE device: read board ivars, carve ports/contexts,
 * register FMN message handlers, initialize the hardware and create one
 * xlpge child per configured port.
 */
static int
nlm_xlpnae_attach(device_t dev)
{
	struct xlp_nae_ivars	*nae_ivars;
	struct nlm_xlpnae_softc *sc;
	device_t tmpd;
	uint32_t dv[NUM_WORDS_PER_DV];
	int port, i, j, nchan, nblock, node, qstart, qnum;
	int offset, context, txq_base, rxvcbase;
	uint64_t poe_pcibase, nae_pcibase;

	/* XLP node is derived from the PCI slot (8 functions per node) */
	node = pci_get_slot(dev) / 8;
	nae_ivars = &xlp_board_info.nodes[node].nae_ivars;

	sc = device_get_softc(dev);
	sc->xlpnae_dev = dev;
	sc->node = nae_ivars->node;
	sc->base = nlm_get_nae_regbase(sc->node);
	sc->poe_base = nlm_get_poe_regbase(sc->node);
	sc->poedv_base = nlm_get_poedv_regbase(sc->node);
	sc->portcfg = nae_port_config;
	sc->blockmask = nae_ivars->blockmask;
	sc->ilmask = nae_ivars->ilmask;
	sc->xauimask = nae_ivars->xauimask;
	sc->sgmiimask = nae_ivars->sgmiimask;
	sc->nblocks = nae_ivars->nblocks;
	sc->freq = nae_ivars->freq;

	/* flow table generation is done by CRC16 polynomial */
	sc->flow_crc_poly = nae_ivars->flow_crc_poly;

	sc->hw_parser_en = nae_ivars->hw_parser_en;
	sc->prepad_en = nae_ivars->prepad_en;
	sc->prepad_size = nae_ivars->prepad_size;
	sc->ieee_1588_en = nae_ivars->ieee_1588_en;

	nae_pcibase = nlm_get_nae_pcibase(sc->node);
	sc->ncontexts = nlm_read_reg(nae_pcibase, XLP_PCI_DEVINFO_REG5);
	sc->nucores = nlm_num_uengines(nae_pcibase);

	for (nblock = 0; nblock < sc->nblocks; nblock++) {
		sc->cmplx_type[nblock] = nae_ivars->block_ivars[nblock].type;
		sc->portmask[nblock] = nae_ivars->block_ivars[nblock].portmask;
	}

	for (i = 0; i < sc->ncontexts; i++)
		cntx2port[i] = 18;	/* 18 is an invalid port */

	if (sc->nblocks == 5)
		sc->max_ports = 18;	/* 8xx has a block 4 with 2 ports */
	else
		sc->max_ports = sc->nblocks * PORTS_PER_CMPLX;

	for (i = 0; i < sc->max_ports; i++)
		sc->portcfg[i].type = UNKNOWN; /* Port Not Present */
	/*
	 * Now setup all internal fifo carvings based on
	 * total number of ports in the system
	 */
	sc->total_free_desc = 0;
	sc->total_num_ports = 0;
	port = 0;
	context = 0;
	txq_base = nlm_qidstart(nae_pcibase);
	rxvcbase = txq_base + sc->ncontexts;
	/* walk every block/port, configure present ports and assign each
	 * a TX queue (per context) and an RX free-descriptor queue */
	for (i = 0; i < sc->nblocks; i++) {
		uint32_t portmask;

		if ((nae_ivars->blockmask & (1 << i)) == 0) {
			port += 4;	/* skip absent block's port slots */
			continue;
		}
		portmask = nae_ivars->block_ivars[i].portmask;
		for (j = 0; j < PORTS_PER_CMPLX; j++, port++) {
			if ((portmask & (1 << j)) == 0)
				continue;
			nlm_setup_portcfg(sc, nae_ivars, i, port);
			nchan = sc->portcfg[port].num_channels;
			for (offset = 0; offset < nchan; offset++)
				cntx2port[context + offset] = port;
			sc->portcfg[port].txq = txq_base + context;
			sc->portcfg[port].rxfreeq = rxvcbase + port;
			context += nchan;
		}
	}

	/* split the POE flow table evenly across configured ports */
	poe_pcibase = nlm_get_poe_pcibase(sc->node);
	sc->per_port_num_flows =
	    nlm_poe_max_flows(poe_pcibase) / sc->total_num_ports;

	/* zone for P2P descriptors */
	nl_tx_desc_zone = uma_zcreate("NL Tx Desc",
	    sizeof(struct xlpge_tx_desc), NULL, NULL, NULL, NULL,
	    NAE_CACHELINE_SIZE, 0);

	/* NAE FMN messages have CMS src station id's in the
	 * range of qstart to qnum.
	 */
	qstart = nlm_qidstart(nae_pcibase);
	qnum = nlm_qnum(nae_pcibase);
	if (register_msgring_handler(qstart, qstart + qnum - 1,
	    nlm_xlpge_msgring_handler, sc)) {
		panic("Couldn't register NAE msgring handler\n");
	}

	/* POE FMN messages have CMS src station id's in the
	 * range of qstart to qnum.
	 */
	qstart = nlm_qidstart(poe_pcibase);
	qnum = nlm_qnum(poe_pcibase);
	if (register_msgring_handler(qstart, qstart + qnum - 1,
	    nlm_xlpge_msgring_handler, sc)) {
		panic("Couldn't register POE msgring handler\n");
	}

	nlm_xlpnae_init(node, sc);

	/* create one xlpge child device per configured port */
	for (i = 0; i < sc->max_ports; i++) {
		char desc[32];
		int block, port;

		if (sc->portcfg[i].type == UNKNOWN)
			continue;
		block = sc->portcfg[i].block;
		port = sc->portcfg[i].port;
		tmpd = device_add_child(dev, "xlpge", i);
		device_set_ivars(tmpd,
		    &(nae_ivars->block_ivars[block].port_ivars[port]));
		sprintf(desc, "XLP NAE Port %d,%d", block, port);
		device_set_desc_copy(tmpd, desc);
	}
	nlm_setup_iface_fifo_cfg(sc->base, sc->max_ports, sc->portcfg);
	nlm_setup_rx_base_config(sc->base, sc->max_ports, sc->portcfg);
	nlm_setup_rx_buf_config(sc->base, sc->max_ports, sc->portcfg);
	nlm_setup_freein_fifo_cfg(sc->base, sc->portcfg);
	nlm_program_nae_parser_seq_fifo(sc->base, sc->max_ports, sc->portcfg);

	nlm_xlpnae_print_frin_desc_carving(sc);
	bus_generic_probe(dev);
	bus_generic_attach(dev);

	/*
	 * Enable only boot cpu at this point, full distribution comes
	 * only after SMP is started
	 */
	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 0);
	nlm_calc_poe_distvec(0x1, 0, 0, 0, 0x1 << XLPGE_RX_VC, dv);
	nlm_write_poe_distvec(sc->poedv_base, 0, dv);
	nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 1);

	return (0);
}
696 
/* Detach stub: always succeeds; the UMA zone is intentionally leaked. */
static int
nlm_xlpnae_detach(device_t dev)
{
	/*  TODO - free zone here */
	return (0);
}

/* Suspend stub: nothing to save; always succeeds. */
static int
nlm_xlpnae_suspend(device_t dev)
{
	return (0);
}

/* Resume stub: nothing to restore; always succeeds. */
static int
nlm_xlpnae_resume(device_t dev)
{
	return (0);
}

/* Shutdown stub: hardware is left as-is; always succeeds. */
static int
nlm_xlpnae_shutdown(device_t dev)
{
	return (0);
}
721 
722 /*
723  * xlpge driver implementation
724  */
725 
726 static void
nlm_xlpge_mac_set_rx_mode(struct nlm_xlpge_softc * sc)727 nlm_xlpge_mac_set_rx_mode(struct nlm_xlpge_softc *sc)
728 {
729 	if (sc->if_flags & IFF_PROMISC) {
730 		if (sc->type == SGMIIC)
731 			nlm_nae_setup_rx_mode_sgmii(sc->base_addr,
732 			    sc->block, sc->port, sc->type, 1 /* broadcast */,
733 			    1/* multicast */, 0 /* pause */, 1 /* promisc */);
734 		else
735 			nlm_nae_setup_rx_mode_xaui(sc->base_addr,
736 			    sc->block, sc->port, sc->type, 1 /* broadcast */,
737 			    1/* multicast */, 0 /* pause */, 1 /* promisc */);
738 	} else {
739 		if (sc->type == SGMIIC)
740 			nlm_nae_setup_rx_mode_sgmii(sc->base_addr,
741 			    sc->block, sc->port, sc->type, 1 /* broadcast */,
742 			    1/* multicast */, 0 /* pause */, 0 /* promisc */);
743 		else
744 			nlm_nae_setup_rx_mode_xaui(sc->base_addr,
745 			    sc->block, sc->port, sc->type, 1 /* broadcast */,
746 			    1/* multicast */, 0 /* pause */, 0 /* promisc */);
747 	}
748 }
749 
/*
 * Interface ioctl handler.
 *
 * SIOCSIFFLAGS brings the port up/down under the softc lock and
 * reprograms the RX filter; media ioctls are delegated to miibus;
 * everything else goes to ether_ioctl().
 */
static int
nlm_xlpge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mii_data		*mii;
	struct nlm_xlpge_softc	*sc;
	struct ifreq		*ifr;
	int			error;

	sc = ifp->if_softc;
	error = 0;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFFLAGS:
		XLPGE_LOCK(sc);
		sc->if_flags = ifp->if_flags;
		if (ifp->if_flags & IFF_UP) {
			/* full init only if not already running */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				nlm_xlpge_init(sc);
			else
				nlm_xlpge_port_enable(sc);
			nlm_xlpge_mac_set_rx_mode(sc);
			/* NOTE(review): link is forced UP here without
			 * consulting the PHY -- confirm intent */
			sc->link = NLM_LINK_UP;
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				nlm_xlpge_port_disable(sc);
			sc->link = NLM_LINK_DOWN;
		}
		XLPGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* silently ignored (error stays 0) when there is no PHY */
		if (sc->mii_bus != NULL) {
			mii = device_get_softc(sc->mii_bus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
796 
/*
 * Transmit one mbuf chain: build a P2P descriptor whose fragments cover
 * every (page-bounded) piece of the chain, then hand it to the NAE via
 * an FMN message.  The descriptor and mbuf chain are reclaimed later
 * when the hardware returns a free-back message on XLPGE_FB_VC.
 */
static int
xlpge_tx(struct ifnet *ifp, struct mbuf *mbuf_chain)
{
	struct nlm_fmn_msg	msg;
	struct xlpge_tx_desc	*p2p;
	struct nlm_xlpge_softc	*sc;
	struct mbuf	*m;
	vm_paddr_t      paddr;
	int		fbid, dst, pos, err;
	int		ret = 0, tx_msgstatus, retries;

	err = 0;
	if (mbuf_chain == NULL)
		return (0);

	sc = ifp->if_softc;
	p2p = NULL;
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) ||
	    ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		err = ENXIO;
		goto fail;
	}

	/* free a few in coming messages on the fb vc */
	xlp_handle_msg_vc(1 << XLPGE_FB_VC, 2);

	/* vfb id table is setup to map cpu to vc 3 of the cpu */
	fbid = nlm_cpuid();
	dst = sc->txq;

	pos = 0;
	p2p = uma_zalloc(nl_tx_desc_zone, M_NOWAIT);
	if (p2p == NULL) {
		printf("alloc fail\n");
		err = ENOBUFS;
		goto fail;
	}

	/* emit one P2D fragment per physically-contiguous piece; pieces
	 * are split at page boundaries since vtophys() is per-page */
	for (m = mbuf_chain; m != NULL; m = m->m_next) {
		vm_offset_t buf = (vm_offset_t) m->m_data;
		int	len = m->m_len;
		int	frag_sz;
		uint64_t desc;

		/*printf("m_data = %p len %d\n", m->m_data, len); */
		while (len) {
			/* last 3 slots are reserved (see below) */
			if (pos == XLP_NTXFRAGS - 3) {
				device_printf(sc->xlpge_dev,
				    "packet defrag %d\n",
				    m_length(mbuf_chain, NULL));
				err = ENOBUFS; /* TODO fix error */
				goto fail;
			}
			paddr = vtophys(buf);
			frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
			if (len < frag_sz)
				frag_sz = len;
			desc = nae_tx_desc(P2D_NEOP, 0, 127,
			    frag_sz, paddr);
			p2p->frag[pos] = htobe64(desc);
			pos++;
			len -= frag_sz;
			buf += frag_sz;
		}
	}

	KASSERT(pos != 0, ("Zero-length mbuf chain?\n"));

	/* Make the last one P2D EOP */
	p2p->frag[pos-1] |= htobe64((uint64_t)P2D_EOP << 62);

	/* stash useful pointers in the desc */
	p2p->frag[XLP_NTXFRAGS-3] = 0xf00bad;	/* marker */
	p2p->frag[XLP_NTXFRAGS-2] = (uintptr_t)p2p;
	p2p->frag[XLP_NTXFRAGS-1] = (uintptr_t)mbuf_chain;

	paddr = vtophys(p2p);
	msg.msg[0] = nae_tx_desc(P2P, 0, fbid, pos, paddr);

	for (retries = 16;  retries > 0; retries--) {
		ret = nlm_fmn_msgsend(dst, 1, FMN_SWCODE_NAE, &msg);
		if (ret == 0)
			return (0);
	}

fail:
	/* NOTE(review): if all retries fail, 'err' is still 0 here, so
	 * the function frees the chain yet returns success -- confirm
	 * whether a real error code should be set.
	 * NOTE(review): IFCOUNTER_IQDROPS is bumped on the transmit
	 * path; OQDROPS looks more appropriate -- confirm. */
	if (ret != 0) {
		tx_msgstatus = nlm_read_c2_txmsgstatus();
		if ((tx_msgstatus >> 24) & 0x1)
			device_printf(sc->xlpge_dev, "Transmit queue full - ");
		if ((tx_msgstatus >> 3) & 0x1)
			device_printf(sc->xlpge_dev, "ECC error - ");
		if ((tx_msgstatus >> 2) & 0x1)
			device_printf(sc->xlpge_dev, "Pending Sync - ");
		if ((tx_msgstatus >> 1) & 0x1)
			device_printf(sc->xlpge_dev,
			    "Insufficient input queue credits - ");
		if (tx_msgstatus & 0x1)
			device_printf(sc->xlpge_dev,
			    "Insufficient output queue credits - ");
	}
	device_printf(sc->xlpge_dev, "Send failed! err = %d\n", err);
	if (p2p)
		uma_zfree(nl_tx_desc_zone, p2p);
	m_freem(mbuf_chain);
	if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
	return (err);
}
905 
906 static int
nlm_xlpge_gmac_config_speed(struct nlm_xlpge_softc * sc)907 nlm_xlpge_gmac_config_speed(struct nlm_xlpge_softc *sc)
908 {
909 	struct mii_data *mii;
910 
911 	if (sc->type == XAUIC || sc->type == ILC)
912 		return (0);
913 
914 	if (sc->mii_bus) {
915 		mii = device_get_softc(sc->mii_bus);
916 		mii_pollstat(mii);
917 	}
918 
919 	return (0);
920 }
921 
922 static void
nlm_xlpge_port_disable(struct nlm_xlpge_softc * sc)923 nlm_xlpge_port_disable(struct nlm_xlpge_softc *sc)
924 {
925 	struct ifnet   *ifp;
926 
927 	ifp = sc->xlpge_if;
928 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
929 
930 	callout_stop(&sc->xlpge_callout);
931 	nlm_mac_disable(sc->base_addr, sc->block, sc->type, sc->port);
932 }
933 
934 static void
nlm_mii_pollstat(void * arg)935 nlm_mii_pollstat(void *arg)
936 {
937 	struct nlm_xlpge_softc *sc = (struct nlm_xlpge_softc *)arg;
938 	struct mii_data *mii = NULL;
939 
940 	if (sc->mii_bus) {
941 		mii = device_get_softc(sc->mii_bus);
942 
943 		KASSERT(mii != NULL, ("mii ptr is NULL"));
944 
945 		mii_pollstat(mii);
946 
947 		callout_reset(&sc->xlpge_callout, hz,
948 		    nlm_mii_pollstat, sc);
949 	}
950 }
951 
952 static void
nlm_xlpge_port_enable(struct nlm_xlpge_softc * sc)953 nlm_xlpge_port_enable(struct nlm_xlpge_softc *sc)
954 {
955 	if ((sc->type != SGMIIC) && (sc->type != XAUIC))
956 		return;
957 	nlm_mac_enable(sc->base_addr, sc->block, sc->type, sc->port);
958 	nlm_mii_pollstat((void *)sc);
959 }
960 
/*
 * if_init handler: bring the interface up if it is not already running.
 * Triggers a media change on the PHY, marks the interface RUNNING,
 * enables the MAC and starts the MII polling callout.
 */
static void
nlm_xlpge_init(void *addr)
{
	struct nlm_xlpge_softc *sc;
	struct ifnet   *ifp;
	struct mii_data *mii = NULL;

	sc = (struct nlm_xlpge_softc *)addr;
	ifp = sc->xlpge_if;

	/* already up: nothing to do */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	if (sc->mii_bus) {
		mii = device_get_softc(sc->mii_bus);
		mii_mediachg(mii);
	}

	nlm_xlpge_gmac_config_speed(sc);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	nlm_xlpge_port_enable(sc);

	/* start the callout */
	callout_reset(&sc->xlpge_callout, hz, nlm_mii_pollstat, sc);
}
987 
988 /*
989  * Read the MAC address from FDT or board eeprom.
990  */
991 static void
xlpge_read_mac_addr(struct nlm_xlpge_softc * sc)992 xlpge_read_mac_addr(struct nlm_xlpge_softc *sc)
993 {
994 
995 	xlpge_get_macaddr(sc->dev_addr);
996 	/* last octet is port specific */
997 	sc->dev_addr[5] += (sc->block * 4) + sc->port;
998 
999 	if (sc->type == SGMIIC)
1000 		nlm_nae_setup_mac_addr_sgmii(sc->base_addr, sc->block,
1001 		    sc->port, sc->type, sc->dev_addr);
1002 	else if (sc->type == XAUIC)
1003 		nlm_nae_setup_mac_addr_xaui(sc->base_addr, sc->block,
1004 		    sc->port, sc->type, sc->dev_addr);
1005 }
1006 
/*
 * ifmedia "change" callback.  Media selection is handled by the PHY
 * through miibus, so there is nothing to reprogram here.
 */
static int
xlpge_mediachange(struct ifnet *ifp)
{
	return (0);
}
1012 
1013 static void
xlpge_mediastatus(struct ifnet * ifp,struct ifmediareq * ifmr)1014 xlpge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1015 {
1016 	struct nlm_xlpge_softc *sc;
1017 	struct mii_data *md;
1018 
1019 	md = NULL;
1020 	sc = ifp->if_softc;
1021 
1022 	if (sc->mii_bus)
1023 		md = device_get_softc(sc->mii_bus);
1024 
1025 	ifmr->ifm_status = IFM_AVALID;
1026 	ifmr->ifm_active = IFM_ETHER;
1027 
1028 	if (sc->link == NLM_LINK_DOWN)
1029 		return;
1030 
1031 	if (md != NULL)
1032 		ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media;
1033 	ifmr->ifm_status |= IFM_ACTIVE;
1034 }
1035 
1036 static int
nlm_xlpge_ifinit(struct nlm_xlpge_softc * sc)1037 nlm_xlpge_ifinit(struct nlm_xlpge_softc *sc)
1038 {
1039 	struct ifnet *ifp;
1040 	device_t dev;
1041 	int port = sc->block * 4 + sc->port;
1042 
1043 	dev = sc->xlpge_dev;
1044 	ifp = sc->xlpge_if = if_alloc(IFT_ETHER);
1045 	/*(sc->network_sc)->ifp_ports[port].xlpge_if = ifp;*/
1046 	ifp_ports[port].xlpge_if = ifp;
1047 
1048 	if (ifp == NULL) {
1049 		device_printf(dev, "cannot if_alloc()\n");
1050 		return (ENOSPC);
1051 	}
1052 	ifp->if_softc = sc;
1053 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1054 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1055 	sc->if_flags = ifp->if_flags;
1056 	/*ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;*/
1057 	ifp->if_capabilities = 0;
1058 	ifp->if_capenable = ifp->if_capabilities;
1059 	ifp->if_ioctl = nlm_xlpge_ioctl;
1060 	ifp->if_init  = nlm_xlpge_init ;
1061 	ifp->if_hwassist = 0;
1062 	ifp->if_snd.ifq_drv_maxlen = NLM_XLPGE_TXQ_SIZE; /* TODO: make this a sysint */
1063 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1064 	IFQ_SET_READY(&ifp->if_snd);
1065 
1066 	ifmedia_init(&sc->xlpge_mii.mii_media, 0, xlpge_mediachange,
1067 	    xlpge_mediastatus);
1068 	ifmedia_add(&sc->xlpge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1069 	ifmedia_set(&sc->xlpge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1070 	sc->xlpge_mii.mii_media.ifm_media =
1071 	    sc->xlpge_mii.mii_media.ifm_cur->ifm_media;
1072 	xlpge_read_mac_addr(sc);
1073 
1074 	ether_ifattach(ifp, sc->dev_addr);
1075 
1076 	/* override if_transmit : per ifnet(9), do it after if_attach */
1077 	ifp->if_transmit = xlpge_tx;
1078 
1079 	return (0);
1080 }
1081 
/*
 * Device probe: the parent NAE bus only instantiates children for
 * ports that exist, so every child matches unconditionally.
 */
static int
nlm_xlpge_probe(device_t dev)
{
	return (BUS_PROBE_DEFAULT);
}
1087 
/*
 * Allocate one receive buffer for the NAE free-descriptor queue.
 *
 * An mbuf cluster is allocated and the first cache line is reserved
 * for driver metadata: a back-pointer to the owning mbuf followed by
 * the 0xf00bad magic used to validate ownership when the buffer comes
 * back from hardware.  m_adj() then advances m_data past that
 * metadata; the returned pointer is what the hardware DMAs into.
 * Returns NULL when no cluster is available.
 */
static void *
get_buf(void)
{
	struct mbuf     *m_new;
	uint64_t        *md;
#ifdef INVARIANTS
	vm_paddr_t      temp1, temp2;
#endif

	if ((m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (NULL);
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	/* metadata must occupy its own cache line ahead of the data */
	KASSERT(((uintptr_t)m_new->m_data & (NAE_CACHELINE_SIZE - 1)) == 0,
	    ("m_new->m_data is not cacheline aligned"));
	md = (uint64_t *)m_new->m_data;
	md[0] = (intptr_t)m_new;        /* Back Ptr */
	md[1] = 0xf00bad;               /* ownership magic */
	m_adj(m_new, NAE_CACHELINE_SIZE);

#ifdef INVARIANTS
	/* hardware requires a physically contiguous buffer */
	temp1 = vtophys((vm_offset_t) m_new->m_data);
	temp2 = vtophys((vm_offset_t) m_new->m_data + 1536);
	KASSERT((temp1 + 1536) == temp2,
	    ("Alloced buffer is not contiguous"));
#endif
	return ((void *)m_new->m_data);
}
1115 
1116 static void
nlm_xlpge_mii_init(device_t dev,struct nlm_xlpge_softc * sc)1117 nlm_xlpge_mii_init(device_t dev, struct nlm_xlpge_softc *sc)
1118 {
1119 	int error;
1120 
1121 	error = mii_attach(dev, &sc->mii_bus, sc->xlpge_if,
1122 			xlpge_mediachange, xlpge_mediastatus,
1123 			BMSR_DEFCAPMASK, sc->phy_addr, MII_OFFSET_ANY, 0);
1124 
1125 	if (error) {
1126 		device_printf(dev, "attaching PHYs failed\n");
1127 		sc->mii_bus = NULL;
1128 	}
1129 
1130 	if (sc->mii_bus != NULL) {
1131 		/* enable MDIO interrupts in the PHY */
1132 		/* XXXJC: TODO */
1133 	}
1134 }
1135 
1136 static int
xlpge_stats_sysctl(SYSCTL_HANDLER_ARGS)1137 xlpge_stats_sysctl(SYSCTL_HANDLER_ARGS)
1138 {
1139 	struct nlm_xlpge_softc *sc;
1140 	uint32_t val;
1141 	int reg, field;
1142 
1143 	sc = arg1;
1144 	field = arg2;
1145 	reg = SGMII_STATS_MLR(sc->block, sc->port) + field;
1146 	val = nlm_read_nae_reg(sc->base_addr, reg);
1147 	return (sysctl_handle_int(oidp, &val, 0, req));
1148 }
1149 
/*
 * Register one read-only sysctl node per hardware MAC statistics
 * counter under this device's sysctl tree.  All nodes share the
 * xlpge_stats_sysctl handler; arg2 carries the per-counter register
 * offset (the nlm_sgmii_stats_* values).
 */
static void
nlm_xlpge_setup_stats_sysctl(device_t dev, struct nlm_xlpge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

/* helper: add one CTLTYPE_UINT proc node for a hardware counter */
#define XLPGE_STAT(name, offset, desc)				\
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, name,		\
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,	\
	    sc, offset,	xlpge_stats_sysctl, "IU", desc)

	XLPGE_STAT("tr127", nlm_sgmii_stats_tr127, "TxRx 64 - 127 Bytes");
	XLPGE_STAT("tr255", nlm_sgmii_stats_tr255, "TxRx 128 - 255 Bytes");
	XLPGE_STAT("tr511", nlm_sgmii_stats_tr511, "TxRx 256 - 511 Bytes");
	XLPGE_STAT("tr1k",  nlm_sgmii_stats_tr1k,  "TxRx 512 - 1023 Bytes");
	XLPGE_STAT("trmax", nlm_sgmii_stats_trmax, "TxRx 1024 - 1518 Bytes");
	XLPGE_STAT("trmgv", nlm_sgmii_stats_trmgv, "TxRx 1519 - 1522 Bytes");

	XLPGE_STAT("rbyt", nlm_sgmii_stats_rbyt, "Rx Bytes");
	XLPGE_STAT("rpkt", nlm_sgmii_stats_rpkt, "Rx Packets");
	XLPGE_STAT("rfcs", nlm_sgmii_stats_rfcs, "Rx FCS Error");
	XLPGE_STAT("rmca", nlm_sgmii_stats_rmca, "Rx Multicast Packets");
	XLPGE_STAT("rbca", nlm_sgmii_stats_rbca, "Rx Broadcast Packets");
	XLPGE_STAT("rxcf", nlm_sgmii_stats_rxcf, "Rx Control Frames");
	XLPGE_STAT("rxpf", nlm_sgmii_stats_rxpf, "Rx Pause Frames");
	XLPGE_STAT("rxuo", nlm_sgmii_stats_rxuo, "Rx Unknown Opcode");
	XLPGE_STAT("raln", nlm_sgmii_stats_raln, "Rx Alignment Errors");
	XLPGE_STAT("rflr", nlm_sgmii_stats_rflr, "Rx Framelength Errors");
	XLPGE_STAT("rcde", nlm_sgmii_stats_rcde, "Rx Code Errors");
	XLPGE_STAT("rcse", nlm_sgmii_stats_rcse, "Rx Carrier Sense Errors");
	XLPGE_STAT("rund", nlm_sgmii_stats_rund, "Rx Undersize Packet Errors");
	XLPGE_STAT("rovr", nlm_sgmii_stats_rovr, "Rx Oversize Packet Errors");
	XLPGE_STAT("rfrg", nlm_sgmii_stats_rfrg, "Rx Fragments");
	XLPGE_STAT("rjbr", nlm_sgmii_stats_rjbr, "Rx Jabber");

	XLPGE_STAT("tbyt", nlm_sgmii_stats_tbyt, "Tx Bytes");
	XLPGE_STAT("tpkt", nlm_sgmii_stats_tpkt, "Tx Packets");
	XLPGE_STAT("tmca", nlm_sgmii_stats_tmca, "Tx Multicast Packets");
	XLPGE_STAT("tbca", nlm_sgmii_stats_tbca, "Tx Broadcast Packets");
	XLPGE_STAT("txpf", nlm_sgmii_stats_txpf, "Tx Pause Frame");
	XLPGE_STAT("tdfr", nlm_sgmii_stats_tdfr, "Tx Deferral Packets");
	XLPGE_STAT("tedf", nlm_sgmii_stats_tedf, "Tx Excessive Deferral Pkts");
	XLPGE_STAT("tscl", nlm_sgmii_stats_tscl, "Tx Single Collisions");
	XLPGE_STAT("tmcl", nlm_sgmii_stats_tmcl, "Tx Multiple Collisions");
	XLPGE_STAT("tlcl", nlm_sgmii_stats_tlcl, "Tx Late Collision Pkts");
	XLPGE_STAT("txcl", nlm_sgmii_stats_txcl, "Tx Excessive Collisions");
	XLPGE_STAT("tncl", nlm_sgmii_stats_tncl, "Tx Total Collisions");
	XLPGE_STAT("tjbr", nlm_sgmii_stats_tjbr, "Tx Jabber Frames");
	XLPGE_STAT("tfcs", nlm_sgmii_stats_tfcs, "Tx FCS Errors");
	XLPGE_STAT("txcf", nlm_sgmii_stats_txcf, "Tx Control Frames");
	XLPGE_STAT("tovr", nlm_sgmii_stats_tovr, "Tx Oversize Frames");
	XLPGE_STAT("tund", nlm_sgmii_stats_tund, "Tx Undersize Frames");
	XLPGE_STAT("tfrg", nlm_sgmii_stats_tfrg, "Tx Fragments");
#undef XLPGE_STAT
}
1210 
/*
 * Device attach for one network port: copy port parameters from the
 * parent NAE's ivars, set link-state defaults, seed the hardware
 * free-descriptor queue, create the ifnet, attach the PHY and expose
 * the statistics sysctls.
 */
static int
nlm_xlpge_attach(device_t dev)
{
	struct xlp_port_ivars *pv;
	struct nlm_xlpge_softc *sc;
	int port;

	pv = device_get_ivars(dev);
	sc = device_get_softc(dev);
	sc->xlpge_dev = dev;
	sc->mii_bus = NULL;
	sc->block = pv->block;
	sc->node = pv->node;
	sc->port = pv->port;
	sc->type = pv->type;
	sc->xlpge_if = NULL;
	sc->phy_addr = pv->phy_addr;
	sc->mdio_bus = pv->mdio_bus;
	sc->portcfg = nae_port_config;
	sc->hw_parser_en = pv->hw_parser_en;

	/* default settings until the PHY reports otherwise */
	sc->speed = NLM_SGMII_SPEED_10;
	sc->duplexity = NLM_SGMII_DUPLEX_FULL;
	sc->link = NLM_LINK_DOWN;
	sc->flowctrl = NLM_FLOWCTRL_DISABLED;

	/* inherit NAE-wide settings from the parent device */
	sc->network_sc = device_get_softc(device_get_parent(dev));
	sc->base_addr = sc->network_sc->base;
	sc->prepad_en = sc->network_sc->prepad_en;
	sc->prepad_size = sc->network_sc->prepad_size;

	callout_init(&sc->xlpge_callout, 1);

	XLPGE_LOCK_INIT(sc, device_get_nameunit(dev));

	/* flat port index across all blocks (4 ports per block) */
	port = (sc->block*4)+sc->port;
	sc->nfree_desc = nae_port_config[port].num_free_descs;
	sc->txq = nae_port_config[port].txq;
	sc->rxfreeq = nae_port_config[port].rxfreeq;

	/* hand the hardware its initial pool of receive buffers */
	nlm_xlpge_submit_rx_free_desc(sc, sc->nfree_desc);
	if (sc->hw_parser_en)
		nlm_enable_hardware_parser_per_port(sc->base_addr,
		    sc->block, sc->port);

	/* NOTE(review): nlm_xlpge_ifinit() can fail (ENOSPC) but its
	 * result is ignored here - confirm whether attach should fail. */
	nlm_xlpge_ifinit(sc);
	ifp_ports[port].xlpge_sc = sc;
	nlm_xlpge_mii_init(dev, sc);

	nlm_xlpge_setup_stats_sysctl(dev, sc);

	return (0);
}
1265 
/* Device detach: not supported; resources are never released. */
static int
nlm_xlpge_detach(device_t dev)
{
	return (0);
}
1271 
/* Suspend hook: nothing to save; reports success unconditionally. */
static int
nlm_xlpge_suspend(device_t dev)
{
	return (0);
}
1277 
/* Resume hook: nothing to restore; reports success unconditionally. */
static int
nlm_xlpge_resume(device_t dev)
{
	return (0);
}
1283 
/* Shutdown hook: the MAC is left as-is; reports success. */
static int
nlm_xlpge_shutdown(device_t dev)
{
	return (0);
}
1289 
1290 /*
1291  * miibus function with custom implementation
1292  */
1293 static int
nlm_xlpge_mii_read(device_t dev,int phyaddr,int regidx)1294 nlm_xlpge_mii_read(device_t dev, int phyaddr, int regidx)
1295 {
1296 	struct nlm_xlpge_softc *sc;
1297 	int val;
1298 
1299 	sc = device_get_softc(dev);
1300 	if (sc->type == SGMIIC)
1301 		val = nlm_gmac_mdio_read(sc->base_addr, sc->mdio_bus,
1302 		    BLOCK_7, LANE_CFG, phyaddr, regidx);
1303 	else
1304 		val = 0xffff;
1305 
1306 	return (val);
1307 }
1308 
1309 static int
nlm_xlpge_mii_write(device_t dev,int phyaddr,int regidx,int val)1310 nlm_xlpge_mii_write(device_t dev, int phyaddr, int regidx, int val)
1311 {
1312 	struct nlm_xlpge_softc *sc;
1313 
1314 	sc = device_get_softc(dev);
1315 	if (sc->type == SGMIIC)
1316 		nlm_gmac_mdio_write(sc->base_addr, sc->mdio_bus, BLOCK_7,
1317 		    LANE_CFG, phyaddr, regidx, val);
1318 
1319 	return (0);
1320 }
1321 
1322 static void
nlm_xlpge_mii_statchg(device_t dev)1323 nlm_xlpge_mii_statchg(device_t dev)
1324 {
1325 	struct nlm_xlpge_softc *sc;
1326 	struct mii_data *mii;
1327 	char *speed, *duplexity;
1328 
1329 	sc = device_get_softc(dev);
1330 	if (sc->mii_bus == NULL)
1331 		return;
1332 
1333 	mii = device_get_softc(sc->mii_bus);
1334 	if (mii->mii_media_status & IFM_ACTIVE) {
1335 		if (IFM_SUBTYPE(mii->mii_media_active) ==  IFM_10_T) {
1336 			sc->speed = NLM_SGMII_SPEED_10;
1337 			speed =  "10Mbps";
1338 		} else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
1339 			sc->speed = NLM_SGMII_SPEED_100;
1340 			speed = "100Mbps";
1341 		} else { /* default to 1G */
1342 			sc->speed = NLM_SGMII_SPEED_1000;
1343 			speed =  "1Gbps";
1344 		}
1345 
1346 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1347 			sc->duplexity = NLM_SGMII_DUPLEX_FULL;
1348 			duplexity =  "full";
1349 		} else {
1350 			sc->duplexity = NLM_SGMII_DUPLEX_HALF;
1351 			duplexity = "half";
1352 		}
1353 
1354 		printf("Port [%d, %d] setup with speed=%s duplex=%s\n",
1355 		    sc->block, sc->port, speed, duplexity);
1356 
1357 		nlm_nae_setup_mac(sc->base_addr, sc->block, sc->port, 0, 1, 1,
1358 		    sc->speed, sc->duplexity);
1359 	}
1360 }
1361 
1362 /*
1363  * xlpge support function implementations
1364  */
1365 static void
nlm_xlpge_release_mbuf(uint64_t paddr)1366 nlm_xlpge_release_mbuf(uint64_t paddr)
1367 {
1368 	uint64_t	mag, desc, mbuf;
1369 
1370 	paddr += (XLP_NTXFRAGS - 3) * sizeof(uint64_t);
1371 	mag = nlm_paddr_ld(paddr);
1372 	desc = nlm_paddr_ld(paddr + sizeof(uint64_t));
1373 	mbuf = nlm_paddr_ld(paddr + 2 * sizeof(uint64_t));
1374 
1375 	if (mag != 0xf00bad) {
1376 		/* somebody else packet Error - FIXME in intialization */
1377 		printf("cpu %d: ERR Tx packet paddr %jx, mag %jx, desc %jx mbuf %jx\n",
1378 		    nlm_cpuid(), (uintmax_t)paddr, (uintmax_t)mag,
1379 		    (intmax_t)desc, (uintmax_t)mbuf);
1380 		return;
1381 	}
1382 	m_freem((struct mbuf *)(uintptr_t)mbuf);
1383 	uma_zfree(nl_tx_desc_zone, (void *)(uintptr_t)desc);
1384 }
1385 
/*
 * Deliver one received frame to the network stack.  'paddr' is the
 * physical address the NAE DMAed into; the driver metadata (mbuf
 * back-pointer and 0xf00bad magic, written by get_buf()) lives in
 * the cache line immediately before it.  Frames whose magic does not
 * match are dropped with a diagnostic.
 */
static void
nlm_xlpge_rx(struct nlm_xlpge_softc *sc, int port, vm_paddr_t paddr, int len)
{
	struct ifnet	*ifp;
	struct mbuf	*m;
	vm_offset_t	temp;
	unsigned long	mag;
	int		prepad_size;

	ifp = sc->xlpge_if;
	/* recover mbuf back-pointer and magic from the metadata line */
	temp = nlm_paddr_ld(paddr - NAE_CACHELINE_SIZE);
	mag = nlm_paddr_ld(paddr - NAE_CACHELINE_SIZE + sizeof(uint64_t));

	m = (struct mbuf *)(intptr_t)temp;
	if (mag != 0xf00bad) {
		/* not a buffer we posted - FIXME in initialization */
		printf("cpu %d: ERR Rx packet paddr %jx, temp %p, mag %lx\n",
		    nlm_cpuid(), (uintmax_t)paddr, (void *)temp, mag);
		return;
	}

	m->m_pkthdr.rcvif = ifp;

#ifdef DUMP_PACKET
	{
		/* hex-dump the first 64 bytes (plus prepad, if enabled) */
		int     i = 0, j = 64;
		unsigned char *buf = (char *)m->m_data;
		printf("(cpu_%d: nlge_rx, !RX_COPY) Rx Packet: length=%d\n",
				nlm_cpuid(), len);
		if (len < j)
			j = len;
		if (sc->prepad_en)
			j += ((sc->prepad_size + 1) * 16);
		for (i = 0; i < j; i++) {
			if (i && (i % 16) == 0)
				printf("\n");
			printf("%02x ", buf[i]);
		}
		printf("\n");
	}
#endif

	/* skip the hardware-prepended parser data, when enabled */
	if (sc->prepad_en) {
		prepad_size = ((sc->prepad_size + 1) * 16);
		m->m_data += prepad_size;
		m->m_pkthdr.len = m->m_len = (len - prepad_size);
	} else
		m->m_pkthdr.len = m->m_len = len;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#ifdef XLP_DRIVER_LOOPBACK
	/* loopback test mode: reflect frames from ports 16/17 back out */
	if (port == 16 || port == 17)
		(*ifp->if_input)(ifp, m);
	else
		xlpge_tx(ifp, m);
#else
	(*ifp->if_input)(ifp, m);
#endif
}
1445 
1446 void
nlm_xlpge_submit_rx_free_desc(struct nlm_xlpge_softc * sc,int num)1447 nlm_xlpge_submit_rx_free_desc(struct nlm_xlpge_softc *sc, int num)
1448 {
1449 	int i, size, ret, n;
1450 	struct nlm_fmn_msg msg;
1451 	void *ptr;
1452 
1453 	for(i = 0; i < num; i++) {
1454 		memset(&msg, 0, sizeof(msg));
1455 		ptr = get_buf();
1456 		if (!ptr) {
1457 			device_printf(sc->xlpge_dev, "Cannot allocate mbuf\n");
1458 			break;
1459 		}
1460 
1461 		msg.msg[0] = vtophys(ptr);
1462 		if (msg.msg[0] == 0) {
1463 			printf("Bad ptr for %p\n", ptr);
1464 			break;
1465 		}
1466 		size = 1;
1467 
1468 		n = 0;
1469 		while (1) {
1470 			/* on success returns 1, else 0 */
1471 			ret = nlm_fmn_msgsend(sc->rxfreeq, size, 0, &msg);
1472 			if (ret == 0)
1473 				break;
1474 			if (n++ > 10000) {
1475 				printf("Too many credit fails for send free desc\n");
1476 				break;
1477 			}
1478 		}
1479 	}
1480 }
1481 
/*
 * FMN message-ring handler for the NAE: a 1-word message is a
 * transmit-complete notification, a longer message is a received
 * frame.  The originating port is recovered from the message's
 * context field via the cntx2port[] table.
 */
void
nlm_xlpge_msgring_handler(int vc, int size, int code, int src_id,
    struct nlm_fmn_msg *msg, void *data)
{
	uint64_t phys_addr;
	struct nlm_xlpnae_softc *sc;
	struct nlm_xlpge_softc *xlpge_sc;
	struct ifnet *ifp;
	uint32_t context;
	uint32_t port = 0;
	uint32_t length;

	sc = (struct nlm_xlpnae_softc *)data;
	KASSERT(sc != NULL, ("Null sc in msgring handler"));

	if (size == 1) { /* process transmit complete */
		/* low 40 bits: physical address of the TX descriptor */
		phys_addr = msg->msg[0] & 0xffffffffffULL;

		/* context is SGMII_RCV_CONTEXT_NUM + three bit vlan type
		 * or vlan priority
		 */
		context = (msg->msg[0] >> 40) & 0x3fff;
		port = cntx2port[context];

		if (port >= XLP_MAX_PORTS) {
			printf("%s:%d Bad port %d (context=%d)\n",
				__func__, __LINE__, port, context);
			return;
		}
		ifp = ifp_ports[port].xlpge_if;
		xlpge_sc = ifp_ports[port].xlpge_sc;

		/* free the mbuf and software descriptor for this frame */
		nlm_xlpge_release_mbuf(phys_addr);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

	} else if (size > 1) { /* Receive packet */
		/* word 1: 40-bit buffer address (cacheline aligned)
		 * and 14-bit frame length; drop the trailing CRC */
		phys_addr = msg->msg[1] & 0xffffffffc0ULL;
		length = (msg->msg[1] >> 40) & 0x3fff;
		length -= MAC_CRC_LEN;

		/* context is SGMII_RCV_CONTEXT_NUM + three bit vlan type
		 * or vlan priority
		 */
		context = (msg->msg[1] >> 54) & 0x3ff;
		port = cntx2port[context];

		if (port >= XLP_MAX_PORTS) {
			printf("%s:%d Bad port %d (context=%d)\n",
				__func__, __LINE__, port, context);
			return;
		}

		ifp = ifp_ports[port].xlpge_if;
		xlpge_sc = ifp_ports[port].xlpge_sc;

		nlm_xlpge_rx(xlpge_sc, port, phys_addr, length);
		/* return back a free descriptor to NA */
		nlm_xlpge_submit_rx_free_desc(xlpge_sc, 1);
	}
}
1543