xref: /f-stack/freebsd/mips/cavium/octe/ethernet.c (revision 22ce4aff)
1 /*************************************************************************
2 SPDX-License-Identifier: BSD-3-Clause
3 
4 Copyright (c) 2003-2007  Cavium Networks ([email protected]). All rights
5 reserved.
6 
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are
9 met:
10 
11     * Redistributions of source code must retain the above copyright
12       notice, this list of conditions and the following disclaimer.
13 
14     * Redistributions in binary form must reproduce the above
15       copyright notice, this list of conditions and the following
16       disclaimer in the documentation and/or other materials provided
17       with the distribution.
18 
19     * Neither the name of Cavium Networks nor the names of
20       its contributors may be used to endorse or promote products
21       derived from this software without specific prior written
22       permission.
23 
24 This Software, including technical data, may be subject to U.S. export  control laws, including the U.S. Export Administration Act and its  associated regulations, and may be subject to export or import  regulations in other countries.
25 
26 TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
27 AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
28 *************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/conf.h>
37 #include <sys/endian.h>
38 #include <sys/kernel.h>
39 #include <sys/rman.h>
40 #include <sys/mbuf.h>
41 #include <sys/socket.h>
42 #include <sys/module.h>
43 #include <sys/smp.h>
44 #include <sys/taskqueue.h>
45 
46 #include <net/ethernet.h>
47 #include <net/if.h>
48 #include <net/if_var.h>
49 #include <net/if_types.h>
50 
51 #include "wrapper-cvmx-includes.h"
52 #include "ethernet-headers.h"
53 
54 #include "octebusvar.h"
55 
56 /*
57  * XXX/juli
58  * Convert 0444 to tunables, 0644 to sysctls.
59  */
60 #if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
61 int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
62 #else
63 int num_packet_buffers = 2048;
64 #endif
65 TUNABLE_INT("hw.octe.num_packet_buffers", &num_packet_buffers);
66 /*
67 		 "\t\tNumber of packet buffers to allocate and store in the\n"
68 		 "\t\tFPA. By default, 2048 packet buffers are used unless\n"
69 		 "\t\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined." */
70 
71 int pow_receive_group = 15;
72 TUNABLE_INT("hw.octe.pow_receive_group", &pow_receive_group);
73 /*
74 		 "\t\tPOW group to receive packets from. All ethernet hardware\n"
75 		 "\t\twill be configured to send incoming packets to this POW\n"
76 		 "\t\tgroup. Also any other software can submit packets to this\n"
77 		 "\t\tgroup for the kernel to process." */
78 
79 /**
80  * Periodic timer to check auto negotiation
81  */
82 static struct callout cvm_oct_poll_timer;
83 
84 /**
85  * Array of every ethernet device owned by this driver indexed by
86  * the ipd input port number.
87  */
88 struct ifnet *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
89 
90 /**
91  * Task to handle link status changes.
92  */
93 static struct taskqueue *cvm_oct_link_taskq;
94 
95 /*
96  * Number of buffers in output buffer pool.
97  */
98 static int cvm_oct_num_output_buffers;
99 
100 /**
101  * Function to update link status.
102  */
cvm_oct_update_link(void * context,int pending)103 static void cvm_oct_update_link(void *context, int pending)
104 {
105 	cvm_oct_private_t *priv = (cvm_oct_private_t *)context;
106 	struct ifnet *ifp = priv->ifp;
107 	cvmx_helper_link_info_t link_info;
108 
109 	link_info.u64 = priv->link_info;
110 
111 	if (link_info.s.link_up) {
112 		if_link_state_change(ifp, LINK_STATE_UP);
113 		DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
114 			   if_name(ifp), link_info.s.speed,
115 			   (link_info.s.full_duplex) ? "Full" : "Half",
116 			   priv->port, priv->queue);
117 	} else {
118 		if_link_state_change(ifp, LINK_STATE_DOWN);
119 		DEBUGPRINT("%s: Link down\n", if_name(ifp));
120 	}
121 	priv->need_link_update = 0;
122 }
123 
124 /**
125  * Periodic timer tick for slow management operations
126  *
127  * @param arg    Device to check
128  */
cvm_do_timer(void * arg)129 static void cvm_do_timer(void *arg)
130 {
131 	static int port;
132 	static int updated;
133 	if (port < CVMX_PIP_NUM_INPUT_PORTS) {
134 		if (cvm_oct_device[port]) {
135 			int queues_per_port;
136 			int qos;
137 			cvm_oct_private_t *priv = (cvm_oct_private_t *)cvm_oct_device[port]->if_softc;
138 
139 			cvm_oct_common_poll(priv->ifp);
140 			if (priv->need_link_update) {
141 				updated++;
142 				taskqueue_enqueue(cvm_oct_link_taskq, &priv->link_task);
143 			}
144 
145 			queues_per_port = cvmx_pko_get_num_queues(port);
146 			/* Drain any pending packets in the free list */
147 			for (qos = 0; qos < queues_per_port; qos++) {
148 				if (_IF_QLEN(&priv->tx_free_queue[qos]) > 0) {
149 					IF_LOCK(&priv->tx_free_queue[qos]);
150 					while (_IF_QLEN(&priv->tx_free_queue[qos]) > cvmx_fau_fetch_and_add32(priv->fau+qos*4, 0)) {
151 						struct mbuf *m;
152 
153 						_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
154 						m_freem(m);
155 					}
156 					IF_UNLOCK(&priv->tx_free_queue[qos]);
157 
158 					/*
159 					 * XXX locking!
160 					 */
161 					priv->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
162 				}
163 			}
164 		}
165 		port++;
166 		/* Poll the next port in a 50th of a second.
167 		   This spreads the polling of ports out a little bit */
168 		callout_reset(&cvm_oct_poll_timer, hz / 50, cvm_do_timer, NULL);
169 	} else {
170 		port = 0;
171 		/* If any updates were made in this run, continue iterating at
172 		 * 1/50th of a second, so that if a link has merely gone down
173 		 * temporarily (e.g. because of interface reinitialization) it
174 		 * will not be forced to stay down for an entire second.
175 		 */
176 		if (updated > 0) {
177 			updated = 0;
178 			callout_reset(&cvm_oct_poll_timer, hz / 50, cvm_do_timer, NULL);
179 		} else {
180 			/* All ports have been polled. Start the next iteration through
181 			   the ports in one second */
182 			callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);
183 		}
184 	}
185 }
186 
187 /**
188  * Configure common hardware for all interfaces
189  */
/**
 * Configure hardware shared by all octe interfaces: the FPA buffer
 * pools, RED, the MII/SMI interface and the POW receive interrupt.
 *
 * Fix: the original leaked the allocated IRQ resource (and left
 * sc->sc_rx_irq dangling) when bus_setup_intr() failed; the resource is
 * now released on that error path.
 *
 * @param bus    The octebus device used for interrupt allocation.
 */
static void cvm_oct_configure_common_hw(device_t bus)
{
	struct octebus_softc *sc;
	int pko_queues;
	int error;
	int rid;

	sc = device_get_softc(bus);

	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) {
		/*
		 * If the FPA uses different pools for output buffers and
		 * packets, size the output buffer pool based on the number
		 * of PKO queues.
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN38XX))
			pko_queues = 128;
		else if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
			pko_queues = 32;
		else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
			pko_queues = 32;
		else
			pko_queues = 256;

		cvm_oct_num_output_buffers = 4 * pko_queues;
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
				     cvm_oct_num_output_buffers);
	}

	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers/4,
				      num_packet_buffers/8);

	/* Enable the MII interface */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
		cvmx_write_csr(CVMX_SMI_EN, 1);

	/* Register an IRQ handler to receive POW interrupts */
	rid = 0;
	sc->sc_rx_irq = bus_alloc_resource(bus, SYS_RES_IRQ, &rid,
					   OCTEON_IRQ_WORKQ0 + pow_receive_group,
					   OCTEON_IRQ_WORKQ0 + pow_receive_group,
					   1, RF_ACTIVE);
	if (sc->sc_rx_irq == NULL) {
		device_printf(bus, "could not allocate workq irq");
		return;
	}

	error = bus_setup_intr(bus, sc->sc_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
			       cvm_oct_do_interrupt, NULL, cvm_oct_device,
			       &sc->sc_rx_intr_cookie);
	if (error != 0) {
		device_printf(bus, "could not setup workq irq");
		/* Don't leak the IRQ resource allocated above. */
		bus_release_resource(bus, SYS_RES_IRQ, rid, sc->sc_rx_irq);
		sc->sc_rx_irq = NULL;
		return;
	}

#ifdef SMP
	{
		cvmx_ciu_intx0_t en;
		int core;

		/* Mirror the POW group enable to every other core. */
		CPU_FOREACH(core) {
			if (core == PCPU_GET(cpuid))
				continue;

			en.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(core*2));
			en.s.workq |= (1<<pow_receive_group);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(core*2), en.u64);
		}
	}
#endif
}
269 
270 /**
271  * Free a work queue entry received in a intercept callback.
272  *
273  * @param work_queue_entry
274  *               Work queue entry to free
275  * @return Zero on success, Negative on failure.
276  */
cvm_oct_free_work(void * work_queue_entry)277 int cvm_oct_free_work(void *work_queue_entry)
278 {
279 	cvmx_wqe_t *work = work_queue_entry;
280 
281 	int segments = work->word2.s.bufs;
282 	cvmx_buf_ptr_t segment_ptr = work->packet_ptr;
283 
284 	while (segments--) {
285 		cvmx_buf_ptr_t next_ptr = *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(segment_ptr.s.addr-8);
286 		if (__predict_false(!segment_ptr.s.i))
287 			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr), segment_ptr.s.pool, DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE/128));
288 		segment_ptr = next_ptr;
289 	}
290 	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
291 
292 	return 0;
293 }
294 
295 /**
296  * Module/ driver initialization. Creates the linux network
297  * devices.
298  *
299  * @return Zero on success
300  */
/**
 * Driver initialization: configures the shared packet I/O hardware and
 * creates one network device per Octeon port.
 *
 * @param bus    Parent octebus device.
 * @return Zero on success
 */
int cvm_oct_init_module(device_t bus)
{
	device_t dev;
	int unit;
	int n_interfaces;
	int iface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int q;

	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw(bus);

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	n_interfaces = cvmx_helper_get_number_of_interfaces();
	for (iface = 0; iface < n_interfaces; iface++) {
		int n_ports = cvmx_helper_ports_on_interface(iface);
		int idx;

		for (idx = 0; idx < n_ports; idx++) {
			cvmx_pip_prt_tagx_t tag_cfg;
			int pkind = cvmx_helper_get_ipd_port(iface, idx);

			tag_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pkind));
			tag_cfg.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(pkind), tag_cfg.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/* Taskqueue that services deferred link-state changes. */
	cvm_oct_link_taskq = taskqueue_create("octe link", M_NOWAIT,
	    taskqueue_thread_enqueue, &cvm_oct_link_taskq);
	taskqueue_start_threads(&cvm_oct_link_taskq, 1, PI_NET,
	    "octe link taskq");

	/* Initialize the FAU used for counting packet buffers that need to be freed */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	unit = 0;
	n_interfaces = cvmx_helper_get_number_of_interfaces();
	for (iface = 0; iface < n_interfaces; iface++) {
		cvmx_helper_interface_mode_t mode = cvmx_helper_interface_get_mode(iface);
		int n_ports = cvmx_helper_ports_on_interface(iface);
		int port;

		for (port = cvmx_helper_get_ipd_port(iface, 0);
		     port < cvmx_helper_get_ipd_port(iface, n_ports);
		     unit++, port++) {
			cvm_oct_private_t *priv;
			struct ifnet *ifp = NULL;

			dev = BUS_ADD_CHILD(bus, 0, "octe", unit);
			if (dev != NULL)
				ifp = if_alloc(IFT_ETHER);
			if (dev == NULL || ifp == NULL) {
				printf("Failed to allocate ethernet device for interface %d port %d\n", iface, port);
				continue;
			}

			/* Initialize the device private structure. */
			device_probe(dev);
			priv = device_get_softc(dev);
			priv->dev = dev;
			priv->ifp = ifp;
			priv->imode = mode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			/* Carve this port's per-queue FAU counters out of the
			 * region below FAU_NUM_PACKET_BUFFERS_TO_FREE. */
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (q = 0; q < cvmx_pko_get_num_queues(port); q++)
				cvmx_fau_atomic_write32(priv->fau + q * 4, 0);
			TASK_INIT(&priv->link_task, 0, cvm_oct_update_link, priv);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon NPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				priv->init = cvm_oct_xaui_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon XAUI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon LOOP Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->init = cvm_oct_sgmii_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon SGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				priv->init = cvm_oct_spi_init;
				priv->uninit = cvm_oct_spi_uninit;
				device_set_desc(dev, "Cavium Octeon SPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon RGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon GMII Ethernet");
				break;
			}

			ifp->if_softc = priv;

			if (priv->init == NULL) {
				printf("octe%d: unsupported device type interface %d, port %d\n",
				       unit, iface, priv->port);
				if_free(ifp);
			} else if (priv->init(ifp) != 0) {
				printf("octe%d: failed to register device for interface %d, port %d\n",
				       unit, iface, priv->port);
				if_free(ifp);
			} else {
				cvm_oct_device[priv->port] = ifp;
				/* Reserve the FAU region this port now owns. */
				fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t);
			}
		}
	}

	if (INTERRUPT_LIMIT) {
		/* Set the POW timer rate to give an interrupt at most INTERRUPT_LIMIT times per second */
		cvmx_write_csr(CVMX_POW_WQ_INT_PC, cvmx_clock_get_rate(CVMX_CLOCK_CORE)/((INTERRUPT_LIMIT+1)*16*256)<<8);

		/* Enable POW timer interrupt. It will count when there are packets available */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1ful<<24);
	} else {
		/* Enable POW interrupt when our port has at least one packet */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
	}

	callout_init(&cvm_oct_poll_timer, 1);
	callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);

	return 0;
}
460 
461 /**
462  * Module / driver shutdown
463  *
464  * @return Zero on success
465  */
/**
 * Driver shutdown: quiesces the receive path, tears down each
 * interface, and drains the hardware buffer pools.
 *
 * @param bus    Parent octebus device.
 */
void cvm_oct_cleanup_module(device_t bus)
{
	struct octebus_softc *sc = device_get_softc(bus);
	int i;

	/* Disable POW interrupt */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	/* Free the interrupt handler */
	bus_teardown_intr(bus, sc->sc_rx_irq, sc->sc_rx_intr_cookie);

	callout_stop(&cvm_oct_poll_timer);
	cvm_oct_rx_shutdown();

	cvmx_helper_shutdown_packet_io_global();

	/* Free the ethernet devices */
	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i] == NULL)
			continue;
		cvm_oct_tx_shutdown(cvm_oct_device[i]);
#if 0
		unregister_netdev(cvm_oct_device[i]);
		kfree(cvm_oct_device[i]);
#else
		/* Proper detach is not implemented yet. */
		panic("%s: need to detach and free interface.", __func__);
#endif
		cvm_oct_device[i] = NULL;
	}

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, num_packet_buffers);

	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL, CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, cvm_oct_num_output_buffers);

	/* Disable FPA, all buffers are free, not done by helper shutdown. */
	cvmx_fpa_disable();
}
505