/*-
 * Copyright (c) 2016-2018 Ruslan Bukin <[email protected]>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Ingenic JZ4780 PDMA Controller. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/cache.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>

#include <mips/ingenic/jz4780_common.h>
#include <mips/ingenic/jz4780_pdma.h>

#include "xdma_if.h"

#define	PDMA_DEBUG
#undef	PDMA_DEBUG

#ifdef	PDMA_DEBUG
#define	dprintf(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define	dprintf(fmt, ...)
#endif

#define	PDMA_DESC_RING_ALIGN	2048

struct pdma_softc {
	device_t		dev;
	struct resource		*res[2];
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
	void			*ih;
};

struct pdma_fdt_data {
	int tx;
	int rx;
	int chan;
};

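/*
 * Per-channel software state.  CHAN_DESCR_RELINK marks cyclic transfers,
 * which are restarted one descriptor at a time from the interrupt handler
 * (see pdma_intr() and pdma_channel_request()).
 */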
struct pdma_channel {
	struct pdma_fdt_data	data;
	int			cur_desc;
	int			used;
	int			index;
	int			flags;
#define	CHAN_DESCR_RELINK	(1 << 0)

	/* Descriptors */
	bus_dma_tag_t		desc_tag;
	bus_dmamap_t		desc_map;
	struct pdma_hwdesc	*desc_ring;
	bus_addr_t		desc_ring_paddr;

	/* xDMA */
	xdma_channel_t		*xchan;
	struct xdma_request	*req;
};

#define	PDMA_NCHANNELS	32
struct pdma_channel pdma_channels[PDMA_NCHANNELS];

static struct resource_spec pdma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

static int pdma_probe(device_t dev);
static int pdma_attach(device_t dev);
static int pdma_detach(device_t dev);
static int chan_start(struct pdma_softc *sc, struct pdma_channel *chan);

static void
pdma_intr(void *arg)
{
	struct xdma_request *req;
	xdma_transfer_status_t status;
	struct pdma_channel *chan;
	struct pdma_softc *sc;
	xdma_channel_t *xchan;
	int pending;
	int i;

	sc = arg;

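	/* Collect the per-channel interrupt pending bits. */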
	pending = READ4(sc, PDMA_DIRQP);

	/* Ack all the channels. */
	WRITE4(sc, PDMA_DIRQP, 0);

	for (i = 0; i < PDMA_NCHANNELS; i++) {
		if (pending & (1 << i)) {
			chan = &pdma_channels[i];
			xchan = chan->xchan;
			req = chan->req;

			/* TODO: check for AR, HLT error bits here. */

			/* Disable channel */
			WRITE4(sc, PDMA_DCS(chan->index), 0);

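			/*
			 * For relinked (cyclic) transfers the descriptors are
			 * kept unlinked, so advance to the next one and
			 * restart the channel from here.
			 */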
			if (chan->flags & CHAN_DESCR_RELINK) {
				/* Enable again */
				chan->cur_desc = (chan->cur_desc + 1) % \
				    req->block_num;
				chan_start(sc, chan);
			}

			status.error = 0;
			xdma_callback(chan->xchan, &status);
		}
	}
}

static int
pdma_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ingenic,jz4780-dma"))
		return (ENXIO);

	device_set_desc(dev, "Ingenic JZ4780 PDMA Controller");

	return (BUS_PROBE_DEFAULT);
}

static int
pdma_attach(device_t dev)
{
	struct pdma_softc *sc;
	phandle_t xref, node;
	int err;
	int reg;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (bus_alloc_resources(dev, pdma_spec, sc->res)) {
		device_printf(dev, "could not allocate resources for device\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Setup interrupt handler */
	err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, pdma_intr, sc, &sc->ih);
	if (err) {
		device_printf(dev, "Unable to alloc interrupt resource.\n");
		return (ENXIO);
	}

	node = ofw_bus_get_node(dev);
	xref = OF_xref_from_node(node);
	OF_device_register_xref(xref, dev);

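	/*
	 * Clear the global halt and address-error status bits and set the
	 * controller-wide DMA enable bit.
	 */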
	reg = READ4(sc, PDMA_DMAC);
	reg &= ~(DMAC_HLT | DMAC_AR);
	reg |= (DMAC_DMAE);
	WRITE4(sc, PDMA_DMAC, reg);

	WRITE4(sc, PDMA_DMACP, 0);

	return (0);
}

static int
pdma_detach(device_t dev)
{
	struct pdma_softc *sc;

	sc = device_get_softc(dev);

	bus_release_resources(dev, pdma_spec, sc->res);

	return (0);
}

static int
chan_start(struct pdma_softc *sc, struct pdma_channel *chan)
{
	struct xdma_channel *xchan;

	xchan = chan->xchan;

	/* Use the 8-word (32-byte) descriptor format. */
	WRITE4(sc, PDMA_DCS(chan->index), DCS_DES8);
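	/* Point the channel at the current descriptor (8 words each). */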
	WRITE4(sc, PDMA_DDA(chan->index),
	    chan->desc_ring_paddr + 8 * 4 * chan->cur_desc);

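	/* Ring the channel's doorbell so the descriptor is fetched. */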
	WRITE4(sc, PDMA_DDS, (1 << chan->index));

	/* Channel transfer enable. */
	WRITE4(sc, PDMA_DCS(chan->index), (DCS_DES8 | DCS_CTE));

	return (0);
}

static int
chan_stop(struct pdma_softc *sc, struct pdma_channel *chan)
{
	int timeout;

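	/* Clear the channel CSR and wait for the transfer enable bit to drop. */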
	WRITE4(sc, PDMA_DCS(chan->index), 0);

	timeout = 100;

	do {
		if ((READ4(sc, PDMA_DCS(chan->index)) & DCS_CTE) == 0) {
			break;
		}
	} while (timeout--);

	if (timeout < 0) {
		device_printf(sc->dev, "%s: Can't stop channel %d\n",
		    __func__, chan->index);
	}

	return (0);
}

static void
dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
pdma_channel_setup_descriptors(device_t dev, struct pdma_channel *chan)
{
	struct pdma_softc *sc;
	int error;

	sc = device_get_softc(dev);

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    PDMA_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    CHAN_DESC_SIZE, 1,		/* maxsize, nsegments */
	    CHAN_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &chan->desc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX ring DMA tag.\n");
		return (-1);
	}

	error = bus_dmamem_alloc(chan->desc_tag, (void**)&chan->desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &chan->desc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		return (-1);
	}

	error = bus_dmamap_load(chan->desc_tag, chan->desc_map,
	    chan->desc_ring, CHAN_DESC_SIZE, dwc_get1paddr,
	    &chan->desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		return (-1);
	}

	return (0);
}

static int
pdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
	struct pdma_channel *chan;
	struct pdma_softc *sc;
	int i;

	sc = device_get_softc(dev);

	for (i = 0; i < PDMA_NCHANNELS; i++) {
		chan = &pdma_channels[i];
		if (chan->used == 0) {
			chan->xchan = xchan;
			xchan->chan = (void *)chan;
			chan->used = 1;
			chan->index = i;

			pdma_channel_setup_descriptors(dev, chan);

			return (0);
		}
	}

	return (-1);
}

static int
pdma_channel_free(device_t dev, struct xdma_channel *xchan)
{
	struct pdma_channel *chan;
	struct pdma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct pdma_channel *)xchan->chan;
	chan->used = 0;

	return (0);
}

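/*
 * Translate the request's source and destination access widths (in bytes)
 * into DCM port-size bits, and derive the transfer size from the wider of
 * the two.
 */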
static int
access_width(struct xdma_request *req, uint32_t *dcm, uint32_t *max_width)
{

	*dcm = 0;
	*max_width = max(req->src_width, req->dst_width);

	switch (req->src_width) {
	case 1:
		*dcm |= DCM_SP_1;
		break;
	case 2:
		*dcm |= DCM_SP_2;
		break;
	case 4:
		*dcm |= DCM_SP_4;
		break;
	default:
		return (-1);
	}

	switch (req->dst_width) {
	case 1:
		*dcm |= DCM_DP_1;
		break;
	case 2:
		*dcm |= DCM_DP_2;
		break;
	case 4:
		*dcm |= DCM_DP_4;
		break;
	default:
		return (-1);
	}

	switch (*max_width) {
	case 1:
		*dcm |= DCM_TSZ_1;
		break;
	case 2:
		*dcm |= DCM_TSZ_2;
		break;
	case 4:
		*dcm |= DCM_TSZ_4;
		break;
	default:
		return (-1);
	}

	return (0);
}


static int
pdma_channel_request(device_t dev, struct xdma_channel *xchan,
    struct xdma_request *req)
{
	struct pdma_fdt_data *data;
	struct pdma_channel *chan;
	struct pdma_hwdesc *desc;
	xdma_controller_t *xdma;
	struct pdma_softc *sc;
	uint32_t max_width;
	uint32_t reg;
	uint32_t dcm;
	int i;

	sc = device_get_softc(dev);

	dprintf("%s: block_len %d block_num %d\n",
	    __func__, req->block_len, req->block_num);

	xdma = xchan->xdma;
	data = (struct pdma_fdt_data *)xdma->data;

	chan = (struct pdma_channel *)xchan->chan;
	/* Ensure we are not in operation */
	chan_stop(sc, chan);
	if (req->operation == XDMA_CYCLIC)
		chan->flags = CHAN_DESCR_RELINK;
	chan->cur_desc = 0;
	chan->req = req;

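	/*
	 * Build one hardware descriptor per block.  For memory <-> device
	 * transfers only the memory-side address is incremented (DCM_SAI or
	 * DCM_DAI) and the request type comes from the FDT-supplied tx/rx
	 * values; memory-to-memory uses DRT_AUTO and increments both sides.
	 */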
	for (i = 0; i < req->block_num; i++) {
		desc = &chan->desc_ring[i];

		if (req->direction == XDMA_MEM_TO_DEV) {
			desc->dsa = req->src_addr + (i * req->block_len);
			desc->dta = req->dst_addr;
			desc->drt = data->tx;
			desc->dcm = DCM_SAI;
		} else if (req->direction == XDMA_DEV_TO_MEM) {
			desc->dsa = req->src_addr;
			desc->dta = req->dst_addr + (i * req->block_len);
			desc->drt = data->rx;
			desc->dcm = DCM_DAI;
		} else if (req->direction == XDMA_MEM_TO_MEM) {
			desc->dsa = req->src_addr + (i * req->block_len);
			desc->dta = req->dst_addr + (i * req->block_len);
			desc->drt = DRT_AUTO;
			desc->dcm = DCM_SAI | DCM_DAI;
		}

		if (access_width(req, &dcm, &max_width) != 0) {
			device_printf(dev,
			    "%s: can't configure access width\n", __func__);
			return (-1);
		}

		desc->dcm |= dcm | DCM_TIE;
		desc->dtc = (req->block_len / max_width);

		/*
		 * TODO: bus dma pre read/write sync here
		 */

		/*
		 * The PDMA controller raises an interrupt only after the
		 * whole descriptor chain has been processed, not after each
		 * descriptor.  As a workaround, in the relink case the
		 * descriptors are left unlinked here, so the chain consists
		 * of a single descriptor and the channel is reprogrammed on
		 * every interrupt.
		 */
		if ((chan->flags & CHAN_DESCR_RELINK) == 0) {
			if (i != (req->block_num - 1)) {
				desc->dcm |= DCM_LINK;
				reg = ((i + 1) * sizeof(struct pdma_hwdesc));
				desc->dtc |= (reg >> 4) << 24;
			}
		}
	}

	return (0);
}

static int
pdma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
{
	struct pdma_channel *chan;
	struct pdma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct pdma_channel *)xchan->chan;

	switch (cmd) {
	case XDMA_CMD_BEGIN:
		chan_start(sc, chan);
		break;
	case XDMA_CMD_TERMINATE:
		chan_stop(sc, chan);
		break;
	case XDMA_CMD_PAUSE:
		/* TODO: implement me */
		return (-1);
	}

	return (0);
}

#ifdef FDT
static int
pdma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{
	struct pdma_fdt_data *data;

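	/*
	 * The specifier is expected to carry three cells: TX request type,
	 * RX request type and channel number.
	 */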
	if (ncells != 3) {
		return (-1);
	}

	data = malloc(sizeof(struct pdma_fdt_data), M_DEVBUF, (M_WAITOK | M_ZERO));
	if (data == NULL) {
		device_printf(dev, "%s: Can't allocate memory\n", __func__);
		return (-1);
	}

	data->tx = cells[0];
	data->rx = cells[1];
	data->chan = cells[2];

	*ptr = data;

	return (0);
}
#endif

static device_method_t pdma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			pdma_probe),
	DEVMETHOD(device_attach,		pdma_attach),
	DEVMETHOD(device_detach,		pdma_detach),

	/* xDMA Interface */
	DEVMETHOD(xdma_channel_alloc,		pdma_channel_alloc),
	DEVMETHOD(xdma_channel_free,		pdma_channel_free),
	DEVMETHOD(xdma_channel_request,		pdma_channel_request),
	DEVMETHOD(xdma_channel_control,		pdma_channel_control),
#ifdef FDT
	DEVMETHOD(xdma_ofw_md_data,		pdma_ofw_md_data),
#endif

	DEVMETHOD_END
};

static driver_t pdma_driver = {
	"pdma",
	pdma_methods,
	sizeof(struct pdma_softc),
};

static devclass_t pdma_devclass;

EARLY_DRIVER_MODULE(pdma, simplebus, pdma_driver, pdma_devclass, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
586