1 /*-
2 * Copyright (c) 2016-2018 Ruslan Bukin <[email protected]>
3 * All rights reserved.
4 *
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 /* Altera mSGDMA driver. */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include "opt_platform.h"
37 #include <sys/param.h>
38 #include <sys/endian.h>
39 #include <sys/systm.h>
40 #include <sys/conf.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/kthread.h>
44 #include <sys/sglist.h>
45 #include <sys/module.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/resource.h>
49 #include <sys/rman.h>
50
51 #include <machine/bus.h>
52 #include <machine/fdt.h>
53 #include <machine/cache.h>
54
55 #ifdef FDT
56 #include <dev/fdt/fdt_common.h>
57 #include <dev/ofw/ofw_bus.h>
58 #include <dev/ofw/ofw_bus_subr.h>
59 #endif
60
61 #include <dev/xdma/xdma.h>
62 #include "xdma_if.h"
63
64 #include <dev/altera/msgdma/msgdma.h>
65
/* Flip the #undef below to #define MSGDMA_DEBUG to enable dprintf output. */
#define MSGDMA_DEBUG
#undef MSGDMA_DEBUG

#ifdef MSGDMA_DEBUG
#define dprintf(fmt, ...)  printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif

/* The core services a single DMA channel. */
#define	MSGDMA_NCHANNELS	1
76
/*
 * Per-channel software state: a circular ring of chained mSGDMA
 * descriptors plus the bus_dma bookkeeping needed to map them.
 */
struct msgdma_channel {
	struct msgdma_softc	*sc;		/* Parent controller. */
	struct mtx		mtx;		/* Channel lock. */
	xdma_channel_t		*xchan;		/* Backing xdma channel. */
	struct proc		*p;
	int			used;		/* Slot handed out by channel_alloc. */
	int			index;		/* Index within sc->channels[]. */
	int			idx_head;	/* Ring producer index (submit_sg). */
	int			idx_tail;	/* Ring consumer index (intr). */

	struct msgdma_desc	**descs;	/* KVA of each descriptor. */
	bus_dma_segment_t	*descs_phys;	/* Physical addr/len per descriptor. */
	uint32_t		descs_num;	/* Ring size (set in channel_alloc). */
	bus_dma_tag_t		dma_tag;	/* Tag covering one descriptor. */
	bus_dmamap_t		*dma_map;	/* One map per descriptor. */
	uint32_t		map_descr;	/* Index being loaded (cb argument). */
	uint8_t			map_err;	/* Set by dmamap_cb on failure. */
	uint32_t		descs_used_count; /* Descriptors owned by hardware. */
};
96
/* Per-device software state. */
struct msgdma_softc {
	device_t		dev;
	struct resource		*res[3];	/* CSR mem, descriptor mem, IRQ. */
	bus_space_tag_t		bst;		/* CSR register window. */
	bus_space_handle_t	bsh;
	bus_space_tag_t		bst_d;		/* Descriptor (prefetcher) window. */
	bus_space_handle_t	bsh_d;
	void			*ih;		/* Interrupt handler cookie. */
	struct msgdma_desc	desc;
	struct msgdma_channel	channels[MSGDMA_NCHANNELS];
};
108
/*
 * Resources: CSR register window, descriptor memory window, and the
 * transfer-completion interrupt (in that order; see sc->res[] usage).
 */
static struct resource_spec msgdma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};
115
/* Values stored in ofw_compat_data.ocd_data. */
#define	HWTYPE_NONE	0
#define	HWTYPE_STD	1

static struct ofw_compat_data compat_data[] = {
	{ "altr,msgdma-16.0",	HWTYPE_STD },
	{ "altr,msgdma-1.0",	HWTYPE_STD },
	{ NULL,			HWTYPE_NONE },
};
124
125 static int msgdma_probe(device_t dev);
126 static int msgdma_attach(device_t dev);
127 static int msgdma_detach(device_t dev);
128
129 static inline uint32_t
msgdma_next_desc(struct msgdma_channel * chan,uint32_t curidx)130 msgdma_next_desc(struct msgdma_channel *chan, uint32_t curidx)
131 {
132
133 return ((curidx + 1) % chan->descs_num);
134 }
135
/*
 * Interrupt handler: reap completed descriptors.
 *
 * Walks the ring from idx_tail towards idx_head, stopping at the first
 * descriptor the hardware still owns (CONTROL_OWN set).  Each completed
 * segment is reported to the xdma framework individually, and the byte
 * counts are accumulated for the final aggregate callback.
 */
static void
msgdma_intr(void *arg)
{
	xdma_transfer_status_t status;
	struct xdma_transfer_status st;
	struct msgdma_desc *desc;
	struct msgdma_channel *chan;
	struct xdma_channel *xchan;
	struct msgdma_softc *sc;
	uint32_t tot_copied;

	sc = arg;
	/* Single-channel hardware: only channels[0] is ever used. */
	chan = &sc->channels[0];
	xchan = chan->xchan;

	dprintf("%s(%d): status 0x%08x next_descr 0x%08x, control 0x%08x\n",
	    __func__, device_get_unit(sc->dev),
	    READ4_DESC(sc, PF_STATUS),
	    READ4_DESC(sc, PF_NEXT_LO),
	    READ4_DESC(sc, PF_CONTROL));

	tot_copied = 0;

	while (chan->idx_tail != chan->idx_head) {
		dprintf("%s: idx_tail %d idx_head %d\n", __func__,
		    chan->idx_tail, chan->idx_head);
		/* Pull the hardware's writes into the CPU's view. */
		bus_dmamap_sync(chan->dma_tag, chan->dma_map[chan->idx_tail],
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = chan->descs[chan->idx_tail];
		if ((le32toh(desc->control) & CONTROL_OWN) != 0) {
			/* Hardware still owns this descriptor; stop here. */
			break;
		}

		/* Descriptor fields are little-endian on the wire. */
		tot_copied += le32toh(desc->transferred);
		st.error = 0;
		st.transferred = le32toh(desc->transferred);
		xchan_seg_done(xchan, &st);

		chan->idx_tail = msgdma_next_desc(chan, chan->idx_tail);
		atomic_subtract_int(&chan->descs_used_count, 1);
	}

	/* Ack the prefetcher interrupt (looks write-1-to-clear — confirm). */
	WRITE4_DESC(sc, PF_STATUS, PF_STATUS_IRQ);

	/* Finish operation */
	status.error = 0;
	status.transferred = tot_copied;
	xdma_callback(chan->xchan, &status);
}
186
187 static int
msgdma_reset(struct msgdma_softc * sc)188 msgdma_reset(struct msgdma_softc *sc)
189 {
190 int timeout;
191
192 dprintf("%s: read status: %x\n", __func__, READ4(sc, 0x00));
193 dprintf("%s: read control: %x\n", __func__, READ4(sc, 0x04));
194 dprintf("%s: read 1: %x\n", __func__, READ4(sc, 0x08));
195 dprintf("%s: read 2: %x\n", __func__, READ4(sc, 0x0C));
196
197 WRITE4(sc, DMA_CONTROL, CONTROL_RESET);
198
199 timeout = 100;
200 do {
201 if ((READ4(sc, DMA_STATUS) & STATUS_RESETTING) == 0)
202 break;
203 } while (timeout--);
204
205 dprintf("timeout %d\n", timeout);
206
207 if (timeout == 0)
208 return (-1);
209
210 dprintf("%s: read control after reset: %x\n",
211 __func__, READ4(sc, DMA_CONTROL));
212
213 return (0);
214 }
215
216 static int
msgdma_probe(device_t dev)217 msgdma_probe(device_t dev)
218 {
219 int hwtype;
220
221 if (!ofw_bus_status_okay(dev))
222 return (ENXIO);
223
224 hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
225 if (hwtype == HWTYPE_NONE)
226 return (ENXIO);
227
228 device_set_desc(dev, "Altera mSGDMA");
229
230 return (BUS_PROBE_DEFAULT);
231 }
232
233 static int
msgdma_attach(device_t dev)234 msgdma_attach(device_t dev)
235 {
236 struct msgdma_softc *sc;
237 phandle_t xref, node;
238 int err;
239
240 sc = device_get_softc(dev);
241 sc->dev = dev;
242
243 if (bus_alloc_resources(dev, msgdma_spec, sc->res)) {
244 device_printf(dev, "could not allocate resources for device\n");
245 return (ENXIO);
246 }
247
248 /* CSR memory interface */
249 sc->bst = rman_get_bustag(sc->res[0]);
250 sc->bsh = rman_get_bushandle(sc->res[0]);
251
252 /* Descriptor memory interface */
253 sc->bst_d = rman_get_bustag(sc->res[1]);
254 sc->bsh_d = rman_get_bushandle(sc->res[1]);
255
256 /* Setup interrupt handler */
257 err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
258 NULL, msgdma_intr, sc, &sc->ih);
259 if (err) {
260 device_printf(dev, "Unable to alloc interrupt resource.\n");
261 return (ENXIO);
262 }
263
264 node = ofw_bus_get_node(dev);
265 xref = OF_xref_from_node(node);
266 OF_device_register_xref(xref, dev);
267
268 if (msgdma_reset(sc) != 0)
269 return (-1);
270
271 WRITE4(sc, DMA_CONTROL, CONTROL_GIEM);
272
273 return (0);
274 }
275
276 static int
msgdma_detach(device_t dev)277 msgdma_detach(device_t dev)
278 {
279 struct msgdma_softc *sc;
280
281 sc = device_get_softc(dev);
282
283 return (0);
284 }
285
286 static void
msgdma_dmamap_cb(void * arg,bus_dma_segment_t * segs,int nseg,int err)287 msgdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
288 {
289 struct msgdma_channel *chan;
290
291 chan = (struct msgdma_channel *)arg;
292 KASSERT(chan != NULL, ("xchan is NULL"));
293
294 if (err) {
295 chan->map_err = 1;
296 return;
297 }
298
299 chan->descs_phys[chan->map_descr].ds_addr = segs[0].ds_addr;
300 chan->descs_phys[chan->map_descr].ds_len = segs[0].ds_len;
301
302 dprintf("map desc %d: descs phys %lx len %ld\n",
303 chan->map_descr, segs[0].ds_addr, segs[0].ds_len);
304 }
305
306 static int
msgdma_desc_free(struct msgdma_softc * sc,struct msgdma_channel * chan)307 msgdma_desc_free(struct msgdma_softc *sc, struct msgdma_channel *chan)
308 {
309 struct msgdma_desc *desc;
310 int nsegments;
311 int i;
312
313 nsegments = chan->descs_num;
314
315 for (i = 0; i < nsegments; i++) {
316 desc = chan->descs[i];
317 bus_dmamap_unload(chan->dma_tag, chan->dma_map[i]);
318 bus_dmamem_free(chan->dma_tag, desc, chan->dma_map[i]);
319 }
320
321 bus_dma_tag_destroy(chan->dma_tag);
322 free(chan->descs, M_DEVBUF);
323 free(chan->dma_map, M_DEVBUF);
324 free(chan->descs_phys, M_DEVBUF);
325
326 return (0);
327 }
328
329 static int
msgdma_desc_alloc(struct msgdma_softc * sc,struct msgdma_channel * chan,uint32_t desc_size,uint32_t align)330 msgdma_desc_alloc(struct msgdma_softc *sc, struct msgdma_channel *chan,
331 uint32_t desc_size, uint32_t align)
332 {
333 int nsegments;
334 int err;
335 int i;
336
337 nsegments = chan->descs_num;
338
339 dprintf("%s: nseg %d\n", __func__, nsegments);
340
341 err = bus_dma_tag_create(
342 bus_get_dma_tag(sc->dev),
343 align, 0, /* alignment, boundary */
344 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
345 BUS_SPACE_MAXADDR, /* highaddr */
346 NULL, NULL, /* filter, filterarg */
347 desc_size, 1, /* maxsize, nsegments*/
348 desc_size, 0, /* maxsegsize, flags */
349 NULL, NULL, /* lockfunc, lockarg */
350 &chan->dma_tag);
351 if (err) {
352 device_printf(sc->dev,
353 "%s: Can't create bus_dma tag.\n", __func__);
354 return (-1);
355 }
356
357 /* Descriptors. */
358 chan->descs = malloc(nsegments * sizeof(struct msgdma_desc *),
359 M_DEVBUF, (M_WAITOK | M_ZERO));
360 if (chan->descs == NULL) {
361 device_printf(sc->dev,
362 "%s: Can't allocate memory.\n", __func__);
363 return (-1);
364 }
365 chan->dma_map = malloc(nsegments * sizeof(bus_dmamap_t),
366 M_DEVBUF, (M_WAITOK | M_ZERO));
367 chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
368 M_DEVBUF, (M_WAITOK | M_ZERO));
369
370 /* Allocate bus_dma memory for each descriptor. */
371 for (i = 0; i < nsegments; i++) {
372 err = bus_dmamem_alloc(chan->dma_tag, (void **)&chan->descs[i],
373 BUS_DMA_WAITOK | BUS_DMA_ZERO, &chan->dma_map[i]);
374 if (err) {
375 device_printf(sc->dev,
376 "%s: Can't allocate memory for descriptors.\n",
377 __func__);
378 return (-1);
379 }
380
381 chan->map_err = 0;
382 chan->map_descr = i;
383 err = bus_dmamap_load(chan->dma_tag, chan->dma_map[i], chan->descs[i],
384 desc_size, msgdma_dmamap_cb, chan, BUS_DMA_WAITOK);
385 if (err) {
386 device_printf(sc->dev,
387 "%s: Can't load DMA map.\n", __func__);
388 return (-1);
389 }
390
391 if (chan->map_err != 0) {
392 device_printf(sc->dev,
393 "%s: Can't load DMA map.\n", __func__);
394 return (-1);
395 }
396 }
397
398 return (0);
399 }
400
401
402 static int
msgdma_channel_alloc(device_t dev,struct xdma_channel * xchan)403 msgdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
404 {
405 struct msgdma_channel *chan;
406 struct msgdma_softc *sc;
407 int i;
408
409 sc = device_get_softc(dev);
410
411 for (i = 0; i < MSGDMA_NCHANNELS; i++) {
412 chan = &sc->channels[i];
413 if (chan->used == 0) {
414 chan->xchan = xchan;
415 xchan->chan = (void *)chan;
416 xchan->caps |= XCHAN_CAP_BUSDMA;
417 chan->index = i;
418 chan->sc = sc;
419 chan->used = 1;
420 chan->idx_head = 0;
421 chan->idx_tail = 0;
422 chan->descs_used_count = 0;
423 chan->descs_num = 1024;
424
425 return (0);
426 }
427 }
428
429 return (-1);
430 }
431
432 static int
msgdma_channel_free(device_t dev,struct xdma_channel * xchan)433 msgdma_channel_free(device_t dev, struct xdma_channel *xchan)
434 {
435 struct msgdma_channel *chan;
436 struct msgdma_softc *sc;
437
438 sc = device_get_softc(dev);
439
440 chan = (struct msgdma_channel *)xchan->chan;
441
442 msgdma_desc_free(sc, chan);
443
444 chan->used = 0;
445
446 return (0);
447 }
448
449 static int
msgdma_channel_capacity(device_t dev,xdma_channel_t * xchan,uint32_t * capacity)450 msgdma_channel_capacity(device_t dev, xdma_channel_t *xchan,
451 uint32_t *capacity)
452 {
453 struct msgdma_channel *chan;
454 uint32_t c;
455
456 chan = (struct msgdma_channel *)xchan->chan;
457
458 /* At least one descriptor must be left empty. */
459 c = (chan->descs_num - chan->descs_used_count - 1);
460
461 *capacity = c;
462
463 return (0);
464 }
465
/*
 * Program ring descriptors for a scatter-gather transfer and hand them
 * to the hardware.
 *
 * For each sglist entry the descriptor at idx_head is filled in and
 * released to the engine by setting CONTROL_OWN | CONTROL_GO.  Only the
 * low 32 bits of the addresses are used; the DMA tag is created with
 * BUS_SPACE_MAXADDR_32BIT so the truncation is safe.  Callers are
 * expected to have checked msgdma_channel_capacity() first — there is
 * no full-ring check here.
 */
static int
msgdma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
    struct xdma_sglist *sg, uint32_t sg_n)
{
	struct msgdma_channel *chan;
	struct msgdma_desc *desc;
	struct msgdma_softc *sc;
	uint32_t src_addr_lo;
	uint32_t dst_addr_lo;
	uint32_t len;
	uint32_t tmp;
	int i;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	for (i = 0; i < sg_n; i++) {
		src_addr_lo = (uint32_t)sg[i].src_addr;
		dst_addr_lo = (uint32_t)sg[i].dst_addr;
		len = (uint32_t)sg[i].len;

		dprintf("%s: src %x dst %x len %d\n", __func__,
		    src_addr_lo, dst_addr_lo, len);

		/* Descriptor fields are little-endian. */
		desc = chan->descs[chan->idx_head];
		desc->read_lo = htole32(src_addr_lo);
		desc->write_lo = htole32(dst_addr_lo);
		desc->length = htole32(len);
		desc->transferred = 0;
		desc->status = 0;
		desc->reserved = 0;
		desc->control = 0;

		if (sg[i].direction == XDMA_MEM_TO_DEV) {
			/* TX: generate SOP/EOP around the packet. */
			if (sg[i].first == 1) {
				desc->control |= htole32(CONTROL_GEN_SOP);
			}

			if (sg[i].last == 1) {
				desc->control |= htole32(CONTROL_GEN_EOP);
				desc->control |= htole32(CONTROL_TC_IRQ_EN |
				    CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
			}
		} else {
			/*
			 * RX: terminate on EOP.  Bit 13's meaning is not
			 * named here — TODO confirm against the mSGDMA
			 * register map.
			 */
			desc->control |= htole32(CONTROL_END_ON_EOP | (1 << 13));
			desc->control |= htole32(CONTROL_TC_IRQ_EN |
			    CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
		}

		tmp = chan->idx_head;

		atomic_add_int(&chan->descs_used_count, 1);
		chan->idx_head = msgdma_next_desc(chan, chan->idx_head);

		/* Transfer ownership to the hardware last. */
		desc->control |= htole32(CONTROL_OWN | CONTROL_GO);

		/* Flush the descriptor before the engine can fetch it. */
		bus_dmamap_sync(chan->dma_tag, chan->dma_map[tmp],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
529
/*
 * Prepare a channel for scatter-gather operation: allocate the
 * descriptor ring, link the descriptors into a circular chain via
 * their physical `next` pointers, then point the prefetcher at the
 * first descriptor and start it in descriptor-polling mode.
 */
static int
msgdma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_desc *desc;
	struct msgdma_softc *sc;
	uint32_t addr;
	uint32_t reg;
	int ret;
	int i;

	sc = device_get_softc(dev);

	dprintf("%s(%d)\n", __func__, device_get_unit(dev));

	chan = (struct msgdma_channel *)xchan->chan;

	/* 16-byte alignment; presumably a prefetcher requirement — confirm. */
	ret = msgdma_desc_alloc(sc, chan, sizeof(struct msgdma_desc), 16);
	if (ret != 0) {
		device_printf(sc->dev,
		    "%s: Can't allocate descriptors.\n", __func__);
		return (-1);
	}

	/* Chain the ring: each descriptor points at the next, last wraps. */
	for (i = 0; i < chan->descs_num; i++) {
		desc = chan->descs[i];

		if (i == (chan->descs_num - 1)) {
			desc->next = htole32(chan->descs_phys[0].ds_addr);
		} else {
			desc->next = htole32(chan->descs_phys[i+1].ds_addr);
		}

		dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
		    device_get_unit(dev), i, (uint64_t)desc, le32toh(desc->next));
	}

	/* Point the prefetcher at the head of the ring and start it. */
	addr = chan->descs_phys[0].ds_addr;
	WRITE4_DESC(sc, PF_NEXT_LO, addr);
	WRITE4_DESC(sc, PF_NEXT_HI, 0);
	WRITE4_DESC(sc, PF_POLL_FREQ, 1000);

	reg = (PF_CONTROL_GIEM | PF_CONTROL_DESC_POLL_EN);
	reg |= PF_CONTROL_RUN;
	WRITE4_DESC(sc, PF_CONTROL, reg);

	return (0);
}
578
579 static int
msgdma_channel_control(device_t dev,xdma_channel_t * xchan,int cmd)580 msgdma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
581 {
582 struct msgdma_channel *chan;
583 struct msgdma_softc *sc;
584
585 sc = device_get_softc(dev);
586
587 chan = (struct msgdma_channel *)xchan->chan;
588
589 switch (cmd) {
590 case XDMA_CMD_BEGIN:
591 case XDMA_CMD_TERMINATE:
592 case XDMA_CMD_PAUSE:
593 /* TODO: implement me */
594 return (-1);
595 }
596
597 return (0);
598 }
599
#ifdef FDT
/*
 * xdma OFW hook for decoding controller-specific cells from the FDT.
 * This controller carries no extra metadata, so the hook is a no-op
 * that always succeeds.
 */
static int
msgdma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{

	return (0);
}
#endif
608
/* newbus + xdma method dispatch table. */
static device_method_t msgdma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msgdma_probe),
	DEVMETHOD(device_attach,	msgdma_attach),
	DEVMETHOD(device_detach,	msgdma_detach),

	/* xDMA Interface */
	DEVMETHOD(xdma_channel_alloc,	msgdma_channel_alloc),
	DEVMETHOD(xdma_channel_free,	msgdma_channel_free),
	DEVMETHOD(xdma_channel_control,	msgdma_channel_control),

	/* xDMA SG Interface */
	DEVMETHOD(xdma_channel_capacity,	msgdma_channel_capacity),
	DEVMETHOD(xdma_channel_prep_sg,		msgdma_channel_prep_sg),
	DEVMETHOD(xdma_channel_submit_sg,	msgdma_channel_submit_sg),

#ifdef FDT
	DEVMETHOD(xdma_ofw_md_data,	msgdma_ofw_md_data),
#endif

	DEVMETHOD_END
};
631
static driver_t msgdma_driver = {
	"msgdma",
	msgdma_methods,
	sizeof(struct msgdma_softc),
};

static devclass_t msgdma_devclass;

/* Attach during the interrupt pass (late order), under simplebus. */
EARLY_DRIVER_MODULE(msgdma, simplebus, msgdma_driver, msgdma_devclass, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
642