/*-
 * Copyright (c) 2016-2018 Ruslan Bukin <[email protected]>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Altera mSGDMA driver. */

#include <sys/cdefs.h>
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sglist.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/cache.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>
#include "xdma_if.h"
#include "opt_altera_msgdma.h"

#include <dev/altera/msgdma/msgdma.h>

#define MSGDMA_DEBUG
#undef MSGDMA_DEBUG

#ifdef MSGDMA_DEBUG
#define dprintf(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif

#define	MSGDMA_NCHANNELS	1

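/*
 * Per-channel state: a circular ring of prefetcher descriptors.
 * idx_head is where new segments are queued by submit_sg(); idx_tail is
 * the next descriptor to be reaped by the interrupt handler.
 */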
struct msgdma_channel {
	struct msgdma_softc	*sc;
	struct mtx		mtx;
	xdma_channel_t		*xchan;
	struct proc		*p;
	int			used;
	int			index;
	int			idx_head;
	int			idx_tail;

	struct msgdma_desc	**descs;
	bus_dma_segment_t	*descs_phys;
	uint32_t		descs_num;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		*dma_map;
	uint32_t		map_descr;
	uint8_t			map_err;
	uint32_t		descs_used_count;
};

struct msgdma_softc {
	device_t		dev;
	struct resource		*res[3];
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
	bus_space_tag_t		bst_d;
	bus_space_handle_t	bsh_d;
	void			*ih;
	struct msgdma_desc	desc;
	struct msgdma_channel	channels[MSGDMA_NCHANNELS];
};

static struct resource_spec msgdma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

#define	HWTYPE_NONE	0
#define	HWTYPE_STD	1

static struct ofw_compat_data compat_data[] = {
	{ "altr,msgdma-16.0",	HWTYPE_STD },
	{ "altr,msgdma-1.0",	HWTYPE_STD },
	{ NULL,			HWTYPE_NONE },
};

static int msgdma_probe(device_t dev);
static int msgdma_attach(device_t dev);
static int msgdma_detach(device_t dev);

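/* Advance a ring index, wrapping around at descs_num. */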
static inline uint32_t
msgdma_next_desc(struct msgdma_channel *chan, uint32_t curidx)
{

	return ((curidx + 1) % chan->descs_num);
}

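/*
 * Interrupt handler: walk the ring from idx_tail towards idx_head, report
 * each completed (no longer hardware-owned) descriptor to xdma, acknowledge
 * the prefetcher IRQ and invoke the channel callback with the total number
 * of bytes transferred.
 */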
static void
msgdma_intr(void *arg)
{
	xdma_transfer_status_t status;
	struct xdma_transfer_status st;
	struct msgdma_desc *desc;
	struct msgdma_channel *chan;
	struct xdma_channel *xchan;
	struct msgdma_softc *sc;
	uint32_t tot_copied;

	sc = arg;
	chan = &sc->channels[0];
	xchan = chan->xchan;

	dprintf("%s(%d): status 0x%08x next_descr 0x%08x, control 0x%08x\n",
	    __func__, device_get_unit(sc->dev),
	    READ4_DESC(sc, PF_STATUS),
	    READ4_DESC(sc, PF_NEXT_LO),
	    READ4_DESC(sc, PF_CONTROL));

	tot_copied = 0;

	while (chan->idx_tail != chan->idx_head) {
		dprintf("%s: idx_tail %d idx_head %d\n", __func__,
		    chan->idx_tail, chan->idx_head);
		bus_dmamap_sync(chan->dma_tag, chan->dma_map[chan->idx_tail],
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = chan->descs[chan->idx_tail];
		if ((le32toh(desc->control) & CONTROL_OWN) != 0) {
			break;
		}

		tot_copied += le32toh(desc->transferred);
		st.error = 0;
		st.transferred = le32toh(desc->transferred);
		xchan_seg_done(xchan, &st);

		chan->idx_tail = msgdma_next_desc(chan, chan->idx_tail);
		atomic_subtract_int(&chan->descs_used_count, 1);
	}

	WRITE4_DESC(sc, PF_STATUS, PF_STATUS_IRQ);

	/* Finish operation */
	status.error = 0;
	status.transferred = tot_copied;
	xdma_callback(chan->xchan, &status);
}

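/*
 * Reset the dispatcher and poll until the STATUS_RESETTING bit clears.
 * Returns 0 on success, -1 if the core never leaves reset.
 */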
static int
msgdma_reset(struct msgdma_softc *sc)
{
	int timeout;

	dprintf("%s: read status: %x\n", __func__, READ4(sc, 0x00));
	dprintf("%s: read control: %x\n", __func__, READ4(sc, 0x04));
	dprintf("%s: read 1: %x\n", __func__, READ4(sc, 0x08));
	dprintf("%s: read 2: %x\n", __func__, READ4(sc, 0x0C));

	WRITE4(sc, DMA_CONTROL, CONTROL_RESET);

	timeout = 100;
	do {
		if ((READ4(sc, DMA_STATUS) & STATUS_RESETTING) == 0)
			break;
	} while (timeout--);

	dprintf("timeout %d\n", timeout);

	/* The post-decrement leaves timeout at -1 when the loop exhausts. */
	if (timeout < 0)
		return (-1);

	dprintf("%s: read control after reset: %x\n",
	    __func__, READ4(sc, DMA_CONTROL));

	return (0);
}

static int
msgdma_probe(device_t dev)
{
	int hwtype;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_NONE)
		return (ENXIO);

	device_set_desc(dev, "Altera mSGDMA");

	return (BUS_PROBE_DEFAULT);
}

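/*
 * Map the CSR and descriptor (prefetcher) windows, set up the interrupt
 * handler, register the OF node xref for xdma consumers, reset the core
 * and enable global interrupts.
 */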
static int
msgdma_attach(device_t dev)
{
	struct msgdma_softc *sc;
	phandle_t xref, node;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (bus_alloc_resources(dev, msgdma_spec, sc->res)) {
		device_printf(dev, "could not allocate resources for device\n");
		return (ENXIO);
	}

	/* CSR memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Descriptor memory interface */
	sc->bst_d = rman_get_bustag(sc->res[1]);
	sc->bsh_d = rman_get_bushandle(sc->res[1]);

	/* Setup interrupt handler */
	err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, msgdma_intr, sc, &sc->ih);
	if (err) {
		device_printf(dev, "Unable to alloc interrupt resource.\n");
		return (ENXIO);
	}

	node = ofw_bus_get_node(dev);
	xref = OF_xref_from_node(node);
	OF_device_register_xref(xref, dev);

	if (msgdma_reset(sc) != 0)
		return (-1);

	WRITE4(sc, DMA_CONTROL, CONTROL_GIEM);

	return (0);
}

static int
msgdma_detach(device_t dev)
{
	struct msgdma_softc *sc;

	sc = device_get_softc(dev);

	return (0);
}

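/*
 * bus_dmamap_load() callback: record the physical address and length of
 * the descriptor currently being mapped (chan->map_descr).
 */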
static void
msgdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	struct msgdma_channel *chan;

	chan = (struct msgdma_channel *)arg;
	KASSERT(chan != NULL, ("chan is NULL"));

	if (err) {
		chan->map_err = 1;
		return;
	}

	chan->descs_phys[chan->map_descr].ds_addr = segs[0].ds_addr;
	chan->descs_phys[chan->map_descr].ds_len = segs[0].ds_len;

	dprintf("map desc %d: descs phys %lx len %ld\n",
	    chan->map_descr, segs[0].ds_addr, segs[0].ds_len);
}

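/* Tear down the descriptor ring: DMA maps, memory, tag and bookkeeping arrays. */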
static int
msgdma_desc_free(struct msgdma_softc *sc, struct msgdma_channel *chan)
{
	struct msgdma_desc *desc;
	int nsegments;
	int i;

	nsegments = chan->descs_num;

	for (i = 0; i < nsegments; i++) {
		desc = chan->descs[i];
		bus_dmamap_unload(chan->dma_tag, chan->dma_map[i]);
		bus_dmamem_free(chan->dma_tag, desc, chan->dma_map[i]);
	}

	bus_dma_tag_destroy(chan->dma_tag);
	free(chan->descs, M_DEVBUF);
	free(chan->dma_map, M_DEVBUF);
	free(chan->descs_phys, M_DEVBUF);

	return (0);
}

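/*
 * Allocate descs_num descriptors: one DMA tag for the ring, then a
 * dedicated bus_dma allocation and map per descriptor so each descriptor's
 * physical address can later be chained into the prefetcher list.
 */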
static int
msgdma_desc_alloc(struct msgdma_softc *sc, struct msgdma_channel *chan,
    uint32_t desc_size, uint32_t align)
{
	int nsegments;
	int err;
	int i;

	nsegments = chan->descs_num;

	dprintf("%s: nseg %d\n", __func__, nsegments);

	err = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),
	    align, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    desc_size, 1,		/* maxsize, nsegments */
	    desc_size, 0,		/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &chan->dma_tag);
	if (err) {
		device_printf(sc->dev,
		    "%s: Can't create bus_dma tag.\n", __func__);
		return (-1);
	}

	/* Descriptors. */
	chan->descs = malloc(nsegments * sizeof(struct msgdma_desc *),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	chan->dma_map = malloc(nsegments * sizeof(bus_dmamap_t),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
	    M_DEVBUF, (M_WAITOK | M_ZERO));

	/* Allocate bus_dma memory for each descriptor. */
	for (i = 0; i < nsegments; i++) {
		err = bus_dmamem_alloc(chan->dma_tag, (void **)&chan->descs[i],
		    BUS_DMA_WAITOK | BUS_DMA_ZERO, &chan->dma_map[i]);
		if (err) {
			device_printf(sc->dev,
			    "%s: Can't allocate memory for descriptors.\n",
			    __func__);
			return (-1);
		}

		chan->map_err = 0;
		chan->map_descr = i;
		err = bus_dmamap_load(chan->dma_tag, chan->dma_map[i],
		    chan->descs[i], desc_size, msgdma_dmamap_cb, chan,
		    BUS_DMA_WAITOK);
		if (err) {
			device_printf(sc->dev,
			    "%s: Can't load DMA map.\n", __func__);
			return (-1);
		}

		if (chan->map_err != 0) {
			device_printf(sc->dev,
			    "%s: Can't load DMA map.\n", __func__);
			return (-1);
		}
	}

	return (0);
}

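/* Hand out the first unused hardware channel and reset its ring indices. */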
static int
msgdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_softc *sc;
	int i;

	sc = device_get_softc(dev);

	for (i = 0; i < MSGDMA_NCHANNELS; i++) {
		chan = &sc->channels[i];
		if (chan->used == 0) {
			chan->xchan = xchan;
			xchan->chan = (void *)chan;
			if ((xchan->caps & XCHAN_CAP_IOMMU) == 0)
				xchan->caps |= XCHAN_CAP_BUSDMA;
			chan->index = i;
			chan->sc = sc;
			chan->used = 1;
			chan->idx_head = 0;
			chan->idx_tail = 0;
			chan->descs_used_count = 0;
			chan->descs_num = 1024;

			return (0);
		}
	}

	return (-1);
}

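/* Return a channel to the pool and free its descriptor ring. */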
static int
msgdma_channel_free(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	msgdma_desc_free(sc, chan);

	chan->used = 0;

	return (0);
}

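/* Report how many descriptors are still free; one slot is always kept empty. */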
static int
msgdma_channel_capacity(device_t dev, xdma_channel_t *xchan,
    uint32_t *capacity)
{
	struct msgdma_channel *chan;
	uint32_t c;

	chan = (struct msgdma_channel *)xchan->chan;

	/* At least one descriptor must be left empty. */
	c = (chan->descs_num - chan->descs_used_count - 1);

	*capacity = c;

	return (0);
}

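/*
 * Fill one descriptor per scatter-gather entry. For memory-to-device
 * transfers, generate SOP/EOP marks on the first/last segments; for
 * device-to-memory transfers, stop on EOP and enable completion IRQs.
 * Ownership is handed to the hardware last, then the map is synced.
 */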
static int
msgdma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
    struct xdma_sglist *sg, uint32_t sg_n)
{
	struct msgdma_channel *chan;
	struct msgdma_desc *desc;
	struct msgdma_softc *sc;
	bus_addr_t src_addr_lo;
	bus_addr_t dst_addr_lo;
	uint32_t len;
	uint32_t tmp;
	int i;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	for (i = 0; i < sg_n; i++) {
		src_addr_lo = sg[i].src_addr;
		dst_addr_lo = sg[i].dst_addr;
		len = (uint32_t)sg[i].len;

		dprintf("%s: src %x dst %x len %d\n", __func__,
		    src_addr_lo, dst_addr_lo, len);

		desc = chan->descs[chan->idx_head];
#if defined(ALTERA_MSGDMA_DESC_EXT) || defined(ALTERA_MSGDMA_DESC_PF_EXT)
		desc->read_hi = htole32(src_addr_lo >> 32);
		desc->write_hi = htole32(dst_addr_lo >> 32);
#endif
		desc->read_lo = htole32(src_addr_lo);
		desc->write_lo = htole32(dst_addr_lo);
		desc->length = htole32(len);
		desc->transferred = 0;
		desc->status = 0;
		desc->reserved = 0;
		desc->control = 0;

		if (sg[i].direction == XDMA_MEM_TO_DEV) {
			if (sg[i].first == 1) {
				desc->control |= htole32(CONTROL_GEN_SOP);
			}

			if (sg[i].last == 1) {
				desc->control |= htole32(CONTROL_GEN_EOP);
				desc->control |= htole32(CONTROL_TC_IRQ_EN |
				    CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
			}
		} else {
			desc->control |= htole32(CONTROL_END_ON_EOP | (1 << 13));
			desc->control |= htole32(CONTROL_TC_IRQ_EN |
			    CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
		}

		tmp = chan->idx_head;

		atomic_add_int(&chan->descs_used_count, 1);
		chan->idx_head = msgdma_next_desc(chan, chan->idx_head);

		desc->control |= htole32(CONTROL_OWN | CONTROL_GO);

		bus_dmamap_sync(chan->dma_tag, chan->dma_map[tmp],
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	return (0);
}

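/*
 * Allocate the descriptor ring, link each descriptor's next pointer to the
 * following one (the last wraps back to the first) and start the prefetcher
 * in descriptor-polling mode at the head of the ring.
 */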
static int
msgdma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
	struct msgdma_channel *chan;
	struct msgdma_desc *desc;
	struct msgdma_softc *sc;
	uint32_t addr;
	uint32_t reg;
	int ret;
	int i;

	sc = device_get_softc(dev);

	dprintf("%s(%d)\n", __func__, device_get_unit(dev));

	chan = (struct msgdma_channel *)xchan->chan;

	ret = msgdma_desc_alloc(sc, chan, sizeof(struct msgdma_desc), 16);
	if (ret != 0) {
		device_printf(sc->dev,
		    "%s: Can't allocate descriptors.\n", __func__);
		return (-1);
	}

	for (i = 0; i < chan->descs_num; i++) {
		desc = chan->descs[i];

		if (i == (chan->descs_num - 1)) {
			desc->next = htole32(chan->descs_phys[0].ds_addr);
		} else {
			desc->next = htole32(chan->descs_phys[i+1].ds_addr);
		}

		dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
		    device_get_unit(dev), i, (uint64_t)desc, le32toh(desc->next));
	}

	addr = chan->descs_phys[0].ds_addr;
	WRITE4_DESC(sc, PF_NEXT_LO, addr);
	WRITE4_DESC(sc, PF_NEXT_HI, 0);
	WRITE4_DESC(sc, PF_POLL_FREQ, 1000);

	reg = (PF_CONTROL_GIEM | PF_CONTROL_DESC_POLL_EN);
	reg |= PF_CONTROL_RUN;
	WRITE4_DESC(sc, PF_CONTROL, reg);

	return (0);
}

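/* Channel control commands are not implemented yet; BEGIN/TERMINATE/PAUSE fail. */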
static int
msgdma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
{
	struct msgdma_channel *chan;
	struct msgdma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct msgdma_channel *)xchan->chan;

	switch (cmd) {
	case XDMA_CMD_BEGIN:
	case XDMA_CMD_TERMINATE:
	case XDMA_CMD_PAUSE:
		/* TODO: implement me */
		return (-1);
	}

	return (0);
}

#ifdef FDT
static int
msgdma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{

	return (0);
}
#endif

static device_method_t msgdma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msgdma_probe),
	DEVMETHOD(device_attach,	msgdma_attach),
	DEVMETHOD(device_detach,	msgdma_detach),

	/* xDMA Interface */
	DEVMETHOD(xdma_channel_alloc,	msgdma_channel_alloc),
	DEVMETHOD(xdma_channel_free,	msgdma_channel_free),
	DEVMETHOD(xdma_channel_control,	msgdma_channel_control),

	/* xDMA SG Interface */
	DEVMETHOD(xdma_channel_capacity,	msgdma_channel_capacity),
	DEVMETHOD(xdma_channel_prep_sg,		msgdma_channel_prep_sg),
	DEVMETHOD(xdma_channel_submit_sg,	msgdma_channel_submit_sg),

#ifdef FDT
	DEVMETHOD(xdma_ofw_md_data,	msgdma_ofw_md_data),
#endif

	DEVMETHOD_END
};

static driver_t msgdma_driver = {
	"msgdma",
	msgdma_methods,
	sizeof(struct msgdma_softc),
};

EARLY_DRIVER_MODULE(msgdma, simplebus, msgdma_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);