/*-
 * Copyright (c) 2017-2018 Ruslan Bukin <[email protected]>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Cadence Quad SPI Flash Controller driver.
 * Only 4-byte addressing mode is supported.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <geom/geom_disk.h>

#include <machine/bus.h>

#include <dev/fdt/simplebus.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>

#include <dev/flash/cqspi.h>
#include <dev/flash/mx25lreg.h>
#include <dev/xdma/xdma.h>

#include "qspi_if.h"

#define	CQSPI_DEBUG
#undef	CQSPI_DEBUG

#ifdef CQSPI_DEBUG
#define	dprintf(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define	dprintf(fmt, ...)
#endif

#define	CQSPI_SECTORSIZE	512
#define	TX_QUEUE_SIZE		16
#define	RX_QUEUE_SIZE		16

#define	READ4(_sc, _reg)	bus_read_4((_sc)->res[0], _reg)
#define	READ2(_sc, _reg)	bus_read_2((_sc)->res[0], _reg)
#define	READ1(_sc, _reg)	bus_read_1((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val)	bus_write_4((_sc)->res[0], _reg, _val)
#define	WRITE2(_sc, _reg, _val)	bus_write_2((_sc)->res[0], _reg, _val)
#define	WRITE1(_sc, _reg, _val)	bus_write_1((_sc)->res[0], _reg, _val)
#define	READ_DATA_4(_sc, _reg)	bus_read_4((_sc)->res[1], _reg)
#define	READ_DATA_1(_sc, _reg)	bus_read_1((_sc)->res[1], _reg)
#define	WRITE_DATA_4(_sc, _reg, _val)	bus_write_4((_sc)->res[1], _reg, _val)
#define	WRITE_DATA_1(_sc, _reg, _val)	bus_write_1((_sc)->res[1], _reg, _val)

struct cqspi_softc {
	device_t		dev;

	struct resource		*res[3];
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
	void			*ih;
	uint8_t			read_op_done;
	uint8_t			write_op_done;

	uint32_t		fifo_depth;
	uint32_t		fifo_width;
	uint32_t		trigger_address;
	uint32_t		sram_phys;

	/* xDMA */
	xdma_controller_t	*xdma_tx;
	xdma_channel_t		*xchan_tx;
	void			*ih_tx;

	xdma_controller_t	*xdma_rx;
	xdma_channel_t		*xchan_rx;
	void			*ih_rx;

	struct intr_config_hook	config_intrhook;
	struct mtx		sc_mtx;
};

#define	CQSPI_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	CQSPI_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_mtx)
#define	CQSPI_LOCK_INIT(_sc)	\
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
	    "cqspi", MTX_DEF)
#define	CQSPI_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define	CQSPI_ASSERT_LOCKED(_sc)	\
	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	CQSPI_ASSERT_UNLOCKED(_sc)	\
	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static struct resource_spec cqspi_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

static struct ofw_compat_data compat_data[] = {
	{ "cdns,qspi-nor",	1 },
	{ NULL,			0 },
};

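/*
 * Controller interrupt handler: read the pending interrupt status and
 * write it back; handling of PIO (indirect operation) completion events
 * is still a TODO.
 */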
static void
cqspi_intr(void *arg)
{
	struct cqspi_softc *sc;
	uint32_t pending;

	sc = arg;

	pending = READ4(sc, CQSPI_IRQSTAT);

	dprintf("%s: IRQSTAT %x\n", __func__, pending);

	if (pending & (IRQMASK_INDOPDONE | IRQMASK_INDXFRLVL |
	    IRQMASK_INDSRAMFULL)) {
		/* TODO: PIO operation done */
	}

	WRITE4(sc, CQSPI_IRQSTAT, pending);
}

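/*
 * xDMA TX completion callback: drain finished bios from the TX channel,
 * mark the pending write operation done and wake up the writer.
 */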
static int
cqspi_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
	struct xdma_transfer_status st;
	struct cqspi_softc *sc;
	struct bio *bp;
	int ret;
	int deq;

	sc = arg;

	dprintf("%s\n", __func__);

	deq = 0;

	while (1) {
		ret = xdma_dequeue_bio(sc->xchan_tx, &bp, &st);
		if (ret != 0) {
			break;
		}
		sc->write_op_done = 1;
		deq++;
	}

	if (deq > 1)
		device_printf(sc->dev,
		    "Warning: more than 1 tx bio dequeued\n");

	wakeup(&sc->xdma_tx);

	return (0);
}

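/*
 * xDMA RX completion callback: drain finished bios from the RX channel,
 * mark the pending read operation done and wake up the reader.
 */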
static int
cqspi_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
	struct xdma_transfer_status st;
	struct cqspi_softc *sc;
	struct bio *bp;
	int ret;
	int deq;

	sc = arg;

	dprintf("%s\n", __func__);

	deq = 0;

	while (1) {
		ret = xdma_dequeue_bio(sc->xchan_rx, &bp, &st);
		if (ret != 0) {
			break;
		}
		sc->read_op_done = 1;
		deq++;
	}

	if (deq > 1)
		device_printf(sc->dev,
		    "Warning: more than 1 rx bio dequeued\n");

	wakeup(&sc->xdma_rx);

	return (0);
}

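/*
 * Poll the FLASHCMD register until the command execution status bit
 * clears, giving up after a fixed number of iterations.
 */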
static int
cqspi_wait_for_completion(struct cqspi_softc *sc)
{
	int timeout;
	int i;

	timeout = 10000;

	for (i = timeout; i > 0; i--) {
		if ((READ4(sc, CQSPI_FLASHCMD) & FLASHCMD_CMDEXECSTAT) == 0) {
			break;
		}
	}

	if (i == 0) {
		device_printf(sc->dev, "%s: cmd timed out: %x\n",
		    __func__, READ4(sc, CQSPI_FLASHCMD));
		return (-1);
	}

	return (0);
}

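/* Execute a flash command that takes an address (e.g. sector erase). */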
static int
cqspi_cmd_write_addr(struct cqspi_softc *sc, uint8_t cmd,
    uint32_t addr, uint32_t len)
{
	uint32_t reg;
	int ret;

	dprintf("%s: %x\n", __func__, cmd);

	WRITE4(sc, CQSPI_FLASHCMDADDR, addr);
	reg = (cmd << FLASHCMD_CMDOPCODE_S);
	reg |= (FLASHCMD_ENCMDADDR);
	reg |= ((len - 1) << FLASHCMD_NUMADDRBYTES_S);
	WRITE4(sc, CQSPI_FLASHCMD, reg);

	reg |= FLASHCMD_EXECCMD;
	WRITE4(sc, CQSPI_FLASHCMD, reg);

	ret = cqspi_wait_for_completion(sc);

	return (ret);
}

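/* Execute a simple flash command with no address or data phase. */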
static int
cqspi_cmd_write(struct cqspi_softc *sc, uint8_t cmd,
    uint8_t *addr, uint32_t len)
{
	uint32_t reg;
	int ret;

	reg = (cmd << FLASHCMD_CMDOPCODE_S);
	WRITE4(sc, CQSPI_FLASHCMD, reg);
	reg |= FLASHCMD_EXECCMD;
	WRITE4(sc, CQSPI_FLASHCMD, reg);

	ret = cqspi_wait_for_completion(sc);

	return (ret);
}

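/*
 * Execute a flash command and unpack the response bytes from the low
 * command read-data register.
 */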
static int
cqspi_cmd_read(struct cqspi_softc *sc, uint8_t cmd,
    uint8_t *addr, uint32_t len)
{
	uint32_t data;
	uint32_t reg;
	uint8_t *buf;
	int ret;
	int i;

	if (len > 8) {
		device_printf(sc->dev,
		    "%s: cannot read more than 8 bytes\n", __func__);
		return (-1);
	}

	dprintf("%s: %x\n", __func__, cmd);

	buf = (uint8_t *)addr;

	reg = (cmd << FLASHCMD_CMDOPCODE_S);
	reg |= ((len - 1) << FLASHCMD_NUMRDDATABYTES_S);
	reg |= FLASHCMD_ENRDDATA;
	WRITE4(sc, CQSPI_FLASHCMD, reg);

	reg |= FLASHCMD_EXECCMD;
	WRITE4(sc, CQSPI_FLASHCMD, reg);

	ret = cqspi_wait_for_completion(sc);
	if (ret != 0) {
		device_printf(sc->dev, "%s: cmd failed: %x\n",
		    __func__, cmd);
		return (ret);
	}

	data = READ4(sc, CQSPI_FLASHCMDRDDATALO);

	for (i = 0; i < len; i++)
		buf[i] = (data >> (i * 8)) & 0xff;

	return (0);
}

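/* Poll the flash status register until the write-in-progress bit clears. */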
static int
cqspi_wait_ready(struct cqspi_softc *sc)
{
	uint8_t data;
	int ret;

	do {
		ret = cqspi_cmd_read(sc, CMD_READ_STATUS, &data, 1);
	} while (data & STATUS_WIP);

	return (0);
}

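/*
 * qspi_if register access methods: forward opcode writes and reads from
 * the flash child device to the FLASHCMD command logic.
 */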
static int
cqspi_write_reg(device_t dev, device_t child,
    uint8_t opcode, uint8_t *addr, uint32_t len)
{
	struct cqspi_softc *sc;
	int ret;

	sc = device_get_softc(dev);

	ret = cqspi_cmd_write(sc, opcode, addr, len);

	return (ret);
}

static int
cqspi_read_reg(device_t dev, device_t child,
    uint8_t opcode, uint8_t *addr, uint32_t len)
{
	struct cqspi_softc *sc;
	int ret;

	sc = device_get_softc(dev);

	ret = cqspi_cmd_read(sc, opcode, addr, len);

	return (ret);
}

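/* Spin until the controller reports idle in the configuration register. */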
static int
cqspi_wait_idle(struct cqspi_softc *sc)
{
	uint32_t reg;

	do {
		reg = READ4(sc, CQSPI_CFG);
		if (reg & CFG_IDLE) {
			break;
		}
	} while (1);

	return (0);
}

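/*
 * Erase the sector at the given offset: issue WRITE ENABLE, then the
 * sector erase command with a 4-byte address.
 */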
static int
cqspi_erase(device_t dev, device_t child, off_t offset)
{
	struct cqspi_softc *sc;
	int ret;

	sc = device_get_softc(dev);

	cqspi_wait_idle(sc);
	cqspi_wait_ready(sc);
	ret = cqspi_cmd_write(sc, CMD_WRITE_ENABLE, 0, 0);

	cqspi_wait_idle(sc);
	cqspi_wait_ready(sc);
	ret = cqspi_cmd_write_addr(sc, CMD_QUAD_SECTOR_ERASE, offset, 4);

	cqspi_wait_idle(sc);

	return (0);
}

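/*
 * Indirect write: program the write parameters and transfer size, queue
 * the bio on the TX xDMA channel and sleep until the completion handler
 * marks the operation done.
 */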
static int
cqspi_write(device_t dev, device_t child, struct bio *bp,
    off_t offset, caddr_t data, off_t count)
{
	struct cqspi_softc *sc;
	uint32_t reg;

	dprintf("%s: offset 0x%llx count %lld bytes\n",
	    __func__, offset, count);

	sc = device_get_softc(dev);

	cqspi_wait_ready(sc);
	reg = cqspi_cmd_write(sc, CMD_WRITE_ENABLE, 0, 0);

	cqspi_wait_idle(sc);
	cqspi_wait_ready(sc);
	cqspi_wait_idle(sc);

	reg = DMAPER_NUMSGLREQBYTES_4;
	reg |= DMAPER_NUMBURSTREQBYTES_4;
	WRITE4(sc, CQSPI_DMAPER, reg);

	WRITE4(sc, CQSPI_INDWRWATER, 64);
	WRITE4(sc, CQSPI_INDWR, INDRD_IND_OPS_DONE_STATUS);
	WRITE4(sc, CQSPI_INDWR, 0);

	WRITE4(sc, CQSPI_INDWRCNT, count);
	WRITE4(sc, CQSPI_INDWRSTADDR, offset);

	reg = (0 << DEVWR_DUMMYWRCLKS_S);
	reg |= DEVWR_DATA_WIDTH_QUAD;
	reg |= DEVWR_ADDR_WIDTH_SINGLE;
	reg |= (CMD_QUAD_PAGE_PROGRAM << DEVWR_WROPCODE_S);
	WRITE4(sc, CQSPI_DEVWR, reg);

	reg = DEVRD_DATA_WIDTH_QUAD;
	reg |= DEVRD_ADDR_WIDTH_SINGLE;
	reg |= DEVRD_INST_WIDTH_SINGLE;
	WRITE4(sc, CQSPI_DEVRD, reg);

	xdma_enqueue_bio(sc->xchan_tx, &bp,
	    sc->sram_phys, 4, 4, XDMA_MEM_TO_DEV);
	xdma_queue_submit(sc->xchan_tx);

	sc->write_op_done = 0;

	WRITE4(sc, CQSPI_INDWR, INDRD_START);

	while (sc->write_op_done == 0)
		tsleep(&sc->xdma_tx, PCATCH | PZERO, "spi", hz / 2);

	cqspi_wait_idle(sc);

	return (0);
}

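/*
 * Indirect read: program the quad-output read parameters and transfer
 * size, queue the bio on the RX xDMA channel and sleep until the
 * completion handler marks the operation done.
 */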
static int
cqspi_read(device_t dev, device_t child, struct bio *bp,
    off_t offset, caddr_t data, off_t count)
{
	struct cqspi_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);

	dprintf("%s: offset 0x%llx count %lld bytes\n",
	    __func__, offset, count);

	cqspi_wait_idle(sc);

	reg = DMAPER_NUMSGLREQBYTES_4;
	reg |= DMAPER_NUMBURSTREQBYTES_4;
	WRITE4(sc, CQSPI_DMAPER, reg);

	WRITE4(sc, CQSPI_INDRDWATER, 64);
	WRITE4(sc, CQSPI_INDRD, INDRD_IND_OPS_DONE_STATUS);
	WRITE4(sc, CQSPI_INDRD, 0);

	WRITE4(sc, CQSPI_INDRDCNT, count);
	WRITE4(sc, CQSPI_INDRDSTADDR, offset);

	reg = (0 << DEVRD_DUMMYRDCLKS_S);
	reg |= DEVRD_DATA_WIDTH_QUAD;
	reg |= DEVRD_ADDR_WIDTH_SINGLE;
	reg |= DEVRD_INST_WIDTH_SINGLE;
	reg |= DEVRD_ENMODEBITS;
	reg |= (CMD_READ_4B_QUAD_OUTPUT << DEVRD_RDOPCODE_S);
	WRITE4(sc, CQSPI_DEVRD, reg);

	WRITE4(sc, CQSPI_MODEBIT, 0xff);
	WRITE4(sc, CQSPI_IRQMASK, 0);

	xdma_enqueue_bio(sc->xchan_rx, &bp, sc->sram_phys, 4, 4,
	    XDMA_DEV_TO_MEM);
	xdma_queue_submit(sc->xchan_rx);

	sc->read_op_done = 0;

	WRITE4(sc, CQSPI_INDRD, INDRD_START);

	while (sc->read_op_done == 0)
		tsleep(&sc->xdma_rx, PCATCH | PZERO, "spi", hz / 2);

	cqspi_wait_idle(sc);

	return (0);
}

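/*
 * One-time controller setup: read the FIFO geometry and trigger address
 * from the device tree, then program the address size, SRAM partition,
 * baud rate, delays and read capture delay with the controller disabled,
 * and finally re-enable it.
 */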
static int
cqspi_init(struct cqspi_softc *sc)
{
	pcell_t dts_value[1];
	phandle_t node;
	uint32_t reg;
	int len;

	device_printf(sc->dev, "Module ID %x\n",
	    READ4(sc, CQSPI_MODULEID));

	if ((node = ofw_bus_get_node(sc->dev)) == -1) {
		return (ENXIO);
	}

	if ((len = OF_getproplen(node, "cdns,fifo-depth")) <= 0) {
		return (ENXIO);
	}
	OF_getencprop(node, "cdns,fifo-depth", dts_value, len);
	sc->fifo_depth = dts_value[0];

	if ((len = OF_getproplen(node, "cdns,fifo-width")) <= 0) {
		return (ENXIO);
	}
	OF_getencprop(node, "cdns,fifo-width", dts_value, len);
	sc->fifo_width = dts_value[0];

	if ((len = OF_getproplen(node, "cdns,trigger-address")) <= 0) {
		return (ENXIO);
	}
	OF_getencprop(node, "cdns,trigger-address", dts_value, len);
	sc->trigger_address = dts_value[0];

	/* Disable controller */
	reg = READ4(sc, CQSPI_CFG);
	reg &= ~(CFG_EN);
	WRITE4(sc, CQSPI_CFG, reg);

	reg = READ4(sc, CQSPI_DEVSZ);
	reg &= ~(DEVSZ_NUMADDRBYTES_M);
	reg |= ((4 - 1) << DEVSZ_NUMADDRBYTES_S);
	WRITE4(sc, CQSPI_DEVSZ, reg);

	WRITE4(sc, CQSPI_SRAMPART, sc->fifo_depth / 2);

	/* TODO: calculate baud rate and delay values. */

	reg = READ4(sc, CQSPI_CFG);
	/* Configure baud rate */
	reg &= ~(CFG_BAUD_M);
	reg |= CFG_BAUD12;
	reg |= CFG_ENDMA;
	WRITE4(sc, CQSPI_CFG, reg);

	reg = (3 << DELAY_NSS_S);
	reg |= (3 << DELAY_BTWN_S);
	reg |= (1 << DELAY_AFTER_S);
	reg |= (1 << DELAY_INIT_S);
	WRITE4(sc, CQSPI_DELAY, reg);

	reg = READ4(sc, CQSPI_RDDATACAP);
	reg &= ~(RDDATACAP_DELAY_M);
	reg |= (1 << RDDATACAP_DELAY_S);
	WRITE4(sc, CQSPI_RDDATACAP, reg);

	/* Enable controller */
	reg = READ4(sc, CQSPI_CFG);
	reg |= (CFG_EN);
	WRITE4(sc, CQSPI_CFG, reg);

	return (0);
}

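/* Add and attach the child devices described under the controller node. */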
static int
cqspi_add_devices(device_t dev)
{
	phandle_t child, node;
	device_t child_dev;
	int error;

	node = ofw_bus_get_node(dev);

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		child_dev =
		    simplebus_add_device(dev, child, 0, NULL, -1, NULL);
		if (child_dev == NULL) {
			return (ENXIO);
		}

		error = device_probe_and_attach(child_dev);
		if (error != 0) {
			printf("can't probe and attach: %d\n", error);
		}
	}

	return (0);
}

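/* Deferred child attachment, run from the config intrhook. */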
static void
cqspi_delayed_attach(void *arg)
{
	struct cqspi_softc *sc;

	sc = arg;

	cqspi_add_devices(sc->dev);
	bus_generic_attach(sc->dev);

	config_intrhook_disestablish(&sc->config_intrhook);
}

static int
cqspi_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev)) {
		return (ENXIO);
	}

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) {
		return (ENXIO);
	}

	device_set_desc(dev, "Cadence Quad SPI controller");

	return (0);
}

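/*
 * Attach: map the register and SRAM windows, set up the controller
 * interrupt and the TX/RX xDMA channels, initialize the hardware and
 * defer child attachment until interrupts are available.
 */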
static int
cqspi_attach(device_t dev)
{
	struct cqspi_softc *sc;
	uint32_t caps;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (bus_alloc_resources(dev, cqspi_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	sc->sram_phys = rman_get_start(sc->res[1]);

	/* Setup interrupt handlers */
	if (bus_setup_intr(sc->dev, sc->res[2], INTR_TYPE_BIO | INTR_MPSAFE,
	    NULL, cqspi_intr, sc, &sc->ih)) {
		device_printf(sc->dev, "Unable to setup intr\n");
		return (ENXIO);
	}

	CQSPI_LOCK_INIT(sc);

	caps = 0;

	/* Get xDMA controller. */
	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
	if (sc->xdma_tx == NULL) {
		device_printf(dev, "Can't find DMA controller.\n");
		return (ENXIO);
	}

	sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
	if (sc->xdma_rx == NULL) {
		device_printf(dev, "Can't find DMA controller.\n");
		return (ENXIO);
	}

	/* Alloc xDMA virtual channels. */
	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, caps);
	if (sc->xchan_tx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA channel.\n");
		return (ENXIO);
	}

	sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, caps);
	if (sc->xchan_rx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA channel.\n");
		return (ENXIO);
	}

	/* Setup xDMA interrupt handlers. */
	error = xdma_setup_intr(sc->xchan_tx, 0, cqspi_xdma_tx_intr,
	    sc, &sc->ih_tx);
	if (error) {
		device_printf(sc->dev,
		    "Can't setup xDMA interrupt handler.\n");
		return (ENXIO);
	}

	error = xdma_setup_intr(sc->xchan_rx, 0, cqspi_xdma_rx_intr,
	    sc, &sc->ih_rx);
	if (error) {
		device_printf(sc->dev,
		    "Can't setup xDMA interrupt handler.\n");
		return (ENXIO);
	}

	xdma_prep_sg(sc->xchan_tx, TX_QUEUE_SIZE, maxphys, 8, 16, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
	xdma_prep_sg(sc->xchan_rx, RX_QUEUE_SIZE, maxphys, 8, 16, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);

	cqspi_init(sc);

	sc->config_intrhook.ich_func = cqspi_delayed_attach;
	sc->config_intrhook.ich_arg = sc;
	if (config_intrhook_establish(&sc->config_intrhook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		return (ENOMEM);
	}

	return (0);
}

static int
cqspi_detach(device_t dev)
{

	return (ENXIO);
}

static device_method_t cqspi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cqspi_probe),
	DEVMETHOD(device_attach,	cqspi_attach),
	DEVMETHOD(device_detach,	cqspi_detach),

	/* Quad SPI Flash Interface */
	DEVMETHOD(qspi_read_reg,	cqspi_read_reg),
	DEVMETHOD(qspi_write_reg,	cqspi_write_reg),
	DEVMETHOD(qspi_read,		cqspi_read),
	DEVMETHOD(qspi_write,		cqspi_write),
	DEVMETHOD(qspi_erase,		cqspi_erase),

	{ 0, 0 }
};

static devclass_t cqspi_devclass;

DEFINE_CLASS_1(cqspi, cqspi_driver, cqspi_methods,
    sizeof(struct cqspi_softc), simplebus_driver);

DRIVER_MODULE(cqspi, simplebus, cqspi_driver, cqspi_devclass, 0, 0);