1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2018 Emmanuel Vadot <[email protected]>
5 * Copyright (c) 2013 Alexander Fedorov
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/resource.h>
42 #include <sys/rman.h>
43 #include <sys/sysctl.h>
44 #include <sys/queue.h>
45 #include <sys/taskqueue.h>
46
47 #include <machine/bus.h>
48
49 #include <dev/ofw/ofw_bus.h>
50 #include <dev/ofw/ofw_bus_subr.h>
51
52 #include <dev/mmc/bridge.h>
53 #include <dev/mmc/mmcbrvar.h>
54 #include <dev/mmc/mmc_fdt_helpers.h>
55
56 #include <arm/allwinner/aw_mmc.h>
57 #include <dev/extres/clk/clk.h>
58 #include <dev/extres/hwreset/hwreset.h>
59 #include <dev/extres/regulator/regulator.h>
60
61 #include "opt_mmccam.h"
62
63 #ifdef MMCCAM
64 #include <cam/cam.h>
65 #include <cam/cam_ccb.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #endif
70
71 #define AW_MMC_MEMRES 0
72 #define AW_MMC_IRQRES 1
73 #define AW_MMC_RESSZ 2
74 #define AW_MMC_DMA_SEGS (PAGE_SIZE / sizeof(struct aw_mmc_dma_desc))
75 #define AW_MMC_DMA_DESC_SIZE (sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS)
76 #define AW_MMC_DMA_FTRGLEVEL 0x20070008
77
78 #define AW_MMC_RESET_RETRY 1000
79
80 #define CARD_ID_FREQUENCY 400000
81
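/*
 * Per-SoC configuration: the maximum transfer length of a single DMA
 * descriptor, whether DATA0 must be masked while updating the card clock,
 * and whether the controller supports delay calibration and the "new
 * timing" mode.
 */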
82 struct aw_mmc_conf {
83 uint32_t dma_xferlen;
84 bool mask_data0;
85 bool can_calibrate;
86 bool new_timing;
87 };
88
89 static const struct aw_mmc_conf a10_mmc_conf = {
90 .dma_xferlen = 0x2000,
91 };
92
93 static const struct aw_mmc_conf a13_mmc_conf = {
94 .dma_xferlen = 0x10000,
95 };
96
97 static const struct aw_mmc_conf a64_mmc_conf = {
98 .dma_xferlen = 0x10000,
99 .mask_data0 = true,
100 .can_calibrate = true,
101 .new_timing = true,
102 };
103
104 static const struct aw_mmc_conf a64_emmc_conf = {
105 .dma_xferlen = 0x2000,
106 .can_calibrate = true,
107 };
108
109 static struct ofw_compat_data compat_data[] = {
110 {"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
111 {"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
112 {"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
113 {"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
114 {"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
115 {NULL, 0}
116 };
117
118 struct aw_mmc_softc {
119 device_t aw_dev;
120 clk_t aw_clk_ahb;
121 clk_t aw_clk_mmc;
122 hwreset_t aw_rst_ahb;
123 int aw_bus_busy;
124 int aw_resid;
125 int aw_timeout;
126 struct callout aw_timeoutc;
127 struct mmc_host aw_host;
128 struct mmc_fdt_helper mmc_helper;
129 #ifdef MMCCAM
130 union ccb * ccb;
131 struct cam_devq * devq;
132 struct cam_sim * sim;
133 struct mtx sim_mtx;
134 #else
135 struct mmc_request * aw_req;
136 #endif
137 struct mtx aw_mtx;
138 struct resource * aw_res[AW_MMC_RESSZ];
139 struct aw_mmc_conf * aw_mmc_conf;
140 uint32_t aw_intr;
141 uint32_t aw_intr_wait;
142 void * aw_intrhand;
143 unsigned int aw_clock;
144 device_t child;
145
146 /* Fields required for DMA access. */
147 bus_addr_t aw_dma_desc_phys;
148 bus_dmamap_t aw_dma_map;
149 bus_dma_tag_t aw_dma_tag;
150 void * aw_dma_desc;
151 bus_dmamap_t aw_dma_buf_map;
152 bus_dma_tag_t aw_dma_buf_tag;
153 int aw_dma_map_err;
154 };
155
156 static struct resource_spec aw_mmc_res_spec[] = {
157 { SYS_RES_MEMORY, 0, RF_ACTIVE },
158 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
159 { -1, 0, 0 }
160 };
161
162 static int aw_mmc_probe(device_t);
163 static int aw_mmc_attach(device_t);
164 static int aw_mmc_detach(device_t);
165 static int aw_mmc_setup_dma(struct aw_mmc_softc *);
166 static void aw_mmc_teardown_dma(struct aw_mmc_softc *sc);
167 static int aw_mmc_reset(struct aw_mmc_softc *);
168 static int aw_mmc_init(struct aw_mmc_softc *);
169 static void aw_mmc_intr(void *);
170 static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);
171 static void aw_mmc_helper_cd_handler(device_t, bool);
172
173 static void aw_mmc_print_error(uint32_t);
174 static int aw_mmc_update_ios(device_t, device_t);
175 static int aw_mmc_request(device_t, device_t, struct mmc_request *);
176 static int aw_mmc_get_ro(device_t, device_t);
177 static int aw_mmc_acquire_host(device_t, device_t);
178 static int aw_mmc_release_host(device_t, device_t);
179 #ifdef MMCCAM
180 static void aw_mmc_cam_action(struct cam_sim *, union ccb *);
181 static void aw_mmc_cam_poll(struct cam_sim *);
182 static int aw_mmc_cam_settran_settings(struct aw_mmc_softc *, union ccb *);
183 static int aw_mmc_cam_request(struct aw_mmc_softc *, union ccb *);
184 static void aw_mmc_cam_handle_mmcio(struct cam_sim *, union ccb *);
185 #endif
186
187 #define AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx)
188 #define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx)
189 #define AW_MMC_READ_4(_sc, _reg) \
190 bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
191 #define AW_MMC_WRITE_4(_sc, _reg, _value) \
192 bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)
193
194 SYSCTL_NODE(_hw, OID_AUTO, aw_mmc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
195 "aw_mmc driver");
196
197 static int aw_mmc_debug = 0;
198 SYSCTL_INT(_hw_aw_mmc, OID_AUTO, debug, CTLFLAG_RWTUN, &aw_mmc_debug, 0,
199 "Debug level bit0=card changes bit1=ios changes, bit2=interrupts, bit3=commands");
200 #define AW_MMC_DEBUG_CARD 0x1
201 #define AW_MMC_DEBUG_IOS 0x2
202 #define AW_MMC_DEBUG_INT 0x4
203 #define AW_MMC_DEBUG_CMD 0x8
204
205 #ifdef MMCCAM
206 static void
207 aw_mmc_cam_handle_mmcio(struct cam_sim *sim, union ccb *ccb)
208 {
209 struct aw_mmc_softc *sc;
210
211 sc = cam_sim_softc(sim);
212
213 aw_mmc_cam_request(sc, ccb);
214 }
215
216 static void
217 aw_mmc_cam_action(struct cam_sim *sim, union ccb *ccb)
218 {
219 struct aw_mmc_softc *sc;
220
221 sc = cam_sim_softc(sim);
222 if (sc == NULL) {
223 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
224 xpt_done(ccb);
225 return;
226 }
227
228 mtx_assert(&sc->sim_mtx, MA_OWNED);
229
230 switch (ccb->ccb_h.func_code) {
231 case XPT_PATH_INQ:
232 mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim,
233 (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS) /
234 MMC_SECTOR_SIZE);
235 break;
236
237 case XPT_GET_TRAN_SETTINGS:
238 {
239 struct ccb_trans_settings *cts = &ccb->cts;
240
241 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
242 device_printf(sc->aw_dev, "Got XPT_GET_TRAN_SETTINGS\n");
243
244 cts->protocol = PROTO_MMCSD;
245 cts->protocol_version = 1;
246 cts->transport = XPORT_MMCSD;
247 cts->transport_version = 1;
248 cts->xport_specific.valid = 0;
249 cts->proto_specific.mmc.host_ocr = sc->aw_host.host_ocr;
250 cts->proto_specific.mmc.host_f_min = sc->aw_host.f_min;
251 cts->proto_specific.mmc.host_f_max = sc->aw_host.f_max;
252 cts->proto_specific.mmc.host_caps = sc->aw_host.caps;
253 cts->proto_specific.mmc.host_max_data = (sc->aw_mmc_conf->dma_xferlen *
254 AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
255 memcpy(&cts->proto_specific.mmc.ios, &sc->aw_host.ios, sizeof(struct mmc_ios));
256 ccb->ccb_h.status = CAM_REQ_CMP;
257 break;
258 }
259 case XPT_SET_TRAN_SETTINGS:
260 {
261 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
262 device_printf(sc->aw_dev, "Got XPT_SET_TRAN_SETTINGS\n");
263 aw_mmc_cam_settran_settings(sc, ccb);
264 ccb->ccb_h.status = CAM_REQ_CMP;
265 break;
266 }
267 case XPT_RESET_BUS:
268 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
269 device_printf(sc->aw_dev, "Got XPT_RESET_BUS, ACK it...\n");
270 ccb->ccb_h.status = CAM_REQ_CMP;
271 break;
272 case XPT_MMC_IO:
273 /*
274 * This is the HW-dependent part of sending the command
275 * to the underlying hardware. At some point in the future
276 * an interrupt will arrive; the request will then be
277 * marked as completed.
278 */
279 ccb->ccb_h.status = CAM_REQ_INPROG;
280
281 aw_mmc_cam_handle_mmcio(sim, ccb);
282 return;
283 /* NOTREACHED */
284 break;
285 default:
286 ccb->ccb_h.status = CAM_REQ_INVALID;
287 break;
288 }
289 xpt_done(ccb);
290 return;
291 }
292
293 static void
294 aw_mmc_cam_poll(struct cam_sim *sim)
295 {
296 return;
297 }
298
299 static int
300 aw_mmc_cam_settran_settings(struct aw_mmc_softc *sc, union ccb *ccb)
301 {
302 struct mmc_ios *ios;
303 struct mmc_ios *new_ios;
304 struct ccb_trans_settings_mmc *cts;
305
306 ios = &sc->aw_host.ios;
307
308 cts = &ccb->cts.proto_specific.mmc;
309 new_ios = &cts->ios;
310
311 /* Update only requested fields */
312 if (cts->ios_valid & MMC_CLK) {
313 ios->clock = new_ios->clock;
314 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
315 device_printf(sc->aw_dev, "Clock => %d\n", ios->clock);
316 }
317 if (cts->ios_valid & MMC_VDD) {
318 ios->vdd = new_ios->vdd;
319 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
320 device_printf(sc->aw_dev, "VDD => %d\n", ios->vdd);
321 }
322 if (cts->ios_valid & MMC_CS) {
323 ios->chip_select = new_ios->chip_select;
324 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
325 device_printf(sc->aw_dev, "CS => %d\n", ios->chip_select);
326 }
327 if (cts->ios_valid & MMC_BW) {
328 ios->bus_width = new_ios->bus_width;
329 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
330 device_printf(sc->aw_dev, "Bus width => %d\n", ios->bus_width);
331 }
332 if (cts->ios_valid & MMC_PM) {
333 ios->power_mode = new_ios->power_mode;
334 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
335 device_printf(sc->aw_dev, "Power mode => %d\n", ios->power_mode);
336 }
337 if (cts->ios_valid & MMC_BT) {
338 ios->timing = new_ios->timing;
339 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
340 device_printf(sc->aw_dev, "Timing => %d\n", ios->timing);
341 }
342 if (cts->ios_valid & MMC_BM) {
343 ios->bus_mode = new_ios->bus_mode;
344 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
345 device_printf(sc->aw_dev, "Bus mode => %d\n", ios->bus_mode);
346 }
347
348 return (aw_mmc_update_ios(sc->aw_dev, NULL));
349 }
350
351 static int
352 aw_mmc_cam_request(struct aw_mmc_softc *sc, union ccb *ccb)
353 {
354 struct ccb_mmcio *mmcio;
355
356 mmcio = &ccb->mmcio;
357
358 AW_MMC_LOCK(sc);
359
360 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
361 device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
362 mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
363 mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
364 mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
365 }
366 if (mmcio->cmd.data != NULL) {
367 if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
368 panic("data->len = %d, data->flags = %d -- something is b0rked",
369 (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
370 }
371 if (sc->ccb != NULL) {
372 device_printf(sc->aw_dev, "Controller still has an active command\n");
AW_MMC_UNLOCK(sc);
373 return (EBUSY);
374 }
375 sc->ccb = ccb;
376 /* aw_mmc_request locks again */
377 AW_MMC_UNLOCK(sc);
378 aw_mmc_request(sc->aw_dev, NULL, NULL);
379
380 return (0);
381 }
382 #endif /* MMCCAM */
383
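/*
 * Card-detect callback invoked through the mmc_fdt_helpers GPIO code
 * whenever card presence changes.  With MMCCAM the SIM is asked to
 * rediscover the bus; otherwise an mmc(4) child device is attached or
 * detached to match the card state.
 */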
384 static void
385 aw_mmc_helper_cd_handler(device_t dev, bool present)
386 {
387 struct aw_mmc_softc *sc;
388
389 sc = device_get_softc(dev);
390 #ifdef MMCCAM
391 mmccam_start_discovery(sc->sim);
392 #else
393 AW_MMC_LOCK(sc);
394 if (present) {
395 if (sc->child == NULL) {
396 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
397 device_printf(sc->aw_dev, "Card inserted\n");
398
399 sc->child = device_add_child(sc->aw_dev, "mmc", -1);
400 AW_MMC_UNLOCK(sc);
401 if (sc->child) {
402 device_set_ivars(sc->child, sc);
403 (void)device_probe_and_attach(sc->child);
404 }
405 } else
406 AW_MMC_UNLOCK(sc);
407 } else {
408 /* Card isn't present, detach if necessary */
409 if (sc->child != NULL) {
410 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
411 device_printf(sc->aw_dev, "Card removed\n");
412
413 AW_MMC_UNLOCK(sc);
414 device_delete_child(sc->aw_dev, sc->child);
415 sc->child = NULL;
416 } else
417 AW_MMC_UNLOCK(sc);
418 }
419 #endif /* MMCCAM */
420 }
421
422 static int
423 aw_mmc_probe(device_t dev)
424 {
425
426 if (!ofw_bus_status_okay(dev))
427 return (ENXIO);
428 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
429 return (ENXIO);
430
431 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
432
433 return (BUS_PROBE_DEFAULT);
434 }
435
436 static int
437 aw_mmc_attach(device_t dev)
438 {
439 struct aw_mmc_softc *sc;
440 struct sysctl_ctx_list *ctx;
441 struct sysctl_oid_list *tree;
442 int error;
443
444 sc = device_get_softc(dev);
445 sc->aw_dev = dev;
446
447 sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
448
449 #ifndef MMCCAM
450 sc->aw_req = NULL;
451 #endif
452 if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
453 device_printf(dev, "cannot allocate device resources\n");
454 return (ENXIO);
455 }
456 if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
457 INTR_TYPE_NET | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
458 &sc->aw_intrhand)) {
459 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
460 device_printf(dev, "cannot setup interrupt handler\n");
461 return (ENXIO);
462 }
463 mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
464 MTX_DEF);
465 callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);
466
467 /* De-assert reset */
468 if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
469 error = hwreset_deassert(sc->aw_rst_ahb);
470 if (error != 0) {
471 device_printf(dev, "cannot de-assert reset\n");
472 goto fail;
473 }
474 }
475
476 /* Activate the module clock. */
477 error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
478 if (error != 0) {
479 device_printf(dev, "cannot get ahb clock\n");
480 goto fail;
481 }
482 error = clk_enable(sc->aw_clk_ahb);
483 if (error != 0) {
484 device_printf(dev, "cannot enable ahb clock\n");
485 goto fail;
486 }
487 error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
488 if (error != 0) {
489 device_printf(dev, "cannot get mmc clock\n");
490 goto fail;
491 }
492 error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
493 CLK_SET_ROUND_DOWN);
494 if (error != 0) {
495 device_printf(dev, "cannot init mmc clock\n");
496 goto fail;
497 }
498 error = clk_enable(sc->aw_clk_mmc);
499 if (error != 0) {
500 device_printf(dev, "cannot enable mmc clock\n");
501 goto fail;
502 }
503
504 sc->aw_timeout = 10;
505 ctx = device_get_sysctl_ctx(dev);
506 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
507 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
508 &sc->aw_timeout, 0, "Request timeout in seconds");
509
510 /* Soft Reset controller. */
511 if (aw_mmc_reset(sc) != 0) {
512 device_printf(dev, "cannot reset the controller\n");
513 goto fail;
514 }
515
516 if (aw_mmc_setup_dma(sc) != 0) {
517 device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
518 goto fail;
519 }
520
521 /* Set some defaults for freq and supported mode */
522 sc->aw_host.f_min = 400000;
523 sc->aw_host.f_max = 52000000;
524 sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
525 sc->aw_host.caps |= MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
526 mmc_fdt_parse(dev, 0, &sc->mmc_helper, &sc->aw_host);
527 mmc_fdt_gpio_setup(dev, 0, &sc->mmc_helper, aw_mmc_helper_cd_handler);
528
529 #ifdef MMCCAM
530 sc->ccb = NULL;
531 if ((sc->devq = cam_simq_alloc(1)) == NULL) {
532 goto fail;
533 }
534
535 mtx_init(&sc->sim_mtx, "awmmcsim", NULL, MTX_DEF);
536 sc->sim = cam_sim_alloc_dev(aw_mmc_cam_action, aw_mmc_cam_poll,
537 "aw_mmc_sim", sc, dev,
538 &sc->sim_mtx, 1, 1, sc->devq);
539
540 if (sc->sim == NULL) {
541 cam_simq_free(sc->devq);
542 device_printf(dev, "cannot allocate CAM SIM\n");
543 goto fail;
544 }
545
546 mtx_lock(&sc->sim_mtx);
547 if (xpt_bus_register(sc->sim, sc->aw_dev, 0) != 0) {
548 device_printf(dev, "cannot register SCSI pass-through bus\n");
549 cam_sim_free(sc->sim, FALSE);
550 cam_simq_free(sc->devq);
551 mtx_unlock(&sc->sim_mtx);
552 goto fail;
553 }
554
555 mtx_unlock(&sc->sim_mtx);
556 #endif /* MMCCAM */
557
558 return (0);
559
560 fail:
561 callout_drain(&sc->aw_timeoutc);
562 mtx_destroy(&sc->aw_mtx);
563 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
564 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
565
566 #ifdef MMCCAM
567 if (sc->sim != NULL) {
568 mtx_lock(&sc->sim_mtx);
569 xpt_bus_deregister(cam_sim_path(sc->sim));
570 cam_sim_free(sc->sim, FALSE);
571 mtx_unlock(&sc->sim_mtx);
572 }
573
574 if (sc->devq != NULL)
575 cam_simq_free(sc->devq);
576 #endif
577 return (ENXIO);
578 }
579
580 static int
581 aw_mmc_detach(device_t dev)
582 {
583 struct aw_mmc_softc *sc;
584 device_t d;
585
586 sc = device_get_softc(dev);
587
588 clk_disable(sc->aw_clk_mmc);
589 clk_disable(sc->aw_clk_ahb);
590 hwreset_assert(sc->aw_rst_ahb);
591
592 mmc_fdt_gpio_teardown(&sc->mmc_helper);
593
594 callout_drain(&sc->aw_timeoutc);
595
596 AW_MMC_LOCK(sc);
597 d = sc->child;
598 sc->child = NULL;
599 AW_MMC_UNLOCK(sc);
600 if (d != NULL)
601 device_delete_child(sc->aw_dev, d);
602
603 aw_mmc_teardown_dma(sc);
604
605 mtx_destroy(&sc->aw_mtx);
606
607 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
608 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
609
610 #ifdef MMCCAM
611 if (sc->sim != NULL) {
612 mtx_lock(&sc->sim_mtx);
613 xpt_bus_deregister(cam_sim_path(sc->sim));
614 cam_sim_free(sc->sim, FALSE);
615 mtx_unlock(&sc->sim_mtx);
616 }
617
618 if (sc->devq != NULL)
619 cam_simq_free(sc->devq);
620 #endif
621
622 return (0);
623 }
624
625 static void
626 aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
627 {
628 struct aw_mmc_softc *sc;
629
630 sc = (struct aw_mmc_softc *)arg;
631 if (err) {
632 sc->aw_dma_map_err = err;
633 return;
634 }
635 sc->aw_dma_desc_phys = segs[0].ds_addr;
636 }
637
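/*
 * Allocate the descriptor ring used by the controller's internal DMA
 * engine (a tag, DMA-able memory and its bus address) and create the tag
 * and map used to load the data buffers of individual requests.
 */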
638 static int
639 aw_mmc_setup_dma(struct aw_mmc_softc *sc)
640 {
641 int error;
642
643 /* Allocate the DMA descriptor memory. */
644 error = bus_dma_tag_create(
645 bus_get_dma_tag(sc->aw_dev), /* parent */
646 AW_MMC_DMA_ALIGN, 0, /* align, boundary */
647 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
648 BUS_SPACE_MAXADDR, /* highaddr */
649 NULL, NULL, /* filter, filterarg*/
650 AW_MMC_DMA_DESC_SIZE, 1, /* maxsize, nsegment */
651 AW_MMC_DMA_DESC_SIZE, /* maxsegsize */
652 0, /* flags */
653 NULL, NULL, /* lock, lockarg*/
654 &sc->aw_dma_tag);
655 if (error)
656 return (error);
657
658 error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
659 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
660 &sc->aw_dma_map);
661 if (error)
662 return (error);
663
664 error = bus_dmamap_load(sc->aw_dma_tag,
665 sc->aw_dma_map,
666 sc->aw_dma_desc, AW_MMC_DMA_DESC_SIZE,
667 aw_dma_desc_cb, sc, 0);
668 if (error)
669 return (error);
670 if (sc->aw_dma_map_err)
671 return (sc->aw_dma_map_err);
672
673 /* Create the DMA map for data transfers. */
674 error = bus_dma_tag_create(
675 bus_get_dma_tag(sc->aw_dev), /* parent */
676 AW_MMC_DMA_ALIGN, 0, /* align, boundary */
677 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
678 BUS_SPACE_MAXADDR, /* highaddr */
679 NULL, NULL, /* filter, filterarg*/
680 sc->aw_mmc_conf->dma_xferlen *
681 AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS, /* maxsize, nsegments */
682 sc->aw_mmc_conf->dma_xferlen, /* maxsegsize */
683 BUS_DMA_ALLOCNOW, /* flags */
684 NULL, NULL, /* lock, lockarg*/
685 &sc->aw_dma_buf_tag);
686 if (error)
687 return (error);
688 error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
689 &sc->aw_dma_buf_map);
690 if (error)
691 return (error);
692
693 return (0);
694 }
695
696 static void
697 aw_mmc_teardown_dma(struct aw_mmc_softc *sc)
698 {
699
700 bus_dmamap_unload(sc->aw_dma_tag, sc->aw_dma_map);
701 bus_dmamem_free(sc->aw_dma_tag, sc->aw_dma_desc, sc->aw_dma_map);
702 if (bus_dma_tag_destroy(sc->aw_dma_tag) != 0)
703 device_printf(sc->aw_dev, "Cannot destroy the dma tag\n");
704
705 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
706 bus_dmamap_destroy(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
707 if (bus_dma_tag_destroy(sc->aw_dma_buf_tag) != 0)
708 device_printf(sc->aw_dev, "Cannot destroy the dma buf tag\n");
709 }
710
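/*
 * Busdma callback that converts the loaded data segments into the
 * descriptor chain consumed by the IDMA engine.  A buf_size of zero
 * tells the controller to use the maximum transfer length; the last
 * descriptor ends the chain (LD/ER set, DIC cleared, next = 0).
 */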
711 static void
712 aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
713 {
714 int i;
715 struct aw_mmc_dma_desc *dma_desc;
716 struct aw_mmc_softc *sc;
717
718 sc = (struct aw_mmc_softc *)arg;
719 sc->aw_dma_map_err = err;
720
721 if (err)
722 return;
723
724 dma_desc = sc->aw_dma_desc;
725 for (i = 0; i < nsegs; i++) {
726 if (segs[i].ds_len == sc->aw_mmc_conf->dma_xferlen)
727 dma_desc[i].buf_size = 0; /* Size of 0 indicates max len */
728 else
729 dma_desc[i].buf_size = segs[i].ds_len;
730 dma_desc[i].buf_addr = segs[i].ds_addr;
731 dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
732 AW_MMC_DMA_CONFIG_OWN | AW_MMC_DMA_CONFIG_DIC;
733
734 dma_desc[i].next = sc->aw_dma_desc_phys +
735 ((i + 1) * sizeof(struct aw_mmc_dma_desc));
736 }
737
738 dma_desc[0].config |= AW_MMC_DMA_CONFIG_FD;
739 dma_desc[nsegs - 1].config |= AW_MMC_DMA_CONFIG_LD |
740 AW_MMC_DMA_CONFIG_ER;
741 dma_desc[nsegs - 1].config &= ~AW_MMC_DMA_CONFIG_DIC;
742 dma_desc[nsegs - 1].next = 0;
743 }
744
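/*
 * Map the data buffer of the current command, sync the buffer and the
 * descriptor ring, enable and soft-reset the IDMA engine, and program
 * the descriptor list address and FIFO threshold, leaving everything
 * ready for the data phase of the command.
 */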
745 static int
746 aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
747 {
748 bus_dmasync_op_t sync_op;
749 int error;
750 struct mmc_command *cmd;
751 uint32_t val;
752
753 #ifdef MMCCAM
754 cmd = &sc->ccb->mmcio.cmd;
755 #else
756 cmd = sc->aw_req->cmd;
757 #endif
758 if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
759 return (EFBIG);
760 error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
761 cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
762 if (error)
763 return (error);
764 if (sc->aw_dma_map_err)
765 return (sc->aw_dma_map_err);
766
767 if (cmd->data->flags & MMC_DATA_WRITE)
768 sync_op = BUS_DMASYNC_PREWRITE;
769 else
770 sync_op = BUS_DMASYNC_PREREAD;
771 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
772 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);
773
774 /* Enable DMA */
775 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
776 val &= ~AW_MMC_GCTL_FIFO_AC_MOD;
777 val |= AW_MMC_GCTL_DMA_ENB;
778 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
779
780 /* Reset DMA */
781 val |= AW_MMC_GCTL_DMA_RST;
782 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
783
784 AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
785 AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
786 AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);
787
788 /* Enable RX or TX DMA interrupt */
789 val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
790 if (cmd->data->flags & MMC_DATA_WRITE)
791 val |= AW_MMC_IDST_TX_INT;
792 else
793 val |= AW_MMC_IDST_RX_INT;
794 AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);
795
796 /* Set DMA descriptor list address */
797 AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);
798
799 /* FIFO trigger level */
800 AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);
801
802 return (0);
803 }
804
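/*
 * Soft-reset the controller and wait for the reset bit to self-clear.
 */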
805 static int
806 aw_mmc_reset(struct aw_mmc_softc *sc)
807 {
808 uint32_t reg;
809 int timeout;
810
811 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
812 reg |= AW_MMC_GCTL_RESET;
813 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
814 timeout = AW_MMC_RESET_RETRY;
815 while (--timeout > 0) {
816 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0)
817 break;
818 DELAY(100);
819 }
820 if (timeout == 0)
821 return (ETIMEDOUT);
822
823 return (0);
824 }
825
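/*
 * Reset the controller and program the power-on defaults: maximum
 * response/data timeout limits, the interrupt mask register cleared,
 * pending interrupts acknowledged, and the global control register set
 * up for DMA (interrupts on, AHB/FIFO access mode off).
 */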
826 static int
827 aw_mmc_init(struct aw_mmc_softc *sc)
828 {
829 uint32_t reg;
830 int ret;
831
832 ret = aw_mmc_reset(sc);
833 if (ret != 0)
834 return (ret);
835
836 /* Set the timeout. */
837 AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
838 AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
839 AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));
840
841 /* Unmask interrupts. */
842 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0);
843
844 /* Clear pending interrupts. */
845 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
846
847 /* Debug register, undocumented */
848 AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb);
849
850 /* Function select register */
851 AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000);
852
853 AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
854
855 /* Enable interrupts and disable AHB access. */
856 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
857 reg |= AW_MMC_GCTL_INT_ENB;
858 reg &= ~AW_MMC_GCTL_FIFO_AC_MOD;
859 reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS;
860 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
861
862 return (0);
863 }
864
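/*
 * Finish the current request.  On error the FIFO and DMA engines are
 * reset and the card clock is re-enabled before the timeout callout is
 * stopped and the CCB (MMCCAM) or mmc_request completion is reported.
 */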
865 static void
866 aw_mmc_req_done(struct aw_mmc_softc *sc)
867 {
868 struct mmc_command *cmd;
869 #ifdef MMCCAM
870 union ccb *ccb;
871 #else
872 struct mmc_request *req;
873 #endif
874 uint32_t val, mask;
875 int retry;
876
877 #ifdef MMCCAM
878 ccb = sc->ccb;
879 cmd = &ccb->mmcio.cmd;
880 #else
881 cmd = sc->aw_req->cmd;
882 #endif
883 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
884 device_printf(sc->aw_dev, "%s: cmd %d err %d\n", __func__, cmd->opcode, cmd->error);
885 }
886 if (cmd->error != MMC_ERR_NONE) {
887 /* Reset the FIFO and DMA engines. */
888 mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST;
889 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
890 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);
891
892 retry = AW_MMC_RESET_RETRY;
893 while (--retry > 0) {
894 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) &
895 AW_MMC_GCTL_RESET) == 0)
896 break;
897 DELAY(100);
898 }
899 if (retry == 0)
900 device_printf(sc->aw_dev,
901 "timeout resetting DMA/FIFO\n");
902 aw_mmc_update_clock(sc, 1);
903 }
904
905 callout_stop(&sc->aw_timeoutc);
906 sc->aw_intr = 0;
907 sc->aw_resid = 0;
908 sc->aw_dma_map_err = 0;
909 sc->aw_intr_wait = 0;
910 #ifdef MMCCAM
911 sc->ccb = NULL;
912 ccb->ccb_h.status =
913 (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
914 xpt_done(ccb);
915 #else
916 req = sc->aw_req;
917 sc->aw_req = NULL;
918 req->done(req);
919 #endif
920 }
921
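/*
 * Successful completion path: wait for the card to leave the busy state,
 * copy the response registers into the command and check that all data
 * was transferred before handing the request to aw_mmc_req_done().
 */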
922 static void
923 aw_mmc_req_ok(struct aw_mmc_softc *sc)
924 {
925 int timeout;
926 struct mmc_command *cmd;
927 uint32_t status;
928
929 timeout = 1000;
930 while (--timeout > 0) {
931 status = AW_MMC_READ_4(sc, AW_MMC_STAR);
932 if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
933 break;
934 DELAY(1000);
935 }
936 #ifdef MMCCAM
937 cmd = &sc->ccb->mmcio.cmd;
938 #else
939 cmd = sc->aw_req->cmd;
940 #endif
941 if (timeout == 0) {
942 cmd->error = MMC_ERR_FAILED;
943 aw_mmc_req_done(sc);
944 return;
945 }
946 if (cmd->flags & MMC_RSP_PRESENT) {
947 if (cmd->flags & MMC_RSP_136) {
948 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
949 cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
950 cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
951 cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
952 } else
953 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
954 }
955 /* Has all data been transferred? */
956 if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
957 cmd->error = MMC_ERR_FAILED;
958 aw_mmc_req_done(sc);
959 }
960
961 static inline void
962 set_mmc_error(struct aw_mmc_softc *sc, int error_code)
963 {
964 #ifdef MMCCAM
965 sc->ccb->mmcio.cmd.error = error_code;
966 #else
967 sc->aw_req->cmd->error = error_code;
968 #endif
969 }
970
971 static void
972 aw_mmc_timeout(void *arg)
973 {
974 struct aw_mmc_softc *sc;
975
976 sc = (struct aw_mmc_softc *)arg;
977 #ifdef MMCCAM
978 if (sc->ccb != NULL) {
979 #else
980 if (sc->aw_req != NULL) {
981 #endif
982 device_printf(sc->aw_dev, "controller timeout\n");
983 set_mmc_error(sc, MMC_ERR_TIMEOUT);
984 aw_mmc_req_done(sc);
985 } else
986 device_printf(sc->aw_dev,
987 "Spurious timeout - no active request\n");
988 }
989
990 static void
991 aw_mmc_print_error(uint32_t err)
992 {
993 if(err & AW_MMC_INT_RESP_ERR)
994 printf("AW_MMC_INT_RESP_ERR ");
995 if (err & AW_MMC_INT_RESP_CRC_ERR)
996 printf("AW_MMC_INT_RESP_CRC_ERR ");
997 if (err & AW_MMC_INT_DATA_CRC_ERR)
998 printf("AW_MMC_INT_DATA_CRC_ERR ");
999 if (err & AW_MMC_INT_RESP_TIMEOUT)
1000 printf("AW_MMC_INT_RESP_TIMEOUT ");
1001 if (err & AW_MMC_INT_FIFO_RUN_ERR)
1002 printf("AW_MMC_INT_FIFO_RUN_ERR ");
1003 if (err & AW_MMC_INT_CMD_BUSY)
1004 printf("AW_MMC_INT_CMD_BUSY ");
1005 if (err & AW_MMC_INT_DATA_START_ERR)
1006 printf("AW_MMC_INT_DATA_START_ERR ");
1007 if (err & AW_MMC_INT_DATA_END_BIT_ERR)
1008 printf("AW_MMC_INT_DATA_END_BIT_ERR");
1009 printf("\n");
1010 }
1011
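/*
 * Interrupt handler.  Reads the raw (RISR) and DMA (IDST) status,
 * reports spurious interrupts and command/DMA errors, syncs and unloads
 * the data buffer once the DMA transfer completes, and finishes the
 * request when every interrupt named in aw_intr_wait has been seen.
 */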
1012 static void
1013 aw_mmc_intr(void *arg)
1014 {
1015 bus_dmasync_op_t sync_op;
1016 struct aw_mmc_softc *sc;
1017 struct mmc_data *data;
1018 uint32_t idst, imask, rint;
1019
1020 sc = (struct aw_mmc_softc *)arg;
1021 AW_MMC_LOCK(sc);
1022 rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
1023 idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
1024 imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
1025 if (idst == 0 && imask == 0 && rint == 0) {
1026 AW_MMC_UNLOCK(sc);
1027 return;
1028 }
1029 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) {
1030 device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
1031 idst, imask, rint);
1032 }
1033 #ifdef MMCCAM
1034 if (sc->ccb == NULL) {
1035 #else
1036 if (sc->aw_req == NULL) {
1037 #endif
1038 device_printf(sc->aw_dev,
1039 "Spurious interrupt - no active request, rint: 0x%08X\n",
1040 rint);
1041 aw_mmc_print_error(rint);
1042 goto end;
1043 }
1044 if (rint & AW_MMC_INT_ERR_BIT) {
1045 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) {
1046 device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
1047 aw_mmc_print_error(rint);
1048 }
1049 if (rint & AW_MMC_INT_RESP_TIMEOUT)
1050 set_mmc_error(sc, MMC_ERR_TIMEOUT);
1051 else
1052 set_mmc_error(sc, MMC_ERR_FAILED);
1053 aw_mmc_req_done(sc);
1054 goto end;
1055 }
1056 if (idst & AW_MMC_IDST_ERROR) {
1057 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT))
1058 device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
1059 set_mmc_error(sc, MMC_ERR_FAILED);
1060 aw_mmc_req_done(sc);
1061 goto end;
1062 }
1063
1064 sc->aw_intr |= rint;
1065 #ifdef MMCCAM
1066 data = sc->ccb->mmcio.cmd.data;
1067 #else
1068 data = sc->aw_req->cmd->data;
1069 #endif
1070 if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
1071 if (data->flags & MMC_DATA_WRITE)
1072 sync_op = BUS_DMASYNC_POSTWRITE;
1073 else
1074 sync_op = BUS_DMASYNC_POSTREAD;
1075 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
1076 sync_op);
1077 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
1078 BUS_DMASYNC_POSTWRITE);
1079 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
1080 sc->aw_resid = data->len >> 2;
1081 }
1082 if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
1083 aw_mmc_req_ok(sc);
1084
1085 end:
1086 AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
1087 AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
1088 AW_MMC_UNLOCK(sc);
1089 }
1090
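/*
 * Start a command.  The command register bits, the interrupt mask and
 * the block size/count registers are derived from the mmc_command;
 * requests carrying data go through aw_mmc_prepare_dma() first.  A
 * per-request timeout callout protects against a command that never
 * completes.
 */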
1091 static int
1092 aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
1093 {
1094 int blksz;
1095 struct aw_mmc_softc *sc;
1096 struct mmc_command *cmd;
1097 uint32_t cmdreg, imask;
1098 int err;
1099
1100 sc = device_get_softc(bus);
1101
1102 AW_MMC_LOCK(sc);
1103 #ifdef MMCCAM
1104 KASSERT(req == NULL, ("req should be NULL in MMCCAM case!"));
1105 /*
1106 * For MMCCAM, sc->ccb has been NULL-checked and populated
1107 * by aw_mmc_cam_request() already.
1108 */
1109 cmd = &sc->ccb->mmcio.cmd;
1110 #else
1111 if (sc->aw_req) {
1112 AW_MMC_UNLOCK(sc);
1113 return (EBUSY);
1114 }
1115 sc->aw_req = req;
1116 cmd = req->cmd;
1117
1118 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
1119 device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1120 cmd->opcode, cmd->arg, cmd->flags,
1121 cmd->data != NULL ? (unsigned int)cmd->data->len : 0,
1122 cmd->data != NULL ? cmd->data->flags: 0);
1123 }
1124 #endif
1125 cmdreg = AW_MMC_CMDR_LOAD;
1126 imask = AW_MMC_INT_ERR_BIT;
1127 sc->aw_intr_wait = 0;
1128 sc->aw_intr = 0;
1129 sc->aw_resid = 0;
1130 cmd->error = MMC_ERR_NONE;
1131
1132 if (cmd->opcode == MMC_GO_IDLE_STATE)
1133 cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
1134
1135 if (cmd->flags & MMC_RSP_PRESENT)
1136 cmdreg |= AW_MMC_CMDR_RESP_RCV;
1137 if (cmd->flags & MMC_RSP_136)
1138 cmdreg |= AW_MMC_CMDR_LONG_RESP;
1139 if (cmd->flags & MMC_RSP_CRC)
1140 cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;
1141
1142 if (cmd->data) {
1143 cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
1144
1145 if (cmd->data->flags & MMC_DATA_MULTI) {
1146 cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
1147 imask |= AW_MMC_INT_AUTO_STOP_DONE;
1148 sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
1149 } else {
1150 sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
1151 imask |= AW_MMC_INT_DATA_OVER;
1152 }
1153 if (cmd->data->flags & MMC_DATA_WRITE)
1154 cmdreg |= AW_MMC_CMDR_DIR_WRITE;
1155 #ifdef MMCCAM
1156 if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
1157 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, cmd->data->block_size);
1158 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
1159 } else
1160 #endif
1161 {
1162 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
1163 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
1164 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
1165 }
1166 } else {
1167 imask |= AW_MMC_INT_CMD_DONE;
1168 }
1169
1170 /* Enable the interrupts we are interested in */
1171 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask);
1172 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
1173
1174 /* Enable auto stop if needed */
1175 AW_MMC_WRITE_4(sc, AW_MMC_A12A,
1176 cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff);
1177
1178 /* Write the command argument */
1179 AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
1180
1181 /*
1182 * If there is no data, start the request right away;
1183 * otherwise prepare the DMA transfer first, then start the request.
1184 */
1185 if (cmd->data == NULL) {
1186 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
1187 } else {
1188 err = aw_mmc_prepare_dma(sc);
1189 if (err != 0)
1190 device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
1191
1192 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
1193 }
1194
1195 callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
1196 aw_mmc_timeout, sc);
1197 AW_MMC_UNLOCK(sc);
1198
1199 return (0);
1200 }
1201
1202 static int
1203 aw_mmc_read_ivar(device_t bus, device_t child, int which,
1204 uintptr_t *result)
1205 {
1206 struct aw_mmc_softc *sc;
1207
1208 sc = device_get_softc(bus);
1209 switch (which) {
1210 default:
1211 return (EINVAL);
1212 case MMCBR_IVAR_BUS_MODE:
1213 *(int *)result = sc->aw_host.ios.bus_mode;
1214 break;
1215 case MMCBR_IVAR_BUS_WIDTH:
1216 *(int *)result = sc->aw_host.ios.bus_width;
1217 break;
1218 case MMCBR_IVAR_CHIP_SELECT:
1219 *(int *)result = sc->aw_host.ios.chip_select;
1220 break;
1221 case MMCBR_IVAR_CLOCK:
1222 *(int *)result = sc->aw_host.ios.clock;
1223 break;
1224 case MMCBR_IVAR_F_MIN:
1225 *(int *)result = sc->aw_host.f_min;
1226 break;
1227 case MMCBR_IVAR_F_MAX:
1228 *(int *)result = sc->aw_host.f_max;
1229 break;
1230 case MMCBR_IVAR_HOST_OCR:
1231 *(int *)result = sc->aw_host.host_ocr;
1232 break;
1233 case MMCBR_IVAR_MODE:
1234 *(int *)result = sc->aw_host.mode;
1235 break;
1236 case MMCBR_IVAR_OCR:
1237 *(int *)result = sc->aw_host.ocr;
1238 break;
1239 case MMCBR_IVAR_POWER_MODE:
1240 *(int *)result = sc->aw_host.ios.power_mode;
1241 break;
1242 case MMCBR_IVAR_VDD:
1243 *(int *)result = sc->aw_host.ios.vdd;
1244 break;
1245 case MMCBR_IVAR_VCCQ:
1246 *(int *)result = sc->aw_host.ios.vccq;
1247 break;
1248 case MMCBR_IVAR_CAPS:
1249 *(int *)result = sc->aw_host.caps;
1250 break;
1251 case MMCBR_IVAR_TIMING:
1252 *(int *)result = sc->aw_host.ios.timing;
1253 break;
1254 case MMCBR_IVAR_MAX_DATA:
1255 *(int *)result = (sc->aw_mmc_conf->dma_xferlen *
1256 AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
1257 break;
1258 case MMCBR_IVAR_RETUNE_REQ:
1259 *(int *)result = retune_req_none;
1260 break;
1261 }
1262
1263 return (0);
1264 }
1265
1266 static int
1267 aw_mmc_write_ivar(device_t bus, device_t child, int which,
1268 uintptr_t value)
1269 {
1270 struct aw_mmc_softc *sc;
1271
1272 sc = device_get_softc(bus);
1273 switch (which) {
1274 default:
1275 return (EINVAL);
1276 case MMCBR_IVAR_BUS_MODE:
1277 sc->aw_host.ios.bus_mode = value;
1278 break;
1279 case MMCBR_IVAR_BUS_WIDTH:
1280 sc->aw_host.ios.bus_width = value;
1281 break;
1282 case MMCBR_IVAR_CHIP_SELECT:
1283 sc->aw_host.ios.chip_select = value;
1284 break;
1285 case MMCBR_IVAR_CLOCK:
1286 sc->aw_host.ios.clock = value;
1287 break;
1288 case MMCBR_IVAR_MODE:
1289 sc->aw_host.mode = value;
1290 break;
1291 case MMCBR_IVAR_OCR:
1292 sc->aw_host.ocr = value;
1293 break;
1294 case MMCBR_IVAR_POWER_MODE:
1295 sc->aw_host.ios.power_mode = value;
1296 break;
1297 case MMCBR_IVAR_VDD:
1298 sc->aw_host.ios.vdd = value;
1299 break;
1300 case MMCBR_IVAR_VCCQ:
1301 sc->aw_host.ios.vccq = value;
1302 break;
1303 case MMCBR_IVAR_TIMING:
1304 sc->aw_host.ios.timing = value;
1305 break;
1306 /* These are read-only */
1307 case MMCBR_IVAR_CAPS:
1308 case MMCBR_IVAR_HOST_OCR:
1309 case MMCBR_IVAR_F_MIN:
1310 case MMCBR_IVAR_F_MAX:
1311 case MMCBR_IVAR_MAX_DATA:
1312 return (EINVAL);
1313 }
1314
1315 return (0);
1316 }
1317
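/*
 * Turn the card clock on or off by issuing a "program clock" command and
 * polling until the controller has latched the new setting.  On parts
 * that require it, DATA0 is masked for the duration of the update.
 */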
1318 static int
1319 aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
1320 {
1321 uint32_t reg;
1322 int retry;
1323
1324 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1325 reg &= ~(AW_MMC_CKCR_ENB | AW_MMC_CKCR_LOW_POWER |
1326 AW_MMC_CKCR_MASK_DATA0);
1327
1328 if (clkon)
1329 reg |= AW_MMC_CKCR_ENB;
1330 if (sc->aw_mmc_conf->mask_data0)
1331 reg |= AW_MMC_CKCR_MASK_DATA0;
1332
1333 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1334
1335 reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
1336 AW_MMC_CMDR_WAIT_PRE_OVER;
1337 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
1338 retry = 0xfffff;
1339
1340 while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
1341 reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
1342 DELAY(10);
1343 }
1344 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
1345
1346 if (reg & AW_MMC_CMDR_LOAD) {
1347 device_printf(sc->aw_dev, "timeout updating clock\n");
1348 return (ETIMEDOUT);
1349 }
1350
1351 if (sc->aw_mmc_conf->mask_data0) {
1352 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1353 reg &= ~AW_MMC_CKCR_MASK_DATA0;
1354 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1355 }
1356
1357 return (0);
1358 }
1359
1360 static int
1361 aw_mmc_switch_vccq(device_t bus, device_t child)
1362 {
1363 struct aw_mmc_softc *sc;
1364 int uvolt, err;
1365
1366 sc = device_get_softc(bus);
1367
1368 if (sc->mmc_helper.vqmmc_supply == NULL)
1369 return (EOPNOTSUPP);
1370
1371 switch (sc->aw_host.ios.vccq) {
1372 case vccq_180:
1373 uvolt = 1800000;
1374 break;
1375 case vccq_330:
1376 uvolt = 3300000;
1377 break;
1378 default:
1379 return (EINVAL);
1380 }
1381
1382 err = regulator_set_voltage(sc->mmc_helper.vqmmc_supply, uvolt, uvolt);
1383 if (err != 0) {
1384 device_printf(sc->aw_dev,
1385 "Cannot set vqmmc to %d<->%d\n",
1386 uvolt,
1387 uvolt);
1388 return (err);
1389 }
1390
1391 return (0);
1392 }
1393
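/*
 * Apply the current ios settings: bus width, power state (including the
 * vmmc/vqmmc regulators), DDR mode and, when the requested clock
 * changes, the card clock divider, the new timing mode and the mmc
 * module clock frequency.
 */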
1394 static int
1395 aw_mmc_update_ios(device_t bus, device_t child)
1396 {
1397 int error;
1398 struct aw_mmc_softc *sc;
1399 struct mmc_ios *ios;
1400 unsigned int clock;
1401 uint32_t reg, div = 1;
1402
1403 sc = device_get_softc(bus);
1404
1405 ios = &sc->aw_host.ios;
1406
1407 /* Set the bus width. */
1408 switch (ios->bus_width) {
1409 case bus_width_1:
1410 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
1411 break;
1412 case bus_width_4:
1413 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
1414 break;
1415 case bus_width_8:
1416 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
1417 break;
1418 }
1419
1420 switch (ios->power_mode) {
1421 case power_on:
1422 break;
1423 case power_off:
1424 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
1425 device_printf(sc->aw_dev, "Powering down sd/mmc\n");
1426
1427 if (sc->mmc_helper.vmmc_supply)
1428 regulator_disable(sc->mmc_helper.vmmc_supply);
1429 if (sc->mmc_helper.vqmmc_supply)
1430 regulator_disable(sc->mmc_helper.vqmmc_supply);
1431
1432 aw_mmc_reset(sc);
1433 break;
1434 case power_up:
1435 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
1436 device_printf(sc->aw_dev, "Powering up sd/mmc\n");
1437
1438 if (sc->mmc_helper.vmmc_supply)
1439 regulator_enable(sc->mmc_helper.vmmc_supply);
1440 if (sc->mmc_helper.vqmmc_supply)
1441 regulator_enable(sc->mmc_helper.vqmmc_supply);
1442 aw_mmc_init(sc);
1443 break;
1444 }
1445
1446 /* Enable ddr mode if needed */
1447 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
1448 if (ios->timing == bus_timing_uhs_ddr50 ||
1449 ios->timing == bus_timing_mmc_ddr52)
1450 reg |= AW_MMC_GCTL_DDR_MOD_SEL;
1451 else
1452 reg &= ~AW_MMC_GCTL_DDR_MOD_SEL;
1453 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
1454
1455 if (ios->clock && ios->clock != sc->aw_clock) {
1456 sc->aw_clock = clock = ios->clock;
1457
1458 /* Disable clock */
1459 error = aw_mmc_update_clock(sc, 0);
1460 if (error != 0)
1461 return (error);
1462
1463 if (ios->timing == bus_timing_mmc_ddr52 &&
1464 (sc->aw_mmc_conf->new_timing ||
1465 ios->bus_width == bus_width_8)) {
1466 div = 2;
1467 clock <<= 1;
1468 }
1469
1470 /* Reset the divider. */
1471 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1472 reg &= ~AW_MMC_CKCR_DIV;
1473 reg |= div - 1;
1474 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1475
1476 /* New timing mode if needed */
1477 if (sc->aw_mmc_conf->new_timing) {
1478 reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
1479 reg |= AW_MMC_NTSR_MODE_SELECT;
1480 AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
1481 }
1482
1483 /* Set the MMC clock. */
1484 error = clk_disable(sc->aw_clk_mmc);
1485 if (error != 0 && bootverbose)
1486 device_printf(sc->aw_dev,
1487 "failed to disable mmc clock: %d\n", error);
1488 error = clk_set_freq(sc->aw_clk_mmc, clock,
1489 CLK_SET_ROUND_DOWN);
1490 if (error != 0) {
1491 device_printf(sc->aw_dev,
1492 "failed to set frequency to %u Hz: %d\n",
1493 clock, error);
1494 return (error);
1495 }
1496 error = clk_enable(sc->aw_clk_mmc);
1497 if (error != 0 && bootverbose)
1498 device_printf(sc->aw_dev,
1499 "failed to re-enable mmc clock: %d\n", error);
1500
1501 if (sc->aw_mmc_conf->can_calibrate)
1502 AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);
1503
1504 /* Enable clock. */
1505 error = aw_mmc_update_clock(sc, 1);
1506 if (error != 0)
1507 return (error);
1508 }
1509
1510 return (0);
1511 }
1512
1513 static int
1514 aw_mmc_get_ro(device_t bus, device_t child)
1515 {
1516 struct aw_mmc_softc *sc;
1517
1518 sc = device_get_softc(bus);
1519
1520 return (mmc_fdt_gpio_get_readonly(&sc->mmc_helper));
1521 }
1522
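/*
 * Simple host serialization: acquire_host sleeps until the bus is free
 * and marks it busy; release_host clears the flag and wakes up waiters.
 */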
1523 static int
1524 aw_mmc_acquire_host(device_t bus, device_t child)
1525 {
1526 struct aw_mmc_softc *sc;
1527 int error;
1528
1529 sc = device_get_softc(bus);
1530 AW_MMC_LOCK(sc);
1531 while (sc->aw_bus_busy) {
1532 error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
1533 if (error != 0) {
1534 AW_MMC_UNLOCK(sc);
1535 return (error);
1536 }
1537 }
1538 sc->aw_bus_busy++;
1539 AW_MMC_UNLOCK(sc);
1540
1541 return (0);
1542 }
1543
1544 static int
1545 aw_mmc_release_host(device_t bus, device_t child)
1546 {
1547 struct aw_mmc_softc *sc;
1548
1549 sc = device_get_softc(bus);
1550 AW_MMC_LOCK(sc);
1551 sc->aw_bus_busy--;
1552 wakeup(sc);
1553 AW_MMC_UNLOCK(sc);
1554
1555 return (0);
1556 }
1557
1558 static device_method_t aw_mmc_methods[] = {
1559 /* Device interface */
1560 DEVMETHOD(device_probe, aw_mmc_probe),
1561 DEVMETHOD(device_attach, aw_mmc_attach),
1562 DEVMETHOD(device_detach, aw_mmc_detach),
1563
1564 /* Bus interface */
1565 DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar),
1566 DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar),
1567 DEVMETHOD(bus_add_child, bus_generic_add_child),
1568
1569 /* MMC bridge interface */
1570 DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios),
1571 DEVMETHOD(mmcbr_request, aw_mmc_request),
1572 DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro),
1573 DEVMETHOD(mmcbr_switch_vccq, aw_mmc_switch_vccq),
1574 DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host),
1575 DEVMETHOD(mmcbr_release_host, aw_mmc_release_host),
1576
1577 DEVMETHOD_END
1578 };
1579
1580 static devclass_t aw_mmc_devclass;
1581
1582 static driver_t aw_mmc_driver = {
1583 "aw_mmc",
1584 aw_mmc_methods,
1585 sizeof(struct aw_mmc_softc),
1586 };
1587
1588 DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
1589 NULL);
1590 #ifndef MMCCAM
1591 MMC_DECLARE_BRIDGE(aw_mmc);
1592 #endif
1593 SIMPLEBUS_PNP_INFO(compat_data);
1594