/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2022 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * The DPAA2 Management Complex (MC) bus driver.
 *
 * MC is a hardware resource manager found in several NXP SoCs (LX2160A, for
 * example) which provides access to the specialized hardware objects used in
 * network-oriented packet processing applications.
 */

#include "opt_acpi.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <vm/vm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#endif

#include "pcib_if.h"
#include "pci_if.h"

#include "dpaa2_mc.h"

/* Macros to read/write MC registers. */
#define	mcreg_read_4(_sc, _r)		bus_read_4(&(_sc)->map[1], (_r))
#define	mcreg_write_4(_sc, _r, _v)	bus_write_4(&(_sc)->map[1], (_r), (_v))

#define	IORT_DEVICE_NAME	"MCE"

/* MC Registers */
#define	MC_REG_GCR1		0x0000u
#define	MC_REG_GCR2		0x0004u	/* TODO: Does it exist? */
#define	MC_REG_GSR		0x0008u
#define	MC_REG_FAPR		0x0028u

/* General Control Register 1 (GCR1) */
#define	GCR1_P1_STOP		0x80000000u
#define	GCR1_P2_STOP		0x40000000u

/* General Status Register (GSR) */
#define	GSR_HW_ERR(v)		(((v) & 0x80000000u) >> 31)
#define	GSR_CAT_ERR(v)		(((v) & 0x40000000u) >> 30)
#define	GSR_DPL_OFFSET(v)	(((v) & 0x3FFFFF00u) >> 8)
#define	GSR_MCS(v)		(((v) & 0xFFu) >> 0)
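
/*
 * Illustrative decode: a GSR value of 0x80000001u yields GSR_HW_ERR(v) == 1,
 * GSR_CAT_ERR(v) == 0, GSR_DPL_OFFSET(v) == 0 and GSR_MCS(v) == 1; a non-zero
 * GSR_MCS() is what the status poll in dpaa2_mc_attach() below treats as
 * "MC ready".
 */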

/* Poll interval and number of attempts to wait for the MC status. */
#define	MC_STAT_TIMEOUT		1000u	/* us */
#define	MC_STAT_ATTEMPTS	100u
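
/*
 * NOTE: With the values above, the status poll in dpaa2_mc_attach() gives up
 * after MC_STAT_ATTEMPTS * MC_STAT_TIMEOUT = 100 * 1000 us, i.e. ~100 ms.
 */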

/**
 * @brief Structure to describe a DPAA2 device as a managed resource.
 */
struct dpaa2_mc_devinfo {
	STAILQ_ENTRY(dpaa2_mc_devinfo) link;
	device_t	dpaa2_dev;
	uint32_t	flags;
	uint32_t	owners;
};

MALLOC_DEFINE(M_DPAA2_MC, "dpaa2_mc", "DPAA2 Management Complex");

static struct resource_spec dpaa2_mc_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE | RF_UNMAPPED },
	{ SYS_RES_MEMORY, 1, RF_ACTIVE | RF_UNMAPPED | RF_OPTIONAL },
	RESOURCE_SPEC_END
};
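
/*
 * NOTE: Resource 0 is the MC command portal (mapped unconditionally in
 * dpaa2_mc_attach()); resource 1 is the optional control-register window
 * accessed through the mcreg_read_4()/mcreg_write_4() macros above.
 */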

static u_int dpaa2_mc_get_xref(device_t, device_t);
static u_int dpaa2_mc_map_id(device_t, device_t, uintptr_t *);
static struct rman *dpaa2_mc_rman(device_t, int);

#if defined(INTRNG) && !defined(IOMMU)
static int dpaa2_mc_alloc_msi_impl(device_t, device_t, int, int, int *);
static int dpaa2_mc_release_msi_impl(device_t, device_t, int, int *);
static int dpaa2_mc_map_msi_impl(device_t, device_t, int, uint64_t *,
    uint32_t *);
#endif

/*
 * For device interface.
 */

int
dpaa2_mc_attach(device_t dev)
{
	struct dpaa2_mc_softc *sc;
	struct resource_map_request req;
	uint32_t val;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->msi_allocated = false;
	sc->msi_owner = NULL;

	error = bus_alloc_resources(sc->dev, dpaa2_mc_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources\n",
		    __func__);
		return (ENXIO);
	}

	if (sc->res[1]) {
		resource_init_map_request(&req);
		req.memattr = VM_MEMATTR_DEVICE;
		error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[1],
		    &req, &sc->map[1]);
		if (error) {
			device_printf(dev, "%s: failed to map control "
			    "registers\n", __func__);
			dpaa2_mc_detach(dev);
			return (ENXIO);
		}

		if (bootverbose)
			device_printf(dev,
			    "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n",
			    mcreg_read_4(sc, MC_REG_GCR1),
			    mcreg_read_4(sc, MC_REG_GCR2),
			    mcreg_read_4(sc, MC_REG_GSR),
			    mcreg_read_4(sc, MC_REG_FAPR));

		/* Reset P1_STOP and P2_STOP bits to resume MC processor. */
		val = mcreg_read_4(sc, MC_REG_GCR1) &
		    ~(GCR1_P1_STOP | GCR1_P2_STOP);
		mcreg_write_4(sc, MC_REG_GCR1, val);

		/* Poll MC status. */
		if (bootverbose)
			device_printf(dev, "polling MC status...\n");
		for (int i = 0; i < MC_STAT_ATTEMPTS; i++) {
			val = mcreg_read_4(sc, MC_REG_GSR);
			if (GSR_MCS(val) != 0u)
				break;
			DELAY(MC_STAT_TIMEOUT);
		}

		if (bootverbose)
			device_printf(dev,
			    "GCR1=0x%x, GCR2=0x%x, GSR=0x%x, FAPR=0x%x\n",
			    mcreg_read_4(sc, MC_REG_GCR1),
			    mcreg_read_4(sc, MC_REG_GCR2),
			    mcreg_read_4(sc, MC_REG_GSR),
			    mcreg_read_4(sc, MC_REG_FAPR));
	}

	/* At least 64 bytes of the command portal should be available. */
	if (rman_get_size(sc->res[0]) < DPAA2_MCP_MEM_WIDTH) {
		device_printf(dev, "%s: MC portal memory region too small: "
		    "%jd\n", __func__, rman_get_size(sc->res[0]));
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Map MC portal memory resource. */
	resource_init_map_request(&req);
	req.memattr = VM_MEMATTR_DEVICE;
	error = bus_map_resource(sc->dev, SYS_RES_MEMORY, sc->res[0],
	    &req, &sc->map[0]);
	if (error) {
		device_printf(dev, "Failed to map MC portal memory\n");
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 I/O objects. */
	sc->dpio_rman.rm_type = RMAN_ARRAY;
	sc->dpio_rman.rm_descr = "DPAA2 DPIO objects";
	error = rman_init(&sc->dpio_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 I/O objects: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 buffer pools. */
	sc->dpbp_rman.rm_type = RMAN_ARRAY;
	sc->dpbp_rman.rm_descr = "DPAA2 DPBP objects";
	error = rman_init(&sc->dpbp_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 buffer pools: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 concentrators. */
	sc->dpcon_rman.rm_type = RMAN_ARRAY;
	sc->dpcon_rman.rm_descr = "DPAA2 DPCON objects";
	error = rman_init(&sc->dpcon_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 concentrators: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a resource manager for the DPAA2 MC portals. */
	sc->dpmcp_rman.rm_type = RMAN_ARRAY;
	sc->dpmcp_rman.rm_descr = "DPAA2 DPMCP objects";
	error = rman_init(&sc->dpmcp_rman);
	if (error) {
		device_printf(dev, "Failed to initialize a resource manager for "
		    "the DPAA2 MC portals: error=%d\n", error);
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}

	/* Initialize a list of non-allocatable DPAA2 devices. */
	mtx_init(&sc->mdev_lock, "MC portal mdev lock", NULL, MTX_DEF);
	STAILQ_INIT(&sc->mdev_list);

	mtx_init(&sc->msi_lock, "MC MSI lock", NULL, MTX_DEF);

	/*
	 * Add a root resource container as the only child of the bus. All of
	 * the direct descendant containers will be attached to the root one
	 * instead of the MC device.
	 */
	sc->rcdev = device_add_child(dev, "dpaa2_rc", 0);
	if (sc->rcdev == NULL) {
		dpaa2_mc_detach(dev);
		return (ENXIO);
	}
	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}

int
dpaa2_mc_detach(device_t dev)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo = NULL;
	int error;

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	sc = device_get_softc(dev);
	if (sc->rcdev)
		device_delete_child(dev, sc->rcdev);
	bus_release_resources(dev, dpaa2_mc_spec, sc->res);

	dinfo = device_get_ivars(dev);
	if (dinfo)
		free(dinfo, M_DPAA2_MC);

	return (device_delete_children(dev));
}

/*
 * For bus interface.
 */

struct resource *
dpaa2_mc_alloc_resource(device_t mcdev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;
	struct rman *rm;
	int error;

	rm = dpaa2_mc_rman(mcdev, type);
	if (!rm)
		return (BUS_ALLOC_RESOURCE(device_get_parent(mcdev), child,
		    type, rid, start, end, count, flags));

	/*
	 * Skip managing DPAA2-specific resources; they must be provided to
	 * the MC by calling DPAA2_MC_MANAGE_DEV() beforehand.
	 */
	if (type <= DPAA2_DEV_MC) {
		error = rman_manage_region(rm, start, end);
		if (error) {
			device_printf(mcdev, "rman_manage_region() failed: "
			    "start=%#jx, end=%#jx, error=%d\n", start, end,
			    error);
			goto fail;
		}
	}

	res = rman_reserve_resource(rm, start, end, count, flags, child);
	if (!res) {
		device_printf(mcdev, "rman_reserve_resource() failed: "
		    "start=%#jx, end=%#jx, count=%#jx\n", start, end, count);
		goto fail;
	}

	rman_set_rid(res, *rid);

	if (flags & RF_ACTIVE) {
		if (bus_activate_resource(child, type, *rid, res)) {
			device_printf(mcdev, "bus_activate_resource() failed: "
			    "rid=%d, res=%#jx\n", *rid, (uintmax_t)res);
			rman_release_resource(res);
			goto fail;
		}
	}

	return (res);
fail:
	device_printf(mcdev, "%s() failed: type=%d, rid=%d, start=%#jx, "
	    "end=%#jx, count=%#jx, flags=%x\n", __func__, type, *rid, start,
	    end, count, flags);
	return (NULL);
}

int
dpaa2_mc_adjust_resource(device_t mcdev, device_t child, int type,
    struct resource *r, rman_res_t start, rman_res_t end)
{
	struct rman *rm;

	rm = dpaa2_mc_rman(mcdev, type);
	if (rm)
		return (rman_adjust_resource(r, start, end));
	return (bus_generic_adjust_resource(mcdev, child, type, r, start, end));
}

int
dpaa2_mc_release_resource(device_t mcdev, device_t child, int type, int rid,
    struct resource *r)
{
	struct rman *rm;

	rm = dpaa2_mc_rman(mcdev, type);
	if (rm) {
		KASSERT(rman_is_region_manager(r, rm), ("rman mismatch"));
		rman_release_resource(r);
	}

	return (bus_generic_release_resource(mcdev, child, type, rid, r));
}

int
dpaa2_mc_activate_resource(device_t mcdev, device_t child, int type, int rid,
    struct resource *r)
{
	int rc;

	if ((rc = rman_activate_resource(r)) != 0)
		return (rc);

	return (BUS_ACTIVATE_RESOURCE(device_get_parent(mcdev), child, type,
	    rid, r));
}

int
dpaa2_mc_deactivate_resource(device_t mcdev, device_t child, int type, int rid,
    struct resource *r)
{
	int rc;

	if ((rc = rman_deactivate_resource(r)) != 0)
		return (rc);

	return (BUS_DEACTIVATE_RESOURCE(device_get_parent(mcdev), child, type,
	    rid, r));
}

/*
 * For pseudo-pcib interface.
 */

int
dpaa2_mc_alloc_msi(device_t mcdev, device_t child, int count, int maxcount,
    int *irqs)
{
#if defined(INTRNG) && !defined(IOMMU)
	return (dpaa2_mc_alloc_msi_impl(mcdev, child, count, maxcount, irqs));
#else
	return (ENXIO);
#endif
}

int
dpaa2_mc_release_msi(device_t mcdev, device_t child, int count, int *irqs)
{
#if defined(INTRNG) && !defined(IOMMU)
	return (dpaa2_mc_release_msi_impl(mcdev, child, count, irqs));
#else
	return (ENXIO);
#endif
}

int
dpaa2_mc_map_msi(device_t mcdev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
#if defined(INTRNG) && !defined(IOMMU)
	return (dpaa2_mc_map_msi_impl(mcdev, child, irq, addr, data));
#else
	return (ENXIO);
#endif
}

int
dpaa2_mc_get_id(device_t mcdev, device_t child, enum pci_id_type type,
    uintptr_t *id)
{
	struct dpaa2_devinfo *dinfo;

	dinfo = device_get_ivars(child);

	if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (ENXIO);

	if (type == PCI_ID_MSI)
		return (dpaa2_mc_map_id(mcdev, child, id));

	*id = dinfo->icid;
	return (0);
}

/*
 * For DPAA2 Management Complex bus driver interface.
 */

int
dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo;
	struct dpaa2_mc_devinfo *di;
	struct rman *rm;
	int error;

	sc = device_get_softc(mcdev);
	dinfo = device_get_ivars(dpaa2_dev);

	if (!sc || !dinfo || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO);
	di->dpaa2_dev = dpaa2_dev;
	di->flags = flags;
	di->owners = 0;

	/* Append a new managed DPAA2 device to the queue. */
	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);
	STAILQ_INSERT_TAIL(&sc->mdev_list, di, link);
	mtx_unlock(&sc->mdev_lock);

	if (flags & DPAA2_MC_DEV_ALLOCATABLE) {
		/* Select rman based on a type of the DPAA2 device. */
		rm = dpaa2_mc_rman(mcdev, dinfo->dtype);
		if (!rm)
			return (ENOENT);
		/* Manage DPAA2 device as an allocatable resource. */
		error = rman_manage_region(rm, (rman_res_t)dpaa2_dev,
		    (rman_res_t)dpaa2_dev);
		if (error)
			return (error);
	}

	return (0);
}
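
/*
 * Example (hypothetical caller): a resource container driver would register a
 * newly discovered DPIO object so that it can later be handed out via
 * dpaa2_mc_get_free_dev()/dpaa2_mc_get_shared_dev(), e.g.:
 *
 *	error = DPAA2_MC_MANAGE_DEV(mcdev, dpio_dev,
 *	    DPAA2_MC_DEV_ALLOCATABLE | DPAA2_MC_DEV_SHAREABLE);
 *
 * Note that an allocatable device is tracked as a degenerate rman region
 * whose start and end are both the device_t pointer itself.
 */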

int
dpaa2_mc_get_free_dev(device_t mcdev, device_t *dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct rman *rm;
	rman_res_t start, end;
	int error;

	if (strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	/* Select resource manager based on a type of the DPAA2 device. */
	rm = dpaa2_mc_rman(mcdev, devtype);
	if (!rm)
		return (ENOENT);
	/* Find first free DPAA2 device of the given type. */
	error = rman_first_free_region(rm, &start, &end);
	if (error)
		return (error);

	KASSERT(start == end, ("start != end, but should be the same pointer "
	    "to the DPAA2 device: start=%jx, end=%jx", start, end));

	*dpaa2_dev = (device_t)start;

	return (0);
}
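
/*
 * Example (hypothetical caller, assuming the dpaa2_mc_if bus-method
 * interface): a child driver could obtain an unused concentrator with:
 *
 *	device_t dpcon_dev;
 *	error = DPAA2_MC_GET_FREE_DEV(mcdev, &dpcon_dev, DPAA2_DEV_CON);
 */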

int
dpaa2_mc_get_dev(device_t mcdev, device_t *dpaa2_dev,
    enum dpaa2_dev_type devtype, uint32_t obj_id)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		dinfo = device_get_ivars(di->dpaa2_dev);
		if (dinfo->dtype == devtype && dinfo->id == obj_id) {
			*dpaa2_dev = di->dpaa2_dev;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_get_shared_dev(device_t mcdev, device_t *dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_devinfo *dinfo;
	struct dpaa2_mc_devinfo *di;
	device_t dev = NULL;
	uint32_t owners = UINT32_MAX;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		dinfo = device_get_ivars(di->dpaa2_dev);

		/* Select the shareable device with the fewest owners. */
		if ((dinfo->dtype == devtype) &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE) &&
		    (di->owners < owners)) {
			dev = di->dpaa2_dev;
			owners = di->owners;
		}
	}
	if (dev) {
		*dpaa2_dev = dev;
		error = 0;
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_reserve_dev(device_t mcdev, device_t dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		if (di->dpaa2_dev == dpaa2_dev &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
			di->owners++;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

int
dpaa2_mc_release_dev(device_t mcdev, device_t dpaa2_dev,
    enum dpaa2_dev_type devtype)
{
	struct dpaa2_mc_softc *sc;
	struct dpaa2_mc_devinfo *di;
	int error = ENOENT;

	sc = device_get_softc(mcdev);

	if (!sc || strcmp(device_get_name(mcdev), "dpaa2_mc") != 0)
		return (EINVAL);

	mtx_assert(&sc->mdev_lock, MA_NOTOWNED);
	mtx_lock(&sc->mdev_lock);

	STAILQ_FOREACH(di, &sc->mdev_list, link) {
		if (di->dpaa2_dev == dpaa2_dev &&
		    (di->flags & DPAA2_MC_DEV_SHAREABLE)) {
			if (di->owners > 0)
				di->owners--;
			error = 0;
			break;
		}
	}

	mtx_unlock(&sc->mdev_lock);

	return (error);
}

/**
 * @internal
 */
static u_int
dpaa2_mc_get_xref(device_t mcdev, device_t child)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(child);
#ifdef DEV_ACPI
	u_int xref, devid;
#endif
#ifdef FDT
	phandle_t msi_parent;
#endif
	int error;

	if (sc && dinfo) {
#ifdef DEV_ACPI
		if (sc->acpi_based) {
			/*
			 * NOTE: The first named component from the IORT table
			 * with the given name (as a substring) will be used.
			 */
			error = acpi_iort_map_named_msi(IORT_DEVICE_NAME,
			    dinfo->icid, &xref, &devid);
			if (error)
				return (0);
			return (xref);
		}
#endif
#ifdef FDT
		if (!sc->acpi_based) {
			/* FDT-based driver. */
			error = ofw_bus_msimap(sc->ofw_node, dinfo->icid,
			    &msi_parent, NULL);
			if (error)
				return (0);
			return ((u_int)msi_parent);
		}
#endif
	}
	return (0);
}

/**
 * @internal
 */
static u_int
dpaa2_mc_map_id(device_t mcdev, device_t child, uintptr_t *id)
{
	struct dpaa2_devinfo *dinfo;
#ifdef DEV_ACPI
	u_int xref, devid;
	int error;
#endif

	dinfo = device_get_ivars(child);
	if (dinfo) {
		/*
		 * The first named component from the IORT table with the
		 * given name (as a substring) will be used.
		 */
#ifdef DEV_ACPI
		error = acpi_iort_map_named_msi(IORT_DEVICE_NAME, dinfo->icid,
		    &xref, &devid);
		if (error == 0)
			*id = devid;
		else
#endif
			*id = dinfo->icid; /* RID not in IORT, likely FW bug */

		return (0);
	}
	return (ENXIO);
}

/**
 * @internal
 * @brief Obtain a resource manager based on the given type of the resource.
 */
static struct rman *
dpaa2_mc_rman(device_t mcdev, int type)
{
	struct dpaa2_mc_softc *sc;

	sc = device_get_softc(mcdev);

	switch (type) {
	case DPAA2_DEV_IO:
		return (&sc->dpio_rman);
	case DPAA2_DEV_BP:
		return (&sc->dpbp_rman);
	case DPAA2_DEV_CON:
		return (&sc->dpcon_rman);
	case DPAA2_DEV_MCP:
		return (&sc->dpmcp_rman);
	default:
		break;
	}

	return (NULL);
}

#if defined(INTRNG) && !defined(IOMMU)

/**
 * @internal
 * @brief Allocates the requested number of MSIs.
 *
 * NOTE: This function is part of a fallback solution used when IOMMU isn't
 * available. The total number of IRQs is limited to 32.
 */
static int
dpaa2_mc_alloc_msi_impl(device_t mcdev, device_t child, int count, int maxcount,
    int *irqs)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	int msi_irqs[DPAA2_MC_MSI_COUNT];
	int error;

	/* Pre-allocate a bunch of MSIs for MC to be used by its children. */
	if (!sc->msi_allocated) {
		error = intr_alloc_msi(mcdev, child, dpaa2_mc_get_xref(mcdev,
		    child), DPAA2_MC_MSI_COUNT, DPAA2_MC_MSI_COUNT, msi_irqs);
		if (error) {
			device_printf(mcdev, "failed to pre-allocate %d MSIs: "
			    "error=%d\n", DPAA2_MC_MSI_COUNT, error);
			return (error);
		}

		mtx_assert(&sc->msi_lock, MA_NOTOWNED);
		mtx_lock(&sc->msi_lock);
		for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
			sc->msi[i].child = NULL;
			sc->msi[i].irq = msi_irqs[i];
		}
		sc->msi_owner = child;
		sc->msi_allocated = true;
		mtx_unlock(&sc->msi_lock);
	}

	error = ENOENT;

	/* Find the first free MSIs in the pre-allocated pool. */
	mtx_assert(&sc->msi_lock, MA_NOTOWNED);
	mtx_lock(&sc->msi_lock);
	for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
		if (sc->msi[i].child != NULL)
			continue;
		error = 0;
		for (int j = 0; j < count; j++) {
			if (i + j >= DPAA2_MC_MSI_COUNT) {
				device_printf(mcdev, "requested %d MSIs exceed "
				    "limit of %d available\n", count,
				    DPAA2_MC_MSI_COUNT);
				error = E2BIG;
				break;
			}
			sc->msi[i + j].child = child;
			irqs[j] = sc->msi[i + j].irq;
		}
		break;
	}
	mtx_unlock(&sc->msi_lock);

	return (error);
}

/**
 * @internal
 * @brief Marks IRQs as free in the pre-allocated pool of MSIs.
 *
 * NOTE: This function is part of a fallback solution used when IOMMU isn't
 * available. The total number of IRQs is limited to 32.
 * NOTE: MSIs are kept allocated in the kernel as a part of the pool.
 */
static int
dpaa2_mc_release_msi_impl(device_t mcdev, device_t child, int count, int *irqs)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);

	mtx_assert(&sc->msi_lock, MA_NOTOWNED);
	mtx_lock(&sc->msi_lock);
	for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
		if (sc->msi[i].child != child)
			continue;
		for (int j = 0; j < count; j++) {
			if (sc->msi[i].irq == irqs[j]) {
				sc->msi[i].child = NULL;
				break;
			}
		}
	}
	mtx_unlock(&sc->msi_lock);

	return (0);
}

/**
 * @internal
 * @brief Provides the address to write to and the data to write for the given
 * MSI from the pre-allocated pool.
 *
 * NOTE: This function is part of a fallback solution used when IOMMU isn't
 * available. The total number of IRQs is limited to 32.
 */
static int
dpaa2_mc_map_msi_impl(device_t mcdev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	struct dpaa2_mc_softc *sc = device_get_softc(mcdev);
	int error = EINVAL;

	mtx_assert(&sc->msi_lock, MA_NOTOWNED);
	mtx_lock(&sc->msi_lock);
	for (int i = 0; i < DPAA2_MC_MSI_COUNT; i++) {
		if (sc->msi[i].child == child && sc->msi[i].irq == irq) {
			error = 0;
			break;
		}
	}
	mtx_unlock(&sc->msi_lock);
	if (error)
		return (error);

	return (intr_map_msi(mcdev, sc->msi_owner, dpaa2_mc_get_xref(mcdev,
	    sc->msi_owner), irq, addr, data));
}

#endif /* defined(INTRNG) && !defined(IOMMU) */

static device_method_t dpaa2_mc_methods[] = {
	DEVMETHOD_END
};

DEFINE_CLASS_0(dpaa2_mc, dpaa2_mc_driver, dpaa2_mc_methods,
    sizeof(struct dpaa2_mc_softc));