/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * the sponsorship of the FreeBSD Foundation.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_acpi.h"
#include "opt_platform.h"
#include "opt_iommu.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/arm/gic_common.h>
#include <arm64/arm64/gic_v3_reg.h>
#include <arm64/arm64/gic_v3_var.h>

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef IOMMU
#include <dev/iommu/iommu.h>
#include <dev/iommu/iommu_gas.h>
#endif

#include "pcib_if.h"
#include "pic_if.h"
#include "msi_if.h"

MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
    "ARM GICv3 Interrupt Translation Service");

#define	LPI_NIRQS		(64 * 1024)

/* The size and alignment of the command circular buffer */
#define	ITS_CMDQ_SIZE		(64 * 1024)	/* Must be a multiple of 4K */
#define	ITS_CMDQ_ALIGN		(64 * 1024)

#define	LPI_CONFTAB_SIZE	LPI_NIRQS
#define	LPI_CONFTAB_ALIGN	(64 * 1024)
#define	LPI_CONFTAB_MAX_ADDR	((1ul << 48) - 1) /* We need a 48 bit PA */

/* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
#define	LPI_PENDTAB_SIZE	((LPI_NIRQS + GIC_FIRST_LPI) / 8)
#define	LPI_PENDTAB_ALIGN	(64 * 1024)
#define	LPI_PENDTAB_MAX_ADDR	((1ul << 48) - 1) /* We need a 48 bit PA */
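
/*
 * Editor's note, illustrative arithmetic only: with LPI_NIRQS = 64K and
 * GIC_FIRST_LPI = 8192 (from gic_common.h), LPI_PENDTAB_SIZE works out
 * to (65536 + 8192) / 8 = 9216 bytes of pending bits per CPU.
 */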

#define	LPI_INT_TRANS_TAB_ALIGN	256
#define	LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)

/* ITS commands encoding */
#define	ITS_CMD_MOVI		(0x01)
#define	ITS_CMD_SYNC		(0x05)
#define	ITS_CMD_MAPD		(0x08)
#define	ITS_CMD_MAPC		(0x09)
#define	ITS_CMD_MAPTI		(0x0a)
#define	ITS_CMD_MAPI		(0x0b)
#define	ITS_CMD_INV		(0x0c)
#define	ITS_CMD_INVALL		(0x0d)
/* Command */
#define	CMD_COMMAND_MASK	(0xFFUL)
/* PCI device ID */
#define	CMD_DEVID_SHIFT		(32)
#define	CMD_DEVID_MASK		(0xFFFFFFFFUL << CMD_DEVID_SHIFT)
/* Size of IRQ ID bitfield */
#define	CMD_SIZE_MASK		(0xFFUL)
/* Virtual LPI ID */
#define	CMD_ID_MASK		(0xFFFFFFFFUL)
/* Physical LPI ID */
#define	CMD_PID_SHIFT		(32)
#define	CMD_PID_MASK		(0xFFFFFFFFUL << CMD_PID_SHIFT)
/* Collection */
#define	CMD_COL_MASK		(0xFFFFUL)
/* Target (CPU or Re-Distributor) */
#define	CMD_TARGET_SHIFT	(16)
#define	CMD_TARGET_MASK		(0xFFFFFFFFUL << CMD_TARGET_SHIFT)
/* Interrupt Translation Table address */
#define	CMD_ITT_MASK		(0xFFFFFFFFFF00UL)
/* Valid command bit */
#define	CMD_VALID_SHIFT		(63)
#define	CMD_VALID_MASK		(1UL << CMD_VALID_SHIFT)
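
/*
 * Illustrative sketch (editor's note, derived from the masks above and
 * the cmd_format_*() helpers later in this file, not a restatement of
 * the GICv3 specification): a MAPTI command is assembled as
 *
 *	DW0 = (DeviceID << 32) | 0x0a
 *	DW1 = (physical LPI << 32) | EventID
 *	DW2 = collection ID
 *
 * so mapping EventID 0 of DeviceID 0x100 to LPI 8192 on collection 1
 * would give DW0 = 0x000001000000000a, DW1 = 0x0000200000000000 and
 * DW2 = 0x1.
 */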

#define	ITS_TARGET_NONE		0xFBADBEEF

/* LPI chunk owned by ITS device */
struct lpi_chunk {
	u_int	lpi_base;
	u_int	lpi_free;	/* First free LPI in set */
	u_int	lpi_num;	/* Total number of LPIs in chunk */
	u_int	lpi_busy;	/* Number of busy LPIs in chunk */
};

/* ITS device */
struct its_dev {
	TAILQ_ENTRY(its_dev)	entry;
	/* PCI device */
	device_t		pci_dev;
	/* Device ID (i.e. PCI device ID) */
	uint32_t		devid;
	/* List of assigned LPIs */
	struct lpi_chunk	lpis;
	/* Virtual address of ITT */
	vm_offset_t		itt;
	size_t			itt_size;
};

/*
 * ITS command descriptor.
 * Idea for command description passing taken from Linux.
 */
struct its_cmd_desc {
	uint8_t cmd_type;

	union {
		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t id;
		} cmd_desc_movi;

		struct {
			struct its_col *col;
		} cmd_desc_sync;

		struct {
			struct its_col *col;
			uint8_t valid;
		} cmd_desc_mapc;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
			uint32_t id;
		} cmd_desc_mapvi;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_mapi;

		struct {
			struct its_dev *its_dev;
			uint8_t valid;
		} cmd_desc_mapd;

		struct {
			struct its_dev *its_dev;
			struct its_col *col;
			uint32_t pid;
		} cmd_desc_inv;

		struct {
			struct its_col *col;
		} cmd_desc_invall;
	};
};

/* ITS command. Each command is 32 bytes long */
struct its_cmd {
	uint64_t	cmd_dword[4];	/* ITS command double word */
};

/* An ITS private table */
struct its_ptable {
	vm_offset_t	ptab_vaddr;
	unsigned long	ptab_size;
};

/* ITS collection description. */
struct its_col {
	uint64_t	col_target;	/* Target Re-Distributor */
	uint64_t	col_id;		/* Collection ID */
};

struct gicv3_its_irqsrc {
	struct intr_irqsrc	gi_isrc;
	u_int			gi_id;
	u_int			gi_lpi;
	struct its_dev		*gi_its_dev;
	TAILQ_ENTRY(gicv3_its_irqsrc) gi_link;
};
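
/*
 * Editor's note (inferred from gicv3_its_alloc_irqsrc() and
 * its_cmd_mapti() below): gi_id is the per-device EventID programmed
 * into the MSI data register, while gi_lpi is the global LPI number
 * relative to GIC_FIRST_LPI; the INTID delivered to software is
 * gi_lpi + GIC_FIRST_LPI.
 */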

struct gicv3_its_softc {
	device_t	dev;
	struct intr_pic *sc_pic;
	struct resource *sc_its_res;

	cpuset_t	sc_cpus;
	u_int		gic_irq_cpu;

	struct its_ptable sc_its_ptab[GITS_BASER_NUM];
	struct its_col *sc_its_cols[MAXCPU];	/* Per-CPU collections */

	/*
	 * TODO: We should get these from the parent as we only want a
	 * single copy of each across the interrupt controller.
	 */
	uint8_t		*sc_conf_base;
	vm_offset_t	sc_pend_base[MAXCPU];

	/* Command handling */
	struct mtx sc_its_cmd_lock;
	struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
	size_t sc_its_cmd_next_idx;

	vmem_t *sc_irq_alloc;
	struct gicv3_its_irqsrc	**sc_irqs;
	u_int	sc_irq_base;
	u_int	sc_irq_length;
	u_int	sc_irq_count;

	struct mtx sc_its_dev_lock;
	TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
	TAILQ_HEAD(free_irqs, gicv3_its_irqsrc) sc_free_irqs;

#define	ITS_FLAGS_CMDQ_FLUSH		0x00000001
#define	ITS_FLAGS_LPI_CONF_FLUSH	0x00000002
#define	ITS_FLAGS_ERRATA_CAVIUM_22375	0x00000004
	u_int sc_its_flags;
	bool	trace_enable;
	vm_page_t ma; /* fake msi page */
};

static void *conf_base;

typedef void (its_quirk_func_t)(device_t);
static its_quirk_func_t its_quirk_cavium_22375;

static const struct {
	const char *desc;
	uint32_t iidr;
	uint32_t iidr_mask;
	its_quirk_func_t *func;
} its_quirks[] = {
	{
		/* Cavium ThunderX Pass 1.x */
		.desc = "Cavium ThunderX errata: 22375, 24313",
		.iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
		    GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
		.iidr_mask = ~GITS_IIDR_REVISION_MASK,
		.func = its_quirk_cavium_22375,
	},
};

#define	gic_its_read_4(sc, reg)			\
    bus_read_4((sc)->sc_its_res, (reg))
#define	gic_its_read_8(sc, reg)			\
    bus_read_8((sc)->sc_its_res, (reg))

#define	gic_its_write_4(sc, reg, val)		\
    bus_write_4((sc)->sc_its_res, (reg), (val))
#define	gic_its_write_8(sc, reg, val)		\
    bus_write_8((sc)->sc_its_res, (reg), (val))

static device_attach_t gicv3_its_attach;
static device_detach_t gicv3_its_detach;

static pic_disable_intr_t gicv3_its_disable_intr;
static pic_enable_intr_t gicv3_its_enable_intr;
static pic_map_intr_t gicv3_its_map_intr;
static pic_setup_intr_t gicv3_its_setup_intr;
static pic_post_filter_t gicv3_its_post_filter;
static pic_post_ithread_t gicv3_its_post_ithread;
static pic_pre_ithread_t gicv3_its_pre_ithread;
static pic_bind_intr_t gicv3_its_bind_intr;
#ifdef SMP
static pic_init_secondary_t gicv3_its_init_secondary;
#endif
static msi_alloc_msi_t gicv3_its_alloc_msi;
static msi_release_msi_t gicv3_its_release_msi;
static msi_alloc_msix_t gicv3_its_alloc_msix;
static msi_release_msix_t gicv3_its_release_msix;
static msi_map_msi_t gicv3_its_map_msi;
#ifdef IOMMU
static msi_iommu_init_t gicv3_iommu_init;
static msi_iommu_deinit_t gicv3_iommu_deinit;
#endif

static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
static void its_cmd_invall(device_t, struct its_col *);

static device_method_t gicv3_its_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gicv3_its_detach),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gicv3_its_disable_intr),
	DEVMETHOD(pic_enable_intr,	gicv3_its_enable_intr),
	DEVMETHOD(pic_map_intr,		gicv3_its_map_intr),
	DEVMETHOD(pic_setup_intr,	gicv3_its_setup_intr),
	DEVMETHOD(pic_post_filter,	gicv3_its_post_filter),
	DEVMETHOD(pic_post_ithread,	gicv3_its_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gicv3_its_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gicv3_its_bind_intr),
	DEVMETHOD(pic_init_secondary,	gicv3_its_init_secondary),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	gicv3_its_alloc_msi),
	DEVMETHOD(msi_release_msi,	gicv3_its_release_msi),
	DEVMETHOD(msi_alloc_msix,	gicv3_its_alloc_msix),
	DEVMETHOD(msi_release_msix,	gicv3_its_release_msix),
	DEVMETHOD(msi_map_msi,		gicv3_its_map_msi),
#ifdef IOMMU
	DEVMETHOD(msi_iommu_init,	gicv3_iommu_init),
	DEVMETHOD(msi_iommu_deinit,	gicv3_iommu_deinit),
#endif

	/* End */
	DEVMETHOD_END
};

static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
    sizeof(struct gicv3_its_softc));

static void
gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
{
	vm_paddr_t cmd_paddr;
	uint64_t reg, tmp;

	/* Set up the command circular buffer */
	sc->sc_its_cmd_base = contigmalloc(ITS_CMDQ_SIZE, M_GICV3_ITS,
	    M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 0);
	sc->sc_its_cmd_next_idx = 0;

	cmd_paddr = vtophys(sc->sc_its_cmd_base);

	/* Set the base of the command buffer */
	reg = GITS_CBASER_VALID |
	    (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
	    cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
	    (ITS_CMDQ_SIZE / 4096 - 1);
	gic_its_write_8(sc, GITS_CBASER, reg);

	/* Read back to check for fixed value fields */
	tmp = gic_its_read_8(sc, GITS_CBASER);

	if ((tmp & GITS_CBASER_SHARE_MASK) !=
	    (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
		/* Check if the hardware reported non-shareable */
		if ((tmp & GITS_CBASER_SHARE_MASK) ==
		    (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
			/* If so remove the cache attribute */
			reg &= ~GITS_CBASER_CACHE_MASK;
			reg &= ~GITS_CBASER_SHARE_MASK;
			/* Set to Non-cacheable, Non-shareable */
			reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
			reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;

			gic_its_write_8(sc, GITS_CBASER, reg);
		}

		/* The command queue has to be flushed after each command */
		sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
	}

	/* Get the next command from the start of the buffer */
	gic_its_write_8(sc, GITS_CWRITER, 0x0);
}
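
/*
 * Editor's note on the size field written above: the low byte of
 * GITS_CBASER holds the number of 4KB pages minus one, so
 * ITS_CMDQ_SIZE / 4096 - 1 == 15 advertises a 16-page (64KB) queue.
 * Resetting GITS_CWRITER to 0 starts with an empty queue, as the ITS
 * treats CREADR == CWRITER as "no commands pending".
 */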

static int
gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
{
	vm_offset_t table;
	vm_paddr_t paddr;
	uint64_t cache, reg, share, tmp, type;
	size_t esize, its_tbl_size, nidents, nitspages, npages;
	int i, page_size;
	int devbits;

	if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
		/*
		 * GITS_TYPER[17:13] of ThunderX reports that device IDs
		 * are to be 21 bits in length. The entry size of the ITS
		 * table can be read from GITS_BASERn[52:48] and on ThunderX
		 * is supposed to be 8 bytes in length (for device table).
		 * Finally the page size that is to be used by ITS to access
		 * this table will be set to 64KB.
		 *
		 * This gives 0x200000 entries of size 0x8 bytes covered by
		 * 256 pages each of which 64KB in size. The number of pages
		 * (minus 1) should then be written to GITS_BASERn[7:0]. In
		 * that case this value would be 0xFF but on ThunderX the
		 * maximum value that HW accepts is 0xFD.
		 *
		 * Set an arbitrary number of device ID bits to 20 in order
		 * to limit the number of entries in ITS device table to
		 * 0x100000 and the table size to 8MB.
		 */
		devbits = 20;
		cache = 0;
	} else {
		devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
		cache = GITS_BASER_CACHE_WAWB;
	}
	share = GITS_BASER_SHARE_IS;
	page_size = PAGE_SIZE_64K;

	for (i = 0; i < GITS_BASER_NUM; i++) {
		reg = gic_its_read_8(sc, GITS_BASER(i));
		/* The type of table */
		type = GITS_BASER_TYPE(reg);
		/* The table entry size */
		esize = GITS_BASER_ESIZE(reg);

		switch (type) {
		case GITS_BASER_TYPE_DEV:
			nidents = (1 << devbits);
			its_tbl_size = esize * nidents;
			its_tbl_size = roundup2(its_tbl_size, PAGE_SIZE_64K);
			break;
		case GITS_BASER_TYPE_VP:
		case GITS_BASER_TYPE_PP: /* Undocumented? */
		case GITS_BASER_TYPE_IC:
			its_tbl_size = page_size;
			break;
		default:
			continue;
		}
		npages = howmany(its_tbl_size, PAGE_SIZE);

		/* Allocate the table */
		table = (vm_offset_t)contigmalloc(npages * PAGE_SIZE,
		    M_GICV3_ITS, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
		    PAGE_SIZE_64K, 0);

		sc->sc_its_ptab[i].ptab_vaddr = table;
		sc->sc_its_ptab[i].ptab_size = npages * PAGE_SIZE;

		paddr = vtophys(table);

		while (1) {
			nitspages = howmany(its_tbl_size, page_size);

			/* Clear the fields we will be setting */
			reg &= ~(GITS_BASER_VALID |
			    GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
			    GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK |
			    GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
			    GITS_BASER_SIZE_MASK);
			/* Set the new values */
			reg |= GITS_BASER_VALID |
			    (cache << GITS_BASER_CACHE_SHIFT) |
			    (type << GITS_BASER_TYPE_SHIFT) |
			    ((esize - 1) << GITS_BASER_ESIZE_SHIFT) |
			    paddr | (share << GITS_BASER_SHARE_SHIFT) |
			    (nitspages - 1);

			switch (page_size) {
			case PAGE_SIZE_4K:	/* 4KB */
				reg |=
				    GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_16K:	/* 16KB */
				reg |=
				    GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
				break;
			case PAGE_SIZE_64K:	/* 64KB */
				reg |=
				    GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
				break;
			}

			gic_its_write_8(sc, GITS_BASER(i), reg);

			/* Read back to check */
			tmp = gic_its_read_8(sc, GITS_BASER(i));

			/* Do the shareability masks line up? */
			if ((tmp & GITS_BASER_SHARE_MASK) !=
			    (reg & GITS_BASER_SHARE_MASK)) {
				share = (tmp & GITS_BASER_SHARE_MASK) >>
				    GITS_BASER_SHARE_SHIFT;
				continue;
			}

			if ((tmp & GITS_BASER_PSZ_MASK) !=
			    (reg & GITS_BASER_PSZ_MASK)) {
				switch (page_size) {
				case PAGE_SIZE_16K:
					page_size = PAGE_SIZE_4K;
					continue;
				case PAGE_SIZE_64K:
					page_size = PAGE_SIZE_16K;
					continue;
				}
			}

			if (tmp != reg) {
				device_printf(dev, "GITS_BASER%d: "
				    "unable to be updated: %lx != %lx\n",
				    i, reg, tmp);
				return (ENXIO);
			}

			/* We should have made all needed changes */
			break;
		}
	}

	return (0);
}

static void
gicv3_its_conftable_init(struct gicv3_its_softc *sc)
{
	void *conf_table;

	conf_table = atomic_load_ptr(&conf_base);
	if (conf_table == NULL) {
		conf_table = contigmalloc(LPI_CONFTAB_SIZE,
		    M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR,
		    LPI_CONFTAB_ALIGN, 0);

		if (atomic_cmpset_ptr((uintptr_t *)&conf_base,
		    (uintptr_t)NULL, (uintptr_t)conf_table) == 0) {
			contigfree(conf_table, LPI_CONFTAB_SIZE, M_GICV3_ITS);
			conf_table = atomic_load_ptr(&conf_base);
		}
	}
	sc->sc_conf_base = conf_table;

	/* Set the default configuration */
	memset(sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
	    LPI_CONFTAB_SIZE);

	/* Flush the table to memory */
	cpu_dcache_wb_range((vm_offset_t)sc->sc_conf_base, LPI_CONFTAB_SIZE);
}
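
/*
 * Editor's note: the atomic_cmpset_ptr() sequence above is the usual
 * lock-free allocate-once pattern; the loser of a racing allocation
 * frees its copy and adopts the winner's. A generic sketch of the
 * idiom (hypothetical names):
 *
 *	new = alloc();
 *	if (atomic_cmpset_ptr(&global, (uintptr_t)NULL, (uintptr_t)new) == 0) {
 *		free(new);
 *		new = atomic_load_ptr(&global);
 *	}
 */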

static void
gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
{
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ISSET(i, &sc->sc_cpus) == 0)
			continue;

		sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
		    LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
		    0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);

		/* Flush so the ITS can see the memory */
		cpu_dcache_wb_range((vm_offset_t)sc->sc_pend_base[i],
		    LPI_PENDTAB_SIZE);
	}
}

static void
its_init_cpu_lpi(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	uint64_t xbaser, tmp;
	uint32_t ctlr;
	u_int cpuid;

	gicv3 = device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);

	/* Disable LPIs */
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	ctlr &= ~GICR_CTLR_LPI_ENABLE;
	gic_r_write_4(gicv3, GICR_CTLR, ctlr);
	/* Make sure changes are observable by the GIC */
	dsb(sy);

	/*
	 * Set the redistributor base
	 */
	xbaser = vtophys(sc->sc_conf_base) |
	    (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
	    (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
	    (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
	gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);

	/* Check the cache attributes we set */
	tmp = gic_r_read_8(gicv3, GICR_PROPBASER);

	if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
	    (xbaser & GICR_PROPBASER_SHARE_MASK)) {
		if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
		    (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
			/* We need to mark as non-cacheable */
			xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
			    GICR_PROPBASER_CACHE_MASK);
			/* Non-cacheable */
			xbaser |= GICR_PROPBASER_CACHE_NIN <<
			    GICR_PROPBASER_CACHE_SHIFT;
			/* Non-shareable */
			xbaser |= GICR_PROPBASER_SHARE_NS <<
			    GICR_PROPBASER_SHARE_SHIFT;
			gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
		}
		sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
	}

	/*
	 * Set the LPI pending table base
	 */
	xbaser = vtophys(sc->sc_pend_base[cpuid]) |
	    (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
	    (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);

	gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);

	tmp = gic_r_read_8(gicv3, GICR_PENDBASER);

	if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
	    (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
		/* Clear the cache and shareability bits */
		xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
		    GICR_PENDBASER_SHARE_MASK);
		/* Mark as non-shareable */
		xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
		/* And non-cacheable */
		xbaser |= GICR_PENDBASER_CACHE_NIN <<
		    GICR_PENDBASER_CACHE_SHIFT;
	}

	/* Enable LPIs */
	ctlr = gic_r_read_4(gicv3, GICR_CTLR);
	ctlr |= GICR_CTLR_LPI_ENABLE;
	gic_r_write_4(gicv3, GICR_CTLR, ctlr);

	/* Make sure the GIC has seen everything */
	dsb(sy);
}
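
/*
 * Editor's note, illustrative arithmetic only: the low bits written to
 * GICR_PROPBASER above come out to flsl(0x10000 | 0x2000) - 1 == 16;
 * as I read the field, that advertises 17-bit INTID support, enough to
 * cover the highest configured LPI (8192 + 65536 - 1 == 73727).
 */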

static int
its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
{
	device_t gicv3;
	vm_paddr_t target;
	u_int cpuid;
	struct redist_pcpu *rpcpu;

	gicv3 = device_get_parent(dev);
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &sc->sc_cpus))
		return (0);

	/* Check if the ITS is enabled on this CPU */
	if ((gic_r_read_4(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0)
		return (ENXIO);

	rpcpu = gicv3_get_redist(dev);

	/* Do per-cpu LPI init once */
	if (!rpcpu->lpi_enabled) {
		its_init_cpu_lpi(dev, sc);
		rpcpu->lpi_enabled = true;
	}

	if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
		/* This ITS wants the redistributor physical address */
		target = vtophys(rman_get_virtual(&rpcpu->res));
	} else {
		/* This ITS wants the unique processor number */
		target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER)) <<
		    CMD_TARGET_SHIFT;
	}

	sc->sc_its_cols[cpuid]->col_target = target;
	sc->sc_its_cols[cpuid]->col_id = cpuid;

	its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
	its_cmd_invall(dev, sc->sc_its_cols[cpuid]);

	return (0);
}

static int
gicv3_its_sysctl_trace_enable(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	int rv;

	sc = arg1;

	rv = sysctl_handle_bool(oidp, &sc->trace_enable, 0, req);
	if (rv != 0 || req->newptr == NULL)
		return (rv);
	if (sc->trace_enable)
		gic_its_write_8(sc, GITS_TRKCTLR, 3);
	else
		gic_its_write_8(sc, GITS_TRKCTLR, 0);

	return (0);
}

static int
gicv3_its_sysctl_trace_regs(SYSCTL_HANDLER_ARGS)
{
	struct gicv3_its_softc *sc;
	struct sbuf *sb;
	int err;

	sc = arg1;
	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL) {
		device_printf(sc->dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}
	sbuf_cat(sb, "\n");
	sbuf_printf(sb, "GITS_TRKCTLR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKCTLR));
	sbuf_printf(sb, "GITS_TRKR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKR));
	sbuf_printf(sb, "GITS_TRKDIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKDIDR));
	sbuf_printf(sb, "GITS_TRKPIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKPIDR));
	sbuf_printf(sb, "GITS_TRKVIDR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKVIDR));
	sbuf_printf(sb, "GITS_TRKTGTR: 0x%08X\n",
	    gic_its_read_4(sc, GITS_TRKTGTR));

	err = sbuf_finish(sb);
	if (err)
		device_printf(sc->dev, "Error finishing sbuf: %d\n", err);
	sbuf_delete(sb);
	return (err);
}

static int
gicv3_its_init_sysctl(struct gicv3_its_softc *sc)
{
	struct sysctl_oid *oid, *child;
	struct sysctl_ctx_list *ctx_list;

	ctx_list = device_get_sysctl_ctx(sc->dev);
	child = device_get_sysctl_tree(sc->dev);
	oid = SYSCTL_ADD_NODE(ctx_list,
	    SYSCTL_CHILDREN(child), OID_AUTO, "tracing",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Messages tracing");
	if (oid == NULL)
		return (ENXIO);

	/* Add registers */
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "enable",
	    CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_enable, "CU", "Enable tracing");
	SYSCTL_ADD_PROC(ctx_list,
	    SYSCTL_CHILDREN(oid), OID_AUTO, "capture",
	    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    gicv3_its_sysctl_trace_regs, "", "Captured tracing registers.");

	return (0);
}

static int
gicv3_its_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	int domain, err, i, rid;
	uint64_t phys;
	uint32_t iidr;

	sc = device_get_softc(dev);

	sc->sc_irq_length = gicv3_get_nirqs(dev);
	sc->sc_irq_base = GIC_FIRST_LPI;
	sc->sc_irq_base += device_get_unit(dev) * sc->sc_irq_length;

	rid = 0;
	sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_its_res == NULL) {
		device_printf(dev, "Could not allocate memory\n");
		return (ENXIO);
	}

	phys = rounddown2(vtophys(rman_get_virtual(sc->sc_its_res)) +
	    GITS_TRANSLATER, PAGE_SIZE);
	sc->ma = malloc(sizeof(struct vm_page), M_DEVBUF, M_WAITOK | M_ZERO);
	vm_page_initfake(sc->ma, phys, VM_MEMATTR_DEFAULT);

	iidr = gic_its_read_4(sc, GITS_IIDR);
	for (i = 0; i < nitems(its_quirks); i++) {
		if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
			if (bootverbose) {
				device_printf(dev, "Applying %s\n",
				    its_quirks[i].desc);
			}
			its_quirks[i].func(dev);
			break;
		}
	}

	/* Allocate the private tables */
	err = gicv3_its_table_init(dev, sc);
	if (err != 0)
		return (err);

	/* Protects access to the device list */
	mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);

	/* Protects access to the ITS command circular buffer. */
	mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);

	CPU_ZERO(&sc->sc_cpus);
	if (bus_get_domain(dev, &domain) == 0) {
		if (domain < MAXMEMDOM)
			CPU_COPY(&cpuset_domain[domain], &sc->sc_cpus);
	} else {
		CPU_COPY(&all_cpus, &sc->sc_cpus);
	}

	/* Allocate the command circular buffer */
	gicv3_its_cmdq_init(sc);

	/* Allocate the per-CPU collections */
	for (int cpu = 0; cpu <= mp_maxid; cpu++)
		if (CPU_ISSET(cpu, &sc->sc_cpus) != 0)
			sc->sc_its_cols[cpu] = malloc(
			    sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
			    M_WAITOK | M_ZERO);
		else
			sc->sc_its_cols[cpu] = NULL;

	/* Enable the ITS */
	gic_its_write_4(sc, GITS_CTLR,
	    gic_its_read_4(sc, GITS_CTLR) | GITS_CTLR_EN);

	/* Create the LPI configuration table */
	gicv3_its_conftable_init(sc);

	/* And the pending tables */
	gicv3_its_pendtables_init(sc);

	/* Enable LPIs on this CPU */
	its_init_cpu(dev, sc);

	TAILQ_INIT(&sc->sc_its_dev_list);
	TAILQ_INIT(&sc->sc_free_irqs);

	/*
	 * Create the vmem object to allocate INTRNG IRQs from. We try to
	 * use all IRQs not already used by the GICv3.
	 * XXX: This assumes there are no other interrupt controllers in the
	 * system.
	 */
	sc->sc_irq_alloc = vmem_create(device_get_nameunit(dev), 0,
	    gicv3_get_nirqs(dev), 1, 0, M_FIRSTFIT | M_WAITOK);

	sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * sc->sc_irq_length,
	    M_GICV3_ITS, M_WAITOK | M_ZERO);

	/* For GIC-500 install tracking sysctls. */
	if ((iidr & (GITS_IIDR_PRODUCT_MASK | GITS_IIDR_IMPLEMENTOR_MASK)) ==
	    GITS_IIDR_RAW(GITS_IIDR_IMPL_ARM, GITS_IIDR_PROD_GIC500, 0, 0))
		gicv3_its_init_sysctl(sc);

	return (0);
}

static int
gicv3_its_detach(device_t dev)
{

	return (ENXIO);
}

static void
its_quirk_cavium_22375(device_t dev)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;
}

static void
gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	uint8_t *conf;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	conf = sc->sc_conf_base;

	conf[girq->gi_lpi] &= ~LPI_CONF_ENABLE;

	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}

	its_cmd_inv(dev, girq->gi_its_dev, girq);
}

static void
gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	uint8_t *conf;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	conf = sc->sc_conf_base;

	conf[girq->gi_lpi] |= LPI_CONF_ENABLE;

	if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_lpi], 1);
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}

	its_cmd_inv(dev, girq->gi_its_dev, girq);
}

static int
gicv3_its_intr(void *arg, uintptr_t irq)
{
	struct gicv3_its_softc *sc = arg;
	struct gicv3_its_irqsrc *girq;
	struct trapframe *tf;

	irq -= sc->sc_irq_base;
	girq = sc->sc_irqs[irq];
	if (girq == NULL)
		panic("gicv3_its_intr: Invalid interrupt %ld",
		    irq + sc->sc_irq_base);

	tf = curthread->td_intr_frame;
	intr_isrc_dispatch(&girq->gi_isrc, tf);
	return (FILTER_HANDLED);
}

static void
gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	gicv3_its_disable_intr(dev, isrc);
	gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
}

static void
gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	gicv3_its_enable_intr(dev, isrc);
}

static void
gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	gic_icc_write(EOIR1, girq->gi_lpi + GIC_FIRST_LPI);
}

static int
gicv3_its_select_cpu(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);
	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		sc->gic_irq_cpu = intr_irq_next_cpu(sc->gic_irq_cpu,
		    &sc->sc_cpus);
		CPU_SETOF(sc->gic_irq_cpu, &isrc->isrc_cpu);
	}

	return (0);
}

static int
gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gicv3_its_irqsrc *girq;

	gicv3_its_select_cpu(dev, isrc);

	girq = (struct gicv3_its_irqsrc *)isrc;
	its_cmd_movi(dev, girq);
	return (0);
}

static int
gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{

	/*
	 * This should never happen, we only call this function to map
	 * interrupts found before the controller driver is ready.
	 */
	panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
}

static int
gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	/* Bind the interrupt to a CPU */
	gicv3_its_bind_intr(dev, isrc);

	return (0);
}

#ifdef SMP
static void
gicv3_its_init_secondary(device_t dev)
{
	struct gicv3_its_softc *sc;

	sc = device_get_softc(dev);

	/*
	 * This is fatal as otherwise we may bind interrupts to this CPU.
	 * We need a way to tell the interrupt framework to only bind to a
	 * subset of given CPUs when it performs the shuffle.
	 */
	if (its_init_cpu(dev, sc) != 0)
		panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
		    PCPU_GET(cpuid));
}
#endif

static uint32_t
its_get_devid(device_t pci_dev)
{
	uintptr_t id;

	if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
		panic("its_get_devid: Unable to get the MSI DeviceID");

	return (id);
}

static struct its_dev *
its_device_find(device_t dev, device_t child)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev = NULL;

	sc = device_get_softc(dev);

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
		if (its_dev->pci_dev == child)
			break;
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	return (its_dev);
}

static struct its_dev *
its_device_get(device_t dev, device_t child, u_int nvecs)
{
	struct gicv3_its_softc *sc;
	struct its_dev *its_dev;
	vmem_addr_t irq_base;
	size_t esize;

	sc = device_get_softc(dev);

	its_dev = its_device_find(dev, child);
	if (its_dev != NULL)
		return (its_dev);

	its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
	if (its_dev == NULL)
		return (NULL);

	its_dev->pci_dev = child;
	its_dev->devid = its_get_devid(child);

	its_dev->lpis.lpi_busy = 0;
	its_dev->lpis.lpi_num = nvecs;
	its_dev->lpis.lpi_free = nvecs;

	if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
	    &irq_base) != 0) {
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}
	its_dev->lpis.lpi_base = irq_base;

	/* Get ITT entry size */
	esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));

	/*
	 * Allocate ITT for this device.
	 * PA has to be 256 B aligned. At least two entries for device.
	 */
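	/*
	 * Editor's worked example (the 8-byte entry size is illustrative;
	 * the real value comes from GITS_TYPER above): with nvecs == 2 and
	 * esize == 8, roundup2(MAX(2, 2) * 8, 256) == 256, so the smallest
	 * ITT is a single 256-byte block.
	 */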
	its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
	its_dev->itt = (vm_offset_t)contigmalloc(its_dev->itt_size,
	    M_GICV3_ITS, M_NOWAIT | M_ZERO, 0, LPI_INT_TRANS_TAB_MAX_ADDR,
	    LPI_INT_TRANS_TAB_ALIGN, 0);
	if (its_dev->itt == 0) {
		vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
		free(its_dev, M_GICV3_ITS);
		return (NULL);
	}

	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Map device to its ITT */
	its_cmd_mapd(dev, its_dev, 1);

	return (its_dev);
}

static void
its_device_release(device_t dev, struct its_dev *its_dev)
{
	struct gicv3_its_softc *sc;

	KASSERT(its_dev->lpis.lpi_busy == 0,
1201 ("its_device_release: Trying to release an inuse ITS device"));

	/* Unmap device in ITS */
	its_cmd_mapd(dev, its_dev, 0);

	sc = device_get_softc(dev);

	/* Remove the device from the list of devices */
	mtx_lock_spin(&sc->sc_its_dev_lock);
	TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
	mtx_unlock_spin(&sc->sc_its_dev_lock);

	/* Free ITT */
	KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device"));
	contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS);

	/* Free the IRQ allocation */
	vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
	    its_dev->lpis.lpi_num);

	free(its_dev, M_GICV3_ITS);
}

static struct gicv3_its_irqsrc *
gicv3_its_alloc_irqsrc(device_t dev, struct gicv3_its_softc *sc, u_int irq)
{
	struct gicv3_its_irqsrc *girq = NULL;

	KASSERT(sc->sc_irqs[irq] == NULL,
	    ("%s: Interrupt %u already allocated", __func__, irq));
	mtx_lock_spin(&sc->sc_its_dev_lock);
	if (!TAILQ_EMPTY(&sc->sc_free_irqs)) {
		girq = TAILQ_FIRST(&sc->sc_free_irqs);
		TAILQ_REMOVE(&sc->sc_free_irqs, girq, gi_link);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	if (girq == NULL) {
		girq = malloc(sizeof(*girq), M_GICV3_ITS,
		    M_NOWAIT | M_ZERO);
		if (girq == NULL)
			return (NULL);
		girq->gi_id = -1;
		if (intr_isrc_register(&girq->gi_isrc, dev, 0,
		    "%s,%u", device_get_nameunit(dev), irq) != 0) {
			free(girq, M_GICV3_ITS);
			return (NULL);
		}
	}
	girq->gi_lpi = irq + sc->sc_irq_base - GIC_FIRST_LPI;
	sc->sc_irqs[irq] = girq;

	return (girq);
}

static void
gicv3_its_release_irqsrc(struct gicv3_its_softc *sc,
    struct gicv3_its_irqsrc *girq)
{
	u_int irq;

	mtx_assert(&sc->sc_its_dev_lock, MA_OWNED);

	irq = girq->gi_lpi + GIC_FIRST_LPI - sc->sc_irq_base;
	sc->sc_irqs[irq] = NULL;

	girq->gi_id = -1;
	girq->gi_its_dev = NULL;
	TAILQ_INSERT_TAIL(&sc->sc_free_irqs, girq, gi_link);
}

static int
gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **srcs)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	u_int irq;
	int i;

	its_dev = its_device_get(dev, child, count);
	if (its_dev == NULL)
		return (ENXIO);

	KASSERT(its_dev->lpis.lpi_free >= count,
	    ("gicv3_its_alloc_msi: No free LPIs"));
	sc = device_get_softc(dev);
	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
	    its_dev->lpis.lpi_free;

	/* Allocate the irqsrc for each MSI */
	for (i = 0; i < count; i++, irq++) {
		its_dev->lpis.lpi_free--;
		srcs[i] = (struct intr_irqsrc *)gicv3_its_alloc_irqsrc(dev,
		    sc, irq);
		if (srcs[i] == NULL)
			break;
	}

	/* The allocation failed, release them */
	if (i != count) {
		mtx_lock_spin(&sc->sc_its_dev_lock);
		for (i = 0; i < count; i++) {
			girq = (struct gicv3_its_irqsrc *)srcs[i];
			if (girq == NULL)
				break;
			gicv3_its_release_irqsrc(sc, girq);
			srcs[i] = NULL;
		}
		mtx_unlock_spin(&sc->sc_its_dev_lock);
		return (ENXIO);
	}

	/* Finish the allocation now we have all MSI irqsrcs */
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)srcs[i];
		girq->gi_id = i;
		girq->gi_its_dev = its_dev;

		/* Map the message to the given IRQ */
		gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
		its_cmd_mapti(dev, girq);
	}
	its_dev->lpis.lpi_busy += count;
	*pic = dev;

	return (0);
}

static int
gicv3_its_release_msi(device_t dev, device_t child, int count,
    struct intr_irqsrc **isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	int i;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msi: Releasing a MSI interrupt with "
	     "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy >= count,
	    ("gicv3_its_release_msi: Releasing more interrupts than "
	     "were allocated: releasing %d, allocated %d", count,
	     its_dev->lpis.lpi_busy));

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_dev_lock);
	for (i = 0; i < count; i++) {
		girq = (struct gicv3_its_irqsrc *)isrc[i];
		gicv3_its_release_irqsrc(sc, girq);
	}
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy -= count;

	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}

static int
gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
    struct intr_irqsrc **isrcp)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;
	u_int nvecs, irq;

	nvecs = pci_msix_count(child);
	its_dev = its_device_get(dev, child, nvecs);
	if (its_dev == NULL)
		return (ENXIO);

	KASSERT(its_dev->lpis.lpi_free > 0,
	    ("gicv3_its_alloc_msix: No free LPIs"));
	sc = device_get_softc(dev);
	irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
	    its_dev->lpis.lpi_free;

	girq = gicv3_its_alloc_irqsrc(dev, sc, irq);
	if (girq == NULL)
		return (ENXIO);
	girq->gi_id = its_dev->lpis.lpi_busy;
	girq->gi_its_dev = its_dev;

	its_dev->lpis.lpi_free--;
	its_dev->lpis.lpi_busy++;

	/* Map the message to the given IRQ */
	gicv3_its_select_cpu(dev, (struct intr_irqsrc *)girq);
	its_cmd_mapti(dev, girq);

	*pic = dev;
	*isrcp = (struct intr_irqsrc *)girq;

	return (0);
}

static int
gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;
	struct its_dev *its_dev;

	its_dev = its_device_find(dev, child);

	KASSERT(its_dev != NULL,
	    ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
	     "no ITS device"));
	KASSERT(its_dev->lpis.lpi_busy > 0,
	    ("gicv3_its_release_msix: Releasing more interrupts than "
	     "were allocated: allocated %d", its_dev->lpis.lpi_busy));

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;
	mtx_lock_spin(&sc->sc_its_dev_lock);
	gicv3_its_release_irqsrc(sc, girq);
	mtx_unlock_spin(&sc->sc_its_dev_lock);
	its_dev->lpis.lpi_busy--;

	if (its_dev->lpis.lpi_busy == 0)
		its_device_release(dev, its_dev);

	return (0);
}

static int
gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
    uint64_t *addr, uint32_t *data)
{
	struct gicv3_its_softc *sc;
	struct gicv3_its_irqsrc *girq;

	sc = device_get_softc(dev);
	girq = (struct gicv3_its_irqsrc *)isrc;

	*addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
	*data = girq->gi_id;

	return (0);
}

#ifdef IOMMU
static int
gicv3_iommu_init(device_t dev, device_t child, struct iommu_domain **domain)
{
	struct gicv3_its_softc *sc;
	struct iommu_ctx *ctx;
	int error;

	sc = device_get_softc(dev);
	ctx = iommu_get_dev_ctx(child);
	error = iommu_map_msi(ctx, PAGE_SIZE, GITS_TRANSLATER,
	    IOMMU_MAP_ENTRY_WRITE, IOMMU_MF_CANWAIT, &sc->ma);
	*domain = iommu_get_ctx_domain(ctx);

	return (error);
}

static void
gicv3_iommu_deinit(device_t dev, device_t child)
{
	struct iommu_ctx *ctx;

	ctx = iommu_get_dev_ctx(child);
	iommu_unmap_msi(ctx);
}
#endif

/*
 * Commands handling.
 */

static __inline void
cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
{
	/* Command field: DW0 [7:0] */
	cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
	cmd->cmd_dword[0] |= htole64(cmd_type);
}

static __inline void
cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
{
	/* Device ID field: DW0 [63:32] */
	cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
	cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
}

static __inline void
cmd_format_size(struct its_cmd *cmd, uint16_t size)
{
	/* Size field: DW1 [4:0] */
	cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
	cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
}

static __inline void
cmd_format_id(struct its_cmd *cmd, uint32_t id)
{
	/* ID field: DW1 [31:0] */
	cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
	cmd->cmd_dword[1] |= htole64(id);
}

static __inline void
cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
{
	/* Physical ID field: DW1 [63:32] */
	cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
	cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
}

static __inline void
cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
{
	/* Collection field: DW2 [15:0] */
	cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
	cmd->cmd_dword[2] |= htole64(col_id);
}

static __inline void
cmd_format_target(struct its_cmd *cmd, uint64_t target)
{
	/* Target Address field: DW2 [47:16] */
	cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
	cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
}

static __inline void
cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
{
	/* ITT Address field: DW2 [47:8] */
	cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
	cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
}

static __inline void
cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
{
	/* Valid field: DW2 [63] */
	cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
	cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
}
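
/*
 * Editor's sketch: the helpers above compose into complete commands.
 * An INVALL for collection 5, for example, is just
 *
 *	cmd_format_command(cmd, ITS_CMD_INVALL);
 *	cmd_format_col(cmd, 5);
 *
 * with all other fields left zero; its_cmd_prepare() below does
 * exactly this for each command type.
 */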

static inline bool
its_cmd_queue_full(struct gicv3_its_softc *sc)
{
	size_t read_idx, next_write_idx;

	/* Get the index of the next command */
	next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
	    (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
	/* And the index of the current command being read */
	read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);

	/*
	 * The queue is full when the write offset points
	 * at the command before the current read offset.
	 */
	return (next_write_idx == read_idx);
}
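
/*
 * Editor's note: this is the classic ring-buffer convention that keeps
 * one slot unused so that CREADR == CWRITER can unambiguously mean
 * "empty". With a 64KB queue of 32-byte commands there are 2048 slots;
 * e.g. with read_idx == 10 the queue reports full once
 * sc_its_cmd_next_idx reaches 9.
 */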

static inline void
its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{

	if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
		/* Clean D-cache under command. */
		cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
	} else {
		/* DSB inner shareable, store */
		dsb(ishst);
	}
}

static inline uint64_t
its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
{
	uint64_t off;

	off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);

	return (off);
}

static void
its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
    struct its_cmd *cmd_last)
{
	struct gicv3_its_softc *sc;
	uint64_t first, last, read;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete are not known.
	 */
	us_left = 1000000;

	first = its_cmd_cwriter_offset(sc, cmd_first);
	last = its_cmd_cwriter_offset(sc, cmd_last);

	for (;;) {
		read = gic_its_read_8(sc, GITS_CREADR);
		if (first < last) {
			if (read < first || read >= last)
				break;
		} else if (read < first && read >= last)
			break;

		if (us_left-- == 0) {
			/* This means timeout */
			device_printf(dev,
			    "Timeout while waiting for CMD completion.\n");
			return;
		}
		DELAY(1);
	}
}
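
/*
 * Editor's note on the completion window above: commands in
 * [first, last) are outstanding. When the range does not wrap
 * (first < last), CREADR leaving that interval means completion;
 * e.g. first == 0x40, last == 0x80 completes once read < 0x40 or
 * read >= 0x80. The wrapped case inverts the test, completing once
 * read falls in [last, first).
 */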

static struct its_cmd *
its_cmd_alloc_locked(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd;
	size_t us_left;

	sc = device_get_softc(dev);

	/*
	 * XXX ARM64TODO: This is obviously a significant delay.
	 * The reason for that is that currently the time frames for
	 * the command to complete (and therefore free the descriptor)
	 * are not known.
	 */
	us_left = 1000000;

	mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
	while (its_cmd_queue_full(sc)) {
		if (us_left-- == 0) {
			/* Timeout while waiting for free command */
			device_printf(dev,
			    "Timeout while waiting for free command\n");
			return (NULL);
		}
		DELAY(1);
	}

	cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	sc->sc_its_cmd_next_idx++;
	sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);

	return (cmd);
}

static uint64_t
its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
{
	uint64_t target;
	uint8_t cmd_type;
	u_int size;

	cmd_type = desc->cmd_type;
	target = ITS_TARGET_NONE;

	switch (cmd_type) {
	case ITS_CMD_MOVI:	/* Move interrupt ID to another collection */
		target = desc->cmd_desc_movi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MOVI);
		cmd_format_id(cmd, desc->cmd_desc_movi.id);
		cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
		cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
		break;
	case ITS_CMD_SYNC:	/* Wait for previous commands completion */
		target = desc->cmd_desc_sync.col->col_target;
		cmd_format_command(cmd, ITS_CMD_SYNC);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPD:	/* Assign ITT to device */
		cmd_format_command(cmd, ITS_CMD_MAPD);
		cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
		/*
		 * Size describes number of bits to encode interrupt IDs
		 * supported by the device minus one.
		 * When V (valid) bit is zero, this field should be written
		 * as zero.
		 */
		if (desc->cmd_desc_mapd.valid != 0) {
			size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
			size = MAX(1, size) - 1;
		} else
			size = 0;

		cmd_format_size(cmd, size);
		cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
		cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
		break;
	case ITS_CMD_MAPC:	/* Map collection to Re-Distributor */
		target = desc->cmd_desc_mapc.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPC);
		cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
		cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
		cmd_format_target(cmd, target);
		break;
	case ITS_CMD_MAPTI:
		target = desc->cmd_desc_mapvi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPTI);
		cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
		cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
		break;
	case ITS_CMD_MAPI:
		target = desc->cmd_desc_mapi.col->col_target;
		cmd_format_command(cmd, ITS_CMD_MAPI);
		cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
		cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
		break;
	case ITS_CMD_INV:
		target = desc->cmd_desc_inv.col->col_target;
		cmd_format_command(cmd, ITS_CMD_INV);
		cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
		cmd_format_id(cmd, desc->cmd_desc_inv.pid);
		break;
	case ITS_CMD_INVALL:
		cmd_format_command(cmd, ITS_CMD_INVALL);
		cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
		break;
	default:
		panic("its_cmd_prepare: Invalid command: %x", cmd_type);
	}

	return (target);
}

static int
its_cmd_send(device_t dev, struct its_cmd_desc *desc)
{
	struct gicv3_its_softc *sc;
	struct its_cmd *cmd, *cmd_sync, *cmd_write;
	struct its_col col_sync;
	struct its_cmd_desc desc_sync;
	uint64_t target, cwriter;

	sc = device_get_softc(dev);
	mtx_lock_spin(&sc->sc_its_cmd_lock);
	cmd = its_cmd_alloc_locked(dev);
	if (cmd == NULL) {
		device_printf(dev, "could not allocate ITS command\n");
		mtx_unlock_spin(&sc->sc_its_cmd_lock);
		return (EBUSY);
	}

	target = its_cmd_prepare(cmd, desc);
	its_cmd_sync(sc, cmd);

	if (target != ITS_TARGET_NONE) {
		cmd_sync = its_cmd_alloc_locked(dev);
		if (cmd_sync != NULL) {
			desc_sync.cmd_type = ITS_CMD_SYNC;
			col_sync.col_target = target;
			desc_sync.cmd_desc_sync.col = &col_sync;
			its_cmd_prepare(cmd_sync, &desc_sync);
			its_cmd_sync(sc, cmd_sync);
		}
	}

	/* Update GITS_CWRITER */
	cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
	gic_its_write_8(sc, GITS_CWRITER, cwriter);
	cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
	mtx_unlock_spin(&sc->sc_its_cmd_lock);

	its_cmd_wait_completion(dev, cmd, cmd_write);

	return (0);
}
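
/*
 * Usage sketch (mirrors the wrappers below): callers never touch the
 * command queue directly; they fill in a struct its_cmd_desc and hand
 * it to its_cmd_send(), e.g.
 *
 *	struct its_cmd_desc desc;
 *
 *	desc.cmd_type = ITS_CMD_INVALL;
 *	desc.cmd_desc_invall.col = col;
 *	its_cmd_send(dev, &desc);
 */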

/* Handlers to send commands */
static void
its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_MOVI;
	desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_movi.col = col;
	desc.cmd_desc_movi.id = girq->gi_id;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPC;
	desc.cmd_desc_mapc.col = col;
	/*
	 * Valid bit set - map the collection.
	 * Valid bit cleared - unmap the collection.
	 */
	desc.cmd_desc_mapc.valid = valid;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;
	u_int col_id;

	sc = device_get_softc(dev);

	col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
	col = sc->sc_its_cols[col_id];

	desc.cmd_type = ITS_CMD_MAPTI;
	desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
	desc.cmd_desc_mapvi.col = col;
	/* The EventID sent to the device */
	desc.cmd_desc_mapvi.id = girq->gi_id;
	/* The physical interrupt presented to software */
	desc.cmd_desc_mapvi.pid = girq->gi_lpi + GIC_FIRST_LPI;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_MAPD;
	desc.cmd_desc_mapd.its_dev = its_dev;
	desc.cmd_desc_mapd.valid = valid;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_inv(device_t dev, struct its_dev *its_dev,
    struct gicv3_its_irqsrc *girq)
{
	struct gicv3_its_softc *sc;
	struct its_cmd_desc desc;
	struct its_col *col;

	sc = device_get_softc(dev);
	col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];

	desc.cmd_type = ITS_CMD_INV;
	/* The EventID sent to the device */
	desc.cmd_desc_inv.pid = girq->gi_id;
	desc.cmd_desc_inv.its_dev = its_dev;
	desc.cmd_desc_inv.col = col;

	its_cmd_send(dev, &desc);
}

static void
its_cmd_invall(device_t dev, struct its_col *col)
{
	struct its_cmd_desc desc;

	desc.cmd_type = ITS_CMD_INVALL;
	desc.cmd_desc_invall.col = col;

	its_cmd_send(dev, &desc);
}

#ifdef FDT
static device_probe_t gicv3_its_fdt_probe;
static device_attach_t gicv3_its_fdt_attach;

static device_method_t gicv3_its_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_fdt_probe),
	DEVMETHOD(device_attach,	gicv3_its_fdt_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_fdt_baseclasses
DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses
static devclass_t gicv3_its_fdt_devclass;

EARLY_DRIVER_MODULE(its_fdt, gic, gicv3_its_fdt_driver,
    gicv3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

static int
gicv3_its_fdt_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
		return (ENXIO);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

static int
gicv3_its_fdt_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	phandle_t xref;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	/* Register this device as an interrupt controller */
	xref = OF_xref_from_node(ofw_bus_get_node(dev));
	sc->sc_pic = intr_pic_register(dev, xref);
	intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);

	/* Register this device to handle MSI interrupts */
	intr_msi_register(dev, xref);

	return (0);
}
#endif

#ifdef DEV_ACPI
static device_probe_t gicv3_its_acpi_probe;
static device_attach_t gicv3_its_acpi_attach;

static device_method_t gicv3_its_acpi_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gicv3_its_acpi_probe),
	DEVMETHOD(device_attach,	gicv3_its_acpi_attach),

	/* End */
	DEVMETHOD_END
};

#define its_baseclasses its_acpi_baseclasses
DEFINE_CLASS_1(its, gicv3_its_acpi_driver, gicv3_its_acpi_methods,
    sizeof(struct gicv3_its_softc), gicv3_its_driver);
#undef its_baseclasses
static devclass_t gicv3_its_acpi_devclass;

EARLY_DRIVER_MODULE(its_acpi, gic, gicv3_its_acpi_driver,
    gicv3_its_acpi_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);

static int
gicv3_its_acpi_probe(device_t dev)
{

	if (gic_get_bus(dev) != GIC_BUS_ACPI)
		return (EINVAL);

	if (gic_get_hw_rev(dev) < 3)
		return (EINVAL);

	device_set_desc(dev, "ARM GIC Interrupt Translation Service");
	return (BUS_PROBE_DEFAULT);
}

static int
gicv3_its_acpi_attach(device_t dev)
{
	struct gicv3_its_softc *sc;
	struct gic_v3_devinfo *di;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	err = gicv3_its_attach(dev);
	if (err != 0)
		return (err);

	di = device_get_ivars(dev);
	sc->sc_pic = intr_pic_register(dev, di->msi_xref);
	intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
	    gicv3_its_intr, sc, sc->sc_irq_base, sc->sc_irq_length);

	/* Register this device to handle MSI interrupts */
	intr_msi_register(dev, di->msi_xref);

	return (0);
}
#endif