xref: /f-stack/freebsd/arm64/arm64/gic_v3.c (revision 22ce4aff)
1 /*-
2  * Copyright (c) 2015-2016 The FreeBSD Foundation
3  * All rights reserved.
4  *
5  * This software was developed by Andrew Turner under
6  * the sponsorship of the FreeBSD Foundation.
7  *
8  * This software was developed by Semihalf under
9  * the sponsorship of the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include "opt_acpi.h"
34 #include "opt_platform.h"
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bitstring.h>
42 #include <sys/bus.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/malloc.h>
46 #include <sys/module.h>
47 #include <sys/rman.h>
48 #include <sys/pcpu.h>
49 #include <sys/proc.h>
50 #include <sys/cpuset.h>
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/smp.h>
54 #include <sys/interrupt.h>
55 
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 
59 #include <machine/bus.h>
60 #include <machine/cpu.h>
61 #include <machine/intr.h>
62 
63 #ifdef FDT
64 #include <dev/fdt/fdt_intr.h>
65 #include <dev/ofw/ofw_bus_subr.h>
66 #endif
67 
68 #ifdef DEV_ACPI
69 #include <contrib/dev/acpica/include/acpi.h>
70 #include <dev/acpica/acpivar.h>
71 #endif
72 
73 #include "pic_if.h"
74 #include "msi_if.h"
75 
76 #include <arm/arm/gic_common.h>
77 #include "gic_v3_reg.h"
78 #include "gic_v3_var.h"
79 
/* Bus interface method implementations. */
static bus_get_domain_t gic_v3_get_domain;
static bus_read_ivar_t gic_v3_read_ivar;

/* Interrupt controller (PIC) interface method implementations. */
static pic_disable_intr_t gic_v3_disable_intr;
static pic_enable_intr_t gic_v3_enable_intr;
static pic_map_intr_t gic_v3_map_intr;
static pic_setup_intr_t gic_v3_setup_intr;
static pic_teardown_intr_t gic_v3_teardown_intr;
static pic_post_filter_t gic_v3_post_filter;
static pic_post_ithread_t gic_v3_post_ithread;
static pic_pre_ithread_t gic_v3_pre_ithread;
static pic_bind_intr_t gic_v3_bind_intr;
#ifdef SMP
static pic_init_secondary_t gic_v3_init_secondary;
static pic_ipi_send_t gic_v3_ipi_send;
static pic_ipi_setup_t gic_v3_ipi_setup;
#endif

/* MSI/MSI-X interface method implementations. */
static msi_alloc_msi_t gic_v3_alloc_msi;
static msi_release_msi_t gic_v3_release_msi;
static msi_alloc_msix_t gic_v3_alloc_msix;
static msi_release_msix_t gic_v3_release_msix;
static msi_map_msi_t gic_v3_map_msi;

/* Last CPU an unbound SPI was routed to (round-robin state, see bind_intr). */
static u_int gic_irq_cpu;
#ifdef SMP
/* Per-SGI map to the IPI number it delivers; filled by gic_v3_ipi_setup(). */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
/* Next SGI available for IPI allocation. */
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif
109 
/* Kernel object method table wiring this driver into the kobj interfaces. */
static device_method_t gic_v3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_detach,	gic_v3_detach),

	/* Bus interface */
	DEVMETHOD(bus_get_domain,	gic_v3_get_domain),
	DEVMETHOD(bus_read_ivar,	gic_v3_read_ivar),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,	gic_v3_disable_intr),
	DEVMETHOD(pic_enable_intr,	gic_v3_enable_intr),
	DEVMETHOD(pic_map_intr,		gic_v3_map_intr),
	DEVMETHOD(pic_setup_intr,	gic_v3_setup_intr),
	DEVMETHOD(pic_teardown_intr,	gic_v3_teardown_intr),
	DEVMETHOD(pic_post_filter,	gic_v3_post_filter),
	DEVMETHOD(pic_post_ithread,	gic_v3_post_ithread),
	DEVMETHOD(pic_pre_ithread,	gic_v3_pre_ithread),
#ifdef SMP
	DEVMETHOD(pic_bind_intr,	gic_v3_bind_intr),
	DEVMETHOD(pic_init_secondary,	gic_v3_init_secondary),
	DEVMETHOD(pic_ipi_send,		gic_v3_ipi_send),
	DEVMETHOD(pic_ipi_setup,	gic_v3_ipi_setup),
#endif

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,        gic_v3_alloc_msi),
	DEVMETHOD(msi_release_msi,      gic_v3_release_msi),
	DEVMETHOD(msi_alloc_msix,       gic_v3_alloc_msix),
	DEVMETHOD(msi_release_msix,     gic_v3_release_msix),
	DEVMETHOD(msi_map_msi,          gic_v3_map_msi),

	/* End */
	DEVMETHOD_END
};

/* Base class; presumably subclassed by the FDT/ACPI bus front-ends. */
DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods,
    sizeof(struct gic_v3_softc));

/*
 * Driver-specific definitions.
 */
/* Dedicated malloc pool for all GICv3 allocations in this driver. */
MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR);
152 
/*
 * Helper functions and definitions.
 */
/* Destination registers, either Distributor or Re-Distributor */
enum gic_v3_xdist {
	DIST = 0,
	REDIST,
};

/* Per-interrupt software state, embedding the framework's intr_irqsrc. */
struct gic_v3_irqsrc {
	struct intr_irqsrc	gi_isrc;
	uint32_t		gi_irq;		/* GIC interrupt ID */
	enum intr_polarity	gi_pol;		/* configured polarity */
	enum intr_trigger	gi_trig;	/* configured trigger mode */
#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;	/* GI_FLAG_* */
};
173 
/* Helper routines starting with gic_v3_ */
static int gic_v3_dist_init(struct gic_v3_softc *);
static int gic_v3_redist_alloc(struct gic_v3_softc *);
static int gic_v3_redist_find(struct gic_v3_softc *);
static int gic_v3_redist_init(struct gic_v3_softc *);
static int gic_v3_cpu_init(struct gic_v3_softc *);
static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist);

/* A sequence of init functions for primary (boot) CPU */
typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *);
/* Primary CPU initialization sequence */
static gic_v3_initseq_t gic_v3_primary_init[] = {
	gic_v3_dist_init,
	gic_v3_redist_alloc,
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL			/* sentinel */
};

#ifdef SMP
/* Secondary CPU initialization sequence (Distributor already set up). */
static gic_v3_initseq_t gic_v3_secondary_init[] = {
	gic_v3_redist_init,
	gic_v3_cpu_init,
	NULL			/* sentinel */
};
#endif
201 
202 uint32_t
gic_r_read_4(device_t dev,bus_size_t offset)203 gic_r_read_4(device_t dev, bus_size_t offset)
204 {
205 	struct gic_v3_softc *sc;
206 	struct resource *rdist;
207 
208 	sc = device_get_softc(dev);
209 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
210 	return (bus_read_4(rdist, offset));
211 }
212 
213 uint64_t
gic_r_read_8(device_t dev,bus_size_t offset)214 gic_r_read_8(device_t dev, bus_size_t offset)
215 {
216 	struct gic_v3_softc *sc;
217 	struct resource *rdist;
218 
219 	sc = device_get_softc(dev);
220 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
221 	return (bus_read_8(rdist, offset));
222 }
223 
224 void
gic_r_write_4(device_t dev,bus_size_t offset,uint32_t val)225 gic_r_write_4(device_t dev, bus_size_t offset, uint32_t val)
226 {
227 	struct gic_v3_softc *sc;
228 	struct resource *rdist;
229 
230 	sc = device_get_softc(dev);
231 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
232 	bus_write_4(rdist, offset, val);
233 }
234 
235 void
gic_r_write_8(device_t dev,bus_size_t offset,uint64_t val)236 gic_r_write_8(device_t dev, bus_size_t offset, uint64_t val)
237 {
238 	struct gic_v3_softc *sc;
239 	struct resource *rdist;
240 
241 	sc = device_get_softc(dev);
242 	rdist = &sc->gic_redists.pcpu[PCPU_GET(cpuid)]->res;
243 	bus_write_8(rdist, offset, val);
244 }
245 
246 /*
247  * Device interface.
248  */
249 int
gic_v3_attach(device_t dev)250 gic_v3_attach(device_t dev)
251 {
252 	struct gic_v3_softc *sc;
253 	gic_v3_initseq_t *init_func;
254 	uint32_t typer;
255 	int rid;
256 	int err;
257 	size_t i;
258 	u_int irq;
259 	const char *name;
260 
261 	sc = device_get_softc(dev);
262 	sc->gic_registered = FALSE;
263 	sc->dev = dev;
264 	err = 0;
265 
266 	/* Initialize mutex */
267 	mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN);
268 
269 	/*
270 	 * Allocate array of struct resource.
271 	 * One entry for Distributor and all remaining for Re-Distributor.
272 	 */
273 	sc->gic_res = malloc(
274 	    sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1),
275 	    M_GIC_V3, M_WAITOK);
276 
277 	/* Now allocate corresponding resources */
278 	for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) {
279 		sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
280 		    &rid, RF_ACTIVE);
281 		if (sc->gic_res[rid] == NULL)
282 			return (ENXIO);
283 	}
284 
285 	/*
286 	 * Distributor interface
287 	 */
288 	sc->gic_dist = sc->gic_res[0];
289 
290 	/*
291 	 * Re-Dristributor interface
292 	 */
293 	/* Allocate space under region descriptions */
294 	sc->gic_redists.regions = malloc(
295 	    sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions,
296 	    M_GIC_V3, M_WAITOK);
297 
298 	/* Fill-up bus_space information for each region. */
299 	for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++)
300 		sc->gic_redists.regions[i] = sc->gic_res[rid];
301 
302 	/* Get the number of supported SPI interrupts */
303 	typer = gic_d_read(sc, 4, GICD_TYPER);
304 	sc->gic_nirqs = GICD_TYPER_I_NUM(typer);
305 	if (sc->gic_nirqs > GIC_I_NUM_MAX)
306 		sc->gic_nirqs = GIC_I_NUM_MAX;
307 
308 	sc->gic_irqs = malloc(sizeof(*sc->gic_irqs) * sc->gic_nirqs,
309 	    M_GIC_V3, M_WAITOK | M_ZERO);
310 	name = device_get_nameunit(dev);
311 	for (irq = 0; irq < sc->gic_nirqs; irq++) {
312 		struct intr_irqsrc *isrc;
313 
314 		sc->gic_irqs[irq].gi_irq = irq;
315 		sc->gic_irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
316 		sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;
317 
318 		isrc = &sc->gic_irqs[irq].gi_isrc;
319 		if (irq <= GIC_LAST_SGI) {
320 			err = intr_isrc_register(isrc, sc->dev,
321 			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
322 		} else if (irq <= GIC_LAST_PPI) {
323 			err = intr_isrc_register(isrc, sc->dev,
324 			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
325 		} else {
326 			err = intr_isrc_register(isrc, sc->dev, 0,
327 			    "%s,s%u", name, irq - GIC_FIRST_SPI);
328 		}
329 		if (err != 0) {
330 			/* XXX call intr_isrc_deregister() */
331 			free(sc->gic_irqs, M_DEVBUF);
332 			return (err);
333 		}
334 	}
335 
336 	if (sc->gic_mbi_start > 0) {
337 		/* Reserve these interrupts for MSI/MSI-X use */
338 		for (irq = sc->gic_mbi_start; irq <= sc->gic_mbi_end; irq++) {
339 			sc->gic_irqs[irq].gi_pol = INTR_POLARITY_HIGH;
340 			sc->gic_irqs[irq].gi_trig = INTR_TRIGGER_EDGE;
341 			sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI;
342 		}
343 
344 		mtx_init(&sc->gic_mbi_mtx, "GICv3 mbi lock", NULL, MTX_DEF);
345 
346 		if (bootverbose) {
347 			device_printf(dev, "using spi %u to %u\n", sc->gic_mbi_start,
348 					sc->gic_mbi_end);
349 		}
350 	}
351 
352 	/*
353 	 * Read the Peripheral ID2 register. This is an implementation
354 	 * defined register, but seems to be implemented in all GICv3
355 	 * parts and Linux expects it to be there.
356 	 */
357 	sc->gic_pidr2 = gic_d_read(sc, 4, GICD_PIDR2);
358 
359 	/* Get the number of supported interrupt identifier bits */
360 	sc->gic_idbits = GICD_TYPER_IDBITS(typer);
361 
362 	if (bootverbose) {
363 		device_printf(dev, "SPIs: %u, IDs: %u\n",
364 		    sc->gic_nirqs, (1 << sc->gic_idbits) - 1);
365 	}
366 
367 	/* Train init sequence for boot CPU */
368 	for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) {
369 		err = (*init_func)(sc);
370 		if (err != 0)
371 			return (err);
372 	}
373 
374 	return (0);
375 }
376 
/*
 * Detach: release bus resources and per-CPU Re-Distributor bookkeeping.
 * Panics if the PIC is still registered with the interrupt framework,
 * since there is no deregistration path yet.
 */
int
gic_v3_detach(device_t dev)
{
	struct gic_v3_softc *sc;
	size_t i;
	int rid;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		/*
		 * XXX: We should probably deregister PIC
		 */
		if (sc->gic_registered)
			panic("Trying to detach registered PIC");
	}
	/* One resource for the Distributor plus one per redist region. */
	for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++)
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]);

	/* Per-CPU descriptors; presumably from gic_v3_redist_alloc(). */
	for (i = 0; i <= mp_maxid; i++)
		free(sc->gic_redists.pcpu[i], M_GIC_V3);

	free(sc->gic_res, M_GIC_V3);
	free(sc->gic_redists.regions, M_GIC_V3);

	return (0);
}
404 
405 static int
gic_v3_get_domain(device_t dev,device_t child,int * domain)406 gic_v3_get_domain(device_t dev, device_t child, int *domain)
407 {
408 	struct gic_v3_devinfo *di;
409 
410 	di = device_get_ivars(child);
411 	if (di->gic_domain < 0)
412 		return (ENOENT);
413 
414 	*domain = di->gic_domain;
415 	return (0);
416 }
417 
/*
 * Read an instance variable exported to child devices (e.g. the ITS).
 * Returns ENOENT for unknown ivars.
 */
static int
gic_v3_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct gic_v3_softc *sc;

	sc = device_get_softc(dev);

	switch (which) {
	case GICV3_IVAR_NIRQS:
		/* Presumably the IRQ ID space left over for children,
		 * split evenly between them — confirm against callers. */
		*result = (intr_nirq - sc->gic_nirqs) / sc->gic_nchildren;
		return (0);
	case GICV3_IVAR_REDIST:
		/* The calling CPU's Re-Distributor descriptor. */
		*result = (uintptr_t)sc->gic_redists.pcpu[PCPU_GET(cpuid)];
		return (0);
	case GIC_IVAR_HW_REV:
		/* Architecture revision decoded from Peripheral ID2. */
		KASSERT(
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv3 ||
		    GICR_PIDR2_ARCH(sc->gic_pidr2) == GICR_PIDR2_ARCH_GICv4,
		    ("gic_v3_read_ivar: Invalid GIC architecture: %d (%.08X)",
		     GICR_PIDR2_ARCH(sc->gic_pidr2), sc->gic_pidr2));
		*result = GICR_PIDR2_ARCH(sc->gic_pidr2);
		return (0);
	case GIC_IVAR_BUS:
		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
		    ("gic_v3_read_ivar: Unknown bus type"));
		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
		    ("gic_v3_read_ivar: Invalid bus type %u", sc->gic_bus));
		*result = sc->gic_bus;
		return (0);
	}

	return (ENOENT);
}
451 
/*
 * Main interrupt filter.  Acknowledges and dispatches group 1 interrupts
 * in a loop until the CPU interface returns an out-of-range ID (e.g. the
 * spurious ID), at which point all pending work has been handled.
 */
int
arm_gic_v3_intr(void *arg)
{
	struct gic_v3_softc *sc = arg;
	struct gic_v3_irqsrc *gi;
	struct intr_pic *pic;
	uint64_t active_irq;
	struct trapframe *tf;

	pic = sc->gic_pic;

	while (1) {
		if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) {
			/*
			 * Hardware:		Cavium ThunderX
			 * Chip revision:	Pass 1.0 (early version)
			 *			Pass 1.1 (production)
			 * ERRATUM:		22978, 23154
			 */
			__asm __volatile(
			    "nop;nop;nop;nop;nop;nop;nop;nop;	\n"
			    "mrs %0, ICC_IAR1_EL1		\n"
			    "nop;nop;nop;nop;			\n"
			    "dsb sy				\n"
			    : "=&r" (active_irq));
		} else {
			/* Acknowledge the highest-priority pending IRQ. */
			active_irq = gic_icc_read(IAR1);
		}

		/* LPIs (and above) belong to a child PIC, e.g. the ITS. */
		if (active_irq >= GIC_FIRST_LPI) {
			intr_child_irq_handler(pic, active_irq);
			continue;
		}

		/*
		 * IDs between gic_nirqs and GIC_FIRST_LPI are special
		 * (e.g. spurious); nothing left to service.
		 */
		if (__predict_false(active_irq >= sc->gic_nirqs))
			return (FILTER_HANDLED);

		tf = curthread->td_intr_frame;
		gi = &sc->gic_irqs[active_irq];
		if (active_irq <= GIC_LAST_SGI) {
			/* Call EOI for all IPI before dispatch. */
			gic_icc_write(EOIR1, (uint64_t)active_irq);
#ifdef SMP
			intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
#else
			device_printf(sc->dev, "SGI %ju on UP system detected\n",
			    (uintmax_t)(active_irq - GIC_FIRST_SGI));
#endif
		} else if (active_irq >= GIC_FIRST_PPI &&
		    active_irq <= GIC_LAST_SPI) {
			/* Edge interrupts are EOIed before dispatch. */
			if (gi->gi_trig == INTR_TRIGGER_EDGE)
				gic_icc_write(EOIR1, gi->gi_irq);

			if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
				/* No handler attached: EOI and mask it. */
				if (gi->gi_trig != INTR_TRIGGER_EDGE)
					gic_icc_write(EOIR1, gi->gi_irq);
				gic_v3_disable_intr(sc->dev, &gi->gi_isrc);
				device_printf(sc->dev,
				    "Stray irq %lu disabled\n", active_irq);
			}
		}
	}
}
515 
516 #ifdef FDT
/*
 * Decode a 3-cell FDT interrupt specifier into a GIC IRQ number plus
 * trigger/polarity.  Returns EINVAL on malformed or unsupported data.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;

	if (ncells < 3)
		return (EINVAL);

	/*
	 * The 1st cell is the interrupt type:
	 *	0 = SPI
	 *	1 = PPI
	 * The 2nd cell contains the interrupt number:
	 *	[0 - 987] for SPI
	 *	[0 -  15] for PPI
	 * The 3rd cell is the flags, encoded as follows:
	 *   bits[3:0] trigger type and level flags
	 *	1 = edge triggered
	 *      2 = edge triggered (PPI only)
	 *	4 = level-sensitive
	 *	8 = level-sensitive (PPI only)
	 */
	switch (cells[0]) {
	case 0:
		irq = GIC_FIRST_SPI + cells[1];
		/* SPI irq is checked later (against gic_nirqs by caller). */
		break;
	case 1:
		irq = GIC_FIRST_PPI + cells[1];
		if (irq > GIC_LAST_PPI) {
			device_printf(dev, "unsupported PPI interrupt "
			    "number %u\n", cells[1]);
			return (EINVAL);
		}
		break;
	default:
		device_printf(dev, "unsupported interrupt type "
		    "configuration %u\n", cells[0]);
		return (EINVAL);
	}

	switch (cells[2] & FDT_INTR_MASK) {
	case FDT_INTR_EDGE_RISING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_EDGE_FALLING:
		*trigp = INTR_TRIGGER_EDGE;
		*polp = INTR_POLARITY_LOW;
		break;
	case FDT_INTR_LEVEL_HIGH:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_HIGH;
		break;
	case FDT_INTR_LEVEL_LOW:
		*trigp = INTR_TRIGGER_LEVEL;
		*polp = INTR_POLARITY_LOW;
		break;
	default:
		device_printf(dev, "unsupported trigger/polarity "
		    "configuration 0x%02x\n", cells[2]);
		return (EINVAL);
	}

	/* Check the interrupt is valid: SPIs must be active-high. */
	if (irq >= GIC_FIRST_SPI && *polp != INTR_POLARITY_HIGH)
		return (EINVAL);

	*irqp = irq;
	return (0);
}
589 #endif
590 
591 static int
gic_map_msi(device_t dev,struct intr_map_data_msi * msi_data,u_int * irqp,enum intr_polarity * polp,enum intr_trigger * trigp)592 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
593     enum intr_polarity *polp, enum intr_trigger *trigp)
594 {
595 	struct gic_v3_irqsrc *gi;
596 
597 	/* SPI-mapped MSI */
598 	gi = (struct gic_v3_irqsrc *)msi_data->isrc;
599 	if (gi == NULL)
600 		return (ENXIO);
601 
602 	*irqp = gi->gi_irq;
603 
604 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
605 	*polp = INTR_POLARITY_HIGH;
606 	*trigp = INTR_TRIGGER_EDGE;
607 
608 	return (0);
609 }
610 
/*
 * Decode any supported interrupt mapping data (FDT, ACPI or MSI) into an
 * IRQ number plus polarity/trigger, then range-check all three.  polp and
 * trigp may be NULL when the caller only needs the IRQ number.
 */
static int
do_gic_v3_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	struct gic_v3_softc *sc;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif
	u_int irq;

	sc = device_get_softc(dev);

	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		/* ACPI data already carries decoded irq/pol/trig. */
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* SPI-mapped MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/* Reject IRQs beyond what the hardware reports (incl. SPIs). */
	if (irq >= sc->gic_nirqs)
		return (EINVAL);
	switch (pol) {
	case INTR_POLARITY_CONFORM:
	case INTR_POLARITY_LOW:
	case INTR_POLARITY_HIGH:
		break;
	default:
		return (EINVAL);
	}
	switch (trig) {
	case INTR_TRIGGER_CONFORM:
	case INTR_TRIGGER_EDGE:
	case INTR_TRIGGER_LEVEL:
		break;
	default:
		return (EINVAL);
	}

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
682 
683 static int
gic_v3_map_intr(device_t dev,struct intr_map_data * data,struct intr_irqsrc ** isrcp)684 gic_v3_map_intr(device_t dev, struct intr_map_data *data,
685     struct intr_irqsrc **isrcp)
686 {
687 	struct gic_v3_softc *sc;
688 	int error;
689 	u_int irq;
690 
691 	error = do_gic_v3_map_intr(dev, data, &irq, NULL, NULL);
692 	if (error == 0) {
693 		sc = device_get_softc(dev);
694 		*isrcp = GIC_INTR_ISRC(sc, irq);
695 	}
696 	return (error);
697 }
698 
/*
 * PIC setup_intr method: validate the mapping data against this source
 * and, on first setup, program trigger/polarity into the Re-Distributor
 * (PPIs) or Distributor (SPIs), then bind SPIs to a CPU.
 */
static int
gic_v3_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;
	uint32_t reg;
	u_int irq;
	int error;

	if (data == NULL)
		return (ENOTSUP);

	error = do_gic_v3_map_intr(dev, data, &irq, &pol, &trig);
	if (error != 0)
		return (error);

	/* Data must resolve to this very source with a concrete config. */
	if (gi->gi_irq != irq || pol == INTR_POLARITY_CONFORM ||
	    trig == INTR_TRIGGER_CONFORM)
		return (EINVAL);

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if (pol != gi->gi_pol || trig != gi->gi_trig)
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		gi->gi_pol = pol;
		gi->gi_trig = trig;
	}

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	if (irq >= GIC_FIRST_PPI && irq <= GIC_LAST_SPI) {
		mtx_lock_spin(&sc->gic_mtx);

		/* Set the trigger and polarity */
		if (irq <= GIC_LAST_PPI)
			reg = gic_r_read(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq));
		else
			reg = gic_d_read(sc, 4, GICD_ICFGR(irq));
		/* ICFGR holds 2 bits per IRQ; upper bit set selects edge. */
		if (trig == INTR_TRIGGER_LEVEL)
			reg &= ~(2 << ((irq % 16) * 2));
		else
			reg |= 2 << ((irq % 16) * 2);

		if (irq <= GIC_LAST_PPI) {
			gic_r_write(sc, 4,
			    GICR_SGI_BASE_SIZE + GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, REDIST);
		} else {
			gic_d_write(sc, 4, GICD_ICFGR(irq), reg);
			gic_v3_wait_for_rwp(sc, DIST);
		}

		mtx_unlock_spin(&sc->gic_mtx);

		gic_v3_bind_intr(dev, isrc);
	}

	return (0);
}
776 
777 static int
gic_v3_teardown_intr(device_t dev,struct intr_irqsrc * isrc,struct resource * res,struct intr_map_data * data)778 gic_v3_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
779     struct resource *res, struct intr_map_data *data)
780 {
781 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
782 
783 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
784 		gi->gi_pol = INTR_POLARITY_CONFORM;
785 		gi->gi_trig = INTR_TRIGGER_CONFORM;
786 	}
787 
788 	return (0);
789 }
790 
791 static void
gic_v3_disable_intr(device_t dev,struct intr_irqsrc * isrc)792 gic_v3_disable_intr(device_t dev, struct intr_irqsrc *isrc)
793 {
794 	struct gic_v3_softc *sc;
795 	struct gic_v3_irqsrc *gi;
796 	u_int irq;
797 
798 	sc = device_get_softc(dev);
799 	gi = (struct gic_v3_irqsrc *)isrc;
800 	irq = gi->gi_irq;
801 
802 	if (irq <= GIC_LAST_PPI) {
803 		/* SGIs and PPIs in corresponding Re-Distributor */
804 		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
805 		    GICD_I_MASK(irq));
806 		gic_v3_wait_for_rwp(sc, REDIST);
807 	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
808 		/* SPIs in distributor */
809 		gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
810 		gic_v3_wait_for_rwp(sc, DIST);
811 	} else
812 		panic("%s: Unsupported IRQ %u", __func__, irq);
813 }
814 
815 static void
gic_v3_enable_intr(device_t dev,struct intr_irqsrc * isrc)816 gic_v3_enable_intr(device_t dev, struct intr_irqsrc *isrc)
817 {
818 	struct gic_v3_softc *sc;
819 	struct gic_v3_irqsrc *gi;
820 	u_int irq;
821 
822 	sc = device_get_softc(dev);
823 	gi = (struct gic_v3_irqsrc *)isrc;
824 	irq = gi->gi_irq;
825 
826 	if (irq <= GIC_LAST_PPI) {
827 		/* SGIs and PPIs in corresponding Re-Distributor */
828 		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq),
829 		    GICD_I_MASK(irq));
830 		gic_v3_wait_for_rwp(sc, REDIST);
831 	} else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) {
832 		/* SPIs in distributor */
833 		gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq));
834 		gic_v3_wait_for_rwp(sc, DIST);
835 	} else
836 		panic("%s: Unsupported IRQ %u", __func__, irq);
837 }
838 
839 static void
gic_v3_pre_ithread(device_t dev,struct intr_irqsrc * isrc)840 gic_v3_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
841 {
842 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
843 
844 	gic_v3_disable_intr(dev, isrc);
845 	gic_icc_write(EOIR1, gi->gi_irq);
846 }
847 
848 static void
gic_v3_post_ithread(device_t dev,struct intr_irqsrc * isrc)849 gic_v3_post_ithread(device_t dev, struct intr_irqsrc *isrc)
850 {
851 
852 	gic_v3_enable_intr(dev, isrc);
853 }
854 
855 static void
gic_v3_post_filter(device_t dev,struct intr_irqsrc * isrc)856 gic_v3_post_filter(device_t dev, struct intr_irqsrc *isrc)
857 {
858 	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
859 
860 	if (gi->gi_trig == INTR_TRIGGER_EDGE)
861 		return;
862 
863 	gic_icc_write(EOIR1, gi->gi_irq);
864 }
865 
/*
 * Route an SPI to one CPU.  With no explicit binding CPUs are picked
 * round-robin; otherwise the first CPU of the requested set is used
 * (a GICv3 SPI targets exactly one CPU via GICD_IROUTER).
 */
static int
gic_v3_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct gic_v3_softc *sc;
	struct gic_v3_irqsrc *gi;
	int cpu;

	gi = (struct gic_v3_irqsrc *)isrc;
	/* SGIs and PPIs are per-CPU by construction; nothing to bind. */
	if (gi->gi_irq <= GIC_LAST_PPI)
		return (EINVAL);

	KASSERT(gi->gi_irq >= GIC_FIRST_SPI && gi->gi_irq <= GIC_LAST_SPI,
	    ("%s: Attempting to bind an invalid IRQ", __func__));

	sc = device_get_softc(dev);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		/* No explicit binding: choose the next CPU round-robin. */
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq),
		    CPU_AFFINITY(gic_irq_cpu));
	} else {
		/*
		 * We can only bind to a single CPU so select
		 * the first CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
		gic_d_write(sc, 8, GICD_IROUTER(gi->gi_irq), CPU_AFFINITY(cpu));
	}

	return (0);
}
898 
899 #ifdef SMP
/*
 * Bring the GIC up on a secondary (AP) CPU: run the secondary init
 * sequence, unmask the SGIs/PPIs configured for this CPU, then let
 * child PICs (e.g. the ITS) perform their own per-CPU init.
 */
static void
gic_v3_init_secondary(device_t dev)
{
	device_t child;
	struct gic_v3_softc *sc;
	gic_v3_initseq_t *init_func;
	struct intr_irqsrc *isrc;
	u_int cpu, irq;
	int err, i;

	sc = device_get_softc(dev);
	cpu = PCPU_GET(cpuid);

	/* Run the init sequence for this secondary CPU. */
	for (init_func = gic_v3_secondary_init; *init_func != NULL;
	    init_func++) {
		err = (*init_func)(sc);
		if (err != 0) {
			device_printf(dev,
			    "Could not initialize GIC for CPU%u\n", cpu);
			return;
		}
	}

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++) {
		isrc = GIC_INTR_ISRC(sc, irq);
		if (intr_isrc_init_on_cpu(isrc, cpu))
			gic_v3_enable_intr(dev, isrc);
	}

	/* Propagate per-CPU init to child interrupt controllers. */
	for (i = 0; i < sc->gic_nchildren; i++) {
		child = sc->gic_children[i];
		PIC_INIT_SECONDARY(child);
	}
}
943 
/*
 * Send the SGI backing 'isrc' as an IPI to every CPU in 'cpus'.  One
 * ICC_SGI1R_EL1 write can target up to 16 CPUs that share the same
 * Aff3.Aff2.Aff1 affinity prefix, so CPUs are batched per affinity
 * group, with one register write issued per non-empty group.
 */
static void
gic_v3_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;
	uint64_t aff, val, irq;
	int i;

#define	GIC_AFF_MASK	(CPU_AFF3_MASK | CPU_AFF2_MASK | CPU_AFF1_MASK)
#define	GIC_AFFINITY(i)	(CPU_AFFINITY(i) & GIC_AFF_MASK)
	aff = GIC_AFFINITY(0);
	irq = gi->gi_irq;
	val = 0;

	/* Iterate through all CPUs in set */
	for (i = 0; i <= mp_maxid; i++) {
		/* Move to the next affinity group */
		if (aff != GIC_AFFINITY(i)) {
			/* Flush the accumulated IPI for the previous group. */
			if (val != 0) {
				gic_icc_write(SGI1R, val);
				val = 0;
			}
			aff = GIC_AFFINITY(i);
		}

		/* Send the IPI to this cpu */
		if (CPU_ISSET(i, &cpus)) {
#define	ICC_SGI1R_AFFINITY(aff)					\
    (((uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT) |	\
     ((uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT) |	\
     ((uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT))
			/* Set the affinity when the first at this level */
			if (val == 0)
				val = ICC_SGI1R_AFFINITY(aff) |
				    irq << ICC_SGI1R_EL1_SGIID_SHIFT;
			/* Set the target-list bit (Aff0) for this CPU. */
			val |= 1 << CPU_AFF0(CPU_AFFINITY(i));
		}
	}

	/* Send the IPI to the last cpu affinity group */
	if (val != 0)
		gic_icc_write(SGI1R, val);
#undef GIC_AFF_MASK
#undef GIC_AFFINITY
}
991 
992 static int
gic_v3_ipi_setup(device_t dev,u_int ipi,struct intr_irqsrc ** isrcp)993 gic_v3_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
994 {
995 	struct intr_irqsrc *isrc;
996 	struct gic_v3_softc *sc = device_get_softc(dev);
997 
998 	if (sgi_first_unused > GIC_LAST_SGI)
999 		return (ENOSPC);
1000 
1001 	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
1002 	sgi_to_ipi[sgi_first_unused++] = ipi;
1003 
1004 	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
1005 
1006 	*isrcp = isrc;
1007 	return (0);
1008 }
1009 #endif /* SMP */
1010 
/*
 * Helper routines
 */
/*
 * Spin until the selected Distributor/Re-Distributor reports that a
 * register write has finished propagating (CTLR.RWP clears).  Panics
 * after roughly one second of waiting.
 */
static void
gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist)
{
	struct resource *res;
	u_int cpuid;
	size_t us_left = 1000000;

	cpuid = PCPU_GET(cpuid);

	switch (xdist) {
	case DIST:
		res = sc->gic_dist;
		break;
	case REDIST:
		res = &sc->gic_redists.pcpu[cpuid]->res;
		break;
	default:
		KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__));
		return;
	}

	/*
	 * NOTE(review): GICD_CTLR/GICD_CTLR_RWP are used for both targets;
	 * this presumably relies on GICR_CTLR sharing the same offset and
	 * RWP bit position — confirm against gic_v3_reg.h.
	 */
	while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) {
		DELAY(1);
		if (us_left-- == 0)
			panic("GICD Register write pending for too long");
	}
}
1041 
1042 /* CPU interface. */
/*
 * Program the CPU interface priority mask (ICC_PMR_EL1); interrupts
 * with insufficient priority relative to the mask are not delivered.
 */
static __inline void
gic_v3_cpu_priority(uint64_t mask)
{

	/* Set priority mask */
	gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK);
}
1050 
1051 static int
gic_v3_cpu_enable_sre(struct gic_v3_softc * sc)1052 gic_v3_cpu_enable_sre(struct gic_v3_softc *sc)
1053 {
1054 	uint64_t sre;
1055 	u_int cpuid;
1056 
1057 	cpuid = PCPU_GET(cpuid);
1058 	/*
1059 	 * Set the SRE bit to enable access to GIC CPU interface
1060 	 * via system registers.
1061 	 */
1062 	sre = READ_SPECIALREG(icc_sre_el1);
1063 	sre |= ICC_SRE_EL1_SRE;
1064 	WRITE_SPECIALREG(icc_sre_el1, sre);
1065 	isb();
1066 	/*
1067 	 * Now ensure that the bit is set.
1068 	 */
1069 	sre = READ_SPECIALREG(icc_sre_el1);
1070 	if ((sre & ICC_SRE_EL1_SRE) == 0) {
1071 		/* We are done. This was disabled in EL2 */
1072 		device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface "
1073 		    "via system registers\n", cpuid);
1074 		return (ENXIO);
1075 	} else if (bootverbose) {
1076 		device_printf(sc->dev,
1077 		    "CPU%u enabled CPU interface via system registers\n",
1078 		    cpuid);
1079 	}
1080 
1081 	return (0);
1082 }
1083 
/*
 * Per-CPU interface initialization: enable system-register access,
 * open the priority mask, select combined priority-drop/deactivate
 * EOI mode, and enable Group 1 interrupt delivery.
 */
static int
gic_v3_cpu_init(struct gic_v3_softc *sc)
{
	int err;

	/* Enable access to CPU interface via system registers */
	err = gic_v3_cpu_enable_sre(sc);
	if (err != 0)
		return (err);
	/* Priority mask to minimum - accept all interrupts */
	gic_v3_cpu_priority(GIC_PRIORITY_MIN);
	/* Disable EOI mode */
	gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE);
	/*
	 * Enable group 1 (non-secure) interrupts.
	 * NOTE(review): the IGRPEN0 enable constant is applied to the
	 * IGRPEN1 register; presumably both enables are bit 0 so this
	 * works, but an IGRPEN1-named constant would be clearer -
	 * confirm what the register header provides.
	 */
	gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN);

	return (0);
}
1102 
1103 /* Distributor */
/*
 * One-time Distributor initialization: disable it, configure every SPI
 * (group, trigger type, priority, masked), re-enable with affinity
 * routing, and finally route all SPIs to the boot CPU.
 */
static int
gic_v3_dist_init(struct gic_v3_softc *sc)
{
	uint64_t aff;
	u_int i;

	/*
	 * 1. Disable the Distributor
	 */
	gic_d_write(sc, 4, GICD_CTLR, 0);
	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 2. Configure the Distributor
	 */
	/* Set all SPIs to be Group 1 Non-secure */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IGROUPRn)
		gic_d_write(sc, 4, GICD_IGROUPR(i), 0xFFFFFFFF);

	/* Set all global interrupts to be level triggered, active low. */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn)
		gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000);

	/* Set priority to all shared interrupts */
	for (i = GIC_FIRST_SPI;
	    i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) {
		/* Set highest priority */
		gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX);
	}

	/*
	 * Disable all interrupts. Leave PPI and SGIs as they are enabled in
	 * Re-Distributor registers.
	 */
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn)
		gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF);

	gic_v3_wait_for_rwp(sc, DIST);

	/*
	 * 3. Enable Distributor
	 */
	/* Enable Distributor with ARE, Group 1 */
	gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A |
	    GICD_CTLR_G1);

	/*
	 * 4. Route all interrupts to boot CPU.
	 */
	aff = CPU_AFFINITY(0);
	for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++)
		gic_d_write(sc, 8, GICD_IROUTER(i), aff);

	return (0);
}
1159 
1160 /* Re-Distributor */
1161 static int
gic_v3_redist_alloc(struct gic_v3_softc * sc)1162 gic_v3_redist_alloc(struct gic_v3_softc *sc)
1163 {
1164 	u_int cpuid;
1165 
1166 	/* Allocate struct resource for all CPU's Re-Distributor registers */
1167 	for (cpuid = 0; cpuid <= mp_maxid; cpuid++)
1168 		if (CPU_ISSET(cpuid, &all_cpus) != 0)
1169 			sc->gic_redists.pcpu[cpuid] =
1170 				malloc(sizeof(*sc->gic_redists.pcpu[0]),
1171 				    M_GIC_V3, M_WAITOK);
1172 		else
1173 			sc->gic_redists.pcpu[cpuid] = NULL;
1174 	return (0);
1175 }
1176 
/*
 * Walk every Re-Distributor region looking for the frame whose
 * GICR_TYPER affinity matches the calling CPU, and store a resource
 * describing that frame in sc->gic_redists.pcpu[cpuid].  Returns 0 on
 * success, ENODEV when a region is not a GICv3/v4 Re-Distributor, and
 * ENXIO when no frame matches this CPU.
 */
static int
gic_v3_redist_find(struct gic_v3_softc *sc)
{
	struct resource r_res;
	bus_space_handle_t r_bsh;
	uint64_t aff;
	uint64_t typer;
	uint32_t pidr2;
	u_int cpuid;
	size_t i;

	cpuid = PCPU_GET(cpuid);

	aff = CPU_AFFINITY(cpuid);
	/* Affinity in format for comparison with typer */
	aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) |
	    (CPU_AFF1(aff) << 8) | CPU_AFF0(aff);

	if (bootverbose) {
		device_printf(sc->dev,
		    "Start searching for Re-Distributor\n");
	}
	/* Iterate through Re-Distributor regions */
	for (i = 0; i < sc->gic_redists.nregions; i++) {
		/* Take a copy of the region's resource */
		r_res = *sc->gic_redists.regions[i];
		r_bsh = rman_get_bushandle(&r_res);

		/* Reject regions whose PIDR2 is not GICv3/GICv4. */
		pidr2 = bus_read_4(&r_res, GICR_PIDR2);
		switch (GICR_PIDR2_ARCH(pidr2)) {
		case GICR_PIDR2_ARCH_GICv3: /* fall through */
		case GICR_PIDR2_ARCH_GICv4:
			break;
		default:
			device_printf(sc->dev,
			    "No Re-Distributor found for CPU%u\n", cpuid);
			return (ENODEV);
		}

		/* Scan frames in this region until the LAST-flagged one. */
		do {
			typer = bus_read_8(&r_res, GICR_TYPER);
			if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) {
				KASSERT(sc->gic_redists.pcpu[cpuid] != NULL,
				    ("Invalid pointer to per-CPU redistributor"));
				/* Copy res contents to its final destination */
				sc->gic_redists.pcpu[cpuid]->res = r_res;
				sc->gic_redists.pcpu[cpuid]->lpi_enabled = false;
				if (bootverbose) {
					device_printf(sc->dev,
					    "CPU%u Re-Distributor has been found\n",
					    cpuid);
				}
				return (0);
			}

			/*
			 * Advance past this frame: RD + SGI pages, plus
			 * the VLPI + reserved pages when the frame
			 * implements virtual LPIs.
			 */
			r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
			if ((typer & GICR_TYPER_VLPIS) != 0) {
				r_bsh +=
				    (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE);
			}

			rman_set_bushandle(&r_res, r_bsh);
		} while ((typer & GICR_TYPER_LAST) == 0);
	}

	device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid);
	return (ENXIO);
}
1245 
1246 static int
gic_v3_redist_wake(struct gic_v3_softc * sc)1247 gic_v3_redist_wake(struct gic_v3_softc *sc)
1248 {
1249 	uint32_t waker;
1250 	size_t us_left = 1000000;
1251 
1252 	waker = gic_r_read(sc, 4, GICR_WAKER);
1253 	/* Wake up Re-Distributor for this CPU */
1254 	waker &= ~GICR_WAKER_PS;
1255 	gic_r_write(sc, 4, GICR_WAKER, waker);
1256 	/*
1257 	 * When clearing ProcessorSleep bit it is required to wait for
1258 	 * ChildrenAsleep to become zero following the processor power-on.
1259 	 */
1260 	while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) {
1261 		DELAY(1);
1262 		if (us_left-- == 0) {
1263 			panic("Could not wake Re-Distributor for CPU%u",
1264 			    PCPU_GET(cpuid));
1265 		}
1266 	}
1267 
1268 	if (bootverbose) {
1269 		device_printf(sc->dev, "CPU%u Re-Distributor woke up\n",
1270 		    PCPU_GET(cpuid));
1271 	}
1272 
1273 	return (0);
1274 }
1275 
/*
 * Per-CPU Re-Distributor initialization: locate and wake this CPU's
 * Re-Distributor frame, then configure its banked SGI/PPI registers
 * (group, enables, priority).
 */
static int
gic_v3_redist_init(struct gic_v3_softc *sc)
{
	int err;
	size_t i;

	/* The frame must be located before anything can be programmed. */
	err = gic_v3_redist_find(sc);
	if (err != 0)
		return (err);

	err = gic_v3_redist_wake(sc);
	if (err != 0)
		return (err);

	/* Configure SGIs and PPIs to be Group1 Non-secure */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_IGROUPR0,
	    0xFFFFFFFF);

	/* Disable PPIs (write the PPI bits to ICENABLER0) */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0,
	    GICR_I_ENABLER_PPI_MASK);
	/* Enable SGIs */
	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0,
	    GICR_I_ENABLER_SGI_MASK);

	/* Set priority for SGIs and PPIs */
	for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) {
		/*
		 * NOTE(review): the GICD_IPRIORITYR offset macro is used
		 * within the redistributor SGI frame; presumably it
		 * matches the GICR_IPRIORITYR layout - confirm against
		 * the register header.
		 */
		gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i),
		    GIC_PRIORITY_MAX);
	}

	gic_v3_wait_for_rwp(sc, REDIST);

	return (0);
}
1311 
1312 /*
1313  * SPI-mapped Message Based Interrupts -- a GICv3 MSI/MSI-X controller.
1314  */
1315 
/*
 * Allocate 'count' contiguous MBI SPIs for MSI use, starting on a
 * maxcount-aligned interrupt number as PCI MSI requires.  On success
 * the sources are returned in srcs[] and *pic is set to this device.
 * Returns ENXIO when no suitable free range exists.
 */
static int
gic_v3_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **srcs)
{
	struct gic_v3_softc *sc;
	int i, irq, end_irq;
	bool found;

	/* PCI MSI vector counts are powers of two. */
	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	sc = device_get_softc(dev);

	mtx_lock(&sc->gic_mbi_mtx);

	found = false;
	for (irq = sc->gic_mbi_start; irq < sc->gic_mbi_end; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == sc->gic_mbi_end) {
				found = false;
				break;
			}

			KASSERT((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((sc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == sc->gic_mbi_end) {
		mtx_unlock(&sc->gic_mbi_mtx);
		return (ENXIO);
	}

	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		sc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->gic_mbi_mtx);

	/* Hand the claimed range back; no lock needed once marked used. */
	for (i = 0; i < count; i++)
		srcs[i] = (struct intr_irqsrc *)&sc->gic_irqs[irq + i];
	*pic = dev;

	return (0);
}
1380 
1381 static int
gic_v3_release_msi(device_t dev,device_t child,int count,struct intr_irqsrc ** isrc)1382 gic_v3_release_msi(device_t dev, device_t child, int count,
1383     struct intr_irqsrc **isrc)
1384 {
1385 	struct gic_v3_softc *sc;
1386 	struct gic_v3_irqsrc *gi;
1387 	int i;
1388 
1389 	sc = device_get_softc(dev);
1390 
1391 	mtx_lock(&sc->gic_mbi_mtx);
1392 	for (i = 0; i < count; i++) {
1393 		gi = (struct gic_v3_irqsrc *)isrc[i];
1394 
1395 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1396 		    ("%s: Trying to release an unused MSI-X interrupt",
1397 		    __func__));
1398 
1399 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1400 	}
1401 	mtx_unlock(&sc->gic_mbi_mtx);
1402 
1403 	return (0);
1404 }
1405 
1406 static int
gic_v3_alloc_msix(device_t dev,device_t child,device_t * pic,struct intr_irqsrc ** isrcp)1407 gic_v3_alloc_msix(device_t dev, device_t child, device_t *pic,
1408     struct intr_irqsrc **isrcp)
1409 {
1410 	struct gic_v3_softc *sc;
1411 	int irq;
1412 
1413 	sc = device_get_softc(dev);
1414 
1415 	mtx_lock(&sc->gic_mbi_mtx);
1416 	/* Find an unused interrupt */
1417 	for (irq = sc->gic_mbi_start; irq < sc->gic_mbi_end; irq++) {
1418 		KASSERT((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
1419 		    ("%s: Non-MSI interrupt found", __func__));
1420 		if ((sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
1421 			break;
1422 	}
1423 	/* No free interrupt was found */
1424 	if (irq == sc->gic_mbi_end) {
1425 		mtx_unlock(&sc->gic_mbi_mtx);
1426 		return (ENXIO);
1427 	}
1428 
1429 	/* Mark the interrupt as used */
1430 	sc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
1431 	mtx_unlock(&sc->gic_mbi_mtx);
1432 
1433 	*isrcp = (struct intr_irqsrc *)&sc->gic_irqs[irq];
1434 	*pic = dev;
1435 
1436 	return (0);
1437 }
1438 
1439 static int
gic_v3_release_msix(device_t dev,device_t child,struct intr_irqsrc * isrc)1440 gic_v3_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
1441 {
1442 	struct gic_v3_softc *sc;
1443 	struct gic_v3_irqsrc *gi;
1444 
1445 	sc = device_get_softc(dev);
1446 	gi = (struct gic_v3_irqsrc *)isrc;
1447 
1448 	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1449 	    ("%s: Trying to release an unused MSI-X interrupt", __func__));
1450 
1451 	mtx_lock(&sc->gic_mbi_mtx);
1452 	gi->gi_flags &= ~GI_FLAG_MSI_USED;
1453 	mtx_unlock(&sc->gic_mbi_mtx);
1454 
1455 	return (0);
1456 }
1457 
/*
 * Provide the MSI doorbell for an SPI-mapped MBI: the device triggers
 * the interrupt by writing the SPI number (*data) to the Distributor's
 * non-secure SETSPI register, whose physical address is returned in
 * *addr.
 */
static int
gic_v3_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
    uint64_t *addr, uint32_t *data)
{
	struct gic_v3_softc *sc = device_get_softc(dev);
	struct gic_v3_irqsrc *gi = (struct gic_v3_irqsrc *)isrc;

	/* Offset of GICD_SETSPI_NSR within the Distributor frame. */
#define GICD_SETSPI_NSR 0x40
	*addr = vtophys(rman_get_virtual(sc->gic_dist)) + GICD_SETSPI_NSR;
	*data = gi->gi_irq;

	return (0);
}
1471