xref: /f-stack/freebsd/arm/arm/gic.c (revision 22ce4aff)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2011 The FreeBSD Foundation
5  * All rights reserved.
6  *
7  * Developed by Damjan Marion <[email protected]>
8  *
9  * Based on OMAP4 GIC code by Ben Gray
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The name of the company nor the name of the author may be used to
20  *    endorse or promote products derived from this software without specific
21  *    prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include "opt_acpi.h"
40 #include "opt_platform.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/kernel.h>
46 #include <sys/ktr.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
49 #include <sys/rman.h>
50 #include <sys/pcpu.h>
51 #include <sys/proc.h>
52 #include <sys/cpuset.h>
53 #include <sys/lock.h>
54 #include <sys/mutex.h>
55 #include <sys/smp.h>
56 #include <sys/sched.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 
61 #include <machine/bus.h>
62 #include <machine/intr.h>
63 #include <machine/smp.h>
64 
65 #ifdef FDT
66 #include <dev/fdt/fdt_intr.h>
67 #include <dev/ofw/ofw_bus_subr.h>
68 #endif
69 
70 #ifdef DEV_ACPI
71 #include <contrib/dev/acpica/include/acpi.h>
72 #include <dev/acpica/acpivar.h>
73 #endif
74 
75 #include <arm/arm/gic.h>
76 #include <arm/arm/gic_common.h>
77 
78 #include "pic_if.h"
79 #include "msi_if.h"
80 
/* We are using GICv2 register naming */

/* Distributor Registers */

/* CPU interface register offsets (GICv2 names; GICv1 aliases noted). */
#define GICC_CTLR		0x0000			/* v1 ICCICR */
#define GICC_PMR		0x0004			/* v1 ICCPMR */
#define GICC_BPR		0x0008			/* v1 ICCBPR */
#define GICC_IAR		0x000C			/* v1 ICCIAR */
#define GICC_EOIR		0x0010			/* v1 ICCEOIR */
#define GICC_RPR		0x0014			/* v1 ICCRPR */
#define GICC_HPPIR		0x0018			/* v1 ICCHPIR */
#define GICC_ABPR		0x001C			/* v1 ICCABPR */
#define GICC_IIDR		0x00FC			/* v1 ICCIIDR*/

/* TYPER Registers */
/* GICD_TYPER bit: set when the GIC implements the Security Extensions. */
#define	GICD_TYPER_SECURITYEXT	0x400
#define	GIC_SUPPORT_SECEXT(_sc)	\
    ((_sc->typer & GICD_TYPER_SECURITYEXT) == GICD_TYPER_SECURITYEXT)

/*
 * Reset value written to every GICD_ICFGR during attach; platforms may
 * override.  0 configures level-triggered, active-low (see arm_gic_attach()).
 */
#ifndef	GIC_DEFAULT_ICFGR_INIT
#define	GIC_DEFAULT_ICFGR_INIT	0x00000000
#endif
104 
/*
 * Per-interrupt bookkeeping.  The MI intr_irqsrc is the first member so
 * callers may cast between struct intr_irqsrc * and struct gic_irqsrc *.
 */
struct gic_irqsrc {
	struct intr_irqsrc	gi_isrc;	/* MI interrupt source (must stay first) */
	uint32_t		gi_irq;		/* GIC interrupt number */
	enum intr_polarity	gi_pol;		/* configured polarity */
	enum intr_trigger	gi_trig;	/* configured trigger mode */
#define GI_FLAG_EARLY_EOI	(1 << 0) /* EOI written before dispatch (set for edge) */
#define GI_FLAG_MSI		(1 << 1) /* This interrupt source should only */
					 /* be used for MSI/MSI-X interrupts */
#define GI_FLAG_MSI_USED	(1 << 2) /* This irq is already allocated */
					 /* for a MSI/MSI-X interrupt */
	u_int			gi_flags;	/* GI_FLAG_* */
};
117 
/* Round-robin cursor used when auto-selecting a target CPU for an SPI. */
static u_int gic_irq_cpu;
static int arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc);

#ifdef SMP
/* Map from SGI number to the IPI it was allocated to carry. */
static u_int sgi_to_ipi[GIC_LAST_SGI - GIC_FIRST_SGI + 1];
/* Next SGI available for arm_gic_ipi_setup() to hand out. */
static u_int sgi_first_unused = GIC_FIRST_SGI;
#endif

/* Shorthand for the intr_irqsrc embedded in the per-irq bookkeeping. */
#define GIC_INTR_ISRC(sc, irq)	(&sc->gic_irqs[irq].gi_isrc)

static struct resource_spec arm_gic_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Distributor registers */
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* CPU Interrupt Intf. registers */
	{ SYS_RES_IRQ,	  0, RF_ACTIVE | RF_OPTIONAL }, /* Parent interrupt */
	{ -1, 0 }
};

/* Spurious-interrupt logging; defaults on for arm INVARIANTS kernels. */
#if defined(__arm__) && defined(INVARIANTS)
static int gic_debug_spurious = 1;
#else
static int gic_debug_spurious = 0;
#endif
TUNABLE_INT("hw.gic.debug_spurious", &gic_debug_spurious);

/* Per-CPU GIC target mask (one bit per CPU interface), indexed by cpuid. */
static u_int arm_gic_map[MAXCPU];

/* Singleton softc; only one GIC instance is supported (see arm_gic_attach). */
static struct arm_gic_softc *gic_sc = NULL;

/* Accessors for the CPU-interface (_c_) and distributor (_d_) register banks. */
#define	gic_c_read_4(_sc, _reg)		\
    bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg))
#define	gic_c_write_4(_sc, _reg, _val)		\
    bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val))
#define	gic_d_read_4(_sc, _reg)		\
    bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg))
#define	gic_d_write_1(_sc, _reg, _val)		\
    bus_space_write_1((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
#define	gic_d_write_4(_sc, _reg, _val)		\
    bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
156 
/*
 * Unmask one interrupt: set its bit in the distributor's Set-Enable
 * register so the GIC may forward it to a CPU interface.
 */
static inline void
gic_irq_unmask(struct arm_gic_softc *sc, u_int irq)
{

	gic_d_write_4(sc, GICD_ISENABLER(irq), GICD_I_MASK(irq));
}
163 
/*
 * Mask one interrupt: set its bit in the distributor's Clear-Enable
 * register so the GIC stops forwarding it.
 */
static inline void
gic_irq_mask(struct arm_gic_softc *sc, u_int irq)
{

	gic_d_write_4(sc, GICD_ICENABLER(irq), GICD_I_MASK(irq));
}
170 
171 static uint8_t
gic_cpu_mask(struct arm_gic_softc * sc)172 gic_cpu_mask(struct arm_gic_softc *sc)
173 {
174 	uint32_t mask;
175 	int i;
176 
177 	/* Read the current cpuid mask by reading ITARGETSR{0..7} */
178 	for (i = 0; i < 8; i++) {
179 		mask = gic_d_read_4(sc, GICD_ITARGETSR(4 * i));
180 		if (mask != 0)
181 			break;
182 	}
183 	/* No mask found, assume we are on CPU interface 0 */
184 	if (mask == 0)
185 		return (1);
186 
187 	/* Collect the mask in the lower byte */
188 	mask |= mask >> 16;
189 	mask |= mask >> 8;
190 
191 	return (mask);
192 }
193 
#ifdef SMP
/*
 * Per-CPU bring-up, run on each application processor: record this CPU's
 * GIC target mask for IPI delivery, reset banked priorities, enable the
 * local CPU interface and re-unmask any SGIs/PPIs that already have
 * handlers bound on this CPU.
 */
static void
arm_gic_init_secondary(device_t dev)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	u_int irq, cpu;

	/* Set the mask so we can find this CPU to send it IPIs */
	cpu = PCPU_GET(cpuid);
	arm_gic_map[cpu] = gic_cpu_mask(sc);

	/* Reset all interrupt priorities to 0 (highest). */
	for (irq = 0; irq < sc->nirqs; irq += 4)
		gic_d_write_4(sc, GICD_IPRIORITYR(irq), 0);

	/* Set all the interrupts to be in Group 0 (secure) */
	for (irq = 0; GIC_SUPPORT_SECEXT(sc) && irq < sc->nirqs; irq += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(irq), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register (0xff = allow all priorities). */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);

	/* Unmask attached SGI interrupts. */
	for (irq = GIC_FIRST_SGI; irq <= GIC_LAST_SGI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);

	/* Unmask attached PPI interrupts. */
	for (irq = GIC_FIRST_PPI; irq <= GIC_LAST_PPI; irq++)
		if (intr_isrc_init_on_cpu(GIC_INTR_ISRC(sc, irq), cpu))
			gic_irq_unmask(sc, irq);
}
#endif /* SMP */
233 
/*
 * Allocate and register one interrupt source per hardware interrupt:
 * SGIs (irq <= GIC_LAST_SGI) as IPI sources, PPIs as per-CPU sources,
 * everything else as plain SPI sources.  On success sc->gic_irqs and
 * sc->nirqs are set and 0 is returned; on failure the array is freed
 * and the intr_isrc_register() error is returned.
 */
static int
arm_gic_register_isrcs(struct arm_gic_softc *sc, uint32_t num)
{
	int error;
	uint32_t irq;
	struct gic_irqsrc *irqs;
	struct intr_irqsrc *isrc;
	const char *name;

	irqs = malloc(num * sizeof(struct gic_irqsrc), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	name = device_get_nameunit(sc->gic_dev);
	for (irq = 0; irq < num; irq++) {
		irqs[irq].gi_irq = irq;
		irqs[irq].gi_pol = INTR_POLARITY_CONFORM;
		irqs[irq].gi_trig = INTR_TRIGGER_CONFORM;

		isrc = &irqs[irq].gi_isrc;
		if (irq <= GIC_LAST_SGI) {
			error = intr_isrc_register(isrc, sc->gic_dev,
			    INTR_ISRCF_IPI, "%s,i%u", name, irq - GIC_FIRST_SGI);
		} else if (irq <= GIC_LAST_PPI) {
			error = intr_isrc_register(isrc, sc->gic_dev,
			    INTR_ISRCF_PPI, "%s,p%u", name, irq - GIC_FIRST_PPI);
		} else {
			error = intr_isrc_register(isrc, sc->gic_dev, 0,
			    "%s,s%u", name, irq - GIC_FIRST_SPI);
		}
		if (error != 0) {
			/* XXX call intr_isrc_deregister() */
			free(irqs, M_DEVBUF);
			return (error);
		}
	}
	sc->gic_irqs = irqs;
	sc->nirqs = num;
	return (0);
}
273 
274 static void
arm_gic_reserve_msi_range(device_t dev,u_int start,u_int count)275 arm_gic_reserve_msi_range(device_t dev, u_int start, u_int count)
276 {
277 	struct arm_gic_softc *sc;
278 	int i;
279 
280 	sc = device_get_softc(dev);
281 
282 	KASSERT((start + count) < sc->nirqs,
283 	    ("%s: Trying to allocate too many MSI IRQs: %d + %d > %d", __func__,
284 	    start, count, sc->nirqs));
285 	for (i = 0; i < count; i++) {
286 		KASSERT(sc->gic_irqs[start + i].gi_isrc.isrc_handlers == 0,
287 		    ("%s: MSI interrupt %d already has a handler", __func__,
288 		    count + i));
289 		KASSERT(sc->gic_irqs[start + i].gi_pol == INTR_POLARITY_CONFORM,
290 		    ("%s: MSI interrupt %d already has a polarity", __func__,
291 		    count + i));
292 		KASSERT(sc->gic_irqs[start + i].gi_trig == INTR_TRIGGER_CONFORM,
293 		    ("%s: MSI interrupt %d already has a trigger", __func__,
294 		    count + i));
295 		sc->gic_irqs[start + i].gi_pol = INTR_POLARITY_HIGH;
296 		sc->gic_irqs[start + i].gi_trig = INTR_TRIGGER_EDGE;
297 		sc->gic_irqs[start + i].gi_flags |= GI_FLAG_MSI;
298 	}
299 }
300 
/*
 * Common attach for the GIC distributor + CPU interface.  Maps both
 * register banks, registers one interrupt source per supported irq,
 * routes all SPIs to the boot CPU and enables the controller.  Returns
 * 0 on success or ENXIO on any failure.
 */
int
arm_gic_attach(device_t dev)
{
	struct		arm_gic_softc *sc;
	int		i;
	uint32_t	icciidr, mask, nirqs;

	/* Only a single GIC instance is supported. */
	if (gic_sc)
		return (ENXIO);

	sc = device_get_softc(dev);

	if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	sc->gic_dev = dev;
	gic_sc = sc;

	/* Initialize mutex */
	mtx_init(&sc->mutex, "GIC lock", NULL, MTX_SPIN);

	/* Distributor Interface */
	sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]);
	sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]);

	/* CPU Interface */
	sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]);
	sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]);

	/* Disable interrupt forwarding to the CPU interface */
	gic_d_write_4(sc, GICD_CTLR, 0x00);

	/* Get the number of interrupts from GICD_TYPER. */
	sc->typer = gic_d_read_4(sc, GICD_TYPER);
	nirqs = GICD_TYPER_I_NUM(sc->typer);

	if (arm_gic_register_isrcs(sc, nirqs)) {
		device_printf(dev, "could not register irqs\n");
		goto cleanup;
	}

	icciidr = gic_c_read_4(sc, GICC_IIDR);
	device_printf(dev,
	    "pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
	    GICD_IIDR_PROD(icciidr), GICD_IIDR_VAR(icciidr),
	    GICD_IIDR_REV(icciidr), GICD_IIDR_IMPL(icciidr), sc->nirqs);
	sc->gic_iidr = icciidr;

	/*
	 * Set all global interrupts to be level triggered, active low.
	 * Each GICD_ICFGR register configures 16 interrupts.
	 */
	for (i = 32; i < sc->nirqs; i += 16) {
		gic_d_write_4(sc, GICD_ICFGR(i), GIC_DEFAULT_ICFGR_INIT);
	}

	/* Disable all interrupts (32 clear-enable bits per register). */
	for (i = 32; i < sc->nirqs; i += 32) {
		gic_d_write_4(sc, GICD_ICENABLER(i), 0xFFFFFFFF);
	}

	/* Find the current cpu mask */
	mask = gic_cpu_mask(sc);
	/* Set the mask so we can find this CPU to send it IPIs */
	arm_gic_map[PCPU_GET(cpuid)] = mask;
	/* Set all four targets to this cpu */
	mask |= mask << 8;
	mask |= mask << 16;

	/*
	 * Zero every priority and target all SPIs at this CPU.
	 * NOTE(review): the `i > 32' test skips the GICD_ITARGETSR write
	 * for i == 32 (irqs 32-35); looks like an off-by-one -- confirm
	 * against upstream before changing hardware init behavior.
	 */
	for (i = 0; i < sc->nirqs; i += 4) {
		gic_d_write_4(sc, GICD_IPRIORITYR(i), 0);
		if (i > 32) {
			gic_d_write_4(sc, GICD_ITARGETSR(i), mask);
		}
	}

	/* Set all the interrupts to be in Group 0 (secure) */
	for (i = 0; GIC_SUPPORT_SECEXT(sc) && i < sc->nirqs; i += 32) {
		gic_d_write_4(sc, GICD_IGROUPR(i), 0);
	}

	/* Enable CPU interface */
	gic_c_write_4(sc, GICC_CTLR, 1);

	/* Set priority mask register (0xff = allow all priorities). */
	gic_c_write_4(sc, GICC_PMR, 0xff);

	/* Enable interrupt distribution */
	gic_d_write_4(sc, GICD_CTLR, 0x01);
	return (0);

cleanup:
	arm_gic_detach(dev);
	return(ENXIO);
}
395 
/*
 * Release the interrupt source array and bus resources.  Also used as
 * the error path from arm_gic_attach().  Always returns 0.
 */
int
arm_gic_detach(device_t dev)
{
	struct arm_gic_softc *sc;

	sc = device_get_softc(dev);

	if (sc->gic_irqs != NULL)
		free(sc->gic_irqs, M_DEVBUF);

	bus_release_resources(dev, arm_gic_spec, sc->gic_res);

	return (0);
}
410 
411 static int
arm_gic_print_child(device_t bus,device_t child)412 arm_gic_print_child(device_t bus, device_t child)
413 {
414 	struct resource_list *rl;
415 	int rv;
416 
417 	rv = bus_print_child_header(bus, child);
418 
419 	rl = BUS_GET_RESOURCE_LIST(bus, child);
420 	if (rl != NULL) {
421 		rv += resource_list_print_type(rl, "mem", SYS_RES_MEMORY,
422 		    "%#jx");
423 		rv += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
424 	}
425 
426 	rv += bus_print_child_footer(bus, child);
427 
428 	return (rv);
429 }
430 
431 static struct resource *
arm_gic_alloc_resource(device_t bus,device_t child,int type,int * rid,rman_res_t start,rman_res_t end,rman_res_t count,u_int flags)432 arm_gic_alloc_resource(device_t bus, device_t child, int type, int *rid,
433     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
434 {
435 	struct arm_gic_softc *sc;
436 	struct resource_list_entry *rle;
437 	struct resource_list *rl;
438 	int j;
439 
440 	KASSERT(type == SYS_RES_MEMORY, ("Invalid resoure type %x", type));
441 
442 	sc = device_get_softc(bus);
443 
444 	/*
445 	 * Request for the default allocation with a given rid: use resource
446 	 * list stored in the local device info.
447 	 */
448 	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
449 		rl = BUS_GET_RESOURCE_LIST(bus, child);
450 
451 		if (type == SYS_RES_IOPORT)
452 			type = SYS_RES_MEMORY;
453 
454 		rle = resource_list_find(rl, type, *rid);
455 		if (rle == NULL) {
456 			if (bootverbose)
457 				device_printf(bus, "no default resources for "
458 				    "rid = %d, type = %d\n", *rid, type);
459 			return (NULL);
460 		}
461 		start = rle->start;
462 		end = rle->end;
463 		count = rle->count;
464 	}
465 
466 	/* Remap through ranges property */
467 	for (j = 0; j < sc->nranges; j++) {
468 		if (start >= sc->ranges[j].bus && end <
469 		    sc->ranges[j].bus + sc->ranges[j].size) {
470 			start -= sc->ranges[j].bus;
471 			start += sc->ranges[j].host;
472 			end -= sc->ranges[j].bus;
473 			end += sc->ranges[j].host;
474 			break;
475 		}
476 	}
477 	if (j == sc->nranges && sc->nranges != 0) {
478 		if (bootverbose)
479 			device_printf(bus, "Could not map resource "
480 			    "%#jx-%#jx\n", (uintmax_t)start, (uintmax_t)end);
481 
482 		return (NULL);
483 	}
484 
485 	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
486 	    count, flags));
487 }
488 
489 static int
arm_gic_read_ivar(device_t dev,device_t child,int which,uintptr_t * result)490 arm_gic_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
491 {
492 	struct arm_gic_softc *sc;
493 
494 	sc = device_get_softc(dev);
495 
496 	switch(which) {
497 	case GIC_IVAR_HW_REV:
498 		KASSERT(GICD_IIDR_VAR(sc->gic_iidr) < 3,
499 		    ("arm_gic_read_ivar: Unknown IIDR revision %u (%.08x)",
500 		     GICD_IIDR_VAR(sc->gic_iidr), sc->gic_iidr));
501 		*result = GICD_IIDR_VAR(sc->gic_iidr);
502 		return (0);
503 	case GIC_IVAR_BUS:
504 		KASSERT(sc->gic_bus != GIC_BUS_UNKNOWN,
505 		    ("arm_gic_read_ivar: Unknown bus type"));
506 		KASSERT(sc->gic_bus <= GIC_BUS_MAX,
507 		    ("arm_gic_read_ivar: Invalid bus type %u", sc->gic_bus));
508 		*result = sc->gic_bus;
509 		return (0);
510 	}
511 
512 	return (ENOENT);
513 }
514 
/*
 * Top-level dispatch filter for this controller.  Acknowledges the
 * highest-priority pending interrupt by reading GICC_IAR and dispatches
 * it, looping until the returned id is out of range (no interrupt /
 * spurious id).
 */
int
arm_gic_intr(void *arg)
{
	struct arm_gic_softc *sc = arg;
	struct gic_irqsrc *gi;
	uint32_t irq_active_reg, irq;
	struct trapframe *tf;

	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;	/* low 10 bits carry the interrupt id */

	/*
	 * 1. We do EOI here because recent read value from active interrupt
	 *    register must be used for it. Another approach is to save this
	 *    value into associated interrupt source.
	 * 2. EOI must be done on same CPU where interrupt has fired. Thus
	 *    we must ensure that interrupted thread does not migrate to
	 *    another CPU.
	 * 3. EOI cannot be delayed by any preemption which could happen on
	 *    critical_exit() used in MI intr code, when interrupt thread is
	 *    scheduled. See next point.
	 * 4. IPI_RENDEZVOUS assumes that no preemption is permitted during
	 *    an action and any use of critical_exit() could break this
	 *    assumption. See comments within smp_rendezvous_action().
	 * 5. We always return FILTER_HANDLED as this is an interrupt
	 *    controller dispatch function. Otherwise, in cascaded interrupt
	 *    case, the whole interrupt subtree would be masked.
	 */

	if (irq >= sc->nirqs) {
		if (gic_debug_spurious)
			device_printf(sc->gic_dev,
			    "Spurious interrupt detected: last irq: %d on CPU%d\n",
			    sc->last_irq[PCPU_GET(cpuid)], PCPU_GET(cpuid));
		return (FILTER_HANDLED);
	}

	tf = curthread->td_intr_frame;
dispatch_irq:
	gi = sc->gic_irqs + irq;
	/*
	 * Note that GIC_FIRST_SGI is zero and is not used in 'if' statement
	 * as compiler complains that comparing u_int >= 0 is always true.
	 */
	if (irq <= GIC_LAST_SGI) {
#ifdef SMP
		/* Call EOI for all IPI before dispatch. */
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		intr_ipi_dispatch(sgi_to_ipi[gi->gi_irq], tf);
		goto next_irq;
#else
		device_printf(sc->gic_dev, "SGI %u on UP system detected\n",
		    irq - GIC_FIRST_SGI);
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		goto next_irq;
#endif
	}

	if (gic_debug_spurious)
		sc->last_irq[PCPU_GET(cpuid)] = irq;
	/* Edge-triggered sources get their EOI before dispatch. */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		gic_c_write_4(sc, GICC_EOIR, irq_active_reg);

	if (intr_isrc_dispatch(&gi->gi_isrc, tf) != 0) {
		/* No handler consumed it: mask the source to stop a storm. */
		gic_irq_mask(sc, irq);
		if ((gi->gi_flags & GI_FLAG_EARLY_EOI) != GI_FLAG_EARLY_EOI)
			gic_c_write_4(sc, GICC_EOIR, irq_active_reg);
		device_printf(sc->gic_dev, "Stray irq %u disabled\n", irq);
	}

next_irq:
	/* Check whether another interrupt is already pending. */
	arm_irq_memory_barrier(irq);
	irq_active_reg = gic_c_read_4(sc, GICC_IAR);
	irq = irq_active_reg & 0x3FF;
	if (irq < sc->nirqs)
		goto dispatch_irq;

	return (FILTER_HANDLED);
}
594 
/*
 * Program trigger mode and polarity for one SPI via a locked
 * read-modify-write of its 2-bit field in GICD_ICFGR.  SGIs and PPIs
 * (irq < GIC_FIRST_SPI) have fixed configuration and are left alone.
 * INTR_TRIGGER_CONFORM / INTR_POLARITY_CONFORM keep the current field.
 */
static void
gic_config(struct arm_gic_softc *sc, u_int irq, enum intr_trigger trig,
    enum intr_polarity pol)
{
	uint32_t reg;
	uint32_t mask;

	if (irq < GIC_FIRST_SPI)
		return;

	mtx_lock_spin(&sc->mutex);

	/* Extract this interrupt's 2-bit config field. */
	reg = gic_d_read_4(sc, GICD_ICFGR(irq));
	mask = (reg >> 2*(irq % 16)) & 0x3;

	if (pol == INTR_POLARITY_LOW) {
		mask &= ~GICD_ICFGR_POL_MASK;
		mask |= GICD_ICFGR_POL_LOW;
	} else if (pol == INTR_POLARITY_HIGH) {
		mask &= ~GICD_ICFGR_POL_MASK;
		mask |= GICD_ICFGR_POL_HIGH;
	}

	if (trig == INTR_TRIGGER_LEVEL) {
		mask &= ~GICD_ICFGR_TRIG_MASK;
		mask |= GICD_ICFGR_TRIG_LVL;
	} else if (trig == INTR_TRIGGER_EDGE) {
		mask &= ~GICD_ICFGR_TRIG_MASK;
		mask |= GICD_ICFGR_TRIG_EDGE;
	}

	/* Write the updated field back without disturbing its neighbors. */
	reg = reg & ~(0x3 << 2*(irq % 16));
	reg = reg | (mask << 2*(irq % 16));
	gic_d_write_4(sc, GICD_ICFGR(irq), reg);

	mtx_unlock_spin(&sc->mutex);
}
633 
634 static int
gic_bind(struct arm_gic_softc * sc,u_int irq,cpuset_t * cpus)635 gic_bind(struct arm_gic_softc *sc, u_int irq, cpuset_t *cpus)
636 {
637 	uint32_t cpu, end, mask;
638 
639 	end = min(mp_ncpus, 8);
640 	for (cpu = end; cpu < MAXCPU; cpu++)
641 		if (CPU_ISSET(cpu, cpus))
642 			return (EINVAL);
643 
644 	for (mask = 0, cpu = 0; cpu < end; cpu++)
645 		if (CPU_ISSET(cpu, cpus))
646 			mask |= arm_gic_map[cpu];
647 
648 	gic_d_write_1(sc, GICD_ITARGETSR(0) + irq, mask);
649 	return (0);
650 }
651 
#ifdef FDT
/*
 * Decode a devicetree interrupt specifier into a GIC irq number plus
 * polarity/trigger hints.  Supports the one-cell form (raw irq number,
 * everything conforming) and the standard three-cell GIC binding
 * documented inline below.  Returns EINVAL on malformed specifiers.
 */
static int
gic_map_fdt(device_t dev, u_int ncells, pcell_t *cells, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{

	if (ncells == 1) {
		*irqp = cells[0];
		*polp = INTR_POLARITY_CONFORM;
		*trigp = INTR_TRIGGER_CONFORM;
		return (0);
	}
	if (ncells == 3) {
		u_int irq, tripol;

		/*
		 * The 1st cell is the interrupt type:
		 *	0 = SPI
		 *	1 = PPI
		 * The 2nd cell contains the interrupt number:
		 *	[0 - 987] for SPI
		 *	[0 -  15] for PPI
		 * The 3rd cell is the flags, encoded as follows:
		 *   bits[3:0] trigger type and level flags
		 *	1 = low-to-high edge triggered
		 *	2 = high-to-low edge triggered
		 *	4 = active high level-sensitive
		 *	8 = active low level-sensitive
		 *   bits[15:8] PPI interrupt cpu mask
		 *	Each bit corresponds to each of the 8 possible cpus
		 *	attached to the GIC.  A bit set to '1' indicated
		 *	the interrupt is wired to that CPU.
		 */
		switch (cells[0]) {
		case 0:
			irq = GIC_FIRST_SPI + cells[1];
			/* SPI irq is checked later. */
			break;
		case 1:
			irq = GIC_FIRST_PPI + cells[1];
			if (irq > GIC_LAST_PPI) {
				device_printf(dev, "unsupported PPI interrupt "
				    "number %u\n", cells[1]);
				return (EINVAL);
			}
			break;
		default:
			device_printf(dev, "unsupported interrupt type "
			    "configuration %u\n", cells[0]);
			return (EINVAL);
		}

		/* Warn (but proceed) on flag combinations we cannot honor. */
		tripol = cells[2] & 0xff;
		if (tripol & 0xf0 || (tripol & FDT_INTR_LOW_MASK &&
		    cells[0] == 0))
			device_printf(dev, "unsupported trigger/polarity "
			    "configuration 0x%02x\n", tripol);

		*irqp = irq;
		*polp = INTR_POLARITY_CONFORM;
		*trigp = tripol & FDT_INTR_EDGE_MASK ?
		    INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL;
		return (0);
	}
	return (EINVAL);
}
#endif
719 
720 static int
gic_map_msi(device_t dev,struct intr_map_data_msi * msi_data,u_int * irqp,enum intr_polarity * polp,enum intr_trigger * trigp)721 gic_map_msi(device_t dev, struct intr_map_data_msi *msi_data, u_int *irqp,
722     enum intr_polarity *polp, enum intr_trigger *trigp)
723 {
724 	struct gic_irqsrc *gi;
725 
726 	/* Map a non-GICv2m MSI */
727 	gi = (struct gic_irqsrc *)msi_data->isrc;
728 	if (gi == NULL)
729 		return (ENXIO);
730 
731 	*irqp = gi->gi_irq;
732 
733 	/* MSI/MSI-X interrupts are always edge triggered with high polarity */
734 	*polp = INTR_POLARITY_HIGH;
735 	*trigp = INTR_TRIGGER_EDGE;
736 
737 	return (0);
738 }
739 
/*
 * Translate bus-specific mapping data (FDT, ACPI or MSI) into a
 * validated GIC irq number plus polarity/trigger.  polp/trigp may be
 * NULL when the caller only needs the irq number.  Returns EINVAL for
 * out-of-range values and ENOTSUP for unknown map-data types.
 */
static int
gic_map_intr(device_t dev, struct intr_map_data *data, u_int *irqp,
    enum intr_polarity *polp, enum intr_trigger *trigp)
{
	u_int irq;
	enum intr_polarity pol;
	enum intr_trigger trig;
	struct arm_gic_softc *sc;
	struct intr_map_data_msi *dam;
#ifdef FDT
	struct intr_map_data_fdt *daf;
#endif
#ifdef DEV_ACPI
	struct intr_map_data_acpi *daa;
#endif

	sc = device_get_softc(dev);
	switch (data->type) {
#ifdef FDT
	case INTR_MAP_DATA_FDT:
		daf = (struct intr_map_data_fdt *)data;
		if (gic_map_fdt(dev, daf->ncells, daf->cells, &irq, &pol,
		    &trig) != 0)
			return (EINVAL);
		/* MSI-reserved irqs must never be handed out via FDT. */
		KASSERT(irq >= sc->nirqs ||
		    (sc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) == 0,
		    ("%s: Attempting to map a MSI interrupt from FDT",
		    __func__));
		break;
#endif
#ifdef DEV_ACPI
	case INTR_MAP_DATA_ACPI:
		daa = (struct intr_map_data_acpi *)data;
		irq = daa->irq;
		pol = daa->pol;
		trig = daa->trig;
		break;
#endif
	case INTR_MAP_DATA_MSI:
		/* Non-GICv2m MSI */
		dam = (struct intr_map_data_msi *)data;
		if (gic_map_msi(dev, dam, &irq, &pol, &trig) != 0)
			return (EINVAL);
		break;
	default:
		return (ENOTSUP);
	}

	/* Validate before publishing anything to the caller. */
	if (irq >= sc->nirqs)
		return (EINVAL);
	if (pol != INTR_POLARITY_CONFORM && pol != INTR_POLARITY_LOW &&
	    pol != INTR_POLARITY_HIGH)
		return (EINVAL);
	if (trig != INTR_TRIGGER_CONFORM && trig != INTR_TRIGGER_EDGE &&
	    trig != INTR_TRIGGER_LEVEL)
		return (EINVAL);

	*irqp = irq;
	if (polp != NULL)
		*polp = pol;
	if (trigp != NULL)
		*trigp = trig;
	return (0);
}
804 
805 static int
arm_gic_map_intr(device_t dev,struct intr_map_data * data,struct intr_irqsrc ** isrcp)806 arm_gic_map_intr(device_t dev, struct intr_map_data *data,
807     struct intr_irqsrc **isrcp)
808 {
809 	int error;
810 	u_int irq;
811 	struct arm_gic_softc *sc;
812 
813 	error = gic_map_intr(dev, data, &irq, NULL, NULL);
814 	if (error == 0) {
815 		sc = device_get_softc(dev);
816 		*isrcp = GIC_INTR_ISRC(sc, irq);
817 	}
818 	return (error);
819 }
820 
/*
 * PIC_SETUP_INTR method: determine, validate and record the trigger and
 * polarity for an interrupt being set up, program the distributor and
 * bind SPIs to a CPU.  For MSI sources the configuration was fixed at
 * reservation time.  Subsequent setups on a shared source must match
 * the recorded configuration or EINVAL is returned.
 */
static int
arm_gic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
	enum intr_trigger trig;
	enum intr_polarity pol;

	if ((gi->gi_flags & GI_FLAG_MSI) == GI_FLAG_MSI) {
		/* GICv2m MSI */
		pol = gi->gi_pol;
		trig = gi->gi_trig;
		KASSERT(pol == INTR_POLARITY_HIGH,
		    ("%s: MSI interrupts must be active-high", __func__));
		KASSERT(trig == INTR_TRIGGER_EDGE,
		    ("%s: MSI interrupts must be edge triggered", __func__));
	} else if (data != NULL) {
		u_int irq;

		/* Get config for resource. */
		if (gic_map_intr(dev, data, &irq, &pol, &trig) ||
		    gi->gi_irq != irq)
			return (EINVAL);
	} else {
		pol = INTR_POLARITY_CONFORM;
		trig = INTR_TRIGGER_CONFORM;
	}

	/* Compare config if this is not first setup. */
	if (isrc->isrc_handlers != 0) {
		if ((pol != INTR_POLARITY_CONFORM && pol != gi->gi_pol) ||
		    (trig != INTR_TRIGGER_CONFORM && trig != gi->gi_trig))
			return (EINVAL);
		else
			return (0);
	}

	/* For MSI/MSI-X we should have already configured these */
	if ((gi->gi_flags & GI_FLAG_MSI) == 0) {
		if (pol == INTR_POLARITY_CONFORM)
			pol = INTR_POLARITY_LOW;	/* just pick some */
		if (trig == INTR_TRIGGER_CONFORM)
			trig = INTR_TRIGGER_EDGE;	/* just pick some */

		gi->gi_pol = pol;
		gi->gi_trig = trig;

		/* Edge triggered interrupts need an early EOI sent */
		if (gi->gi_trig == INTR_TRIGGER_EDGE)
			gi->gi_flags |= GI_FLAG_EARLY_EOI;
	}

	/*
	 * XXX - In case that per CPU interrupt is going to be enabled in time
	 *       when SMP is already started, we need some IPI call which
	 *       enables it on others CPUs. Further, it's more complicated as
	 *       pic_enable_source() and pic_disable_source() should act on
	 *       per CPU basis only. Thus, it should be solved here somehow.
	 */
	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	gic_config(sc, gi->gi_irq, gi->gi_trig, gi->gi_pol);
	arm_gic_bind_intr(dev, isrc);
	return (0);
}
888 
889 static int
arm_gic_teardown_intr(device_t dev,struct intr_irqsrc * isrc,struct resource * res,struct intr_map_data * data)890 arm_gic_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
891     struct resource *res, struct intr_map_data *data)
892 {
893 	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
894 
895 	if (isrc->isrc_handlers == 0 && (gi->gi_flags & GI_FLAG_MSI) == 0) {
896 		gi->gi_pol = INTR_POLARITY_CONFORM;
897 		gi->gi_trig = INTR_TRIGGER_CONFORM;
898 	}
899 	return (0);
900 }
901 
/*
 * PIC_ENABLE_INTR method: unmask the interrupt in the distributor.  A
 * memory barrier is issued first (see arm_irq_memory_barrier()) before
 * the unmask takes effect.
 */
static void
arm_gic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_irq_memory_barrier(gi->gi_irq);
	gic_irq_unmask(sc, gi->gi_irq);
}
911 
/*
 * PIC_DISABLE_INTR method: mask the interrupt in the distributor.
 */
static void
arm_gic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	gic_irq_mask(sc, gi->gi_irq);
}
920 
/*
 * PIC_PRE_ITHREAD method: mask the interrupt and signal EOI before its
 * ithread runs, so the source cannot re-fire until the handler is done
 * and arm_gic_post_ithread() unmasks it again.
 */
static void
arm_gic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	arm_gic_disable_intr(dev, isrc);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
930 
/*
 * PIC_POST_ITHREAD method: re-enable the interrupt after its ithread
 * has completed (counterpart of arm_gic_pre_ithread()).
 */
static void
arm_gic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	arm_irq_memory_barrier(0);
	arm_gic_enable_intr(dev, isrc);
}
938 
/*
 * PIC_POST_FILTER method: signal EOI after a filter handled the
 * interrupt, unless the early EOI was already written in the dispatch
 * loop (edge-triggered sources).
 */
static void
arm_gic_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	/* EOI for edge-triggered done earlier. */
	if ((gi->gi_flags & GI_FLAG_EARLY_EOI) == GI_FLAG_EARLY_EOI)
		return;

	arm_irq_memory_barrier(0);
	gic_c_write_4(sc, GICC_EOIR, gi->gi_irq);
}
952 
/*
 * PIC_BIND_INTR method: route an SPI to the CPU set in isrc->isrc_cpu.
 * SGIs and PPIs are per-CPU and cannot be rebound (EINVAL).  An empty
 * CPU set selects the next CPU round-robin.
 */
static int
arm_gic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	if (gi->gi_irq < GIC_FIRST_SPI)
		return (EINVAL);

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		/* No explicit affinity: spread SPIs over CPUs round-robin. */
		gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
		CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
	}
	return (gic_bind(sc, gi->gi_irq, &isrc->isrc_cpu));
}
968 
969 #ifdef SMP
static void
arm_gic_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct arm_gic_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;
	uint32_t val = 0, i;

	/*
	 * Build the GICD_SGIR target list: translate each CPU in the set
	 * through arm_gic_map[] to its GIC CPU interface mask.  The 'ipi'
	 * argument is unused here; the SGI number is taken from the
	 * interrupt source itself (gi->gi_irq).
	 */
	for (i = 0; i < MAXCPU; i++)
		if (CPU_ISSET(i, &cpus))
			val |= arm_gic_map[i] << GICD_SGI_TARGET_SHIFT;

	/* Writing GICD_SGIR raises the software-generated interrupt. */
	gic_d_write_4(sc, GICD_SGIR, val | gi->gi_irq);
}
984 
static int
arm_gic_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct intr_irqsrc *isrc;
	struct arm_gic_softc *sc = device_get_softc(dev);

	/* All SGIs already handed out; no slot left for another IPI. */
	if (sgi_first_unused > GIC_LAST_SGI)
		return (ENOSPC);

	/* Claim the next free SGI and record which IPI it will carry. */
	isrc = GIC_INTR_ISRC(sc, sgi_first_unused);
	sgi_to_ipi[sgi_first_unused++] = ipi;

	/* Record the current CPU in the source's CPU set. */
	CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);

	*isrcp = isrc;
	return (0);
}
1002 #endif
1003 
1004 static device_method_t arm_gic_methods[] = {
1005 	/* Bus interface */
1006 	DEVMETHOD(bus_print_child,	arm_gic_print_child),
1007 	DEVMETHOD(bus_add_child,	bus_generic_add_child),
1008 	DEVMETHOD(bus_alloc_resource,	arm_gic_alloc_resource),
1009 	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
1010 	DEVMETHOD(bus_activate_resource,bus_generic_activate_resource),
1011 	DEVMETHOD(bus_read_ivar,	arm_gic_read_ivar),
1012 
1013 	/* Interrupt controller interface */
1014 	DEVMETHOD(pic_disable_intr,	arm_gic_disable_intr),
1015 	DEVMETHOD(pic_enable_intr,	arm_gic_enable_intr),
1016 	DEVMETHOD(pic_map_intr,		arm_gic_map_intr),
1017 	DEVMETHOD(pic_setup_intr,	arm_gic_setup_intr),
1018 	DEVMETHOD(pic_teardown_intr,	arm_gic_teardown_intr),
1019 	DEVMETHOD(pic_post_filter,	arm_gic_post_filter),
1020 	DEVMETHOD(pic_post_ithread,	arm_gic_post_ithread),
1021 	DEVMETHOD(pic_pre_ithread,	arm_gic_pre_ithread),
1022 #ifdef SMP
1023 	DEVMETHOD(pic_bind_intr,	arm_gic_bind_intr),
1024 	DEVMETHOD(pic_init_secondary,	arm_gic_init_secondary),
1025 	DEVMETHOD(pic_ipi_send,		arm_gic_ipi_send),
1026 	DEVMETHOD(pic_ipi_setup,	arm_gic_ipi_setup),
1027 #endif
1028 	{ 0, 0 }
1029 };
1030 
/* Base class for the GIC; FDT/ACPI front ends subclass this driver. */
DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods,
    sizeof(struct arm_gic_softc));
1033 
1034 /*
1035  * GICv2m support -- the GICv2 MSI/MSI-X controller.
1036  */
1037 
/* GICv2m frame register offsets (relative to the frame's base address). */
#define	GICV2M_MSI_TYPER	0x008	/* MSI Type Register */
#define	 MSI_TYPER_SPI_BASE(x)	(((x) >> 16) & 0x3ff)	/* first SPI served */
#define	 MSI_TYPER_SPI_COUNT(x)	(((x) >> 0) & 0x3ff)	/* number of SPIs */
#define	GICv2M_MSI_SETSPI_NS	0x040	/* doorbell: devices write SPI # here */
#define	GICV2M_MSI_IIDR		0xFCC	/* Interface Identification Register */
1043 
1044 int
arm_gicv2m_attach(device_t dev)1045 arm_gicv2m_attach(device_t dev)
1046 {
1047 	struct arm_gicv2m_softc *sc;
1048 	uint32_t typer;
1049 	int rid;
1050 
1051 	sc = device_get_softc(dev);
1052 
1053 	rid = 0;
1054 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1055 	    RF_ACTIVE);
1056 	if (sc->sc_mem == NULL) {
1057 		device_printf(dev, "Unable to allocate resources\n");
1058 		return (ENXIO);
1059 	}
1060 
1061 	typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER);
1062 	sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer);
1063 	sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer);
1064 	sc->sc_spi_end = sc->sc_spi_start + sc->sc_spi_count;
1065 
1066 	/* Reserve these interrupts for MSI/MSI-X use */
1067 	arm_gic_reserve_msi_range(device_get_parent(dev), sc->sc_spi_start,
1068 	    sc->sc_spi_count);
1069 
1070 	mtx_init(&sc->sc_mutex, "GICv2m lock", NULL, MTX_DEF);
1071 
1072 	intr_msi_register(dev, sc->sc_xref);
1073 
1074 	if (bootverbose)
1075 		device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start,
1076 		    sc->sc_spi_start + sc->sc_spi_count - 1);
1077 
1078 	return (0);
1079 }
1080 
/*
 * Allocate 'count' contiguous MSI vectors for 'child', aligned to
 * 'maxcount' (both powers of two, asserted below).  On success the
 * matching irqsrcs are returned in 'srcs' and the parent GIC is returned
 * in 'pic'.  Returns ENXIO when no suitable free range exists.
 */
static int
arm_gicv2m_alloc_msi(device_t dev, device_t child, int count, int maxcount,
    device_t *pic, struct intr_irqsrc **srcs)
{
	struct arm_gic_softc *psc;
	struct arm_gicv2m_softc *sc;
	int i, irq, end_irq;
	bool found;

	KASSERT(powerof2(count), ("%s: bad count", __func__));
	KASSERT(powerof2(maxcount), ("%s: bad maxcount", __func__));

	/* The irqsrc array lives in the parent GIC's softc. */
	psc = device_get_softc(device_get_parent(dev));
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);

	/*
	 * First-fit scan of the frame's SPI window for 'count' consecutive
	 * unused MSI interrupts starting on a maxcount-aligned boundary.
	 */
	found = false;
	for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
		/* Start on an aligned interrupt */
		if ((irq & (maxcount - 1)) != 0)
			continue;

		/* Assume we found a valid range until shown otherwise */
		found = true;

		/* Check this range is valid */
		for (end_irq = irq; end_irq != irq + count; end_irq++) {
			/* No free interrupts */
			if (end_irq == sc->sc_spi_end) {
				found = false;
				break;
			}

			KASSERT((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI)!= 0,
			    ("%s: Non-MSI interrupt found", __func__));

			/* This is already used */
			if ((psc->gic_irqs[end_irq].gi_flags & GI_FLAG_MSI_USED) ==
			    GI_FLAG_MSI_USED) {
				found = false;
				break;
			}
		}
		if (found)
			break;
	}

	/* Not enough interrupts were found */
	if (!found || irq == sc->sc_spi_end) {
		mtx_unlock(&sc->sc_mutex);
		return (ENXIO);
	}

	for (i = 0; i < count; i++) {
		/* Mark the interrupt as used */
		psc->gic_irqs[irq + i].gi_flags |= GI_FLAG_MSI_USED;
	}
	mtx_unlock(&sc->sc_mutex);

	/* Hand back the irqsrcs; interrupts belong to the parent GIC. */
	for (i = 0; i < count; i++)
		srcs[i] = (struct intr_irqsrc *)&psc->gic_irqs[irq + i];
	*pic = device_get_parent(dev);

	return (0);
}
1147 
1148 static int
arm_gicv2m_release_msi(device_t dev,device_t child,int count,struct intr_irqsrc ** isrc)1149 arm_gicv2m_release_msi(device_t dev, device_t child, int count,
1150     struct intr_irqsrc **isrc)
1151 {
1152 	struct arm_gicv2m_softc *sc;
1153 	struct gic_irqsrc *gi;
1154 	int i;
1155 
1156 	sc = device_get_softc(dev);
1157 
1158 	mtx_lock(&sc->sc_mutex);
1159 	for (i = 0; i < count; i++) {
1160 		gi = (struct gic_irqsrc *)isrc[i];
1161 
1162 		KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
1163 		    ("%s: Trying to release an unused MSI-X interrupt",
1164 		    __func__));
1165 
1166 		gi->gi_flags &= ~GI_FLAG_MSI_USED;
1167 	}
1168 	mtx_unlock(&sc->sc_mutex);
1169 
1170 	return (0);
1171 }
1172 
/*
 * Allocate a single MSI-X vector: first-fit scan of the frame's SPI
 * window for an unused MSI interrupt.  On success the irqsrc is returned
 * in 'isrcp' and the parent GIC in 'pic'; returns ENXIO when the window
 * is exhausted.
 */
static int
arm_gicv2m_alloc_msix(device_t dev, device_t child, device_t *pic,
    struct intr_irqsrc **isrcp)
{
	struct arm_gicv2m_softc *sc;
	struct arm_gic_softc *psc;
	int irq;

	/* The irqsrc array lives in the parent GIC's softc. */
	psc = device_get_softc(device_get_parent(dev));
	sc = device_get_softc(dev);

	mtx_lock(&sc->sc_mutex);
	/* Find an unused interrupt */
	for (irq = sc->sc_spi_start; irq < sc->sc_spi_end; irq++) {
		KASSERT((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI) != 0,
		    ("%s: Non-MSI interrupt found", __func__));
		if ((psc->gic_irqs[irq].gi_flags & GI_FLAG_MSI_USED) == 0)
			break;
	}
	/* No free interrupt was found */
	if (irq == sc->sc_spi_end) {
		mtx_unlock(&sc->sc_mutex);
		return (ENXIO);
	}

	/* Mark the interrupt as used */
	psc->gic_irqs[irq].gi_flags |= GI_FLAG_MSI_USED;
	mtx_unlock(&sc->sc_mutex);

	*isrcp = (struct intr_irqsrc *)&psc->gic_irqs[irq];
	*pic = device_get_parent(dev);

	return (0);
}
1207 
/*
 * Release a single MSI-X vector by clearing its in-use flag.
 * Always returns 0.
 */
static int
arm_gicv2m_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
{
	struct arm_gicv2m_softc *sc;
	struct gic_irqsrc *gi;

	sc = device_get_softc(dev);
	gi = (struct gic_irqsrc *)isrc;

	/*
	 * NOTE(review): this assertion reads gi_flags before taking
	 * sc_mutex (release_msi checks under the lock) — presumably
	 * harmless for a caller releasing its own vector; confirm.
	 */
	KASSERT((gi->gi_flags & GI_FLAG_MSI_USED) == GI_FLAG_MSI_USED,
	    ("%s: Trying to release an unused MSI-X interrupt", __func__));

	mtx_lock(&sc->sc_mutex);
	gi->gi_flags &= ~GI_FLAG_MSI_USED;
	mtx_unlock(&sc->sc_mutex);

	return (0);
}
1226 
/*
 * Report the doorbell address/data pair a device must use to raise this
 * MSI: the physical address of the frame's SETSPI_NS register and the
 * SPI number as the payload.
 */
static int
arm_gicv2m_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
    uint64_t *addr, uint32_t *data)
{
	struct arm_gicv2m_softc *sc = device_get_softc(dev);
	struct gic_irqsrc *gi = (struct gic_irqsrc *)isrc;

	/* Devices need the physical, not kernel-virtual, doorbell address. */
	*addr = vtophys(rman_get_virtual(sc->sc_mem)) + GICv2M_MSI_SETSPI_NS;
	*data = gi->gi_irq;

	return (0);
}
1239 
/* Method table for the GICv2m MSI/MSI-X frame driver. */
static device_method_t arm_gicv2m_methods[] = {
	/* Device interface */
	DEVMETHOD(device_attach,	arm_gicv2m_attach),

	/* MSI/MSI-X */
	DEVMETHOD(msi_alloc_msi,	arm_gicv2m_alloc_msi),
	DEVMETHOD(msi_release_msi,	arm_gicv2m_release_msi),
	DEVMETHOD(msi_alloc_msix,	arm_gicv2m_alloc_msix),
	DEVMETHOD(msi_release_msix,	arm_gicv2m_release_msix),
	DEVMETHOD(msi_map_msi,		arm_gicv2m_map_msi),

	/* End */
	DEVMETHOD_END
};
1254 
/* Base class for the GICv2m frame; bus front ends subclass this driver. */
DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods,
    sizeof(struct arm_gicv2m_softc));
1257