/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/csan.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <machine/machdep.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#include "pic_if.h"

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
#endif

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;
	void *			ii_handler_arg;
	intr_ipi_send_t *	ii_send;
	void *			ii_send_arg;
	char			ii_name[INTR_IPI_NAMELEN];
	u_long *		ii_count;
};

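/* Per-IPI state, indexed by IPI number (IPI_AST, IPI_STOP, ...). */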
static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

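/* Saved register context for stopped CPUs, written by ipi_stop(). */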
struct pcb stoppcbs[MAXCPU];

#ifdef FDT
static u_int fdt_cpuid;
#endif

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;

/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
static void *bootstacks[MAXCPU];

/* Count of started APs, used to synchronize access to bootstack. */
static volatile int aps_started;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

static bool
is_boot_cpu(uint64_t target_cpu)
{

	return (__pcpu[0].pc_mpidr == (target_cpu & CPU_AFF_MASK));
}

static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up the other CPUs */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");
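	/*
	 * Above, "dsb ishst" orders the store to aps_ready before the
	 * "sev"; the event wakes the APs parked in the "wfe" loop in
	 * init_secondary().
	 */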

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (smp_started) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;
	pmap_t pmap0;
	u_int mpidr;

	/*
	 * Verify that the value passed in the 'cpu' argument (aka
	 * context_id) is valid. Some older U-Boot based PSCI
	 * implementations are buggy; they can pass a random value in it.
	 */
	mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;
	if (cpu >= MAXCPU || __pcpu[cpu].pc_mpidr != mpidr) {
		for (cpu = 0; cpu < mp_maxid; cpu++)
			if (__pcpu[cpu].pc_mpidr == mpidr)
				break;
		if (cpu >= MAXCPU)
			panic("MPIDR for this CPU is not in pcpu table");
	}

	pcpup = &__pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));
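	/* FreeBSD/arm64 reserves x18 to hold the per-CPU data pointer. */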

	/*
	 * Identify current CPU. This is necessary to setup
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 *
	 * We need this before signalling the CPU is ready to
	 * let the boot CPU use the results.
	 */
	identify_cpu(cpu);

	/* Ensure the stores in identify_cpu have completed */
	atomic_thread_fence_acq_rel();

	/* Signal the BSP and spin until it has released all APs. */
	atomic_add_int(&aps_started, 1);
	while (!atomic_load_int(&aps_ready))
		__asm __volatile("wfe");
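	/*
	 * The event for this "wfe" is sent by the "sev" in release_aps().
	 * wfe can also wake spuriously, so aps_ready is rechecked each
	 * time around the loop.
	 */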

	pcpup->pc_midr = get_midr();

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;

	/* Initialize curpmap to match TTBR0's current setting. */
	pmap0 = vmspace_pmap(&vmspace0);
	KASSERT(pmap_to_ttbr0(pmap0) == READ_SPECIALREG(ttbr0_el1),
	    ("pmap0 doesn't match cpu %ld's ttbr0", cpu));
	pcpup->pc_curpmap = pmap0;

	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	dbg_init();
	pan_enable();

	mtx_lock_spin(&ap_boot_mtx);
	atomic_add_rel_32(&smp_cpus, 1);
	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdown, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}
	mtx_unlock_spin(&ap_boot_mtx);

	kcsan_cpu_init(cpu);

	/*
	 * Assert that smp_after_idle_runnable condition is reasonable.
	 */
	MPASS(PCPU_GET(curpcb) == NULL);

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

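/*
 * The boot stacks can be freed only after each AP has switched off its
 * boot stack: pc_curpcb becomes non-NULL once the AP is running a real
 * thread, which happens after its sched_throw() in init_secondary().
 */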
static void
smp_after_idle_runnable(void *arg __unused)
{
	struct pcpu *pc;
	int cpu;

	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		if (bootstacks[cpu] != NULL) {
			pc = pcpu_find(cpu);
			while (atomic_load_ptr(&pc->pc_curpcb) == NULL)
				cpu_spinwait();
			kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE);
		}
	}
}
SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
    smp_after_idle_runnable, NULL);

/*
 *  Send an IPI through the interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	/*
	 * Ensure that this CPU's stores will be visible to IPI
	 * recipients before starting to send the interrupts.
	 */
	dsb(ishst);

	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 *  Set up an IPI handler on the interrupt controller.
 *
 *  Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_send = pic_ipi_send;
	ii->ii_send_arg = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);
}
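
/*
 * Illustrative sketch (not compiled): a hypothetical new IPI would be
 * registered the same way release_aps() wires up the standard ones.
 * IPI_EXAMPLE and ipi_example are assumptions, not part of the tree.
 */
#if 0
static void
ipi_example(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_EXAMPLE");
}

/* ...then, next to the other registrations in release_aps(): */
/* intr_pic_ipi_setup(IPI_EXAMPLE, "example", ipi_example, NULL); */
#endif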

static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

#ifdef DDB
	dbg_register_sync(NULL);
#endif

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

/*
 * Starts a given CPU. If the CPU is already running, i.e. it is the boot CPU,
 * do nothing. Returns true if the CPU is present and running.
 */
static bool
start_cpu(u_int cpuid, uint64_t target_cpu)
{
	struct pcpu *pcpup;
	vm_paddr_t pa;
	int err, naps;

	/* Check we are able to start this cpu */
	if (cpuid > mp_maxid)
		return (false);

	/* Skip boot CPU */
	if (is_boot_cpu(target_cpu))
		return (true);

	KASSERT(cpuid < MAXCPU, ("Too many CPUs"));

	pcpup = &__pcpu[cpuid];
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
	pcpup->pc_mpidr = target_cpu & CPU_AFF_MASK;

	dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	bootstacks[cpuid] = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);

	naps = atomic_load_int(&aps_started);
	bootstack = (char *)bootstacks[cpuid] + PAGE_SIZE;

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);
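	/*
	 * PSCI CPU_ON starts the target CPU at mpentry() (by physical
	 * address) with 'cpuid' as its context ID; mpentry() moves onto
	 * bootstack and hands the ID to init_secondary().
	 */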
	err = psci_cpu_on(target_cpu, pa, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU.  psci_cpu_on() returns PSCI_MISSING
		 * to indicate we are unable to use it to start the given CPU.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx), error %d\n",
		    cpuid, target_cpu, err));

		pcpu_destroy(pcpup);
		kmem_free((vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE);
		dpcpu[cpuid - 1] = NULL;
		kmem_free((vm_offset_t)bootstacks[cpuid], PAGE_SIZE);
		bootstacks[cpuid] = NULL;
		mp_ncpus--;
		return (false);
	}

	/* Wait for the AP to switch to its boot stack. */
	while (atomic_load_int(&aps_started) < naps + 1)
		cpu_spinwait();
	CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;
	u_int id;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;

		if (is_boot_cpu(intr->ArmMpidr))
			id = 0;
		else
			id = *cpuid;

		if (start_cpu(id, intr->ArmMpidr)) {
			__pcpu[id].pc_acpi_id = intr->Uid;
			/*
			 * Don't increment for the boot CPU, its CPU ID is
			 * reserved.
			 */
			if (!is_boot_cpu(intr->ArmMpidr))
				(*cpuid)++;
		}

		break;
	default:
		break;
	}
}

static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}
	/* Boot CPU is always 0 */
	cpuid = 1;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);

#if MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}
#endif

#ifdef FDT
static boolean_t
start_cpu_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	int domain;
	int cpuid;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}

	if (is_boot_cpu(target_cpu))
		cpuid = 0;
	else
		cpuid = fdt_cpuid;

	if (!start_cpu(cpuid, target_cpu))
		return (FALSE);

	/*
	 * Don't increment for the boot CPU, its CPU ID is reserved.
	 */
	if (!is_boot_cpu(target_cpu))
		fdt_cpuid++;

	/* Try to read the NUMA node of this CPU */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	__pcpu[cpuid].pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(cpuid, &cpuset_domain[domain]);
	return (TRUE);
}

static void
cpu_init_fdt(void)
{
	phandle_t node;
	int i;

	node = OF_peer(0);
	for (i = 0; fdt_quirks[i].compat != NULL; i++) {
		if (ofw_bus_node_is_compatible(node,
		    fdt_quirks[i].compat) != 0) {
			mp_quirks = fdt_quirks[i].quirks;
		}
	}
	fdt_cpuid = 1;
	ofw_cpu_early_foreach(start_cpu_fdt, true);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* CPU 0 is always boot CPU. */
	CPU_SET(0, &all_cpus);
	__pcpu[0].pc_mpidr = READ_SPECIALREG(mpidr_el1) & CPU_AFF_MASK;

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		mp_quirks = MP_QUIRK_CPULIST;
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cpu_init_fdt();
		break;
#endif
	default:
		break;
	}
}

/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cores = arg;

	switch (entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		(*cores)++;
		break;
	default:
		break;
	}
}

static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

void
cpu_mp_setmaxid(void)
{
	int cores;

	mp_ncpus = 1;
	mp_maxid = 0;

	switch (arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(NULL, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
		break;
#endif
	default:
		if (bootverbose)
			printf("No CPU data, limiting to 1 core\n");
		break;
	}

	if (TUNABLE_INT_FETCH("hw.ncpu", &cores)) {
		if (cores > 0 && cores < mp_ncpus) {
			mp_ncpus = cores;
			mp_maxid = cores - 1;
		}
	}
}
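
/*
 * Note (illustrative): setting the loader tunable hw.ncpu, e.g.
 * "hw.ncpu=4" in loader.conf, caps mp_ncpus/mp_maxid below the
 * enumerated CPU count via the TUNABLE_INT_FETCH() above.
 */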

/*
 *  Lookup IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 *  Interrupt controller dispatch function for IPIs. It should be
 *  called straight from the interrupt controller when the associated
 *  interrupt source is learned, or from anybody who has an interrupt
 *  source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
	void *arg;
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	/*
	 * Supply the IPI handler with the trapframe argument
	 * if no handler argument is registered.
	 */
	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
	ii->ii_handler(arg);
}

#ifdef notyet
/*
 *  Map IPI into interrupt controller.
 *
 *  Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
	if (error == 0) {
		isrc->isrc_dev = intr_irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 *  Set up an IPI handler for an interrupt source.
 *
 *  Note that there could be more ways to send and receive IPIs on a
 *  platform, fast interrupts for example. In that case, one can call
 *  this function with the AISHF_NOALLOC flag set and then call
 *  intr_ipi_dispatch() when appropriate.
 *
 *  Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc->isrc_count = intr_ipi_setup_counters(name);
	isrc->isrc_index = 0; /* it should not be used in the IPI case */

	if (isrc->isrc_dev != NULL) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	return (0);
}
#endif

/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}
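
/*
 * Usage sketch (illustrative only): machine-independent code reaches
 * the handlers above through these senders, for example:
 *
 *	ipi_cpu(2, IPI_PREEMPT);	  (runs ipi_preempt() on CPU 2)
 *	ipi_all_but_self(IPI_RENDEZVOUS); (other CPUs enter
 *					   smp_rendezvous_action())
 */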
904