/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/machdep.h>
#include <machine/intr.h>
#include <machine/smp.h>
#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
#endif

#include <dev/psci/psci.h>

#include "pic_if.h"

#define	MP_QUIRK_CPULIST	0x01	/* The list of cpus may be wrong, */
					/* don't panic if one fails to start */
static uint32_t mp_quirks;

#ifdef FDT
static struct {
	const char *compat;
	uint32_t quirks;
} fdt_quirks[] = {
	{ "arm,foundation-aarch64",	MP_QUIRK_CPULIST },
	{ "arm,fvp-base",		MP_QUIRK_CPULIST },
	/* This is incorrect in some DTS files */
	{ "arm,vfp-base",		MP_QUIRK_CPULIST },
	{ NULL, 0 },
};
#endif

typedef void intr_ipi_send_t(void *, cpuset_t, u_int);
typedef void intr_ipi_handler_t(void *);

#define	INTR_IPI_NAMELEN	(MAXCOMLEN + 1)
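/*
 * Per-IPI glue between the machine-independent IPI numbers (IPI_AST,
 * IPI_PREEMPT, ...) and the root interrupt controller: the handler to
 * run, the PIC's send method plus the interrupt source to pass it, and
 * per-CPU counters. A NULL ii_count marks a slot that
 * intr_pic_ipi_setup() has not configured yet.
 */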
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;
	void *			ii_handler_arg;
	intr_ipi_send_t *	ii_send;
	void *			ii_send_arg;
	char			ii_name[INTR_IPI_NAMELEN];
	u_long *		ii_count;
};

static struct intr_ipi ipi_sources[INTR_IPI_COUNT];

static struct intr_ipi *intr_ipi_lookup(u_int);
static void intr_pic_ipi_setup(u_int, const char *, intr_ipi_handler_t *,
    void *);

extern struct pcpu __pcpu[];

static device_identify_t arm64_cpu_identify;
static device_probe_t arm64_cpu_probe;
static device_attach_t arm64_cpu_attach;

static void ipi_ast(void *);
static void ipi_hardclock(void *);
static void ipi_preempt(void *);
static void ipi_rendezvous(void *);
static void ipi_stop(void *);

struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

static device_t cpu_list[MAXCPU];

/*
 * Not all systems boot from the first CPU in the device tree. To work around
 * this we need to find which CPU we booted from so that, when we later
 * enable the secondary CPUs, we can skip it.
 */
static int cpu0 = -1;

void mpentry(unsigned long cpuid);
void init_secondary(uint64_t);

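/*
 * Secondary stacks: one kernel stack per possible AP. The BSP keeps the
 * stack it booted on, so only MAXCPU - 1 slots are needed; the cpuid
 * passed to mpentry() presumably selects an AP's slot.
 */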
uint8_t secondary_stacks[MAXCPU - 1][PAGE_SIZE * KSTACK_PAGES] __aligned(16);

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

static device_method_t arm64_cpu_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	arm64_cpu_identify),
	DEVMETHOD(device_probe,		arm64_cpu_probe),
	DEVMETHOD(device_attach,	arm64_cpu_attach),

	DEVMETHOD_END
};

static devclass_t arm64_cpu_devclass;
static driver_t arm64_cpu_driver = {
	"arm64_cpu",
	arm64_cpu_methods,
	0
};

DRIVER_MODULE(arm64_cpu, cpu, arm64_cpu_driver, arm64_cpu_devclass, 0, 0);
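
/*
 * Registered on the "cpu" bus (second DRIVER_MODULE() argument), so
 * arm64_cpu_identify() below runs against each cpu device the platform
 * enumerates, and device_get_unit() on the child serves as the logical
 * CPU id in probe/attach.
 */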

static void
arm64_cpu_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "arm64_cpu", -1) != NULL)
		return;
	if (BUS_ADD_CHILD(parent, 0, "arm64_cpu", -1) == NULL)
		device_printf(parent, "add child failed\n");
}

static int
arm64_cpu_probe(device_t dev)
{
	u_int cpuid;

	cpuid = device_get_unit(dev);
	if (cpuid >= MAXCPU || cpuid > mp_maxid)
		return (EINVAL);

	device_quiet(dev);
	return (0);
}

static int
arm64_cpu_attach(device_t dev)
{
	const uint32_t *reg;
	size_t reg_size;
	u_int cpuid;
	int i;

	cpuid = device_get_unit(dev);

	if (cpuid >= MAXCPU || cpuid > mp_maxid)
		return (EINVAL);
	KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid));

	reg = cpu_get_cpuid(dev, &reg_size);
	if (reg == NULL)
		return (EINVAL);

	if (bootverbose) {
		device_printf(dev, "register <");
		for (i = 0; i < reg_size; i++)
			printf("%s%x", (i == 0) ? "" : " ", reg[i]);
		printf(">\n");
	}

	/* Record the device so we can start this CPU later */
	cpu_list[cpuid] = dev;

	return (0);
}

static void
release_aps(void *dummy __unused)
{
	int i, started;

	/* Only release CPUs if they exist */
	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_STOP_HARD, "stop hard", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

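	/*
	 * Release the APs: the release-store to aps_ready pairs with the
	 * WFE spin in init_secondary(); "dsb ishst" makes the store
	 * visible before "sev" wakes the sleeping APs.
	 */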
	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up the other CPUs */
	__asm __volatile(
	    "dsb ishst	\n"
	    "sev	\n"
	    ::: "memory");

	printf("Release APs...");

	started = 0;
	for (i = 0; i < 2000; i++) {
		if (smp_started) {
			printf("done\n");
			return;
		}
		/*
		 * Don't time out while we are making progress. Some large
		 * systems can take a while to start all CPUs.
		 */
		if (smp_cpus > started) {
			i = 0;
			started = smp_cpus;
		}
		DELAY(1000);
	}

	printf("APs not started\n");
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

void
init_secondary(uint64_t cpu)
{
	struct pcpu *pcpup;

	pcpup = &__pcpu[cpu];
	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	/* Spin until the BSP releases the APs */
	while (!aps_ready)
		__asm __volatile("wfe");

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pcpup->pc_curthread = pcpup->pc_idlethread;
	pcpup->pc_curpcb = pcpup->pc_idlethread->td_pcb;

	/*
	 * Identify current CPU. This is necessary to setup
	 * affinity registers and to provide support for
	 * runtime chip identification.
	 */
	identify_cpu();
	install_cpu_errata();

	intr_pic_init_secondary();

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

#ifdef VFP
	vfp_init();
#endif

	dbg_init();
	pan_enable();

	/* Enable interrupts */
	intr_enable();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to init_secondary");
	/* NOTREACHED */
}

/*
 * Send an IPI through the interrupt controller.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}

/*
 * Setup IPI handler on interrupt controller.
 *
 * Not SMP coherent.
 */
static void
intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *arg)
{
	struct intr_irqsrc *isrc;
	struct intr_ipi *ii;
	int error;

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));

	error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
	if (error != 0)
		return;

	isrc->isrc_handlers++;

	ii = intr_ipi_lookup(ipi);
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = arg;
	ii->ii_send = pic_ipi_send;
	ii->ii_send_arg = isrc;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	ii->ii_count = intr_ipi_setup_counters(name);
}

static void
intr_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	ii->ii_send(ii->ii_send_arg, cpus, ipi);
}
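
/*
 * Send path in one picture, using only calls from this file:
 *
 *	ipi_all_but_self(IPI_AST)
 *	    -> intr_ipi_send(cpus, IPI_AST)
 *	    -> ii->ii_send(isrc, cpus, IPI_AST), i.e. pic_ipi_send()
 *	    -> PIC_IPI_SEND(intr_irq_root_dev, isrc, cpus, IPI_AST)
 */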

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_hardclock(void *dummy __unused)
{

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();
}

static void
ipi_preempt(void *dummy __unused)
{
	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(curthread);
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	CTR0(KTR_SMP, "IPI_STOP");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}

/* Determine if we are running on an MP machine */
int
cpu_mp_probe(void)
{

	/* ARM64TODO: Read the u bit of mpidr_el1 to determine this */
	return (1);
}

static bool
start_cpu(u_int id, uint64_t target_cpu)
{
	struct pcpu *pcpup;
	vm_paddr_t pa;
	u_int cpuid;
	int err;

	/* Check we are able to start this cpu */
	if (id > mp_maxid)
		return (false);

	KASSERT(id < MAXCPU, ("Too many CPUs"));

	/* We are already running on cpu 0 */
	if (id == cpu0)
		return (true);

	/*
	 * Rotate the CPU IDs to put the boot CPU as CPU 0. We keep the other
	 * CPUs ordered as they are likely grouped into clusters so it can be
	 * useful to keep that property, e.g. for the GICv3 driver to send
	 * an IPI to all CPUs in the cluster.
	 */
	cpuid = id;
	if (cpuid < cpu0)
		cpuid += mp_maxid + 1;
	cpuid -= cpu0;
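
	/*
	 * Worked example: with mp_maxid == 3 and the boot CPU at cpu0 == 2,
	 * ids map as 2 -> 0, 3 -> 1, 0 -> 2, 1 -> 3, i.e. a rotation that
	 * preserves the original ordering modulo mp_maxid + 1.
	 */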

	pcpup = &__pcpu[cpuid];
	pcpu_init(pcpup, cpuid, sizeof(struct pcpu));

	dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
	dpcpu_init(dpcpu[cpuid - 1], cpuid);

	printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
	pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);

	err = psci_cpu_on(target_cpu, pa, cpuid);
	if (err != PSCI_RETVAL_SUCCESS) {
		/*
		 * Panic here if INVARIANTS are enabled and PSCI failed to
		 * start the requested CPU. psci_cpu_on() returns
		 * PSCI_MISSING to indicate we are unable to use it to
		 * start the given CPU.
		 */
		KASSERT(err == PSCI_MISSING ||
		    (mp_quirks & MP_QUIRK_CPULIST) == MP_QUIRK_CPULIST,
		    ("Failed to start CPU %u (%lx)\n", id, target_cpu));

		pcpu_destroy(pcpup);
		kmem_free((vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE);
		dpcpu[cpuid - 1] = NULL;
		mp_ncpus--;

		/* Notify the user that the CPU failed to start */
		printf("Failed to start CPU %u (%lx)\n", id, target_cpu);
	} else
		CPU_SET(cpuid, &all_cpus);

	return (true);
}

#ifdef DEV_ACPI
static void
madt_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cpuid;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		cpuid = arg;

		start_cpu((*cpuid), intr->ArmMpidr);
		(*cpuid)++;
		break;
	default:
		break;
	}
}

static void
cpu_init_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cpuid;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return;

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return;
	}

	cpuid = 0;
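	/*
	 * MADT subtables start immediately after the fixed table header,
	 * so "madt + 1" is the first ACPI_SUBTABLE_HEADER and the table
	 * length bounds the walk.
	 */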
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    madt_handler, &cpuid);

	acpi_unmap_table(madt);
}
#endif

#ifdef FDT
static boolean_t
cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t target_cpu;
	int domain;

	target_cpu = reg[0];
	if (addr_size == 2) {
		target_cpu <<= 32;
		target_cpu |= reg[1];
	}
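
	/*
	 * E.g. a cpu node with #address-cells = <2> and reg = <0x0 0x100>
	 * yields target_cpu = 0x100 (Aff1 = 1); with a single cell, reg[0]
	 * is the MPIDR affinity value itself.
	 */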

	if (!start_cpu(id, target_cpu))
		return (FALSE);

	/* Try to read the NUMA node of this cpu */
	if (vm_ndomains == 1 ||
	    OF_getencprop(node, "numa-node-id", &domain, sizeof(domain)) <= 0)
		domain = 0;
	__pcpu[id].pc_domain = domain;
	if (domain < MAXMEMDOM)
		CPU_SET(id, &cpuset_domain[domain]);

	return (TRUE);
}
#endif

/* Initialize and fire up non-boot processors */
void
cpu_mp_start(void)
{
#ifdef FDT
	phandle_t node;
	int i;
#endif

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_SET(0, &all_cpus);

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		cpu_init_acpi();
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		node = OF_peer(0);
		for (i = 0; fdt_quirks[i].compat != NULL; i++) {
			if (ofw_bus_node_is_compatible(node,
			    fdt_quirks[i].compat) != 0) {
				mp_quirks = fdt_quirks[i].quirks;
			}
		}
		KASSERT(cpu0 >= 0, ("Current CPU was not found"));
		ofw_cpu_early_foreach(cpu_init_fdt, true);
		break;
#endif
	default:
		break;
	}
}

/* Introduce rest of cores to the world */
void
cpu_mp_announce(void)
{
}

#ifdef DEV_ACPI
static void
cpu_count_acpi_handler(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_MADT_GENERIC_INTERRUPT *intr;
	u_int *cores = arg;
	uint64_t mpidr_reg;

	switch(entry->Type) {
	case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
		intr = (ACPI_MADT_GENERIC_INTERRUPT *)entry;
		if (cpu0 < 0) {
			mpidr_reg = READ_SPECIALREG(mpidr_el1);
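			/*
			 * Compare only the MPIDR affinity fields: the
			 * mask keeps Aff3 (bits 39:32) and Aff2..Aff0
			 * (bits 23:0) and drops the U, MT and reserved
			 * bits in 31:24.
			 */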
			if ((mpidr_reg & 0xff00fffffful) == intr->ArmMpidr)
				cpu0 = *cores;
		}
		(*cores)++;
		break;
	default:
		break;
	}
}

static u_int
cpu_count_acpi(void)
{
	ACPI_TABLE_MADT *madt;
	vm_paddr_t physaddr;
	u_int cores;

	physaddr = acpi_find_table(ACPI_SIG_MADT);
	if (physaddr == 0)
		return (0);

	madt = acpi_map_table(physaddr, ACPI_SIG_MADT);
	if (madt == NULL) {
		printf("Unable to map the MADT, not starting APs\n");
		return (0);
	}

	cores = 0;
	acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
	    cpu_count_acpi_handler, &cores);

	acpi_unmap_table(madt);

	return (cores);
}
#endif

#ifdef FDT
static boolean_t
cpu_find_cpu0_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
{
	uint64_t mpidr_fdt, mpidr_reg;

	if (cpu0 < 0) {
		mpidr_fdt = reg[0];
		if (addr_size == 2) {
			mpidr_fdt <<= 32;
			mpidr_fdt |= reg[1];
		}

		mpidr_reg = READ_SPECIALREG(mpidr_el1);

		if ((mpidr_reg & 0xff00fffffful) == mpidr_fdt)
			cpu0 = id;
	}

	return (TRUE);
}
#endif

void
cpu_mp_setmaxid(void)
{
#if defined(DEV_ACPI) || defined(FDT)
	int cores;
#endif

	switch(arm64_bus_method) {
#ifdef DEV_ACPI
	case ARM64_BUS_ACPI:
		cores = cpu_count_acpi();
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the ACPI tables\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
			return;
		}
		break;
#endif
#ifdef FDT
	case ARM64_BUS_FDT:
		cores = ofw_cpu_early_foreach(cpu_find_cpu0_fdt, false);
		if (cores > 0) {
			cores = MIN(cores, MAXCPU);
			if (bootverbose)
				printf("Found %d CPUs in the device tree\n",
				    cores);
			mp_ncpus = cores;
			mp_maxid = cores - 1;
			return;
		}
		break;
#endif
	default:
		break;
	}

	if (bootverbose)
		printf("No CPU data, limiting to 1 core\n");
	mp_ncpus = 1;
	mp_maxid = 0;
}

/*
 * Lookup IPI source.
 */
static struct intr_ipi *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 * Interrupt controller dispatch function for IPIs. It should be called
 * straight from the interrupt controller once the associated interrupt
 * source has been identified, or from anybody who has the interrupt
 * source mapped.
 */
void
intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
{
	void *arg;
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);
	if (ii->ii_count == NULL)
		panic("%s: not setup IPI %u", __func__, ipi);

	intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));

	/*
	 * Supply the handler with the trapframe as its argument
	 * if no argument was registered at setup time.
	 */
	arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
	ii->ii_handler(arg);
}
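
/*
 * Receive path, mirroring the send picture above: the root PIC driver
 * decodes the interrupt, then
 *
 *	intr_ipi_dispatch(IPI_PREEMPT, tf)
 *	    -> per-CPU counter bump
 *	    -> ii->ii_handler(arg), i.e. ipi_preempt()
 */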

#ifdef notyet
/*
 * Map IPI into interrupt controller.
 *
 * Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(intr_irq_root_dev, isrc, &is_percpu);
	if (error == 0) {
		isrc->isrc_dev = intr_irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 * Setup IPI handler to interrupt source.
 *
 * Note that a platform could provide more ways to send and receive IPIs,
 * fast interrupts for example. In that case, one can call this function
 * with the AISHF_NOALLOC flag set and then call intr_ipi_dispatch() when
 * appropriate.
 *
 * Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc->isrc_count = intr_ipi_setup_counters(name);
	isrc->isrc_index = 0; /* it should not be used in IPI case */

	if (isrc->isrc_dev != NULL) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	return (0);
}
#endif

/* Sending IPI */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}
